
tableam: Rename wrapper functions to match callback names.

Some of the wrapper functions didn't match the callback names. Many of
them stayed "consistent" with the historic naming of the wrapped
functionality. We decided that for most cases it's more important for
tableam to be consistent going forward than with the past.
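To illustrate the consistency being aimed for (a simplified sketch, not the
verbatim contents of this commit): each wrapper in access/tableam.h is a thin
dispatcher to a table-AM callback, and after the rename the wrapper's name
matches the callback field it invokes.

/*
 * Simplified sketch of a tableam.h wrapper after the rename; details may
 * differ from the real definition, but the wrapper name now matches the
 * rd_tableam callback field it dispatches to.
 */
static inline bool
table_tuple_fetch_row_version(Relation rel, ItemPointer tid,
                              Snapshot snapshot, TupleTableSlot *slot)
{
    return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
}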

The one exception is beginscan/endscan/...  because it'd have looked
odd to have systable_beginscan/endscan/... with a different naming
scheme, and changing the systable_* APIs would have caused way too
much churn (including breaking a lot of external users).
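For context, a minimal sketch of the systable_* calling pattern that keeps its
historic names (illustrative only; the function below and its sequential-scan
choice are placeholders, not code from this commit):

#include "postgres.h"
#include "access/genam.h"
#include "access/htup_details.h"
#include "utils/rel.h"

/* Count the rows of a catalog using a plain sequential systable scan. */
static int
count_catalog_rows(Relation catalog)
{
    SysScanDesc scan;
    HeapTuple   tup;
    int         count = 0;

    /* beginscan/getnext/endscan keep their pre-existing, non-callback names */
    scan = systable_beginscan(catalog, InvalidOid, false, NULL, 0, NULL);
    while ((tup = systable_getnext(scan)) != NULL)
        count++;
    systable_endscan(scan);

    return count;
}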

Author: Ashwin Agrawal, with some small additions by Andres Freund
Reviewed-By: Andres Freund
Discussion: https://postgr.es/m/CALfoeiugyrXZfX7n0ORCa4L-m834dzmaE8eFdbNR6PMpetU4Ww@mail.gmail.com
Committed by Andres Freund on 2019-05-23 16:25:48 -07:00
parent 54487d1560   commit 73b8c3bd28
14 changed files with 178 additions and 169 deletions

src/backend/commands/trigger.c

@@ -3332,7 +3332,7 @@ GetTupleForTrigger(EState *estate,
          */
         if (!IsolationUsesXactSnapshot())
             lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
-        test = table_lock_tuple(relation, tid, estate->es_snapshot, oldslot,
+        test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
                                 estate->es_output_cid,
                                 lockmode, LockWaitBlock,
                                 lockflags,
@@ -3386,7 +3386,7 @@ GetTupleForTrigger(EState *estate,
                     ereport(ERROR,
                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                              errmsg("could not serialize access due to concurrent update")));
-                elog(ERROR, "unexpected table_lock_tuple status: %u", test);
+                elog(ERROR, "unexpected table_tuple_lock status: %u", test);
                 break;

             case TM_Deleted:
@@ -3402,7 +3402,7 @@ GetTupleForTrigger(EState *estate,
                 break;

             default:
-                elog(ERROR, "unrecognized table_lock_tuple status: %u", test);
+                elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
                 return false;   /* keep compiler quiet */
         }
     }
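For reference, a hedged sketch of how a caller uses the renamed wrapper,
loosely modeled on the hunks above; the function name, simplified status
handling, and flag choice are placeholders, not code from this commit:

#include "postgres.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "nodes/lockoptions.h"
#include "storage/itemptr.h"

/* Lock the latest version of the row at tid into slot (simplified handling). */
static bool
lock_row_version(Relation rel, ItemPointer tid, Snapshot snapshot,
                 TupleTableSlot *slot, CommandId cid, LockTupleMode lockmode)
{
    TM_FailureData tmfd;
    TM_Result   test;

    test = table_tuple_lock(rel, tid, snapshot, slot, cid,
                            lockmode, LockWaitBlock,
                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION, &tmfd);
    switch (test)
    {
        case TM_Ok:
            return true;        /* slot now holds the locked row version */
        case TM_Deleted:
            return false;       /* the row is gone; nothing to lock */
        default:
            elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
            return false;       /* keep compiler quiet */
    }
}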
@@ -3412,7 +3412,8 @@ GetTupleForTrigger(EState *estate,
          * We expect the tuple to be present, thus very simple error handling
          * suffices.
          */
-        if (!table_fetch_row_version(relation, tid, SnapshotAny, oldslot))
+        if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
+                                           oldslot))
             elog(ERROR, "failed to fetch tuple for trigger");
     }
@@ -4270,7 +4271,9 @@ AfterTriggerExecute(EState *estate,
             {
                 LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
-                if (!table_fetch_row_version(rel, &(event->ate_ctid1), SnapshotAny, LocTriggerData.tg_trigslot))
+                if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
+                                                   SnapshotAny,
+                                                   LocTriggerData.tg_trigslot))
                     elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
                 LocTriggerData.tg_trigtuple =
                     ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
@@ -4287,7 +4290,9 @@ AfterTriggerExecute(EState *estate,
             {
                 LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
-                if (!table_fetch_row_version(rel, &(event->ate_ctid2), SnapshotAny, LocTriggerData.tg_newslot))
+                if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
+                                                   SnapshotAny,
+                                                   LocTriggerData.tg_newslot))
                     elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
                 LocTriggerData.tg_newtuple =
                     ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
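And the companion fetch pattern the hunks above switch to, again as a hedged
sketch rather than this commit's code; the function name and error text are
placeholders:

#include "postgres.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "storage/itemptr.h"
#include "utils/snapmgr.h"

/*
 * Fetch the row at tid into slot.  SnapshotAny is used because, as in the
 * trigger code above, the row is expected to exist regardless of visibility.
 */
static void
fetch_row_or_die(Relation rel, ItemPointer tid, TupleTableSlot *slot)
{
    if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, slot))
        elog(ERROR, "failed to fetch tuple");
}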