
Allow locking updated tuples in tuple_update() and tuple_delete()

Currently, in read committed transaction isolation mode (the default), we have
the following sequence of actions when tuple_update()/tuple_delete() finds the
tuple updated by a concurrent transaction (a toy sketch of this flow follows
the list):

1. Attempt to update/delete tuple with tuple_update()/tuple_delete(), which
   returns TM_Updated.
2. Lock tuple with tuple_lock().
3. Re-evaluate plan qual (recheck if we still need to update/delete and
   calculate the new tuple for update).
4. Second attempt to update/delete tuple with tuple_update()/tuple_delete().
   This attempt should be successful, since the tuple was previously locked.
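
Below is a minimal, self-contained C sketch of that retry flow.  The TM_Result
values echo the table AM result codes named above, but every toy_* helper is
invented for illustration and is not the PostgreSQL API.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the table AM result codes mentioned above. */
typedef enum { TM_Ok, TM_Updated } TM_Result;

/* Steps 1 and 4: attempt the delete; report TM_Updated on a concurrent update. */
static TM_Result
toy_tuple_delete(bool concurrently_updated)
{
    return concurrently_updated ? TM_Updated : TM_Ok;
}

/* Step 2: separate call that locks the latest row version. */
static void
toy_tuple_lock(void)
{
    puts("step 2: lock the latest row version");
}

/* Step 3: re-evaluate the plan qual against the locked version. */
static bool
toy_recheck_qual(void)
{
    puts("step 3: re-evaluate plan qual");
    return true;
}

int
main(void)
{
    TM_Result   res = toy_tuple_delete(true);   /* step 1 fails with TM_Updated */

    if (res == TM_Updated)
    {
        toy_tuple_lock();                       /* step 2: extra lock call */
        if (toy_recheck_qual())                 /* step 3: still want to delete? */
            res = toy_tuple_delete(false);      /* step 4: retry now succeeds */
    }
    printf("final result: %s\n", res == TM_Ok ? "TM_Ok" : "TM_Updated");
    return 0;
}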

This commit eliminates step 2 by taking the lock during the first
tuple_update()/tuple_delete() call.  The heap table access method saves some
effort by checking the updated tuple once instead of twice.  Future
undo-based table access methods, which will start from the latest row version,
can immediately place a lock there.
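
As a hedged sketch of the new shape (again with invented toy_* names rather
than the real tableam interface): the delete routine locks the latest row
version itself when it hits a concurrent update, so the caller never issues a
separate lock call.

#include <stdbool.h>
#include <stdio.h>

typedef enum { TM_Ok, TM_Updated } TM_Result;

/* Toy "row": a value plus a flag saying a newer version exists. */
typedef struct
{
    int     value;
    bool    concurrently_updated;
    bool    locked;                 /* lock on the latest row version */
} ToyRow;

/*
 * Combined delete-and-lock: on a concurrent update, take the lock on the
 * latest row version before returning, instead of making the caller do it.
 */
static TM_Result
toy_tuple_delete_locking(ToyRow *row)
{
    if (row->concurrently_updated)
    {
        row->locked = true;         /* the AM takes the lock itself */
        puts("AM: tuple was updated; latest version is now locked");
        return TM_Updated;
    }
    puts("AM: tuple deleted");
    return TM_Ok;
}

int
main(void)
{
    ToyRow      row = {.value = 42, .concurrently_updated = true};

    if (toy_tuple_delete_locking(&row) == TM_Updated)
    {
        /* plan qual re-check would happen here; the lock is already held */
        row.concurrently_updated = false;
        toy_tuple_delete_locking(&row);         /* retry succeeds */
    }
    return 0;
}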

Also, this commit makes tuple_update()/tuple_delete() optionally save the old
tuple into a dedicated slot.  That saves the effort of re-fetching tuples in
certain cases.
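
The following stand-alone sketch illustrates the idea of handing the old tuple
back through a caller-provided slot; ToySlot and toy_delete_row are
hypothetical stand-ins, the real interface uses a TupleTableSlot.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical slot; the real code uses TupleTableSlot. */
typedef struct
{
    bool    valid;
    char    data[64];
} ToySlot;

/*
 * Delete a row and, if the caller supplied a slot, hand back the old row so
 * the caller need not re-fetch it (e.g. for AFTER ROW triggers).
 */
static void
toy_delete_row(const char *row, ToySlot *old_slot)
{
    printf("deleting: %s\n", row);
    if (old_slot != NULL)
    {
        snprintf(old_slot->data, sizeof(old_slot->data), "%s", row);
        old_slot->valid = true;
    }
}

int
main(void)
{
    ToySlot     old = {.valid = false};

    toy_delete_row("(1, 'foo')", &old);     /* caller wants the old tuple */
    if (old.valid)
        printf("old tuple available without re-fetch: %s\n", old.data);

    toy_delete_row("(2, 'bar')", NULL);     /* caller does not need it */
    return 0;
}

This is the same pattern visible in the trigger changes below, where
ExecARDeleteTriggers()/ExecARUpdateTriggers() now receive an already-filled
slot instead of re-fetching the row by tupleid.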

The code in nodeModifyTable.c is simplified by removing the nested switch/case.
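
Schematically (invented names, not the actual nodeModifyTable.c code), one
flat switch over the delete result is now enough, because the TM_Updated case
no longer needs an inner switch over a separate tuple_lock() outcome:

#include <stdio.h>

typedef enum { TM_Ok, TM_Updated, TM_Deleted } TM_Result;

/*
 * One flat switch: when TM_Updated comes back, the updated tuple is already
 * locked, so the only follow-up is recheck-and-retry.
 */
static const char *
toy_handle_delete_result(TM_Result res)
{
    switch (res)
    {
        case TM_Ok:
            return "deleted";
        case TM_Updated:
            return "already locked: re-evaluate qual, then retry the delete";
        case TM_Deleted:
            return "concurrently deleted: nothing left to do";
    }
    return "unreachable";
}

int
main(void)
{
    printf("%s\n", toy_handle_delete_result(TM_Updated));
    return 0;
}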

Discussion: https://postgr.es/m/CAPpHfdua-YFw3XTprfutzGp28xXLigFtzNbuFY8yPhqeq6X5kg%40mail.gmail.com
Reviewed-by: Aleksander Alekseev, Pavel Borisov, Vignesh C, Mason Sharp
Reviewed-by: Andres Freund, Chris Travers
commit 87985cc925
parent c7076ba6ad
Author: Alexander Korotkov
Date:   2024-03-26 01:27:56 +02:00

9 changed files with 502 additions and 346 deletions

@@ -2773,8 +2773,8 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
 void
 ExecARDeleteTriggers(EState *estate,
                      ResultRelInfo *relinfo,
-                     ItemPointer tupleid,
                      HeapTuple fdw_trigtuple,
+                     TupleTableSlot *slot,
                      TransitionCaptureState *transition_capture,
                      bool is_crosspart_update)
 {
@@ -2783,20 +2783,11 @@ ExecARDeleteTriggers(EState *estate,
     if ((trigdesc && trigdesc->trig_delete_after_row) ||
         (transition_capture && transition_capture->tcs_delete_old_table))
     {
-        TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
-
-        Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
-        if (fdw_trigtuple == NULL)
-            GetTupleForTrigger(estate,
-                               NULL,
-                               relinfo,
-                               tupleid,
-                               LockTupleExclusive,
-                               slot,
-                               NULL,
-                               NULL,
-                               NULL);
-        else
+        /*
+         * Put the FDW old tuple to the slot.  Otherwise, caller is expected
+         * to have old tuple alredy fetched to the slot.
+         */
+        if (fdw_trigtuple != NULL)
             ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
 
         AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
@@ -3087,18 +3078,17 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
  * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
  * and destination partitions, respectively, of a cross-partition update of
  * the root partitioned table mentioned in the query, given by 'relinfo'.
- * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
- * partition, and 'newslot' contains the "new" tuple in the destination
- * partition. This interface allows to support the requirements of
- * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
- * that case.
+ * 'oldslot' contains the "old" tuple in the source partition, and 'newslot'
+ * contains the "new" tuple in the destination partition. This interface
+ * allows to support the requirements of ExecCrossPartitionUpdateForeignKey();
+ * is_crosspart_update must be true in that case.
  */
 void
 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
                      ResultRelInfo *src_partinfo,
                      ResultRelInfo *dst_partinfo,
-                     ItemPointer tupleid,
                      HeapTuple fdw_trigtuple,
+                     TupleTableSlot *oldslot,
                      TupleTableSlot *newslot,
                      List *recheckIndexes,
                      TransitionCaptureState *transition_capture,
@@ -3117,29 +3107,14 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
          * separately for DELETE and INSERT to capture transition table rows.
          * In such case, either old tuple or new tuple can be NULL.
          */
-        TupleTableSlot *oldslot;
-        ResultRelInfo *tupsrc;
-
         Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
                !is_crosspart_update);
 
-        tupsrc = src_partinfo ? src_partinfo : relinfo;
-        oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
-
-        if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
-            GetTupleForTrigger(estate,
-                               NULL,
-                               tupsrc,
-                               tupleid,
-                               LockTupleExclusive,
-                               oldslot,
-                               NULL,
-                               NULL,
-                               NULL);
-        else if (fdw_trigtuple != NULL)
+        if (fdw_trigtuple != NULL)
+        {
+            Assert(oldslot);
             ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
-        else
-            ExecClearTuple(oldslot);
+        }
 
         AfterTriggerSaveEvent(estate, relinfo,
                               src_partinfo, dst_partinfo,