mirror of https://github.com/postgres/postgres.git (synced 2025-11-06)
Allow locking updated tuples in tuple_update() and tuple_delete()
Currently, in read committed transaction isolation mode (default), we have
the following sequence of actions when tuple_update()/tuple_delete() finds
the tuple updated by a concurrent transaction.

1. Attempt to update/delete the tuple with tuple_update()/tuple_delete(),
   which returns TM_Updated.
2. Lock the tuple with tuple_lock().
3. Re-evaluate the plan qual (recheck if we still need to update/delete and
   calculate the new tuple for update).
4. Second attempt to update/delete the tuple with tuple_update()/tuple_delete().
   This attempt should be successful, since the tuple was previously locked.

This commit eliminates step 2 by taking the lock during the first
tuple_update()/tuple_delete() call. The heap table access method saves some
effort by checking the updated tuple once instead of twice. Future
undo-based table access methods, which will start from the latest row
version, can immediately place a lock there.

Also, this commit makes tuple_update()/tuple_delete() optionally save the
old tuple into a dedicated slot. That saves the effort of re-fetching tuples
in certain cases. The code in nodeModifyTable.c is simplified by removing
the nested switch/case.

Discussion: https://postgr.es/m/CAPpHfdua-YFw3XTprfutzGp28xXLigFtzNbuFY8yPhqeq6X5kg%40mail.gmail.com
Reviewed-by: Aleksander Alekseev, Pavel Borisov, Vignesh C, Mason Sharp
Reviewed-by: Andres Freund, Chris Travers
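For orientation before the diff: a minimal sketch of the single-call path this commit enables at the table AM layer. The TABLE_MODIFY_* flags come from the changes below, and the extended table_tuple_delete() signature is assumed to mirror heapam_tuple_delete() in the diff; delete_with_lock() and its arguments are hypothetical.

/*
 * Minimal sketch (not part of the commit): delete a tuple in one call,
 * asking the table AM to lock the latest row version itself if a
 * concurrent update is hit.  Flags follow the diff below;
 * delete_with_lock() is a hypothetical helper.
 */
#include "postgres.h"
#include "access/tableam.h"

static TM_Result
delete_with_lock(Relation rel, ItemPointer tid, CommandId cid,
                 Snapshot snapshot, Snapshot crosscheck,
                 TupleTableSlot *oldSlot, TM_FailureData *tmfd)
{
    int         options = TABLE_MODIFY_WAIT |   /* replaces "bool wait" */
        TABLE_MODIFY_FETCH_OLD_TUPLE |          /* save old tuple in oldSlot */
        TABLE_MODIFY_LOCK_UPDATED;              /* lock latest version on TM_Updated */

    /*
     * On TM_Updated the AM has already locked the latest row version and
     * fetched it into oldSlot, so no separate tuple_lock() step is needed.
     */
    return table_tuple_delete(rel, tid, cid, snapshot, crosscheck,
                              options, tmfd, false /* changingPart */ ,
                              oldSlot);
}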
src/backend/access/heap/heapam.c
@@ -2496,10 +2496,11 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
 }
 
 /*
- * heap_delete - delete a tuple
+ * heap_delete - delete a tuple, optionally fetching it into a slot
  *
  * See table_tuple_delete() for an explanation of the parameters, except that
- * this routine directly takes a tuple rather than a slot.
+ * this routine directly takes a tuple rather than a slot.  Also, we don't
+ * place a lock on the tuple in this function, just fetch the existing version.
  *
  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
@@ -2508,8 +2509,9 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
  */
 TM_Result
 heap_delete(Relation relation, ItemPointer tid,
-            CommandId cid, Snapshot crosscheck, bool wait,
-            TM_FailureData *tmfd, bool changingPart)
+            CommandId cid, Snapshot crosscheck, int options,
+            TM_FailureData *tmfd, bool changingPart,
+            TupleTableSlot *oldSlot)
 {
     TM_Result   result;
     TransactionId xid = GetCurrentTransactionId();
@@ -2587,7 +2589,7 @@ l1:
                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                  errmsg("attempted to delete invisible tuple")));
     }
-    else if (result == TM_BeingModified && wait)
+    else if (result == TM_BeingModified && (options & TABLE_MODIFY_WAIT))
     {
         TransactionId xwait;
         uint16      infomask;
@@ -2728,7 +2730,30 @@ l1:
             tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
         else
             tmfd->cmax = InvalidCommandId;
-        UnlockReleaseBuffer(buffer);
+
+        /*
+         * If we're asked to lock the updated tuple, we just fetch the
+         * existing tuple.  That lets the caller save some resources on
+         * placing the lock.
+         */
+        if (result == TM_Updated &&
+            (options & TABLE_MODIFY_LOCK_UPDATED))
+        {
+            BufferHeapTupleTableSlot *bslot;
+
+            Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+            bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+            LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+            bslot->base.tupdata = tp;
+            ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+                                           oldSlot,
+                                           buffer);
+        }
+        else
+        {
+            UnlockReleaseBuffer(buffer);
+        }
         if (have_tuple_lock)
             UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
         if (vmbuffer != InvalidBuffer)
@@ -2902,8 +2927,24 @@ l1:
      */
     CacheInvalidateHeapTuple(relation, &tp, NULL);
 
-    /* Now we can release the buffer */
-    ReleaseBuffer(buffer);
+    /* Fetch the old tuple version if we're asked for that. */
+    if (options & TABLE_MODIFY_FETCH_OLD_TUPLE)
+    {
+        BufferHeapTupleTableSlot *bslot;
+
+        Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+        bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+        bslot->base.tupdata = tp;
+        ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+                                       oldSlot,
+                                       buffer);
+    }
+    else
+    {
+        /* Now we can release the buffer */
+        ReleaseBuffer(buffer);
+    }
 
     /*
      * Release the lmgr tuple lock, if we had it.
@@ -2935,8 +2976,8 @@ simple_heap_delete(Relation relation, ItemPointer tid)
 
     result = heap_delete(relation, tid,
                          GetCurrentCommandId(true), InvalidSnapshot,
-                         true /* wait for commit */ ,
-                         &tmfd, false /* changingPart */ );
+                         TABLE_MODIFY_WAIT /* wait for commit */ ,
+                         &tmfd, false /* changingPart */ , NULL);
     switch (result)
     {
         case TM_SelfModified:
@@ -2963,10 +3004,11 @@ simple_heap_delete(Relation relation, ItemPointer tid)
 }
 
 /*
- * heap_update - replace a tuple
+ * heap_update - replace a tuple, optionally fetching it into a slot
  *
  * See table_tuple_update() for an explanation of the parameters, except that
- * this routine directly takes a tuple rather than a slot.
+ * this routine directly takes a tuple rather than a slot.  Also, we don't
+ * place a lock on the tuple in this function, just fetch the existing version.
  *
  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
@@ -2975,9 +3017,9 @@ simple_heap_delete(Relation relation, ItemPointer tid)
  */
 TM_Result
 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
-            CommandId cid, Snapshot crosscheck, bool wait,
+            CommandId cid, Snapshot crosscheck, int options,
             TM_FailureData *tmfd, LockTupleMode *lockmode,
-            TU_UpdateIndexes *update_indexes)
+            TU_UpdateIndexes *update_indexes, TupleTableSlot *oldSlot)
 {
     TM_Result   result;
     TransactionId xid = GetCurrentTransactionId();
@@ -3154,7 +3196,7 @@ l2:
     result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
 
     /* see below about the "no wait" case */
-    Assert(result != TM_BeingModified || wait);
+    Assert(result != TM_BeingModified || (options & TABLE_MODIFY_WAIT));
 
     if (result == TM_Invisible)
     {
@@ -3163,7 +3205,7 @@ l2:
                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                  errmsg("attempted to update invisible tuple")));
     }
-    else if (result == TM_BeingModified && wait)
+    else if (result == TM_BeingModified && (options & TABLE_MODIFY_WAIT))
     {
         TransactionId xwait;
         uint16      infomask;
@@ -3367,7 +3409,30 @@ l2:
             tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
         else
             tmfd->cmax = InvalidCommandId;
-        UnlockReleaseBuffer(buffer);
+
+        /*
+         * If we're asked to lock the updated tuple, we just fetch the
+         * existing tuple.  That lets the caller save some resources on
+         * placing the lock.
+         */
+        if (result == TM_Updated &&
+            (options & TABLE_MODIFY_LOCK_UPDATED))
+        {
+            BufferHeapTupleTableSlot *bslot;
+
+            Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+            bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+            LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+            bslot->base.tupdata = oldtup;
+            ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+                                           oldSlot,
+                                           buffer);
+        }
+        else
+        {
+            UnlockReleaseBuffer(buffer);
+        }
         if (have_tuple_lock)
             UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
         if (vmbuffer != InvalidBuffer)
@@ -3846,7 +3911,26 @@ l2:
     /* Now we can release the buffer(s) */
     if (newbuf != buffer)
         ReleaseBuffer(newbuf);
-    ReleaseBuffer(buffer);
+
+    /* Fetch the old tuple version if we're asked for that. */
+    if (options & TABLE_MODIFY_FETCH_OLD_TUPLE)
+    {
+        BufferHeapTupleTableSlot *bslot;
+
+        Assert(TTS_IS_BUFFERTUPLE(oldSlot));
+        bslot = (BufferHeapTupleTableSlot *) oldSlot;
+
+        bslot->base.tupdata = oldtup;
+        ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata,
+                                       oldSlot,
+                                       buffer);
+    }
+    else
+    {
+        /* Now we can release the buffer */
+        ReleaseBuffer(buffer);
+    }
+
     if (BufferIsValid(vmbuffer_new))
         ReleaseBuffer(vmbuffer_new);
     if (BufferIsValid(vmbuffer))
@@ -4054,8 +4138,8 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
 
     result = heap_update(relation, otid, tup,
                          GetCurrentCommandId(true), InvalidSnapshot,
-                         true /* wait for commit */ ,
-                         &tmfd, &lockmode, update_indexes);
+                         TABLE_MODIFY_WAIT /* wait for commit */ ,
+                         &tmfd, &lockmode, update_indexes, NULL);
     switch (result)
     {
         case TM_SelfModified:
@@ -4118,12 +4202,14 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
  * tuples.
  *
  * Output parameters:
- *  *tuple: all fields filled in
- *  *buffer: set to buffer holding tuple (pinned but not locked at exit)
+ *  *slot: BufferHeapTupleTableSlot filled with tuple
  *  *tmfd: filled in failure cases (see below)
  *
  * Function results are the same as the ones for table_tuple_lock().
  *
+ * If *slot already contains the target tuple, it takes advantage of that by
+ * skipping the ReadBuffer() call.
+ *
  * In the failure cases other than TM_Invisible, the routine fills
  * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
 * if necessary), and t_cmax (the last only for TM_SelfModified,
@@ -4134,15 +4220,14 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
  * See README.tuplock for a thorough explanation of this mechanism.
  */
 TM_Result
-heap_lock_tuple(Relation relation, HeapTuple tuple,
+heap_lock_tuple(Relation relation, ItemPointer tid, TupleTableSlot *slot,
                 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
-                bool follow_updates,
-                Buffer *buffer, TM_FailureData *tmfd)
+                bool follow_updates, TM_FailureData *tmfd)
 {
     TM_Result   result;
-    ItemPointer tid = &(tuple->t_self);
     ItemId      lp;
     Page        page;
+    Buffer      buffer;
     Buffer      vmbuffer = InvalidBuffer;
     BlockNumber block;
     TransactionId xid,
@@ -4154,8 +4239,24 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
     bool        skip_tuple_lock = false;
     bool        have_tuple_lock = false;
     bool        cleared_all_frozen = false;
+    BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+    HeapTuple   tuple = &bslot->base.tupdata;
 
-    *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+    Assert(TTS_IS_BUFFERTUPLE(slot));
+
+    /* Take advantage if slot already contains the relevant tuple */
+    if (!TTS_EMPTY(slot) &&
+        slot->tts_tableOid == relation->rd_id &&
+        ItemPointerCompare(&slot->tts_tid, tid) == 0 &&
+        BufferIsValid(bslot->buffer))
+    {
+        buffer = bslot->buffer;
+        IncrBufferRefCount(buffer);
+    }
+    else
+    {
+        buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+    }
     block = ItemPointerGetBlockNumber(tid);
 
     /*
@@ -4164,21 +4265,22 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
      * in the middle of changing this, so we'll need to recheck after we have
      * the lock.
      */
-    if (PageIsAllVisible(BufferGetPage(*buffer)))
+    if (PageIsAllVisible(BufferGetPage(buffer)))
         visibilitymap_pin(relation, block, &vmbuffer);
 
-    LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
-    page = BufferGetPage(*buffer);
+    page = BufferGetPage(buffer);
     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
     Assert(ItemIdIsNormal(lp));
 
+    tuple->t_self = *tid;
     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
     tuple->t_len = ItemIdGetLength(lp);
     tuple->t_tableOid = RelationGetRelid(relation);
 
 l3:
-    result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
+    result = HeapTupleSatisfiesUpdate(tuple, cid, buffer);
 
     if (result == TM_Invisible)
     {
@@ -4207,7 +4309,7 @@ l3:
         infomask2 = tuple->t_data->t_infomask2;
         ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
 
-        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
         /*
          * If any subtransaction of the current top transaction already holds
@@ -4359,12 +4461,12 @@ l3:
                 {
                     result = res;
                     /* recovery code expects to have buffer lock held */
-                    LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
                     goto failed;
                 }
             }
 
-            LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
             /*
              * Make sure it's still an appropriate lock, else start over.
@@ -4399,7 +4501,7 @@ l3:
             if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
                 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
             {
-                LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
                 /*
                  * Make sure it's still an appropriate lock, else start over.
@@ -4427,7 +4529,7 @@ l3:
                  * No conflict, but if the xmax changed under us in the
                  * meantime, start over.
                  */
-                LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
                 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
                                          xwait))
@@ -4439,7 +4541,7 @@ l3:
             }
             else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
             {
-                LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
                 /* if the xmax changed in the meantime, start over */
                 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
@@ -4467,7 +4569,7 @@ l3:
                 TransactionIdIsCurrentTransactionId(xwait))
             {
                 /* ... but if the xmax changed in the meantime, start over */
-                LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
                 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
                                          xwait))
@@ -4489,7 +4591,7 @@ l3:
          */
         if (require_sleep && (result == TM_Updated || result == TM_Deleted))
         {
-            LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
             goto failed;
         }
         else if (require_sleep)
@@ -4514,7 +4616,7 @@ l3:
                  */
                 result = TM_WouldBlock;
                 /* recovery code expects to have buffer lock held */
-                LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
                 goto failed;
             }
 
@@ -4540,7 +4642,7 @@ l3:
                     {
                         result = TM_WouldBlock;
                         /* recovery code expects to have buffer lock held */
-                        LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
                         goto failed;
                     }
                     break;
@@ -4580,7 +4682,7 @@ l3:
                     {
                         result = TM_WouldBlock;
                         /* recovery code expects to have buffer lock held */
-                        LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
                         goto failed;
                     }
                     break;
@@ -4606,12 +4708,12 @@ l3:
             {
                 result = res;
                 /* recovery code expects to have buffer lock held */
-                LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
                 goto failed;
             }
         }
 
-        LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
         /*
          * xwait is done, but if xwait had just locked the tuple then some
@@ -4633,7 +4735,7 @@ l3:
              * don't check for this in the multixact case, because some
              * locker transactions might still be running.
              */
-            UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
+            UpdateXmaxHintBits(tuple->t_data, buffer, xwait);
         }
     }
 
@@ -4692,9 +4794,9 @@ failed:
      */
     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
     {
-        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
         visibilitymap_pin(relation, block, &vmbuffer);
-        LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
         goto l3;
     }
 
@@ -4757,7 +4859,7 @@ failed:
         cleared_all_frozen = true;
 
 
-    MarkBufferDirty(*buffer);
+    MarkBufferDirty(buffer);
 
     /*
      * XLOG stuff. You might think that we don't need an XLOG record because
@@ -4777,7 +4879,7 @@ failed:
         XLogRecPtr  recptr;
 
         XLogBeginInsert();
-        XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
+        XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
 
         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
         xlrec.xmax = xid;
@@ -4798,7 +4900,7 @@ failed:
     result = TM_Ok;
 
 out_locked:
-    LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 out_unlocked:
     if (BufferIsValid(vmbuffer))
@@ -4816,6 +4918,9 @@ out_unlocked:
     if (have_tuple_lock)
         UnlockTupleTuplock(relation, tid, mode);
 
+    /* Put the target tuple into the slot */
+    ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
+
     return result;
 }
 
src/backend/access/heap/heapam_handler.c
@@ -45,6 +45,12 @@
 #include "utils/builtins.h"
 #include "utils/rel.h"
 
+static TM_Result heapam_tuple_lock(Relation relation, ItemPointer tid,
+                                   Snapshot snapshot, TupleTableSlot *slot,
+                                   CommandId cid, LockTupleMode mode,
+                                   LockWaitPolicy wait_policy, uint8 flags,
+                                   TM_FailureData *tmfd);
+
 static void reform_and_rewrite_tuple(HeapTuple tuple,
                                      Relation OldHeap, Relation NewHeap,
                                      Datum *values, bool *isnull, RewriteState rwstate);
@@ -300,23 +306,55 @@ heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
 
 static TM_Result
 heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
-                    Snapshot snapshot, Snapshot crosscheck, bool wait,
-                    TM_FailureData *tmfd, bool changingPart)
+                    Snapshot snapshot, Snapshot crosscheck, int options,
+                    TM_FailureData *tmfd, bool changingPart,
+                    TupleTableSlot *oldSlot)
 {
+    TM_Result   result;
+
     /*
      * Currently Deleting of index tuples are handled at vacuum, in case if
      * the storage itself is cleaning the dead tuples by itself, it is the
      * time to call the index tuple deletion also.
      */
-    return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
+    result = heap_delete(relation, tid, cid, crosscheck, options,
+                         tmfd, changingPart, oldSlot);
+
+    /*
+     * If the tuple has been concurrently updated, then get the lock on it.
+     * (Do this only if the caller asked for it by setting the
+     * TABLE_MODIFY_LOCK_UPDATED option.)  With the lock held, a retry of
+     * the delete should succeed even if there are more concurrent update
+     * attempts.
+     */
+    if (result == TM_Updated && (options & TABLE_MODIFY_LOCK_UPDATED))
+    {
+        /*
+         * heapam_tuple_lock() will take advantage of the tuple loaded into
+         * oldSlot by heap_delete().
+         */
+        result = heapam_tuple_lock(relation, tid, snapshot,
+                                   oldSlot, cid, LockTupleExclusive,
+                                   (options & TABLE_MODIFY_WAIT) ?
+                                   LockWaitBlock :
+                                   LockWaitSkip,
+                                   TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+                                   tmfd);
+
+        if (result == TM_Ok)
+            return TM_Updated;
+    }
+
+    return result;
 }
 
 
 static TM_Result
 heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
                     CommandId cid, Snapshot snapshot, Snapshot crosscheck,
-                    bool wait, TM_FailureData *tmfd,
-                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
+                    int options, TM_FailureData *tmfd,
+                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes,
+                    TupleTableSlot *oldSlot)
 {
     bool        shouldFree = true;
     HeapTuple   tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
@@ -326,8 +364,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
     slot->tts_tableOid = RelationGetRelid(relation);
     tuple->t_tableOid = slot->tts_tableOid;
 
-    result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
-                         tmfd, lockmode, update_indexes);
+    result = heap_update(relation, otid, tuple, cid, crosscheck, options,
+                         tmfd, lockmode, update_indexes, oldSlot);
     ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
 
     /*
@@ -354,6 +392,31 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
     if (shouldFree)
         pfree(tuple);
 
+    /*
+     * If the tuple has been concurrently updated, then get the lock on it.
+     * (Do this only if the caller asked for it by setting the
+     * TABLE_MODIFY_LOCK_UPDATED option.)  With the lock held, a retry of
+     * the update should succeed even if there are more concurrent update
+     * attempts.
+     */
+    if (result == TM_Updated && (options & TABLE_MODIFY_LOCK_UPDATED))
+    {
+        /*
+         * heapam_tuple_lock() will take advantage of the tuple loaded into
+         * oldSlot by heap_update().
+         */
+        result = heapam_tuple_lock(relation, otid, snapshot,
+                                   oldSlot, cid, *lockmode,
+                                   (options & TABLE_MODIFY_WAIT) ?
+                                   LockWaitBlock :
+                                   LockWaitSkip,
+                                   TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+                                   tmfd);
+
+        if (result == TM_Ok)
+            return TM_Updated;
+    }
+
     return result;
 }
 
@@ -365,7 +428,6 @@ heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
 {
     BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
     TM_Result   result;
-    Buffer      buffer;
     HeapTuple   tuple = &bslot->base.tupdata;
     bool        follow_updates;
 
@@ -375,9 +437,8 @@ heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
     Assert(TTS_IS_BUFFERTUPLE(slot));
 
 tuple_lock_retry:
-    tuple->t_self = *tid;
-    result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
-                             follow_updates, &buffer, tmfd);
+    result = heap_lock_tuple(relation, tid, slot, cid, mode, wait_policy,
+                             follow_updates, tmfd);
 
     if (result == TM_Updated &&
         (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
@@ -385,8 +446,6 @@ tuple_lock_retry:
         /* Should not encounter speculative tuple on recheck */
         Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
 
-        ReleaseBuffer(buffer);
-
         if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
         {
             SnapshotData SnapshotDirty;
@@ -408,6 +467,8 @@ tuple_lock_retry:
             InitDirtySnapshot(SnapshotDirty);
             for (;;)
             {
+                Buffer      buffer = InvalidBuffer;
+
                 if (ItemPointerIndicatesMovedPartitions(tid))
                     ereport(ERROR,
                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
@@ -502,7 +563,7 @@ tuple_lock_retry:
                     /*
                      * This is a live tuple, so try to lock it again.
                      */
-                    ReleaseBuffer(buffer);
+                    ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
                     goto tuple_lock_retry;
                 }
 
@@ -513,7 +574,7 @@ tuple_lock_retry:
                  */
                 if (tuple->t_data == NULL)
                 {
-                    Assert(!BufferIsValid(buffer));
+                    ReleaseBuffer(buffer);
                     return TM_Deleted;
                 }
 
@@ -566,9 +627,6 @@ tuple_lock_retry:
     slot->tts_tableOid = RelationGetRelid(relation);
     tuple->t_tableOid = slot->tts_tableOid;
 
-    /* store in slot, transferring existing pin */
-    ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
-
     return result;
 }
 
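Taken together, the heapam_tuple_delete()/heapam_tuple_update() changes above give callers a single-call retry loop. The sketch below is a hedged summary of that flow, not the literal nodeModifyTable.c code; recheck_qual() and the surrounding variables are hypothetical stand-ins.

/*
 * Hedged sketch of the caller-side retry loop this commit enables
 * (hypothetical names, not the literal executor code).  Previously,
 * a TM_Updated result forced a separate table_tuple_lock() call and a
 * nested switch on its result before retrying.
 */
for (;;)
{
    TM_FailureData tmfd;
    TM_Result   result;

    result = table_tuple_delete(rel, tid, cid, snapshot, crosscheck,
                                TABLE_MODIFY_WAIT |
                                TABLE_MODIFY_FETCH_OLD_TUPLE |
                                TABLE_MODIFY_LOCK_UPDATED,
                                &tmfd, false /* changingPart */ ,
                                oldSlot);

    if (result != TM_Updated)
        break;                  /* TM_Ok, TM_Deleted, TM_SelfModified, ... */

    /*
     * TM_Updated with TABLE_MODIFY_LOCK_UPDATED means the AM has already
     * locked the latest row version and stored it in oldSlot (see
     * heapam_tuple_delete() above: it returns TM_Updated only after its
     * internal heapam_tuple_lock() call returned TM_Ok).  Re-evaluate the
     * qual against oldSlot, then retry against the locked version.
     */
    if (!recheck_qual(oldSlot))  /* hypothetical EvalPlanQual-style recheck */
        break;

    tid = &oldSlot->tts_tid;
}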