diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index b012df1c5d9..63863ddf82c 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -156,9 +156,9 @@
  *		PredicateLockTuple(Relation relation, HeapTuple tuple,
  *						Snapshot snapshot)
  *		PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
- *						   BlockNumber newblkno);
+ *						   BlockNumber newblkno)
  *		PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
- *						     BlockNumber newblkno);
+ *						     BlockNumber newblkno)
  *		TransferPredicateLocksToHeapRelation(Relation relation)
  *		ReleasePredicateLocks(bool isCommit)
  *
@@ -381,7 +381,7 @@ static SHM_QUEUE *FinishedSerializableTransactions;
  * this entry, you can ensure that there's enough scratch space available for
  * inserting one entry in the hash table. This is an otherwise-invalid tag.
  */
-static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0, 0};
+static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
 static uint32 ScratchTargetTagHash;
 static int	ScratchPartitionLock;
 
@@ -2492,8 +2492,6 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
 			}
 		}
 	}
-	else
-		targetxmin = InvalidTransactionId;
 
 	/*
 	 * Do quick-but-not-definitive test for a relation lock first. This will
@@ -2512,8 +2510,7 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
 									 relation->rd_node.dbNode,
 									 relation->rd_id,
 									 ItemPointerGetBlockNumber(tid),
-									 ItemPointerGetOffsetNumber(tid),
-									 targetxmin);
+									 ItemPointerGetOffsetNumber(tid));
 
 	PredicateLockAcquire(&tag);
 }
@@ -4283,8 +4280,7 @@ CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
 										 relation->rd_node.dbNode,
 										 relation->rd_id,
 										 ItemPointerGetBlockNumber(&(tuple->t_data->t_ctid)),
-										 ItemPointerGetOffsetNumber(&(tuple->t_data->t_ctid)),
-										 HeapTupleHeaderGetXmin(tuple->t_data));
+										 ItemPointerGetOffsetNumber(&(tuple->t_data->t_ctid)));
 		CheckTargetForConflictsIn(&targettag);
 	}
 
diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h
index 3800610de51..55454d15d52 100644
--- a/src/include/storage/predicate_internals.h
+++ b/src/include/storage/predicate_internals.h
@@ -254,10 +254,10 @@ typedef struct SERIALIZABLEXID
  * be the target of predicate locks.
 *
 * Note that the hash function being used doesn't properly respect tag
- * length -- it will go to a four byte boundary past the end of the tag.
- * If you change this struct, make sure any slack space is initialized,
- * so that any random bytes in the middle or at the end are not included
- * in the hash.
+ * length -- if the length of the structure isn't a multiple of four bytes it
+ * will go to a four byte boundary past the end of the tag.  If you change
+ * this struct, make sure any slack space is initialized, so that any random
+ * bytes in the middle or at the end are not included in the hash.
 *
 * TODO SSI: If we always use the same fields for the same type of value, we
 * should rename these.  Holding off until it's clear there are no exceptions.
@@ -272,7 +272,6 @@ typedef struct PREDICATELOCKTARGETTAG
 	uint32		locktag_field2; /* a 32-bit ID field */
 	uint32		locktag_field3; /* a 32-bit ID field */
 	uint32		locktag_field4; /* a 32-bit ID field */
-	uint32		locktag_field5; /* a 32-bit ID field */
 } PREDICATELOCKTARGETTAG;
 
 /*
@@ -283,12 +282,6 @@ typedef struct PREDICATELOCKTARGETTAG
 * added when a predicate lock is requested on an object which doesn't
 * already have one.  An entry is removed when the last lock is removed from
 * its list.
- *
- * Because a particular target might become obsolete, due to update to a new
- * version, before the reading transaction is obsolete, we need some way to
- * prevent errors from reuse of a tuple ID.  Rather than attempting to clean
- * up the targets as the related tuples are pruned or vacuumed, we check the
- * xmin on access.  This should be far less costly.
 */
 typedef struct PREDICATELOCKTARGET
 {
@@ -398,22 +391,19 @@ typedef struct PredicateLockData
 	((locktag).locktag_field1 = (dboid), \
 	 (locktag).locktag_field2 = (reloid), \
 	 (locktag).locktag_field3 = InvalidBlockNumber, \
-	 (locktag).locktag_field4 = InvalidOffsetNumber, \
-	 (locktag).locktag_field5 = InvalidTransactionId)
+	 (locktag).locktag_field4 = InvalidOffsetNumber)
 
 #define SET_PREDICATELOCKTARGETTAG_PAGE(locktag,dboid,reloid,blocknum) \
 	((locktag).locktag_field1 = (dboid), \
 	 (locktag).locktag_field2 = (reloid), \
 	 (locktag).locktag_field3 = (blocknum), \
-	 (locktag).locktag_field4 = InvalidOffsetNumber, \
-	 (locktag).locktag_field5 = InvalidTransactionId)
+	 (locktag).locktag_field4 = InvalidOffsetNumber)
 
-#define SET_PREDICATELOCKTARGETTAG_TUPLE(locktag,dboid,reloid,blocknum,offnum,xmin) \
+#define SET_PREDICATELOCKTARGETTAG_TUPLE(locktag,dboid,reloid,blocknum,offnum) \
 	((locktag).locktag_field1 = (dboid), \
 	 (locktag).locktag_field2 = (reloid), \
 	 (locktag).locktag_field3 = (blocknum), \
-	 (locktag).locktag_field4 = (offnum), \
-	 (locktag).locktag_field5 = (xmin))
+	 (locktag).locktag_field4 = (offnum))
 
 #define GET_PREDICATELOCKTARGETTAG_DB(locktag) \
 	((Oid) (locktag).locktag_field1)
@@ -423,8 +413,6 @@ typedef struct PredicateLockData
 	((BlockNumber) (locktag).locktag_field3)
 #define GET_PREDICATELOCKTARGETTAG_OFFSET(locktag) \
 	((OffsetNumber) (locktag).locktag_field4)
-#define GET_PREDICATELOCKTARGETTAG_XMIN(locktag) \
-	((TransactionId) (locktag).locktag_field5)
 #define GET_PREDICATELOCKTARGETTAG_TYPE(locktag) \
 	(((locktag).locktag_field4 != InvalidOffsetNumber) ? PREDLOCKTAG_TUPLE : \
 	 (((locktag).locktag_field3 != InvalidBlockNumber) ? PREDLOCKTAG_PAGE : \
@@ -462,6 +450,7 @@ typedef struct TwoPhasePredicateXactRecord
 typedef struct TwoPhasePredicateLockRecord
 {
 	PREDICATELOCKTARGETTAG target;
+	uint32		filler;			/* to avoid length change in back-patched fix */
 } TwoPhasePredicateLockRecord;
 
 typedef struct TwoPhasePredicateRecord
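For reference, here is a standalone sketch (not part of the patch) of how the four-field tag behaves after this change. The struct layout and the SET_/GET_ macros are copied from the patched predicate_internals.h; the stand-in typedefs, the Invalid* values (their usual PostgreSQL definitions), and the main() driver are illustrative assumptions added for the sketch. Because every field is a uint32, the tag length stays a multiple of four bytes, so the hash-function caveat about uninitialized slack space is met as long as tags are built through these macros or zero-initialized first, as ScratchTargetTag is.

/* Standalone sketch, compilable outside the PostgreSQL tree. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in typedefs and constants (illustrative, not from the patch). */
typedef uint32_t Oid;
typedef uint32_t BlockNumber;
typedef uint16_t OffsetNumber;

#define InvalidBlockNumber  ((BlockNumber) 0xFFFFFFFF)
#define InvalidOffsetNumber ((OffsetNumber) 0)

/* Post-patch tag: four uint32 fields, no locktag_field5 / xmin. */
typedef struct PREDICATELOCKTARGETTAG
{
	uint32_t	locktag_field1;		/* database OID */
	uint32_t	locktag_field2;		/* relation OID */
	uint32_t	locktag_field3;		/* block number */
	uint32_t	locktag_field4;		/* offset number */
} PREDICATELOCKTARGETTAG;

typedef enum
{
	PREDLOCKTAG_RELATION,
	PREDLOCKTAG_PAGE,
	PREDLOCKTAG_TUPLE
} PredicateLockTargetType;

/* Macros as they appear after the patch. */
#define SET_PREDICATELOCKTARGETTAG_TUPLE(locktag,dboid,reloid,blocknum,offnum) \
	((locktag).locktag_field1 = (dboid), \
	 (locktag).locktag_field2 = (reloid), \
	 (locktag).locktag_field3 = (blocknum), \
	 (locktag).locktag_field4 = (offnum))

#define GET_PREDICATELOCKTARGETTAG_TYPE(locktag) \
	(((locktag).locktag_field4 != InvalidOffsetNumber) ? PREDLOCKTAG_TUPLE : \
	 (((locktag).locktag_field3 != InvalidBlockNumber) ? PREDLOCKTAG_PAGE : \
	  PREDLOCKTAG_RELATION))

int
main(void)
{
	/* Zero-initialized, as ScratchTargetTag is in predicate.c. */
	PREDICATELOCKTARGETTAG tag = {0, 0, 0, 0};

	/* Hypothetical target: block 7, offset 3 of relation 16385 in database 5. */
	SET_PREDICATELOCKTARGETTAG_TUPLE(tag, 5, 16385, 7, 3);

	/* Prints 2, i.e. PREDLOCKTAG_TUPLE. */
	printf("tag type = %d\n", GET_PREDICATELOCKTARGETTAG_TYPE(tag));
	return 0;
}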