From fe8091c9e39e65c8a0044349f0c0e7c3386ae921 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Sat, 2 Nov 2024 09:05:00 -0700 Subject: [PATCH] Revert "For inplace update, send nontransactional invalidations." This reverts commit 95c5acb3fc261067ab65ddc0b2dca8e162f09442 (v17) and counterparts in each other non-master branch. If released, that commit would have caused a worst-in-years minor release regression, via undetected LWLock self-deadlock. This commit and its self-deadlock fix warrant more bake time in the master branch. Reported by Alexander Lakhin. Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com --- src/backend/access/heap/heapam.c | 43 +-- src/backend/access/transam/xact.c | 26 +- src/backend/catalog/index.c | 11 +- src/backend/replication/logical/decode.c | 26 +- src/backend/utils/cache/catcache.c | 7 +- src/backend/utils/cache/inval.c | 259 +++++------------- src/backend/utils/cache/syscache.c | 3 +- src/include/utils/catcache.h | 3 +- src/include/utils/inval.h | 6 - src/test/isolation/expected/inplace-inval.out | 10 +- src/test/isolation/specs/inplace-inval.spec | 12 +- 11 files changed, 117 insertions(+), 289 deletions(-) diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index e09f454e42d..0c2c5f0cd78 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -6115,24 +6115,6 @@ heap_inplace_update_and_unlock(Relation relation, if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff) elog(ERROR, "wrong tuple length"); - /* - * Construct shared cache inval if necessary. Note that because we only - * pass the new version of the tuple, this mustn't be used for any - * operations that could change catcache lookup keys. But we aren't - * bothering with index updates either, so that's true a fortiori. - */ - CacheInvalidateHeapTupleInplace(relation, tuple, NULL); - - /* - * Unlink relcache init files as needed. If unlinking, acquire - * RelCacheInitLock until after associated invalidations. By doing this - * in advance, if we checkpoint and then crash between inplace - * XLogInsert() and inval, we don't rely on StartupXLOG() -> - * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would - * neglect to PANIC on EIO. - */ - PreInplace_Inval(); - /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -6176,28 +6158,17 @@ heap_inplace_update_and_unlock(Relation relation, PageSetLSN(BufferGetPage(buffer), recptr); } - LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - - /* - * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we - * do this before UnlockTuple(). - * - * If we're mutating a tuple visible only to this transaction, there's an - * equivalent transactional inval from the action that created the tuple, - * and this inval is superfluous. - */ - AtInplace_Inval(); - END_CRIT_SECTION(); - UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock); - AcceptInvalidationMessages(); /* local processing of just-sent inval */ + heap_inplace_unlock(relation, oldtup, buffer); /* - * Queue a transactional inval. The immediate invalidation we just sent - * is the only one known to be necessary. To reduce risk from the - * transition to immediate invalidation, continue sending a transactional - * invalidation like we've long done. Third-party code might rely on it. + * Send out shared cache inval if necessary. 
Note that because we only + * pass the new version of the tuple, this mustn't be used for any + * operations that could change catcache lookup keys. But we aren't + * bothering with index updates either, so that's true a fortiori. + * + * XXX ROLLBACK discards the invalidation. See test inplace-inval.spec. */ if (!IsBootstrapProcessingMode()) CacheInvalidateHeapTuple(relation, tuple, NULL); diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index cda6e336138..ffe26e26f66 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -1249,24 +1249,14 @@ RecordTransactionCommit(void) /* * Transactions without an assigned xid can contain invalidation - * messages. While inplace updates do this, this is not known to be - * necessary; see comment at inplace CacheInvalidateHeapTuple(). - * Extensions might still rely on this capability, and standbys may - * need to process those invals. We can't emit a commit record - * without an xid, and we don't want to force assigning an xid, - * because that'd be problematic for e.g. vacuum. Hence we emit a - * bespoke record for the invalidations. We don't want to use that in - * case a commit record is emitted, so they happen synchronously with - * commits (besides not wanting to emit more WAL records). - * - * XXX Every known use of this capability is a defect. Since an XID - * isn't controlling visibility of the change that prompted invals, - * other sessions need the inval even if this transactions aborts. - * - * ON COMMIT DELETE ROWS does a nontransactional index_build(), which - * queues a relcache inval, including in transactions without an xid - * that had read the (empty) table. Standbys don't need any ON COMMIT - * DELETE ROWS invals, but we've not done the work to withhold them. + * messages (e.g. explicit relcache invalidations or catcache + * invalidations for inplace updates); standbys need to process those. + * We can't emit a commit record without an xid, and we don't want to + * force assigning an xid, because that'd be problematic for e.g. + * vacuum. Hence we emit a bespoke record for the invalidations. We + * don't want to use that in case a commit record is emitted, so they + * happen synchronously with commits (besides not wanting to emit more + * WAL records). */ if (nmsgs != 0) { diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 9ab38b9b441..4ca86d955fe 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -2882,19 +2882,12 @@ index_update_stats(Relation rel, if (dirty) { systable_inplace_update_finish(state, tuple); - /* the above sends transactional and immediate cache inval messages */ + /* the above sends a cache inval message */ } else { systable_inplace_update_cancel(state); - - /* - * While we didn't change relhasindex, CREATE INDEX needs a - * transactional inval for when the new index's catalog rows become - * visible. Other CREATE INDEX and REINDEX code happens to also queue - * this inval, but keep this in case rare callers rely on this part of - * our API contract. 
- */ + /* no need to change tuple, but force relcache inval anyway */ CacheInvalidateRelcacheByTuple(tuple); } diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index b3b51a90862..297eb11b5a8 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -460,19 +460,23 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) /* * Inplace updates are only ever performed on catalog tuples and - * can, per definition, not change tuple visibility. Inplace - * updates don't affect storage or interpretation of table rows, - * so they don't affect logicalrep_write_tuple() outcomes. Hence, - * we don't process invalidations from the original operation. If - * inplace updates did affect those things, invalidations wouldn't - * make it work, since there are no snapshot-specific versions of - * inplace-updated values. Since we also don't decode catalog - * tuples, we're not interested in the record's contents. + * can, per definition, not change tuple visibility. Since we + * don't decode catalog tuples, we're not interested in the + * record's contents. * - * WAL contains likely-unnecessary commit-time invals from the - * CacheInvalidateHeapTuple() call in heap_inplace_update(). - * Excess invalidation is safe. + * In-place updates can be used either by XID-bearing transactions + * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less + * transactions (e.g. VACUUM). In the former case, the commit + * record will include cache invalidations, so we mark the + * transaction as catalog modifying here. Currently that's + * redundant because the commit will do that as well, but once we + * support decoding in-progress relations, this will be important. */ + if (!TransactionIdIsValid(xid)) + break; + + SnapBuildProcessChange(builder, xid, buf->origptr); + ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr); break; case XLOG_HEAP_CONFIRM: diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index 9a6189004d9..b53232f6278 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -2129,8 +2129,7 @@ void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, - void (*function) (int, uint32, Oid, void *), - void *context) + void (*function) (int, uint32, Oid)) { slist_iter iter; Oid reloid; @@ -2171,7 +2170,7 @@ PrepareToInvalidateCacheTuple(Relation relation, hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple); dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId; - (*function) (ccp->id, hashvalue, dbid, context); + (*function) (ccp->id, hashvalue, dbid); if (newtuple) { @@ -2180,7 +2179,7 @@ PrepareToInvalidateCacheTuple(Relation relation, newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple); if (newhashvalue != hashvalue) - (*function) (ccp->id, newhashvalue, dbid, context); + (*function) (ccp->id, newhashvalue, dbid); } } } diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index a0dc745c71a..8fe1f0d9222 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -99,10 +99,6 @@ * worth trying to avoid sending such inval traffic in the future, if those * problems can be overcome cheaply. * - * When making a nontransactional change to a cacheable object, we must - * likewise send the invalidation immediately, before ending the change's - * critical section. 
This includes inplace heap updates, relmap, and smgr. - * * * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -154,7 +150,7 @@ typedef struct InvalidationListHeader } InvalidationListHeader; /*---------------- - * Transactional invalidation info is divided into two lists: + * Invalidation info is divided into two lists: * 1) events so far in current command, not yet reflected to caches. * 2) events in previous commands of current transaction; these have * been reflected to local caches, and must be either broadcast to @@ -170,36 +166,26 @@ typedef struct InvalidationListHeader *---------------- */ -/* fields common to both transactional and inplace invalidation */ -typedef struct InvalidationInfo -{ - /* head of current-command event list */ - InvalidationListHeader CurrentCmdInvalidMsgs; - - /* init file must be invalidated? */ - bool RelcacheInitFileInval; -} InvalidationInfo; - -/* subclass adding fields specific to transactional invalidation */ typedef struct TransInvalidationInfo { - /* Base class */ - struct InvalidationInfo ii; - - /* head of previous-commands event list */ - InvalidationListHeader PriorCmdInvalidMsgs; - /* Back link to parent transaction's info */ struct TransInvalidationInfo *parent; /* Subtransaction nesting depth */ int my_level; + + /* head of current-command event list */ + InvalidationListHeader CurrentCmdInvalidMsgs; + + /* head of previous-commands event list */ + InvalidationListHeader PriorCmdInvalidMsgs; + + /* init file must be invalidated? */ + bool RelcacheInitFileInval; } TransInvalidationInfo; static TransInvalidationInfo *transInvalInfo = NULL; -static InvalidationInfo *inplaceInvalInfo = NULL; - static SharedInvalidationMessage *SharedInvalidMessagesArray; static int numSharedInvalidMessagesArray; static int maxSharedInvalidMessagesArray; @@ -513,12 +499,9 @@ ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr, static void RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, - Oid dbId, - void *context) + Oid dbId) { - InvalidationInfo *info = (InvalidationInfo *) context; - - AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, + AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs, cacheId, hashValue, dbId); } @@ -528,9 +511,10 @@ RegisterCatcacheInvalidation(int cacheId, * Register an invalidation event for all catcache entries from a catalog. */ static void -RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId) +RegisterCatalogInvalidation(Oid dbId, Oid catId) { - AddCatalogInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, catId); + AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs, + dbId, catId); } /* @@ -539,9 +523,10 @@ RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId) * As above, but register a relcache invalidation event. */ static void -RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId) +RegisterRelcacheInvalidation(Oid dbId, Oid relId) { - AddRelcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId); + AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs, + dbId, relId); /* * Most of the time, relcache invalidation is associated with system @@ -558,7 +543,7 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId) * as well. Also zap when we are invalidating whole relcache. 
*/ if (relId == InvalidOid || RelationIdIsInInitFile(relId)) - info->RelcacheInitFileInval = true; + transInvalInfo->RelcacheInitFileInval = true; } /* @@ -568,9 +553,10 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId) * Only needed for catalogs that don't have catcaches. */ static void -RegisterSnapshotInvalidation(InvalidationInfo *info, Oid dbId, Oid relId) +RegisterSnapshotInvalidation(Oid dbId, Oid relId) { - AddSnapshotInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId); + AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs, + dbId, relId); } /* @@ -762,18 +748,14 @@ AcceptInvalidationMessages(void) * PrepareInvalidationState * Initialize inval lists for the current (sub)transaction. */ -static InvalidationInfo * +static void PrepareInvalidationState(void) { TransInvalidationInfo *myInfo; - Assert(IsTransactionState()); - /* Can't queue transactional message while collecting inplace messages. */ - Assert(inplaceInvalInfo == NULL); - if (transInvalInfo != NULL && transInvalInfo->my_level == GetCurrentTransactionNestLevel()) - return (InvalidationInfo *) transInvalInfo; + return; myInfo = (TransInvalidationInfo *) MemoryContextAllocZero(TopTransactionContext, @@ -789,29 +771,6 @@ PrepareInvalidationState(void) myInfo->my_level > transInvalInfo->my_level); transInvalInfo = myInfo; - return (InvalidationInfo *) myInfo; -} - -/* - * PrepareInplaceInvalidationState - * Initialize inval data for an inplace update. - * - * See previous function for more background. - */ -static InvalidationInfo * -PrepareInplaceInvalidationState(void) -{ - InvalidationInfo *myInfo; - - Assert(IsTransactionState()); - /* limit of one inplace update under assembly */ - Assert(inplaceInvalInfo == NULL); - - /* gone after WAL insertion CritSection ends, so use current context */ - myInfo = (InvalidationInfo *) palloc0(sizeof(InvalidationInfo)); - - inplaceInvalInfo = myInfo; - return myInfo; } /* @@ -907,7 +866,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs, * after we send the SI messages. However, we need not do anything unless * we committed. */ - *RelcacheInitFileInval = transInvalInfo->ii.RelcacheInitFileInval; + *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval; /* * Walk through TransInvalidationInfo to collect all the messages into a @@ -919,7 +878,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs, */ oldcontext = MemoryContextSwitchTo(CurTransactionContext); - ProcessInvalidationMessagesMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs, + ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs, MakeSharedInvalidMessagesArray); ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs, MakeSharedInvalidMessagesArray); @@ -1009,9 +968,7 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, void AtEOXact_Inval(bool isCommit) { - inplaceInvalInfo = NULL; - - /* Quick exit if no transactional messages */ + /* Quick exit if no messages */ if (transInvalInfo == NULL) return; @@ -1025,16 +982,16 @@ AtEOXact_Inval(bool isCommit) * after we send the SI messages. However, we need not do anything * unless we committed. 
*/ - if (transInvalInfo->ii.RelcacheInitFileInval) + if (transInvalInfo->RelcacheInitFileInval) RelationCacheInitFilePreInvalidate(); AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs, - &transInvalInfo->ii.CurrentCmdInvalidMsgs); + &transInvalInfo->CurrentCmdInvalidMsgs); ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs, SendSharedInvalidMessages); - if (transInvalInfo->ii.RelcacheInitFileInval) + if (transInvalInfo->RelcacheInitFileInval) RelationCacheInitFilePostInvalidate(); } else @@ -1049,45 +1006,6 @@ AtEOXact_Inval(bool isCommit) numSharedInvalidMessagesArray = 0; } -/* - * PreInplace_Inval - * Process queued-up invalidation before inplace update critical section. - * - * Tasks belong here if they are safe even if the inplace update does not - * complete. Currently, this just unlinks a cache file, which can fail. The - * sum of this and AtInplace_Inval() mirrors AtEOXact_Inval(isCommit=true). - */ -void -PreInplace_Inval(void) -{ - Assert(CritSectionCount == 0); - - if (inplaceInvalInfo && inplaceInvalInfo->RelcacheInitFileInval) - RelationCacheInitFilePreInvalidate(); -} - -/* - * AtInplace_Inval - * Process queued-up invalidations after inplace update buffer mutation. - */ -void -AtInplace_Inval(void) -{ - Assert(CritSectionCount > 0); - - if (inplaceInvalInfo == NULL) - return; - - ProcessInvalidationMessagesMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs, - SendSharedInvalidMessages); - - if (inplaceInvalInfo->RelcacheInitFileInval) - RelationCacheInitFilePostInvalidate(); - - inplaceInvalInfo = NULL; - /* inplace doesn't use SharedInvalidMessagesArray */ -} - /* * AtEOSubXact_Inval * Process queued-up invalidation messages at end of subtransaction. @@ -1110,20 +1028,9 @@ void AtEOSubXact_Inval(bool isCommit) { int my_level; - TransInvalidationInfo *myInfo; + TransInvalidationInfo *myInfo = transInvalInfo; - /* - * Successful inplace update must clear this, but we clear it on abort. - * Inplace updates allocate this in CurrentMemoryContext, which has - * lifespan <= subtransaction lifespan. Hence, don't free it explicitly. - */ - if (isCommit) - Assert(inplaceInvalInfo == NULL); - else - inplaceInvalInfo = NULL; - - /* Quick exit if no transactional messages. */ - myInfo = transInvalInfo; + /* Quick exit if no messages. */ if (myInfo == NULL) return; @@ -1157,8 +1064,8 @@ AtEOSubXact_Inval(bool isCommit) &myInfo->PriorCmdInvalidMsgs); /* Pending relcache inval becomes parent's problem too */ - if (myInfo->ii.RelcacheInitFileInval) - myInfo->parent->ii.RelcacheInitFileInval = true; + if (myInfo->RelcacheInitFileInval) + myInfo->parent->RelcacheInitFileInval = true; /* Pop the transaction state stack */ transInvalInfo = myInfo->parent; @@ -1205,24 +1112,29 @@ CommandEndInvalidationMessages(void) if (transInvalInfo == NULL) return; - ProcessInvalidationMessages(&transInvalInfo->ii.CurrentCmdInvalidMsgs, + ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs, LocalExecuteInvalidationMessage); AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs, - &transInvalInfo->ii.CurrentCmdInvalidMsgs); + &transInvalInfo->CurrentCmdInvalidMsgs); } /* - * CacheInvalidateHeapTupleCommon - * Common logic for end-of-command and inplace variants. + * CacheInvalidateHeapTuple + * Register the given tuple for invalidation at end of command + * (ie, current command is creating or outdating this tuple). + * Also, detect whether a relcache invalidation is implied. + * + * For an insert or delete, tuple is the target tuple and newtuple is NULL. 
+ * For an update, we are called just once, with tuple being the old tuple + * version and newtuple the new version. This allows avoidance of duplicate + * effort during an update. */ -static void -CacheInvalidateHeapTupleCommon(Relation relation, - HeapTuple tuple, - HeapTuple newtuple, - InvalidationInfo *(*prepare_callback) (void)) +void +CacheInvalidateHeapTuple(Relation relation, + HeapTuple tuple, + HeapTuple newtuple) { - InvalidationInfo *info; Oid tupleRelId; Oid databaseId; Oid relationId; @@ -1246,8 +1158,11 @@ CacheInvalidateHeapTupleCommon(Relation relation, if (IsToastRelation(relation)) return; - /* Allocate any required resources. */ - info = prepare_callback(); + /* + * If we're not prepared to queue invalidation messages for this + * subtransaction level, get ready now. + */ + PrepareInvalidationState(); /* * First let the catcache do its thing @@ -1256,12 +1171,11 @@ CacheInvalidateHeapTupleCommon(Relation relation, if (RelationInvalidatesSnapshotsOnly(tupleRelId)) { databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId; - RegisterSnapshotInvalidation(info, databaseId, tupleRelId); + RegisterSnapshotInvalidation(databaseId, tupleRelId); } else PrepareToInvalidateCacheTuple(relation, tuple, newtuple, - RegisterCatcacheInvalidation, - (void *) info); + RegisterCatcacheInvalidation); /* * Now, is this tuple one of the primary definers of a relcache entry? See @@ -1334,44 +1248,7 @@ CacheInvalidateHeapTupleCommon(Relation relation, /* * Yes. We need to register a relcache invalidation event. */ - RegisterRelcacheInvalidation(info, databaseId, relationId); -} - -/* - * CacheInvalidateHeapTuple - * Register the given tuple for invalidation at end of command - * (ie, current command is creating or outdating this tuple) and end of - * transaction. Also, detect whether a relcache invalidation is implied. - * - * For an insert or delete, tuple is the target tuple and newtuple is NULL. - * For an update, we are called just once, with tuple being the old tuple - * version and newtuple the new version. This allows avoidance of duplicate - * effort during an update. - */ -void -CacheInvalidateHeapTuple(Relation relation, - HeapTuple tuple, - HeapTuple newtuple) -{ - CacheInvalidateHeapTupleCommon(relation, tuple, newtuple, - PrepareInvalidationState); -} - -/* - * CacheInvalidateHeapTupleInplace - * Register the given tuple for nontransactional invalidation pertaining - * to an inplace update. Also, detect whether a relcache invalidation is - * implied. - * - * Like CacheInvalidateHeapTuple(), but for inplace updates. 
- */ -void -CacheInvalidateHeapTupleInplace(Relation relation, - HeapTuple tuple, - HeapTuple newtuple) -{ - CacheInvalidateHeapTupleCommon(relation, tuple, newtuple, - PrepareInplaceInvalidationState); + RegisterRelcacheInvalidation(databaseId, relationId); } /* @@ -1390,13 +1267,14 @@ CacheInvalidateCatalog(Oid catalogId) { Oid databaseId; + PrepareInvalidationState(); + if (IsSharedRelation(catalogId)) databaseId = InvalidOid; else databaseId = MyDatabaseId; - RegisterCatalogInvalidation(PrepareInvalidationState(), - databaseId, catalogId); + RegisterCatalogInvalidation(databaseId, catalogId); } /* @@ -1414,14 +1292,15 @@ CacheInvalidateRelcache(Relation relation) Oid databaseId; Oid relationId; + PrepareInvalidationState(); + relationId = RelationGetRelid(relation); if (relation->rd_rel->relisshared) databaseId = InvalidOid; else databaseId = MyDatabaseId; - RegisterRelcacheInvalidation(PrepareInvalidationState(), - databaseId, relationId); + RegisterRelcacheInvalidation(databaseId, relationId); } /* @@ -1434,8 +1313,9 @@ CacheInvalidateRelcache(Relation relation) void CacheInvalidateRelcacheAll(void) { - RegisterRelcacheInvalidation(PrepareInvalidationState(), - InvalidOid, InvalidOid); + PrepareInvalidationState(); + + RegisterRelcacheInvalidation(InvalidOid, InvalidOid); } /* @@ -1449,13 +1329,14 @@ CacheInvalidateRelcacheByTuple(HeapTuple classTuple) Oid databaseId; Oid relationId; + PrepareInvalidationState(); + relationId = classtup->oid; if (classtup->relisshared) databaseId = InvalidOid; else databaseId = MyDatabaseId; - RegisterRelcacheInvalidation(PrepareInvalidationState(), - databaseId, relationId); + RegisterRelcacheInvalidation(databaseId, relationId); } /* @@ -1469,6 +1350,8 @@ CacheInvalidateRelcacheByRelid(Oid relid) { HeapTuple tup; + PrepareInvalidationState(); + tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tup)) elog(ERROR, "cache lookup failed for relation %u", relid); diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c index e4a8fb2661b..8ad87914d4d 100644 --- a/src/backend/utils/cache/syscache.c +++ b/src/backend/utils/cache/syscache.c @@ -1249,7 +1249,8 @@ SearchSysCacheLocked1(int cacheId, /* * If an inplace update just finished, ensure we process the syscache - * inval. + * inval. XXX this is insufficient: the inplace updater may not yet + * have reached AtEOXact_Inval(). See test at inplace-inval.spec. * * If a heap_update() call just released its LOCKTAG_TUPLE, we'll * probably find the old tuple and reach "tuple concurrently updated". 
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h index 27221736e58..f4aa316604e 100644 --- a/src/include/utils/catcache.h +++ b/src/include/utils/catcache.h @@ -223,8 +223,7 @@ extern void CatCacheInvalidate(CatCache *cache, uint32 hashValue); extern void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, - void (*function) (int, uint32, Oid, void *), - void *context); + void (*function) (int, uint32, Oid)); extern void PrintCatCacheLeakWarning(HeapTuple tuple); extern void PrintCatCacheListLeakWarning(CatCList *list); diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h index 3ff52e89c20..4c6b86c9610 100644 --- a/src/include/utils/inval.h +++ b/src/include/utils/inval.h @@ -27,9 +27,6 @@ extern void AcceptInvalidationMessages(void); extern void AtEOXact_Inval(bool isCommit); -extern void PreInplace_Inval(void); -extern void AtInplace_Inval(void); - extern void AtEOSubXact_Inval(bool isCommit); extern void PostPrepare_Inval(void); @@ -39,9 +36,6 @@ extern void CommandEndInvalidationMessages(void); extern void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple); -extern void CacheInvalidateHeapTupleInplace(Relation relation, - HeapTuple tuple, - HeapTuple newtuple); extern void CacheInvalidateCatalog(Oid catalogId); diff --git a/src/test/isolation/expected/inplace-inval.out b/src/test/isolation/expected/inplace-inval.out index c35895a8aa7..e68eca5de98 100644 --- a/src/test/isolation/expected/inplace-inval.out +++ b/src/test/isolation/expected/inplace-inval.out @@ -1,6 +1,6 @@ Parsed test spec with 3 sessions -starting permutation: cachefill3 cir1 cic2 ddl3 read1 +starting permutation: cachefill3 cir1 cic2 ddl3 step cachefill3: TABLE newly_indexed; c - @@ -9,14 +9,6 @@ c step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK; step cic2: CREATE INDEX i2 ON newly_indexed (c); step ddl3: ALTER TABLE newly_indexed ADD extra int; -step read1: - SELECT relhasindex FROM pg_class WHERE oid = 'newly_indexed'::regclass; - -relhasindex ------------ -t -(1 row) - starting permutation: cir1 cic2 ddl3 read1 step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK; diff --git a/src/test/isolation/specs/inplace-inval.spec b/src/test/isolation/specs/inplace-inval.spec index b99112ddb88..96954fd86c4 100644 --- a/src/test/isolation/specs/inplace-inval.spec +++ b/src/test/isolation/specs/inplace-inval.spec @@ -1,7 +1,7 @@ -# An inplace update had been able to abort before sending the inplace -# invalidation message to the shared queue. If a heap_update() caller then -# retrieved its oldtup from a cache, the heap_update() could revert the -# inplace update. +# If a heap_update() caller retrieves its oldtup from a cache, it's possible +# for that cache entry to predate an inplace update, causing loss of that +# inplace update. This arises because the transaction may abort before +# sending the inplace invalidation message to the shared queue. setup { @@ -27,12 +27,14 @@ step cachefill3 { TABLE newly_indexed; } step ddl3 { ALTER TABLE newly_indexed ADD extra int; } +# XXX shows an extant bug. Adding step read1 at the end would usually print +# relhasindex=f (not wanted). This does not reach the unwanted behavior under +# -DCATCACHE_FORCE_RELEASE and friends. 
permutation cachefill3 # populates the pg_class row in the catcache cir1 # sets relhasindex=true; rollback discards cache inval cic2 # sees relhasindex=true, skips changing it (so no inval) ddl3 # cached row as the oldtup of an update, losing relhasindex - read1 # observe damage # without cachefill3, no bug permutation cir1 cic2 ddl3 read1
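
The race that inplace-inval.spec exercises can also be reproduced by hand
with plain SQL across three sessions, interleaved as in the first
permutation above.  The sketch below is illustrative only and not part of
the patch; the session labels (s1..s3) follow the spec's session numbering,
and the final SELECT corresponds to the read1 step this commit removes from
the expected output:

  -- s3: populate s3's catcache with the pg_class row of newly_indexed
  TABLE newly_indexed;

  -- s1: CREATE INDEX inplace-updates pg_class to set relhasindex=true;
  -- the data change survives the abort, but after this revert the
  -- ROLLBACK discards the queued invalidation (see the XXX comments above)
  BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;

  -- s2: sees relhasindex=true already set, so it skips changing it
  -- and sends no invalidation of its own
  CREATE INDEX i2 ON newly_indexed (c);

  -- s3: heap_update() takes its oldtup from the stale catcache entry,
  -- writing back relhasindex=false and losing the inplace update
  ALTER TABLE newly_indexed ADD extra int;

  -- any session: usually prints "f" (not wanted) even though i2 exists
  SELECT relhasindex FROM pg_class WHERE oid = 'newly_indexed'::regclass;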