
Revert "For inplace update, send nontransactional invalidations."

This reverts commit 95c5acb3fc261067ab65ddc0b2dca8e162f09442 (v17) and
counterparts in each non-master branch.  If released, that commit
would have caused a worst-in-years minor release regression, via
undetected LWLock self-deadlock.  This commit and its self-deadlock fix
warrant more bake time in the master branch.

Reported by Alexander Lakhin.

Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
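
For context on the failure class named above: PostgreSQL LWLocks are not re-entrant, and unlike heavyweight locks they have no deadlock detector, so a backend that re-acquires an LWLock it already holds waits on itself forever. A minimal illustration follows (a hypothetical function, not the reverted commit's actual call path):

	#include "postgres.h"
	#include "storage/lwlock.h"

	/* Hypothetical demonstration of undetected LWLock self-deadlock. */
	static void
	lwlock_self_deadlock(LWLock *lock)
	{
		LWLockAcquire(lock, LW_EXCLUSIVE);

		/*
		 * Any code reached from here that tries to take the same lock again
		 * simply sleeps waiting for a wakeup that can never come; nothing
		 * reports an error.
		 */
		LWLockAcquire(lock, LW_EXCLUSIVE);	/* never returns */
	}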
Noah Misch 2024-11-02 09:05:00 -07:00
parent bc6bad8857
commit c1099dd745
13 changed files with 137 additions and 320 deletions

src/backend/access/heap/heapam.c

@@ -6355,24 +6355,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
 		elog(ERROR, "wrong tuple length");
 
-	/*
-	 * Construct shared cache inval if necessary.  Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys.  But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 */
-	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
-	/*
-	 * Unlink relcache init files as needed.  If unlinking, acquire
-	 * RelCacheInitLock until after associated invalidations.  By doing this
-	 * in advance, if we checkpoint and then crash between inplace
-	 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
-	 * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
-	 * neglect to PANIC on EIO.
-	 */
-	PreInplace_Inval();
-
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
@@ -6416,28 +6398,17 @@ heap_inplace_update_and_unlock(Relation relation,
 		PageSetLSN(BufferGetPage(buffer), recptr);
 	}
 
-	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
-	/*
-	 * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
-	 * do this before UnlockTuple().
-	 *
-	 * If we're mutating a tuple visible only to this transaction, there's an
-	 * equivalent transactional inval from the action that created the tuple,
-	 * and this inval is superfluous.
-	 */
-	AtInplace_Inval();
-
 	END_CRIT_SECTION();
-	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
-	AcceptInvalidationMessages();	/* local processing of just-sent inval */
+
+	heap_inplace_unlock(relation, oldtup, buffer);
 
 	/*
-	 * Queue a transactional inval.  The immediate invalidation we just sent
-	 * is the only one known to be necessary.  To reduce risk from the
-	 * transition to immediate invalidation, continue sending a transactional
-	 * invalidation like we've long done.  Third-party code might rely on it.
+	 * Send out shared cache inval if necessary.  Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys.  But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
 	 *
 	 * XXX ROLLBACK discards the invalidation.  See test inplace-inval.spec.
 	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);
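
In outline, the two hunks above move invalidation back out of the WAL-insertion critical section: the restored function mutates and logs the tuple inside the critical section, unlocks, and only then sends the inval. A condensed, hypothetical sketch of that restored shape (heavily trimmed; the real function also computes offsets, handles hint bits, and has error paths):

	#include "postgres.h"
	#include "access/heapam.h"
	#include "miscadmin.h"
	#include "storage/bufmgr.h"
	#include "utils/inval.h"

	/* Sketch only: parameter list invented for illustration. */
	static void
	inplace_flow_sketch(Relation relation, HeapTuple oldtup, HeapTuple tuple,
						Buffer buffer, char *dst, const char *src, Size newlen)
	{
		START_CRIT_SECTION();

		memcpy(dst, src, newlen);	/* overwrite the tuple bytes in place */
		MarkBufferDirty(buffer);
		/* ... XLogInsert() and PageSetLSN() when the relation is logged ... */

		END_CRIT_SECTION();

		heap_inplace_unlock(relation, oldtup, buffer);

		/* The inval is once again sent after the critical section ends. */
		if (!IsBootstrapProcessingMode())
			CacheInvalidateHeapTuple(relation, tuple, NULL);
	}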

src/backend/access/transam/xact.c

@@ -1358,24 +1358,14 @@ RecordTransactionCommit(void)
 		/*
 		 * Transactions without an assigned xid can contain invalidation
-		 * messages.  While inplace updates do this, this is not known to be
-		 * necessary; see comment at inplace CacheInvalidateHeapTuple().
-		 * Extensions might still rely on this capability, and standbys may
-		 * need to process those invals.  We can't emit a commit record
-		 * without an xid, and we don't want to force assigning an xid,
-		 * because that'd be problematic for e.g. vacuum.  Hence we emit a
-		 * bespoke record for the invalidations.  We don't want to use that in
-		 * case a commit record is emitted, so they happen synchronously with
-		 * commits (besides not wanting to emit more WAL records).
-		 *
-		 * XXX Every known use of this capability is a defect.  Since an XID
-		 * isn't controlling visibility of the change that prompted invals,
-		 * other sessions need the inval even if this transaction aborts.
-		 *
-		 * ON COMMIT DELETE ROWS does a nontransactional index_build(), which
-		 * queues a relcache inval, including in transactions without an xid
-		 * that had read the (empty) table.  Standbys don't need any ON COMMIT
-		 * DELETE ROWS invals, but we've not done the work to withhold them.
+		 * messages (e.g. explicit relcache invalidations or catcache
+		 * invalidations for inplace updates); standbys need to process those.
+		 * We can't emit a commit record without an xid, and we don't want to
+		 * force assigning an xid, because that'd be problematic for e.g.
+		 * vacuum.  Hence we emit a bespoke record for the invalidations.  We
+		 * don't want to use that in case a commit record is emitted, so they
+		 * happen synchronously with commits (besides not wanting to emit more
+		 * WAL records).
 		 */
 		if (nmsgs != 0)
 		{
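
For orientation, the branch this comment guards is what ships the "bespoke record": when an xid-less transaction has pending messages, RecordTransactionCommit() hands them to LogStandbyInvalidations() rather than emitting a commit record. A trimmed sketch of that logic, with variable names as in xact.c (shown for orientation, not verbatim):

	/* xid-less path in RecordTransactionCommit(): ship invals standalone */
	if (nmsgs != 0)
	{
		LogStandbyInvalidations(nmsgs, invalMessages,
								RelcacheInitFileInval);
		wrote_xlog = true;
	}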

src/backend/catalog/index.c

@@ -2889,19 +2889,12 @@ index_update_stats(Relation rel,
 	if (dirty)
 	{
 		systable_inplace_update_finish(state, tuple);
-		/* the above sends transactional and immediate cache inval messages */
+		/* the above sends a cache inval message */
 	}
 	else
 	{
 		systable_inplace_update_cancel(state);
-
-		/*
-		 * While we didn't change relhasindex, CREATE INDEX needs a
-		 * transactional inval for when the new index's catalog rows become
-		 * visible.  Other CREATE INDEX and REINDEX code happens to also queue
-		 * this inval, but keep this in case rare callers rely on this part of
-		 * our API contract.
-		 */
+		/* no need to change tuple, but force relcache inval anyway */
 		CacheInvalidateRelcacheByTuple(tuple);
 	}

src/backend/commands/event_trigger.c

@@ -975,6 +975,11 @@ EventTriggerOnLogin(void)
 				 * this instead of regular updates serves two purposes. First,
 				 * that avoids possible waiting on the row-level lock. Second,
 				 * that avoids dealing with TOAST.
+				 *
+				 * Changes made by inplace update may be lost due to
+				 * concurrent normal updates; see inplace-inval.spec. However,
+				 * we are OK with that.  The subsequent connections will still
+				 * have a chance to set "dathasloginevt" to false.
 				 */
 				systable_inplace_update_finish(state, tuple);
 			}

src/backend/replication/logical/decode.c

@@ -508,19 +508,23 @@ heap_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 			/*
 			 * Inplace updates are only ever performed on catalog tuples and
-			 * can, per definition, not change tuple visibility.  Inplace
-			 * updates don't affect storage or interpretation of table rows,
-			 * so they don't affect logicalrep_write_tuple() outcomes.  Hence,
-			 * we don't process invalidations from the original operation.  If
-			 * inplace updates did affect those things, invalidations wouldn't
-			 * make it work, since there are no snapshot-specific versions of
-			 * inplace-updated values.  Since we also don't decode catalog
-			 * tuples, we're not interested in the record's contents.
+			 * can, per definition, not change tuple visibility.  Since we
+			 * don't decode catalog tuples, we're not interested in the
+			 * record's contents.
 			 *
-			 * WAL contains likely-unnecessary commit-time invals from the
-			 * CacheInvalidateHeapTuple() call in heap_inplace_update().
-			 * Excess invalidation is safe.
+			 * In-place updates can be used either by XID-bearing transactions
+			 * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
+			 * transactions (e.g. VACUUM).  In the former case, the commit
+			 * record will include cache invalidations, so we mark the
+			 * transaction as catalog modifying here.  Currently that's
+			 * redundant because the commit will do that as well, but once we
+			 * support decoding in-progress relations, this will be important.
 			 */
+			if (!TransactionIdIsValid(xid))
+				break;
+
+			(void) SnapBuildProcessChange(builder, xid, buf->origptr);
+			ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
 			break;
 
 		case XLOG_HEAP_CONFIRM:

src/backend/utils/cache/catcache.c

@@ -2288,8 +2288,7 @@ void
 PrepareToInvalidateCacheTuple(Relation relation,
 							  HeapTuple tuple,
 							  HeapTuple newtuple,
-							  void (*function) (int, uint32, Oid, void *),
-							  void *context)
+							  void (*function) (int, uint32, Oid))
 {
 	slist_iter	iter;
 	Oid			reloid;
@@ -2330,7 +2329,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
 		hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
 		dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
 
-		(*function) (ccp->id, hashvalue, dbid, context);
+		(*function) (ccp->id, hashvalue, dbid);
 
 		if (newtuple)
 		{
@@ -2339,7 +2338,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
 			newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
 
 			if (newhashvalue != hashvalue)
-				(*function) (ccp->id, newhashvalue, dbid, context);
+				(*function) (ccp->id, newhashvalue, dbid);
 		}
 	}
 }
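
With the context-pointer plumbing reverted, a PrepareToInvalidateCacheTuple() callback is again a plain three-argument function. A hypothetical callback matching the restored signature (the function name is invented for illustration):

	#include "postgres.h"
	#include "utils/catcache.h"

	/* Hypothetical callback matching the restored signature. */
	static void
	report_catcache_inval(int cacheId, uint32 hashValue, Oid dbId)
	{
		elog(DEBUG1, "catcache inval: cache %d, hash %u, db %u",
			 cacheId, hashValue, dbId);
	}

	/* e.g.: PrepareToInvalidateCacheTuple(rel, tup, NULL, report_catcache_inval); */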

src/backend/utils/cache/inval.c

@@ -94,10 +94,6 @@
  * worth trying to avoid sending such inval traffic in the future, if those
  * problems can be overcome cheaply.
  *
- * When making a nontransactional change to a cacheable object, we must
- * likewise send the invalidation immediately, before ending the change's
- * critical section.  This includes inplace heap updates, relmap, and smgr.
- *
  * When wal_level=logical, write invalidations into WAL at each command end to
  * support the decoding of the in-progress transactions.  See
  * CommandEndInvalidationMessages.
@@ -134,15 +130,13 @@
 /*
  * Pending requests are stored as ready-to-send SharedInvalidationMessages.
- * We keep the messages themselves in arrays in TopTransactionContext (there
- * are separate arrays for catcache and relcache messages).  For transactional
- * messages, control information is kept in a chain of TransInvalidationInfo
- * structs, also allocated in TopTransactionContext.  (We could keep a
- * subtransaction's TransInvalidationInfo in its CurTransactionContext; but
- * that's more wasteful not less so, since in very many scenarios it'd be the
- * only allocation in the subtransaction's CurTransactionContext.)  For
- * inplace update messages, control information appears in an
- * InvalidationInfo, allocated in CurrentMemoryContext.
+ * We keep the messages themselves in arrays in TopTransactionContext
+ * (there are separate arrays for catcache and relcache messages).  Control
+ * information is kept in a chain of TransInvalidationInfo structs, also
+ * allocated in TopTransactionContext.  (We could keep a subtransaction's
+ * TransInvalidationInfo in its CurTransactionContext; but that's more
+ * wasteful not less so, since in very many scenarios it'd be the only
+ * allocation in the subtransaction's CurTransactionContext.)
  *
  * We can store the message arrays densely, and yet avoid moving data around
  * within an array, because within any one subtransaction we need only
@@ -153,9 +147,7 @@
  * struct.  Similarly, we need distinguish messages of prior subtransactions
  * from those of the current subtransaction only until the subtransaction
  * completes, after which we adjust the array indexes in the parent's
- * TransInvalidationInfo to include the subtransaction's messages.  Inplace
- * invalidations don't need a concept of command or subtransaction boundaries,
- * since we send them during the WAL insertion critical section.
+ * TransInvalidationInfo to include the subtransaction's messages.
  *
  * The ordering of the individual messages within a command's or
  * subtransaction's output is not considered significant, although this
@@ -208,7 +200,7 @@ typedef struct InvalidationMsgsGroup
 /*----------------
- * Transactional invalidation messages are divided into two groups:
+ * Invalidation messages are divided into two groups:
  *	1) events so far in current command, not yet reflected to caches.
  *	2) events in previous commands of current transaction; these have
  *	   been reflected to local caches, and must be either broadcast to
@@ -224,36 +216,26 @@ typedef struct InvalidationMsgsGroup
  *----------------
  */
 
-/* fields common to both transactional and inplace invalidation */
-typedef struct InvalidationInfo
-{
-	/* Events emitted by current command */
-	InvalidationMsgsGroup CurrentCmdInvalidMsgs;
-
-	/* init file must be invalidated? */
-	bool		RelcacheInitFileInval;
-} InvalidationInfo;
-
-/* subclass adding fields specific to transactional invalidation */
 typedef struct TransInvalidationInfo
 {
-	/* Base class */
-	struct InvalidationInfo ii;
-
-	/* Events emitted by previous commands of this (sub)transaction */
-	InvalidationMsgsGroup PriorCmdInvalidMsgs;
-
 	/* Back link to parent transaction's info */
 	struct TransInvalidationInfo *parent;
 
 	/* Subtransaction nesting depth */
 	int			my_level;
+
+	/* Events emitted by current command */
+	InvalidationMsgsGroup CurrentCmdInvalidMsgs;
+
+	/* Events emitted by previous commands of this (sub)transaction */
+	InvalidationMsgsGroup PriorCmdInvalidMsgs;
+
+	/* init file must be invalidated? */
+	bool		RelcacheInitFileInval;
 } TransInvalidationInfo;
 
 static TransInvalidationInfo *transInvalInfo = NULL;
-
-static InvalidationInfo *inplaceInvalInfo = NULL;
 
 /* GUC storage */
 int			debug_discard_caches = 0;
@@ -561,12 +543,9 @@ ProcessInvalidationMessagesMulti(InvalidationMsgsGroup *group,
 static void
 RegisterCatcacheInvalidation(int cacheId,
 							 uint32 hashValue,
-							 Oid dbId,
-							 void *context)
+							 Oid dbId)
 {
-	InvalidationInfo *info = (InvalidationInfo *) context;
-
-	AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs,
+	AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
 								   cacheId, hashValue, dbId);
 }
@@ -576,9 +555,10 @@ RegisterCatcacheInvalidation(int cacheId,
  * Register an invalidation event for all catcache entries from a catalog.
  */
 static void
-RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
+RegisterCatalogInvalidation(Oid dbId, Oid catId)
 {
-	AddCatalogInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, catId);
+	AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
+								  dbId, catId);
 }
@@ -587,9 +567,10 @@ RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
  * As above, but register a relcache invalidation event.
  */
 static void
-RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
+RegisterRelcacheInvalidation(Oid dbId, Oid relId)
 {
-	AddRelcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
+	AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
+								   dbId, relId);
 
 	/*
 	 * Most of the time, relcache invalidation is associated with system
@@ -606,7 +587,7 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
 	 * as well.  Also zap when we are invalidating whole relcache.
 	 */
 	if (relId == InvalidOid || RelationIdIsInInitFile(relId))
-		info->RelcacheInitFileInval = true;
+		transInvalInfo->RelcacheInitFileInval = true;
 }
@@ -616,27 +597,24 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
  * Only needed for catalogs that don't have catcaches.
  */
 static void
-RegisterSnapshotInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
+RegisterSnapshotInvalidation(Oid dbId, Oid relId)
 {
-	AddSnapshotInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
+	AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
+								   dbId, relId);
 }
 
 /*
  * PrepareInvalidationState
  *		Initialize inval data for the current (sub)transaction.
  */
-static InvalidationInfo *
+static void
 PrepareInvalidationState(void)
 {
 	TransInvalidationInfo *myInfo;
 
-	Assert(IsTransactionState());
-	/* Can't queue transactional message while collecting inplace messages. */
-	Assert(inplaceInvalInfo == NULL);
-
 	if (transInvalInfo != NULL &&
 		transInvalInfo->my_level == GetCurrentTransactionNestLevel())
-		return (InvalidationInfo *) transInvalInfo;
+		return;
 
 	myInfo = (TransInvalidationInfo *)
 		MemoryContextAllocZero(TopTransactionContext,
@@ -659,7 +637,7 @@ PrepareInvalidationState(void)
 		 * counter.  This is a convenient place to check for that, as well as
 		 * being important to keep management of the message arrays simple.
 		 */
-		if (NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs) != 0)
+		if (NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs) != 0)
 			elog(ERROR, "cannot start a subtransaction when there are unprocessed inval messages");
@@ -668,8 +646,8 @@ PrepareInvalidationState(void)
 		 * to update them to follow whatever is already in the arrays.
 		 */
 		SetGroupToFollow(&myInfo->PriorCmdInvalidMsgs,
-						 &transInvalInfo->ii.CurrentCmdInvalidMsgs);
-		SetGroupToFollow(&myInfo->ii.CurrentCmdInvalidMsgs,
+						 &transInvalInfo->CurrentCmdInvalidMsgs);
+		SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
 						 &myInfo->PriorCmdInvalidMsgs);
 	}
 	else
@@ -685,41 +663,6 @@ PrepareInvalidationState(void)
 	}
 
 	transInvalInfo = myInfo;
-	return (InvalidationInfo *) myInfo;
-}
-
-/*
- * PrepareInplaceInvalidationState
- *		Initialize inval data for an inplace update.
- *
- * See previous function for more background.
- */
-static InvalidationInfo *
-PrepareInplaceInvalidationState(void)
-{
-	InvalidationInfo *myInfo;
-
-	Assert(IsTransactionState());
-	/* limit of one inplace update under assembly */
-	Assert(inplaceInvalInfo == NULL);
-
-	/* gone after WAL insertion CritSection ends, so use current context */
-	myInfo = (InvalidationInfo *) palloc0(sizeof(InvalidationInfo));
-
-	/* Stash our messages past end of the transactional messages, if any. */
-	if (transInvalInfo != NULL)
-		SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
-						 &transInvalInfo->ii.CurrentCmdInvalidMsgs);
-	else
-	{
-		InvalMessageArrays[CatCacheMsgs].msgs = NULL;
-		InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
-		InvalMessageArrays[RelCacheMsgs].msgs = NULL;
-		InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
-	}
-
-	inplaceInvalInfo = myInfo;
-	return myInfo;
 }
 
 /* ----------------------------------------------------------------
@@ -959,7 +902,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
 	 * after we send the SI messages.  However, we need not do anything unless
 	 * we committed.
 	 */
-	*RelcacheInitFileInval = transInvalInfo->ii.RelcacheInitFileInval;
+	*RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
 
 	/*
 	 * Collect all the pending messages into a single contiguous array of
@@ -970,7 +913,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
 	 * not new ones.
 	 */
 	nummsgs = NumMessagesInGroup(&transInvalInfo->PriorCmdInvalidMsgs) +
-		NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs);
+		NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs);
 
 	*msgs = msgarray = (SharedInvalidationMessage *)
 		MemoryContextAlloc(CurTransactionContext,
@@ -983,7 +926,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
 										msgs,
 										n * sizeof(SharedInvalidationMessage)),
 								 nmsgs += n));
-	ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
+	ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
 								CatCacheMsgs,
 								(memcpy(msgarray + nmsgs,
 										msgs,
@@ -995,7 +938,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
 										msgs,
 										n * sizeof(SharedInvalidationMessage)),
 								 nmsgs += n));
-	ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
+	ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
 								RelCacheMsgs,
 								(memcpy(msgarray + nmsgs,
 										msgs,
@@ -1081,9 +1024,7 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
 void
 AtEOXact_Inval(bool isCommit)
 {
-	inplaceInvalInfo = NULL;
-
-	/* Quick exit if no transactional messages */
+	/* Quick exit if no messages */
 	if (transInvalInfo == NULL)
 		return;
@@ -1097,16 +1038,16 @@ AtEOXact_Inval(bool isCommit)
 		 * after we send the SI messages.  However, we need not do anything
 		 * unless we committed.
 		 */
-		if (transInvalInfo->ii.RelcacheInitFileInval)
+		if (transInvalInfo->RelcacheInitFileInval)
 			RelationCacheInitFilePreInvalidate();
 
 		AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
-								   &transInvalInfo->ii.CurrentCmdInvalidMsgs);
+								   &transInvalInfo->CurrentCmdInvalidMsgs);
 
 		ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
 										 SendSharedInvalidMessages);
 
-		if (transInvalInfo->ii.RelcacheInitFileInval)
+		if (transInvalInfo->RelcacheInitFileInval)
 			RelationCacheInitFilePostInvalidate();
 	}
 	else
@@ -1119,44 +1060,6 @@ AtEOXact_Inval(bool isCommit)
 	transInvalInfo = NULL;
 }
 
-/*
- * PreInplace_Inval
- *		Process queued-up invalidation before inplace update critical section.
- *
- * Tasks belong here if they are safe even if the inplace update does not
- * complete.  Currently, this just unlinks a cache file, which can fail.  The
- * sum of this and AtInplace_Inval() mirrors AtEOXact_Inval(isCommit=true).
- */
-void
-PreInplace_Inval(void)
-{
-	Assert(CritSectionCount == 0);
-
-	if (inplaceInvalInfo && inplaceInvalInfo->RelcacheInitFileInval)
-		RelationCacheInitFilePreInvalidate();
-}
-
-/*
- * AtInplace_Inval
- *		Process queued-up invalidations after inplace update buffer mutation.
- */
-void
-AtInplace_Inval(void)
-{
-	Assert(CritSectionCount > 0);
-
-	if (inplaceInvalInfo == NULL)
-		return;
-
-	ProcessInvalidationMessagesMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
-									 SendSharedInvalidMessages);
-
-	if (inplaceInvalInfo->RelcacheInitFileInval)
-		RelationCacheInitFilePostInvalidate();
-
-	inplaceInvalInfo = NULL;
-}
-
 /*
  * AtEOSubXact_Inval
  *		Process queued-up invalidation messages at end of subtransaction.
@@ -1179,20 +1082,9 @@ void
 AtEOSubXact_Inval(bool isCommit)
 {
 	int			my_level;
-	TransInvalidationInfo *myInfo;
+	TransInvalidationInfo *myInfo = transInvalInfo;
 
-	/*
-	 * Successful inplace update must clear this, but we clear it on abort.
-	 * Inplace updates allocate this in CurrentMemoryContext, which has
-	 * lifespan <= subtransaction lifespan.  Hence, don't free it explicitly.
-	 */
-	if (isCommit)
-		Assert(inplaceInvalInfo == NULL);
-	else
-		inplaceInvalInfo = NULL;
-
-	/* Quick exit if no transactional messages. */
-	myInfo = transInvalInfo;
+	/* Quick exit if no messages. */
 	if (myInfo == NULL)
 		return;
@@ -1233,12 +1125,12 @@ AtEOSubXact_Inval(bool isCommit)
 								   &myInfo->PriorCmdInvalidMsgs);
 
 		/* Must readjust parent's CurrentCmdInvalidMsgs indexes now */
-		SetGroupToFollow(&myInfo->parent->ii.CurrentCmdInvalidMsgs,
+		SetGroupToFollow(&myInfo->parent->CurrentCmdInvalidMsgs,
 						 &myInfo->parent->PriorCmdInvalidMsgs);
 
 		/* Pending relcache inval becomes parent's problem too */
-		if (myInfo->ii.RelcacheInitFileInval)
-			myInfo->parent->ii.RelcacheInitFileInval = true;
+		if (myInfo->RelcacheInitFileInval)
+			myInfo->parent->RelcacheInitFileInval = true;
 
 		/* Pop the transaction state stack */
 		transInvalInfo = myInfo->parent;
@@ -1285,7 +1177,7 @@ CommandEndInvalidationMessages(void)
 	if (transInvalInfo == NULL)
 		return;
 
-	ProcessInvalidationMessages(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
+	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
 								LocalExecuteInvalidationMessage);
 
 	/* WAL Log per-command invalidation messages for wal_level=logical */
@@ -1293,21 +1185,26 @@ CommandEndInvalidationMessages(void)
 		LogLogicalInvalidations();
 
 	AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
-							   &transInvalInfo->ii.CurrentCmdInvalidMsgs);
+							   &transInvalInfo->CurrentCmdInvalidMsgs);
 }
 
 /*
- * CacheInvalidateHeapTupleCommon
- *		Common logic for end-of-command and inplace variants.
+ * CacheInvalidateHeapTuple
+ *		Register the given tuple for invalidation at end of command
+ *		(ie, current command is creating or outdating this tuple).
+ *		Also, detect whether a relcache invalidation is implied.
+ *
+ * For an insert or delete, tuple is the target tuple and newtuple is NULL.
+ * For an update, we are called just once, with tuple being the old tuple
+ * version and newtuple the new version.  This allows avoidance of duplicate
+ * effort during an update.
  */
-static void
-CacheInvalidateHeapTupleCommon(Relation relation,
-							   HeapTuple tuple,
-							   HeapTuple newtuple,
-							   InvalidationInfo *(*prepare_callback) (void))
+void
+CacheInvalidateHeapTuple(Relation relation,
+						 HeapTuple tuple,
+						 HeapTuple newtuple)
 {
-	InvalidationInfo *info;
 	Oid			tupleRelId;
 	Oid			databaseId;
 	Oid			relationId;
@@ -1331,8 +1228,11 @@ CacheInvalidateHeapTupleCommon(Relation relation,
 	if (IsToastRelation(relation))
 		return;
 
-	/* Allocate any required resources. */
-	info = prepare_callback();
+	/*
+	 * If we're not prepared to queue invalidation messages for this
+	 * subtransaction level, get ready now.
+	 */
+	PrepareInvalidationState();
 
 	/*
 	 * First let the catcache do its thing
@@ -1341,12 +1241,11 @@ CacheInvalidateHeapTupleCommon(Relation relation,
 	if (RelationInvalidatesSnapshotsOnly(tupleRelId))
 	{
 		databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
-		RegisterSnapshotInvalidation(info, databaseId, tupleRelId);
+		RegisterSnapshotInvalidation(databaseId, tupleRelId);
 	}
 	else
 		PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
-									  RegisterCatcacheInvalidation,
-									  (void *) info);
+									  RegisterCatcacheInvalidation);
 
 	/*
 	 * Now, is this tuple one of the primary definers of a relcache entry? See
@@ -1419,44 +1318,7 @@ CacheInvalidateHeapTupleCommon(Relation relation,
 	/*
 	 * Yes.  We need to register a relcache invalidation event.
 	 */
-	RegisterRelcacheInvalidation(info, databaseId, relationId);
-}
-
-/*
- * CacheInvalidateHeapTuple
- *		Register the given tuple for invalidation at end of command
- *		(ie, current command is creating or outdating this tuple) and end of
- *		transaction.  Also, detect whether a relcache invalidation is implied.
- *
- * For an insert or delete, tuple is the target tuple and newtuple is NULL.
- * For an update, we are called just once, with tuple being the old tuple
- * version and newtuple the new version.  This allows avoidance of duplicate
- * effort during an update.
- */
-void
-CacheInvalidateHeapTuple(Relation relation,
-						 HeapTuple tuple,
-						 HeapTuple newtuple)
-{
-	CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
-								   PrepareInvalidationState);
-}
-
-/*
- * CacheInvalidateHeapTupleInplace
- *		Register the given tuple for nontransactional invalidation pertaining
- *		to an inplace update.  Also, detect whether a relcache invalidation is
- *		implied.
- *
- * Like CacheInvalidateHeapTuple(), but for inplace updates.
- */
-void
-CacheInvalidateHeapTupleInplace(Relation relation,
-								HeapTuple tuple,
-								HeapTuple newtuple)
-{
-	CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
-								   PrepareInplaceInvalidationState);
+	RegisterRelcacheInvalidation(databaseId, relationId);
 }
 
 /*
@@ -1475,13 +1337,14 @@ CacheInvalidateCatalog(Oid catalogId)
 {
 	Oid			databaseId;
 
+	PrepareInvalidationState();
+
 	if (IsSharedRelation(catalogId))
 		databaseId = InvalidOid;
 	else
 		databaseId = MyDatabaseId;
 
-	RegisterCatalogInvalidation(PrepareInvalidationState(),
-								databaseId, catalogId);
+	RegisterCatalogInvalidation(databaseId, catalogId);
 }
 
 /*
@@ -1499,14 +1362,15 @@ CacheInvalidateRelcache(Relation relation)
 	Oid			databaseId;
 	Oid			relationId;
 
+	PrepareInvalidationState();
+
 	relationId = RelationGetRelid(relation);
 	if (relation->rd_rel->relisshared)
 		databaseId = InvalidOid;
 	else
 		databaseId = MyDatabaseId;
 
-	RegisterRelcacheInvalidation(PrepareInvalidationState(),
-								 databaseId, relationId);
+	RegisterRelcacheInvalidation(databaseId, relationId);
 }
 
 /*
@@ -1519,8 +1383,9 @@ CacheInvalidateRelcache(Relation relation)
 void
 CacheInvalidateRelcacheAll(void)
 {
-	RegisterRelcacheInvalidation(PrepareInvalidationState(),
-								 InvalidOid, InvalidOid);
+	PrepareInvalidationState();
+
+	RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
 }
 
 /*
@@ -1534,13 +1399,14 @@ CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
 	Oid			databaseId;
 	Oid			relationId;
 
+	PrepareInvalidationState();
+
 	relationId = classtup->oid;
 	if (classtup->relisshared)
 		databaseId = InvalidOid;
 	else
 		databaseId = MyDatabaseId;
-	RegisterRelcacheInvalidation(PrepareInvalidationState(),
-								 databaseId, relationId);
+
+	RegisterRelcacheInvalidation(databaseId, relationId);
 }
 
 /*
@@ -1554,6 +1420,8 @@ CacheInvalidateRelcacheByRelid(Oid relid)
 {
 	HeapTuple	tup;
 
+	PrepareInvalidationState();
+
 	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
 	if (!HeapTupleIsValid(tup))
 		elog(ERROR, "cache lookup failed for relation %u", relid);
@@ -1743,7 +1611,7 @@ LogLogicalInvalidations(void)
 	if (transInvalInfo == NULL)
 		return;
 
-	group = &transInvalInfo->ii.CurrentCmdInvalidMsgs;
+	group = &transInvalInfo->CurrentCmdInvalidMsgs;
 	nmsgs = NumMessagesInGroup(group);
 
 	if (nmsgs > 0)
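
Taken together, these hunks restore the convention that the static Register* helpers implicitly target transInvalInfo, with each public CacheInvalidate* entry point calling PrepareInvalidationState() first. A hypothetical entry point in the restored style, mirroring CacheInvalidateCatalog() above (since the helpers are static, such a function would have to live in inval.c):

	/* Hypothetical entry point in the restored style. */
	void
	CacheInvalidateExampleCatalog(Oid catalogId)
	{
		Oid			databaseId;

		/* ensure per-(sub)transaction inval state exists */
		PrepareInvalidationState();

		databaseId = IsSharedRelation(catalogId) ? InvalidOid : MyDatabaseId;

		/* helpers now read the implicit transInvalInfo; no context argument */
		RegisterCatalogInvalidation(databaseId, catalogId);
	}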

src/backend/utils/cache/syscache.c

@@ -351,7 +351,8 @@ SearchSysCacheLocked1(int cacheId,
 
 		/*
 		 * If an inplace update just finished, ensure we process the syscache
-		 * inval.
+		 * inval.  XXX this is insufficient: the inplace updater may not yet
+		 * have reached AtEOXact_Inval().  See test at inplace-inval.spec.
 		 *
 		 * If a heap_update() call just released its LOCKTAG_TUPLE, we'll
 		 * probably find the old tuple and reach "tuple concurrently updated".

src/include/utils/catcache.h

@@ -225,7 +225,6 @@ extern void CatCacheInvalidate(CatCache *cache, uint32 hashValue);
 extern void PrepareToInvalidateCacheTuple(Relation relation,
 										  HeapTuple tuple,
 										  HeapTuple newtuple,
-										  void (*function) (int, uint32, Oid, void *),
-										  void *context);
+										  void (*function) (int, uint32, Oid));
 
 #endif							/* CATCACHE_H */

src/include/utils/inval.h

@@ -28,9 +28,6 @@ extern void AcceptInvalidationMessages(void);
 
 extern void AtEOXact_Inval(bool isCommit);
 
-extern void PreInplace_Inval(void);
-extern void AtInplace_Inval(void);
-
 extern void AtEOSubXact_Inval(bool isCommit);
 
 extern void PostPrepare_Inval(void);
@@ -40,9 +37,6 @@ extern void CommandEndInvalidationMessages(void);
 extern void CacheInvalidateHeapTuple(Relation relation,
 									 HeapTuple tuple,
 									 HeapTuple newtuple);
-extern void CacheInvalidateHeapTupleInplace(Relation relation,
-											HeapTuple tuple,
-											HeapTuple newtuple);
 
 extern void CacheInvalidateCatalog(Oid catalogId);

src/test/isolation/expected/inplace-inval.out

@@ -1,6 +1,6 @@
 Parsed test spec with 3 sessions
 
-starting permutation: cachefill3 cir1 cic2 ddl3 read1
+starting permutation: cachefill3 cir1 cic2 ddl3
 step cachefill3: TABLE newly_indexed;
 c
 -
@@ -9,14 +9,6 @@ c
 step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;
 step cic2: CREATE INDEX i2 ON newly_indexed (c);
 step ddl3: ALTER TABLE newly_indexed ADD extra int;
-step read1: 
-	SELECT relhasindex FROM pg_class WHERE oid = 'newly_indexed'::regclass;
-
-relhasindex
------------
-t          
-(1 row)
-
 
 starting permutation: cir1 cic2 ddl3 read1
 step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;

src/test/isolation/specs/inplace-inval.spec

@@ -1,7 +1,7 @@
-# An inplace update had been able to abort before sending the inplace
-# invalidation message to the shared queue.  If a heap_update() caller then
-# retrieved its oldtup from a cache, the heap_update() could revert the
-# inplace update.
+# If a heap_update() caller retrieves its oldtup from a cache, it's possible
+# for that cache entry to predate an inplace update, causing loss of that
+# inplace update.  This arises because the transaction may abort before
+# sending the inplace invalidation message to the shared queue.
 
 setup
 {
@@ -27,12 +27,14 @@ step cachefill3	{ TABLE newly_indexed; }
 step ddl3		{ ALTER TABLE newly_indexed ADD extra int; }
 
+# XXX shows an extant bug.  Adding step read1 at the end would usually print
+# relhasindex=f (not wanted).  This does not reach the unwanted behavior under
+# -DCATCACHE_FORCE_RELEASE and friends.
 permutation
 	cachefill3	# populates the pg_class row in the catcache
 	cir1	# sets relhasindex=true; rollback discards cache inval
 	cic2	# sees relhasindex=true, skips changing it (so no inval)
 	ddl3	# cached row as the oldtup of an update, losing relhasindex
-	read1	# observe damage
 
 # without cachefill3, no bug
 permutation cir1 cic2 ddl3 read1

src/tools/pgindent/typedefs.list

@@ -1253,7 +1253,6 @@ Interval
 IntervalAggState
 IntoClause
 InvalMessageArray
-InvalidationInfo
 InvalidationMsgsGroup
 IpcMemoryId
 IpcMemoryKey