
For inplace update, send nontransactional invalidations.

The inplace update survives ROLLBACK.  The inval didn't, so another
backend's DDL could then update the row without incorporating the
inplace update.  In the test this fixes, a mix of CREATE INDEX and ALTER
TABLE resulted in a table with an index, yet relhasindex=f.  That is a
source of index corruption.  Back-patch to v12 (all supported versions).
The back branch versions don't change WAL, because those branches just
added end-of-recovery SIResetAll().  All branches change the ABI of
extern function PrepareToInvalidateCacheTuple().  No PGXN extension
calls that, and there's no apparent use case in extensions.
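The hazard, in miniature: transactional invals sit in a backend-local queue
that COMMIT sends and ROLLBACK discards, while the inplace change to the
shared buffer is already durable.  A hypothetical sketch (illustration only,
not the actual inval.c machinery):

    /* Hypothetical illustration; PostgreSQL's real queue lives in inval.c. */
    static int  pending_nmsgs;      /* invals queued by this transaction */

    static void
    queue_transactional_inval(void)
    {
        pending_nmsgs++;            /* other backends see this only at COMMIT */
    }

    static void
    at_transaction_abort(void)
    {
        /*
         * ROLLBACK discards the queued invals, yet the inplace update to the
         * catalog page survives the abort.  Other backends then reuse stale
         * cache entries, e.g. relhasindex=f for a table that has an index.
         * The fix sends the inplace update's invals nontransactionally,
         * before releasing the tuple lock, so no abort can cancel them.
         */
        pending_nmsgs = 0;
    }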

Reviewed by Nitin Motiani and (in earlier versions) Andres Freund.

Discussion: https://postgr.es/m/20240523000548.58.nmisch@google.com
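For extension authors weighing the ABI note: the change threads a context
pointer through to the catcache callback.  Roughly as follows, reconstructed
from the patch series rather than quoted from the headers, so treat the exact
argument lists as an approximation:

    /* before (assumed v17-era declaration) */
    extern void PrepareToInvalidateCacheTuple(Relation relation,
                                              HeapTuple tuple,
                                              HeapTuple newtuple,
                                              void (*function) (int, uint32, Oid));

    /* after: the callback gains a pass-through context argument */
    extern void PrepareToInvalidateCacheTuple(Relation relation,
                                              HeapTuple tuple,
                                              HeapTuple newtuple,
                                              void (*function) (int, uint32, Oid, void *),
                                              void *context);

Either way, as the message notes, no PGXN extension calls this function
directly.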
Author: Noah Misch
Date: 2024-10-25 06:51:02 -07:00
Parent: 0fe173680e
Commit: 243e9b40f1
19 changed files with 402 additions and 146 deletions

@@ -6326,6 +6326,9 @@ heap_inplace_update_and_unlock(Relation relation,
 	HeapTupleHeader htup = oldtup->t_data;
 	uint32		oldlen;
 	uint32		newlen;
+	int			nmsgs = 0;
+	SharedInvalidationMessage *invalMessages = NULL;
+	bool		RelcacheInitFileInval = false;
 
 	Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
 	oldlen = oldtup->t_len - htup->t_hoff;
@@ -6333,6 +6336,29 @@ heap_inplace_update_and_unlock(Relation relation,
 	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
 		elog(ERROR, "wrong tuple length");
 
+	/*
+	 * Construct shared cache inval if necessary.  Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys.  But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
+	 */
+	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
+
+	/* Like RecordTransactionCommit(), log only if needed */
+	if (XLogStandbyInfoActive())
+		nmsgs = inplaceGetInvalidationMessages(&invalMessages,
+											   &RelcacheInitFileInval);
+
+	/*
+	 * Unlink relcache init files as needed.  If unlinking, acquire
+	 * RelCacheInitLock until after associated invalidations.  By doing this
+	 * in advance, if we checkpoint and then crash between inplace
+	 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
+	 * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
+	 * neglect to PANIC on EIO.
+	 */
+	PreInplace_Inval();
+
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
@@ -6362,9 +6388,16 @@ heap_inplace_update_and_unlock(Relation relation,
 		XLogRecPtr	recptr;
 
 		xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
+		xlrec.dbId = MyDatabaseId;
+		xlrec.tsId = MyDatabaseTableSpace;
+		xlrec.relcacheInitFileInval = RelcacheInitFileInval;
+		xlrec.nmsgs = nmsgs;
 
 		XLogBeginInsert();
-		XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
+		XLogRegisterData((char *) &xlrec, MinSizeOfHeapInplace);
+		if (nmsgs != 0)
+			XLogRegisterData((char *) invalMessages,
+							 nmsgs * sizeof(SharedInvalidationMessage));
 
 		XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
 		XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
@@ -6376,17 +6409,28 @@ heap_inplace_update_and_unlock(Relation relation,
 		PageSetLSN(BufferGetPage(buffer), recptr);
 	}
 
-	END_CRIT_SECTION();
-
-	heap_inplace_unlock(relation, oldtup, buffer);
+	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 	/*
-	 * Send out shared cache inval if necessary.  Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys.  But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
+	 * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
+	 * do this before UnlockTuple().
 	 *
-	 * XXX ROLLBACK discards the invalidation.  See test inplace-inval.spec.
+	 * If we're mutating a tuple visible only to this transaction, there's an
+	 * equivalent transactional inval from the action that created the tuple,
+	 * and this inval is superfluous.
 	 */
+	AtInplace_Inval();
+
+	END_CRIT_SECTION();
+	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
+
+	AcceptInvalidationMessages();	/* local processing of just-sent inval */
+
+	/*
+	 * Queue a transactional inval.  The immediate invalidation we just sent
+	 * is the only one known to be necessary.  To reduce risk from the
+	 * transition to immediate invalidation, continue sending a transactional
+	 * invalidation like we've long done.  Third-party code might rely on it.
+	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);
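The new xlrec fields and the switch from SizeOfHeapInplace to
MinSizeOfHeapInplace above imply a variable-length record: a fixed header
followed by zero or more inval messages.  The layout is approximately the
following (consult src/include/access/heapam_xlog.h for the authoritative
definition):

    typedef struct xl_heap_inplace
    {
        OffsetNumber offnum;        /* updated tuple's offset on page */
        Oid         dbId;           /* MyDatabaseId */
        Oid         tsId;           /* MyDatabaseTableSpace */
        bool        relcacheInitFileInval;  /* invalidate relcache init files? */
        int         nmsgs;          /* number of shared inval msgs */
        SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
    } xl_heap_inplace;

    #define MinSizeOfHeapInplace	(offsetof(xl_heap_inplace, nmsgs) + sizeof(int))

A record carrying nmsgs messages therefore registers MinSizeOfHeapInplace +
nmsgs * sizeof(SharedInvalidationMessage) bytes, which is exactly what the two
XLogRegisterData() calls add up to.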

@@ -1170,6 +1170,12 @@ heap_xlog_inplace(XLogReaderState *record)
 	}
 	if (BufferIsValid(buffer))
 		UnlockReleaseBuffer(buffer);
+
+	ProcessCommittedInvalidationMessages(xlrec->msgs,
+										 xlrec->nmsgs,
+										 xlrec->relcacheInitFileInval,
+										 xlrec->dbId,
+										 xlrec->tsId);
 }
 
 void
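The retained transactional inval ("Third-party code might rely on it" above)
keeps long-standing extension patterns working unchanged.  A minimal sketch of
such third-party code, using the stock hooks from utils/inval.h:

    #include "postgres.h"

    #include "fmgr.h"
    #include "utils/inval.h"

    PG_MODULE_MAGIC;

    static void
    my_relcache_callback(Datum arg, Oid relid)
    {
        /*
         * Invalidate extension-local state for relid (InvalidOid means all).
         * After this commit, an inplace catalog update reaches other
         * backends via the immediate inval sent before UnlockTuple(), and
         * again via the transactional inval still queued at the end of
         * heap_inplace_update_and_unlock().
         */
    }

    void
    _PG_init(void)
    {
        CacheRegisterRelcacheCallback(my_relcache_callback, (Datum) 0);
    }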