
For inplace update durability, make heap_update() callers wait.

The previous commit fixed some ways of losing an inplace update.  It
remained possible to lose one when a backend working toward a
heap_update() copied a tuple into memory just before inplace update of
that tuple.  In catalogs eligible for inplace update, use LOCKTAG_TUPLE
to govern admission to the steps of copying an old tuple, modifying it,
and issuing heap_update().  This includes MERGE commands.  To avoid
changing most of the pg_class DDL, don't require LOCKTAG_TUPLE when
holding a relation lock sufficient to exclude inplace updaters.
Back-patch to v12 (all supported versions).  In v13 and v12, "UPDATE
pg_class" or "UPDATE pg_database" can still lose an inplace update.  The
v14+ UPDATE fix needs commit 86dc90056d,
and it wasn't worth reimplementing that fix without such infrastructure.
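To make the admission protocol concrete, here is a minimal caller-side
sketch.  This is not code from this commit (the real patch routes the
pattern through syscache helpers), and the function name is hypothetical;
it shows the shape the message describes: lock the TID before using a
copy of the old tuple, looping because the row can move before the lock
is acquired.

#include "postgres.h"
#include "access/htup_details.h"
#include "storage/itemptr.h"
#include "storage/lmgr.h"
#include "utils/syscache.h"

/*
 * Sketch: return a pg_class tuple copy with LOCKTAG_TUPLE held on its
 * TID.  A concurrent heap_update() can move the row between the cache
 * lookup and the lock acquisition, hence the retry loop.
 */
static HeapTuple
fetch_pg_class_tuple_locked(Relation pg_class, Oid relid)
{
	for (;;)
	{
		HeapTuple	tup;
		HeapTuple	recheck;

		tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for relation %u", relid);

		/* Lock the row version we found ... */
		LockTuple(pg_class, &tup->t_self, InplaceUpdateTupleLock);

		/* ... then confirm no heap_update() moved it meanwhile. */
		recheck = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
		if (HeapTupleIsValid(recheck) &&
			ItemPointerEquals(&tup->t_self, &recheck->t_self))
		{
			heap_freetuple(recheck);
			return tup;			/* caller modifies, updates, then unlocks */
		}

		/* Lost the race: drop everything and retry. */
		UnlockTuple(pg_class, &tup->t_self, InplaceUpdateTupleLock);
		heap_freetuple(tup);
		if (HeapTupleIsValid(recheck))
			heap_freetuple(recheck);
	}
}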

Reviewed by Nitin Motiani and (in earlier versions) Heikki Linnakangas.

Discussion: https://postgr.es/m/20231027214946.79.nmisch@google.com
commit 5c837f8fa0
parent 8590c942c1
Author: Noah Misch
Date:   2024-09-24 15:25:18 -07:00

19 changed files with 490 additions and 49 deletions

@@ -52,6 +52,8 @@
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/pg_database.h"
#include "catalog/pg_database_d.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
@@ -78,6 +80,12 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
Buffer newbuf, HeapTuple oldtup,
HeapTuple newtup, HeapTuple old_key_tuple,
bool all_visible_cleared, bool new_all_visible_cleared);
#ifdef USE_ASSERT_CHECKING
static void check_lock_if_inplace_updateable_rel(Relation relation,
ItemPointer otid,
HeapTuple newtup);
static void check_inplace_rel_lock(HeapTuple oldtup);
#endif
static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
Bitmapset *interesting_cols,
Bitmapset *external_cols,
@@ -119,6 +127,8 @@ static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_re
* heavyweight lock mode and MultiXactStatus values to use for any particular
* tuple lock strength.
*
* These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
*
* Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
* instead.
*/
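For reference, the alias this comment mentions is defined in
src/include/storage/lmgr.h elsewhere in this patch (not shown in this
hunk):

#define InplaceUpdateTupleLock ExclusiveLock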
@@ -3187,6 +3197,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot update tuples during a parallel operation")));
#ifdef USE_ASSERT_CHECKING
check_lock_if_inplace_updateable_rel(relation, otid, newtup);
#endif
/*
* Fetch the list of attributes to be checked for various operations.
*
@@ -4014,6 +4028,128 @@ l2:
return TM_Ok;
}
#ifdef USE_ASSERT_CHECKING
/*
* Confirm adequate lock held during heap_update(), per rules from
* README.tuplock section "Locking to write inplace-updated tables".
*/
static void
check_lock_if_inplace_updateable_rel(Relation relation,
ItemPointer otid,
HeapTuple newtup)
{
/* LOCKTAG_TUPLE acceptable for any catalog */
switch (RelationGetRelid(relation))
{
case RelationRelationId:
case DatabaseRelationId:
{
LOCKTAG tuptag;
SET_LOCKTAG_TUPLE(tuptag,
relation->rd_lockInfo.lockRelId.dbId,
relation->rd_lockInfo.lockRelId.relId,
ItemPointerGetBlockNumber(otid),
ItemPointerGetOffsetNumber(otid));
if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock))
return;
}
break;
default:
Assert(!IsInplaceUpdateRelation(relation));
return;
}
switch (RelationGetRelid(relation))
{
case RelationRelationId:
{
/* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
Oid relid = classForm->oid;
Oid dbid;
LOCKTAG tag;
if (IsSharedRelation(relid))
dbid = InvalidOid;
else
dbid = MyDatabaseId;
if (classForm->relkind == RELKIND_INDEX)
{
Relation irel = index_open(relid, AccessShareLock);
SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
index_close(irel, AccessShareLock);
}
else
SET_LOCKTAG_RELATION(tag, dbid, relid);
if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock) &&
!LockOrStrongerHeldByMe(&tag, ShareRowExclusiveLock))
elog(WARNING,
"missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
NameStr(classForm->relname),
relid,
classForm->relkind,
ItemPointerGetBlockNumber(otid),
ItemPointerGetOffsetNumber(otid));
}
break;
case DatabaseRelationId:
{
/* LOCKTAG_TUPLE required */
Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
elog(WARNING,
"missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
NameStr(dbForm->datname),
dbForm->oid,
ItemPointerGetBlockNumber(otid),
ItemPointerGetOffsetNumber(otid));
}
break;
}
}
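For illustration only (not part of the diff; the helper name and the
flag it sets are hypothetical), a pg_class updater that relies on the
relation-lock exemption checked above looks roughly like this.  A
self-exclusive lock on the table itself excludes inplace updaters, so
the second switch passes without LOCKTAG_TUPLE:

#include "postgres.h"
#include "access/htup_details.h"
#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/pg_class.h"
#include "utils/syscache.h"

/* Hypothetical DDL helper: flips a pg_class flag for an ordinary table. */
static void
set_relhastriggers(Oid rel_oid)
{
	/* ShareRowExclusiveLock or stronger excludes inplace updaters ... */
	Relation	userrel = table_open(rel_oid, ShareRowExclusiveLock);
	Relation	classrel = table_open(RelationRelationId, RowExclusiveLock);
	HeapTuple	tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(rel_oid));

	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", rel_oid);

	/* ... so no LockTuple() call around copy-modify-update. */
	((Form_pg_class) GETSTRUCT(tup))->relhastriggers = true;
	CatalogTupleUpdate(classrel, &tup->t_self, tup);

	heap_freetuple(tup);
	table_close(classrel, RowExclusiveLock);
	table_close(userrel, NoLock);	/* keep the DDL lock until commit */
}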
/*
* Confirm adequate relation lock held, per rules from README.tuplock section
* "Locking to write inplace-updated tables".
*/
static void
check_inplace_rel_lock(HeapTuple oldtup)
{
Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
Oid relid = classForm->oid;
Oid dbid;
LOCKTAG tag;
if (IsSharedRelation(relid))
dbid = InvalidOid;
else
dbid = MyDatabaseId;
if (classForm->relkind == RELKIND_INDEX)
{
Relation irel = index_open(relid, AccessShareLock);
SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
index_close(irel, AccessShareLock);
}
else
SET_LOCKTAG_RELATION(tag, dbid, relid);
if (!LockOrStrongerHeldByMe(&tag, ShareUpdateExclusiveLock))
elog(WARNING,
"missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
NameStr(classForm->relname),
relid,
classForm->relkind,
ItemPointerGetBlockNumber(&oldtup->t_self),
ItemPointerGetOffsetNumber(&oldtup->t_self));
}
#endif
/*
* Check if the specified attribute's values are the same. Subroutine for
* HeapDetermineColumnsInfo.
@@ -6039,15 +6175,21 @@ heap_inplace_lock(Relation relation,
TM_Result result;
bool ret;
#ifdef USE_ASSERT_CHECKING
if (RelationGetRelid(relation) == RelationRelationId)
check_inplace_rel_lock(oldtup_ptr);
#endif
Assert(BufferIsValid(buffer));
LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*----------
* Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
*
* - wait unconditionally
* - no tuple locks
* - already locked tuple above, since inplace needs that unconditionally
* - don't recheck header after wait: simpler to defer to next iteration
* - don't try to continue even if the updater aborts: likewise
* - no crosscheck
@@ -6131,7 +6273,10 @@ heap_inplace_lock(Relation relation,
* don't bother optimizing that.
*/
if (!ret)
{
UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
InvalidateCatalogSnapshot();
}
return ret;
}
@@ -6140,6 +6285,8 @@ heap_inplace_lock(Relation relation,
*
* The tuple cannot change size, and therefore its header fields and null
* bitmap (if any) don't change either.
*
* Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
*/
void
heap_inplace_update_and_unlock(Relation relation,
@@ -6223,6 +6370,7 @@ heap_inplace_unlock(Relation relation,
HeapTuple oldtup, Buffer buffer)
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
}
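Putting the three functions together, a caller of the inplace path has a
lock/finish-or-cancel shape roughly like the fragment below.  The exact
parameter lists are assumed from the parent commit, since this diff shows
the declarations only in fragments, and need_update stands in for the
caller's own decision logic:

	/* Hypothetical caller shape; signatures assumed, not shown here. */
	if (heap_inplace_lock(relation, oldtup, buffer))
	{
		if (need_update)
		{
			/* modify the copy in place; the tuple must not change size */
			heap_inplace_update_and_unlock(relation, oldtup, newtup, buffer);
		}
		else
			heap_inplace_unlock(relation, oldtup, buffer);
	}
	/* on false return, heap_inplace_lock released the locks it took */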
/*