For inplace update durability, make heap_update() callers wait.
The previous commit fixed some ways of losing an inplace update. It
remained possible to lose one when a backend working toward a
heap_update() copied a tuple into memory just before inplace update of
that tuple. In catalogs eligible for inplace update, use LOCKTAG_TUPLE
to govern admission to the steps of copying an old tuple, modifying it,
and issuing heap_update(). This includes MERGE commands. To avoid
changing most of the pg_class DDL, don't require LOCKTAG_TUPLE when
holding a relation lock sufficient to exclude inplace updaters.
Back-patch to v12 (all supported versions). In v13 and v12, "UPDATE
pg_class" or "UPDATE pg_database" can still lose an inplace update. The
v14+ UPDATE fix needs commit 86dc90056d,
and it wasn't worth reimplementing that fix without such infrastructure.
Reviewed by Nitin Motiani and (in earlier versions) Heikki Linnakangas.
Discussion: https://postgr.es/m/20231027214946.79.nmisch@google.com
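
As an illustration of the relation-lock exception described above (a caller already holding a relation lock strong enough to exclude inplace updaters may skip LOCKTAG_TUPLE), here is a minimal, hypothetical sketch of a pg_class heap_update() path. It is not code from this commit; the function name and the field it changes are placeholders.

    #include "postgres.h"

    #include "access/htup_details.h"
    #include "access/table.h"
    #include "catalog/indexing.h"
    #include "catalog/pg_class.h"
    #include "utils/rel.h"
    #include "utils/syscache.h"

    /*
     * Hypothetical DDL helper: the caller holds AccessExclusiveLock on "rel",
     * which is sufficient to exclude inplace updaters of its pg_class row, so
     * no tuple lock is taken before copying and updating the row.
     */
    static void
    sketch_update_pg_class_row(Relation rel)
    {
        Relation    classRel = table_open(RelationRelationId, RowExclusiveLock);
        HeapTuple   tuple;
        Form_pg_class classForm;

        tuple = SearchSysCacheCopy1(RELOID,
                                    ObjectIdGetDatum(RelationGetRelid(rel)));
        if (!HeapTupleIsValid(tuple))
            elog(ERROR, "cache lookup failed for relation %u",
                 RelationGetRelid(rel));
        classForm = (Form_pg_class) GETSTRUCT(tuple);

        classForm->relhastriggers = true;   /* placeholder change */
        CatalogTupleUpdate(classRel, &tuple->t_self, tuple);

        heap_freetuple(tuple);
        table_close(classRel, RowExclusiveLock);
    }
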
@@ -154,6 +154,48 @@ The following infomask bits are applicable:
We currently never set the HEAP_XMAX_COMMITTED when the HEAP_XMAX_IS_MULTI bit
is set.

Locking to write inplace-updated tables
---------------------------------------

If IsInplaceUpdateRelation() returns true for a table, the table is a system
catalog that receives systable_inplace_update_begin() calls. Preparing a
heap_update() of these tables follows additional locking rules, to ensure we
don't lose the effects of an inplace update. In particular, consider a moment
when a backend has fetched the old tuple to modify, not yet having called
heap_update(). Another backend's inplace update starting then can't conclude
until the heap_update() places its new tuple in a buffer. We enforce that
using locktags as follows. While DDL code is the main audience, the executor
follows these rules to make e.g. "MERGE INTO pg_class" safer. Locking rules
are per-catalog:

pg_class systable_inplace_update_begin() callers: before the call, acquire a
lock on the relation in mode ShareUpdateExclusiveLock or stricter. If the
update targets a row of RELKIND_INDEX (but not RELKIND_PARTITIONED_INDEX),
that lock must be on the table. Locking the index rel is not necessary.
(This allows VACUUM to overwrite per-index pg_class while holding a lock on
the table alone.) systable_inplace_update_begin() acquires and releases
LOCKTAG_TUPLE in InplaceUpdateTupleLock, an alias for ExclusiveLock, on each
tuple it overwrites.
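
For illustration, a hypothetical inplace updater following the rule above. This is not code from this commit; the function name and the relpages change are placeholders, the systable_inplace_update_begin() signature is assumed to match the one added earlier in this patch series, and the caller is assumed to already hold ShareUpdateExclusiveLock or stricter on the table whose row is overwritten.

    #include "postgres.h"

    #include "access/genam.h"
    #include "access/htup_details.h"
    #include "access/skey.h"
    #include "access/stratnum.h"
    #include "access/table.h"
    #include "catalog/pg_class.h"
    #include "utils/fmgroids.h"
    #include "utils/rel.h"

    /* Hypothetical sketch: overwrite relpages of rel's pg_class row in place. */
    static void
    sketch_inplace_set_relpages(Relation rel, int32 num_pages)
    {
        Relation    classRel = table_open(RelationRelationId, RowExclusiveLock);
        HeapTuple   ctup;
        void       *inplace_state;
        ScanKeyData key[1];

        ScanKeyInit(&key[0],
                    Anum_pg_class_oid,
                    BTEqualStrategyNumber, F_OIDEQ,
                    ObjectIdGetDatum(RelationGetRelid(rel)));

        /* Waits here while a concurrent heap_update() of this row is in flight. */
        systable_inplace_update_begin(classRel, ClassOidIndexId, true,
                                      NULL, 1, key, &ctup, &inplace_state);
        if (!HeapTupleIsValid(ctup))
            elog(ERROR, "pg_class entry for relation %u vanished",
                 RelationGetRelid(rel));

        ((Form_pg_class) GETSTRUCT(ctup))->relpages = num_pages;

        /* Writes the bytes in place and releases the LOCKTAG_TUPLE it took. */
        systable_inplace_update_finish(inplace_state, ctup);

        table_close(classRel, RowExclusiveLock);
    }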

pg_class heap_update() callers: before copying the tuple to modify, take a
lock on the tuple, a ShareUpdateExclusiveLock on the relation, or a
ShareRowExclusiveLock or stricter on the relation.

SearchSysCacheLocked1() is one convenient way to acquire the tuple lock.
Most heap_update() callers already hold a suitable lock on the relation for
other reasons and can skip the tuple lock. If you do acquire the tuple
lock, release it immediately after the update.
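
A hypothetical sketch of that pattern, not taken from this commit: it assumes SearchSysCacheLocked1() takes the same arguments as SearchSysCache1() and leaves the caller responsible for UnlockTuple(); the function name and the relhasindex change are placeholders.

    #include "postgres.h"

    #include "access/htup_details.h"
    #include "access/table.h"
    #include "catalog/indexing.h"
    #include "catalog/pg_class.h"
    #include "storage/lmgr.h"
    #include "utils/syscache.h"

    /* Hypothetical sketch: a pg_class updater relying on the tuple lock alone. */
    static void
    sketch_update_with_tuple_lock(Oid relid)
    {
        Relation    classRel = table_open(RelationRelationId, RowExclusiveLock);
        HeapTuple   cachetup;
        HeapTuple   newtup;

        /* Takes LOCKTAG_TUPLE in InplaceUpdateTupleLock mode before we copy. */
        cachetup = SearchSysCacheLocked1(RELOID, ObjectIdGetDatum(relid));
        if (!HeapTupleIsValid(cachetup))
            elog(ERROR, "cache lookup failed for relation %u", relid);

        newtup = heap_copytuple(cachetup);
        ((Form_pg_class) GETSTRUCT(newtup))->relhasindex = true;   /* placeholder */

        CatalogTupleUpdate(classRel, &newtup->t_self, newtup);

        /* Release the tuple lock right after the update, per the rule above. */
        UnlockTuple(classRel, &cachetup->t_self, InplaceUpdateTupleLock);
        ReleaseSysCache(cachetup);

        heap_freetuple(newtup);
        table_close(classRel, RowExclusiveLock);
    }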

pg_database: before copying the tuple to modify, all updaters of pg_database
rows acquire LOCKTAG_TUPLE. (Few updaters acquire LOCKTAG_OBJECT on the
database OID, so it wasn't worth extending that as a second option.)

Ideally, DDL might want to perform permissions checks before LockTuple(), as
we do with RangeVarGetRelidExtended() callbacks. We typically don't bother.
LOCKTAG_TUPLE acquirers release it after each row, so the potential
inconvenience is lower.

Reading inplace-updated columns
-------------------------------

@@ -52,6 +52,8 @@
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/pg_database.h"
#include "catalog/pg_database_d.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"

@@ -78,6 +80,12 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
                                  Buffer newbuf, HeapTuple oldtup,
                                  HeapTuple newtup, HeapTuple old_key_tuple,
                                  bool all_visible_cleared, bool new_all_visible_cleared);
#ifdef USE_ASSERT_CHECKING
static void check_lock_if_inplace_updateable_rel(Relation relation,
                                                 ItemPointer otid,
                                                 HeapTuple newtup);
static void check_inplace_rel_lock(HeapTuple oldtup);
#endif
static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
                                           Bitmapset *interesting_cols,
                                           Bitmapset *external_cols,

@@ -119,6 +127,8 @@ static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_re
 * heavyweight lock mode and MultiXactStatus values to use for any particular
 * tuple lock strength.
 *
 * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
 *
 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
 * instead.
 */

@@ -3187,6 +3197,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
                 errmsg("cannot update tuples during a parallel operation")));

#ifdef USE_ASSERT_CHECKING
    check_lock_if_inplace_updateable_rel(relation, otid, newtup);
#endif

    /*
     * Fetch the list of attributes to be checked for various operations.
     *

@@ -4014,6 +4028,128 @@ l2:
    return TM_Ok;
}

#ifdef USE_ASSERT_CHECKING
/*
 * Confirm adequate lock held during heap_update(), per rules from
 * README.tuplock section "Locking to write inplace-updated tables".
 */
static void
check_lock_if_inplace_updateable_rel(Relation relation,
                                     ItemPointer otid,
                                     HeapTuple newtup)
{
    /* LOCKTAG_TUPLE acceptable for any catalog */
    switch (RelationGetRelid(relation))
    {
        case RelationRelationId:
        case DatabaseRelationId:
            {
                LOCKTAG     tuptag;

                SET_LOCKTAG_TUPLE(tuptag,
                                  relation->rd_lockInfo.lockRelId.dbId,
                                  relation->rd_lockInfo.lockRelId.relId,
                                  ItemPointerGetBlockNumber(otid),
                                  ItemPointerGetOffsetNumber(otid));
                if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock))
                    return;
            }
            break;
        default:
            Assert(!IsInplaceUpdateRelation(relation));
            return;
    }

    switch (RelationGetRelid(relation))
    {
        case RelationRelationId:
            {
                /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
                Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
                Oid         relid = classForm->oid;
                Oid         dbid;
                LOCKTAG     tag;

                if (IsSharedRelation(relid))
                    dbid = InvalidOid;
                else
                    dbid = MyDatabaseId;

                if (classForm->relkind == RELKIND_INDEX)
                {
                    Relation    irel = index_open(relid, AccessShareLock);

                    SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
                    index_close(irel, AccessShareLock);
                }
                else
                    SET_LOCKTAG_RELATION(tag, dbid, relid);

                if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock) &&
                    !LockOrStrongerHeldByMe(&tag, ShareRowExclusiveLock))
                    elog(WARNING,
                         "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
                         NameStr(classForm->relname),
                         relid,
                         classForm->relkind,
                         ItemPointerGetBlockNumber(otid),
                         ItemPointerGetOffsetNumber(otid));
            }
            break;
        case DatabaseRelationId:
            {
                /* LOCKTAG_TUPLE required */
                Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);

                elog(WARNING,
                     "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
                     NameStr(dbForm->datname),
                     dbForm->oid,
                     ItemPointerGetBlockNumber(otid),
                     ItemPointerGetOffsetNumber(otid));
            }
            break;
    }
}

/*
 * Confirm adequate relation lock held, per rules from README.tuplock section
 * "Locking to write inplace-updated tables".
 */
static void
check_inplace_rel_lock(HeapTuple oldtup)
{
    Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
    Oid         relid = classForm->oid;
    Oid         dbid;
    LOCKTAG     tag;

    if (IsSharedRelation(relid))
        dbid = InvalidOid;
    else
        dbid = MyDatabaseId;

    if (classForm->relkind == RELKIND_INDEX)
    {
        Relation    irel = index_open(relid, AccessShareLock);

        SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
        index_close(irel, AccessShareLock);
    }
    else
        SET_LOCKTAG_RELATION(tag, dbid, relid);

    if (!LockOrStrongerHeldByMe(&tag, ShareUpdateExclusiveLock))
        elog(WARNING,
             "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
             NameStr(classForm->relname),
             relid,
             classForm->relkind,
             ItemPointerGetBlockNumber(&oldtup->t_self),
             ItemPointerGetOffsetNumber(&oldtup->t_self));
}
#endif

/*
 * Check if the specified attribute's values are the same. Subroutine for
 * HeapDetermineColumnsInfo.

@@ -6039,15 +6175,21 @@ heap_inplace_lock(Relation relation,
    TM_Result   result;
    bool        ret;

#ifdef USE_ASSERT_CHECKING
    if (RelationGetRelid(relation) == RelationRelationId)
        check_inplace_rel_lock(oldtup_ptr);
#endif

    Assert(BufferIsValid(buffer));

    LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    /*----------
     * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
     *
     * - wait unconditionally
     * - no tuple locks
     * - already locked tuple above, since inplace needs that unconditionally
     * - don't recheck header after wait: simpler to defer to next iteration
     * - don't try to continue even if the updater aborts: likewise
     * - no crosscheck

@@ -6131,7 +6273,10 @@ heap_inplace_lock(Relation relation,
     * don't bother optimizing that.
     */
    if (!ret)
    {
        UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
        InvalidateCatalogSnapshot();
    }
    return ret;
}

@@ -6140,6 +6285,8 @@ heap_inplace_lock(Relation relation,
 *
 * The tuple cannot change size, and therefore its header fields and null
 * bitmap (if any) don't change either.
 *
 * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
 */
void
heap_inplace_update_and_unlock(Relation relation,

@@ -6223,6 +6370,7 @@ heap_inplace_unlock(Relation relation,
                               HeapTuple oldtup, Buffer buffer)
{
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
}

/*

@@ -755,7 +755,9 @@ systable_endscan_ordered(SysScanDesc sysscan)
 *
 * Overwriting violates both MVCC and transactional safety, so the uses of
 * this function in Postgres are extremely limited. Nonetheless we find some
 * places to use it. Standard flow:
 * places to use it. See README.tuplock section "Locking to write
 * inplace-updated tables" and later sections for expectations of readers and
 * writers of a table that gets inplace updates. Standard flow:
 *
 *	... [any slow preparation not requiring oldtup] ...
 *	systable_inplace_update_begin([...], &tup, &inplace_state);
|
||||