1
0
mirror of https://github.com/postgres/postgres.git synced 2025-12-19 17:02:53 +03:00

Assert lack of hazardous buffer locks before possible catalog read.

Commit 0bada39c83 fixed a bug of this kind,
which existed in all branches for six days before detection.  While the
probability of reaching the trouble was low, the disruption was extreme.  No
new backends could start, and service restoration needed an immediate
shutdown.  Hence, add this to catch the next bug like it.

The new check in RelationIdGetRelation() suffices to make autovacuum detect
the bug in commit 243e9b40f1 that led to commit
0bada39.  This commit also adds checks in a number of similar places.  It replaces each
Assert(IsTransactionState()) that pertained to a conditional catalog read.

Back-patch to v14 - v17.  This is a back-patch of commit
f4ece891fc (from before v18 branched) to
all supported branches, to accompany the back-patch of commits 243e9b4
and 0bada39.  For catalog indexes, the bttextcmp() behavior that
motivated IsCatalogTextUniqueIndexOid() was v18-specific.  Hence, this
back-patch doesn't need that or its correction from commit
4a4ee0c2c1.

Reported-by: Alexander Lakhin <exclusion@gmail.com>
Discussion: https://postgr.es/m/20250410191830.0e.nmisch@google.com
Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
Backpatch-through: 14-17
This commit is contained in:
Noah Misch
2025-12-16 16:13:54 -08:00
parent e4b1986b98
commit 0dfbd191a9
10 changed files with 165 additions and 20 deletions

View File

@@ -36,6 +36,9 @@
#include "access/tableam.h"
#include "access/xlog.h"
#include "catalog/catalog.h"
#ifdef USE_ASSERT_CHECKING
#include "catalog/pg_tablespace_d.h"
#endif
#include "catalog/storage.h"
#include "executor/instrument.h"
#include "lib/binaryheap.h"
@@ -487,6 +490,10 @@ static void FindAndDropRelFileNodeBuffers(RelFileNode rnode,
BlockNumber firstDelBlock);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
#ifdef USE_ASSERT_CHECKING
static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
void *unused_context);
#endif
static int rnode_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *a, const BufferTag *b);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
@@ -2697,6 +2704,65 @@ CheckForBufferLeaks(void)
#endif
}
#ifdef USE_ASSERT_CHECKING
/*
 * Check for exclusive-locked catalog buffers.  This is the core of
 * AssertCouldGetRelation().
 *
 * A backend would self-deadlock on LWLocks if the catalog scan read the
 * exclusive-locked buffer.  The main threat is exclusive-locked buffers of
 * catalogs used in relcache, because a catcache search on any catalog may
 * build that catalog's relcache entry.  We don't have an inventory of
 * catalogs relcache uses, so just check buffers of most catalogs.
 *
 * It's better to minimize waits while holding an exclusive buffer lock, so it
 * would be nice to broaden this check not to be catalog-specific.  However,
 * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
 * read tables.  That is deadlock-free as long as there's no loop in the
 * dependency graph: modifying table A may cause an opclass to read table B,
 * but it must not cause a read of table A.
 */
void
AssertBufferLocksPermitCatalogRead(void)
{
	/* Walk every LWLock this backend holds; assert on hazardous ones. */
	ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
}
/*
 * Per-lock callback for AssertBufferLocksPermitCatalogRead(): assert that the
 * given held LWLock is not an exclusive content lock on a catalog buffer,
 * since a subsequent catalog read could self-deadlock on it.
 */
static void
AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
						   void *unused_context)
{
	BufferDesc *bufHdr;
	BufferTag	tag;
	Oid			relid;

	/* Only exclusive mode is hazardous; shared locks permit concurrent reads. */
	if (mode != LW_EXCLUSIVE)
		return;

	/*
	 * Recognize buffer content locks by address: they live inside the
	 * BufferDescriptors array.  NOTE(review): the strict ">" assumes the
	 * content_lock never sits at the very start of the array, i.e.
	 * offsetof(BufferDesc, content_lock) != 0 -- confirm against struct
	 * layout.
	 */
	if (!((BufferDescPadded *) lock > BufferDescriptors &&
		  (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
		return;					/* not a buffer lock */

	/* Recover the enclosing descriptor from the content_lock's address. */
	bufHdr = (BufferDesc *)
		((char *) lock - offsetof(BufferDesc, content_lock));
	tag = bufHdr->tag;

	/*
	 * This relNode==relid assumption holds until a catalog experiences VACUUM
	 * FULL or similar.  After a command like that, relNode will be in the
	 * normal (non-catalog) range, and we lose the ability to detect hazardous
	 * access to that catalog.  Calling RelidByRelfilenode() would close that
	 * gap, but RelidByRelfilenode() might then deadlock with a held lock.
	 */
	relid = tag.rnode.relNode;

	Assert(!IsCatalogRelationOid(relid));

	/* Shared rels are always catalogs: detect even after VACUUM FULL. */
	Assert(tag.rnode.spcNode != GLOBALTABLESPACE_OID);
}
#endif
/*
* Helper routine to issue warnings when a buffer is unexpectedly pinned
*/

View File

@@ -1921,6 +1921,21 @@ LWLockReleaseAll(void)
}
/*
 * ForEachLWLockHeldByMe - invoke a callback on every LWLock this backend
 * currently holds, passing the lock, the mode it is held in, and the
 * caller-supplied context pointer.
 *
 * This is meant as debug support only.
 */
void
ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
					  void *context)
{
	for (int i = 0; i < num_held_lwlocks; i++)
		callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
}
/*
* LWLockHeldByMe - test whether my process holds a lock in any mode
*

View File

@@ -64,6 +64,7 @@
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/relcache.h"
#include "utils/syscache.h"
#ifdef USE_ICU
@@ -1262,6 +1263,8 @@ lookup_collation_cache(Oid collation, bool set_flags)
Assert(OidIsValid(collation));
Assert(collation != DEFAULT_COLLATION_OID);
AssertCouldGetRelation();
if (collation_cache == NULL)
{
/* First time through, initialize the hash table */

View File

@@ -942,12 +942,41 @@ RehashCatCache(CatCache *cp)
cp->cc_bucket = newbucket;
}
/*
 * ConditionalCatalogCacheInitializeCache
 *
 * Call CatalogCacheInitializeCache() if not yet done.
 */
pg_attribute_always_inline
static void
ConditionalCatalogCacheInitializeCache(CatCache *cache)
{
#ifdef USE_ASSERT_CHECKING
	/*
	 * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
	 * for hashing.  This isn't ideal.  Since lookup_type_cache() both
	 * registers the callback and searches TYPEOID, reaching trouble likely
	 * requires OOM at an unlucky moment.
	 *
	 * InvalidateAttoptCacheCallback() runs outside transactions and likewise
	 * relies on ATTNUM.  InitPostgres() initializes ATTNUM, so it's reliable.
	 */
	if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
		IsTransactionState())
		AssertCouldGetRelation();
	else
		/* Outside a transaction, the cache must already be initialized. */
		Assert(cache->cc_tupdesc != NULL);
#endif

	/* cc_tupdesc stays NULL until one-time initialization completes. */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);
}
/*
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
* descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
* descriptor and set up the hash and equality function links.
*/
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
@@ -1082,8 +1111,7 @@ CatalogCacheInitializeCache(CatCache *cache)
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
if (cache->cc_tupdesc == NULL)
CatalogCacheInitializeCache(cache);
ConditionalCatalogCacheInitializeCache(cache);
if (touch_index &&
cache->id != AMOID &&
@@ -1262,16 +1290,12 @@ SearchCatCacheInternal(CatCache *cache,
dlist_head *bucket;
CatCTup *ct;
/* Make sure we're in an xact, even if this ends up being a cache hit */
Assert(IsTransactionState());
Assert(cache->cc_nkeys == nkeys);
/*
* one-time startup overhead for each cache
*/
if (unlikely(cache->cc_tupdesc == NULL))
CatalogCacheInitializeCache(cache);
ConditionalCatalogCacheInitializeCache(cache);
#ifdef CATCACHE_STATS
cache->cc_searches++;
@@ -1550,8 +1574,7 @@ GetCatCacheHashValue(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
if (cache->cc_tupdesc == NULL)
CatalogCacheInitializeCache(cache);
ConditionalCatalogCacheInitializeCache(cache);
/*
* calculate the hash value
@@ -1600,8 +1623,7 @@ SearchCatCacheList(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
if (cache->cc_tupdesc == NULL)
CatalogCacheInitializeCache(cache);
ConditionalCatalogCacheInitializeCache(cache);
Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
@@ -2226,8 +2248,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
continue;
/* Just in case cache hasn't finished initialization yet... */
if (ccp->cc_tupdesc == NULL)
CatalogCacheInitializeCache(ccp);
ConditionalCatalogCacheInitializeCache(ccp);
hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;

View File

@@ -720,6 +720,12 @@ InvalidateSystemCachesExtended(bool debug_discard)
void
AcceptInvalidationMessages(void)
{
#ifdef USE_ASSERT_CHECKING
/* message handlers shall access catalogs only during transactions */
if (IsTransactionState())
AssertCouldGetRelation();
#endif
ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
InvalidateSystemCaches);
@@ -771,7 +777,8 @@ PrepareInvalidationState(void)
{
TransInvalidationInfo *myInfo;
Assert(IsTransactionState());
/* PrepareToInvalidateCacheTuple() needs relcache */
AssertCouldGetRelation();
/* Can't queue transactional message while collecting inplace messages. */
Assert(inplaceInvalInfo == NULL);
@@ -807,7 +814,7 @@ PrepareInplaceInvalidationState(void)
{
InvalidationInfo *myInfo;
Assert(IsTransactionState());
AssertCouldGetRelation();
/* limit of one inplace update under assembly */
Assert(inplaceInvalInfo == NULL);
@@ -1248,6 +1255,9 @@ CacheInvalidateHeapTupleCommon(Relation relation,
Oid databaseId;
Oid relationId;
/* PrepareToInvalidateCacheTuple() needs relcache */
AssertCouldGetRelation();
/* Do nothing during bootstrap */
if (IsBootstrapProcessingMode())
return;

View File

@@ -2020,6 +2020,23 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_isvalid = true;
}
#ifdef USE_ASSERT_CHECKING
/*
 * AssertCouldGetRelation
 *
 * Check safety of calling RelationIdGetRelation().
 *
 * In code that reads catalogs in the event of a cache miss, call this
 * before checking the cache.
 */
void
AssertCouldGetRelation(void)
{
	/* A catalog read requires an active transaction... */
	Assert(IsTransactionState());
	/* ...and no exclusive lock held on a catalog buffer we might read. */
	AssertBufferLocksPermitCatalogRead();
}
#endif
/* ----------------------------------------------------------------
* Relation Descriptor Lookup Interface
@@ -2047,8 +2064,7 @@ RelationIdGetRelation(Oid relationId)
{
Relation rd;
/* Make sure we're in an xact, even if this ends up being a cache hit */
Assert(IsTransactionState());
AssertCouldGetRelation();
/*
* first try to find reldesc in the cache

View File

@@ -39,6 +39,7 @@
#include "mb/pg_wchar.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/relcache.h"
#include "utils/syscache.h"
/*
@@ -310,7 +311,7 @@ InitializeClientEncoding(void)
{
Oid utf8_to_server_proc;
Assert(IsTransactionState());
AssertCouldGetRelation();
utf8_to_server_proc =
FindDefaultConversionProc(PG_UTF8,
current_server_encoding);

View File

@@ -196,6 +196,9 @@ extern void InitBufferPool(void);
extern void InitBufferPoolAccess(void);
extern void InitBufferPoolBackend(void);
extern void AtEOXact_Buffers(bool isCommit);
#ifdef USE_ASSERT_CHECKING
extern void AssertBufferLocksPermitCatalogRead(void);
#endif
extern void PrintBufferLeakWarning(Buffer buffer);
extern void CheckPointBuffers(int flags);
extern BlockNumber BufferGetBlockNumber(Buffer buffer);

View File

@@ -127,6 +127,8 @@ extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
extern void ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
void *context);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);

View File

@@ -36,6 +36,14 @@ typedef Relation *RelationPtr;
/*
* Routines to open (lookup) and close a relcache entry
*/
#ifdef USE_ASSERT_CHECKING
extern void AssertCouldGetRelation(void);
#else
static inline void
AssertCouldGetRelation(void)
{
}
#endif
extern Relation RelationIdGetRelation(Oid relationId);
extern void RelationClose(Relation relation);