Mirror of https://github.com/postgres/postgres.git (synced 2025-11-15 03:41:20 +03:00)
Fix and enhance the assertion of no palloc's in a critical section.
The assertion failed if WAL_DEBUG or LWLOCK_STATS was enabled; fix that by using separate memory contexts for the allocations made within those code blocks.

This patch introduces a mechanism for marking any memory context as allowed in a critical section. Previously ErrorContext was exempt as a special case.

Instead of a blanket exception of the checkpointer process, only exempt the memory context used for the pending ops hash table.
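For orientation, here is a minimal sketch of how the new mechanism is meant to be used. The sketch is not part of the commit: MyDebugCxt, my_debug_init and my_debug_record are made-up names, while AllocSetContextCreate, MemoryContextAllowInCriticalSection, MemoryContextAlloc and the START_CRIT_SECTION/END_CRIT_SECTION macros are existing PostgreSQL APIs that appear in the diff below or in miscadmin.h/memutils.h.

    #include "postgres.h"
    #include "miscadmin.h"          /* START_CRIT_SECTION, END_CRIT_SECTION, MyProcPid */
    #include "utils/memutils.h"     /* AllocSetContextCreate, TopMemoryContext */

    /*
     * Hypothetical debugging-only context whose allocations are permitted
     * inside a critical section.  Running out of memory here still PANICs,
     * so this is only acceptable for small, non-production allocations.
     */
    static MemoryContext MyDebugCxt = NULL;

    static void
    my_debug_init(void)
    {
        MyDebugCxt = AllocSetContextCreate(TopMemoryContext,
                                           "my debug context",
                                           ALLOCSET_DEFAULT_MINSIZE,
                                           ALLOCSET_DEFAULT_INITSIZE,
                                           ALLOCSET_DEFAULT_MAXSIZE);
        /* Opt this context out of the no-palloc-in-critical-section assertion */
        MemoryContextAllowInCriticalSection(MyDebugCxt, true);
    }

    static void
    my_debug_record(void)
    {
        char   *entry;

        START_CRIT_SECTION();
        /* Allocating in any non-exempt context here would trip the assertion */
        entry = MemoryContextAlloc(MyDebugCxt, 64);
        snprintf(entry, 64, "pid %d", MyProcPid);
        END_CRIT_SECTION();
    }

This is the pattern the patch applies to the LWLOCK_STATS hash table in lwlock.c and to the checkpointer's pending-ops table in md.c below; the WAL_DEBUG fix mentioned in the commit message (in xlog.c) follows the same pattern but is not shown in this excerpt.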
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -104,8 +104,8 @@ typedef struct lwlock_stats
 	int			spin_delay_count;
 } lwlock_stats;
 
-static int	counts_for_pid = 0;
 static HTAB *lwlock_stats_htab;
+static lwlock_stats lwlock_stats_dummy;
 #endif
 
 #ifdef LOCK_DEBUG
@@ -142,21 +142,39 @@ static void
 init_lwlock_stats(void)
 {
 	HASHCTL		ctl;
+	static MemoryContext lwlock_stats_cxt = NULL;
+	static bool exit_registered = false;
 
-	if (lwlock_stats_htab != NULL)
-	{
-		hash_destroy(lwlock_stats_htab);
-		lwlock_stats_htab = NULL;
-	}
+	if (lwlock_stats_cxt != NULL)
+		MemoryContextDelete(lwlock_stats_cxt);
+
+	/*
+	 * The LWLock stats will be updated within a critical section, which
+	 * requires allocating new hash entries. Allocations within a critical
+	 * section are normally not allowed because running out of memory would
+	 * lead to a PANIC, but LWLOCK_STATS is debugging code that's not normally
+	 * turned on in production, so that's an acceptable risk. The hash entries
+	 * are small, so the risk of running out of memory is minimal in practice.
+	 */
+	lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext,
+											 "LWLock stats",
+											 ALLOCSET_DEFAULT_MINSIZE,
+											 ALLOCSET_DEFAULT_INITSIZE,
+											 ALLOCSET_DEFAULT_MAXSIZE);
+	MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
 
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(lwlock_stats_key);
 	ctl.entrysize = sizeof(lwlock_stats);
 	ctl.hash = tag_hash;
+	ctl.hcxt = lwlock_stats_cxt;
 	lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
-									HASH_ELEM | HASH_FUNCTION);
-	counts_for_pid = MyProcPid;
-	on_shmem_exit(print_lwlock_stats, 0);
+									HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+	if (!exit_registered)
+	{
+		on_shmem_exit(print_lwlock_stats, 0);
+		exit_registered = true;
+	}
 }
 
 static void
@@ -190,9 +208,13 @@ get_lwlock_stats_entry(LWLock *lock)
 	lwlock_stats *lwstats;
 	bool		found;
 
-	/* Set up local count state first time through in a given process */
-	if (counts_for_pid != MyProcPid)
-		init_lwlock_stats();
+	/*
+	 * During shared memory initialization, the hash table doesn't exist yet.
+	 * Stats of that phase aren't very interesting, so just collect operations
+	 * on all locks in a single dummy entry.
+	 */
+	if (lwlock_stats_htab == NULL)
+		return &lwlock_stats_dummy;
 
 	/* Fetch or create the entry. */
 	key.tranche = lock->tranche;
@@ -361,6 +383,16 @@ CreateLWLocks(void)
 	LWLockRegisterTranche(0, &MainLWLockTranche);
 }
 
+/*
+ * InitLWLockAccess - initialize backend-local state needed to hold LWLocks
+ */
+void
+InitLWLockAccess(void)
+{
+#ifdef LWLOCK_STATS
+	init_lwlock_stats();
+#endif
+}
+
 /*
  * LWLockAssign - assign a dynamically-allocated LWLock number
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -411,8 +411,9 @@ InitProcess(void)
 
 	/*
 	 * Now that we have a PGPROC, we could try to acquire locks, so initialize
-	 * the deadlock checker.
+	 * local state needed for LWLocks, and the deadlock checker.
 	 */
+	InitLWLockAccess();
 	InitDeadLockChecking();
 }
 
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -115,7 +115,7 @@ typedef struct _MdfdVec
 	struct _MdfdVec *mdfd_chain;	/* next segment, or NULL */
 } MdfdVec;
 
-static MemoryContext MdCxt;		/* context for all md.c allocations */
+static MemoryContext MdCxt;		/* context for all MdfdVec objects */
 
 
 /*
@@ -157,6 +157,7 @@ typedef struct
 
 static HTAB *pendingOpsTable = NULL;
 static List *pendingUnlinks = NIL;
+static MemoryContext pendingOpsCxt; /* context for the above */
 
 static CycleCtr mdsync_cycle_ctr = 0;
 static CycleCtr mdckpt_cycle_ctr = 0;
@@ -209,11 +210,27 @@ mdinit(void)
 	{
 		HASHCTL		hash_ctl;
 
+		/*
+		 * XXX: The checkpointer needs to add entries to the pending ops table
+		 * when absorbing fsync requests. That is done within a critical
+		 * section, which isn't usually allowed, but we make an exception.
+		 * It means that there's a theoretical possibility that you run out of
+		 * memory while absorbing fsync requests, which leads to a PANIC.
+		 * Fortunately the hash table is small so that's unlikely to happen in
+		 * practice.
+		 */
+		pendingOpsCxt = AllocSetContextCreate(MdCxt,
+											  "Pending Ops Context",
+											  ALLOCSET_DEFAULT_MINSIZE,
+											  ALLOCSET_DEFAULT_INITSIZE,
+											  ALLOCSET_DEFAULT_MAXSIZE);
+		MemoryContextAllowInCriticalSection(pendingOpsCxt, true);
+
 		MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 		hash_ctl.keysize = sizeof(RelFileNode);
 		hash_ctl.entrysize = sizeof(PendingOperationEntry);
 		hash_ctl.hash = tag_hash;
-		hash_ctl.hcxt = MdCxt;
+		hash_ctl.hcxt = pendingOpsCxt;
 		pendingOpsTable = hash_create("Pending Ops Table",
 									  100L,
 									  &hash_ctl,
@@ -1516,7 +1533,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
 	else if (segno == UNLINK_RELATION_REQUEST)
 	{
 		/* Unlink request: put it in the linked list */
-		MemoryContext oldcxt = MemoryContextSwitchTo(MdCxt);
+		MemoryContext oldcxt = MemoryContextSwitchTo(pendingOpsCxt);
 		PendingUnlinkEntry *entry;
 
 		/* PendingUnlinkEntry doesn't store forknum, since it's always MAIN */
@@ -1533,7 +1550,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
 	else
 	{
 		/* Normal case: enter a request to fsync this segment */
-		MemoryContext oldcxt = MemoryContextSwitchTo(MdCxt);
+		MemoryContext oldcxt = MemoryContextSwitchTo(pendingOpsCxt);
 		PendingOperationEntry *entry;
 		bool		found;
 
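The mcxt.c side of the patch, which this excerpt does not show, is where the assertion itself lives. Roughly (a simplified sketch, assuming the flag and function names introduced by the patch): every MemoryContext carries an allowInCritSection flag, MemoryContextAllowInCriticalSection() sets it, and the allocation entry points assert on it.

    /* Simplified sketch of the mcxt.c changes (not shown in this excerpt) */
    #define AssertNotInCriticalSection(context) \
        Assert(CritSectionCount == 0 || (context)->allowInCritSection)

    void
    MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
    {
        AssertArg(MemoryContextIsValid(context));

        context->allowInCritSection = allow;
    }

    void *
    MemoryContextAlloc(MemoryContext context, Size size)
    {
        AssertArg(MemoryContextIsValid(context));
        AssertNotInCriticalSection(context);

        /* ...the normal allocation path follows, unchanged... */
        return (*context->methods->alloc) (context, size);
    }

Per the commit message, ErrorContext now gets the same treatment at creation time rather than being special-cased in the assertion. For md.c the practical effect is that dynahash allocates new entries in hash_ctl.hcxt, so the HASH_ENTER calls made while the checkpointer absorbs fsync requests inside a critical section draw from the exempted pendingOpsCxt instead of MdCxt.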