From e2c8100e60729368c84e9a49ada9b44df5a1b851 Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Mon, 24 Jul 2017 16:45:46 -0400
Subject: [PATCH] Fix race condition in predicate-lock init code in
 EXEC_BACKEND builds.

Trading a little too heavily on letting the code path be the same whether
we were creating shared data structures or only attaching to them,
InitPredicateLocks() inserted the "scratch" PredicateLockTargetHash entry
unconditionally.  This is just wrong if we're in a postmaster child,
which would only reach this code in EXEC_BACKEND builds.  Most of the
time, the hash_search(HASH_ENTER) call would simply report that the
entry already existed, causing no visible effect since the code did not
bother to check for that possibility.  However, if this happened while
some other backend had transiently removed the "scratch" entry, then
that other backend's eventual RestoreScratchTarget would suffer an
assert failure; this appears to be the explanation for a recent failure
on buildfarm member culicidae.  In non-assert builds, there would be no
visible consequences there either.  But nonetheless this is a pretty
bad bug for EXEC_BACKEND builds, for two reasons:

1. Each new backend would perform the hash_search(HASH_ENTER) call
without holding any lock that would prevent concurrent access to the
PredicateLockTargetHash hash table.  This creates a low but certainly
nonzero risk of corruption of that hash table.

2. In the event that the race condition occurred, by reinserting the
scratch entry too soon, we were defeating the entire purpose of the
scratch entry, namely to guarantee that transaction commit could move
hash table entries around with no risk of out-of-memory failure.  The
odds of an actual OOM failure are quite low, but not zero, and if it
did happen it would again result in corruption of the hash table.

The user-visible symptoms of such corruption are a little hard to
predict, but would presumably amount to misbehavior of SERIALIZABLE
transactions that'd require a crash or postmaster restart to fix.

To fix, just skip the hash insertion if IsUnderPostmaster.  I also
inserted a bunch of assertions that the expected things happen
depending on whether IsUnderPostmaster is true.  That might be
overkill, since most comparable code in other functions isn't quite
that paranoid, but once burnt twice shy.

In passing, also move a couple of lines to places where they seemed
to make more sense.

Diagnosis of problem by Thomas Munro, patch by me.  Back-patch to
all supported branches.

Discussion: https://postgr.es/m/10593.1500670709@sss.pgh.pa.us
---
 src/backend/storage/lmgr/predicate.c | 29 ++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index a4cb4d33add..74e4b35837b 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -815,6 +815,7 @@ OldSerXidInit(void)
 	oldSerXidControl = (OldSerXidControl)
 		ShmemInitStruct("OldSerXidControlData", sizeof(OldSerXidControlData), &found);
 
+	Assert(found == IsUnderPostmaster);
 	if (!found)
 	{
 		/*
@@ -1109,6 +1110,10 @@ InitPredicateLocks(void)
 	Size		requestSize;
 	bool		found;
 
+#ifndef EXEC_BACKEND
+	Assert(!IsUnderPostmaster);
+#endif
+
 	/*
 	 * Compute size of predicate lock target hashtable.  Note these
 	 * calculations must agree with PredicateLockShmemSize!
@@ -1131,16 +1136,22 @@ InitPredicateLocks(void)
 									  HASH_ELEM | HASH_BLOBS |
 									  HASH_PARTITION | HASH_FIXED_SIZE);
 
-	/* Assume an average of 2 xacts per target */
-	max_table_size *= 2;
-
 	/*
 	 * Reserve a dummy entry in the hash table; we use it to make sure there's
 	 * always one entry available when we need to split or combine a page,
 	 * because running out of space there could mean aborting a
 	 * non-serializable transaction.
 	 */
-	hash_search(PredicateLockTargetHash, &ScratchTargetTag, HASH_ENTER, NULL);
+	if (!IsUnderPostmaster)
+	{
+		(void) hash_search(PredicateLockTargetHash, &ScratchTargetTag,
+						   HASH_ENTER, &found);
+		Assert(!found);
+	}
+
+	/* Pre-calculate the hash and partition lock of the scratch entry */
+	ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
+	ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);
 
 	/*
 	 * Allocate hash table for PREDICATELOCK structs.  This stores per
@@ -1152,6 +1163,9 @@ InitPredicateLocks(void)
 	info.hash = predicatelock_hash;
 	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
 
+	/* Assume an average of 2 xacts per target */
+	max_table_size *= 2;
+
 	PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
 									  max_table_size,
 									  max_table_size,
@@ -1178,6 +1192,7 @@ InitPredicateLocks(void)
 	PredXact = ShmemInitStruct("PredXactList",
 							   PredXactListDataSize,
 							   &found);
+	Assert(found == IsUnderPostmaster);
 	if (!found)
 	{
 		int			i;
@@ -1250,6 +1265,7 @@ InitPredicateLocks(void)
 	RWConflictPool = ShmemInitStruct("RWConflictPool",
 									 RWConflictPoolHeaderDataSize,
 									 &found);
+	Assert(found == IsUnderPostmaster);
 	if (!found)
 	{
 		int			i;
@@ -1275,6 +1291,7 @@ InitPredicateLocks(void)
 		ShmemInitStruct("FinishedSerializableTransactions",
 						sizeof(SHM_QUEUE),
 						&found);
+	Assert(found == IsUnderPostmaster);
 	if (!found)
 		SHMQueueInit(FinishedSerializableTransactions);
 
@@ -1283,10 +1300,6 @@ InitPredicateLocks(void)
 	 * transactions.
 	 */
 	OldSerXidInit();
-
-	/* Pre-calculate the hash and partition lock of the scratch entry */
-	ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
-	ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);
 }
 
 /*
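
For readers who want the create-vs-attach contract the patch enforces spelled
out apart from the diff context, here is a minimal standalone sketch.  It is
not PostgreSQL source: the shared-memory machinery is faked with statics, and
names such as init_shared_state() and shared_table_has_scratch are illustrative
stand-ins, not real APIs.

/*
 * Sketch of the pattern: only the process that creates the shared structures
 * reserves the scratch entry; a process that merely attaches (the
 * EXEC_BACKEND child case) verifies its expectations and leaves the table
 * alone, so it can never re-insert the scratch entry behind another
 * backend's back.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Fake "shared" state; in the real code this lives in shared memory. */
static bool shared_state_exists = false;
static bool shared_table_has_scratch = false;

static void
init_shared_state(bool is_under_postmaster)
{
	/*
	 * Analogue of ShmemInitStruct()'s found flag: an attaching child must
	 * find the structure already created, while the creator must not.
	 */
	bool		found = shared_state_exists;

	shared_state_exists = true;
	assert(found == is_under_postmaster);

	if (!is_under_postmaster)
	{
		/* Only the creator reserves the scratch entry, exactly once. */
		assert(!shared_table_has_scratch);
		shared_table_has_scratch = true;
	}
}

int
main(void)
{
	init_shared_state(false);	/* creator sets up the shared structures */
	init_shared_state(true);	/* later child merely attaches to them */
	printf("scratch entry reserved: %s\n",
		   shared_table_has_scratch ? "yes" : "no");
	return 0;
}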