Mirror of https://github.com/postgres/postgres.git (synced 2025-06-10 09:21:54 +03:00)
Fix catcache invalidation of a list entry that's being built
If a new catalog tuple is inserted that belongs to a catcache list entry, and
cache invalidation happens while the list entry is being built, the list entry
might miss the newly inserted tuple.

To fix, change the way we detect concurrent invalidations while a catcache
entry is being built. Keep a stack of entries that are being built, and apply
cache invalidation to those entries in addition to the real catcache entries.
This is similar to the in-progress list in relcache.c.

Back-patch to all supported versions.

Reviewed-by: Noah Misch
Discussion: https://www.postgresql.org/message-id/2234dc98-06fe-42ed-b5db-ac17384dc880@iki.fi
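The mechanism described above can be distilled into a small standalone sketch. This is illustrative only, not PostgreSQL source: the names InProgress, in_progress_stack, invalidate, build_entry, and do_build are hypothetical stand-ins for the CatCInProgress machinery in the diff below.

/*
 * Illustrative sketch of the "create-in-progress" pattern (hypothetical
 * names, standalone C; not PostgreSQL source).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct InProgress
{
	uint32_t	hash_value;		/* identifies the entry being built */
	bool		dead;			/* set if an invalidation hits it */
	struct InProgress *next;	/* next older in-progress build */
} InProgress;

static InProgress *in_progress_stack = NULL;

/* Invalidation path: mark any matching in-progress builds as dead. */
static void
invalidate(uint32_t hash_value)
{
	for (InProgress *e = in_progress_stack; e != NULL; e = e->next)
	{
		if (e->hash_value == hash_value)
			e->dead = true;
	}
}

/* Builder: push a marker, then retry the build until no invalidation arrived. */
static void
build_entry(uint32_t hash_value, void (*do_build) (void))
{
	InProgress	ent;

	ent.hash_value = hash_value;
	ent.next = in_progress_stack;
	in_progress_stack = &ent;

	do
	{
		ent.dead = false;
		do_build();				/* invalidate() may run in here */
	} while (ent.dead);

	in_progress_stack = ent.next;	/* pop our marker */
}

The real code additionally distinguishes list entries from single-tuple entries and unwinds the stack on error, as the catcache.c hunks below show.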
parent bfda7d8dd6
commit 91fc447c21
src/backend/utils/cache/catcache.c (231 changed lines)
@@ -41,6 +41,24 @@
 #include "utils/resowner_private.h"
 #include "utils/syscache.h"
 
+/*
+ * If a catcache invalidation is processed while we are in the middle of
+ * creating a catcache entry (or list), it might apply to the entry we're
+ * creating, making it invalid before it's been inserted to the catcache. To
+ * catch such cases, we have a stack of "create-in-progress" entries. Cache
+ * invalidation marks any matching entries in the stack as dead, in addition
+ * to the actual CatCTup and CatCList entries.
+ */
+typedef struct CatCInProgress
+{
+	CatCache   *cache;			/* cache that the entry belongs to */
+	uint32		hash_value;		/* hash of the entry; ignored for lists */
+	bool		list;			/* is it a list entry? */
+	bool		dead;			/* set when the entry is invalidated */
+	struct CatCInProgress *next;
+} CatCInProgress;
+
+static CatCInProgress *catcache_in_progress_stack = NULL;
+
 /* #define CACHEDEBUG */	/* turns DEBUG elogs on */
 
@@ -93,8 +111,7 @@ static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
 static void RehashCatCache(CatCache *cp);
 static void RehashCatCacheLists(CatCache *cp);
 static void CatalogCacheInitializeCache(CatCache *cache);
-static CatCTup *CatalogCacheCreateEntry(CatCache *cache,
-										HeapTuple ntp, SysScanDesc scandesc,
+static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 										Datum *arguments,
 										uint32 hashValue, Index hashIndex);
 
@@ -610,6 +627,16 @@ CatCacheInvalidate(CatCache *cache, uint32 hashValue)
 			/* could be multiple matches, so keep looking! */
 		}
 	}
+
+	/* Also invalidate any entries that are being built */
+	for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
+	{
+		if (e->cache == cache)
+		{
+			if (e->list || e->hash_value == hashValue)
+				e->dead = true;
+		}
+	}
 }
 
 /* ----------------------------------------------------------------
@@ -646,9 +673,15 @@ CreateCacheMemoryContext(void)
  *
  * This is not very efficient if the target cache is nearly empty.
  * However, it shouldn't need to be efficient; we don't invoke it often.
+ *
+ * If 'debug_discard' is true, we are being called as part of
+ * debug_discard_caches.  In that case, the cache is not reset for
+ * correctness, but just to get more testing of cache invalidation.  We skip
+ * resetting in-progress build entries in that case, or we'd never make any
+ * progress.
  */
 static void
-ResetCatalogCache(CatCache *cache)
+ResetCatalogCache(CatCache *cache, bool debug_discard)
 {
 	dlist_mutable_iter iter;
 	int			i;
@@ -692,6 +725,16 @@ ResetCatalogCache(CatCache *cache)
 #endif
 		}
 	}
+
+	/* Also invalidate any entries that are being built */
+	if (!debug_discard)
+	{
+		for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
+		{
+			if (e->cache == cache)
+				e->dead = true;
+		}
+	}
 }
 
 /*
@@ -701,6 +744,12 @@ ResetCatalogCache(CatCache *cache)
  */
 void
 ResetCatalogCaches(void)
+{
+	ResetCatalogCachesExt(false);
+}
+
+void
+ResetCatalogCachesExt(bool debug_discard)
 {
 	slist_iter	iter;
 
@@ -710,7 +759,7 @@ ResetCatalogCaches(void)
 	{
 		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);
 
-		ResetCatalogCache(cache);
+		ResetCatalogCache(cache, debug_discard);
 	}
 
 	CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
@@ -744,7 +793,7 @@ CatalogCacheFlushCatalog(Oid catId)
 		if (cache->cc_reloid == catId)
 		{
 			/* Yes, so flush all its contents */
-			ResetCatalogCache(cache);
+			ResetCatalogCache(cache, false);
 
 			/* Tell inval.c to call syscache callbacks for this cache */
 			CallSyscacheCallbacks(cache->id, 0);
@@ -1439,7 +1488,7 @@ SearchCatCacheMiss(CatCache *cache,
 
 	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 	{
-		ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+		ct = CatalogCacheCreateEntry(cache, ntp, NULL,
 									 hashValue, hashIndex);
 		/* upon failure, we must start the scan over */
 		if (ct == NULL)
@@ -1474,7 +1523,7 @@ SearchCatCacheMiss(CatCache *cache,
 	if (IsBootstrapProcessingMode())
 		return NULL;
 
-	ct = CatalogCacheCreateEntry(cache, NULL, NULL, arguments,
+	ct = CatalogCacheCreateEntry(cache, NULL, arguments,
 								 hashValue, hashIndex);
 
 	/* Creating a negative cache entry shouldn't fail */
@@ -1604,6 +1653,8 @@ SearchCatCacheList(CatCache *cache,
 	HeapTuple	ntp;
 	MemoryContext oldcxt;
 	int			i;
+	CatCInProgress *save_in_progress;
+	CatCInProgress in_progress_ent;
 
 	/*
 	 * one-time startup overhead for each cache
@@ -1720,21 +1771,60 @@ SearchCatCacheList(CatCache *cache,
 
 	ctlist = NIL;
 
+	/*
+	 * Cache invalidation can happen while we're building the list.
+	 * CatalogCacheCreateEntry() handles concurrent invalidation of individual
+	 * tuples, but it's also possible that a new entry is concurrently added
+	 * that should be part of the list we're building.  Register an
+	 * "in-progress" entry that will receive the invalidation, until we have
+	 * built the final list entry.
+	 */
+	save_in_progress = catcache_in_progress_stack;
+	in_progress_ent.next = catcache_in_progress_stack;
+	in_progress_ent.cache = cache;
+	in_progress_ent.hash_value = lHashValue;
+	in_progress_ent.list = true;
+	in_progress_ent.dead = false;
+	catcache_in_progress_stack = &in_progress_ent;
+
 	PG_TRY();
 	{
 		ScanKeyData cur_skey[CATCACHE_MAXKEYS];
 		Relation	relation;
 		SysScanDesc scandesc;
-		bool		stale;
 
 		relation = table_open(cache->cc_reloid, AccessShareLock);
 
+		/*
+		 * Scan the table for matching entries.  If an invalidation arrives
+		 * mid-build, we will loop back here to retry.
+		 */
 		do
 		{
 			/*
-			 * Ok, need to make a lookup in the relation, copy the scankey and
-			 * fill out any per-call fields.  (We must re-do this when
-			 * retrying, because systable_beginscan scribbles on the scankey.)
+			 * If we are retrying, release refcounts on any items created on
+			 * the previous iteration.  We dare not try to free them if
+			 * they're now unreferenced, since an error while doing that would
+			 * result in the PG_CATCH below doing extra refcount decrements.
+			 * Besides, we'll likely re-adopt those items in the next
+			 * iteration, so it's not worth complicating matters to try to get
+			 * rid of them.
+			 */
+			foreach(ctlist_item, ctlist)
+			{
+				ct = (CatCTup *) lfirst(ctlist_item);
+				Assert(ct->c_list == NULL);
+				Assert(ct->refcount > 0);
+				ct->refcount--;
+			}
+			/* Reset ctlist in preparation for new try */
+			ctlist = NIL;
+			in_progress_ent.dead = false;
+
+			/*
+			 * Copy the scankey and fill out any per-call fields.  (We must
+			 * re-do this when retrying, because systable_beginscan scribbles
+			 * on the scankey.)
 			 */
 			memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
 			cur_skey[0].sk_argument = v1;
@@ -1752,9 +1842,8 @@ SearchCatCacheList(CatCache *cache,
 			/* The list will be ordered iff we are doing an index scan */
 			ordered = (scandesc->irel != NULL);
 
-			stale = false;
-
-			while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
+			while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
+				   !in_progress_ent.dead)
 			{
 				uint32		hashValue;
 				Index		hashIndex;
@@ -1796,30 +1885,13 @@ SearchCatCacheList(CatCache *cache,
 				if (!found)
 				{
 					/* We didn't find a usable entry, so make a new one */
-					ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+					ct = CatalogCacheCreateEntry(cache, ntp, NULL,
 												 hashValue, hashIndex);
 
 					/* upon failure, we must start the scan over */
 					if (ct == NULL)
 					{
-						/*
-						 * Release refcounts on any items we already had.  We
-						 * dare not try to free them if they're now
-						 * unreferenced, since an error while doing that would
-						 * result in the PG_CATCH below doing extra refcount
-						 * decrements.  Besides, we'll likely re-adopt those
-						 * items in the next iteration, so it's not worth
-						 * complicating matters to try to get rid of them.
-						 */
-						foreach(ctlist_item, ctlist)
-						{
-							ct = (CatCTup *) lfirst(ctlist_item);
-							Assert(ct->c_list == NULL);
-							Assert(ct->refcount > 0);
-							ct->refcount--;
-						}
-						/* Reset ctlist in preparation for new try */
-						ctlist = NIL;
-						stale = true;
+						in_progress_ent.dead = true;
 						break;
 					}
 				}
@@ -1831,7 +1903,7 @@ SearchCatCacheList(CatCache *cache,
 			}
 
 			systable_endscan(scandesc);
-		} while (stale);
+		} while (in_progress_ent.dead);
 
 		table_close(relation, AccessShareLock);
 
@@ -1856,6 +1928,9 @@ SearchCatCacheList(CatCache *cache,
 	}
 	PG_CATCH();
 	{
+		Assert(catcache_in_progress_stack == &in_progress_ent);
+		catcache_in_progress_stack = save_in_progress;
+
 		foreach(ctlist_item, ctlist)
 		{
 			ct = (CatCTup *) lfirst(ctlist_item);
@@ -1874,6 +1949,8 @@ SearchCatCacheList(CatCache *cache,
 		PG_RE_THROW();
 	}
 	PG_END_TRY();
+	Assert(catcache_in_progress_stack == &in_progress_ent);
+	catcache_in_progress_stack = save_in_progress;
 
 	cl->cl_magic = CL_MAGIC;
 	cl->my_cache = cache;
@@ -1939,23 +2016,6 @@ ReleaseCatCacheList(CatCList *list)
 }
 
 
-/*
- * equalTuple
- *		Are these tuples memcmp()-equal?
- */
-static bool
-equalTuple(HeapTuple a, HeapTuple b)
-{
-	uint32		alen;
-	uint32		blen;
-
-	alen = a->t_len;
-	blen = b->t_len;
-	return (alen == blen &&
-			memcmp((char *) a->t_data,
-				   (char *) b->t_data, blen) == 0);
-}
-
 /*
  * CatalogCacheCreateEntry
  *		Create a new CatCTup entry, copying the given HeapTuple and other
@@ -1963,34 +2023,33 @@ equalTuple(HeapTuple a, HeapTuple b)
  *
  * To create a normal cache entry, ntp must be the HeapTuple just fetched
  * from scandesc, and "arguments" is not used.  To create a negative cache
- * entry, pass NULL for ntp and scandesc; then "arguments" is the cache
- * keys to use.  In either case, hashValue/hashIndex are the hash values
- * computed from the cache keys.
+ * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
+ * In either case, hashValue/hashIndex are the hash values computed from
+ * the cache keys.
  *
  * Returns NULL if we attempt to detoast the tuple and observe that it
  * became stale.  (This cannot happen for a negative entry.)  Caller must
  * retry the tuple lookup in that case.
  */
 static CatCTup *
-CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
-						Datum *arguments,
+CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
 						uint32 hashValue, Index hashIndex)
 {
 	CatCTup    *ct;
-	HeapTuple	dtp;
 	MemoryContext oldcxt;
 
 	if (ntp)
 	{
 		int			i;
+		HeapTuple	dtp = NULL;
 
 		/*
-		 * The visibility recheck below essentially never fails during our
-		 * regression tests, and there's no easy way to force it to fail for
-		 * testing purposes.  To ensure we have test coverage for the retry
-		 * paths in our callers, make debug builds randomly fail about 0.1% of
-		 * the times through this code path, even when there's no toasted
-		 * fields.
+		 * The invalidation of the in-progress entry essentially never happens
+		 * during our regression tests, and there's no easy way to force it to
+		 * fail for testing purposes.  To ensure we have test coverage for the
+		 * retry paths in our callers, make debug builds randomly fail about
+		 * 0.1% of the times through this code path, even when there's no
+		 * toasted fields.
 		 */
 #ifdef USE_ASSERT_CHECKING
 		if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
@@ -2006,34 +2065,34 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
 		 */
 		if (HeapTupleHasExternal(ntp))
 		{
-			bool		need_cmp = IsInplaceUpdateOid(cache->cc_reloid);
-			HeapTuple	before = NULL;
-			bool		matches = true;
-
-			if (need_cmp)
-				before = heap_copytuple(ntp);
-			dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
+			CatCInProgress *save_in_progress;
+			CatCInProgress in_progress_ent;
 
 			/*
 			 * The tuple could become stale while we are doing toast table
-			 * access (since AcceptInvalidationMessages can run then).
-			 * equalTuple() detects staleness from inplace updates, while
-			 * systable_recheck_tuple() detects staleness from normal updates.
-			 *
-			 * While this equalTuple() follows the usual rule of reading with
-			 * a pin and no buffer lock, it warrants suspicion since an
-			 * inplace update could appear at any moment.  It's safe because
-			 * the inplace update sends an invalidation that can't reorder
-			 * before the inplace heap change.  If the heap change reaches
-			 * this process just after equalTuple() looks, we've not missed
-			 * its inval.
+			 * access (since AcceptInvalidationMessages can run then).  The
+			 * invalidation will mark our in-progress entry as dead.
 			 */
-			if (need_cmp)
+			save_in_progress = catcache_in_progress_stack;
+			in_progress_ent.next = catcache_in_progress_stack;
+			in_progress_ent.cache = cache;
+			in_progress_ent.hash_value = hashValue;
+			in_progress_ent.list = false;
+			in_progress_ent.dead = false;
+			catcache_in_progress_stack = &in_progress_ent;
+
+			PG_TRY();
 			{
-				matches = equalTuple(before, ntp);
-				heap_freetuple(before);
+				dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
 			}
-			if (!matches || !systable_recheck_tuple(scandesc, ntp))
+			PG_FINALLY();
+			{
+				Assert(catcache_in_progress_stack == &in_progress_ent);
+				catcache_in_progress_stack = save_in_progress;
+			}
+			PG_END_TRY();
+
+			if (in_progress_ent.dead)
 			{
 				heap_freetuple(dtp);
 				return NULL;
src/backend/utils/cache/inval.c (2 changed lines)
@@ -710,7 +710,7 @@ InvalidateSystemCachesExtended(bool debug_discard)
 	int			i;
 
 	InvalidateCatalogSnapshot();
-	ResetCatalogCaches();
+	ResetCatalogCachesExt(debug_discard);
 	RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */
 
 	for (i = 0; i < syscache_callback_count; i++)
@@ -223,6 +223,7 @@ extern CatCList *SearchCatCacheList(CatCache *cache, int nkeys,
 extern void ReleaseCatCacheList(CatCList *list);
 
 extern void ResetCatalogCaches(void);
+extern void ResetCatalogCachesExt(bool debug_discard);
 extern void CatalogCacheFlushCatalog(Oid catId);
 extern void CatCacheInvalidate(CatCache *cache, uint32 hashValue);
 extern void PrepareToInvalidateCacheTuple(Relation relation,
@@ -359,6 +359,7 @@ CaseTestExpr
 CaseWhen
 Cash
 CastInfo
+CatCInProgress
 CatCList
 CatCTup
 CatCache