pgindent run.
src/backend/utils/cache/catcache.c (vendored), 116 lines changed
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.98 2002/09/02 01:05:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.99 2002/09/04 20:31:29 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -34,7 +34,7 @@
 #include "utils/syscache.h"


- /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
+ /* #define CACHEDEBUG */ /* turns DEBUG elogs on */

 /*
 * Constants related to size of the catcache.
@@ -102,6 +102,7 @@ static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
 ScanKey cur_skey);
 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
 HeapTuple tuple);
+
 #ifdef CATCACHE_STATS
 static void CatCachePrintStats(void);
 #endif
@@ -109,8 +110,8 @@ static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
 static void CatalogCacheInitializeCache(CatCache *cache);
 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
- uint32 hashValue, Index hashIndex,
- bool negative);
+ uint32 hashValue, Index hashIndex,
+ bool negative);
 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);


@@ -325,8 +326,7 @@ CatCachePrintStats(void)
 cc_lsearches,
 cc_lhits);
 }
-
- #endif /* CATCACHE_STATS */
+ #endif /* CATCACHE_STATS */


 /*
@@ -372,7 +372,7 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
 Assert(cl->my_cache == cache);

 /* delink from member tuples */
- for (i = cl->n_members; --i >= 0; )
+ for (i = cl->n_members; --i >= 0;)
 {
 CatCTup *ct = cl->members[i];

@@ -397,11 +397,11 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
 * item pointer. Positive entries are deleted if they match the item
 * pointer. Negative entries must be deleted if they match the hash
 * value (since we do not have the exact key of the tuple that's being
- * inserted). But this should only rarely result in loss of a cache
+ * inserted). But this should only rarely result in loss of a cache
 * entry that could have been kept.
 *
 * Note that it's not very relevant whether the tuple identified by
- * the item pointer is being inserted or deleted. We don't expect to
+ * the item pointer is being inserted or deleted. We don't expect to
 * find matching positive entries in the one case, and we don't expect
 * to find matching negative entries in the other; but we will do the
 * right things in any case.
@@ -435,8 +435,8 @@ CatalogCacheIdInvalidate(int cacheId,

 /*
 * We don't bother to check whether the cache has finished
- * initialization yet; if not, there will be no entries in it
- * so no problem.
+ * initialization yet; if not, there will be no entries in it so
+ * no problem.
 */

 /*
@@ -819,7 +819,7 @@ InitCatCache(int id,
 cp->id = id;
 cp->cc_relname = relname;
 cp->cc_indname = indname;
- cp->cc_reloid = InvalidOid; /* temporary */
+ cp->cc_reloid = InvalidOid; /* temporary */
 cp->cc_relisshared = false; /* temporary */
 cp->cc_tupdesc = (TupleDesc) NULL;
 cp->cc_reloidattr = reloidattr;
@@ -1015,8 +1015,8 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
 {
 /*
 * Since the OIDs of indexes aren't hardwired, it's painful to
- * figure out which is which. Just force all pg_index searches
- * to be heap scans while building the relcaches.
+ * figure out which is which. Just force all pg_index searches to
+ * be heap scans while building the relcaches.
 */
 if (!criticalRelcachesBuilt)
 return false;
@@ -1037,7 +1037,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
 if (!criticalRelcachesBuilt)
 {
 /* Looking for an OID comparison function? */
- Oid lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
+ Oid lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);

 if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
 return false;
@@ -1055,7 +1055,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
 * if necessary (on the first access to a particular cache).
 *
 * The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
 * ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
@@ -1077,7 +1077,7 @@ SearchCatCache(CatCache *cache,
 Dlelem *elt;
 CatCTup *ct;
 Relation relation;
- SysScanDesc scandesc;
+ SysScanDesc scandesc;
 HeapTuple ntp;

 /*
@@ -1134,18 +1134,18 @@ SearchCatCache(CatCache *cache,
 continue;

 /*
- * we found a match in the cache: move it to the front of the global
- * LRU list. We also move it to the front of the list for its
- * hashbucket, in order to speed subsequent searches. (The most
- * frequently accessed elements in any hashbucket will tend to be
- * near the front of the hashbucket's list.)
+ * we found a match in the cache: move it to the front of the
+ * global LRU list. We also move it to the front of the list for
+ * its hashbucket, in order to speed subsequent searches. (The
+ * most frequently accessed elements in any hashbucket will tend
+ * to be near the front of the hashbucket's list.)
 */
 DLMoveToFront(&ct->lrulist_elem);
 DLMoveToFront(&ct->cache_elem);

 /*
- * If it's a positive entry, bump its refcount and return it.
- * If it's negative, we can report failure to the caller.
+ * If it's a positive entry, bump its refcount and return it. If
+ * it's negative, we can report failure to the caller.
 */
 if (!ct->negative)
 {
@@ -1175,8 +1175,8 @@ SearchCatCache(CatCache *cache,

 /*
 * Tuple was not found in cache, so we have to try to retrieve it
- * directly from the relation. If found, we will add it to the
- * cache; if not found, we will add a negative cache entry instead.
+ * directly from the relation. If found, we will add it to the cache;
+ * if not found, we will add a negative cache entry instead.
 *
 * NOTE: it is possible for recursive cache lookups to occur while
 * reading the relation --- for example, due to shared-cache-inval
@@ -1213,8 +1213,8 @@ SearchCatCache(CatCache *cache,

 /*
 * If tuple was not found, we need to build a negative cache entry
- * containing a fake tuple. The fake tuple has the correct key columns,
- * but nulls everywhere else.
+ * containing a fake tuple. The fake tuple has the correct key
+ * columns, but nulls everywhere else.
 */
 if (ct == NULL)
 {
@@ -1307,7 +1307,7 @@ SearchCatCacheList(CatCache *cache,
 List *ctlist;
 int nmembers;
 Relation relation;
- SysScanDesc scandesc;
+ SysScanDesc scandesc;
 bool ordered;
 HeapTuple ntp;
 MemoryContext oldcxt;
@@ -1336,8 +1336,8 @@ SearchCatCacheList(CatCache *cache,

 /*
 * compute a hash value of the given keys for faster search. We don't
- * presently divide the CatCList items into buckets, but this still lets
- * us skip non-matching items quickly most of the time.
+ * presently divide the CatCList items into buckets, but this still
+ * lets us skip non-matching items quickly most of the time.
 */
 lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);

@@ -1373,11 +1373,11 @@ SearchCatCacheList(CatCache *cache,

 /*
 * we found a matching list: move each of its members to the front
- * of the global LRU list. Also move the list itself to the front
- * of the cache's list-of-lists, to speed subsequent searches.
- * (We do not move the members to the fronts of their hashbucket
+ * of the global LRU list. Also move the list itself to the front
+ * of the cache's list-of-lists, to speed subsequent searches. (We
+ * do not move the members to the fronts of their hashbucket
 * lists, however, since there's no point in that unless they are
- * searched for individually.) Also bump the members' refcounts.
+ * searched for individually.) Also bump the members' refcounts.
 */
 for (i = 0; i < cl->n_members; i++)
 {
@@ -1400,9 +1400,9 @@ SearchCatCacheList(CatCache *cache,
 }

 /*
- * List was not found in cache, so we have to build it by reading
- * the relation. For each matching tuple found in the relation,
- * use an existing cache entry if possible, else build a new one.
+ * List was not found in cache, so we have to build it by reading the
+ * relation. For each matching tuple found in the relation, use an
+ * existing cache entry if possible, else build a new one.
 */
 relation = heap_open(cache->cc_reloid, AccessShareLock);

@@ -1438,17 +1438,17 @@ SearchCatCacheList(CatCache *cache,
 ct = (CatCTup *) DLE_VAL(elt);

 if (ct->dead || ct->negative)
- continue; /* ignore dead and negative entries */
+ continue; /* ignore dead and negative entries */

 if (ct->hash_value != hashValue)
- continue; /* quickly skip entry if wrong hash val */
+ continue; /* quickly skip entry if wrong hash val */

 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
- continue; /* not same tuple */
+ continue; /* not same tuple */

 /*
- * Found a match, but can't use it if it belongs to another list
- * already
+ * Found a match, but can't use it if it belongs to another
+ * list already
 */
 if (ct->c_list)
 continue;
@@ -1498,7 +1498,7 @@ SearchCatCacheList(CatCache *cache,
 cl->hash_value = lHashValue;
 cl->n_members = nmembers;
 /* The list is backwards because we built it with lcons */
- for (i = nmembers; --i >= 0; )
+ for (i = nmembers; --i >= 0;)
 {
 cl->members[i] = ct = (CatCTup *) lfirst(ctlist);
 Assert(ct->c_list == NULL);
@@ -1531,7 +1531,7 @@ ReleaseCatCacheList(CatCList *list)
 Assert(list->cl_magic == CL_MAGIC);
 Assert(list->refcount > 0);

- for (i = list->n_members; --i >= 0; )
+ for (i = list->n_members; --i >= 0;)
 {
 CatCTup *ct = list->members[i];

@@ -1558,7 +1558,7 @@ ReleaseCatCacheList(CatCList *list)
 /*
 * CatalogCacheCreateEntry
 * Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry is given refcount 1.
+ * supplied data into it. The new entry is given refcount 1.
 */
 static CatCTup *
 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
@@ -1568,7 +1568,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 MemoryContext oldcxt;

 /*
- * Allocate CatCTup header in cache memory, and copy the tuple there too.
+ * Allocate CatCTup header in cache memory, and copy the tuple there
+ * too.
 */
 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 ct = (CatCTup *) palloc(sizeof(CatCTup));
@@ -1655,27 +1656,26 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)

 for (i = 0; i < nkeys; i++)
 {
- int attindex = cache->cc_key[i];
- Datum keyval = skeys[i].sk_argument;
+ int attindex = cache->cc_key[i];
+ Datum keyval = skeys[i].sk_argument;

 if (attindex > 0)
 {
 /*
- * Here we must be careful in case the caller passed a
- * C string where a NAME is wanted: convert the given
- * argument to a correctly padded NAME. Otherwise the
- * memcpy() done in heap_formtuple could fall off the
- * end of memory.
+ * Here we must be careful in case the caller passed a C
+ * string where a NAME is wanted: convert the given argument
+ * to a correctly padded NAME. Otherwise the memcpy() done in
+ * heap_formtuple could fall off the end of memory.
 */
 if (cache->cc_isname[i])
 {
- Name newval = &tempNames[i];
+ Name newval = &tempNames[i];

 namestrcpy(newval, DatumGetCString(keyval));
 keyval = NameGetDatum(newval);
 }
- values[attindex-1] = keyval;
- nulls[attindex-1] = ' ';
+ values[attindex - 1] = keyval;
+ nulls[attindex - 1] = ' ';
 }
 else
 {
@@ -1727,7 +1727,7 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
 void
 PrepareToInvalidateCacheTuple(Relation relation,
 HeapTuple tuple,
- void (*function) (int, uint32, ItemPointer, Oid))
+ void (*function) (int, uint32, ItemPointer, Oid))
 {
 CatCache *ccp;
 Oid reloid;
src/backend/utils/cache/inval.c (vendored), 32 lines changed
@@ -10,7 +10,7 @@
 * ie, until the next CommandCounterIncrement() or transaction commit.
 * (See utils/time/tqual.c, and note that system catalogs are generally
 * scanned under SnapshotNow rules by the system, or plain user snapshots
- * for user queries.) At the command boundary, the old tuple stops
+ * for user queries.) At the command boundary, the old tuple stops
 * being valid and the new version, if any, becomes valid. Therefore,
 * we cannot simply flush a tuple from the system caches during heap_update()
 * or heap_delete(). The tuple is still good at that point; what's more,
@@ -29,12 +29,12 @@
 *
 * If we successfully complete the transaction, we have to broadcast all
 * these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
 * to record the transaction commit before sending SI messages, otherwise
 * the other backends won't see our updated tuples as good.
 *
 * In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
 * two events, delete + insert, for simplicity. (There are cases where
 * it'd be possible to record just one event, but we don't currently try.)
 *
@@ -74,7 +74,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.54 2002/09/02 01:05:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.55 2002/09/04 20:31:29 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -129,7 +129,7 @@ static InvalidationListHeader CurrentCmdInvalidMsgs;
 /* head of previous-commands event list */
 static InvalidationListHeader PriorCmdInvalidMsgs;

- static bool RelcacheInitFileInval; /* init file must be invalidated? */
+ static bool RelcacheInitFileInval; /* init file must be invalidated? */

 /*
 * Dynamically-registered callback functions. Current implementation
@@ -395,6 +395,7 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId)
 {
 AddRelcacheInvalidationMessage(&CurrentCmdInvalidMsgs,
 dbId, relId);
+
 /*
 * If the relation being invalidated is one of those cached in the
 * relcache init file, mark that we need to zap that file at commit.
@@ -505,10 +506,11 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
 */
 if (!IsSystemRelation(relation))
 return;
- /*
- * TOAST tuples can likewise be ignored here.
- * Note that TOAST tables are considered system relations
- * so they are not filtered by the above test.
+
+ /*
+ * TOAST tuples can likewise be ignored here. Note that TOAST tables
+ * are considered system relations so they are not filtered by the
+ * above test.
 */
 if (IsToastRelation(relation))
 return;
@@ -573,12 +575,12 @@ AcceptInvalidationMessages(void)
 * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
 * to the shared invalidation message queue. Note that these will be read
 * not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
 * we can skip immediate local processing of anything that's still in
 * CurrentCmdInvalidMsgs, and just send that list out too.
 *
 * If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
 * since they'll not have seen our changed tuples anyway. We can forget
 * about CurrentCmdInvalidMsgs too, since those changes haven't touched
 * the caches yet.
@@ -596,9 +598,9 @@ AtEOXactInvalidationMessages(bool isCommit)
 if (isCommit)
 {
 /*
- * Relcache init file invalidation requires processing both
- * before and after we send the SI messages. However, we need
- * not do anything unless we committed.
+ * Relcache init file invalidation requires processing both before
+ * and after we send the SI messages. However, we need not do
+ * anything unless we committed.
 */
 if (RelcacheInitFileInval)
 RelationCacheInitFileInvalidate(true);
@@ -694,7 +696,7 @@ CacheInvalidateRelcache(Oid relationId)
 *
 * NOTE: currently, the OID argument to the callback routine is not
 * provided for syscache callbacks; the routine doesn't really get any
- * useful info as to exactly what changed. It should treat every call
+ * useful info as to exactly what changed. It should treat every call
 * as a "cache flush" request.
 */
 void
src/backend/utils/cache/lsyscache.c (vendored), 14 lines changed
@@ -7,7 +7,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/lsyscache.c,v 1.82 2002/08/31 22:10:47 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/lsyscache.c,v 1.83 2002/09/04 20:31:30 momjian Exp $
 *
 * NOTES
 * Eventually, the index information should go through here, too.
@@ -349,7 +349,7 @@ op_mergejoinable(Oid opno, Oid ltype, Oid rtype, Oid *leftOp, Oid *rightOp)
 *
 * Returns the cross-type comparison operators (ltype "<" rtype and
 * ltype ">" rtype) for an operator previously determined to be
- * mergejoinable. Optionally, fetches the regproc ids of these
+ * mergejoinable. Optionally, fetches the regproc ids of these
 * operators, as well as their operator OIDs.
 */
 void
@@ -651,7 +651,7 @@ get_relname_relid(const char *relname, Oid relnamespace)
 Oid
 get_system_catalog_relid(const char *catname)
 {
- Oid relid;
+ Oid relid;

 relid = GetSysCacheOid(RELNAMENSP,
 PointerGetDatum(catname),
@@ -737,7 +737,7 @@ get_rel_namespace(Oid relid)
 if (HeapTupleIsValid(tp))
 {
 Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
- Oid result;
+ Oid result;

 result = reltup->relnamespace;
 ReleaseSysCache(tp);
@@ -766,7 +766,7 @@ get_rel_type_id(Oid relid)
 if (HeapTupleIsValid(tp))
 {
 Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
- Oid result;
+ Oid result;

 result = reltup->reltype;
 ReleaseSysCache(tp);
@@ -1105,8 +1105,8 @@ getBaseTypeMod(Oid typid, int32 typmod)
 /*
 * The typmod applied to a domain should always be -1.
 *
- * We substitute the domain's typmod as we switch attention to
- * the base type.
+ * We substitute the domain's typmod as we switch attention to the
+ * base type.
 */
 Assert(typmod < 0);

src/backend/utils/cache/relcache.c (vendored), 240 lines changed
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.174 2002/09/02 02:47:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.175 2002/09/04 20:31:30 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -81,7 +81,7 @@ static FormData_pg_attribute Desc_pg_type[Natts_pg_type] = {Schema_pg_type};
 * Relations are looked up two ways, by OID and by name,
 * thus there are two hash tables for referencing them.
 *
- * The OID index covers all relcache entries. The name index
+ * The OID index covers all relcache entries. The name index
 * covers *only* system relations (only those in PG_CATALOG_NAMESPACE).
 */
 static HTAB *RelationIdCache;
@@ -98,7 +98,7 @@ static HTAB *RelationNodeCache;
 * This flag is false until we have prepared the critical relcache entries
 * that are needed to do indexscans on the tables read by relcache building.
 */
- bool criticalRelcachesBuilt = false;
+ bool criticalRelcachesBuilt = false;

 /*
 * This flag is set if we discover that we need to write a new relcache
@@ -108,7 +108,7 @@ static bool needNewCacheFile = false;

 /*
 * This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
 * to detect whether data about to be written by write_relcache_init_file()
 * might already be obsolete.
 */
@@ -260,8 +260,8 @@ typedef struct opclasscacheent
 StrategyNumber numStrats; /* max # of strategies (from pg_am) */
 StrategyNumber numSupport; /* max # of support procs (from pg_am) */
 Oid *operatorOids; /* strategy operators' OIDs */
- RegProcedure *operatorProcs; /* strategy operators' procs */
- RegProcedure *supportProcs; /* support procs */
+ RegProcedure *operatorProcs; /* strategy operators' procs */
+ RegProcedure *supportProcs; /* support procs */
 } OpClassCacheEnt;

 static HTAB *OpClassCache = NULL;
@@ -292,15 +292,15 @@ static void AttrDefaultFetch(Relation relation);
 static void CheckConstraintFetch(Relation relation);
 static List *insert_ordered_oid(List *list, Oid datum);
 static void IndexSupportInitialize(Form_pg_index iform,
- IndexStrategy indexStrategy,
- Oid *indexOperator,
- RegProcedure *indexSupport,
- StrategyNumber maxStrategyNumber,
- StrategyNumber maxSupportNumber,
- AttrNumber maxAttributeNumber);
+ IndexStrategy indexStrategy,
+ Oid *indexOperator,
+ RegProcedure *indexSupport,
+ StrategyNumber maxStrategyNumber,
+ StrategyNumber maxSupportNumber,
+ AttrNumber maxAttributeNumber);
 static OpClassCacheEnt *LookupOpclassInfo(Oid operatorClassOid,
- StrategyNumber numStrats,
- StrategyNumber numSupport);
+ StrategyNumber numStrats,
+ StrategyNumber numSupport);


 /*
@@ -345,7 +345,7 @@ ScanPgRelation(RelationBuildDescInfo buildinfo)
 ScanKeyEntryInitialize(&key[1], 0,
 Anum_pg_class_relnamespace,
 F_OIDEQ,
- ObjectIdGetDatum(PG_CATALOG_NAMESPACE));
+ ObjectIdGetDatum(PG_CATALOG_NAMESPACE));
 nkeys = 2;
 indexRelname = ClassNameNspIndex;
 break;
@@ -356,9 +356,9 @@ ScanPgRelation(RelationBuildDescInfo buildinfo)
 }

 /*
- * Open pg_class and fetch a tuple. Force heap scan if we haven't
- * yet built the critical relcache entries (this includes initdb
- * and startup without a pg_internal.init file).
+ * Open pg_class and fetch a tuple. Force heap scan if we haven't yet
+ * built the critical relcache entries (this includes initdb and
+ * startup without a pg_internal.init file).
 */
 pg_class_desc = heap_openr(RelationRelationName, AccessShareLock);
 pg_class_scan = systable_beginscan(pg_class_desc, indexRelname,
@@ -481,9 +481,9 @@ RelationBuildTupleDesc(RelationBuildDescInfo buildinfo,
 Int16GetDatum(0));

 /*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't
- * yet built the critical relcache entries (this includes initdb
- * and startup without a pg_internal.init file).
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't
+ * yet built the critical relcache entries (this includes initdb and
+ * startup without a pg_internal.init file).
 */
 pg_attribute_desc = heap_openr(AttributeRelationName, AccessShareLock);
 pg_attribute_scan = systable_beginscan(pg_attribute_desc,
@@ -653,8 +653,8 @@ RelationBuildRuleLock(Relation relation)
 relation->rd_rulescxt = rulescxt;

 /*
- * allocate an array to hold the rewrite rules (the array is extended if
- * necessary)
+ * allocate an array to hold the rewrite rules (the array is extended
+ * if necessary)
 */
 maxlocks = 4;
 rules = (RewriteRule **)
@@ -672,14 +672,14 @@ RelationBuildRuleLock(Relation relation)
 /*
 * open pg_rewrite and begin a scan
 *
- * Note: since we scan the rules using RewriteRelRulenameIndex,
- * we will be reading the rules in name order, except possibly
- * during emergency-recovery operations (ie, IsIgnoringSystemIndexes).
- * This in turn ensures that rules will be fired in name order.
+ * Note: since we scan the rules using RewriteRelRulenameIndex, we will
+ * be reading the rules in name order, except possibly during
+ * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
+ * in turn ensures that rules will be fired in name order.
 */
 rewrite_desc = heap_openr(RewriteRelationName, AccessShareLock);
 rewrite_tupdesc = RelationGetDescr(rewrite_desc);
- rewrite_scan = systable_beginscan(rewrite_desc,
+ rewrite_scan = systable_beginscan(rewrite_desc,
 RewriteRelRulenameIndex,
 true, SnapshotNow,
 1, &key);
@@ -723,7 +723,7 @@ RelationBuildRuleLock(Relation relation)
 &isnull);
 Assert(!isnull);
 rule_evqual_str = DatumGetCString(DirectFunctionCall1(textout,
- rule_evqual));
+ rule_evqual));
 oldcxt = MemoryContextSwitchTo(rulescxt);
 rule->qual = (Node *) stringToNode(rule_evqual_str);
 MemoryContextSwitchTo(oldcxt);
@@ -767,9 +767,9 @@ equalRuleLocks(RuleLock *rlock1, RuleLock *rlock2)
 int i;

 /*
- * As of 7.3 we assume the rule ordering is repeatable,
- * because RelationBuildRuleLock should read 'em in a
- * consistent order. So just compare corresponding slots.
+ * As of 7.3 we assume the rule ordering is repeatable, because
+ * RelationBuildRuleLock should read 'em in a consistent order. So
+ * just compare corresponding slots.
 */
 if (rlock1 != NULL)
 {
@@ -860,9 +860,10 @@ RelationBuildDesc(RelationBuildDescInfo buildinfo,
 RelationSetReferenceCount(relation, 1);

 /*
- * normal relations are not nailed into the cache; nor can a pre-existing
- * relation be new. It could be temp though. (Actually, it could be new
- * too, but it's okay to forget that fact if forced to flush the entry.)
+ * normal relations are not nailed into the cache; nor can a
+ * pre-existing relation be new. It could be temp though. (Actually,
+ * it could be new too, but it's okay to forget that fact if forced to
+ * flush the entry.)
 */
 relation->rd_isnailed = false;
 relation->rd_isnew = false;
@@ -950,8 +951,8 @@ RelationInitIndexAccessInfo(Relation relation)
 uint16 amsupport;

 /*
- * Make a copy of the pg_index entry for the index. Note that this
- * is a variable-length tuple.
+ * Make a copy of the pg_index entry for the index. Note that this is
+ * a variable-length tuple.
 */
 tuple = SearchSysCache(INDEXRELID,
 ObjectIdGetDatum(RelationGetRelid(relation)),
@@ -1116,6 +1117,7 @@ IndexSupportInitialize(Form_pg_index iform,
 MemSet(mapentry, 0, sizeof(*mapentry));
 mapentry->sk_flags = 0;
 mapentry->sk_procedure = opcentry->operatorProcs[strategy];
+
 /*
 * Mark mapentry->sk_func invalid, until and unless
 * someone sets it up.
@@ -1387,7 +1389,7 @@ formrdesc(const char *relationName,
 /*
 * It's important to distinguish between shared and non-shared
 * relations, even at bootstrap time, to make sure we know where they
- * are stored. At present, all relations that formrdesc is used for
+ * are stored. At present, all relations that formrdesc is used for
 * are not shared.
 */
 relation->rd_rel->relisshared = false;
@@ -1401,12 +1403,12 @@ formrdesc(const char *relationName,
 /*
 * initialize attribute tuple form
 *
- * Unlike the case with the relation tuple, this data had better be
- * right because it will never be replaced. The input values must be
+ * Unlike the case with the relation tuple, this data had better be right
+ * because it will never be replaced. The input values must be
 * correctly defined by macros in src/include/catalog/ headers.
 */
 relation->rd_att = CreateTemplateTupleDesc(natts,
- relation->rd_rel->relhasoids);
+ relation->rd_rel->relhasoids);

 /*
 * initialize tuple desc info
@@ -1799,10 +1801,11 @@ RelationClearRelation(Relation relation, bool rebuild)
 FreeTriggerDesc(old_trigdesc);

 /*
- * Update rd_nblocks. This is kind of expensive, but I think we must
- * do it in case relation has been truncated... we definitely must
- * do it if the rel is new or temp, since RelationGetNumberOfBlocks
- * will subsequently assume that the block count is correct.
+ * Update rd_nblocks. This is kind of expensive, but I think we
+ * must do it in case relation has been truncated... we definitely
+ * must do it if the rel is new or temp, since
+ * RelationGetNumberOfBlocks will subsequently assume that the
+ * block count is correct.
 */
 RelationUpdateNumberOfBlocks(relation);
 }
@@ -1971,12 +1974,13 @@ AtEOXact_RelationCache(bool commit)
 /*
 * Is it a relation created in the current transaction?
 *
- * During commit, reset the flag to false, since we are now out of the
- * creating transaction. During abort, simply delete the relcache
- * entry --- it isn't interesting any longer. (NOTE: if we have
- * forgotten the isnew state of a new relation due to a forced cache
- * flush, the entry will get deleted anyway by shared-cache-inval
- * processing of the aborted pg_class insertion.)
+ * During commit, reset the flag to false, since we are now out of
+ * the creating transaction. During abort, simply delete the
+ * relcache entry --- it isn't interesting any longer. (NOTE: if
+ * we have forgotten the isnew state of a new relation due to a
+ * forced cache flush, the entry will get deleted anyway by
+ * shared-cache-inval processing of the aborted pg_class
+ * insertion.)
 */
 if (relation->rd_isnew)
 {
@@ -1991,18 +1995,18 @@ AtEOXact_RelationCache(bool commit)

 /*
 * During transaction abort, we must also reset relcache entry ref
- * counts to their normal not-in-a-transaction state. A ref count may
- * be too high because some routine was exited by elog() between
- * incrementing and decrementing the count.
+ * counts to their normal not-in-a-transaction state. A ref count
+ * may be too high because some routine was exited by elog()
+ * between incrementing and decrementing the count.
 *
- * During commit, we should not have to do this, but it's still useful
- * to check that the counts are correct to catch missed relcache
- * closes.
+ * During commit, we should not have to do this, but it's still
+ * useful to check that the counts are correct to catch missed
+ * relcache closes.
 *
 * In bootstrap mode, do NOT reset the refcnt nor complain that it's
 * nonzero --- the bootstrap code expects relations to stay open
- * across start/commit transaction calls. (That seems bogus, but it's
- * not worth fixing.)
+ * across start/commit transaction calls. (That seems bogus, but
+ * it's not worth fixing.)
 */
 expected_refcnt = relation->rd_isnailed ? 1 : 0;

@@ -2083,10 +2087,10 @@ RelationBuildLocalRelation(const char *relname,
 /*
 * create a new tuple descriptor from the one passed in. We do this
 * partly to copy it into the cache context, and partly because the
- * new relation can't have any defaults or constraints yet; they
- * have to be added in later steps, because they require additions
- * to multiple system catalogs. We can copy attnotnull constraints
- * here, however.
+ * new relation can't have any defaults or constraints yet; they have
+ * to be added in later steps, because they require additions to
+ * multiple system catalogs. We can copy attnotnull constraints here,
+ * however.
 */
 rel->rd_att = CreateTupleDescCopy(tupDesc);
 for (i = 0; i < natts; i++)
@@ -2184,12 +2188,12 @@ RelationCacheInitialize(void)
 &ctl, HASH_ELEM | HASH_FUNCTION);

 /*
- * Try to load the relcache cache file. If successful, we're done
- * for now. Otherwise, initialize the cache with pre-made descriptors
- * for the critical "nailed-in" system catalogs.
+ * Try to load the relcache cache file. If successful, we're done for
+ * now. Otherwise, initialize the cache with pre-made descriptors for
+ * the critical "nailed-in" system catalogs.
 */
 if (IsBootstrapProcessingMode() ||
- ! load_relcache_init_file())
+ !load_relcache_init_file())
 {
 formrdesc(RelationRelationName,
 Natts_pg_class, Desc_pg_class);
@@ -2228,22 +2232,23 @@ RelationCacheInitializePhase2(void)
 * If we didn't get the critical system indexes loaded into relcache,
 * do so now. These are critical because the catcache depends on them
 * for catcache fetches that are done during relcache load. Thus, we
- * have an infinite-recursion problem. We can break the recursion
- * by doing heapscans instead of indexscans at certain key spots.
- * To avoid hobbling performance, we only want to do that until we
- * have the critical indexes loaded into relcache. Thus, the flag
- * criticalRelcachesBuilt is used to decide whether to do heapscan
- * or indexscan at the key spots, and we set it true after we've loaded
+ * have an infinite-recursion problem. We can break the recursion by
+ * doing heapscans instead of indexscans at certain key spots. To
+ * avoid hobbling performance, we only want to do that until we have
+ * the critical indexes loaded into relcache. Thus, the flag
+ * criticalRelcachesBuilt is used to decide whether to do heapscan or
+ * indexscan at the key spots, and we set it true after we've loaded
 * the critical indexes.
 *
 * The critical indexes are marked as "nailed in cache", partly to make
- * it easy for load_relcache_init_file to count them, but mainly
- * because we cannot flush and rebuild them once we've set
- * criticalRelcachesBuilt to true. (NOTE: perhaps it would be possible
- * to reload them by temporarily setting criticalRelcachesBuilt to
- * false again. For now, though, we just nail 'em in.)
+ * it easy for load_relcache_init_file to count them, but mainly
+ * because we cannot flush and rebuild them once we've set
+ * criticalRelcachesBuilt to true. (NOTE: perhaps it would be
+ * possible to reload them by temporarily setting
+ * criticalRelcachesBuilt to false again. For now, though, we just
+ * nail 'em in.)
 */
- if (! criticalRelcachesBuilt)
+ if (!criticalRelcachesBuilt)
 {
 RelationBuildDescInfo buildinfo;
 Relation ird;
@@ -2265,7 +2270,7 @@ RelationCacheInitializePhase2(void)
 LOAD_CRIT_INDEX(AccessMethodProcedureIndex);
 LOAD_CRIT_INDEX(OperatorOidIndex);

- #define NUM_CRITICAL_INDEXES 7 /* fix if you change list above */
+ #define NUM_CRITICAL_INDEXES 7 /* fix if you change list above */

 criticalRelcachesBuilt = true;
 }
@@ -2273,10 +2278,10 @@ RelationCacheInitializePhase2(void)
 /*
 * Now, scan all the relcache entries and update anything that might
 * be wrong in the results from formrdesc or the relcache cache file.
- * If we faked up relcache entries using formrdesc, then read
- * the real pg_class rows and replace the fake entries with them.
- * Also, if any of the relcache entries have rules or triggers,
- * load that info the hard way since it isn't recorded in the cache file.
+ * If we faked up relcache entries using formrdesc, then read the real
+ * pg_class rows and replace the fake entries with them. Also, if any
+ * of the relcache entries have rules or triggers, load that info the
+ * hard way since it isn't recorded in the cache file.
 */
 hash_seq_init(&status, RelationIdCache);

@@ -2293,12 +2298,13 @@ RelationCacheInitializePhase2(void)
 Form_pg_class relp;

 htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
 0, 0, 0);
 if (!HeapTupleIsValid(htup))
 elog(FATAL, "RelationCacheInitializePhase2: no pg_class entry for %s",
 RelationGetRelationName(relation));
 relp = (Form_pg_class) GETSTRUCT(htup);
+
 /*
 * Copy tuple to relation->rd_rel. (See notes in
 * AllocateRelationDesc())
@@ -2335,11 +2341,11 @@ RelationCacheInitializePhase3(void)
 if (needNewCacheFile)
 {
 /*
- * Force all the catcaches to finish initializing and thereby
- * open the catalogs and indexes they use. This will preload
- * the relcache with entries for all the most important system
- * catalogs and indexes, so that the init file will be most
- * useful for future backends.
+ * Force all the catcaches to finish initializing and thereby open
+ * the catalogs and indexes they use. This will preload the
+ * relcache with entries for all the most important system
+ * catalogs and indexes, so that the init file will be most useful
+ * for future backends.
 */
 InitCatalogCachePhase2();

@@ -2509,7 +2515,7 @@ CheckConstraintFetch(Relation relation)
 RelationGetRelationName(relation));

 check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
- NameStr(conform->conname));
+ NameStr(conform->conname));

 /* Grab and test conbin is actually set */
 val = fastgetattr(htup,
@@ -2559,7 +2565,7 @@ List *
 RelationGetIndexList(Relation relation)
 {
 Relation indrel;
- SysScanDesc indscan;
+ SysScanDesc indscan;
 ScanKeyData skey;
 HeapTuple htup;
 List *result;
@@ -2724,7 +2730,7 @@ load_relcache_init_file(void)
 nailed_rels = nailed_indexes = 0;
 initFileRelationIds = NIL;

- for (relno = 0; ; relno++)
+ for (relno = 0;; relno++)
 {
 Size len;
 size_t nread;
@@ -2820,9 +2826,9 @@ load_relcache_init_file(void)
 */
 indexcxt = AllocSetContextCreate(CacheMemoryContext,
 RelationGetRelationName(rel),
- 0, /* minsize */
- 512, /* initsize */
- 1024); /* maxsize */
+ 0, /* minsize */
+ 512, /* initsize */
+ 1024); /* maxsize */
 rel->rd_indexcxt = indexcxt;

 /* next, read the index strategy map */
@@ -2883,8 +2889,9 @@ load_relcache_init_file(void)
 /*
 * Rules and triggers are not saved (mainly because the internal
- * format is complex and subject to change). They must be rebuilt
- * if needed by RelationCacheInitializePhase2. This is not expected
- * to be a big performance hit since few system catalogs have such.
+ * format is complex and subject to change). They must be rebuilt
+ * if needed by RelationCacheInitializePhase2. This is not
+ * expected to be a big performance hit since few system catalogs
+ * have such.
 */
 rel->rd_rules = NULL;
 rel->rd_rulescxt = NULL;
@@ -2917,8 +2924,8 @@ load_relcache_init_file(void)
 }

 /*
- * We reached the end of the init file without apparent problem.
- * Did we get the right number of nailed items? (This is a useful
+ * We reached the end of the init file without apparent problem. Did
+ * we get the right number of nailed items? (This is a useful
 * crosscheck in case the set of critical rels or indexes changes.)
 */
 if (nailed_rels != NUM_CRITICAL_RELS ||
@@ -2945,9 +2952,9 @@ load_relcache_init_file(void)
 return true;

 /*
- * init file is broken, so do it the hard way. We don't bother
- * trying to free the clutter we just allocated; it's not in the
- * relcache so it won't hurt.
+ * init file is broken, so do it the hard way. We don't bother trying
+ * to free the clutter we just allocated; it's not in the relcache so
+ * it won't hurt.
 */
 read_failed:
 pfree(rels);
@@ -3052,7 +3059,7 @@ write_relcache_init_file(void)
 * afresh using the syscache, and write that.
 */
 tuple = SearchSysCache(INDEXRELID,
- ObjectIdGetDatum(RelationGetRelid(rel)),
+ ObjectIdGetDatum(RelationGetRelid(rel)),
 0, 0, 0);
 if (!HeapTupleIsValid(tuple))
 elog(ERROR, "write_relcache_init_file: no pg_index entry for index %u",
@@ -3109,14 +3116,14 @@ write_relcache_init_file(void)

 /*
 * Now we have to check whether the data we've so painstakingly
- * accumulated is already obsolete due to someone else's just-committed
- * catalog changes. If so, we just delete the temp file and leave it
- * to the next backend to try again. (Our own relcache entries will be
- * updated by SI message processing, but we can't be sure whether what
- * we wrote out was up-to-date.)
+ * accumulated is already obsolete due to someone else's
+ * just-committed catalog changes. If so, we just delete the temp
+ * file and leave it to the next backend to try again. (Our own
+ * relcache entries will be updated by SI message processing, but we
+ * can't be sure whether what we wrote out was up-to-date.)
 *
- * This mustn't run concurrently with RelationCacheInitFileInvalidate,
- * so grab a serialization lock for the duration.
+ * This mustn't run concurrently with RelationCacheInitFileInvalidate, so
+ * grab a serialization lock for the duration.
 */
 LWLockAcquire(RelCacheInitLock, LW_EXCLUSIVE);

@@ -3140,9 +3147,10 @@ write_relcache_init_file(void)
 if (rename(tempfilename, finalfilename) < 0)
 {
 elog(WARNING, "Cannot rename init file %s to %s: %m\n\tContinuing anyway, but there's something wrong.", tempfilename, finalfilename);
+
 /*
- * If we fail, try to clean up the useless temp file; don't bother
- * to complain if this fails too.
+ * If we fail, try to clean up the useless temp file; don't
+ * bother to complain if this fails too.
 */
 unlink(tempfilename);
 }
@@ -3206,13 +3214,13 @@ RelationCacheInitFileInvalidate(bool beforeSend)
 else
 {
 /*
- * We need to interlock this against write_relcache_init_file,
- * to guard against possibility that someone renames a new-but-
+ * We need to interlock this against write_relcache_init_file, to
+ * guard against possibility that someone renames a new-but-
 * already-obsolete init file into place just after we unlink.
 * With the interlock, it's certain that write_relcache_init_file
- * will notice our SI inval message before renaming into place,
- * or else that we will execute second and successfully unlink
- * the file.
+ * will notice our SI inval message before renaming into place, or
+ * else that we will execute second and successfully unlink the
+ * file.
 */
 LWLockAcquire(RelCacheInitLock, LW_EXCLUSIVE);
 unlink(initfilename);
src/backend/utils/cache/syscache.c (vendored), 19 lines changed
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/syscache.c,v 1.86 2002/08/05 03:29:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/syscache.c,v 1.87 2002/09/04 20:31:30 momjian Exp $
 *
 * NOTES
 * These routines allow the parser/planner/executor to perform
@@ -174,7 +174,7 @@ static const struct cachedesc cacheinfo[] = {
 0
 }},
 {
- CastRelationName, /* CASTSOURCETARGET */
+ CastRelationName, /* CASTSOURCETARGET */
 CastSourceTargetIndex,
 0,
 2,
@@ -204,7 +204,7 @@ static const struct cachedesc cacheinfo[] = {
 0,
 0
 }},
- {ConversionRelationName, /* CONDEFAULT */
+ {ConversionRelationName, /* CONDEFAULT */
 ConversionDefaultIndex,
 0,
 4,
@@ -214,7 +214,7 @@ static const struct cachedesc cacheinfo[] = {
 Anum_pg_conversion_contoencoding,
 ObjectIdAttributeNumber,
 }},
- {ConversionRelationName, /* CONNAMENSP */
+ {ConversionRelationName, /* CONNAMENSP */
 ConversionNameNspIndex,
 0,
 2,
@@ -224,7 +224,7 @@ static const struct cachedesc cacheinfo[] = {
 0,
 0
 }},
- {ConversionRelationName, /* CONOID */
+ {ConversionRelationName, /* CONOID */
 ConversionOidIndex,
 0,
 1,
@@ -436,7 +436,8 @@ static const struct cachedesc cacheinfo[] = {
 }}
 };

- static CatCache *SysCache[lengthof(cacheinfo)];
+ static CatCache *SysCache[
+ lengthof(cacheinfo)];
 static int SysCacheSize = lengthof(cacheinfo);
 static bool CacheInitialized = false;

@@ -501,9 +502,7 @@ InitCatalogCachePhase2(void)
 Assert(CacheInitialized);

 for (cacheId = 0; cacheId < SysCacheSize; cacheId++)
- {
 InitCatCachePhase2(SysCache[cacheId]);
- }
 }


@@ -531,7 +530,7 @@ SearchSysCache(int cacheId,
 Datum key4)
 {
 if (cacheId < 0 || cacheId >= SysCacheSize ||
- ! PointerIsValid(SysCache[cacheId]))
+ !PointerIsValid(SysCache[cacheId]))
 elog(ERROR, "SearchSysCache: Bad cache id %d", cacheId);

 return SearchCatCache(SysCache[cacheId], key1, key2, key3, key4);
@@ -731,7 +730,7 @@ SearchSysCacheList(int cacheId, int nkeys,
 Datum key1, Datum key2, Datum key3, Datum key4)
 {
 if (cacheId < 0 || cacheId >= SysCacheSize ||
- ! PointerIsValid(SysCache[cacheId]))
+ !PointerIsValid(SysCache[cacheId]))
 elog(ERROR, "SearchSysCacheList: Bad cache id %d", cacheId);

 return SearchCatCacheList(SysCache[cacheId], nkeys,