
Standard pgindent run for 8.1.

Bruce Momjian
2005-10-15 02:49:52 +00:00
parent 790c01d280
commit 1dc3498251
770 changed files with 34334 additions and 32507 deletions
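
Nearly all of the changes below are mechanical: pgindent refills multi-line comments to the standard line width and re-aligns wrapped continuation lines. As a minimal illustration, taken from one of the catcache.c hunks below, the tool rewraps

    /*
     * first switch to the cache context so our allocations do not vanish
     * at the end of a transaction
     */

into

    /*
     * first switch to the cache context so our allocations do not vanish at
     * the end of a transaction
     */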

src/backend/utils/cache/catcache.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.124 2005/09/24 22:54:39 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -35,7 +35,7 @@
#include "utils/syscache.h"
/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
/*
* Constants related to size of the catcache.
@ -187,22 +187,22 @@ CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
case 4:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
cur_skey[3].sk_argument)) << 9;
cur_skey[3].sk_argument)) << 9;
/* FALLTHROUGH */
case 3:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
cur_skey[2].sk_argument)) << 6;
cur_skey[2].sk_argument)) << 6;
/* FALLTHROUGH */
case 2:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
cur_skey[1].sk_argument)) << 3;
cur_skey[1].sk_argument)) << 3;
/* FALLTHROUGH */
case 1:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
cur_skey[0].sk_argument));
cur_skey[0].sk_argument));
break;
default:
elog(FATAL, "wrong number of hash keys: %d", nkeys);
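
The switch above combines up to four per-key hash values by deliberately falling through the cases, XORing each key's hash in at a different shift so that the same values in different key positions hash differently. A minimal standalone sketch of the same pattern, with plain uint32 inputs standing in for the DirectFunctionCall1 results (not the PostgreSQL code itself):

    #include <stdint.h>

    /* Combine up to four per-key hashes into one bucket hash value. */
    static uint32_t
    combine_hashes(int nkeys, const uint32_t h[4])
    {
        uint32_t hashValue = 0;

        switch (nkeys)
        {
            case 4:
                hashValue ^= h[3] << 9;
                /* FALLTHROUGH */
            case 3:
                hashValue ^= h[2] << 6;
                /* FALLTHROUGH */
            case 2:
                hashValue ^= h[1] << 3;
                /* FALLTHROUGH */
            case 1:
                hashValue ^= h[0];
                break;
            default:
                return 0;   /* bad nkeys; the real code elogs FATAL here */
        }
        return hashValue;
    }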
@ -448,8 +448,8 @@ CatalogCacheIdInvalidate(int cacheId,
/*
* We don't bother to check whether the cache has finished
* initialization yet; if not, there will be no entries in it so
* no problem.
* initialization yet; if not, there will be no entries in it so no
* problem.
*/
/*
@ -522,15 +522,15 @@ void
CreateCacheMemoryContext(void)
{
/*
* Purely for paranoia, check that context doesn't exist; caller
* probably did so already.
* Purely for paranoia, check that context doesn't exist; caller probably
* did so already.
*/
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
}
@ -768,7 +768,6 @@ do { \
cp->cc_reloid, cp->cc_indexoid, cp->id, \
cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif
@ -786,8 +785,8 @@ InitCatCache(int id,
int i;
/*
* first switch to the cache context so our allocations do not vanish
* at the end of a transaction
* first switch to the cache context so our allocations do not vanish at
* the end of a transaction
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
@ -878,7 +877,6 @@ do { \
i+1, cache->cc_nkeys, cache->cc_key[i]); \
} \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
@ -895,15 +893,15 @@ CatalogCacheInitializeCache(CatCache *cache)
CatalogCacheInitializeCache_DEBUG1;
/*
* Open the relation without locking --- we only need the tupdesc,
* which we assume will never change ...
* Open the relation without locking --- we only need the tupdesc, which
* we assume will never change ...
*/
relation = heap_open(cache->cc_reloid, NoLock);
Assert(RelationIsValid(relation));
/*
* switch to the cache context so our allocations do not vanish at the
* end of a transaction
* switch to the cache context so our allocations do not vanish at the end
* of a transaction
*/
Assert(CacheMemoryContext != NULL);
@ -915,8 +913,8 @@ CatalogCacheInitializeCache(CatCache *cache)
tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
/*
* save the relation's name and relisshared flag, too (cc_relname
* is used only for debugging purposes)
* save the relation's name and relisshared flag, too (cc_relname is used
* only for debugging purposes)
*/
cache->cc_relname = pstrdup(RelationGetRelationName(relation));
cache->cc_relisshared = RelationGetForm(relation)->relisshared;
@ -957,8 +955,8 @@ CatalogCacheInitializeCache(CatCache *cache)
cache->cc_isname[i] = (keytype == NAMEOID);
/*
* Do equality-function lookup (we assume this won't need a
* catalog lookup for any supported type)
* Do equality-function lookup (we assume this won't need a catalog
* lookup for any supported type)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
@ -1026,9 +1024,9 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
if (cache->id == INDEXRELID)
{
/*
* Since the OIDs of indexes aren't hardwired, it's painful to
* figure out which is which. Just force all pg_index searches to
* be heap scans while building the relcaches.
* Since the OIDs of indexes aren't hardwired, it's painful to figure
* out which is which. Just force all pg_index searches to be heap
* scans while building the relcaches.
*/
if (!criticalRelcachesBuilt)
return false;
@ -1037,10 +1035,10 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
cache->id == AMNAME)
{
/*
* Always do heap scans in pg_am, because it's so small there's
* not much point in an indexscan anyway. We *must* do this when
* initially building critical relcache entries, but we might as
* well just always do it.
* Always do heap scans in pg_am, because it's so small there's not
* much point in an indexscan anyway. We *must* do this when
* initially building critical relcache entries, but we might as well
* just always do it.
*/
return false;
}
@ -1146,18 +1144,18 @@ SearchCatCache(CatCache *cache,
continue;
/*
* we found a match in the cache: move it to the front of the
* global LRU list. We also move it to the front of the list for
* its hashbucket, in order to speed subsequent searches. (The
* most frequently accessed elements in any hashbucket will tend
* to be near the front of the hashbucket's list.)
* we found a match in the cache: move it to the front of the global
* LRU list. We also move it to the front of the list for its
* hashbucket, in order to speed subsequent searches. (The most
* frequently accessed elements in any hashbucket will tend to be near
* the front of the hashbucket's list.)
*/
DLMoveToFront(&ct->lrulist_elem);
DLMoveToFront(&ct->cache_elem);
/*
* If it's a positive entry, bump its refcount and return it. If
* it's negative, we can report failure to the caller.
* If it's a positive entry, bump its refcount and return it. If it's
* negative, we can report failure to the caller.
*/
if (!ct->negative)
{
@ -1188,19 +1186,19 @@ SearchCatCache(CatCache *cache,
}
/*
* Tuple was not found in cache, so we have to try to retrieve it
* directly from the relation. If found, we will add it to the cache;
* if not found, we will add a negative cache entry instead.
* Tuple was not found in cache, so we have to try to retrieve it directly
* from the relation. If found, we will add it to the cache; if not
* found, we will add a negative cache entry instead.
*
* NOTE: it is possible for recursive cache lookups to occur while
* reading the relation --- for example, due to shared-cache-inval
* messages being processed during heap_open(). This is OK. It's
* even possible for one of those lookups to find and enter the very
* same tuple we are trying to fetch here. If that happens, we will
* enter a second copy of the tuple into the cache. The first copy
* will never be referenced again, and will eventually age out of the
* cache, so there's no functional problem. This case is rare enough
* that it's not worth expending extra cycles to detect.
* NOTE: it is possible for recursive cache lookups to occur while reading
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
* fetch here. If that happens, we will enter a second copy of the tuple
* into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
*/
relation = heap_open(cache->cc_reloid, AccessShareLock);
@ -1231,13 +1229,13 @@ SearchCatCache(CatCache *cache,
/*
* If tuple was not found, we need to build a negative cache entry
* containing a fake tuple. The fake tuple has the correct key
* columns, but nulls everywhere else.
* containing a fake tuple. The fake tuple has the correct key columns,
* but nulls everywhere else.
*
* In bootstrap mode, we don't build negative entries, because the
* cache invalidation mechanism isn't alive and can't clear them
* if the tuple gets created later. (Bootstrap doesn't do UPDATEs,
* so it doesn't need cache inval for that.)
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
* gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
{
@ -1256,8 +1254,8 @@ SearchCatCache(CatCache *cache,
cache->cc_relname, hashIndex);
/*
* We are not returning the negative entry to the caller, so leave
* its refcount zero.
* We are not returning the negative entry to the caller, so leave its
* refcount zero.
*/
return NULL;
@ -1331,7 +1329,7 @@ SearchCatCacheList(CatCache *cache,
Dlelem *elt;
CatCList *cl;
CatCTup *ct;
List * volatile ctlist;
List *volatile ctlist;
ListCell *ctlist_item;
int nmembers;
bool ordered;
@ -1362,8 +1360,8 @@ SearchCatCacheList(CatCache *cache,
/*
* compute a hash value of the given keys for faster search. We don't
* presently divide the CatCList items into buckets, but this still
* lets us skip non-matching items quickly most of the time.
* presently divide the CatCList items into buckets, but this still lets
* us skip non-matching items quickly most of the time.
*/
lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
@ -1399,11 +1397,11 @@ SearchCatCacheList(CatCache *cache,
/*
* We found a matching list: mark it as touched since the last
* CatalogCacheCleanup() sweep. Also move the list to the front
* of the cache's list-of-lists, to speed subsequent searches.
* (We do not move the members to the fronts of their hashbucket
* lists, however, since there's no point in that unless they are
* searched for individually.)
* CatalogCacheCleanup() sweep. Also move the list to the front of
* the cache's list-of-lists, to speed subsequent searches. (We do not
* move the members to the fronts of their hashbucket lists, however,
* since there's no point in that unless they are searched for
* individually.)
*/
cl->touched = true;
DLMoveToFront(&cl->cache_elem);
@ -1428,10 +1426,10 @@ SearchCatCacheList(CatCache *cache,
* relation. For each matching tuple found in the relation, use an
* existing cache entry if possible, else build a new one.
*
* We have to bump the member refcounts temporarily to ensure they
* won't get dropped from the cache while loading other members.
* We use a PG_TRY block to ensure we can undo those refcounts if
* we get an error before we finish constructing the CatCList.
* We have to bump the member refcounts temporarily to ensure they won't get
* dropped from the cache while loading other members. We use a PG_TRY
* block to ensure we can undo those refcounts if we get an error before
* we finish constructing the CatCList.
*/
ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
@ -1473,13 +1471,13 @@ SearchCatCacheList(CatCache *cache,
ct = (CatCTup *) DLE_VAL(elt);
if (ct->dead || ct->negative)
continue; /* ignore dead and negative entries */
continue; /* ignore dead and negative entries */
if (ct->hash_value != hashValue)
continue; /* quickly skip entry if wrong hash val */
continue; /* quickly skip entry if wrong hash val */
if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
continue; /* not same tuple */
continue; /* not same tuple */
/*
* Found a match, but can't use it if it belongs to another
@ -1526,9 +1524,9 @@ SearchCatCacheList(CatCache *cache,
heap_freetuple(ntp);
/*
* We are now past the last thing that could trigger an elog before
* we have finished building the CatCList and remembering it in the
* resource owner. So it's OK to fall out of the PG_TRY, and indeed
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
* resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@ -1629,8 +1627,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
MemoryContext oldcxt;
/*
* Allocate CatCTup header in cache memory, and copy the tuple there
* too.
* Allocate CatCTup header in cache memory, and copy the tuple there too.
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
@ -1658,9 +1655,9 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
CacheHdr->ch_ntup++;
/*
* If we've exceeded the desired size of the caches, try to throw away
* the least recently used entry(s). NB: be careful not to throw away
* the newly-built entry...
* If we've exceeded the desired size of the caches, try to throw away the
* least recently used entry(s). NB: be careful not to throw away the
* newly-built entry...
*/
if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
CatalogCacheCleanup(ct);
@ -1684,22 +1681,22 @@ CatalogCacheCleanup(CatCTup *savect)
*prevelt;
/*
* Each time we have to do this, try to cut the cache size down to
* about 90% of the maximum.
* Each time we have to do this, try to cut the cache size down to about
* 90% of the maximum.
*/
tup_target = (CacheHdr->ch_maxtup * 9) / 10;
/*
* Our strategy for managing CatCLists is that, each time we have to
* throw away some cache entries, we first move-to-front all the members
* of CatCLists that have been touched since the last cleanup sweep.
* Then we do strict LRU elimination by individual tuples, zapping a list
* if any of its members gets zapped. Before PostgreSQL 8.1, we moved
* members to front each time their owning list was touched, which was
* arguably more fair in balancing list members against standalone tuples
* --- but the overhead for large lists was horrendous. This scheme is
* more heavily biased towards preserving lists, but that is not
* necessarily bad either.
* Our strategy for managing CatCLists is that, each time we have to throw
* away some cache entries, we first move-to-front all the members of
* CatCLists that have been touched since the last cleanup sweep. Then we
* do strict LRU elimination by individual tuples, zapping a list if any
* of its members gets zapped. Before PostgreSQL 8.1, we moved members to
* front each time their owning list was touched, which was arguably more
* fair in balancing list members against standalone tuples --- but the
* overhead for large lists was horrendous. This scheme is more heavily
* biased towards preserving lists, but that is not necessarily bad
* either.
*/
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
{
@ -1710,7 +1707,7 @@ CatalogCacheCleanup(CatCTup *savect)
Assert(cl->cl_magic == CL_MAGIC);
if (cl->touched && !cl->dead)
{
int i;
int i;
for (i = 0; i < cl->n_members; i++)
DLMoveToFront(&cl->members[i]->lrulist_elem);
@ -1775,9 +1772,9 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
if (attindex > 0)
{
/*
* Here we must be careful in case the caller passed a C
* string where a NAME is wanted: convert the given argument
* to a correctly padded NAME. Otherwise the memcpy() done in
* Here we must be careful in case the caller passed a C string
* where a NAME is wanted: convert the given argument to a
* correctly padded NAME. Otherwise the memcpy() done in
* heap_formtuple could fall off the end of memory.
*/
if (cache->cc_isname[i])
@ -1840,7 +1837,7 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
void
PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
void (*function) (int, uint32, ItemPointer, Oid))
void (*function) (int, uint32, ItemPointer, Oid))
{
CatCache *ccp;
Oid reloid;

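Several of the catcache hunks above reflow comments about the move-to-front bookkeeping: on every cache hit, DLMoveToFront relinks the entry at the head of both the global LRU list and its hashbucket list, so hot entries cluster at the front and CatalogCacheCleanup can evict from the tail. A hedged sketch of that discipline on a bare doubly linked list, with made-up types standing in for the real Dllist/Dlelem:

    #include <stddef.h>

    typedef struct Elem
    {
        struct Elem *prev;
        struct Elem *next;
    } Elem;

    typedef struct List
    {
        Elem *head;             /* most recently used */
        Elem *tail;             /* least recently used: evict from here */
    } List;

    static void
    move_to_front(List *list, Elem *e)
    {
        if (list->head == e)
            return;             /* already frontmost */

        /* unlink e from its current position */
        if (e->prev)
            e->prev->next = e->next;
        if (e->next)
            e->next->prev = e->prev;
        if (list->tail == e)
            list->tail = e->prev;

        /* relink e at the head */
        e->prev = NULL;
        e->next = list->head;
        if (list->head)
            list->head->prev = e;
        list->head = e;
    }
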
src/backend/utils/cache/inval.c

@ -53,10 +53,10 @@
*
* Also, whenever we see an operation on a pg_class or pg_attribute tuple,
* we register a relcache flush operation for the relation described by that
* tuple. pg_class updates trigger an smgr flush operation as well.
* tuple. pg_class updates trigger an smgr flush operation as well.
*
* We keep the relcache and smgr flush requests in lists separate from the
* catcache tuple flush requests. This allows us to issue all the pending
* catcache tuple flush requests. This allows us to issue all the pending
* catcache flushes before we issue relcache flushes, which saves us from
* loading a catcache tuple during relcache load only to flush it again
* right away. Also, we avoid queuing multiple relcache flush requests for
@ -80,7 +80,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.72 2005/06/17 22:32:46 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.73 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -142,7 +142,7 @@ typedef struct TransInvalidationInfo
struct TransInvalidationInfo *parent;
/* Subtransaction nesting depth */
int my_level;
int my_level;
/* head of current-command event list */
InvalidationListHeader CurrentCmdInvalidMsgs;
@ -173,9 +173,9 @@ static struct CACHECALLBACK
static int cache_callback_count = 0;
/* info values for 2PC callback */
#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
static void PersistInvalidationMessage(SharedInvalidationMessage *msg);
@ -208,7 +208,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
(FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
(FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = FIRSTCHUNKSIZE;
chunk->next = *listHdr;
@ -222,7 +222,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
(chunksize - 1) *sizeof(SharedInvalidationMessage));
(chunksize - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = chunksize;
chunk->next = *listHdr;
@ -316,7 +316,7 @@ AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
ProcessMessageList(hdr->rclist,
if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
msg->rc.relId == relId)
return);
return);
/* OK, add the item */
msg.rc.id = SHAREDINVALRELCACHE_ID;
@ -338,7 +338,7 @@ AddSmgrInvalidationMessage(InvalidationListHeader *hdr,
ProcessMessageList(hdr->rclist,
if (msg->sm.id == SHAREDINVALSMGR_ID &&
RelFileNodeEquals(msg->sm.rnode, rnode))
return);
return);
/* OK, add the item */
msg.sm.id = SHAREDINVALSMGR_ID;
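
Two ideas recur in the hunks above: the message list grows in chunks carrying a C89-style variable-length trailing array (hence the (FIRSTCHUNKSIZE - 1) * sizeof(...) over-allocation), and ProcessMessageList splices a caller-supplied statement into a loop over every stored message, which is how the `... return)` call sites can bail out early when a duplicate is already queued. A self-contained sketch of both, using simplified stand-in types (the real struct layouts and FIRSTCHUNKSIZE value may differ):

    #include <stdlib.h>

    typedef struct Msg { int id; } Msg;     /* stand-in message type */

    typedef struct Chunk
    {
        int           nitems;
        int           maxitems;
        struct Chunk *next;
        Msg           msgs[1];  /* really longer: extra space malloc'd below */
    } Chunk;

    #define FIRSTCHUNKSIZE 16   /* assumed initial capacity */

    /* Append a message, pushing a new chunk of double size when full. */
    static void
    add_message(Chunk **listHdr, Msg m)
    {
        Chunk *chunk = *listHdr;

        if (chunk == NULL || chunk->nitems >= chunk->maxitems)
        {
            int    n = chunk ? 2 * chunk->maxitems : FIRSTCHUNKSIZE;
            Chunk *newchunk = malloc(sizeof(Chunk) + (n - 1) * sizeof(Msg));

            newchunk->nitems = 0;
            newchunk->maxitems = n;
            newchunk->next = *listHdr;  /* newest chunk goes on the front */
            *listHdr = newchunk;
            chunk = newchunk;
        }
        chunk->msgs[chunk->nitems++] = m;
    }

    /* Expand to a loop over every message, binding `msg` for the caller's
     * statement fragment. */
    #define ProcessMessageList(listHdr, codeFragment) \
        do { \
            Chunk *_c; \
            for (_c = (listHdr); _c != NULL; _c = _c->next) \
            { \
                int _i; \
                for (_i = 0; _i < _c->nitems; _i++) \
                { \
                    Msg *msg = &_c->msgs[_i]; \
                    codeFragment; \
                } \
            } \
        } while (0)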
@ -470,8 +470,8 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
else if (msg->id == SHAREDINVALSMGR_ID)
{
/*
* We could have smgr entries for relations of other databases,
* so no short-circuit test is possible here.
* We could have smgr entries for relations of other databases, so no
* short-circuit test is possible here.
*/
smgrclosenode(msg->sm.rnode);
}
@ -523,17 +523,16 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
return;
/*
* We only need to worry about invalidation for tuples that are in
* system relations; user-relation tuples are never in catcaches and
* can't affect the relcache either.
* We only need to worry about invalidation for tuples that are in system
* relations; user-relation tuples are never in catcaches and can't affect
* the relcache either.
*/
if (!IsSystemRelation(relation))
return;
/*
* TOAST tuples can likewise be ignored here. Note that TOAST tables
* are considered system relations so they are not filtered by the
* above test.
* TOAST tuples can likewise be ignored here. Note that TOAST tables are
* considered system relations so they are not filtered by the above test.
*/
if (IsToastRelation(relation))
return;
@ -561,16 +560,15 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
databaseId = MyDatabaseId;
/*
* We need to send out an smgr inval as well as a relcache inval.
* This is needed because other backends might possibly possess
* smgr cache but not relcache entries for the target relation.
* We need to send out an smgr inval as well as a relcache inval. This
* is needed because other backends might possibly possess smgr cache
* but not relcache entries for the target relation.
*
* Note: during a pg_class row update that assigns a new
* relfilenode or reltablespace value, we will be called on both
* the old and new tuples, and thus will broadcast invalidation
* messages showing both the old and new RelFileNode values. This
* ensures that other backends will close smgr references to the
* old file.
* Note: during a pg_class row update that assigns a new relfilenode or
* reltablespace value, we will be called on both the old and new
* tuples, and thus will broadcast invalidation messages showing both
* the old and new RelFileNode values. This ensures that other
* backends will close smgr references to the old file.
*
* XXX possible future cleanup: it might be better to trigger smgr
* flushes explicitly, rather than indirectly from pg_class updates.
@ -590,13 +588,12 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
relationId = atttup->attrelid;
/*
* KLUGE ALERT: we always send the relcache event with
* MyDatabaseId, even if the rel in question is shared (which we
* can't easily tell). This essentially means that only backends
* in this same database will react to the relcache flush request.
* This is in fact appropriate, since only those backends could
* see our pg_attribute change anyway. It looks a bit ugly
* though.
* KLUGE ALERT: we always send the relcache event with MyDatabaseId,
* even if the rel in question is shared (which we can't easily tell).
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
* change anyway. It looks a bit ugly though.
*/
databaseId = MyDatabaseId;
}
@ -646,7 +643,7 @@ AtStart_Inval(void)
/*
* AtPrepare_Inval
* Save the inval lists state at 2PC transaction prepare.
* Save the inval lists state at 2PC transaction prepare.
*
* In this phase we just generate 2PC records for all the pending invalidation
* work.
@ -658,8 +655,8 @@ AtPrepare_Inval(void)
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
* Relcache init file invalidation requires processing both before
* and after we send the SI messages.
* Relcache init file invalidation requires processing both before and
* after we send the SI messages.
*/
if (transInvalInfo->RelcacheInitFileInval)
RegisterTwoPhaseRecord(TWOPHASE_RM_INVAL_ID, TWOPHASE_INFO_FILE_BEFORE,
@ -678,7 +675,7 @@ AtPrepare_Inval(void)
/*
* PostPrepare_Inval
* Clean up after successful PREPARE.
* Clean up after successful PREPARE.
*
* Here, we want to act as though the transaction aborted, so that we will
* undo any syscache changes it made, thereby bringing us into sync with the
@ -714,7 +711,7 @@ AtSubStart_Inval(void)
/*
* PersistInvalidationMessage
* Write an invalidation message to the 2PC state file.
* Write an invalidation message to the 2PC state file.
*/
static void
PersistInvalidationMessage(SharedInvalidationMessage *msg)
@ -736,7 +733,7 @@ inval_twophase_postcommit(TransactionId xid, uint16 info,
switch (info)
{
case TWOPHASE_INFO_MSG:
msg = (SharedInvalidationMessage *) recdata;
msg = (SharedInvalidationMessage *) recdata;
Assert(len == sizeof(SharedInvalidationMessage));
SendSharedInvalidMessage(msg);
break;
@ -786,15 +783,15 @@ AtEOXact_Inval(bool isCommit)
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
* Relcache init file invalidation requires processing both before
* and after we send the SI messages. However, we need not do
* anything unless we committed.
* Relcache init file invalidation requires processing both before and
* after we send the SI messages. However, we need not do anything
* unless we committed.
*/
if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFileInvalidate(true);
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
&transInvalInfo->CurrentCmdInvalidMsgs);
&transInvalInfo->CurrentCmdInvalidMsgs);
ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
SendSharedInvalidMessage);
@ -897,9 +894,9 @@ void
CommandEndInvalidationMessages(void)
{
/*
* You might think this shouldn't be called outside any transaction,
* but bootstrap does it, and also ABORT issued when not in a
* transaction. So just quietly return if no state to work on.
* You might think this shouldn't be called outside any transaction, but
* bootstrap does it, and also ABORT issued when not in a transaction. So
* just quietly return if no state to work on.
*/
if (transInvalInfo == NULL)
return;

src/backend/utils/cache/lsyscache.c

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.128 2005/10/11 17:27:14 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.129 2005/10/15 02:49:31 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
@ -149,10 +149,10 @@ get_op_hash_function(Oid opno)
Oid opclass = InvalidOid;
/*
* Search pg_amop to see if the target operator is registered as the
* "=" operator of any hash opclass. If the operator is registered in
* multiple opclasses, assume we can use the associated hash function
* from any one.
* Search pg_amop to see if the target operator is registered as the "="
* operator of any hash opclass. If the operator is registered in
* multiple opclasses, assume we can use the associated hash function from
* any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(opno),
@ -1223,9 +1223,9 @@ getTypeIOParam(HeapTuple typeTuple)
Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
* Array types get their typelem as parameter; everybody else gets
* their own type OID as parameter. (This is a change from 8.0,
* in which only composite types got their own OID as parameter.)
* Array types get their typelem as parameter; everybody else gets their
* own type OID as parameter. (This is a change from 8.0, in which only
* composite types got their own OID as parameter.)
*/
if (OidIsValid(typeStruct->typelem))
return typeStruct->typelem;
@ -1414,7 +1414,7 @@ get_typdefault(Oid typid)
/* Convert C string to a value of the given type */
datum = OidFunctionCall3(type->typinput,
CStringGetDatum(strDefaultVal),
ObjectIdGetDatum(getTypeIOParam(typeTuple)),
ObjectIdGetDatum(getTypeIOParam(typeTuple)),
Int32GetDatum(-1));
/* Build a Const node containing the value */
expr = (Node *) makeConst(typid,
@ -1501,8 +1501,8 @@ get_typavgwidth(Oid typid, int32 typmod)
{
/*
* For BPCHAR, the max width is also the only width. Otherwise we
* need to guess about the typical data width given the max. A
* sliding scale for percentage of max width seems reasonable.
* need to guess about the typical data width given the max. A sliding
* scale for percentage of max width seems reasonable.
*/
if (typid == BPCHAROID)
return maxwidth;
@ -1513,8 +1513,8 @@ get_typavgwidth(Oid typid, int32 typmod)
/*
* Beyond 1000, assume we're looking at something like
* "varchar(10000)" where the limit isn't actually reached often,
* and use a fixed estimate.
* "varchar(10000)" where the limit isn't actually reached often, and
* use a fixed estimate.
*/
return 32 + (1000 - 32) / 2;
}
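
The surrounding comment describes a sliding scale for estimating typical data width from a declared maximum; the hunks here show only the two endpoints (maxwidth itself for BPCHAR, and a fixed estimate past 1000). A hedged reconstruction of the whole scale -- the middle tier is an assumption, not shown in this diff:

    #include <stdint.h>

    static int32_t
    guess_avg_width(int32_t maxwidth)
    {
        if (maxwidth <= 32)
            return maxwidth;                  /* short fields: assume fully used */
        if (maxwidth < 1000)
            return 32 + (maxwidth - 32) / 2;  /* assume about half is typical */
        /* beyond 1000 ("varchar(10000)") the limit is rarely approached */
        return 32 + (1000 - 32) / 2;
    }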
@ -1905,9 +1905,9 @@ get_attstatsslot(HeapTuple statstuple,
values, nvalues);
/*
* If the element type is pass-by-reference, we now have a bunch
* of Datums that are pointers into the syscache value. Copy them
* to avoid problems if syscache decides to drop the entry.
* If the element type is pass-by-reference, we now have a bunch of
* Datums that are pointers into the syscache value. Copy them to
* avoid problems if syscache decides to drop the entry.
*/
if (!typeForm->typbyval)
{
@ -1938,9 +1938,9 @@ get_attstatsslot(HeapTuple statstuple,
statarray = DatumGetArrayTypeP(val);
/*
* We expect the array to be a 1-D float4 array; verify that. We
* don't need to use deconstruct_array() since the array data is
* just going to look like a C array of float4 values.
* We expect the array to be a 1-D float4 array; verify that. We don't
* need to use deconstruct_array() since the array data is just going
* to look like a C array of float4 values.
*/
narrayelem = ARR_DIMS(statarray)[0];
if (ARR_NDIM(statarray) != 1 || narrayelem <= 0 ||
@ -2038,7 +2038,7 @@ get_roleid(const char *rolname)
Oid
get_roleid_checked(const char *rolname)
{
Oid roleid;
Oid roleid;
roleid = get_roleid(rolname);
if (!OidIsValid(roleid))

src/backend/utils/cache/relcache.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.229 2005/09/16 04:13:18 neilc Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.230 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -192,7 +192,7 @@ static bool load_relcache_init_file(void);
static void write_relcache_init_file(void);
static void formrdesc(const char *relationName, Oid relationReltype,
bool hasoids, int natts, FormData_pg_attribute *att);
bool hasoids, int natts, FormData_pg_attribute *att);
static HeapTuple ScanPgRelation(Oid targetRelId, bool indexOK);
static Relation AllocateRelationDesc(Relation relation, Form_pg_class relp);
@ -241,9 +241,9 @@ ScanPgRelation(Oid targetRelId, bool indexOK)
/*
* Open pg_class and fetch a tuple. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and
* startup without a pg_internal.init file). The caller can also
* force a heap scan by setting indexOK == false.
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file). The caller can also force a heap
* scan by setting indexOK == false.
*/
pg_class_desc = heap_open(RelationRelationId, AccessShareLock);
pg_class_scan = systable_beginscan(pg_class_desc, ClassOidIndexId,
@ -303,12 +303,11 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
/*
* Copy the relation tuple form
*
* We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
* relacl is NOT stored in the relcache --- there'd be little point in
* it, since we don't copy the tuple's nullvalues bitmap and hence
* wouldn't know if the value is valid ... bottom line is that relacl
* *cannot* be retrieved from the relcache. Get it from the syscache
* if you need it.
* We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. relacl
* is NOT stored in the relcache --- there'd be little point in it, since
* we don't copy the tuple's nullvalues bitmap and hence wouldn't know if
* the value is valid ... bottom line is that relacl *cannot* be retrieved
* from the relcache. Get it from the syscache if you need it.
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@ -355,8 +354,8 @@ RelationBuildTupleDesc(Relation relation)
/*
* Form a scan key that selects only user attributes (attnum > 0).
* (Eliminating system attribute rows at the index level is lots
* faster than fetching them.)
* (Eliminating system attribute rows at the index level is lots faster
* than fetching them.)
*/
ScanKeyInit(&skey[0],
Anum_pg_attribute_attrelid,
@ -368,9 +367,9 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
* Open pg_attribute and begin a scan. Force heap scan if we haven't
* yet built the critical relcache entries (this includes initdb and
* startup without a pg_internal.init file).
* Open pg_attribute and begin a scan. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file).
*/
pg_attribute_desc = heap_open(AttributeRelationId, AccessShareLock);
pg_attribute_scan = systable_beginscan(pg_attribute_desc,
@ -445,9 +444,8 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
* attribute: it must be zero. This eliminates the need for special
* cases for attnum=1 that used to exist in fastgetattr() and
* index_getattr().
* attribute: it must be zero. This eliminates the need for special cases
* for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
relation->rd_att->attrs[0]->attcacheoff = 0;
@ -477,7 +475,7 @@ RelationBuildTupleDesc(Relation relation)
constr->num_check = relation->rd_rel->relchecks;
constr->check = (ConstrCheck *)
MemoryContextAllocZero(CacheMemoryContext,
constr->num_check * sizeof(ConstrCheck));
constr->num_check * sizeof(ConstrCheck));
CheckConstraintFetch(relation);
}
else
@ -521,8 +519,8 @@ RelationBuildRuleLock(Relation relation)
int maxlocks;
/*
* Make the private context. Parameters are set on the assumption
* that it'll probably not contain much data.
* Make the private context. Parameters are set on the assumption that
* it'll probably not contain much data.
*/
rulescxt = AllocSetContextCreate(CacheMemoryContext,
RelationGetRelationName(relation),
@ -532,8 +530,8 @@ RelationBuildRuleLock(Relation relation)
relation->rd_rulescxt = rulescxt;
/*
* allocate an array to hold the rewrite rules (the array is extended
* if necessary)
* allocate an array to hold the rewrite rules (the array is extended if
* necessary)
*/
maxlocks = 4;
rules = (RewriteRule **)
@ -551,10 +549,10 @@ RelationBuildRuleLock(Relation relation)
/*
* open pg_rewrite and begin a scan
*
* Note: since we scan the rules using RewriteRelRulenameIndexId,
* we will be reading the rules in name order, except possibly during
* emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
* in turn ensures that rules will be fired in name order.
* Note: since we scan the rules using RewriteRelRulenameIndexId, we will be
* reading the rules in name order, except possibly during
* emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
* turn ensures that rules will be fired in name order.
*/
rewrite_desc = heap_open(RewriteRelationId, AccessShareLock);
rewrite_tupdesc = RelationGetDescr(rewrite_desc);
@ -602,7 +600,7 @@ RelationBuildRuleLock(Relation relation)
&isnull);
Assert(!isnull);
rule_evqual_str = DatumGetCString(DirectFunctionCall1(textout,
rule_evqual));
rule_evqual));
oldcxt = MemoryContextSwitchTo(rulescxt);
rule->qual = (Node *) stringToNode(rule_evqual_str);
MemoryContextSwitchTo(oldcxt);
@ -647,8 +645,8 @@ equalRuleLocks(RuleLock *rlock1, RuleLock *rlock2)
/*
* As of 7.3 we assume the rule ordering is repeatable, because
* RelationBuildRuleLock should read 'em in a consistent order. So
* just compare corresponding slots.
* RelationBuildRuleLock should read 'em in a consistent order. So just
* compare corresponding slots.
*/
if (rlock1 != NULL)
{
@ -717,8 +715,8 @@ RelationBuildDesc(Oid targetRelId, Relation oldrelation)
relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
/*
* allocate storage for the relation descriptor, and copy
* pg_class_tuple to relation->rd_rel.
* allocate storage for the relation descriptor, and copy pg_class_tuple
* to relation->rd_rel.
*/
relation = AllocateRelationDesc(oldrelation, relp);
@ -733,10 +731,9 @@ RelationBuildDesc(Oid targetRelId, Relation oldrelation)
RelationGetRelid(relation) = relid;
/*
* normal relations are not nailed into the cache; nor can a
* pre-existing relation be new. It could be temp though. (Actually,
* it could be new too, but it's okay to forget that fact if forced to
* flush the entry.)
* normal relations are not nailed into the cache; nor can a pre-existing
* relation be new. It could be temp though. (Actually, it could be new
* too, but it's okay to forget that fact if forced to flush the entry.)
*/
relation->rd_refcnt = 0;
relation->rd_isnailed = false;
@ -834,9 +831,8 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Make a copy of the pg_index entry for the index. Since pg_index
* contains variable-length and possibly-null fields, we have to do
* this honestly rather than just treating it as a Form_pg_index
* struct.
* contains variable-length and possibly-null fields, we have to do this
* honestly rather than just treating it as a Form_pg_index struct.
*/
tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(relation)),
@ -851,9 +847,9 @@ RelationInitIndexAccessInfo(Relation relation)
ReleaseSysCache(tuple);
/*
* indclass cannot be referenced directly through the C struct, because
* it is after the variable-width indkey field. Therefore we extract
* the datum the hard way and provide a direct link in the relcache.
* indclass cannot be referenced directly through the C struct, because it
* is after the variable-width indkey field. Therefore we extract the
* datum the hard way and provide a direct link in the relcache.
*/
indclassDatum = fastgetattr(relation->rd_indextuple,
Anum_pg_index_indclass,
@ -884,9 +880,9 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
* Make the private context to hold index access info. The reason we
* need a context, and not just a couple of pallocs, is so that we
* won't leak any subsidiary info attached to fmgr lookup records.
* Make the private context to hold index access info. The reason we need
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*
* Context parameters are set on the assumption that it'll probably not
* contain much data.
@ -931,7 +927,7 @@ RelationInitIndexAccessInfo(Relation relation)
relation->rd_supportinfo = supportinfo;
/*
* Fill the operator and support procedure OID arrays. (aminfo and
* Fill the operator and support procedure OID arrays. (aminfo and
* supportinfo are left as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(relation->rd_indclass,
@ -1070,17 +1066,17 @@ LookupOpclassInfo(Oid operatorClassOid,
opcentry->supportProcs = NULL;
/*
* To avoid infinite recursion during startup, force heap scans if
* we're looking up info for the opclasses used by the indexes we
* would like to reference here.
* To avoid infinite recursion during startup, force heap scans if we're
* looking up info for the opclasses used by the indexes we would like to
* reference here.
*/
indexOK = criticalRelcachesBuilt ||
(operatorClassOid != OID_BTREE_OPS_OID &&
operatorClassOid != INT2_BTREE_OPS_OID);
/*
* Scan pg_amop to obtain operators for the opclass. We only fetch
* the default ones (those with subtype zero).
* Scan pg_amop to obtain operators for the opclass. We only fetch the
* default ones (those with subtype zero).
*/
if (numStrats > 0)
{
@ -1113,8 +1109,8 @@ LookupOpclassInfo(Oid operatorClassOid,
}
/*
* Scan pg_amproc to obtain support procs for the opclass. We only
* fetch the default ones (those with subtype zero).
* Scan pg_amproc to obtain support procs for the opclass. We only fetch
* the default ones (those with subtype zero).
*/
if (numSupport > 0)
{
@ -1193,8 +1189,8 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_refcnt = 1;
/*
* all entries built with this routine are nailed-in-cache; none are
* for new or temp relations.
* all entries built with this routine are nailed-in-cache; none are for
* new or temp relations.
*/
relation->rd_isnailed = true;
relation->rd_createSubid = InvalidSubTransactionId;
@ -1203,9 +1199,9 @@ formrdesc(const char *relationName, Oid relationReltype,
/*
* initialize relation tuple form
*
* The data we insert here is pretty incomplete/bogus, but it'll serve to
* get us launched. RelationCacheInitializePhase2() will read the
* real data from pg_class and replace what we've done here.
* The data we insert here is pretty incomplete/bogus, but it'll serve to get
* us launched. RelationCacheInitializePhase2() will read the real data
* from pg_class and replace what we've done here.
*/
relation->rd_rel = (Form_pg_class) palloc0(CLASS_TUPLE_SIZE);
@ -1214,10 +1210,9 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_rel->reltype = relationReltype;
/*
* It's important to distinguish between shared and non-shared
* relations, even at bootstrap time, to make sure we know where they
* are stored. At present, all relations that formrdesc is used for
* are not shared.
* It's important to distinguish between shared and non-shared relations,
* even at bootstrap time, to make sure we know where they are stored. At
* present, all relations that formrdesc is used for are not shared.
*/
relation->rd_rel->relisshared = false;
@ -1231,8 +1226,8 @@ formrdesc(const char *relationName, Oid relationReltype,
* initialize attribute tuple form
*
* Unlike the case with the relation tuple, this data had better be right
* because it will never be replaced. The input values must be
* correctly defined by macros in src/include/catalog/ headers.
* because it will never be replaced. The input values must be correctly
* defined by macros in src/include/catalog/ headers.
*/
relation->rd_att = CreateTemplateTupleDesc(natts, hasoids);
relation->rd_att->tdtypeid = relationReltype;
@ -1361,8 +1356,8 @@ RelationIdGetRelation(Oid relationId)
return rd;
/*
* no reldesc in the cache, so have RelationBuildDesc() build one and
* add it.
* no reldesc in the cache, so have RelationBuildDesc() build one and add
* it.
*/
rd = RelationBuildDesc(relationId, NULL);
if (RelationIsValid(rd))
@ -1454,11 +1449,12 @@ RelationReloadClassinfo(Relation relation)
/* Should be called only for invalidated nailed indexes */
Assert(relation->rd_isnailed && !relation->rd_isvalid &&
relation->rd_rel->relkind == RELKIND_INDEX);
/*
* Read the pg_class row
*
* Don't try to use an indexscan of pg_class_oid_index to reload the
* info for pg_class_oid_index ...
* Don't try to use an indexscan of pg_class_oid_index to reload the info for
* pg_class_oid_index ...
*/
indexOK = (RelationGetRelid(relation) != ClassOidIndexId);
pg_class_tuple = ScanPgRelation(RelationGetRelid(relation), indexOK);
@ -1492,25 +1488,25 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted,
* the next smgr access should reopen the files automatically. This
* ensures that the low-level file access state is updated after, say,
* a vacuum truncation.
* weren't closed already. If the relation is not getting deleted, the
* next smgr access should reopen the files automatically. This ensures
* that the low-level file access state is updated after, say, a vacuum
* truncation.
*/
RelationCloseSmgr(relation);
/*
* Never, never ever blow away a nailed-in system relation, because
* we'd be unable to recover. However, we must reset rd_targblock, in
* case we got called because of a relation cache flush that was
* triggered by VACUUM.
* Never, never ever blow away a nailed-in system relation, because we'd
* be unable to recover. However, we must reset rd_targblock, in case we
* got called because of a relation cache flush that was triggered by
* VACUUM.
*
* If it's a nailed index, then we need to re-read the pg_class row to
* see if its relfilenode changed. We can't necessarily do that here,
* because we might be in a failed transaction. We assume it's okay
* to do it if there are open references to the relcache entry (cf
* notes for AtEOXact_RelationCache). Otherwise just mark the entry
* as possibly invalid, and it'll be fixed when next opened.
* If it's a nailed index, then we need to re-read the pg_class row to see if
* its relfilenode changed. We can't necessarily do that here, because we
* might be in a failed transaction. We assume it's okay to do it if
* there are open references to the relcache entry (cf notes for
* AtEOXact_RelationCache). Otherwise just mark the entry as possibly
* invalid, and it'll be fixed when next opened.
*/
if (relation->rd_isnailed)
{
@ -1542,8 +1538,8 @@ RelationClearRelation(Relation relation, bool rebuild)
* Free all the subsidiary data structures of the relcache entry. We
* cannot free rd_att if we are trying to rebuild the entry, however,
* because pointers to it may be cached in various places. The rule
* manager might also have pointers into the rewrite rules. So to
* begin with, we can only get rid of these fields:
* manager might also have pointers into the rewrite rules. So to begin
* with, we can only get rid of these fields:
*/
FreeTriggerDesc(relation->trigdesc);
if (relation->rd_indextuple)
@ -1558,9 +1554,9 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* If we're really done with the relcache entry, blow it away. But if
* someone is still using it, reconstruct the whole deal without
* moving the physical RelationData record (so that the someone's
* pointer is still valid).
* someone is still using it, reconstruct the whole deal without moving
* the physical RelationData record (so that the someone's pointer is
* still valid).
*/
if (!rebuild)
{
@ -1574,12 +1570,12 @@ RelationClearRelation(Relation relation, bool rebuild)
else
{
/*
* When rebuilding an open relcache entry, must preserve ref count
* and rd_createSubid state. Also attempt to preserve the
* tupledesc and rewrite-rule substructures in place.
* When rebuilding an open relcache entry, must preserve ref count and
* rd_createSubid state. Also attempt to preserve the tupledesc and
* rewrite-rule substructures in place.
*
* Note that this process does not touch CurrentResourceOwner; which
* is good because whatever ref counts the entry may have do not
* Note that this process does not touch CurrentResourceOwner; which is
* good because whatever ref counts the entry may have do not
* necessarily belong to that resource owner.
*/
Oid save_relid = RelationGetRelid(relation);
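
The rebuild branch above takes pains to keep the entry at the same address, since open references may hold pointers into it: caller-visible state is saved, the storage is overwritten with a freshly built copy, and the saved state is restored. A hedged sketch of that idiom on a toy struct (build_fresh is a hypothetical stand-in for the real rebuild work):

    #include <string.h>

    typedef struct Entry
    {
        int refcnt;             /* caller-visible state to preserve */
        int payload;            /* contents rebuilt from scratch */
    } Entry;

    static Entry
    build_fresh(int id)
    {
        Entry e = { 0, id };    /* hypothetical: stands in for a real rebuild */
        return e;
    }

    static void
    rebuild_in_place(Entry *entry, int id)
    {
        Entry tmp = build_fresh(id);
        int   save_refcnt = entry->refcnt;

        memcpy(entry, &tmp, sizeof(Entry)); /* address of *entry never changes */
        entry->refcnt = save_refcnt;        /* restore preserved state */
    }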
@ -1773,8 +1769,8 @@ RelationCacheInvalidate(void)
{
/*
* Add this entry to list of stuff to rebuild in second pass.
* pg_class_oid_index goes on the front of rebuildFirstList,
* other nailed indexes on the back, and everything else into
* pg_class_oid_index goes on the front of rebuildFirstList, other
* nailed indexes on the back, and everything else into
* rebuildList (in no particular order).
*/
if (relation->rd_isnailed &&
@ -1793,9 +1789,9 @@ RelationCacheInvalidate(void)
rebuildList = list_concat(rebuildFirstList, rebuildList);
/*
* Now zap any remaining smgr cache entries. This must happen before
* we start to rebuild entries, since that may involve catalog fetches
* which will re-open catalog files.
* Now zap any remaining smgr cache entries. This must happen before we
* start to rebuild entries, since that may involve catalog fetches which
* will re-open catalog files.
*/
smgrcloseall();
@ -1832,13 +1828,13 @@ AtEOXact_RelationCache(bool isCommit)
/*
* To speed up transaction exit, we want to avoid scanning the relcache
* unless there is actually something for this routine to do. Other
* than the debug-only Assert checks, most transactions don't create
* any work for us to do here, so we keep a static flag that gets set
* if there is anything to do. (Currently, this means either a relation
* is created in the current xact, or an index list is forced.) For
* simplicity, the flag remains set till end of top-level transaction,
* even though we could clear it at subtransaction end in some cases.
* unless there is actually something for this routine to do. Other than
* the debug-only Assert checks, most transactions don't create any work
* for us to do here, so we keep a static flag that gets set if there is
* anything to do. (Currently, this means either a relation is created in
* the current xact, or an index list is forced.) For simplicity, the
* flag remains set till end of top-level transaction, even though we
* could clear it at subtransaction end in some cases.
*/
if (!need_eoxact_work
#ifdef USE_ASSERT_CHECKING
@ -1857,10 +1853,9 @@ AtEOXact_RelationCache(bool isCommit)
* The relcache entry's ref count should be back to its normal
* not-in-a-transaction state: 0 unless it's nailed in cache.
*
* In bootstrap mode, this is NOT true, so don't check it ---
* the bootstrap code expects relations to stay open across
* start/commit transaction calls. (That seems bogus, but it's
* not worth fixing.)
* In bootstrap mode, this is NOT true, so don't check it --- the
* bootstrap code expects relations to stay open across start/commit
* transaction calls. (That seems bogus, but it's not worth fixing.)
*/
#ifdef USE_ASSERT_CHECKING
if (!IsBootstrapProcessingMode())
@ -1939,8 +1934,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
/*
* Is it a relation created in the current subtransaction?
*
* During subcommit, mark it as belonging to the parent, instead.
* During subabort, simply delete the relcache entry.
* During subcommit, mark it as belonging to the parent, instead. During
* subabort, simply delete the relcache entry.
*/
if (relation->rd_createSubid == mySubid)
{
@ -2041,11 +2036,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* create a new tuple descriptor from the one passed in. We do this
* partly to copy it into the cache context, and partly because the
* new relation can't have any defaults or constraints yet; they have
* to be added in later steps, because they require additions to
* multiple system catalogs. We can copy attnotnull constraints here,
* however.
* partly to copy it into the cache context, and partly because the new
* relation can't have any defaults or constraints yet; they have to be
* added in later steps, because they require additions to multiple system
* catalogs. We can copy attnotnull constraints here, however.
*/
rel->rd_att = CreateTupleDescCopy(tupDesc);
has_not_null = false;
@ -2079,9 +2073,9 @@ RelationBuildLocalRelation(const char *relname,
rel->rd_rel->relowner = BOOTSTRAP_SUPERUSERID;
/*
* Insert relation physical and logical identifiers (OIDs) into the
* right places. Note that the physical ID (relfilenode) is initially
* the same as the logical ID (OID).
* Insert relation physical and logical identifiers (OIDs) into the right
* places. Note that the physical ID (relfilenode) is initially the same
* as the logical ID (OID).
*/
rel->rd_rel->relisshared = shared_relation;
@ -2157,8 +2151,8 @@ RelationCacheInitialize(void)
/*
* Try to load the relcache cache file. If successful, we're done for
* now. Otherwise, initialize the cache with pre-made descriptors for
* the critical "nailed-in" system catalogs.
* now. Otherwise, initialize the cache with pre-made descriptors for the
* critical "nailed-in" system catalogs.
*/
if (IsBootstrapProcessingMode() ||
!load_relcache_init_file())
@ -2197,24 +2191,22 @@ RelationCacheInitializePhase2(void)
return;
/*
* If we didn't get the critical system indexes loaded into relcache,
* do so now. These are critical because the catcache depends on them
* for catcache fetches that are done during relcache load. Thus, we
* have an infinite-recursion problem. We can break the recursion by
* doing heapscans instead of indexscans at certain key spots. To
* avoid hobbling performance, we only want to do that until we have
* the critical indexes loaded into relcache. Thus, the flag
* criticalRelcachesBuilt is used to decide whether to do heapscan or
* indexscan at the key spots, and we set it true after we've loaded
* the critical indexes.
* If we didn't get the critical system indexes loaded into relcache, do
* so now. These are critical because the catcache depends on them for
* catcache fetches that are done during relcache load. Thus, we have an
* infinite-recursion problem. We can break the recursion by doing
* heapscans instead of indexscans at certain key spots. To avoid hobbling
* performance, we only want to do that until we have the critical indexes
* loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
* decide whether to do heapscan or indexscan at the key spots, and we set
* it true after we've loaded the critical indexes.
*
* The critical indexes are marked as "nailed in cache", partly to make
* it easy for load_relcache_init_file to count them, but mainly
* because we cannot flush and rebuild them once we've set
* criticalRelcachesBuilt to true. (NOTE: perhaps it would be
* possible to reload them by temporarily setting
* criticalRelcachesBuilt to false again. For now, though, we just
* nail 'em in.)
* The critical indexes are marked as "nailed in cache", partly to make it
* easy for load_relcache_init_file to count them, but mainly because we
* cannot flush and rebuild them once we've set criticalRelcachesBuilt to
* true. (NOTE: perhaps it would be possible to reload them by
* temporarily setting criticalRelcachesBuilt to false again. For now,
* though, we just nail 'em in.)
*/
if (!criticalRelcachesBuilt)
{
@ -2240,12 +2232,12 @@ RelationCacheInitializePhase2(void)
}
/*
* Now, scan all the relcache entries and update anything that might
* be wrong in the results from formrdesc or the relcache cache file.
* If we faked up relcache entries using formrdesc, then read the real
* pg_class rows and replace the fake entries with them. Also, if any
* of the relcache entries have rules or triggers, load that info the
* hard way since it isn't recorded in the cache file.
* Now, scan all the relcache entries and update anything that might be
* wrong in the results from formrdesc or the relcache cache file. If we
* faked up relcache entries using formrdesc, then read the real pg_class
* rows and replace the fake entries with them. Also, if any of the
* relcache entries have rules or triggers, load that info the hard way
* since it isn't recorded in the cache file.
*/
hash_seq_init(&status, RelationIdCache);
@ -2262,7 +2254,7 @@ RelationCacheInitializePhase2(void)
Form_pg_class relp;
htup = SearchSysCache(RELOID,
ObjectIdGetDatum(RelationGetRelid(relation)),
ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
if (!HeapTupleIsValid(htup))
elog(FATAL, "cache lookup failed for relation %u",
@ -2311,11 +2303,10 @@ RelationCacheInitializePhase3(void)
if (needNewCacheFile)
{
/*
* Force all the catcaches to finish initializing and thereby open
* the catalogs and indexes they use. This will preload the
* relcache with entries for all the most important system
* catalogs and indexes, so that the init file will be most useful
* for future backends.
* Force all the catcaches to finish initializing and thereby open the
* catalogs and indexes they use. This will preload the relcache with
* entries for all the most important system catalogs and indexes, so
* that the init file will be most useful for future backends.
*/
InitCatalogCachePhase2();
@ -2349,7 +2340,7 @@ GetPgIndexDescriptor(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
pgindexdesc = CreateTemplateTupleDesc(Natts_pg_index, false);
pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
pgindexdesc->tdtypmod = -1;
for (i = 0; i < Natts_pg_index; i++)
@ -2405,7 +2396,7 @@ AttrDefaultFetch(Relation relation)
continue;
if (attrdef[i].adbin != NULL)
elog(WARNING, "multiple attrdef records found for attr %s of rel %s",
NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
found++;
@ -2415,12 +2406,12 @@ AttrDefaultFetch(Relation relation)
adrel->rd_att, &isnull);
if (isnull)
elog(WARNING, "null adbin for attr %s of rel %s",
NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
attrdef[i].adbin = MemoryContextStrdup(CacheMemoryContext,
DatumGetCString(DirectFunctionCall1(textout,
val)));
DatumGetCString(DirectFunctionCall1(textout,
val)));
break;
}
@ -2472,7 +2463,7 @@ CheckConstraintFetch(Relation relation)
RelationGetRelationName(relation));
check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
NameStr(conform->conname));
NameStr(conform->conname));
/* Grab and test conbin is actually set */
val = fastgetattr(htup,
@ -2483,8 +2474,8 @@ CheckConstraintFetch(Relation relation)
RelationGetRelationName(relation));
check[found].ccbin = MemoryContextStrdup(CacheMemoryContext,
DatumGetCString(DirectFunctionCall1(textout,
val)));
DatumGetCString(DirectFunctionCall1(textout,
val)));
found++;
}
@ -2514,7 +2505,7 @@ CheckConstraintFetch(Relation relation)
*
* Since shared cache inval causes the relcache's copy of the list to go away,
* we return a copy of the list palloc'd in the caller's context. The caller
* may list_free() the returned list after scanning it. This is necessary
* may list_free() the returned list after scanning it. This is necessary
* since the caller will typically be doing syscache lookups on the relevant
* indexes, and syscache lookup could cause SI messages to be processed!
*
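To make that contract concrete, here is a minimal caller-side sketch (hedged: the relation variable and the loop body are illustrative, while RelationGetIndexList, foreach, lfirst_oid, and list_free are the APIs the comment above describes):

	List	   *indexoidlist = RelationGetIndexList(relation);
	ListCell   *l;

	foreach(l, indexoidlist)
	{
		Oid			indexoid = lfirst_oid(l);

		/*
		 * Syscache lookups on indexoid are safe here: even if SI message
		 * processing flushes the relcache's own copy of the list, this
		 * caller-context copy survives.
		 */
		elog(DEBUG1, "considering index %u", indexoid);
	}

	list_free(indexoidlist);	/* we own this copy, so freeing is allowed */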
@ -2539,10 +2530,10 @@ RelationGetIndexList(Relation relation)
return list_copy(relation->rd_indexlist);
/*
* We build the list we intend to return (in the caller's context)
* while doing the scan. After successfully completing the scan, we
* copy that list into the relcache entry. This avoids cache-context
* memory leakage if we get some sort of error partway through.
* We build the list we intend to return (in the caller's context) while
* doing the scan. After successfully completing the scan, we copy that
* list into the relcache entry. This avoids cache-context memory leakage
* if we get some sort of error partway through.
*/
result = NIL;
oidIndex = InvalidOid;
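That build-then-copy discipline condenses to the following sketch (assumptions: the scan body is elided, and rd_indexlist/rd_indexvalid follow the relcache entry fields used elsewhere in this file):

	List	   *result = NIL;	/* accumulated in the CALLER's context */
	MemoryContext oldcxt;

	/*
	 * ... scan pg_index here, doing result = lappend_oid(result, indexoid);
	 * an elog(ERROR) during the scan leaks only caller-context memory,
	 * never CacheMemoryContext ...
	 */

	/* scan succeeded: now copy the finished list into the cache context */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
	relation->rd_indexlist = list_copy(result);
	relation->rd_indexvalid = 1;
	MemoryContextSwitchTo(oldcxt);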
@ -2662,9 +2653,9 @@ RelationGetOidIndex(Relation relation)
List *ilist;
/*
* If relation doesn't have OIDs at all, caller is probably confused.
* (We could just silently return InvalidOid, but it seems better to
* throw an assertion.)
* If relation doesn't have OIDs at all, caller is probably confused. (We
* could just silently return InvalidOid, but it seems better to throw an
* assertion.)
*/
Assert(relation->rd_rel->relhasoids);
@ -2707,10 +2698,9 @@ RelationGetIndexExpressions(Relation relation)
return NIL;
/*
* We build the tree we intend to return in the caller's context.
* After successfully completing the work, we copy it into the
* relcache entry. This avoids problems if we get some sort of error
* partway through.
* We build the tree we intend to return in the caller's context. After
* successfully completing the work, we copy it into the relcache entry.
* This avoids problems if we get some sort of error partway through.
*/
exprsDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indexprs,
@ -2775,10 +2765,9 @@ RelationGetIndexPredicate(Relation relation)
return NIL;
/*
* We build the tree we intend to return in the caller's context.
* After successfully completing the work, we copy it into the
* relcache entry. This avoids problems if we get some sort of error
* partway through.
* We build the tree we intend to return in the caller's context. After
* successfully completing the work, we copy it into the relcache entry.
* This avoids problems if we get some sort of error partway through.
*/
predDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indpred,
@ -2795,8 +2784,8 @@ RelationGetIndexPredicate(Relation relation)
* will be comparing it to similarly-processed qual clauses, and may fail
* to detect valid matches without this. This must match the processing
* done to qual clauses in preprocess_expression()! (We can skip the
* stuff involving subqueries, however, since we don't allow any in
* index predicates.)
* stuff involving subqueries, however, since we don't allow any in index
* predicates.)
*/
result = (List *) eval_const_expressions((Node *) result);
@ -2897,9 +2886,9 @@ load_relcache_init_file(void)
}
/*
* Read the index relcache entries from the file. Note we will not
* enter any of them into the cache if the read fails partway through;
* this helps to guard against broken init files.
* Read the index relcache entries from the file. Note we will not enter
* any of them into the cache if the read fails partway through; this
* helps to guard against broken init files.
*/
max_rels = 100;
rels = (Relation *) palloc(max_rels * sizeof(Relation));
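The guard amounts to funneling every short or implausible read to a single bail-out label; a hedged sketch of the shape (fp and the read_failed label belong to the surrounding function, and the per-entry body is elided):

	for (;;)
	{
		size_t		nread;
		int			len;

		/* each entry starts with a length word; clean EOF here is normal */
		nread = fread(&len, 1, sizeof(len), fp);
		if (nread != sizeof(len))
		{
			if (nread == 0)
				break;			/* end of init file */
			goto read_failed;	/* partial read: treat file as broken */
		}
		if (len <= 0)
			goto read_failed;	/* implausible length: treat file as broken */

		/* ... read len more bytes and build rels[num_rels++] ... */
	}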
@ -3086,10 +3075,10 @@ load_relcache_init_file(void)
/*
* Rules and triggers are not saved (mainly because the internal
* format is complex and subject to change). They must be rebuilt
* if needed by RelationCacheInitializePhase2. This is not
* expected to be a big performance hit since few system catalogs
* have such. Ditto for index expressions and predicates.
* format is complex and subject to change). They must be rebuilt if
* needed by RelationCacheInitializePhase2. This is not expected to
* be a big performance hit since few system catalogs have such.
* Ditto for index expressions and predicates.
*/
rel->rd_rules = NULL;
rel->rd_rulescxt = NULL;
@ -3114,17 +3103,17 @@ load_relcache_init_file(void)
/*
* Recompute lock and physical addressing info. This is needed in
* case the pg_internal.init file was copied from some other
* database by CREATE DATABASE.
* case the pg_internal.init file was copied from some other database
* by CREATE DATABASE.
*/
RelationInitLockInfo(rel);
RelationInitPhysicalAddr(rel);
}
/*
* We reached the end of the init file without apparent problem. Did
* we get the right number of nailed items? (This is a useful
* crosscheck in case the set of critical rels or indexes changes.)
* We reached the end of the init file without apparent problem. Did we
* get the right number of nailed items? (This is a useful crosscheck in
* case the set of critical rels or indexes changes.)
*/
if (nailed_rels != NUM_CRITICAL_RELS ||
nailed_indexes != NUM_CRITICAL_INDEXES)
@ -3150,9 +3139,9 @@ load_relcache_init_file(void)
return true;
/*
* init file is broken, so do it the hard way. We don't bother trying
* to free the clutter we just allocated; it's not in the relcache so
* it won't hurt.
* init file is broken, so do it the hard way. We don't bother trying to
* free the clutter we just allocated; it's not in the relcache so it
* won't hurt.
*/
read_failed:
pfree(rels);
@ -3180,8 +3169,8 @@ write_relcache_init_file(void)
/*
* We must write a temporary file and rename it into place. Otherwise,
* another backend starting at about the same time might crash trying
* to read the partially-complete file.
* another backend starting at about the same time might crash trying to
* read the partially-complete file.
*/
snprintf(tempfilename, sizeof(tempfilename), "%s/%s.%d",
DatabasePath, RELCACHE_INIT_FILENAME, MyProcPid);
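In miniature, this is the classic write-temp-then-rename idiom (a sketch assuming POSIX rename() semantics; the name-building calls mirror the ones just above):

	char		tempfilename[MAXPGPATH];
	char		finalfilename[MAXPGPATH];

	/* per-backend temp name, so concurrent writers never collide */
	snprintf(tempfilename, sizeof(tempfilename), "%s/%s.%d",
			 DatabasePath, RELCACHE_INIT_FILENAME, MyProcPid);
	snprintf(finalfilename, sizeof(finalfilename), "%s/%s",
			 DatabasePath, RELCACHE_INIT_FILENAME);

	/* ... write the complete data to tempfilename and close it ... */

	/* readers see either the old complete file or the new complete file */
	if (rename(tempfilename, finalfilename) < 0)
		unlink(tempfilename);	/* noncritical: just discard the temp file */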
@ -3201,7 +3190,7 @@ write_relcache_init_file(void)
(errcode_for_file_access(),
errmsg("could not create relation-cache initialization file \"%s\": %m",
tempfilename),
errdetail("Continuing anyway, but there's something wrong.")));
errdetail("Continuing anyway, but there's something wrong.")));
return;
}
@ -3308,11 +3297,11 @@ write_relcache_init_file(void)
/*
* Now we have to check whether the data we've so painstakingly
* accumulated is already obsolete due to someone else's
* just-committed catalog changes. If so, we just delete the temp
* file and leave it to the next backend to try again. (Our own
* relcache entries will be updated by SI message processing, but we
* can't be sure whether what we wrote out was up-to-date.)
* accumulated is already obsolete due to someone else's just-committed
* catalog changes. If so, we just delete the temp file and leave it to
* the next backend to try again. (Our own relcache entries will be
* updated by SI message processing, but we can't be sure whether what we
* wrote out was up-to-date.)
*
* This mustn't run concurrently with RelationCacheInitFileInvalidate, so
* grab a serialization lock for the duration.
@ -3323,8 +3312,8 @@ write_relcache_init_file(void)
AcceptInvalidationMessages();
/*
* If we have received any SI relcache invals since backend start,
* assume we may have written out-of-date data.
* If we have received any SI relcache invals since backend start, assume
* we may have written out-of-date data.
*/
if (relcacheInvalsReceived == 0L)
{
@ -3332,10 +3321,10 @@ write_relcache_init_file(void)
* OK, rename the temp file to its final name, deleting any
* previously-existing init file.
*
* Note: a failure here is possible under Cygwin, if some other
* backend is holding open an unlinked-but-not-yet-gone init file.
* So treat this as a noncritical failure; just remove the useless
* temp file on failure.
* Note: a failure here is possible under Cygwin, if some other backend
* is holding open an unlinked-but-not-yet-gone init file. So treat
* this as a noncritical failure; just remove the useless temp file on
* failure.
*/
if (rename(tempfilename, finalfilename) < 0)
unlink(tempfilename);
@ -3401,11 +3390,10 @@ RelationCacheInitFileInvalidate(bool beforeSend)
/*
* We need to interlock this against write_relcache_init_file, to
* guard against possibility that someone renames a new-but-
* already-obsolete init file into place just after we unlink.
* With the interlock, it's certain that write_relcache_init_file
* will notice our SI inval message before renaming into place, or
* else that we will execute second and successfully unlink the
* file.
* already-obsolete init file into place just after we unlink. With
* the interlock, it's certain that write_relcache_init_file will
* notice our SI inval message before renaming into place, or else
* that we will execute second and successfully unlink the file.
*/
LWLockAcquire(RelCacheInitLock, LW_EXCLUSIVE);
unlink(initfilename);


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.100 2005/06/28 05:09:01 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.101 2005/10/15 02:49:32 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
@ -56,7 +56,7 @@
Add your entry to the cacheinfo[] array below. All cache lists are
alphabetical, so add it in the proper place. Specify the relation
OID, index OID, number of keys, and key attribute numbers. If the
OID, index OID, number of keys, and key attribute numbers. If the
relation contains tuples that are associated with a particular relation
(for example, its attributes, rules, triggers, etc) then specify the
attribute number that contains the OID of the associated relation.
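As an illustration of those instructions, a hypothetical single-key cache over an invented catalog pg_widget would be declared in the same shape as the real entries below (every pg_widget identifier here is made up):

	{WidgetRelationId,			/* WIDGETNAME (hypothetical) */
		WidgetNameIndexId,		/* unique index on (wgtname) */
		0,						/* tuples not tied to another relation */
		1,						/* one lookup key */
		{
			Anum_pg_widget_wgtname,
			0,
			0,
			0
		}},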
@ -92,7 +92,7 @@ struct cachedesc
};
static const struct cachedesc cacheinfo[] = {
{AggregateRelationId, /* AGGFNOID */
{AggregateRelationId, /* AGGFNOID */
AggregateFnoidIndexId,
0,
1,
@ -102,7 +102,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{AccessMethodRelationId, /* AMNAME */
{AccessMethodRelationId, /* AMNAME */
AmNameIndexId,
0,
1,
@ -112,7 +112,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{AccessMethodRelationId, /* AMOID */
{AccessMethodRelationId, /* AMOID */
AmOidIndexId,
0,
1,
@ -152,7 +152,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_amproc_amprocnum,
0
}},
{AttributeRelationId, /* ATTNAME */
{AttributeRelationId, /* ATTNAME */
AttributeRelidNameIndexId,
Anum_pg_attribute_attrelid,
2,
@ -162,7 +162,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{AttributeRelationId, /* ATTNUM */
{AttributeRelationId, /* ATTNUM */
AttributeRelidNumIndexId,
Anum_pg_attribute_attrelid,
2,
@ -172,7 +172,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{AuthMemRelationId, /* AUTHMEMMEMROLE */
{AuthMemRelationId, /* AUTHMEMMEMROLE */
AuthMemMemRoleIndexId,
0,
2,
@ -182,7 +182,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{AuthMemRelationId, /* AUTHMEMROLEMEM */
{AuthMemRelationId, /* AUTHMEMROLEMEM */
AuthMemRoleMemIndexId,
0,
2,
@ -192,7 +192,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{AuthIdRelationId, /* AUTHNAME */
{AuthIdRelationId, /* AUTHNAME */
AuthIdRolnameIndexId,
0,
1,
@ -202,7 +202,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{AuthIdRelationId, /* AUTHOID */
{AuthIdRelationId, /* AUTHOID */
AuthIdOidIndexId,
0,
1,
@ -213,7 +213,7 @@ static const struct cachedesc cacheinfo[] = {
0
}},
{
CastRelationId, /* CASTSOURCETARGET */
CastRelationId, /* CASTSOURCETARGET */
CastSourceTargetIndexId,
0,
2,
@ -223,7 +223,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{OperatorClassRelationId, /* CLAAMNAMENSP */
{OperatorClassRelationId, /* CLAAMNAMENSP */
OpclassAmNameNspIndexId,
0,
3,
@ -233,7 +233,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_opclass_opcnamespace,
0
}},
{OperatorClassRelationId, /* CLAOID */
{OperatorClassRelationId, /* CLAOID */
OpclassOidIndexId,
0,
1,
@ -243,7 +243,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{ConversionRelationId, /* CONDEFAULT */
{ConversionRelationId, /* CONDEFAULT */
ConversionDefaultIndexId,
0,
4,
@ -253,7 +253,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_conversion_contoencoding,
ObjectIdAttributeNumber,
}},
{ConversionRelationId, /* CONNAMENSP */
{ConversionRelationId, /* CONNAMENSP */
ConversionNameNspIndexId,
0,
2,
@ -263,7 +263,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{ConversionRelationId, /* CONOID */
{ConversionRelationId, /* CONOID */
ConversionOidIndexId,
0,
1,
@ -273,7 +273,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{IndexRelationId, /* INDEXRELID */
{IndexRelationId, /* INDEXRELID */
IndexRelidIndexId,
Anum_pg_index_indrelid,
1,
@ -283,7 +283,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{InheritsRelationId, /* INHRELID */
{InheritsRelationId, /* INHRELID */
InheritsRelidSeqnoIndexId,
Anum_pg_inherits_inhrelid,
2,
@ -293,7 +293,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{LanguageRelationId, /* LANGNAME */
{LanguageRelationId, /* LANGNAME */
LanguageNameIndexId,
0,
1,
@ -303,7 +303,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{LanguageRelationId, /* LANGOID */
{LanguageRelationId, /* LANGOID */
LanguageOidIndexId,
0,
1,
@ -313,7 +313,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{NamespaceRelationId, /* NAMESPACENAME */
{NamespaceRelationId, /* NAMESPACENAME */
NamespaceNameIndexId,
0,
1,
@ -323,7 +323,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{NamespaceRelationId, /* NAMESPACEOID */
{NamespaceRelationId, /* NAMESPACEOID */
NamespaceOidIndexId,
0,
1,
@ -333,7 +333,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{OperatorRelationId, /* OPERNAMENSP */
{OperatorRelationId, /* OPERNAMENSP */
OperatorNameNspIndexId,
0,
4,
@ -343,7 +343,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_operator_oprright,
Anum_pg_operator_oprnamespace
}},
{OperatorRelationId, /* OPEROID */
{OperatorRelationId, /* OPEROID */
OperatorOidIndexId,
0,
1,
@ -353,7 +353,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{ProcedureRelationId, /* PROCNAMEARGSNSP */
{ProcedureRelationId, /* PROCNAMEARGSNSP */
ProcedureNameArgsNspIndexId,
0,
3,
@ -363,7 +363,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_proc_pronamespace,
0
}},
{ProcedureRelationId, /* PROCOID */
{ProcedureRelationId, /* PROCOID */
ProcedureOidIndexId,
0,
1,
@ -373,7 +373,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{RelationRelationId, /* RELNAMENSP */
{RelationRelationId, /* RELNAMENSP */
ClassNameNspIndexId,
ObjectIdAttributeNumber,
2,
@ -383,7 +383,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{RelationRelationId, /* RELOID */
{RelationRelationId, /* RELOID */
ClassOidIndexId,
ObjectIdAttributeNumber,
1,
@ -393,7 +393,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{RewriteRelationId, /* RULERELNAME */
{RewriteRelationId, /* RULERELNAME */
RewriteRelRulenameIndexId,
Anum_pg_rewrite_ev_class,
2,
@ -403,7 +403,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{StatisticRelationId, /* STATRELATT */
{StatisticRelationId, /* STATRELATT */
StatisticRelidAttnumIndexId,
Anum_pg_statistic_starelid,
2,
@ -413,7 +413,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{TypeRelationId, /* TYPENAMENSP */
{TypeRelationId, /* TYPENAMENSP */
TypeNameNspIndexId,
Anum_pg_type_typrelid,
2,
@ -423,7 +423,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
{TypeRelationId, /* TYPEOID */
{TypeRelationId, /* TYPEOID */
TypeOidIndexId,
Anum_pg_type_typrelid,
1,
@ -435,7 +435,8 @@ static const struct cachedesc cacheinfo[] = {
}}
};
static CatCache *SysCache[lengthof(cacheinfo)];
static CatCache *SysCache[
lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
@ -697,10 +698,10 @@ SysCacheGetAttr(int cacheId, HeapTuple tup,
bool *isNull)
{
/*
* We just need to get the TupleDesc out of the cache entry, and then
* we can apply heap_getattr(). We expect that the cache control data
* is currently valid --- if the caller recently fetched the tuple,
* then it should be.
* We just need to get the TupleDesc out of the cache entry, and then we
* can apply heap_getattr(). We expect that the cache control data is
* currently valid --- if the caller recently fetched the tuple, then it
* should be.
*/
if (cacheId < 0 || cacheId >= SysCacheSize)
elog(ERROR, "invalid cache id: %d", cacheId);
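A hedged caller-side sketch of the intended usage (PROCOID and Anum_pg_proc_prosrc are real catalog identifiers; funcid is assumed to be supplied by the caller):

	HeapTuple	tup;
	Datum		datum;
	bool		isnull;

	tup = SearchSysCache(PROCOID,
						 ObjectIdGetDatum(funcid),
						 0, 0, 0);
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for function %u", funcid);

	/* the TupleDesc comes from the cache entry; no Relation is opened */
	datum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_prosrc, &isnull);

	ReleaseSysCache(tup);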


@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.14 2005/05/29 04:23:06 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.15 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -135,9 +135,9 @@ lookup_type_cache(Oid type_id, int flags)
if (typentry == NULL)
{
/*
* If we didn't find one, we want to make one. But first look up
* the pg_type row, just to make sure we don't make a cache entry
* for an invalid type OID.
* If we didn't find one, we want to make one. But first look up the
* pg_type row, just to make sure we don't make a cache entry for an
* invalid type OID.
*/
HeapTuple tp;
Form_pg_type typtup;
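Caller-side, the lookup typically looks like this sketch (type_id is assumed; TYPECACHE_EQ_OPR is one of the real flag bits, and the error report mirrors the style used in this file):

	TypeCacheEntry *typentry;

	/* request the equality operator; the entry is cached for later reuse */
	typentry = lookup_type_cache(type_id, TYPECACHE_EQ_OPR);

	if (!OidIsValid(typentry->eq_opr))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_FUNCTION),
				 errmsg("could not identify an equality operator for type %s",
						format_type_be(type_id))));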
@ -190,8 +190,8 @@ lookup_type_cache(Oid type_id, int flags)
{
/*
* If we find a btree opclass where previously we only found a
* hash opclass, forget the hash equality operator so we can
* use the btree operator instead.
* hash opclass, forget the hash equality operator so we can use
* the btree operator instead.
*/
typentry->eq_opr = InvalidOid;
typentry->eq_opr_finfo.fn_oid = InvalidOid;
@ -224,7 +224,7 @@ lookup_type_cache(Oid type_id, int flags)
if (typentry->btree_opc != InvalidOid)
typentry->gt_opr = get_opclass_member(typentry->btree_opc,
InvalidOid,
BTGreaterStrategyNumber);
BTGreaterStrategyNumber);
}
if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
typentry->cmp_proc == InvalidOid)
@ -238,9 +238,9 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Set up fmgr lookup info as requested
*
* Note: we tell fmgr the finfo structures live in CacheMemoryContext,
* which is not quite right (they're really in DynaHashContext) but
* this will do for our purposes.
* Note: we tell fmgr the finfo structures live in CacheMemoryContext, which
* is not quite right (they're really in DynaHashContext) but this will do
* for our purposes.
*/
if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
typentry->eq_opr_finfo.fn_oid == InvalidOid &&
@ -277,9 +277,9 @@ lookup_type_cache(Oid type_id, int flags)
Assert(rel->rd_rel->reltype == typentry->type_id);
/*
* Notice that we simply store a link to the relcache's tupdesc.
* Since we are relying on relcache to detect cache flush events,
* there's not a lot of point to maintaining an independent copy.
* Notice that we simply store a link to the relcache's tupdesc. Since
* we are relying on relcache to detect cache flush events, there's
* not a lot of point to maintaining an independent copy.
*/
typentry->tupDesc = RelationGetDescr(rel);
@ -316,12 +316,11 @@ lookup_default_opclass(Oid type_id, Oid am_id)
* (either exactly or binary-compatibly, but prefer an exact match).
*
* We could find more than one binary-compatible match, in which case we
* require the user to specify which one he wants. If we find more
* than one exact match, then someone put bogus entries in pg_opclass.
* require the user to specify which one he wants. If we find more than
* one exact match, then someone put bogus entries in pg_opclass.
*
* This is the same logic as GetDefaultOpClass() in indexcmds.c, except
* that we consider all opclasses, regardless of the current search
* path.
* This is the same logic as GetDefaultOpClass() in indexcmds.c, except that
* we consider all opclasses, regardless of the current search path.
*/
rel = heap_open(OperatorClassRelationId, AccessShareLock);
@ -361,8 +360,8 @@ lookup_default_opclass(Oid type_id, Oid am_id)
if (nexact != 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple default operator classes for data type %s",
format_type_be(type_id))));
errmsg("there are multiple default operator classes for data type %s",
format_type_be(type_id))));
if (ncompatible == 1)
return compatibleOid;
@ -506,7 +505,7 @@ assign_record_type_typmod(TupleDesc tupDesc)
int32 newlen = RecordCacheArrayLen * 2;
RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
newlen * sizeof(TupleDesc));
newlen * sizeof(TupleDesc));
RecordCacheArrayLen = newlen;
}