pgindent run on all C files. Java run to follow. initdb/regression
tests pass.
src/backend/utils/cache/catcache.c (30 lines changed)
@@ -8,7 +8,7 @@
   *
   *
   * IDENTIFICATION
-  * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.83 2001/10/06 23:21:44 tgl Exp $
+  * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.84 2001/10/25 05:49:46 momjian Exp $
   *
   *-------------------------------------------------------------------------
   */
@@ -30,13 +30,13 @@
  #include "utils/syscache.h"


- /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
+ /* #define CACHEDEBUG *//* turns DEBUG elogs on */

  /*
   * Constants related to size of the catcache.
   *
   * NCCBUCKETS should be prime and must be less than 64K (because
-  * SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
+  * SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
   * practice it should be a lot less, anyway, to avoid chewing up too much
   * space on hash bucket headers.
   *
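Aside: the comment in the hunk above explains why the bucket count must be prime and below 64K — the hash bucket index has to fit in a uint16 inside a shared-invalidation message. A small standalone sketch of that constraint follows; it is not PostgreSQL's catcache code, and both the toy hash function and the bucket count 257 are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bucket count: prime and well below 64K (an assumption, not the real NCCBUCKETS). */
#define EXAMPLE_NCCBUCKETS 257

/* Toy string hash; PostgreSQL uses its own hashing, this is only for the sketch. */
static uint32_t toy_hash(const char *key)
{
	uint32_t h = 5381;

	while (*key)
		h = h * 33 + (unsigned char) *key++;
	return h;
}

int main(void)
{
	const char *key = "pg_class";
	uint32_t h = toy_hash(key);

	/*
	 * Because the bucket count is below 64K, the bucket index always fits
	 * in a uint16, which is what lets an invalidation message carry it in
	 * a 16-bit field.
	 */
	uint16_t bucket = (uint16_t) (h % EXAMPLE_NCCBUCKETS);

	printf("key=%s hash=%u bucket=%u\n", key, h, (unsigned) bucket);
	return 0;
}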
@@ -642,13 +642,13 @@ CatalogCacheFlushRelation(Oid relId)
  tupRelid = ct->tuple.t_data->t_oid;
  else
  {
- bool isNull;
+ bool isNull;

  tupRelid = DatumGetObjectId(
- fastgetattr(&ct->tuple,
- cache->cc_reloidattr,
- cache->cc_tupdesc,
- &isNull));
+ fastgetattr(&ct->tuple,
+ cache->cc_reloidattr,
+ cache->cc_tupdesc,
+ &isNull));
  Assert(!isNull);
  }

@@ -707,8 +707,8 @@ InitCatCache(int id,
  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

  /*
-  * if first time through, initialize the cache group header,
-  * including global LRU list header
+  * if first time through, initialize the cache group header, including
+  * global LRU list header
   */
  if (CacheHdr == NULL)
  {
@@ -740,7 +740,7 @@ InitCatCache(int id,
  cp->cc_relname = relname;
  cp->cc_indname = indname;
  cp->cc_reloidattr = reloidattr;
- cp->cc_relisshared = false; /* temporary */
+ cp->cc_relisshared = false; /* temporary */
  cp->cc_tupdesc = (TupleDesc) NULL;
  cp->cc_ntup = 0;
  cp->cc_size = NCCBUCKETS;
@@ -749,8 +749,8 @@ InitCatCache(int id,
  cp->cc_key[i] = key[i];

  /*
-  * new cache is initialized as far as we can go for now.
-  * print some debugging information, if appropriate.
+  * new cache is initialized as far as we can go for now. print some
+  * debugging information, if appropriate.
   */
  InitCatCache_DEBUG1;

@@ -1105,9 +1105,7 @@ ReleaseCatCache(HeapTuple tuple)
  && ct->dead
  #endif
  )
- {
  CatCacheRemoveCTup(ct->my_cache, ct);
- }
  }

  /*
@@ -1141,7 +1139,7 @@ ReleaseCatCache(HeapTuple tuple)
  void
  PrepareToInvalidateCacheTuple(Relation relation,
  HeapTuple tuple,
- void (*function) (int, Index, ItemPointer, Oid))
+ void (*function) (int, Index, ItemPointer, Oid))
  {
  CatCache *ccp;

src/backend/utils/cache/inval.c (48 lines changed)
@@ -22,14 +22,14 @@
   * second lives till end of transaction. Finally, we need a third list of
   * all tuples outdated in the current transaction; if we commit, we send
   * those invalidation events to all other backends (via the SI message queue)
-  * so that they can flush obsolete entries from their caches. This list
+  * so that they can flush obsolete entries from their caches. This list
   * definitely can't be processed until after we commit, otherwise the other
   * backends won't see our updated tuples as good.
   *
   * We do not need to register EVERY tuple operation in this way, just those
-  * on tuples in relations that have associated catcaches. We do, however,
+  * on tuples in relations that have associated catcaches. We do, however,
   * have to register every operation on every tuple that *could* be in a
-  * catcache, whether or not it currently is in our cache. Also, if the
+  * catcache, whether or not it currently is in our cache. Also, if the
   * tuple is in a relation that has multiple catcaches, we need to register
   * an invalidation message for each such catcache. catcache.c's
   * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
@@ -56,7 +56,7 @@
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
-  * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.45 2001/06/19 19:42:16 tgl Exp $
+  * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.46 2001/10/25 05:49:46 momjian Exp $
   *
   *-------------------------------------------------------------------------
   */
@@ -74,15 +74,15 @@
  /*
   * To minimize palloc traffic, we keep pending requests in successively-
   * larger chunks (a slightly more sophisticated version of an expansible
-  * array). All request types can be stored as SharedInvalidationMessage
+  * array). All request types can be stored as SharedInvalidationMessage
   * records.
   */
  typedef struct InvalidationChunk
  {
- struct InvalidationChunk *next; /* list link */
+ struct InvalidationChunk *next; /* list link */
  int nitems; /* # items currently stored in chunk */
  int maxitems; /* size of allocated array in this chunk */
- SharedInvalidationMessage msgs[1]; /* VARIABLE LENGTH ARRAY */
+ SharedInvalidationMessage msgs[1]; /* VARIABLE LENGTH ARRAY */
  } InvalidationChunk; /* VARIABLE LENGTH STRUCTURE */

  typedef struct InvalidationListHeader
@@ -148,7 +148,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
  chunk = (InvalidationChunk *)
  MemoryContextAlloc(TopTransactionContext,
  sizeof(InvalidationChunk) +
- (FIRSTCHUNKSIZE-1) * sizeof(SharedInvalidationMessage));
+ (FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
  chunk->nitems = 0;
  chunk->maxitems = FIRSTCHUNKSIZE;
  chunk->next = *listHdr;
@@ -157,12 +157,12 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
  else if (chunk->nitems >= chunk->maxitems)
  {
  /* Need another chunk; double size of last chunk */
- int chunksize = 2 * chunk->maxitems;
+ int chunksize = 2 * chunk->maxitems;

  chunk = (InvalidationChunk *)
  MemoryContextAlloc(TopTransactionContext,
  sizeof(InvalidationChunk) +
- (chunksize-1) * sizeof(SharedInvalidationMessage));
+ (chunksize - 1) *sizeof(SharedInvalidationMessage));
  chunk->nitems = 0;
  chunk->maxitems = chunksize;
  chunk->next = *listHdr;
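Aside: the two hunks above show the allocation pattern the file-header comment describes — pending messages accumulate in chunks, the first chunk holding FIRSTCHUNKSIZE entries and each further chunk doubling the previous size, with the payload declared as a one-element array and over-allocated. A rough standalone sketch of that pattern follows, using plain malloc instead of MemoryContextAlloc and an int payload standing in for SharedInvalidationMessage; the starting size of 16 is an assumption, not the real FIRSTCHUNKSIZE.

#include <stdlib.h>
#include <stdio.h>

#define EXAMPLE_FIRSTCHUNKSIZE 16	/* assumed starting size, for illustration only */

typedef struct ExampleChunk
{
	struct ExampleChunk *next;	/* list link */
	int			nitems;			/* # items currently stored in chunk */
	int			maxitems;		/* size of allocated array in this chunk */
	int			msgs[1];		/* variable length array (over-allocated) */
} ExampleChunk;

/* Add one message, growing the chunk list the way the hunks above do. */
static void
add_message(ExampleChunk **listHdr, int msg)
{
	ExampleChunk *chunk = *listHdr;

	if (chunk == NULL || chunk->nitems >= chunk->maxitems)
	{
		/* Need a chunk: the first is FIRSTCHUNKSIZE, later ones double. */
		int			chunksize = chunk ? 2 * chunk->maxitems : EXAMPLE_FIRSTCHUNKSIZE;

		chunk = malloc(sizeof(ExampleChunk) +
					   (chunksize - 1) * sizeof(int));
		chunk->nitems = 0;
		chunk->maxitems = chunksize;
		chunk->next = *listHdr;
		*listHdr = chunk;
	}
	chunk->msgs[chunk->nitems++] = msg;
}

int
main(void)
{
	ExampleChunk *list = NULL;
	int			i;

	for (i = 0; i < 100; i++)
		add_message(&list, i);

	while (list != NULL)
	{
		ExampleChunk *next = list->next;

		printf("chunk capacity %d, items %d\n", list->maxitems, list->nitems);
		free(list);
		list = next;
	}
	return 0;
}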
@@ -279,7 +279,10 @@ DiscardInvalidationMessages(InvalidationListHeader *hdr, bool physicalFree)
  }
  else
  {
- /* Assume the storage will go away at xact end, just reset pointers */
+ /*
+  * Assume the storage will go away at xact end, just reset
+  * pointers
+  */
  hdr->cclist = NULL;
  hdr->rclist = NULL;
  }
@@ -421,7 +424,7 @@ InvalidateSystemCaches(void)
  static void
  PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
  void (*CacheIdRegisterFunc) (int, Index,
- ItemPointer, Oid),
+ ItemPointer, Oid),
  void (*RelationIdRegisterFunc) (Oid, Oid))
  {
  Oid tupleRelId;
@@ -460,12 +463,12 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
   * Yes. We need to register a relcache invalidation event for the
   * relation identified by relationId.
   *
-  * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
-  * even if the rel in question is shared. This essentially means that
-  * only backends in this same database will react to the relcache flush
-  * request. This is in fact appropriate, since only those backends could
-  * see our pg_class or pg_attribute change anyway. It looks a bit ugly
-  * though.
+  * KLUGE ALERT: we always send the relcache event with MyDatabaseId, even
+  * if the rel in question is shared. This essentially means that only
+  * backends in this same database will react to the relcache flush
+  * request. This is in fact appropriate, since only those backends
+  * could see our pg_class or pg_attribute change anyway. It looks a
+  * bit ugly though.
   */
  (*RelationIdRegisterFunc) (MyDatabaseId, relationId);
  }
@@ -498,7 +501,7 @@ AcceptInvalidationMessages(void)
   * If isCommit, we must send out the messages in our GlobalInvalidMsgs list
   * to the shared invalidation message queue. Note that these will be read
   * not only by other backends, but also by our own backend at the next
-  * transaction start (via AcceptInvalidationMessages). Therefore, it's okay
+  * transaction start (via AcceptInvalidationMessages). Therefore, it's okay
   * to discard any pending LocalInvalidMsgs, since these will be redundant
   * with the global list.
   *
@@ -538,7 +541,7 @@ AtEOXactInvalidationMessages(bool isCommit)
   * in a transaction.
   *
   * Here, we send no messages to the shared queue, since we don't know yet if
-  * we will commit. But we do need to locally process the LocalInvalidMsgs
+  * we will commit. But we do need to locally process the LocalInvalidMsgs
   * list, so as to flush our caches of any tuples we have outdated in the
   * current command.
   *
@@ -563,9 +566,10 @@ CommandEndInvalidationMessages(bool isCommit)
  ProcessInvalidationMessages(&RollbackMsgs,
  LocalExecuteInvalidationMessage);
  }
+
  /*
-  * LocalInvalidMsgs list is not interesting anymore, so flush it
-  * (for real). Do *not* clear GlobalInvalidMsgs or RollbackMsgs.
+  * LocalInvalidMsgs list is not interesting anymore, so flush it (for
+  * real). Do *not* clear GlobalInvalidMsgs or RollbackMsgs.
   */
  DiscardInvalidationMessages(&LocalInvalidMsgs, true);
  }
src/backend/utils/cache/lsyscache.c (49 lines changed)
@@ -7,7 +7,7 @@
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
-  * $Header: /cvsroot/pgsql/src/backend/utils/cache/lsyscache.c,v 1.58 2001/09/06 02:07:42 tgl Exp $
+  * $Header: /cvsroot/pgsql/src/backend/utils/cache/lsyscache.c,v 1.59 2001/10/25 05:49:46 momjian Exp $
   *
   * NOTES
   * Eventually, the index information should go through here, too.
@@ -589,7 +589,6 @@ get_relnatts(Oid relid)
  else
  return InvalidAttrNumber;
  }
-
  #endif

  /*
@@ -723,7 +722,6 @@ get_typalign(Oid typid)
  else
  return 'i';
  }
-
  #endif

  char
@@ -778,8 +776,8 @@ get_typdefault(Oid typid, Datum *defaultValue)
  typelem = type->typelem;

  /*
-  * typdefault is potentially null, so don't try to access it as a struct
-  * field. Must do it the hard way with SysCacheGetAttr.
+  * typdefault is potentially null, so don't try to access it as a
+  * struct field. Must do it the hard way with SysCacheGetAttr.
   */
  textDefaultVal = SysCacheGetAttr(TYPEOID,
  typeTuple,
@@ -828,6 +826,7 @@ get_typavgwidth(Oid typid, int32 typmod)
   */
  if (typlen > 0)
  return typlen;
+
  /*
   * type_maximum_size knows the encoding of typmod for some datatypes;
   * don't duplicate that knowledge here.
@@ -836,16 +835,17 @@ get_typavgwidth(Oid typid, int32 typmod)
  if (maxwidth > 0)
  {
  /*
-  * For BPCHAR, the max width is also the only width. Otherwise
-  * we need to guess about the typical data width given the max.
-  * A sliding scale for percentage of max width seems reasonable.
+  * For BPCHAR, the max width is also the only width. Otherwise we
+  * need to guess about the typical data width given the max. A
+  * sliding scale for percentage of max width seems reasonable.
   */
  if (typid == BPCHAROID)
  return maxwidth;
  if (maxwidth <= 32)
  return maxwidth; /* assume full width */
  if (maxwidth < 1000)
- return 32 + (maxwidth - 32) / 2; /* assume 50% */
+ return 32 + (maxwidth - 32) / 2; /* assume 50% */
+
  /*
   * Beyond 1000, assume we're looking at something like
   * "varchar(10000)" where the limit isn't actually reached often,
@@ -853,6 +853,7 @@ get_typavgwidth(Oid typid, int32 typmod)
   */
  return 32 + (1000 - 32) / 2;
  }
+
  /*
   * Ooops, we have no idea ... wild guess time.
   */
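Aside: the reflowed comment and surrounding code spell out the width heuristic — up to 32 bytes assume the full declared width, between 32 and 1000 assume roughly half of the headroom above 32, and beyond 1000 clamp to the 1000-byte estimate. A minimal standalone sketch of that sliding scale follows; it is not the real get_typavgwidth(), which also special-cases BPCHAR and falls back to a wild guess when no maximum is known.

#include <stdio.h>

/*
 * Sliding-scale estimate of typical data width given a declared maximum,
 * mirroring the arithmetic shown in the hunks above.
 */
static int
estimate_avg_width(int maxwidth)
{
	if (maxwidth <= 32)
		return maxwidth;			/* assume full width */
	if (maxwidth < 1000)
		return 32 + (maxwidth - 32) / 2;	/* assume 50% of the headroom */

	/* beyond 1000, assume the limit is rarely approached */
	return 32 + (1000 - 32) / 2;
}

int
main(void)
{
	int			widths[] = {10, 32, 100, 500, 1000, 10000};
	int			i;

	for (i = 0; i < 6; i++)
		printf("max %5d -> estimate %d\n", widths[i], estimate_avg_width(widths[i]));
	return 0;
}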
@@ -887,7 +888,6 @@ get_typtype(Oid typid)
  else
  return '\0';
  }
-
  #endif

  /* ---------- STATISTICS CACHE ---------- */
@@ -909,7 +909,7 @@ get_attavgwidth(Oid relid, AttrNumber attnum)
  0, 0);
  if (HeapTupleIsValid(tp))
  {
- int32 stawidth = ((Form_pg_statistic) GETSTRUCT(tp))->stawidth;
+ int32 stawidth = ((Form_pg_statistic) GETSTRUCT(tp))->stawidth;

  ReleaseSysCache(tp);
  if (stawidth > 0)
@@ -977,14 +977,17 @@ get_attstatsslot(HeapTuple statstuple,
  if (isnull)
  elog(ERROR, "get_attstatsslot: stavalues is null");
  statarray = DatumGetArrayTypeP(val);
+
  /*
-  * Do initial examination of the array. This produces a list
-  * of text Datums --- ie, pointers into the text array value.
+  * Do initial examination of the array. This produces a list of
+  * text Datums --- ie, pointers into the text array value.
   */
  deconstruct_array(statarray, false, -1, 'i', values, nvalues);
  narrayelem = *nvalues;
+
  /*
-  * We now need to replace each text Datum by its internal equivalent.
+  * We now need to replace each text Datum by its internal
+  * equivalent.
   *
   * Get the type input proc and typelem for the column datatype.
   */
@@ -997,9 +1000,10 @@ get_attstatsslot(HeapTuple statstuple,
  fmgr_info(((Form_pg_type) GETSTRUCT(typeTuple))->typinput, &inputproc);
  typelem = ((Form_pg_type) GETSTRUCT(typeTuple))->typelem;
  ReleaseSysCache(typeTuple);
+
  /*
-  * Do the conversions. The palloc'd array of Datums is reused
-  * in place.
+  * Do the conversions. The palloc'd array of Datums is reused in
+  * place.
   */
  for (j = 0; j < narrayelem; j++)
  {
@@ -1013,6 +1017,7 @@ get_attstatsslot(HeapTuple statstuple,
  Int32GetDatum(atttypmod));
  pfree(strval);
  }
+
  /*
   * Free statarray if it's a detoasted copy.
   */
@@ -1028,10 +1033,11 @@ get_attstatsslot(HeapTuple statstuple,
  if (isnull)
  elog(ERROR, "get_attstatsslot: stanumbers is null");
  statarray = DatumGetArrayTypeP(val);
+
  /*
-  * We expect the array to be a 1-D float4 array; verify that.
-  * We don't need to use deconstruct_array() since the array
-  * data is just going to look like a C array of float4 values.
+  * We expect the array to be a 1-D float4 array; verify that. We
+  * don't need to use deconstruct_array() since the array data is
+  * just going to look like a C array of float4 values.
   */
  narrayelem = ARR_DIMS(statarray)[0];
  if (ARR_NDIM(statarray) != 1 || narrayelem <= 0 ||
@@ -1040,6 +1046,7 @@ get_attstatsslot(HeapTuple statstuple,
  *numbers = (float4 *) palloc(narrayelem * sizeof(float4));
  memcpy(*numbers, ARR_DATA_PTR(statarray), narrayelem * sizeof(float4));
  *nnumbers = narrayelem;
+
  /*
   * Free statarray if it's a detoasted copy.
   */
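Aside: the comment and code above rely on a 1-D float4 array laying its elements out like a plain C array, so the values can be pulled out with a shape check and a memcpy rather than deconstruct_array(). A standalone sketch of that verify-then-copy pattern follows, using a hypothetical descriptor struct in place of PostgreSQL's ArrayType and malloc in place of palloc.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for a 1-D array value; not PostgreSQL's ArrayType. */
typedef struct ExampleArray
{
	int			ndim;			/* number of dimensions */
	int			dim0;			/* length of the first dimension */
	float	   *data;			/* element data, laid out like a C array */
} ExampleArray;

/*
 * Copy the elements out only if the array really is a non-empty 1-D array,
 * mirroring the shape check and memcpy in the hunks above.
 */
static int
copy_float_elements(const ExampleArray *arr, float **numbers, int *nnumbers)
{
	if (arr->ndim != 1 || arr->dim0 <= 0)
		return 0;				/* not the shape we expect */

	*numbers = malloc(arr->dim0 * sizeof(float));
	memcpy(*numbers, arr->data, arr->dim0 * sizeof(float));
	*nnumbers = arr->dim0;
	return 1;
}

int
main(void)
{
	float		vals[] = {0.25f, 0.5f, 0.25f};
	ExampleArray arr = {1, 3, vals};
	float	   *numbers;
	int			nnumbers;

	if (copy_float_elements(&arr, &numbers, &nnumbers))
	{
		printf("copied %d values, first = %g\n", nnumbers, numbers[0]);
		free(numbers);
	}
	return 0;
}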
@@ -1057,9 +1064,9 @@ free_attstatsslot(Oid atttype,
  {
  if (values)
  {
- if (! get_typbyval(atttype))
+ if (!get_typbyval(atttype))
  {
- int i;
+ int i;

  for (i = 0; i < nvalues; i++)
  pfree(DatumGetPointer(values[i]));
src/backend/utils/cache/relcache.c (57 lines changed)
@@ -8,7 +8,7 @@
   *
   *
   * IDENTIFICATION
-  * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.146 2001/10/06 23:21:44 tgl Exp $
+  * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.147 2001/10/25 05:49:46 momjian Exp $
   *
   *-------------------------------------------------------------------------
   */
@@ -226,7 +226,6 @@ static void RelationClearRelation(Relation relation, bool rebuildIt);

  #ifdef ENABLE_REINDEX_NAILED_RELATIONS
  static void RelationReloadClassinfo(Relation relation);
-
  #endif /* ENABLE_REINDEX_NAILED_RELATIONS */
  static void RelationFlushRelation(Relation relation);
  static Relation RelationNameCacheGetRelation(const char *relationName);
@@ -273,7 +272,6 @@ static List *insert_ordered_oid(List *list, Oid datum);
  static HeapTuple
  ScanPgRelation(RelationBuildDescInfo buildinfo)
  {
-
  /*
   * If this is bootstrap time (initdb), then we can't use the system
   * catalog indices, because they may not exist yet. Otherwise, we
@@ -333,7 +331,6 @@ scan_pg_rel_seq(RelationBuildDescInfo buildinfo)
  return_tuple = pg_class_tuple;
  else
  {
-
  /*
   * a satanic bug used to live here: pg_class_tuple used to be
   * returned here without having the corresponding buffer pinned.
@@ -382,7 +379,7 @@ scan_pg_rel_ind(RelationBuildDescInfo buildinfo)

  default:
  elog(ERROR, "ScanPgRelation: bad buildinfo");
- return_tuple = NULL;/* keep compiler quiet */
+ return_tuple = NULL; /* keep compiler quiet */
  }

  heap_close(pg_class_desc, AccessShareLock);
@@ -461,7 +458,6 @@ static void
  RelationBuildTupleDesc(RelationBuildDescInfo buildinfo,
  Relation relation)
  {
-
  /*
   * If this is bootstrap time (initdb), then we can't use the system
   * catalog indices, because they may not exist yet. Otherwise, we
@@ -649,7 +645,6 @@ build_tupdesc_ind(RelationBuildDescInfo buildinfo,
  {
  #ifdef _DROP_COLUMN_HACK__
  bool columnDropped = false;
-
  #endif /* _DROP_COLUMN_HACK__ */

  atttup = AttributeRelidNumIndexScan(attrel,
@@ -767,7 +762,7 @@ RelationBuildRuleLock(Relation relation)
   */
  rulescxt = AllocSetContextCreate(CacheMemoryContext,
  RelationGetRelationName(relation),
- 0, /* minsize */
+ 0, /* minsize */
  1024, /* initsize */
  1024); /* maxsize */
  relation->rd_rulescxt = rulescxt;
@@ -1106,16 +1101,16 @@ RelationInitIndexAccessInfo(Relation relation)
  amsupport = relation->rd_am->amsupport;

  /*
-  * Make the private context to hold index access info. The reason
-  * we need a context, and not just a couple of pallocs, is so that
-  * we won't leak any subsidiary info attached to fmgr lookup records.
+  * Make the private context to hold index access info. The reason we
+  * need a context, and not just a couple of pallocs, is so that we
+  * won't leak any subsidiary info attached to fmgr lookup records.
   *
   * Context parameters are set on the assumption that it'll probably not
   * contain much data.
   */
  indexcxt = AllocSetContextCreate(CacheMemoryContext,
  RelationGetRelationName(relation),
- 0, /* minsize */
+ 0, /* minsize */
  512, /* initsize */
  1024); /* maxsize */
  relation->rd_indexcxt = indexcxt;
|
||||
|
||||
if (amsupport > 0)
|
||||
{
|
||||
int nsupport = natts * amsupport;
|
||||
int nsupport = natts * amsupport;
|
||||
|
||||
support = (RegProcedure *)
|
||||
MemoryContextAlloc(indexcxt, nsupport * sizeof(RegProcedure));
|
||||
@ -1214,8 +1209,9 @@ formrdesc(char *relationName,
|
||||
strcpy(RelationGetPhysicalRelationName(relation), relationName);
|
||||
|
||||
/*
|
||||
* It's important to distinguish between shared and non-shared relations,
|
||||
* even at bootstrap time, to make sure we know where they are stored.
|
||||
* It's important to distinguish between shared and non-shared
|
||||
* relations, even at bootstrap time, to make sure we know where they
|
||||
* are stored.
|
||||
*/
|
||||
relation->rd_rel->relisshared = IsSharedSystemRelationName(relationName);
|
||||
|
||||
@ -1267,8 +1263,8 @@ formrdesc(char *relationName,
|
||||
if (!IsBootstrapProcessingMode())
|
||||
{
|
||||
/*
|
||||
* This list is incomplete, but it only has to work for the
|
||||
* set of rels that formrdesc is used for ...
|
||||
* This list is incomplete, but it only has to work for the set of
|
||||
* rels that formrdesc is used for ...
|
||||
*/
|
||||
if (strcmp(relationName, RelationRelationName) == 0 ||
|
||||
strcmp(relationName, AttributeRelationName) == 0 ||
|
||||
@ -1560,7 +1556,6 @@ RelationReloadClassinfo(Relation relation)
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
|
||||
|
||||
/*
|
||||
@ -1649,7 +1644,6 @@ RelationClearRelation(Relation relation, bool rebuildIt)
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
/*
|
||||
* When rebuilding an open relcache entry, must preserve ref count
|
||||
* and myxactonly flag. Also attempt to preserve the tupledesc,
|
||||
@ -1663,7 +1657,7 @@ RelationClearRelation(Relation relation, bool rebuildIt)
|
||||
RuleLock *old_rules = relation->rd_rules;
|
||||
MemoryContext old_rulescxt = relation->rd_rulescxt;
|
||||
TriggerDesc *old_trigdesc = relation->trigdesc;
|
||||
BlockNumber old_nblocks = relation->rd_nblocks;
|
||||
BlockNumber old_nblocks = relation->rd_nblocks;
|
||||
RelationBuildDescInfo buildinfo;
|
||||
|
||||
buildinfo.infotype = INFO_RELID;
|
||||
@ -1730,7 +1724,6 @@ RelationFlushRelation(Relation relation)
|
||||
|
||||
if (relation->rd_myxactonly)
|
||||
{
|
||||
|
||||
/*
|
||||
* Local rels should always be rebuilt, not flushed; the relcache
|
||||
* entry must live until RelationPurgeLocalRelation().
|
||||
@ -1739,7 +1732,6 @@ RelationFlushRelation(Relation relation)
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
/*
|
||||
* Nonlocal rels can be dropped from the relcache if not open.
|
||||
*/
|
||||
@ -1837,7 +1829,6 @@ RelationFlushIndexes(Relation *r,
|
||||
relation->rd_rel->relam == accessMethodId))
|
||||
RelationFlushRelation(relation);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
@ -1982,8 +1973,8 @@ RelationBuildLocalRelation(const char *relname,
|
||||
rel->rd_isnailed = true;
|
||||
|
||||
/*
|
||||
* create a new tuple descriptor from the one passed in
|
||||
* (we do this to copy it into the cache context)
|
||||
* create a new tuple descriptor from the one passed in (we do this to
|
||||
* copy it into the cache context)
|
||||
*/
|
||||
rel->rd_att = CreateTupleDescCopyConstr(tupDesc);
|
||||
|
||||
@ -2003,9 +1994,10 @@ RelationBuildLocalRelation(const char *relname,
|
||||
rel->rd_rel->relchecks = tupDesc->constr->num_check;
|
||||
|
||||
/*
|
||||
* Insert relation OID and database/tablespace ID into the right places.
|
||||
* XXX currently we assume physical tblspace/relnode are same as logical
|
||||
* dbid/reloid. Probably should pass an extra pair of parameters.
|
||||
* Insert relation OID and database/tablespace ID into the right
|
||||
* places. XXX currently we assume physical tblspace/relnode are same
|
||||
* as logical dbid/reloid. Probably should pass an extra pair of
|
||||
* parameters.
|
||||
*/
|
||||
rel->rd_rel->relisshared = (dbid == InvalidOid);
|
||||
|
||||
@ -2149,7 +2141,6 @@ RelationCacheInitialize(void)
|
||||
void
|
||||
RelationCacheInitializePhase2(void)
|
||||
{
|
||||
|
||||
/*
|
||||
* Get the real pg_class tuple for each nailed-in-cache relcache entry
|
||||
* that was made by RelationCacheInitialize(), and replace the phony
|
||||
@ -2159,7 +2150,6 @@ RelationCacheInitializePhase2(void)
|
||||
*/
|
||||
if (!IsBootstrapProcessingMode())
|
||||
{
|
||||
|
||||
/*
|
||||
* Initialize critical system index relation descriptors, first.
|
||||
* They are to make building relation descriptors fast.
|
||||
@ -2764,9 +2754,9 @@ init_irels(void)
|
||||
*/
|
||||
indexcxt = AllocSetContextCreate(CacheMemoryContext,
|
||||
RelationGetRelationName(ird),
|
||||
0, /* minsize */
|
||||
512, /* initsize */
|
||||
1024); /* maxsize */
|
||||
0, /* minsize */
|
||||
512, /* initsize */
|
||||
1024); /* maxsize */
|
||||
ird->rd_indexcxt = indexcxt;
|
||||
|
||||
/* next, read the index strategy map */
|
||||
@ -2848,7 +2838,6 @@ write_irels(void)
|
||||
fd = PathNameOpenFile(tempfilename, O_WRONLY | O_CREAT | O_TRUNC | PG_BINARY, 0600);
|
||||
if (fd < 0)
|
||||
{
|
||||
|
||||
/*
|
||||
* We used to consider this a fatal error, but we might as well
|
||||
* continue with backend startup ...
|
||||
|
src/backend/utils/cache/syscache.c (8 lines changed)
@@ -8,7 +8,7 @@
   *
   *
   * IDENTIFICATION
-  * $Header: /cvsroot/pgsql/src/backend/utils/cache/syscache.c,v 1.65 2001/08/21 16:36:05 tgl Exp $
+  * $Header: /cvsroot/pgsql/src/backend/utils/cache/syscache.c,v 1.66 2001/10/25 05:49:46 momjian Exp $
   *
   * NOTES
   * These routines allow the parser/planner/executor to perform
@@ -133,7 +133,7 @@ static struct cachedesc cacheinfo[] = {
  0,
  0
  }},
- {AccessMethodProcedureRelationName, /* AMPROCNUM */
+ {AccessMethodProcedureRelationName, /* AMPROCNUM */
  AccessMethodProcedureIndex,
  0,
  2,
@@ -365,7 +365,8 @@ static struct cachedesc cacheinfo[] = {
  }}
  };

- static CatCache *SysCache[lengthof(cacheinfo)];
+ static CatCache *SysCache[
+ lengthof(cacheinfo)];
  static int SysCacheSize = lengthof(cacheinfo);
  static bool CacheInitialized = false;

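Aside: SysCache is sized directly from the cacheinfo descriptor table via lengthof, so adding a descriptor automatically grows the parallel array of cache pointers. A small self-contained sketch of that idiom follows; the descriptor struct and its entries are made up, and the macro body is the usual sizeof-based definition, assumed here rather than quoted from PostgreSQL's headers.

#include <stdio.h>

/* The usual sizeof-based array-length idiom (assumed definition). */
#define lengthof(array) (sizeof(array) / sizeof((array)[0]))

/* Hypothetical descriptor, standing in for struct cachedesc. */
struct example_cachedesc
{
	const char *relname;
	int			nkeys;			/* illustrative values only */
};

static const struct example_cachedesc cacheinfo[] = {
	{"pg_aggregate", 2},
	{"pg_am", 1},
	{"pg_amop", 3},
};

/* A parallel array sized from the descriptor table at compile time. */
static void *SysCacheExample[lengthof(cacheinfo)];

int
main(void)
{
	printf("%zu descriptors, %zu cache slots\n",
		   lengthof(cacheinfo), lengthof(SysCacheExample));
	return 0;
}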
@@ -564,7 +565,6 @@ SysCacheGetAttr(int cacheId, HeapTuple tup,
  AttrNumber attributeNumber,
  bool *isNull)
  {
-
  /*
   * We just need to get the TupleDesc out of the cache entry, and then
   * we can apply heap_getattr(). We expect that the cache control data