Mirror of https://github.com/postgres/postgres.git (synced 2025-07-02 09:02:37 +03:00)
pgindent run for 9.4

This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
src/backend/utils/cache/attoptcache.c (2 lines changed)

@@ -46,7 +46,7 @@ typedef struct
  * Flush all cache entries when pg_attribute is updated.
  *
  * When pg_attribute is updated, we must flush the cache entry at least
- * for that attribute. Currently, we just flush them all. Since attribute
+ * for that attribute. Currently, we just flush them all. Since attribute
  * options are not currently used in performance-critical paths (such as
  * query execution), this seems OK.
  */
src/backend/utils/cache/catcache.c (21 lines changed)

@@ -836,9 +836,10 @@ RehashCatCache(CatCache *cp)
     for (i = 0; i < cp->cc_nbuckets; i++)
     {
         dlist_mutable_iter iter;

         dlist_foreach_modify(iter, &cp->cc_bucket[i])
         {
-            CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
+            CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
             int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
+
             dlist_delete(iter.cur);
@@ -856,7 +857,7 @@ RehashCatCache(CatCache *cp)
  * CatalogCacheInitializeCache
  *
  * This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
  * that the relcache entry can be opened at this point!
  */
 #ifdef CACHEDEBUG
@@ -1081,7 +1082,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
  * if necessary (on the first access to a particular cache).
  *
  * The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
  * ReleaseCatCache() when done with it.
  *
  * The search key values should be expressed as Datums of the key columns'
@@ -1214,8 +1215,8 @@ SearchCatCache(CatCache *cache,
  * the relation --- for example, due to shared-cache-inval messages being
  * processed during heap_open(). This is OK. It's even possible for one
  * of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
  * will eventually age out of the cache, so there's no functional problem.
  * This case is rare enough that it's not worth expending extra cycles to
  * detect.
@@ -1254,7 +1255,7 @@ SearchCatCache(CatCache *cache,
  *
  * In bootstrap mode, we don't build negative entries, because the cache
  * invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
  * cache inval for that.)
  */
     if (ct == NULL)
@@ -1584,7 +1585,7 @@ SearchCatCacheList(CatCache *cache,
 /*
  * We are now past the last thing that could trigger an elog before we
  * have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
  * we'd better do so before we start marking the members as belonging
  * to the list.
  */
@@ -1673,7 +1674,7 @@ ReleaseCatCacheList(CatCList *list)
 /*
  * CatalogCacheCreateEntry
  * Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
  */
 static CatCTup *
 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
@@ -1724,8 +1725,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
     CacheHdr->ch_ntup++;

 /*
- * If the hash table has become too full, enlarge the buckets array.
- * Quite arbitrarily, we enlarge when fill factor > 2.
+ * If the hash table has become too full, enlarge the buckets array. Quite
+ * arbitrarily, we enlarge when fill factor > 2.
  */
     if (cache->cc_ntup > cache->cc_nbuckets * 2)
         RehashCatCache(cache);
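The last hunk above reflows the comment on catcache's growth policy: the bucket array doubles once the tuple count exceeds twice the bucket count. As a standalone illustration of that rehash pattern (a hedged sketch, not the PostgreSQL sources: plain malloc and a singly-linked chain stand in for palloc and dlists; error handling omitted):

    #include <stdlib.h>

    /* Simplified chain entry; the real catcache links entries with dlists. */
    typedef struct Entry
    {
        unsigned     hash;
        struct Entry *next;
    } Entry;

    typedef struct
    {
        Entry **buckets;
        int     nbuckets;   /* must stay a power of two */
        int     ntup;
    } Cache;

    #define HASH_INDEX(h, sz)  ((h) & ((sz) - 1))

    /* Double the bucket array and re-link every entry, RehashCatCache-style. */
    static void
    rehash(Cache *cp)
    {
        int     newnbuckets = cp->nbuckets * 2;
        Entry **newb = calloc(newnbuckets, sizeof(Entry *));
        int     i;

        for (i = 0; i < cp->nbuckets; i++)
        {
            Entry *e = cp->buckets[i];

            while (e != NULL)
            {
                Entry *next = e->next;
                int    idx = HASH_INDEX(e->hash, newnbuckets);

                e->next = newb[idx];    /* push onto new bucket's chain */
                newb[idx] = e;
                e = next;
            }
        }
        free(cp->buckets);
        cp->buckets = newb;
        cp->nbuckets = newnbuckets;
    }

    /* After each insertion: enlarge when fill factor exceeds 2, as above. */
    static void
    maybe_enlarge(Cache *cp)
    {
        if (cp->ntup > cp->nbuckets * 2)
            rehash(cp);
    }

Doubling keeps the bucket count a power of two, so HASH_INDEX can mask rather than take a modulo, just as the real HASH_INDEX macro does.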
src/backend/utils/cache/inval.c (30 lines changed)

@@ -29,23 +29,23 @@
  *
  * If we successfully complete the transaction, we have to broadcast all
  * these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
  * to record the transaction commit before sending SI messages, otherwise
  * the other backends won't see our updated tuples as good.
  *
  * When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
  * to the pending lists of the parent transaction.
  *
  * In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
  * two events, delete + insert, for simplicity. (If the update doesn't
  * change the tuple hash value, catcache.c optimizes this into one event.)
  *
  * We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. We do, however,
+ * on tuples in relations that have associated catcaches. We do, however,
  * have to register every operation on every tuple that *could* be in a
- * catcache, whether or not it currently is in our cache. Also, if the
+ * catcache, whether or not it currently is in our cache. Also, if the
  * tuple is in a relation that has multiple catcaches, we need to register
  * an invalidation message for each such catcache. catcache.c's
  * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
@@ -113,7 +113,7 @@
 /*
  * To minimize palloc traffic, we keep pending requests in successively-
  * larger chunks (a slightly more sophisticated version of an expansible
- * array). All request types can be stored as SharedInvalidationMessage
+ * array). All request types can be stored as SharedInvalidationMessage
  * records. The ordering of requests within a list is never significant.
  */
 typedef struct InvalidationChunk
@@ -650,7 +650,7 @@ AcceptInvalidationMessages(void)
  *
  * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
  * slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression tests that way. It's useful to try
+ * trying to run the entire regression tests that way. It's useful to try
  * a few simple tests, to make sure that cache reload isn't subject to
  * internal cache-flush hazards, but after you've done a few thousand
  * recursive reloads it's unlikely you'll learn more.
@@ -863,12 +863,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
  * to the shared invalidation message queue. Note that these will be read
  * not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
  * we can skip immediate local processing of anything that's still in
  * CurrentCmdInvalidMsgs, and just send that list out too.
  *
  * If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
  * since they'll not have seen our changed tuples anyway. We can forget
  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
  * the caches yet.
@@ -927,11 +927,11 @@ AtEOXact_Inval(bool isCommit)
  * parent's PriorCmdInvalidMsgs list.
  *
  * If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
  * touched the caches yet.
  *
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
  * here, since CurTransactionContext is about to be emptied anyway
  * (if aborting). Beware of the possibility of aborting the same nesting
  * level twice, though.
@@ -987,7 +987,7 @@ AtEOSubXact_Inval(bool isCommit)
  * in a transaction.
  *
  * Here, we send no messages to the shared queue, since we don't know yet if
- * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
+ * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
  * list, so as to flush our caches of any entries we have outdated in the
  * current command. We then move the current-cmd list over to become part
  * of the prior-cmds list.
@@ -1094,7 +1094,7 @@ CacheInvalidateHeapTuple(Relation relation,
  * This essentially means that only backends in this same database
  * will react to the relcache flush request. This is in fact
  * appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
  * relations can't have schema changes after bootstrap, so we should
  * never come here for a shared rel anyway.)
  */
@@ -1106,7 +1106,7 @@ CacheInvalidateHeapTuple(Relation relation,

 /*
  * When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
  * of the index, but in practice it doesn't matter since indexes of
  * shared catalogs can't have such updates.
  */
@@ -1214,7 +1214,7 @@ CacheInvalidateRelcacheByRelid(Oid relid)
  *
  * Sending this type of invalidation msg forces other backends to close open
  * smgr entries for the rel. This should be done to flush dangling open-file
- * references when the physical rel is being dropped or truncated. Because
+ * references when the physical rel is being dropped or truncated. Because
  * these are nontransactional (i.e., not-rollback-able) operations, we just
  * send the inval message immediately without any queuing.
  *
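The comment in the -113,7 hunk describes how pending requests accumulate in "successively-larger chunks". A self-contained sketch of that idea, under assumed simplifications (a trivial Msg type stands in for SharedInvalidationMessage, malloc for palloc, and the newest chunk is kept at the list head; not the real InvalidationChunk layout):

    #include <stdlib.h>

    /* Placeholder message; the real type is SharedInvalidationMessage. */
    typedef struct
    {
        int      cacheId;
        unsigned hashValue;
    } Msg;

    /* Chunked list: each new chunk doubles in size, so appending N messages
     * needs only O(log N) allocations and never moves stored entries. */
    typedef struct Chunk
    {
        int           nitems;     /* items stored so far in this chunk */
        int           maxitems;   /* capacity of this chunk */
        struct Chunk *next;       /* older, smaller chunks */
        Msg           msgs[];     /* flexible array member (C99) */
    } Chunk;

    static Chunk *
    chunk_new(int maxitems, Chunk *next)
    {
        Chunk *c = malloc(sizeof(Chunk) + maxitems * sizeof(Msg));

        c->nitems = 0;
        c->maxitems = maxitems;
        c->next = next;
        return c;
    }

    /* Append a message, growing with successively larger chunks. */
    static void
    add_msg(Chunk **head, Msg m)
    {
        if (*head == NULL)
            *head = chunk_new(16, NULL);                       /* first chunk */
        else if ((*head)->nitems >= (*head)->maxitems)
            *head = chunk_new((*head)->maxitems * 2, *head);   /* bigger chunk */
        (*head)->msgs[(*head)->nitems++] = m;
    }

Since the comment says ordering within a list is never significant, walking the chunks newest-first at commit time is as good as any other order.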
src/backend/utils/cache/lsyscache.c (10 lines changed)

@@ -186,13 +186,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
  * (This indicates that the operator is not a valid ordering operator.)
  *
  * Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
  * uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
  * or NULLS LAST, as well as inefficient planning due to failure to match up
  * pathkeys that should be the same. So we want a determinate result here.
  * Because of the way the syscache search works, we'll use the interpretation
  * associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
  * to build reverse-sort opfamilies, it doesn't seem worth expending any
  * additional effort on ensuring consistency.
  */
@@ -403,7 +403,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
  *
  * The planner currently uses simple equal() tests to compare the lists
  * returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
  * searches are handled, in normal operation the result will be sorted by OID
  * so everything works fine. If running with system index usage disabled,
  * the result ordering is unspecified and hence the planner might fail to
@@ -1212,7 +1212,7 @@ op_mergejoinable(Oid opno, Oid inputtype)
  *
  * In some cases (currently only array_eq), hashjoinability depends on the
  * specific input data type the operator is invoked for, so that must be
- * passed as well. We currently assume that only one input's type is needed
+ * passed as well. We currently assume that only one input's type is needed
  * to check this --- by convention, pass the left input's data type.
  */
 bool
@@ -1880,7 +1880,7 @@ get_typbyval(Oid typid)
  * A two-fer: given the type OID, return both typlen and typbyval.
  *
  * Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
  * instead of two. Also, this routine raises an error instead of
  * returning a bogus value when given a bad type OID.
  */
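The "two-fer" note at get_typlenbyval reflects a real convenience: copying a Datum needs both typlen and typbyval. A sketch of a typical caller (the helper name here is hypothetical; get_typlenbyval() and datumCopy() are the real routines):

    #include "postgres.h"
    #include "utils/datum.h"
    #include "utils/lsyscache.h"

    /* Hypothetical helper: copy a Datum of an arbitrary type into the
     * current memory context, using one syscache lookup instead of two. */
    static Datum
    copy_datum_of_type(Datum value, Oid typid)
    {
        int16       typlen;
        bool        typbyval;

        get_typlenbyval(typid, &typlen, &typbyval); /* errors out on bad OID */
        return datumCopy(value, typbyval, typlen);
    }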
src/backend/utils/cache/plancache.c (42 lines changed)

@@ -11,7 +11,7 @@
  * The logic for choosing generic or custom plans is in choose_custom_plan,
  * which see for comments.
  *
- * Cache invalidation is driven off sinval events. Any CachedPlanSource
+ * Cache invalidation is driven off sinval events. Any CachedPlanSource
  * that matches the event is marked invalid, as is its generic CachedPlan
  * if it has one. When (and if) the next demand for a cached plan occurs,
  * parse analysis and rewrite is repeated to build a new valid query tree,
@@ -27,7 +27,7 @@
  * caller to notice changes and cope with them.
  *
  * Currently, we track exactly the dependencies of plans on relations and
- * user-defined functions. On relcache invalidation events or pg_proc
+ * user-defined functions. On relcache invalidation events or pg_proc
  * syscache invalidation events, we invalidate just those plans that depend
  * on the particular object being modified. (Note: this scheme assumes
  * that any table modification that requires replanning will generate a
@@ -123,7 +123,7 @@ InitPlanCache(void)
  * CreateCachedPlan: initially create a plan cache entry.
  *
  * Creation of a cached plan is divided into two steps, CreateCachedPlan and
- * CompleteCachedPlan. CreateCachedPlan should be called after running the
+ * CompleteCachedPlan. CreateCachedPlan should be called after running the
  * query through raw_parser, but before doing parse analysis and rewrite;
  * CompleteCachedPlan is called after that. The reason for this arrangement
  * is that it can save one round of copying of the raw parse tree, since
@@ -217,7 +217,7 @@ CreateCachedPlan(Node *raw_parse_tree,
  * in that context.
  *
  * A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified. There is also no support for
+ * preserve the raw parse tree unmodified. There is also no support for
  * invalidation, so plan use must be completed in the current transaction,
  * and DDL that might invalidate the querytree_list must be avoided as well.
  *
@@ -274,13 +274,13 @@ CreateOneShotCachedPlan(Node *raw_parse_tree,
  * CompleteCachedPlan: second step of creating a plan cache entry.
  *
  * Pass in the analyzed-and-rewritten form of the query, as well as the
- * required subsidiary data about parameters and such. All passed values will
+ * required subsidiary data about parameters and such. All passed values will
  * be copied into the CachedPlanSource's memory, except as specified below.
  * After this is called, GetCachedPlan can be called to obtain a plan, and
  * optionally the CachedPlanSource can be saved using SaveCachedPlan.
  *
  * If querytree_context is not NULL, the querytree_list must be stored in that
- * context (but the other parameters need not be). The querytree_list is not
+ * context (but the other parameters need not be). The querytree_list is not
  * copied, rather the given context is kept as the initial query_context of
  * the CachedPlanSource. (It should have been created as a child of the
  * caller's working memory context, but it will now be reparented to belong
@@ -374,7 +374,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
                    &plansource->invalItems);

 /*
- * Also save the current search_path in the query_context. (This
+ * Also save the current search_path in the query_context. (This
  * should not generate much extra cruft either, since almost certainly
  * the path is already valid.) Again, we don't really need this for
  * one-shot plans; and we *must* skip this for transaction control
@@ -421,7 +421,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
  * This is guaranteed not to throw error, except for the caller-error case
  * of trying to save a one-shot plan. Callers typically depend on that
  * since this is called just before or just after adding a pointer to the
- * CachedPlanSource to some permanent data structure of their own. Up until
+ * CachedPlanSource to some permanent data structure of their own. Up until
  * this is done, a CachedPlanSource is just transient data that will go away
  * automatically on transaction abort.
  */
@@ -442,13 +442,13 @@ SaveCachedPlan(CachedPlanSource *plansource)
  * plans from the CachedPlanSource. If there is a generic plan, moving it
  * into CacheMemoryContext would be pretty risky since it's unclear
  * whether the caller has taken suitable care with making references
- * long-lived. Best thing to do seems to be to discard the plan.
+ * long-lived. Best thing to do seems to be to discard the plan.
  */
     ReleaseGenericPlan(plansource);

 /*
  * Reparent the source memory context under CacheMemoryContext so that it
- * will live indefinitely. The query_context follows along since it's
+ * will live indefinitely. The query_context follows along since it's
  * already a child of the other one.
  */
     MemoryContextSetParent(plansource->context, CacheMemoryContext);
@@ -466,7 +466,7 @@ SaveCachedPlan(CachedPlanSource *plansource)
  * DropCachedPlan: destroy a cached plan.
  *
  * Actually this only destroys the CachedPlanSource: any referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
  * handles the situation where DropCachedPlan is called while the plan is
  * still in use.
  */
@@ -617,7 +617,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
     plansource->search_path = NULL;

 /*
- * Free the query_context. We don't really expect MemoryContextDelete to
+ * Free the query_context. We don't really expect MemoryContextDelete to
  * fail, but just in case, make sure the CachedPlanSource is left in a
  * reasonably sane state. (The generic plan won't get unlinked yet, but
  * that's acceptable.)
@@ -675,7 +675,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
     PopActiveSnapshot();

 /*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
  * condition than equalTupleDescs() here?
  *
  * We assume the parameter types didn't change from the first time, so no
@@ -726,7 +726,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
                    &plansource->invalItems);

 /*
- * Also save the current search_path in the query_context. (This should
+ * Also save the current search_path in the query_context. (This should
  * not generate much extra cruft either, since almost certainly the path
  * is already valid.)
  */
@@ -860,7 +860,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
  * we ought to be holding sufficient locks to prevent any invalidation.
  * However, if we're building a custom plan after having built and
  * rejected a generic plan, it's possible to reach here with is_valid
- * false due to an invalidation while making the generic plan. In theory
+ * false due to an invalidation while making the generic plan. In theory
  * the invalidation must be a false positive, perhaps a consequence of an
  * sinval reset event or the CLOBBER_CACHE_ALWAYS debug code. But for
  * safety, let's treat it as real and redo the RevalidateCachedQuery call.
@@ -1043,7 +1043,7 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
  * on the number of relations in the finished plan's rangetable.
  * Join planning effort actually scales much worse than linearly
  * in the number of relations --- but only until the join collapse
- * limits kick in. Also, while inheritance child relations surely
+ * limits kick in. Also, while inheritance child relations surely
  * add to planning effort, they don't make the join situation
  * worse. So the actual shape of the planning cost curve versus
  * number of relations isn't all that obvious. It will take
@@ -1153,7 +1153,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,

 /*
  * If we choose to plan again, we need to re-copy the query_list,
- * since the planner probably scribbled on it. We can force
+ * since the planner probably scribbled on it. We can force
  * BuildCachedPlan to do that by passing NIL.
  */
             qlist = NIL;
@@ -1203,7 +1203,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
  *
  * Note: useResOwner = false is used for releasing references that are in
  * persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
  */
 void
 ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
@@ -1267,7 +1267,7 @@ CachedPlanSetParentContext(CachedPlanSource *plansource,
  *
  * This is a convenience routine that does the equivalent of
  * CreateCachedPlan + CompleteCachedPlan, using the data stored in the
- * input CachedPlanSource. The result is therefore "unsaved" (regardless
+ * input CachedPlanSource. The result is therefore "unsaved" (regardless
  * of the state of the source), and we don't copy any generic plan either.
  * The result will be currently valid, or not, the same as the source.
  */
@@ -1420,7 +1420,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
     {
 /*
  * Ignore utility statements, except those (such as EXPLAIN) that
- * contain a parsed-but-not-planned query. Note: it's okay to use
+ * contain a parsed-but-not-planned query. Note: it's okay to use
  * ScanQueryForLocks, even though the query hasn't been through
  * rule rewriting, because rewriting doesn't change the query
  * representation.
@@ -1616,7 +1616,7 @@ plan_list_is_transient(List *stmt_list)

 /*
  * PlanCacheComputeResultDesc: given a list of analyzed-and-rewritten Queries,
- * determine the result tupledesc it will produce. Returns NULL if the
+ * determine the result tupledesc it will produce. Returns NULL if the
  * execution will not return tuples.
  *
  * Note: the result is created or copied into current memory context.
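The two-step protocol described at CreateCachedPlan/CompleteCachedPlan looks roughly like this from a caller's side. This is a hedged sketch assuming the 9.4-era signatures in plancache.h and tcopprot.h; the helper name and argument values are illustrative, not code from this commit:

    #include "postgres.h"
    #include "nodes/params.h"
    #include "tcop/tcopprot.h"
    #include "utils/plancache.h"

    /* Hypothetical helper: cache a query, then fetch a plan for it. */
    static CachedPlan *
    cache_and_fetch(Node *raw_parse_tree, const char *query_string,
                    const char *commandTag,
                    Oid *paramTypes, int numParams, ParamListInfo boundParams)
    {
        CachedPlanSource *psrc;
        List       *querytree_list;

        /* Step 1: register the raw parse tree (before analysis/rewrite). */
        psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);

        /* Parse analysis and rewrite happen between the two steps. */
        querytree_list = pg_analyze_and_rewrite(raw_parse_tree, query_string,
                                                paramTypes, numParams);

        /* Step 2: hand over the analyzed form; it is copied into psrc
         * (querytree_context passed as NULL, so the list is copied too). */
        CompleteCachedPlan(psrc, querytree_list, NULL,
                           paramTypes, numParams,
                           NULL, NULL,      /* no parserSetup hook */
                           0,               /* default cursor options */
                           true);           /* fixed result tupdesc */

        /* Make it long-lived and subject to sinval-driven invalidation. */
        SaveCachedPlan(psrc);

        /* Generic vs. custom is decided inside GetCachedPlan; the caller
         * must pair this with ReleaseCachedPlan(plan, true) when done. */
        return GetCachedPlan(psrc, boundParams, true);
    }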
src/backend/utils/cache/relcache.c (122 lines changed)

@@ -124,7 +124,7 @@ bool criticalSharedRelcachesBuilt = false;

 /*
  * This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
  * to detect whether data about to be written by write_relcache_init_file()
  * might already be obsolete.
  */
@@ -167,8 +167,8 @@ static bool eoxact_list_overflowed = false;
  * we don't need to access individual items except at EOXact.
  */
 static TupleDesc *EOXactTupleDescArray;
-static int NextEOXactTupleDescNum = 0;
-static int EOXactTupleDescArrayLen = 0;
+static int NextEOXactTupleDescNum = 0;
+static int EOXactTupleDescArrayLen = 0;

 /*
  * macros to manipulate the lookup hashtables
@@ -495,7 +495,7 @@ RelationBuildTupleDesc(Relation relation)
                 Int16GetDatum(0));

 /*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
  * built the critical relcache entries (this includes initdb and startup
  * without a pg_internal.init file).
  */
@@ -558,7 +558,7 @@ RelationBuildTupleDesc(Relation relation)

 /*
  * The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
+ * ("unknown"). Verify this if assert checking is on. They will be
  * computed when and if needed during tuple access.
  */
 #ifdef USE_ASSERT_CHECKING
@@ -572,7 +572,7 @@ RelationBuildTupleDesc(Relation relation)

 /*
  * However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special cases
+ * attribute: it must be zero. This eliminates the need for special cases
  * for attnum=1 that used to exist in fastgetattr() and index_getattr().
  */
     if (relation->rd_rel->relnatts > 0)
@@ -628,7 +628,7 @@ RelationBuildTupleDesc(Relation relation)
  * each relcache entry that has associated rules. The context is used
  * just for rule info, not for any other subsidiary data of the relcache
  * entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
  * to be easy to free explicitly, anyway.
  */
 static void
@@ -736,9 +736,9 @@ RelationBuildRuleLock(Relation relation)

 /*
  * We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
  * through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
  * contains sublinks.
  *
  * The reason for doing this when the rule is loaded, rather than when
@@ -1014,25 +1014,24 @@ RelationInitPhysicalAddr(Relation relation)
     if (relation->rd_rel->relfilenode)
     {
 /*
- * Even if we are using a decoding snapshot that doesn't represent
- * the current state of the catalog we need to make sure the
- * filenode points to the current file since the older file will
- * be gone (or truncated). The new file will still contain older
- * rows so lookups in them will work correctly. This wouldn't work
- * correctly if rewrites were allowed to change the schema in a
- * noncompatible way, but those are prevented both on catalog
- * tables and on user tables declared as additional catalog
- * tables.
+ * Even if we are using a decoding snapshot that doesn't represent the
+ * current state of the catalog we need to make sure the filenode
+ * points to the current file since the older file will be gone (or
+ * truncated). The new file will still contain older rows so lookups
+ * in them will work correctly. This wouldn't work correctly if
+ * rewrites were allowed to change the schema in a noncompatible way,
+ * but those are prevented both on catalog tables and on user tables
+ * declared as additional catalog tables.
  */
         if (HistoricSnapshotActive()
             && RelationIsAccessibleInLogicalDecoding(relation)
             && IsTransactionState())
         {
-            HeapTuple phys_tuple;
-            Form_pg_class physrel;
+            HeapTuple phys_tuple;
+            Form_pg_class physrel;

             phys_tuple = ScanPgRelation(RelationGetRelid(relation),
-                        RelationGetRelid(relation) != ClassOidIndexId,
+                         RelationGetRelid(relation) != ClassOidIndexId,
                         true);
             if (!HeapTupleIsValid(phys_tuple))
                 elog(ERROR, "could not find pg_class entry for %u",
@@ -1113,7 +1112,7 @@ RelationInitIndexAccessInfo(Relation relation)
     amsupport = aform->amsupport;

 /*
- * Make the private context to hold index access info. The reason we need
+ * Make the private context to hold index access info. The reason we need
  * a context, and not just a couple of pallocs, is so that we won't leak
  * any subsidiary info attached to fmgr lookup records.
  *
@@ -1161,7 +1160,7 @@ RelationInitIndexAccessInfo(Relation relation)

 /*
  * indcollation cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must extract
+ * because it comes after the variable-width indkey field. Must extract
  * the datum the hard way...
  */
     indcollDatum = fastgetattr(relation->rd_indextuple,
@@ -1186,7 +1185,7 @@ RelationInitIndexAccessInfo(Relation relation)

 /*
  * Fill the support procedure OID array, as well as the info about
- * opfamilies and opclass input types. (aminfo and supportinfo are left
+ * opfamilies and opclass input types. (aminfo and supportinfo are left
  * as zeroes, and are filled on-the-fly when used)
  */
     IndexSupportInitialize(indclass, relation->rd_support,
@@ -1274,7 +1273,7 @@ IndexSupportInitialize(oidvector *indclass,
  * Note there is no provision for flushing the cache. This is OK at the
  * moment because there is no way to ALTER any interesting properties of an
  * existing opclass --- all you can do is drop it, which will result in
- * a useless but harmless dead entry in the cache. To support altering
+ * a useless but harmless dead entry in the cache. To support altering
  * opclass membership (not the same as opfamily membership!), we'd need to
  * be able to flush this cache as well as the contents of relcache entries
  * for indexes.
@@ -1383,7 +1382,7 @@ LookupOpclassInfo(Oid operatorClassOid,
     heap_close(rel, AccessShareLock);

 /*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
  * the default ones (those with lefttype = righttype = opcintype).
  */
     if (numSupport > 0)
@@ -1889,11 +1888,11 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc)
     {
 /*
  * If we Rebuilt a relcache entry during a transaction then its
- * possible we did that because the TupDesc changed as the result
- * of an ALTER TABLE that ran at less than AccessExclusiveLock.
- * It's possible someone copied that TupDesc, in which case the
- * copy would point to free'd memory. So if we rebuild an entry
- * we keep the TupDesc around until end of transaction, to be safe.
+ * possible we did that because the TupDesc changed as the result of
+ * an ALTER TABLE that ran at less than AccessExclusiveLock. It's
+ * possible someone copied that TupDesc, in which case the copy would
+ * point to free'd memory. So if we rebuild an entry we keep the
+ * TupDesc around until end of transaction, to be safe.
  */
         if (remember_tupdesc)
             RememberToFreeTupleDescAtEOX(relation->rd_att);
@@ -1928,7 +1927,7 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc)
  *
  * NB: when rebuilding, we'd better hold some lock on the relation,
  * else the catalog data we need to read could be changing under us.
- * Also, a rel to be rebuilt had better have refcnt > 0. This is because
+ * Also, a rel to be rebuilt had better have refcnt > 0. This is because
  * an sinval reset could happen while we're accessing the catalogs, and
  * the rel would get blown away underneath us by RelationCacheInvalidate
  * if it has zero refcnt.
@@ -1951,7 +1950,7 @@ RelationClearRelation(Relation relation, bool rebuild)
 /*
  * Make sure smgr and lower levels close the relation's files, if they
  * weren't closed already. If the relation is not getting deleted, the
- * next smgr access should reopen the files automatically. This ensures
+ * next smgr access should reopen the files automatically. This ensures
  * that the low-level file access state is updated after, say, a vacuum
  * truncation.
  */
@@ -2047,7 +2046,7 @@ RelationClearRelation(Relation relation, bool rebuild)
  * over from the old entry). This is to avoid trouble in case an
  * error causes us to lose control partway through. The old entry
  * will still be marked !rd_isvalid, so we'll try to rebuild it again
- * on next access. Meanwhile it's not any less valid than it was
+ * on next access. Meanwhile it's not any less valid than it was
  * before, so any code that might expect to continue accessing it
  * isn't hurt by the rebuild failure. (Consider for example a
  * subtransaction that ALTERs a table and then gets canceled partway
@@ -2237,7 +2236,7 @@ RelationCacheInvalidateEntry(Oid relationId)
 /*
  * RelationCacheInvalidate
  * Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
  * relation cache and re-read relation mapping data.
  *
  * This is currently used only to recover from SI message buffer overflow,
@@ -2250,7 +2249,7 @@ RelationCacheInvalidateEntry(Oid relationId)
  * We do this in two phases: the first pass deletes deletable items, and
  * the second one rebuilds the rebuildable items. This is essential for
  * safety, because hash_seq_search only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
  * occur while we are walking the table, resulting in recursive entry to
  * this routine, we could crash because the inner invocation blows away
  * the entry next to be visited by the outer scan. But this way is OK,
@@ -2385,7 +2384,8 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
 {
     if (EOXactTupleDescArray == NULL)
     {
-        MemoryContext oldcxt;
+        MemoryContext oldcxt;
+
         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

         EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
@@ -2395,12 +2395,12 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
     }
     else if (NextEOXactTupleDescNum >= EOXactTupleDescArrayLen)
     {
-        int32 newlen = EOXactTupleDescArrayLen * 2;
+        int32 newlen = EOXactTupleDescArrayLen * 2;

         Assert(EOXactTupleDescArrayLen > 0);

         EOXactTupleDescArray = (TupleDesc *) repalloc(EOXactTupleDescArray,
-                                newlen * sizeof(TupleDesc));
+                                 newlen * sizeof(TupleDesc));
         EOXactTupleDescArrayLen = newlen;
     }

@@ -2437,7 +2437,7 @@ AtEOXact_RelationCache(bool isCommit)
  * For simplicity, eoxact_list[] entries are not deleted till end of
  * top-level transaction, even though we could remove them at
  * subtransaction end in some cases, or remove relations from the list if
- * they are cleared for other reasons. Therefore we should expect the
+ * they are cleared for other reasons. Therefore we should expect the
  * case that list entries are not found in the hashtable; if not, there's
  * nothing to do for them.
  */
@@ -2498,7 +2498,7 @@ AtEOXact_cleanup(Relation relation, bool isCommit)
  * transaction calls. (That seems bogus, but it's not worth fixing.)
  *
  * Note: ideally this check would be applied to every relcache entry, not
- * just those that have eoxact work to do. But it's not worth forcing a
+ * just those that have eoxact work to do. But it's not worth forcing a
  * scan of the whole relcache just for this. (Moreover, doing so would
  * mean that assert-enabled testing never tests the hash_search code path
  * above, which seems a bad idea.)
@@ -2809,7 +2809,7 @@ RelationBuildLocalRelation(const char *relname,

 /*
  * Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
+ * places. For a mapped relation, we set relfilenode to zero and rely on
  * RelationInitPhysicalAddr to consult the map.
  */
     rel->rd_rel->relisshared = shared_relation;
@@ -3052,7 +3052,7 @@ RelationCacheInitializePhase2(void)
     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

 /*
- * Try to load the shared relcache cache file. If unsuccessful, bootstrap
+ * Try to load the shared relcache cache file. If unsuccessful, bootstrap
  * the cache with pre-made descriptors for the critical shared catalogs.
  */
     if (!load_relcache_init_file(true))
@@ -3132,9 +3132,9 @@ RelationCacheInitializePhase3(void)

 /*
  * If we didn't get the critical system indexes loaded into relcache, do
- * so now. These are critical because the catcache and/or opclass cache
+ * so now. These are critical because the catcache and/or opclass cache
  * depend on them for fetches done during relcache load. Thus, we have an
- * infinite-recursion problem. We can break the recursion by doing
+ * infinite-recursion problem. We can break the recursion by doing
  * heapscans instead of indexscans at certain key spots. To avoid hobbling
  * performance, we only want to do that until we have the critical indexes
  * loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
@@ -3151,7 +3151,7 @@ RelationCacheInitializePhase3(void)
  * RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
  * in the same way as the others, because the critical catalogs don't
  * (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
  * relcache load when a rel does have rules or triggers, so we choose to
  * nail them for performance reasons.
  */
@@ -3182,7 +3182,7 @@ RelationCacheInitializePhase3(void)
  *
  * DatabaseNameIndexId isn't critical for relcache loading, but rather for
  * initial lookup of MyDatabaseId, without which we'll never find any
- * non-shared catalogs at all. Autovacuum calls InitPostgres with a
+ * non-shared catalogs at all. Autovacuum calls InitPostgres with a
  * database OID, so it instead depends on DatabaseOidIndexId. We also
  * need to nail up some indexes on pg_authid and pg_auth_members for use
  * during client authentication.
@@ -3617,7 +3617,7 @@ RelationGetIndexList(Relation relation)

 /*
  * We build the list we intend to return (in the caller's context) while
- * doing the scan. After successfully completing the scan, we copy that
+ * doing the scan. After successfully completing the scan, we copy that
  * list into the relcache entry. This avoids cache-context memory leakage
  * if we get some sort of error partway through.
  */
@@ -3655,7 +3655,7 @@ RelationGetIndexList(Relation relation)

 /*
  * indclass cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must
+ * because it comes after the variable-width indkey field. Must
  * extract the datum the hard way...
  */
         indclassDatum = heap_getattr(htup,
@@ -3970,16 +3970,16 @@ RelationGetIndexPredicate(Relation relation)
 Bitmapset *
 RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
 {
-    Bitmapset *indexattrs;      /* indexed columns */
-    Bitmapset *uindexattrs;     /* columns in unique indexes */
-    Bitmapset *idindexattrs;    /* columns in the replica identity */
+    Bitmapset *indexattrs;      /* indexed columns */
+    Bitmapset *uindexattrs;     /* columns in unique indexes */
+    Bitmapset *idindexattrs;    /* columns in the replica identity */
     List *indexoidlist;
     ListCell *l;
     MemoryContext oldcxt;

     /* Quick exit if we already computed the result. */
     if (relation->rd_indexattr != NULL)
-        switch(attrKind)
+        switch (attrKind)
         {
             case INDEX_ATTR_BITMAP_IDENTITY_KEY:
                 return bms_copy(relation->rd_idattr);
@@ -4023,8 +4023,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
         Relation indexDesc;
         IndexInfo *indexInfo;
         int i;
-        bool isKey;         /* candidate key */
-        bool isIDKey;       /* replica identity index */
+        bool isKey;         /* candidate key */
+        bool isIDKey;       /* replica identity index */


         indexDesc = index_open(indexOid, AccessShareLock);
@@ -4052,7 +4052,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)

             if (isIDKey)
                 idindexattrs = bms_add_member(idindexattrs,
-                        attrnum - FirstLowInvalidHeapAttributeNumber);
+                         attrnum - FirstLowInvalidHeapAttributeNumber);

             if (isKey)
                 uindexattrs = bms_add_member(uindexattrs,
@@ -4079,7 +4079,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
     MemoryContextSwitchTo(oldcxt);

     /* We return our original working copy for caller to play with */
-    switch(attrKind)
+    switch (attrKind)
     {
         case INDEX_ATTR_BITMAP_IDENTITY_KEY:
             return idindexattrs;
@@ -4268,7 +4268,7 @@ errtablecol(Relation rel, int attnum)
  * given directly rather than extracted from the relation's catalog data.
  *
  * Don't use this directly unless errtablecol() is inconvenient for some
- * reason. This might possibly be needed during intermediate states in ALTER
+ * reason. This might possibly be needed during intermediate states in ALTER
  * TABLE, for instance.
  */
 int
@@ -4688,7 +4688,7 @@ load_relcache_init_file(bool shared)
         return true;

 /*
- * init file is broken, so do it the hard way. We don't bother trying to
+ * init file is broken, so do it the hard way. We don't bother trying to
  * free the clutter we just allocated; it's not in the relcache so it
  * won't hurt.
  */
@@ -4753,7 +4753,7 @@ write_relcache_init_file(bool shared)
     }

 /*
- * Write a magic number to serve as a file version identifier. We can
+ * Write a magic number to serve as a file version identifier. We can
  * change the magic number whenever the relcache layout changes.
  */
     magic = RELCACHE_INIT_FILEMAGIC;
@@ -4978,7 +4978,7 @@ RelationCacheInitFilePostInvalidate(void)
  *
  * We used to keep the init files across restarts, but that is unsafe in PITR
  * scenarios, and even in simple crash-recovery cases there are windows for
- * the init files to become out-of-sync with the database. So now we just
+ * the init files to become out-of-sync with the database. So now we just
  * remove them during startup and expect the first backend launch to rebuild
  * them. Of course, this has to happen in each database of the cluster.
 */
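The RememberToFreeTupleDescAtEOX hunks show the usual lazily-allocated, doubling array. As a standalone sketch of the same pattern (malloc/realloc standing in for palloc/repalloc, and the CacheMemoryContext switching omitted; not the PostgreSQL source itself):

    #include <assert.h>
    #include <stdlib.h>

    typedef struct TupleDescData *TupleDesc;   /* opaque in this sketch */

    static TupleDesc *EOXactTupleDescArray;
    static int NextEOXactTupleDescNum = 0;
    static int EOXactTupleDescArrayLen = 0;

    static void
    remember_tupdesc(TupleDesc td)
    {
        if (EOXactTupleDescArray == NULL)
        {
            /* first use: start small */
            EOXactTupleDescArray = malloc(16 * sizeof(TupleDesc));
            EOXactTupleDescArrayLen = 16;
            NextEOXactTupleDescNum = 0;
        }
        else if (NextEOXactTupleDescNum >= EOXactTupleDescArrayLen)
        {
            /* full: double the array, as in the -2395 hunk above */
            int newlen = EOXactTupleDescArrayLen * 2;

            assert(EOXactTupleDescArrayLen > 0);
            EOXactTupleDescArray = realloc(EOXactTupleDescArray,
                                           newlen * sizeof(TupleDesc));
            EOXactTupleDescArrayLen = newlen;
        }
        EOXactTupleDescArray[NextEOXactTupleDescNum++] = td;
    }

Doubling amortizes the reallocation cost to O(1) per remembered descriptor, which matters little here but is the idiom used throughout the backend.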
src/backend/utils/cache/relfilenodemap.c (13 lines changed)

@@ -43,7 +43,7 @@ typedef struct

 typedef struct
 {
-    RelfilenodeMapKey key;  /* lookup key - must be first */
+    RelfilenodeMapKey key;  /* lookup key - must be first */
     Oid relid;              /* pg_class.oid */
 } RelfilenodeMapEntry;

@@ -143,10 +143,10 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
 {
     RelfilenodeMapKey key;
     RelfilenodeMapEntry *entry;
-    bool found;
+    bool found;
     SysScanDesc scandesc;
-    Relation relation;
-    HeapTuple ntp;
+    Relation relation;
+    HeapTuple ntp;
     ScanKeyData skey[2];
     Oid relid;

@@ -222,8 +222,9 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
 #ifdef USE_ASSERT_CHECKING
     if (assert_enabled)
     {
-        bool isnull;
-        Oid check;
+        bool isnull;
+        Oid check;
+
         check = fastgetattr(ntp, Anum_pg_class_reltablespace,
                             RelationGetDescr(relation),
                             &isnull);
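RelidByRelfilenode's fast path is a stock dynahash lookup keyed on (reltablespace, relfilenode). A hedged sketch of that idiom using the real hash_search() API, with the key and entry structs reduced to essentials (the real key type is RelfilenodeMapKey, and the table is created elsewhere with hash_create):

    #include "postgres.h"
    #include "utils/hsearch.h"

    typedef struct
    {
        Oid         reltablespace;
        Oid         relfilenode;
    } MapKey;

    typedef struct
    {
        MapKey      key;        /* lookup key - must be first */
        Oid         relid;      /* pg_class.oid */
    } MapEntry;

    static HTAB *Map = NULL;    /* assumed: built with hash_create() */

    /* Return the cached relid, or InvalidOid on cache miss. */
    static Oid
    lookup_relid(Oid reltablespace, Oid relfilenode)
    {
        MapKey      key;
        MapEntry   *entry;
        bool        found;

        /* Zero the key: padding bytes must not hold garbage, since the
         * hash and compare functions see the struct as raw bytes. */
        MemSet(&key, 0, sizeof(key));
        key.reltablespace = reltablespace;
        key.relfilenode = relfilenode;

        entry = (MapEntry *) hash_search(Map, &key, HASH_FIND, &found);
        return found ? entry->relid : InvalidOid;
    }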
src/backend/utils/cache/relmapper.c (20 lines changed)

@@ -23,7 +23,7 @@
  * mapped catalogs can only be relocated by operations such as VACUUM FULL
  * and CLUSTER, which make no transactionally-significant changes: it must be
  * safe for the new file to replace the old, even if the transaction itself
- * aborts. An important factor here is that the indexes and toast table of
+ * aborts. An important factor here is that the indexes and toast table of
  * a mapped catalog must also be mapped, so that the rewrites/relocations of
  * all these files commit in a single map file update rather than being tied
  * to transaction commit.
@@ -57,13 +57,13 @@
 /*
  * The map file is critical data: we have no automatic method for recovering
  * from loss or corruption of it. We use a CRC so that we can detect
- * corruption. To minimize the risk of failed updates, the map file should
+ * corruption. To minimize the risk of failed updates, the map file should
  * be kept to no more than one standard-size disk sector (ie 512 bytes),
  * and we use overwrite-in-place rather than playing renaming games.
  * The struct layout below is designed to occupy exactly 512 bytes, which
  * might make filesystem updates a bit more efficient.
  *
- * Entries in the mappings[] array are in no particular order. We could
+ * Entries in the mappings[] array are in no particular order. We could
  * speed searching by insisting on OID order, but it really shouldn't be
  * worth the trouble given the intended size of the mapping sets.
  */
@@ -90,7 +90,7 @@ typedef struct RelMapFile

 /*
  * The currently known contents of the shared map file and our database's
- * local map file are stored here. These can be reloaded from disk
+ * local map file are stored here. These can be reloaded from disk
  * immediately whenever we receive an update sinval message.
  */
 static RelMapFile shared_map;
@@ -346,7 +346,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
  * RelationMapRemoveMapping
  *
  * Remove a relation's entry in the map. This is only allowed for "active"
- * (but not committed) local mappings. We need it so we can back out the
+ * (but not committed) local mappings. We need it so we can back out the
  * entry for the transient target file when doing VACUUM FULL/CLUSTER on
  * a mapped relation.
  */
@@ -374,7 +374,7 @@ RelationMapRemoveMapping(Oid relationId)
  * RelationMapInvalidate
  *
  * This routine is invoked for SI cache flush messages. We must re-read
- * the indicated map file. However, we might receive a SI message in a
+ * the indicated map file. However, we might receive a SI message in a
  * process that hasn't yet, and might never, load the mapping files;
  * for example the autovacuum launcher, which *must not* try to read
  * a local map since it is attached to no particular database.
@@ -442,7 +442,7 @@ AtCCI_RelationMap(void)
  *
  * During commit, this must be called as late as possible before the actual
  * transaction commit, so as to minimize the window where the transaction
- * could still roll back after committing map changes. Although nothing
+ * could still roll back after committing map changes. Although nothing
  * critically bad happens in such a case, we still would prefer that it
  * not happen, since we'd possibly be losing useful updates to the relations'
  * pg_class row(s).
@@ -509,7 +509,7 @@ AtPrepare_RelationMap(void)
 /*
  * CheckPointRelationMap
  *
- * This is called during a checkpoint. It must ensure that any relation map
+ * This is called during a checkpoint. It must ensure that any relation map
  * updates that were WAL-logged before the start of the checkpoint are
  * securely flushed to disk and will not need to be replayed later. This
  * seems unlikely to be a performance-critical issue, so we use a simple
@@ -700,7 +700,7 @@ load_relmap_file(bool shared)
  *
  * Because this may be called during WAL replay when MyDatabaseId,
  * DatabasePath, etc aren't valid, we require the caller to pass in suitable
- * values. The caller is also responsible for being sure no concurrent
+ * values. The caller is also responsible for being sure no concurrent
  * map update could be happening.
  */
 static void
@@ -820,7 +820,7 @@ write_relmap_file(bool shared, RelMapFile *newmap,

 /*
  * Make sure that the files listed in the map are not deleted if the outer
- * transaction aborts. This had better be within the critical section
+ * transaction aborts. This had better be within the critical section
  * too: it's not likely to fail, but if it did, we'd arrive at transaction
  * abort with the files still vulnerable. PANICing will leave things in a
  * good state on-disk.
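The one-sector reasoning in the -57,13 hunk translates into a fixed-size struct whose CRC covers everything before it. A simplified sketch under stated assumptions: the field names mirror the comment, a generic crc32_of() helper stands in for PostgreSQL's pg_crc32 macros, and the entry count of 62 is chosen here only to pad the struct to exactly 512 bytes:

    #include <stddef.h>
    #include <stdint.h>

    #define MAP_MAGIC       0x592717   /* placeholder version identifier */
    #define MAX_MAPPINGS    62         /* sized so the struct is 512 bytes */

    typedef struct
    {
        uint32_t    mapoid;            /* OID of a mapped relation */
        uint32_t    mapfilenode;       /* its current relfilenode */
    } MapEntry;

    /* One disk sector, overwritten in place; never renamed into place. */
    typedef struct
    {
        int32_t     magic;             /* format version check */
        int32_t     num_mappings;      /* number of valid entries */
        MapEntry    mappings[MAX_MAPPINGS];
        int32_t     crc;               /* CRC of everything above */
        int32_t     pad;               /* pad to exactly 512 bytes */
    } MapFile;                         /* 8 + 62*8 + 8 = 512 bytes */

    extern uint32_t crc32_of(const void *data, size_t len); /* assumed helper */

    /* Validate after read: wrong magic or CRC means a corrupt map file. */
    static int
    map_file_ok(const MapFile *m)
    {
        return m->magic == MAP_MAGIC &&
            crc32_of(m, offsetof(MapFile, crc)) == (uint32_t) m->crc;
    }

Keeping the whole file within one 512-byte sector is what makes the overwrite-in-place update reasonably atomic on conventional disks.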
src/backend/utils/cache/spccache.c (6 lines changed)

@@ -4,7 +4,7 @@
  * Tablespace cache management.
  *
  * We cache the parsed version of spcoptions for each tablespace to avoid
- * needing to reparse on every lookup. Right now, there doesn't appear to
+ * needing to reparse on every lookup. Right now, there doesn't appear to
  * be a measurable performance gain from doing this, but that might change
  * in the future as we add more options.
  *
@@ -128,7 +128,7 @@ get_tablespace(Oid spcid)
         return spc;

 /*
- * Not found in TableSpace cache. Check catcache. If we don't find a
+ * Not found in TableSpace cache. Check catcache. If we don't find a
  * valid HeapTuple, it must mean someone has managed to request tablespace
  * details for a non-existent tablespace. We'll just treat that case as
  * if no options were specified.
@@ -158,7 +158,7 @@ get_tablespace(Oid spcid)
     }

 /*
- * Now create the cache entry. It's important to do this only after
+ * Now create the cache entry. It's important to do this only after
  * reading the pg_tablespace entry, since doing so could cause a cache
  * flush.
 */
src/backend/utils/cache/syscache.c (21 lines changed)

@@ -803,16 +803,17 @@ static CatCache *SysCache[
 static int SysCacheSize = lengthof(cacheinfo);
 static bool CacheInitialized = false;

-static Oid SysCacheRelationOid[lengthof(cacheinfo)];
-static int SysCacheRelationOidSize;
+static Oid SysCacheRelationOid[
+                               lengthof(cacheinfo)];
+static int SysCacheRelationOidSize;

-static int oid_compare(const void *a, const void *b);
+static int oid_compare(const void *a, const void *b);

 /*
  * InitCatalogCache - initialize the caches
  *
  * Note that no database access is done here; we only allocate memory
- * and initialize the cache structure. Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
  * to complete initialization of a cache happens upon first use
  * of that cache.
  */
@@ -1063,7 +1064,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname)
  * extract a specific attribute.
  *
  * This is equivalent to using heap_getattr() on a tuple fetched
- * from a non-cached relation. Usually, this is only used for attributes
+ * from a non-cached relation. Usually, this is only used for attributes
  * that could be NULL or variable length; the fixed-size attributes in
  * a system table are accessed just by mapping the tuple onto the C struct
  * declarations from include/catalog/.
@@ -1176,12 +1177,12 @@ RelationInvalidatesSnapshotsOnly(Oid relid)
 bool
 RelationHasSysCache(Oid relid)
 {
-    int low = 0,
-        high = SysCacheRelationOidSize - 1;
+    int low = 0,
+        high = SysCacheRelationOidSize - 1;

     while (low <= high)
     {
-        int middle = low + (high - low) / 2;
+        int middle = low + (high - low) / 2;

         if (SysCacheRelationOid[middle] == relid)
             return true;
@@ -1201,8 +1202,8 @@ RelationHasSysCache(Oid relid)
 static int
 oid_compare(const void *a, const void *b)
 {
-    Oid oa = *((Oid *) a);
-    Oid ob = *((Oid *) b);
+    Oid oa = *((Oid *) a);
+    Oid ob = *((Oid *) b);

     if (oa == ob)
         return 0;
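RelationHasSysCache's loop works only because SysCacheRelationOid[] was sorted once with oid_compare. The pattern, reduced to standalone C (note the overflow-safe midpoint, and a comparator that never subtracts, since Oid is unsigned and a difference could wrap):

    #include <stdlib.h>

    typedef unsigned int Oid;   /* standalone stand-in for the real typedef */

    /* qsort comparator: must not just subtract; Oid arithmetic can wrap. */
    static int
    oid_compare(const void *a, const void *b)
    {
        Oid oa = *((const Oid *) a);
        Oid ob = *((const Oid *) b);

        if (oa == ob)
            return 0;
        return (oa > ob) ? 1 : -1;
    }

    /* Binary search a sorted Oid array, as RelationHasSysCache does. */
    static int
    oid_member(const Oid *arr, int n, Oid relid)
    {
        int low = 0,
            high = n - 1;

        while (low <= high)
        {
            int middle = low + (high - low) / 2;    /* avoids int overflow */

            if (arr[middle] == relid)
                return 1;
            if (arr[middle] < relid)
                low = middle + 1;
            else
                high = middle - 1;
        }
        return 0;
    }

    /* One-time setup, done at cache init: qsort(arr, n, sizeof(Oid), oid_compare); */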
src/backend/utils/cache/typcache.c (8 lines changed)

@@ -11,7 +11,7 @@
  *
  * Several seemingly-odd choices have been made to support use of the type
  * cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array(). Because those routines are used as index
+ * record_cmp(), and hash_array(). Because those routines are used as index
  * support operations, they cannot leak memory. To allow them to execute
  * efficiently, all information that they would like to re-use across calls
  * is kept in the type cache.
@@ -101,7 +101,7 @@ typedef struct TypeCacheEnumData
  *
  * Stored record types are remembered in a linear array of TupleDescs,
  * which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs. The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
  * uses just the first N columns' type OIDs, and so we may have multiple
  * entries with the same hash key.
  */
@@ -482,7 +482,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)

 /*
  * Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
  * because the reference mustn't be entered in the current resource owner;
  * it can outlive the current query.
  */
@@ -1074,7 +1074,7 @@ load_enum_cache_data(TypeCacheEntry *tcache)
 /*
  * Read all the information for members of the enum type. We collect the
  * info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext. This minimizes the risk of
+ * permanent memory in CacheMemoryContext. This minimizes the risk of
  * leaking memory from CacheMemoryContext in the event of an error partway
  * through.
  */
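The "first N columns' type OIDs" hash key described in the -101,7 hunk can be sketched as follows. The column limit of 16 and the mixing function are assumptions for illustration; the point is that two record layouts differing only past the cutoff collide and must be told apart by a full TupleDesc comparison:

    #include <stddef.h>

    #define REC_HASH_KEYS 16   /* assumed: hash key uses at most this many columns */

    typedef unsigned int Oid;  /* standalone stand-in for the real typedef */

    /* Build the lookup key from the leading column type OIDs. Entries
     * sharing a key sit in the same hash bucket; the array indexed by the
     * assigned typmod still gives O(1) access once a typmod is known. */
    static size_t
    record_type_hash(const Oid *coltypes, int natts)
    {
        size_t  h = 0;
        int     n = (natts < REC_HASH_KEYS) ? natts : REC_HASH_KEYS;
        int     i;

        for (i = 0; i < n; i++)
            h = h * 31 + coltypes[i];   /* simple placeholder mixing */
        return h;
    }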