pgindent run for 8.3.
src/backend/utils/cache/catcache.c | 10

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.138 2007/08/21 01:11:19 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.139 2007/11/15 21:14:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1043,10 +1043,10 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
     if (cache->id == INDEXRELID)
     {
         /*
-         * Rather than tracking exactly which indexes have to be loaded
-         * before we can use indexscans (which changes from time to time),
-         * just force all pg_index searches to be heap scans until we've
-         * built the critical relcaches.
+         * Rather than tracking exactly which indexes have to be loaded before
+         * we can use indexscans (which changes from time to time), just force
+         * all pg_index searches to be heap scans until we've built the
+         * critical relcaches.
          */
         if (!criticalRelcachesBuilt)
             return false;
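Note: the comment reflowed above states the rule IndexScanOK() enforces in catcache.c: until the relcache entries needed to drive index scans exist, every pg_index search falls back to a heap scan. Below is a minimal standalone C sketch of that guard-flag pattern. It is not PostgreSQL code; critical_caches_built and index_scan_ok are invented stand-ins for criticalRelcachesBuilt and IndexScanOK().

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the criticalRelcachesBuilt flag mentioned above. */
static bool critical_caches_built = false;

/*
 * Conceptual analogue of IndexScanOK(): rather than tracking which indexes
 * are usable at each moment of startup, refuse index scans on the catalog
 * wholesale until the critical caches have been built.
 */
static bool
index_scan_ok(const char *catalog)
{
    if (!critical_caches_built)
    {
        printf("%s: using heap scan (critical caches not built yet)\n", catalog);
        return false;
    }
    printf("%s: index scan permitted\n", catalog);
    return true;
}

int
main(void)
{
    index_scan_ok("pg_index");      /* during bootstrap: heap scan */
    critical_caches_built = true;   /* flipped once startup initialization completes */
    index_scan_ok("pg_index");      /* afterwards: index scan */
    return 0;
}

The design point is that a single boolean checked at the top of the fast path is cheaper and more robust than tracking per-index readiness while the system is still bootstrapping.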
src/backend/utils/cache/inval.c | 6

@@ -80,7 +80,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.80 2007/05/02 21:08:46 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.81 2007/11/15 21:14:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -592,7 +592,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
          * This essentially means that only backends in this same database
          * will react to the relcache flush request. This is in fact
          * appropriate, since only those backends could see our pg_attribute
-         * change anyway. It looks a bit ugly though. (In practice, shared
+         * change anyway. It looks a bit ugly though. (In practice, shared
          * relations can't have schema changes after bootstrap, so we should
          * never come here for a shared rel anyway.)
          */
@@ -604,7 +604,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
 
         /*
          * When a pg_index row is updated, we should send out a relcache inval
-         * for the index relation. As above, we don't know the shared status
+         * for the index relation. As above, we don't know the shared status
          * of the index, but in practice it doesn't matter since indexes of
          * shared catalogs can't have such updates.
          */
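Note: the two hunks above touch comments in PrepareForTupleInvalidation(), which queues relcache invalidation messages when catalog rows such as pg_index entries change, and scopes them so that only backends in the affected database react. A hedged, self-contained C sketch of that queue-and-filter idea follows; the types and functions (InvalMsg, register_relcache_inval, apply_invals) are invented for illustration and are not PostgreSQL's sinval machinery.

#include <stdio.h>

#define MAX_MSGS 16

/* Invented invalidation message: which database and relation to flush. */
typedef struct
{
    unsigned int dboid;     /* 0 means "shared catalog, every database" */
    unsigned int reloid;
} InvalMsg;

static InvalMsg queue[MAX_MSGS];
static int  nqueued = 0;

/* Register a relcache flush request, roughly what the code above prepares. */
static void
register_relcache_inval(unsigned int dboid, unsigned int reloid)
{
    if (nqueued < MAX_MSGS)
        queue[nqueued++] = (InvalMsg) {dboid, reloid};
}

/* A backend applies only messages for its own database (or shared ones). */
static void
apply_invals(unsigned int my_dboid)
{
    for (int i = 0; i < nqueued; i++)
        if (queue[i].dboid == 0 || queue[i].dboid == my_dboid)
            printf("backend in db %u: flush relcache entry for rel %u\n",
                   my_dboid, queue[i].reloid);
}

int
main(void)
{
    register_relcache_inval(1, 16384);  /* e.g. an index's catalog row changed in db 1 */
    apply_invals(1);                    /* same database: reacts */
    apply_invals(2);                    /* other database: ignores it */
    return 0;
}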
src/backend/utils/cache/lsyscache.c | 19

@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.153 2007/10/13 15:55:40 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.154 2007/11/15 21:14:40 momjian Exp $
  *
  * NOTES
  *    Eventually, the index information should go through here, too.
@@ -149,13 +149,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
  * (This indicates that the operator is not a valid ordering operator.)
  *
  * Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
  * uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
  * or NULLS LAST, as well as inefficient planning due to failure to match up
  * pathkeys that should be the same. So we want a determinate result here.
  * Because of the way the syscache search works, we'll use the interpretation
  * associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
  * to build reverse-sort opfamilies, it doesn't seem worth expending any
  * additional effort on ensuring consistency.
  */
@@ -238,7 +238,7 @@ get_compare_function_for_ordering_op(Oid opno, Oid *cmpfunc, bool *reverse)
                                  opcintype,
                                  opcintype,
                                  BTORDER_PROC);
-    if (!OidIsValid(*cmpfunc))  /* should not happen */
+    if (!OidIsValid(*cmpfunc))  /* should not happen */
         elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
              BTORDER_PROC, opcintype, opcintype, opfamily);
     *reverse = (strategy == BTGreaterStrategyNumber);
@@ -322,7 +322,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
         if (aform->amopstrategy == BTEqualStrategyNumber)
         {
             /* Found a suitable opfamily, get matching ordering operator */
-            Oid typid;
+            Oid typid;
 
             typid = use_lhs_type ? aform->amoplefttype : aform->amoprighttype;
             result = get_opfamily_member(aform->amopfamily,
@@ -350,7 +350,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
  *
  * The planner currently uses simple equal() tests to compare the lists
  * returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
  * searches are handled, in normal operation the result will be sorted by OID
  * so everything works fine. If running with system index usage disabled,
  * the result ordering is unspecified and hence the planner might fail to
@@ -445,6 +445,7 @@ get_compatible_hash_operators(Oid opno,
                 result = true;
                 break;
             }
+
             /*
              * Get the matching single-type operator(s). Failure probably
              * shouldn't happen --- it implies a bogus opfamily --- but
@@ -2162,7 +2163,7 @@ type_is_rowtype(Oid typid)
 
 /*
  * type_is_enum
- *    Returns true if the given type is an enum type.
+ *    Returns true if the given type is an enum type.
  */
 bool
 type_is_enum(Oid typid)
@@ -2239,7 +2240,7 @@ Oid
 get_array_type(Oid typid)
 {
     HeapTuple   tp;
-    Oid result = InvalidOid;
+    Oid result = InvalidOid;
 
     tp = SearchSysCache(TYPEOID,
                         ObjectIdGetDatum(typid),
@@ -2444,7 +2445,7 @@ get_typmodout(Oid typid)
     else
         return InvalidOid;
 }
-#endif /* NOT_USED */
+#endif /* NOT_USED */
 
 
 /* ---------- STATISTICS CACHE ---------- */
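Note: several of the comments reflowed above concern lsyscache.c helpers such as get_opfamily_member(), which map (opfamily, input types, strategy) to an operator OID and return InvalidOid when there is no match. The sketch below shows that lookup contract with an invented in-memory table; OpFamilyMember, lookup_member, and every numeric value are illustrative placeholders, not the real catalog-backed implementation.

#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

/* Invented member table: (opfamily, lefttype, righttype, strategy) -> operator. */
typedef struct
{
    Oid         opfamily;
    Oid         lefttype;
    Oid         righttype;
    int         strategy;
    Oid         operator_oid;
} OpFamilyMember;

static const OpFamilyMember members[] = {
    {1000, 23, 23, 1, 501},     /* strategy 1: less-than (placeholder values) */
    {1000, 23, 23, 3, 503},     /* strategy 3: equal */
    {1000, 23, 23, 5, 505},     /* strategy 5: greater-than */
};

/* Same contract as get_opfamily_member(): InvalidOid means "no such entry". */
static Oid
lookup_member(Oid opfamily, Oid lefttype, Oid righttype, int strategy)
{
    for (size_t i = 0; i < sizeof(members) / sizeof(members[0]); i++)
    {
        const OpFamilyMember *m = &members[i];

        if (m->opfamily == opfamily && m->lefttype == lefttype &&
            m->righttype == righttype && m->strategy == strategy)
            return m->operator_oid;
    }
    return InvalidOid;
}

int
main(void)
{
    printf("equality member: %u\n", lookup_member(1000, 23, 23, 3));
    printf("missing entry:   %u\n", lookup_member(1000, 23, 25, 3));
    return 0;
}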
src/backend/utils/cache/plancache.c | 104

@@ -15,7 +15,7 @@
  * the tables they depend on. When (and if) the next demand for a cached
  * plan occurs, the query will be replanned. Note that this could result
  * in an error, for example if a column referenced by the query is no
- * longer present. The creator of a cached plan can specify whether it
+ * longer present. The creator of a cached plan can specify whether it
  * is allowable for the query to change output tupdesc on replan (this
  * could happen with "SELECT *" for example) --- if so, it's up to the
  * caller to notice changes and cope with them.
@@ -33,7 +33,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.12 2007/10/11 18:05:27 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.13 2007/11/15 21:14:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -55,35 +55,35 @@
 
 typedef struct
 {
-    void        (*callback) ();
+    void        (*callback) ();
     void       *arg;
-} ScanQueryWalkerContext;
+} ScanQueryWalkerContext;
 
 typedef struct
 {
     Oid         inval_relid;
     CachedPlan *plan;
-} InvalRelidContext;
+} InvalRelidContext;
 
 
 static List *cached_plans_list = NIL;
 
-static void StoreCachedPlan(CachedPlanSource *plansource, List *stmt_list,
-                MemoryContext plan_context);
+static void StoreCachedPlan(CachedPlanSource * plansource, List *stmt_list,
+                MemoryContext plan_context);
 static List *do_planning(List *querytrees, int cursorOptions);
 static void AcquireExecutorLocks(List *stmt_list, bool acquire);
 static void AcquirePlannerLocks(List *stmt_list, bool acquire);
 static void LockRelid(Oid relid, LOCKMODE lockmode, void *arg);
 static void UnlockRelid(Oid relid, LOCKMODE lockmode, void *arg);
 static void ScanQueryForRelids(Query *parsetree,
-                   void (*callback) (),
-                   void *arg);
-static bool ScanQueryWalker(Node *node, ScanQueryWalkerContext *context);
+                   void (*callback) (),
+                   void *arg);
+static bool ScanQueryWalker(Node *node, ScanQueryWalkerContext * context);
 static bool rowmark_member(List *rowMarks, int rt_index);
 static bool plan_list_is_transient(List *stmt_list);
 static void PlanCacheCallback(Datum arg, Oid relid);
 static void InvalRelid(Oid relid, LOCKMODE lockmode,
-           InvalRelidContext *context);
+           InvalRelidContext * context);
 
 
 /*
@@ -153,7 +153,7 @@ CreateCachedPlan(Node *raw_parse_tree,
     plansource = (CachedPlanSource *) palloc(sizeof(CachedPlanSource));
     plansource->raw_parse_tree = copyObject(raw_parse_tree);
     plansource->query_string = query_string ? pstrdup(query_string) : NULL;
-    plansource->commandTag = commandTag;    /* no copying needed */
+    plansource->commandTag = commandTag;    /* no copying needed */
     if (num_params > 0)
     {
         plansource->param_types = (Oid *) palloc(num_params * sizeof(Oid));
@@ -166,7 +166,7 @@ CreateCachedPlan(Node *raw_parse_tree,
     plansource->fully_planned = fully_planned;
     plansource->fixed_result = fixed_result;
     plansource->search_path = search_path;
-    plansource->generation = 0;     /* StoreCachedPlan will increment */
+    plansource->generation = 0;     /* StoreCachedPlan will increment */
     plansource->resultDesc = PlanCacheComputeResultDesc(stmt_list);
     plansource->plan = NULL;
     plansource->context = source_context;
@@ -200,7 +200,7 @@ CreateCachedPlan(Node *raw_parse_tree,
  * avoids extra copy steps during plan construction. If the query ever does
  * need replanning, we'll generate a separate new CachedPlan at that time, but
  * the CachedPlanSource and the initial CachedPlan share the caller-provided
- * context and go away together when neither is needed any longer. (Because
+ * context and go away together when neither is needed any longer. (Because
  * the parser and planner generate extra cruft in addition to their real
  * output, this approach means that the context probably contains a bunch of
  * useless junk as well as the useful trees. Hence, this method is a
@@ -241,14 +241,14 @@ FastCreateCachedPlan(Node *raw_parse_tree,
     plansource = (CachedPlanSource *) palloc(sizeof(CachedPlanSource));
     plansource->raw_parse_tree = raw_parse_tree;
     plansource->query_string = query_string;
-    plansource->commandTag = commandTag;    /* no copying needed */
+    plansource->commandTag = commandTag;    /* no copying needed */
     plansource->param_types = param_types;
     plansource->num_params = num_params;
     plansource->cursor_options = cursor_options;
     plansource->fully_planned = fully_planned;
     plansource->fixed_result = fixed_result;
     plansource->search_path = search_path;
-    plansource->generation = 0;     /* StoreCachedPlan will increment */
+    plansource->generation = 0;     /* StoreCachedPlan will increment */
     plansource->resultDesc = PlanCacheComputeResultDesc(stmt_list);
     plansource->plan = NULL;
     plansource->context = context;
@@ -284,7 +284,7 @@ FastCreateCachedPlan(Node *raw_parse_tree,
  * Common subroutine for CreateCachedPlan and RevalidateCachedPlan.
  */
 static void
-StoreCachedPlan(CachedPlanSource *plansource,
+StoreCachedPlan(CachedPlanSource * plansource,
                 List *stmt_list,
                 MemoryContext plan_context)
 {
@@ -295,8 +295,8 @@ StoreCachedPlan(CachedPlanSource *plansource,
     {
         /*
          * Make a dedicated memory context for the CachedPlan and its
-         * subsidiary data. It's probably not going to be large, but
-         * just in case, use the default maxsize parameter.
+         * subsidiary data. It's probably not going to be large, but just in
+         * case, use the default maxsize parameter.
          */
         plan_context = AllocSetContextCreate(CacheMemoryContext,
                                              "CachedPlan",
@@ -345,12 +345,12 @@ StoreCachedPlan(CachedPlanSource *plansource,
  * DropCachedPlan: destroy a cached plan.
  *
  * Actually this only destroys the CachedPlanSource: the referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
  * handles the situation where DropCachedPlan is called while the plan is
  * still in use.
  */
 void
-DropCachedPlan(CachedPlanSource *plansource)
+DropCachedPlan(CachedPlanSource * plansource)
 {
     /* Validity check that we were given a CachedPlanSource */
     Assert(list_member_ptr(cached_plans_list, plansource));
@@ -393,7 +393,7 @@ DropCachedPlan(CachedPlanSource *plansource)
  * is used for that work.
  */
 CachedPlan *
-RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
+RevalidateCachedPlan(CachedPlanSource * plansource, bool useResOwner)
 {
     CachedPlan *plan;
 
@@ -402,9 +402,8 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
 
     /*
      * If the plan currently appears valid, acquire locks on the referenced
-     * objects; then check again. We need to do it this way to cover the
-     * race condition that an invalidation message arrives before we get
-     * the lock.
+     * objects; then check again. We need to do it this way to cover the race
+     * condition that an invalidation message arrives before we get the lock.
      */
     plan = plansource->plan;
     if (plan && !plan->dead)
@@ -430,8 +429,8 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
             plan->dead = true;
 
         /*
-         * By now, if any invalidation has happened, PlanCacheCallback
-         * will have marked the plan dead.
+         * By now, if any invalidation has happened, PlanCacheCallback will
+         * have marked the plan dead.
          */
         if (plan->dead)
         {
@@ -458,8 +457,8 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
      */
     if (!plan)
     {
-        List       *slist;
-        TupleDesc   resultDesc;
+        List       *slist;
+        TupleDesc   resultDesc;
 
         /*
          * Restore the search_path that was in use when the plan was made.
@@ -486,7 +485,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
         }
 
         /*
-         * Check or update the result tupdesc. XXX should we use a weaker
+         * Check or update the result tupdesc. XXX should we use a weaker
          * condition than equalTupleDescs() here?
          */
         resultDesc = PlanCacheComputeResultDesc(slist);
@@ -550,12 +549,12 @@ do_planning(List *querytrees, int cursorOptions)
     /*
      * If a snapshot is already set (the normal case), we can just use that
      * for planning. But if it isn't, we have to tell pg_plan_queries to make
-     * a snap if it needs one. In that case we should arrange to reset
+     * a snap if it needs one. In that case we should arrange to reset
      * ActiveSnapshot afterward, to ensure that RevalidateCachedPlan has no
-     * caller-visible effects on the snapshot. Having to replan is an unusual
+     * caller-visible effects on the snapshot. Having to replan is an unusual
      * case, and it seems a really bad idea for RevalidateCachedPlan to affect
-     * the snapshot only in unusual cases. (Besides, the snap might have
-     * been created in a short-lived context.)
+     * the snapshot only in unusual cases. (Besides, the snap might have been
+     * created in a short-lived context.)
      */
     if (ActiveSnapshot != NULL)
         stmt_list = pg_plan_queries(querytrees, cursorOptions, NULL, false);
@@ -589,10 +588,10 @@ do_planning(List *querytrees, int cursorOptions)
  *
  * Note: useResOwner = false is used for releasing references that are in
  * persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
  */
 void
-ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
+ReleaseCachedPlan(CachedPlan * plan, bool useResOwner)
 {
     if (useResOwner)
         ResourceOwnerForgetPlanCacheRef(CurrentResourceOwner, plan);
@@ -633,10 +632,10 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
             continue;
 
         /*
-         * Acquire the appropriate type of lock on each relation OID.
-         * Note that we don't actually try to open the rel, and hence
-         * will not fail if it's been dropped entirely --- we'll just
-         * transiently acquire a non-conflicting lock.
+         * Acquire the appropriate type of lock on each relation OID. Note
+         * that we don't actually try to open the rel, and hence will not
+         * fail if it's been dropped entirely --- we'll just transiently
+         * acquire a non-conflicting lock.
          */
         if (list_member_int(plannedstmt->resultRelations, rt_index))
             lockmode = RowExclusiveLock;
@@ -719,6 +718,7 @@ ScanQueryForRelids(Query *parsetree,
         switch (rte->rtekind)
         {
             case RTE_RELATION:
+
                 /*
                  * Determine the lock type required for this RTE.
                  */
@@ -767,7 +767,7 @@ ScanQueryForRelids(Query *parsetree,
  * Walker to find sublink subqueries for ScanQueryForRelids
  */
 static bool
-ScanQueryWalker(Node *node, ScanQueryWalkerContext *context)
+ScanQueryWalker(Node *node, ScanQueryWalkerContext * context)
 {
     if (node == NULL)
         return false;
@@ -782,8 +782,8 @@ ScanQueryWalker(Node *node, ScanQueryWalkerContext *context)
     }
 
     /*
-     * Do NOT recurse into Query nodes, because ScanQueryForRelids
-     * already processed subselects of subselects for us.
+     * Do NOT recurse into Query nodes, because ScanQueryForRelids already
+     * processed subselects of subselects for us.
      */
     return expression_tree_walker(node, ScanQueryWalker,
                                   (void *) context);
@@ -818,20 +818,20 @@ plan_list_is_transient(List *stmt_list)
     foreach(lc, stmt_list)
     {
         PlannedStmt *plannedstmt = (PlannedStmt *) lfirst(lc);
-
+
         if (!IsA(plannedstmt, PlannedStmt))
             continue;           /* Ignore utility statements */
 
         if (plannedstmt->transientPlan)
             return true;
-    }
+    }
 
     return false;
 }
 
 /*
  * PlanCacheComputeResultDesc: given a list of either fully-planned statements
- * or Queries, determine the result tupledesc it will produce. Returns NULL
+ * or Queries, determine the result tupledesc it will produce. Returns NULL
  * if the execution will not return tuples.
  *
  * Note: the result is created or copied into current memory context.
@@ -924,22 +924,22 @@ PlanCacheCallback(Datum arg, Oid relid)
 
                 Assert(!IsA(plannedstmt, Query));
                 if (!IsA(plannedstmt, PlannedStmt))
-                    continue;   /* Ignore utility statements */
+                    continue;   /* Ignore utility statements */
                 if ((relid == InvalidOid) ? plannedstmt->relationOids != NIL :
                     list_member_oid(plannedstmt->relationOids, relid))
                 {
                     /* Invalidate the plan! */
                     plan->dead = true;
-                    break;      /* out of stmt_list scan */
+                    break;      /* out of stmt_list scan */
                 }
             }
         }
        else
        {
            /*
-            * For not-fully-planned entries we use ScanQueryForRelids,
-            * since a recursive traversal is needed. The callback API
-            * is a bit tedious but avoids duplication of coding.
+            * For not-fully-planned entries we use ScanQueryForRelids, since
+            * a recursive traversal is needed. The callback API is a bit
+            * tedious but avoids duplication of coding.
             */
            InvalRelidContext context;
 
@@ -970,7 +970,7 @@ ResetPlanCache(void)
  * ScanQueryForRelids callback function for PlanCacheCallback
  */
 static void
-InvalRelid(Oid relid, LOCKMODE lockmode, InvalRelidContext *context)
+InvalRelid(Oid relid, LOCKMODE lockmode, InvalRelidContext * context)
 {
     if (relid == context->inval_relid || context->inval_relid == InvalidOid)
         context->plan->dead = true;
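Note: plancache.c, whose comments are reflowed above, keeps each cached plan alongside the OIDs of the relations it depends on, marks the plan dead from an invalidation callback, and replans on next use (see RevalidateCachedPlan, PlanCacheCallback, ReleaseCachedPlan). Here is a hedged, self-contained C sketch of that lifecycle; CachedPlanSketch, plan_cache_callback, and revalidate are invented names, and the "replanning" is only simulated.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

/* Invented cached plan: query text, dependent relations, refcount, dead flag. */
typedef struct
{
    const char *query_string;
    Oid         relation_oids[4];
    int         nrels;
    int         refcount;
    bool        dead;
} CachedPlanSketch;

/*
 * Invalidation callback in the spirit of PlanCacheCallback(): mark any plan
 * that depends on the given relation as dead; it is rebuilt on next use.
 */
static void
plan_cache_callback(CachedPlanSketch *plans, int nplans, Oid relid)
{
    for (int i = 0; i < nplans; i++)
        for (int j = 0; j < plans[i].nrels; j++)
            if (plans[i].relation_oids[j] == relid)
            {
                plans[i].dead = true;
                break;          /* out of this plan's relation scan */
            }
}

/* Revalidate in the spirit of RevalidateCachedPlan(): replan if dead. */
static void
revalidate(CachedPlanSketch *plan)
{
    if (plan->dead)
    {
        printf("replanning: %s\n", plan->query_string);
        plan->dead = false;     /* pretend the plan was rebuilt */
    }
    plan->refcount++;           /* caller now holds a reference */
}

int
main(void)
{
    CachedPlanSketch plan = {"SELECT * FROM t", {16384}, 1, 0, false};

    revalidate(&plan);                      /* first use: plan is valid */
    plan_cache_callback(&plan, 1, 16384);   /* relation 16384 was altered */
    revalidate(&plan);                      /* next use notices the dead flag */
    printf("refcount now %d\n", plan.refcount);
    return 0;
}

The lock-then-recheck dance discussed in the RevalidateCachedPlan comment above exists because an invalidation can arrive between the validity check and lock acquisition; the sketch omits locking entirely.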
src/backend/utils/cache/relcache.c | 37

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.263 2007/09/20 17:56:31 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.264 2007/11/15 21:14:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -932,7 +932,7 @@ RelationInitIndexAccessInfo(Relation relation)
     Datum       indoptionDatum;
     bool        isnull;
     oidvector  *indclass;
-    int2vector *indoption;
+    int2vector *indoption;
     MemoryContext indexcxt;
     MemoryContext oldcontext;
     int         natts;
@@ -1030,8 +1030,8 @@ RelationInitIndexAccessInfo(Relation relation)
 
     /*
      * indclass cannot be referenced directly through the C struct, because it
-     * comes after the variable-width indkey field. Must extract the
-     * datum the hard way...
+     * comes after the variable-width indkey field. Must extract the datum
+     * the hard way...
      */
     indclassDatum = fastgetattr(relation->rd_indextuple,
                                 Anum_pg_index_indclass,
@@ -1041,9 +1041,9 @@ RelationInitIndexAccessInfo(Relation relation)
     indclass = (oidvector *) DatumGetPointer(indclassDatum);
 
     /*
-     * Fill the operator and support procedure OID arrays, as well as the
-     * info about opfamilies and opclass input types. (aminfo and
-     * supportinfo are left as zeroes, and are filled on-the-fly when used)
+     * Fill the operator and support procedure OID arrays, as well as the info
+     * about opfamilies and opclass input types. (aminfo and supportinfo are
+     * left as zeroes, and are filled on-the-fly when used)
      */
     IndexSupportInitialize(indclass,
                            relation->rd_operator, relation->rd_support,
@@ -1655,8 +1655,8 @@ RelationReloadIndexInfo(Relation relation)
                            ObjectIdGetDatum(RelationGetRelid(relation)),
                            0, 0, 0);
     if (!HeapTupleIsValid(tuple))
-        elog(ERROR, "cache lookup failed for index %u",
-             RelationGetRelid(relation));
+        elog(ERROR, "cache lookup failed for index %u",
+             RelationGetRelid(relation));
     index = (Form_pg_index) GETSTRUCT(tuple);
 
     relation->rd_index->indisvalid = index->indisvalid;
@@ -2078,7 +2078,7 @@ AtEOXact_RelationCache(bool isCommit)
      * for us to do here, so we keep a static flag that gets set if there is
      * anything to do. (Currently, this means either a relation is created in
      * the current xact, or one is given a new relfilenode, or an index list
-     * is forced.) For simplicity, the flag remains set till end of top-level
+     * is forced.) For simplicity, the flag remains set till end of top-level
      * transaction, even though we could clear it at subtransaction end in
      * some cases.
      */
@@ -2201,7 +2201,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
         }
 
         /*
-         * Likewise, update or drop any new-relfilenode-in-subtransaction hint.
+         * Likewise, update or drop any new-relfilenode-in-subtransaction
+         * hint.
          */
         if (relation->rd_newRelfilenodeSubid == mySubid)
         {
@@ -2228,7 +2229,7 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
  * RelationCacheMarkNewRelfilenode
  *
  * Mark the rel as having been given a new relfilenode in the current
- * (sub) transaction. This is a hint that can be used to optimize
+ * (sub) transaction. This is a hint that can be used to optimize
  * later operations on the rel in the same transaction.
  */
 void
@@ -3165,9 +3166,9 @@ RelationGetIndexPredicate(Relation relation)
 Bitmapset *
 RelationGetIndexAttrBitmap(Relation relation)
 {
-    Bitmapset  *indexattrs;
-    List       *indexoidlist;
-    ListCell   *l;
+    Bitmapset  *indexattrs;
+    List       *indexoidlist;
+    ListCell   *l;
     MemoryContext oldcxt;
 
     /* Quick exit if we already computed the result. */
@@ -3196,7 +3197,7 @@ RelationGetIndexAttrBitmap(Relation relation)
         Oid         indexOid = lfirst_oid(l);
         Relation    indexDesc;
         IndexInfo  *indexInfo;
-        int         i;
+        int         i;
 
         indexDesc = index_open(indexOid, AccessShareLock);
 
@@ -3206,11 +3207,11 @@ RelationGetIndexAttrBitmap(Relation relation)
         /* Collect simple attribute references */
         for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
         {
-            int attrnum = indexInfo->ii_KeyAttrNumbers[i];
+            int attrnum = indexInfo->ii_KeyAttrNumbers[i];
 
             if (attrnum != 0)
                 indexattrs = bms_add_member(indexattrs,
-                            attrnum - FirstLowInvalidHeapAttributeNumber);
+                            attrnum - FirstLowInvalidHeapAttributeNumber);
         }
 
         /* Collect all attributes used in expressions, too */
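Note: the last hunks above are in RelationGetIndexAttrBitmap(), which folds the key columns of every index on a relation into one bitmapset (offset by FirstLowInvalidHeapAttributeNumber in the real code). The sketch below shows the same idea with a plain unsigned bitmask and invented types; it ignores the system attributes and index expression columns that the real function also handles.

#include <stdio.h>

#define MAX_INDEX_COLS 3

/* Invented index descriptor: the column numbers it covers (1-based). */
typedef struct
{
    int         ncols;
    int         key_cols[MAX_INDEX_COLS];
} IndexSketch;

/*
 * In the spirit of RelationGetIndexAttrBitmap(): fold the key columns of all
 * indexes on a table into one bitmask, so later code can ask cheaply whether
 * an update touches any indexed column.
 */
static unsigned int
index_attr_bitmap(const IndexSketch *indexes, int nindexes)
{
    unsigned int attrs = 0;

    for (int i = 0; i < nindexes; i++)
        for (int j = 0; j < indexes[i].ncols; j++)
            attrs |= 1u << indexes[i].key_cols[j];
    return attrs;
}

int
main(void)
{
    IndexSketch indexes[] = {
        {1, {1}},               /* primary key on column 1 */
        {2, {2, 3}},            /* composite index on columns 2 and 3 */
    };
    unsigned int attrs = index_attr_bitmap(indexes, 2);

    printf("column 3 indexed? %s\n", (attrs & (1u << 3)) ? "yes" : "no");
    printf("column 4 indexed? %s\n", (attrs & (1u << 4)) ? "yes" : "no");
    return 0;
}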
src/backend/utils/cache/ts_cache.c | 24

@@ -12,7 +12,7 @@
  * safe to hold onto a pointer to the cache entry while doing things that
  * might result in recognizing a cache invalidation. Beware however that
  * subsidiary information might be deleted and reallocated somewhere else
- * if a cache inval and reval happens! This does not look like it will be
+ * if a cache inval and reval happens! This does not look like it will be
  * a big problem as long as parser and dictionary methods do not attempt
  * any database access.
  *
@@ -20,7 +20,7 @@
  * Copyright (c) 2006-2007, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/ts_cache.c,v 1.3 2007/09/10 00:57:21 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/ts_cache.c,v 1.4 2007/11/15 21:14:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -143,8 +143,8 @@ lookup_ts_parser_cache(Oid prsId)
     if (entry == NULL || !entry->isvalid)
     {
         /*
-         * If we didn't find one, we want to make one.
-         * But first look up the object to be sure the OID is real.
+         * If we didn't find one, we want to make one. But first look up the
+         * object to be sure the OID is real.
          */
         HeapTuple   tp;
         Form_pg_ts_parser prs;
@@ -245,8 +245,8 @@ lookup_ts_dictionary_cache(Oid dictId)
     if (entry == NULL || !entry->isvalid)
     {
         /*
-         * If we didn't find one, we want to make one.
-         * But first look up the object to be sure the OID is real.
+         * If we didn't find one, we want to make one. But first look up the
+         * object to be sure the OID is real.
          */
         HeapTuple   tpdict,
                     tptmpl;
@@ -325,8 +325,8 @@ lookup_ts_dictionary_cache(Oid dictId)
             MemoryContext oldcontext;
 
             /*
-             * Init method runs in dictionary's private memory context,
-             * and we make sure the options are stored there too
+             * Init method runs in dictionary's private memory context, and we
+             * make sure the options are stored there too
             */
            oldcontext = MemoryContextSwitchTo(entry->dictCtx);
 
@@ -340,7 +340,7 @@ lookup_ts_dictionary_cache(Oid dictId)
 
            entry->dictData =
                DatumGetPointer(OidFunctionCall1(template->tmplinit,
-                                    PointerGetDatum(dictoptions)));
+                                    PointerGetDatum(dictoptions)));
 
            MemoryContextSwitchTo(oldcontext);
        }
@@ -410,8 +410,8 @@ lookup_ts_config_cache(Oid cfgId)
     if (entry == NULL || !entry->isvalid)
     {
         /*
-         * If we didn't find one, we want to make one.
-         * But first look up the object to be sure the OID is real.
+         * If we didn't find one, we want to make one. But first look up the
+         * object to be sure the OID is real.
          */
         HeapTuple   tp;
         Form_pg_ts_config cfg;
@@ -492,7 +492,7 @@ lookup_ts_config_cache(Oid cfgId)
     while ((maptup = index_getnext(mapscan, ForwardScanDirection)) != NULL)
     {
         Form_pg_ts_config_map cfgmap = (Form_pg_ts_config_map) GETSTRUCT(maptup);
-        int toktype = cfgmap->maptokentype;
+        int toktype = cfgmap->maptokentype;
 
         if (toktype <= 0 || toktype > MAXTOKENTYPE)
             elog(ERROR, "maptokentype value %d is out of range", toktype);
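Note: the repeated comment rewritten above ("If we didn't find one, we want to make one. But first look up the object to be sure the OID is real.") describes the lookup-or-rebuild pattern shared by lookup_ts_parser_cache(), lookup_ts_dictionary_cache(), and lookup_ts_config_cache(). A hedged standalone C sketch of that pattern follows; the cache layout, the lookup_catalog stand-in, and the OID/name values are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

/* Invented cache entry for a text-search object. */
typedef struct
{
    Oid         objid;
    bool        isvalid;
    const char *name;
} TSCacheEntrySketch;

#define CACHE_SIZE 8
static TSCacheEntrySketch cache[CACHE_SIZE];

/* Stand-in for the catalog probe that proves the OID is real. */
static const char *
lookup_catalog(Oid objid)
{
    return (objid == 1001) ? "english" : NULL;  /* placeholder OID and name */
}

/*
 * Lookup-or-build: if the entry is missing or has been invalidated, verify
 * the object still exists and rebuild the entry before returning it.
 */
static TSCacheEntrySketch *
lookup_ts_object(Oid objid)
{
    TSCacheEntrySketch *entry = &cache[objid % CACHE_SIZE];

    if (entry->objid != objid || !entry->isvalid)
    {
        const char *name = lookup_catalog(objid);

        if (name == NULL)
        {
            printf("object %u does not exist\n", objid);
            return NULL;
        }
        entry->objid = objid;
        entry->name = name;
        entry->isvalid = true;
        printf("rebuilt cache entry for %s\n", name);
    }
    return entry;
}

int
main(void)
{
    lookup_ts_object(1001);     /* miss: builds the entry */
    lookup_ts_object(1001);     /* hit: returns it silently */
    cache[1001 % CACHE_SIZE].isvalid = false;   /* simulate an invalidation */
    lookup_ts_object(1001);     /* rebuilds */
    lookup_ts_object(9999);     /* bogus OID: reported, not cached */
    return 0;
}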
src/backend/utils/cache/typcache.c | 4

@@ -36,7 +36,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.25 2007/04/02 03:49:39 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.26 2007/11/15 21:14:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -169,7 +169,7 @@ lookup_type_cache(Oid type_id, int flags)
                        TYPECACHE_BTREE_OPFAMILY)) &&
         typentry->btree_opf == InvalidOid)
     {
-        Oid opclass;
+        Oid opclass;
 
         opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
         if (OidIsValid(opclass))
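Note: the hunk above sits in lookup_type_cache(), which fills fields of a per-type cache entry lazily, driven by the flag bits the caller asks for (here the default btree opclass/opfamily). Below is a hedged sketch of that flags-driven lazy fill with invented flag names and placeholder values; it is not the real typcache API.

#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

/* Invented flag bits, in the spirit of the TYPECACHE_* request flags. */
#define WANT_EQ_OPR         0x01
#define WANT_BTREE_OPFAMILY 0x02

typedef struct
{
    Oid         type_id;
    Oid         eq_opr;         /* filled only when requested */
    Oid         btree_opf;      /* filled only when requested */
} TypeCacheEntrySketch;

/*
 * Flags-driven lazy fill: each field is computed only the first time a
 * caller asks for it, then kept in the entry for reuse.
 */
static TypeCacheEntrySketch *
lookup_type(TypeCacheEntrySketch *entry, int flags)
{
    if ((flags & WANT_EQ_OPR) && entry->eq_opr == InvalidOid)
    {
        entry->eq_opr = 1234;       /* pretend we looked up the "=" operator */
        printf("filled eq_opr for type %u\n", entry->type_id);
    }
    if ((flags & WANT_BTREE_OPFAMILY) && entry->btree_opf == InvalidOid)
    {
        entry->btree_opf = 5678;    /* pretend we found the default btree opfamily */
        printf("filled btree_opf for type %u\n", entry->type_id);
    }
    return entry;
}

int
main(void)
{
    TypeCacheEntrySketch entry = {42, InvalidOid, InvalidOid};

    lookup_type(&entry, WANT_EQ_OPR);                       /* fills eq_opr */
    lookup_type(&entry, WANT_EQ_OPR | WANT_BTREE_OPFAMILY); /* only btree_opf is new work */
    return 0;
}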