
First phase of plan-invalidation project: create a plan cache management
module and teach PREPARE and protocol-level prepared statements to use it.
In service of this, rearrange utility-statement processing so that parse
analysis does not assume table schemas can't change before execution for
utility statements (necessary because we don't attempt to re-acquire locks
for utility statements when reusing a stored plan).  This requires some
refactoring of the ProcessUtility API, but it ends up cleaner anyway,
for instance we can get rid of the QueryContext global.

Still to do: fix up SPI and related code to use the plan cache; I'm tempted to
try to make SQL functions use it too.  Also, there are at least some aspects
of system state that we want to ensure remain the same during a replan as in
the original processing; search_path certainly ought to behave that way for
instance, and perhaps there are others.
Author: Tom Lane
Date:   2007-03-13 00:33:44 +00:00
Parent: f84308f195
Commit: b9527e9840

61 changed files with 2478 additions and 1354 deletions
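
For orientation, a minimal sketch of how a caller such as PREPARE is expected
to drive the new module, using only the entry points this commit introduces;
the surrounding variables, the parse/plan steps, and the execution step are
elided or assumed rather than taken from the commit:

    CachedPlanSource *psrc;
    CachedPlan *cplan;

    /* at PREPARE: stash the planner output in the cache */
    psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag,
                            param_types, num_params, stmt_list,
                            true,       /* fully_planned */
                            true);      /* fixed_result */

    /* at each EXECUTE: relock, and transparently replan if invalidated */
    cplan = RevalidateCachedPlan(psrc, true);  /* refcount goes to resowner */
    /* ... feed cplan->stmt_list to the portal/executor machinery ... */
    ReleaseCachedPlan(cplan, true);

    /* at DEALLOCATE: the CachedPlan is freed once its refcount reaches 0 */
    DropCachedPlan(psrc);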

src/backend/utils/cache/Makefile

@@ -4,7 +4,7 @@
# Makefile for utils/cache
#
# IDENTIFICATION
-# $PostgreSQL: pgsql/src/backend/utils/cache/Makefile,v 1.20 2007/01/20 17:16:13 petere Exp $
+# $PostgreSQL: pgsql/src/backend/utils/cache/Makefile,v 1.21 2007/03/13 00:33:42 tgl Exp $
#
#-------------------------------------------------------------------------
@@ -12,7 +12,8 @@ subdir = src/backend/utils/cache
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
-OBJS = catcache.o inval.o relcache.o syscache.o lsyscache.o typcache.o
+OBJS = catcache.o inval.o plancache.o relcache.o \
+syscache.o lsyscache.o typcache.o
all: SUBSYS.o

src/backend/utils/cache/plancache.c (new file, 862 lines)

@@ -0,0 +1,862 @@
/*-------------------------------------------------------------------------
*
* plancache.c
* Plan cache management.
*
* We can store a cached plan in either fully-planned format, or just
* parsed-and-rewritten if the caller wishes to postpone planning until
* actual parameter values are available. CachedPlanSource has the same
* contents either way, but CachedPlan contains a list of PlannedStmts
* and bare utility statements in the first case, or a list of Query nodes
* in the second case.
*
* The plan cache manager itself is principally responsible for tracking
* whether cached plans should be invalidated because of schema changes in
* the tables they depend on. When (and if) the next demand for a cached
* plan occurs, the query will be replanned. Note that this could result
* in an error, for example if a column referenced by the query is no
* longer present. The creator of a cached plan can specify whether it
* is allowable for the query to change output tupdesc on replan (this
* could happen with "SELECT *" for example) --- if so, it's up to the
* caller to notice changes and cope with them.
*
* Currently, we use only relcache invalidation events to invalidate plans.
* This means that changes such as modification of a function definition do
* not invalidate plans using the function. This is not 100% OK --- for
* example, changing a SQL function that's been inlined really ought to
* cause invalidation of the plan that it's been inlined into --- but the
* cost of tracking additional types of object seems much higher than the
* gain, so we're just ignoring them for now.
*
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.1 2007/03/13 00:33:42 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "utils/plancache.h"
#include "executor/executor.h"
#include "optimizer/clauses.h"
#include "storage/lmgr.h"
#include "tcop/pquery.h"
#include "tcop/tcopprot.h"
#include "tcop/utility.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
typedef struct
{
void (*callback) ();
void *arg;
} ScanQueryWalkerContext;
typedef struct
{
Oid inval_relid;
CachedPlan *plan;
} InvalRelidContext;
static List *cached_plans_list = NIL;
static void StoreCachedPlan(CachedPlanSource *plansource, List *stmt_list,
MemoryContext plan_context);
static void AcquireExecutorLocks(List *stmt_list, bool acquire);
static void AcquirePlannerLocks(List *stmt_list, bool acquire);
static void LockRelid(Oid relid, LOCKMODE lockmode, void *arg);
static void UnlockRelid(Oid relid, LOCKMODE lockmode, void *arg);
static void ScanQueryForRelids(Query *parsetree,
void (*callback) (),
void *arg);
static bool ScanQueryWalker(Node *node, ScanQueryWalkerContext *context);
static bool rowmark_member(List *rowMarks, int rt_index);
static TupleDesc ComputeResultDesc(List *stmt_list);
static void PlanCacheCallback(Datum arg, Oid relid);
static void InvalRelid(Oid relid, LOCKMODE lockmode,
InvalRelidContext *context);
/*
* InitPlanCache: initialize module during InitPostgres.
*
* All we need to do is hook into inval.c's callback list.
*/
void
InitPlanCache(void)
{
CacheRegisterRelcacheCallback(PlanCacheCallback, (Datum) 0);
}
/*
* CreateCachedPlan: initially create a plan cache entry.
*
* The caller must already have successfully parsed/planned the query;
* about all that we do here is copy it into permanent storage.
*
* raw_parse_tree: output of raw_parser()
* query_string: original query text (can be NULL if not available, but
* that is discouraged because it degrades error message quality)
* commandTag: compile-time-constant tag for query, or NULL if empty query
* param_types: array of parameter type OIDs, or NULL if none
* num_params: number of parameters
* stmt_list: list of PlannedStmts/utility stmts, or list of Query trees
* fully_planned: are we caching planner or rewriter output?
* fixed_result: TRUE to disallow changes in result tupdesc
*/
CachedPlanSource *
CreateCachedPlan(Node *raw_parse_tree,
const char *query_string,
const char *commandTag,
Oid *param_types,
int num_params,
List *stmt_list,
bool fully_planned,
bool fixed_result)
{
CachedPlanSource *plansource;
MemoryContext source_context;
MemoryContext oldcxt;
/*
* Make a dedicated memory context for the CachedPlanSource and its
* subsidiary data. We expect it can be pretty small.
*/
source_context = AllocSetContextCreate(CacheMemoryContext,
"CachedPlanSource",
ALLOCSET_SMALL_MINSIZE,
ALLOCSET_SMALL_INITSIZE,
ALLOCSET_SMALL_MAXSIZE);
/*
* Create and fill the CachedPlanSource struct within the new context.
*/
oldcxt = MemoryContextSwitchTo(source_context);
plansource = (CachedPlanSource *) palloc(sizeof(CachedPlanSource));
plansource->raw_parse_tree = copyObject(raw_parse_tree);
plansource->query_string = query_string ? pstrdup(query_string) : NULL;
plansource->commandTag = commandTag; /* no copying needed */
if (num_params > 0)
{
plansource->param_types = (Oid *) palloc(num_params * sizeof(Oid));
memcpy(plansource->param_types, param_types, num_params * sizeof(Oid));
}
else
plansource->param_types = NULL;
plansource->num_params = num_params;
plansource->fully_planned = fully_planned;
plansource->fixed_result = fixed_result;
plansource->generation = 0; /* StoreCachedPlan will increment */
plansource->resultDesc = ComputeResultDesc(stmt_list);
plansource->plan = NULL;
plansource->context = source_context;
plansource->orig_plan = NULL;
/*
* Copy the current output plans into the plancache entry.
*/
StoreCachedPlan(plansource, stmt_list, NULL);
/*
* Now we can add the entry to the list of cached plans. The List nodes
* live in CacheMemoryContext.
*/
MemoryContextSwitchTo(CacheMemoryContext);
cached_plans_list = lappend(cached_plans_list, plansource);
MemoryContextSwitchTo(oldcxt);
return plansource;
}
/*
* FastCreateCachedPlan: create a plan cache entry with minimal data copying.
*
* For plans that aren't expected to live very long, the copying overhead of
* CreateCachedPlan is annoying. We provide this variant entry point in which
* the caller has already placed all the data in a suitable memory context.
* The source data and completed plan are in the same context, since this
* avoids extra copy steps during plan construction. If the query ever does
* need replanning, we'll generate a separate new CachedPlan at that time, but
* the CachedPlanSource and the initial CachedPlan share the caller-provided
* context and go away together when neither is needed any longer. (Because
* the parser and planner generate extra cruft in addition to their real
* output, this approach means that the context probably contains a bunch of
* useless junk as well as the useful trees. Hence, this method is a
* space-for-time tradeoff, which is worth making for plans expected to be
* short-lived.)
*
* raw_parse_tree, query_string, param_types, and stmt_list must reside in the
* given context, which must have adequate lifespan (recommendation: make it a
* child of CacheMemoryContext). Otherwise the API is the same as
* CreateCachedPlan.
*/
CachedPlanSource *
FastCreateCachedPlan(Node *raw_parse_tree,
char *query_string,
const char *commandTag,
Oid *param_types,
int num_params,
List *stmt_list,
bool fully_planned,
bool fixed_result,
MemoryContext context)
{
CachedPlanSource *plansource;
MemoryContext oldcxt;
/*
* Create and fill the CachedPlanSource struct within the given context.
*/
oldcxt = MemoryContextSwitchTo(context);
plansource = (CachedPlanSource *) palloc(sizeof(CachedPlanSource));
plansource->raw_parse_tree = raw_parse_tree;
plansource->query_string = query_string;
plansource->commandTag = commandTag; /* no copying needed */
plansource->param_types = param_types;
plansource->num_params = num_params;
plansource->fully_planned = fully_planned;
plansource->fixed_result = fixed_result;
plansource->generation = 0; /* StoreCachedPlan will increment */
plansource->resultDesc = ComputeResultDesc(stmt_list);
plansource->plan = NULL;
plansource->context = context;
plansource->orig_plan = NULL;
/*
* Store the current output plans into the plancache entry.
*/
StoreCachedPlan(plansource, stmt_list, context);
/*
* Since the context is owned by the CachedPlan, advance its refcount.
*/
plansource->orig_plan = plansource->plan;
plansource->orig_plan->refcount++;
/*
* Now we can add the entry to the list of cached plans. The List nodes
* live in CacheMemoryContext.
*/
MemoryContextSwitchTo(CacheMemoryContext);
cached_plans_list = lappend(cached_plans_list, plansource);
MemoryContextSwitchTo(oldcxt);
return plansource;
}
/*
* StoreCachedPlan: store a built or rebuilt plan into a plancache entry.
*
* Common subroutine for CreateCachedPlan and RevalidateCachedPlan.
*/
static void
StoreCachedPlan(CachedPlanSource *plansource,
List *stmt_list,
MemoryContext plan_context)
{
CachedPlan *plan;
MemoryContext oldcxt;
if (plan_context == NULL)
{
/*
* Make a dedicated memory context for the CachedPlan and its
* subsidiary data.
*/
plan_context = AllocSetContextCreate(CacheMemoryContext,
"CachedPlan",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* Copy supplied data into the new context.
*/
oldcxt = MemoryContextSwitchTo(plan_context);
stmt_list = (List *) copyObject(stmt_list);
}
else
{
/* Assume subsidiary data is in the given context */
oldcxt = MemoryContextSwitchTo(plan_context);
}
/*
* Create and fill the CachedPlan struct within the new context.
*/
plan = (CachedPlan *) palloc(sizeof(CachedPlan));
plan->stmt_list = stmt_list;
plan->fully_planned = plansource->fully_planned;
plan->dead = false;
plan->refcount = 1; /* for the parent's link */
plan->generation = ++(plansource->generation);
plan->context = plan_context;
Assert(plansource->plan == NULL);
plansource->plan = plan;
MemoryContextSwitchTo(oldcxt);
}
/*
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: the referenced CachedPlan
* is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
void
DropCachedPlan(CachedPlanSource *plansource)
{
/* Validity check that we were given a CachedPlanSource */
Assert(list_member_ptr(cached_plans_list, plansource));
/* Remove it from the list */
cached_plans_list = list_delete_ptr(cached_plans_list, plansource);
/* Decrement child CachePlan's refcount and drop if no longer needed */
if (plansource->plan)
ReleaseCachedPlan(plansource->plan, false);
/*
* If CachedPlanSource has independent storage, just drop it. Otherwise
* decrement the refcount on the CachePlan that owns the storage.
*/
if (plansource->orig_plan == NULL)
{
/* Remove the CachedPlanSource and all subsidiary data */
MemoryContextDelete(plansource->context);
}
else
{
Assert(plansource->context == plansource->orig_plan->context);
ReleaseCachedPlan(plansource->orig_plan, false);
}
}
/*
* RevalidateCachedPlan: prepare for re-use of a previously cached plan.
*
* What we do here is re-acquire locks and rebuild the plan if necessary.
* On return, the plan is valid and we have sufficient locks to begin
* execution (or planning, if not fully_planned).
*
* On return, the refcount of the plan has been incremented; a later
* ReleaseCachedPlan() call is expected. The refcount has been reported
* to the CurrentResourceOwner if useResOwner is true.
*
* Note: if any replanning activity is required, the caller's memory context
* is used for that work.
*/
CachedPlan *
RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
{
CachedPlan *plan;
/* Validity check that we were given a CachedPlanSource */
Assert(list_member_ptr(cached_plans_list, plansource));
/*
* If the plan currently appears valid, acquire locks on the referenced
* objects; then check again. We need to do it this way to cover the
* race condition that an invalidation message arrives before we get
* the lock.
*/
plan = plansource->plan;
if (plan && !plan->dead)
{
/*
* Plan must have positive refcount because it is referenced by
* plansource; so no need to fear it disappears under us here.
*/
Assert(plan->refcount > 0);
if (plan->fully_planned)
AcquireExecutorLocks(plan->stmt_list, true);
else
AcquirePlannerLocks(plan->stmt_list, true);
/*
* By now, if any invalidation has happened, PlanCacheCallback
* will have marked the plan dead.
*/
if (plan->dead)
{
/* Ooops, the race case happened. Release useless locks. */
if (plan->fully_planned)
AcquireExecutorLocks(plan->stmt_list, false);
else
AcquirePlannerLocks(plan->stmt_list, false);
}
}
/*
* If plan has been invalidated, unlink it from the parent and release it.
*/
if (plan && plan->dead)
{
plansource->plan = NULL;
ReleaseCachedPlan(plan, false);
plan = NULL;
}
/*
* Build a new plan if needed.
*/
if (!plan)
{
List *slist;
TupleDesc resultDesc;
/*
* Run parse analysis and rule rewriting. The parser tends to
* scribble on its input, so we must copy the raw parse tree to
* prevent corruption of the cache. Note that we do not use
* parse_analyze_varparams(), assuming that the caller never wants the
* parameter types to change from the original values.
*/
slist = pg_analyze_and_rewrite(copyObject(plansource->raw_parse_tree),
plansource->query_string,
plansource->param_types,
plansource->num_params);
if (plansource->fully_planned)
{
/*
* Generate plans for queries. Assume snapshot is not set yet
* (XXX this may be wasteful, won't all callers have done that?)
*/
slist = pg_plan_queries(slist, NULL, true);
}
/*
* Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*/
resultDesc = ComputeResultDesc(slist);
if (resultDesc == NULL && plansource->resultDesc == NULL)
{
/* OK, doesn't return tuples */
}
else if (resultDesc == NULL || plansource->resultDesc == NULL ||
!equalTupleDescs(resultDesc, plansource->resultDesc))
{
MemoryContext oldcxt;
/* can we give a better error message? */
if (plansource->fixed_result)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cached plan must not change result type")));
oldcxt = MemoryContextSwitchTo(plansource->context);
if (resultDesc)
resultDesc = CreateTupleDescCopy(resultDesc);
if (plansource->resultDesc)
FreeTupleDesc(plansource->resultDesc);
plansource->resultDesc = resultDesc;
MemoryContextSwitchTo(oldcxt);
}
/*
* Store the plans into the plancache entry, advancing the generation
* count.
*/
StoreCachedPlan(plansource, slist, NULL);
plan = plansource->plan;
}
/*
* Last step: flag the plan as in use by caller.
*/
if (useResOwner)
ResourceOwnerEnlargePlanCacheRefs(CurrentResourceOwner);
plan->refcount++;
if (useResOwner)
ResourceOwnerRememberPlanCacheRef(CurrentResourceOwner, plan);
return plan;
}
/*
* ReleaseCachedPlan: release active use of a cached plan.
*
* This decrements the reference count, and frees the plan if the count
* has thereby gone to zero. If useResOwner is true, it is assumed that
* the reference count is managed by the CurrentResourceOwner.
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
* Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
{
if (useResOwner)
ResourceOwnerForgetPlanCacheRef(CurrentResourceOwner, plan);
Assert(plan->refcount > 0);
plan->refcount--;
if (plan->refcount == 0)
MemoryContextDelete(plan->context);
}
/*
* AcquireExecutorLocks: acquire locks needed for execution of a fully-planned
* cached plan; or release them if acquire is false.
*/
static void
AcquireExecutorLocks(List *stmt_list, bool acquire)
{
ListCell *lc1;
foreach(lc1, stmt_list)
{
PlannedStmt *plannedstmt = (PlannedStmt *) lfirst(lc1);
int rt_index;
ListCell *lc2;
Assert(!IsA(plannedstmt, Query));
if (!IsA(plannedstmt, PlannedStmt))
continue; /* Ignore utility statements */
rt_index = 0;
foreach(lc2, plannedstmt->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2);
LOCKMODE lockmode;
rt_index++;
if (rte->rtekind != RTE_RELATION)
continue;
/*
* Acquire the appropriate type of lock on each relation OID.
* Note that we don't actually try to open the rel, and hence
* will not fail if it's been dropped entirely --- we'll just
* transiently acquire a non-conflicting lock.
*/
if (list_member_int(plannedstmt->resultRelations, rt_index))
lockmode = RowExclusiveLock;
else if (rowmark_member(plannedstmt->rowMarks, rt_index))
lockmode = RowShareLock;
else
lockmode = AccessShareLock;
if (acquire)
LockRelationOid(rte->relid, lockmode);
else
UnlockRelationOid(rte->relid, lockmode);
}
}
}
/*
* AcquirePlannerLocks: acquire locks needed for planning and execution of a
* not-fully-planned cached plan; or release them if acquire is false.
*
* Note that we don't actually try to open the relations, and hence will not
* fail if one has been dropped entirely --- we'll just transiently acquire
* a non-conflicting lock.
*/
static void
AcquirePlannerLocks(List *stmt_list, bool acquire)
{
ListCell *lc;
foreach(lc, stmt_list)
{
Query *query = (Query *) lfirst(lc);
Assert(IsA(query, Query));
if (acquire)
ScanQueryForRelids(query, LockRelid, NULL);
else
ScanQueryForRelids(query, UnlockRelid, NULL);
}
}
/*
* ScanQueryForRelids callback functions for AcquirePlannerLocks
*/
static void
LockRelid(Oid relid, LOCKMODE lockmode, void *arg)
{
LockRelationOid(relid, lockmode);
}
static void
UnlockRelid(Oid relid, LOCKMODE lockmode, void *arg)
{
UnlockRelationOid(relid, lockmode);
}
/*
* ScanQueryForRelids: recursively scan one Query and apply the callback
* function to each relation OID found therein. The callback function
* takes the arguments relation OID, lockmode, pointer arg.
*/
static void
ScanQueryForRelids(Query *parsetree,
void (*callback) (),
void *arg)
{
ListCell *lc;
int rt_index;
/*
* First, process RTEs of the current query level.
*/
rt_index = 0;
foreach(lc, parsetree->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
LOCKMODE lockmode;
rt_index++;
switch (rte->rtekind)
{
case RTE_RELATION:
/*
* Determine the lock type required for this RTE.
*/
if (rt_index == parsetree->resultRelation)
lockmode = RowExclusiveLock;
else if (rowmark_member(parsetree->rowMarks, rt_index))
lockmode = RowShareLock;
else
lockmode = AccessShareLock;
(*callback) (rte->relid, lockmode, arg);
break;
case RTE_SUBQUERY:
/*
* The subquery RTE itself is all right, but we have to
* recurse to process the represented subquery.
*/
ScanQueryForRelids(rte->subquery, callback, arg);
break;
default:
/* ignore other types of RTEs */
break;
}
}
/*
* Recurse into sublink subqueries, too. But we already did the ones in
* the rtable.
*/
if (parsetree->hasSubLinks)
{
ScanQueryWalkerContext context;
context.callback = callback;
context.arg = arg;
query_tree_walker(parsetree, ScanQueryWalker,
(void *) &context,
QTW_IGNORE_RT_SUBQUERIES);
}
}
/*
* Walker to find sublink subqueries for ScanQueryForRelids
*/
static bool
ScanQueryWalker(Node *node, ScanQueryWalkerContext *context)
{
if (node == NULL)
return false;
if (IsA(node, SubLink))
{
SubLink *sub = (SubLink *) node;
/* Do what we came for */
ScanQueryForRelids((Query *) sub->subselect,
context->callback, context->arg);
/* Fall through to process lefthand args of SubLink */
}
/*
* Do NOT recurse into Query nodes, because ScanQueryForRelids
* already processed subselects of subselects for us.
*/
return expression_tree_walker(node, ScanQueryWalker,
(void *) context);
}
/*
* rowmark_member: check whether an RT index appears in a RowMarkClause list.
*/
static bool
rowmark_member(List *rowMarks, int rt_index)
{
ListCell *l;
foreach(l, rowMarks)
{
RowMarkClause *rc = (RowMarkClause *) lfirst(l);
if (rc->rti == rt_index)
return true;
}
return false;
}
/*
* ComputeResultDesc: given a list of either fully-planned statements or
* Queries, determine the result tupledesc it will produce. Returns NULL
* if the execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
*/
static TupleDesc
ComputeResultDesc(List *stmt_list)
{
Node *node;
Query *query;
PlannedStmt *pstmt;
switch (ChoosePortalStrategy(stmt_list))
{
case PORTAL_ONE_SELECT:
node = (Node *) linitial(stmt_list);
if (IsA(node, Query))
{
query = (Query *) node;
return ExecCleanTypeFromTL(query->targetList, false);
}
if (IsA(node, PlannedStmt))
{
pstmt = (PlannedStmt *) node;
return ExecCleanTypeFromTL(pstmt->planTree->targetlist, false);
}
/* other cases shouldn't happen, but return NULL */
break;
case PORTAL_ONE_RETURNING:
node = PortalListGetPrimaryStmt(stmt_list);
if (IsA(node, Query))
{
query = (Query *) node;
Assert(query->returningList);
return ExecCleanTypeFromTL(query->returningList, false);
}
if (IsA(node, PlannedStmt))
{
pstmt = (PlannedStmt *) node;
Assert(pstmt->returningLists);
return ExecCleanTypeFromTL((List *) linitial(pstmt->returningLists), false);
}
/* other cases shouldn't happen, but return NULL */
break;
case PORTAL_UTIL_SELECT:
node = (Node *) linitial(stmt_list);
if (IsA(node, Query))
{
query = (Query *) node;
Assert(query->utilityStmt);
return UtilityTupleDescriptor(query->utilityStmt);
}
/* else it's a bare utility statement */
return UtilityTupleDescriptor(node);
case PORTAL_MULTI_QUERY:
/* will not return tuples */
break;
}
return NULL;
}
/*
* PlanCacheCallback
* Relcache inval callback function
*/
static void
PlanCacheCallback(Datum arg, Oid relid)
{
ListCell *lc1;
ListCell *lc2;
foreach(lc1, cached_plans_list)
{
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
CachedPlan *plan = plansource->plan;
/* No work if it's already invalidated */
if (!plan || plan->dead)
continue;
if (plan->fully_planned)
{
foreach(lc2, plan->stmt_list)
{
PlannedStmt *plannedstmt = (PlannedStmt *) lfirst(lc2);
ListCell *lc3;
Assert(!IsA(plannedstmt, Query));
if (!IsA(plannedstmt, PlannedStmt))
continue; /* Ignore utility statements */
foreach(lc3, plannedstmt->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc3);
if (rte->rtekind != RTE_RELATION)
continue;
if (relid == rte->relid)
{
/* Invalidate the plan! */
plan->dead = true;
break; /* out of rangetable scan */
}
}
if (plan->dead)
break; /* out of stmt_list scan */
}
}
else
{
/*
* For not-fully-planned entries we use ScanQueryForRelids,
* since a recursive traversal is needed. The callback API
* is a bit tedious but avoids duplication of coding.
*/
InvalRelidContext context;
context.inval_relid = relid;
context.plan = plan;
foreach(lc2, plan->stmt_list)
{
Query *query = (Query *) lfirst(lc2);
Assert(IsA(query, Query));
ScanQueryForRelids(query, InvalRelid, (void *) &context);
}
}
}
}
/*
* ScanQueryForRelids callback function for PlanCacheCallback
*/
static void
InvalRelid(Oid relid, LOCKMODE lockmode, InvalRelidContext *context)
{
if (relid == context->inval_relid)
context->plan->dead = true;
}
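
To make the FastCreateCachedPlan convention concrete, a hedged sketch of a
caller that assembles everything in one context and hands it over; the
context name is invented and the parse/plan steps are abbreviated, but the
signatures are the ones defined above:

    MemoryContext cxt;
    MemoryContext oldcxt;
    CachedPlanSource *psrc;

    cxt = AllocSetContextCreate(CacheMemoryContext,
                                "UnnamedPreparedStmt",  /* illustrative */
                                ALLOCSET_SMALL_MINSIZE,
                                ALLOCSET_SMALL_INITSIZE,
                                ALLOCSET_SMALL_MAXSIZE);
    oldcxt = MemoryContextSwitchTo(cxt);
    /* ... build raw_parse_tree, query_string, param_types, stmt_list ... */
    psrc = FastCreateCachedPlan(raw_parse_tree, query_string, commandTag,
                                param_types, num_params, stmt_list,
                                true,   /* fully_planned */
                                true,   /* fixed_result */
                                cxt);
    MemoryContextSwitchTo(oldcxt);
    /* cxt now belongs to the entry; DropCachedPlan(psrc) will free it */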

src/backend/utils/init/postinit.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
-* $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.174 2007/02/15 23:23:23 alvherre Exp $
+* $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.175 2007/03/13 00:33:42 tgl Exp $
*
*
*-------------------------------------------------------------------------
@@ -28,6 +28,7 @@
#include "libpq/hba.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/autovacuum.h"
#include "postmaster/postmaster.h"
#include "storage/backendid.h"
@@ -40,10 +41,10 @@
#include "utils/acl.h"
#include "utils/flatfiles.h"
#include "utils/guc.h"
#include "utils/plancache.h"
#include "utils/portal.h"
#include "utils/relcache.h"
#include "utils/syscache.h"
#include "pgstat.h"
static bool FindMyDatabase(const char *name, Oid *db_id, Oid *db_tablespace);
@@ -429,6 +430,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
*/
RelationCacheInitialize();
InitCatalogCache();
+InitPlanCache();
/* Initialize portal manager */
EnablePortalManager();

src/backend/utils/mmgr/README

@@ -1,4 +1,4 @@
-$PostgreSQL: pgsql/src/backend/utils/mmgr/README,v 1.9 2006/09/07 22:52:01 tgl Exp $
+$PostgreSQL: pgsql/src/backend/utils/mmgr/README,v 1.10 2007/03/13 00:33:42 tgl Exp $
Notes about memory allocation redesign
--------------------------------------
@@ -201,15 +201,6 @@ have dangling pointers leading to a crash at top-level commit. An example of
data kept here is pending NOTIFY messages, which are sent at top-level commit,
but only if the generating subtransaction did not abort.
-QueryContext --- this is not actually a separate context, but a global
-variable pointing to the context that holds the current command's parse tree.
-(In simple-Query mode this points to MessageContext; when executing a
-prepared statement it will point to the prepared statement's private context.
-Note that the plan tree may or may not be in this same context.)
-Generally it is not appropriate for any code to use QueryContext as an
-allocation target --- from the point of view of any code that would be
-referencing the QueryContext variable, it's a read-only context.
PortalContext --- this is not actually a separate context either, but a
global variable pointing to the per-portal context of the currently active
execution portal. This can be used if it's necessary to allocate storage
@@ -229,9 +220,7 @@ Contexts for prepared statements and portals
A prepared-statement object has an associated private context, in which
the parse and plan trees for its query are stored. Because these trees
are read-only to the executor, the prepared statement can be re-used many
-times without further copying of these trees. QueryContext points at this
-private context while executing any portal built from the prepared
-statement.
+times without further copying of these trees.
An execution-portal object has a private context that is referenced by
PortalContext when the portal is active. In the case of a portal created

src/backend/utils/mmgr/mcxt.c

@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
-* $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.59 2007/01/05 22:19:47 momjian Exp $
+* $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.60 2007/03/13 00:33:42 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,8 +46,7 @@ MemoryContext MessageContext = NULL;
MemoryContext TopTransactionContext = NULL;
MemoryContext CurTransactionContext = NULL;
-/* These two are transient links to contexts owned by other objects: */
-MemoryContext QueryContext = NULL;
+/* This is a transient link to the active portal's memory context: */
MemoryContext PortalContext = NULL;

src/backend/utils/mmgr/portalmem.c

@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
-* $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.99 2007/02/20 17:32:17 tgl Exp $
+* $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.100 2007/03/13 00:33:42 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -149,9 +149,9 @@ GetPortalByName(const char *name)
* cases should occur in present usages of this function.
*
* Copes if given a list of Querys --- can't happen in a portal, but this
-* code also supports prepared statements, which need both cases.
+* code also supports plancache.c, which needs both cases.
*
-* Note: the reason this is just handed a List is so that prepared statements
+* Note: the reason this is just handed a List is so that plancache.c
* can share the code. For use with a portal, use PortalGetPrimaryStmt
* rather than calling this directly.
*/
@@ -275,9 +275,17 @@ CreateNewPortal(void)
*
* Notes: commandTag shall be NULL if and only if the original query string
* (before rewriting) was an empty string. Also, the passed commandTag must
-be a pointer to a constant string, since it is not copied. The caller is
-responsible for ensuring that the passed prepStmtName (if any), sourceText
-(if any), and plan trees have adequate lifetime.
+be a pointer to a constant string, since it is not copied. However,
+prepStmtName and sourceText, if provided, are copied into the portal's
+heap context for safekeeping.
+
+If cplan is provided, then it is a cached plan containing the stmts,
+and the caller must have done RevalidateCachedPlan(), causing a refcount
+increment. The refcount will be released when the portal is destroyed.
+
+If cplan is NULL, then it is the caller's responsibility to ensure that
+the passed plan trees have adequate lifetime. Typically this is done by
+copying them into the portal's heap context.
*/
void
PortalDefineQuery(Portal portal,
@@ -285,18 +293,35 @@ PortalDefineQuery(Portal portal,
const char *sourceText,
const char *commandTag,
List *stmts,
-MemoryContext queryContext)
+CachedPlan *cplan)
{
AssertArg(PortalIsValid(portal));
-AssertState(portal->queryContext == NULL); /* else defined already */
+AssertState(portal->status == PORTAL_NEW);
Assert(commandTag != NULL || stmts == NIL);
-portal->prepStmtName = prepStmtName;
-portal->sourceText = sourceText;
+portal->prepStmtName = prepStmtName ?
+MemoryContextStrdup(PortalGetHeapMemory(portal), prepStmtName) : NULL;
+portal->sourceText = sourceText ?
+MemoryContextStrdup(PortalGetHeapMemory(portal), sourceText) : NULL;
portal->commandTag = commandTag;
portal->stmts = stmts;
-portal->queryContext = queryContext;
+portal->cplan = cplan;
portal->status = PORTAL_DEFINED;
}
+/*
+* PortalReleaseCachedPlan
+* Release a portal's reference to its cached plan, if any.
+*/
+static void
+PortalReleaseCachedPlan(Portal portal)
+{
+if (portal->cplan)
+{
+ReleaseCachedPlan(portal->cplan, false);
+portal->cplan = NULL;
+}
+}
/*
@@ -356,6 +381,10 @@ PortalDrop(Portal portal, bool isTopCommit)
if (PointerIsValid(portal->cleanup))
(*portal->cleanup) (portal);
+/* drop cached plan reference, if any */
+if (portal->cplan)
+PortalReleaseCachedPlan(portal);
/*
* Release any resources still attached to the portal. There are several
* cases being covered here:
@@ -423,29 +452,6 @@ PortalDrop(Portal portal, bool isTopCommit)
pfree(portal);
}
-/*
-* DropDependentPortals
-* Drop any portals using the specified context as queryContext.
-*
-* This is normally used to make sure we can safely drop a prepared statement.
-*/
-void
-DropDependentPortals(MemoryContext queryContext)
-{
-HASH_SEQ_STATUS status;
-PortalHashEnt *hentry;
-hash_seq_init(&status, PortalHashTable);
-while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
-{
-Portal portal = hentry->portal;
-if (portal->queryContext == queryContext)
-PortalDrop(portal, false);
-}
-}
/*
* Pre-commit processing for portals.
@@ -485,6 +491,10 @@ CommitHoldablePortals(void)
PortalCreateHoldStore(portal);
PersistHoldablePortal(portal);
+/* drop cached plan reference, if any */
+if (portal->cplan)
+PortalReleaseCachedPlan(portal);
/*
* Any resources belonging to the portal will be released in the
* upcoming transaction-wide cleanup; the portal will no longer
@@ -630,6 +640,10 @@ AtAbort_Portals(void)
portal->cleanup = NULL;
}
+/* drop cached plan reference, if any */
+if (portal->cplan)
+PortalReleaseCachedPlan(portal);
/*
* Any resources belonging to the portal will be released in the
* upcoming transaction-wide cleanup; they will be gone before we run
@@ -769,6 +783,10 @@ AtSubAbort_Portals(SubTransactionId mySubid,
portal->cleanup = NULL;
}
+/* drop cached plan reference, if any */
+if (portal->cplan)
+PortalReleaseCachedPlan(portal);
/*
* Any resources belonging to the portal will be released in the
* upcoming transaction-wide cleanup; they will be gone before we
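
Taken together, the new convention for a portal built from a cached plan
looks roughly like this (a sketch; psrc, portal, and stmt_name are assumed
to exist, and error handling is omitted):

    cplan = RevalidateCachedPlan(psrc, false);  /* bumps the refcount */
    PortalDefineQuery(portal,
                      stmt_name,          /* copied into portal context */
                      psrc->query_string, /* likewise copied */
                      psrc->commandTag,
                      cplan->stmt_list,
                      cplan);             /* released again at PortalDrop */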

src/backend/utils/resowner/README

@@ -1,4 +1,4 @@
-$PostgreSQL: pgsql/src/backend/utils/resowner/README,v 1.4 2006/06/16 18:42:23 tgl Exp $
+$PostgreSQL: pgsql/src/backend/utils/resowner/README,v 1.5 2007/03/13 00:33:42 tgl Exp $
Notes about resource owners
---------------------------
@@ -60,12 +60,13 @@ subtransaction or portal. Therefore, the "release" operation on a child
ResourceOwner transfers lock ownership to the parent instead of actually
releasing the lock, if isCommit is true.
-Currently, ResourceOwners contain direct support for recording ownership
-of buffer pins, lmgr locks, and catcache, relcache, and tupdesc references.
-Other objects can be associated with a ResourceOwner by recording the address
-of the owning ResourceOwner in such an object. There is an API for other
-modules to get control during ResourceOwner release, so that they can scan
-their own data structures to find the objects that need to be deleted.
+Currently, ResourceOwners contain direct support for recording ownership of
+buffer pins, lmgr locks, and catcache, relcache, plancache, and tupdesc
+references. Other objects can be associated with a ResourceOwner by recording
+the address of the owning ResourceOwner in such an object. There is an API
+for other modules to get control during ResourceOwner release, so that they
+can scan their own data structures to find the objects that need to be
+deleted.
Whenever we are inside a transaction, the global variable
CurrentResourceOwner shows which resource owner should be assigned
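
The plancache support added to resowner.c below follows the same
enlarge-before-acquire discipline as the existing reference types; condensed
from RevalidateCachedPlan earlier in this commit (an illustrative rendering,
not a verbatim excerpt):

    /* reserve array space first, while running out of memory is harmless */
    ResourceOwnerEnlargePlanCacheRefs(CurrentResourceOwner);
    plan->refcount++;                           /* acquire the resource */
    ResourceOwnerRememberPlanCacheRef(CurrentResourceOwner, plan);
    /* ... use the plan ... */
    ReleaseCachedPlan(plan, true);  /* forgets the ref, then decrements */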

src/backend/utils/resowner/resowner.c

@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
-* $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.23 2007/01/05 22:19:47 momjian Exp $
+* $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.24 2007/03/13 00:33:42 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,6 +56,11 @@ typedef struct ResourceOwnerData
Relation *relrefs; /* dynamically allocated array */
int maxrelrefs; /* currently allocated array size */
+/* We have built-in support for remembering plancache references */
+int nplanrefs; /* number of owned plancache pins */
+CachedPlan **planrefs; /* dynamically allocated array */
+int maxplanrefs; /* currently allocated array size */
/* We have built-in support for remembering tupdesc references */
int ntupdescs; /* number of owned tupdesc references */
TupleDesc *tupdescs; /* dynamically allocated array */
@@ -90,6 +95,7 @@ static void ResourceOwnerReleaseInternal(ResourceOwner owner,
bool isCommit,
bool isTopLevel);
static void PrintRelCacheLeakWarning(Relation rel);
+static void PrintPlanCacheLeakWarning(CachedPlan *plan);
static void PrintTupleDescLeakWarning(TupleDesc tupdesc);
@@ -280,6 +286,13 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
PrintCatCacheListLeakWarning(owner->catlistrefs[owner->ncatlistrefs - 1]);
ReleaseCatCacheList(owner->catlistrefs[owner->ncatlistrefs - 1]);
}
+/* Ditto for plancache references */
+while (owner->nplanrefs > 0)
+{
+if (isCommit)
+PrintPlanCacheLeakWarning(owner->planrefs[owner->nplanrefs - 1]);
+ReleaseCachedPlan(owner->planrefs[owner->nplanrefs - 1], true);
+}
/* Ditto for tupdesc references */
while (owner->ntupdescs > 0)
{
@@ -316,6 +329,7 @@ ResourceOwnerDelete(ResourceOwner owner)
Assert(owner->ncatrefs == 0);
Assert(owner->ncatlistrefs == 0);
Assert(owner->nrelrefs == 0);
+Assert(owner->nplanrefs == 0);
Assert(owner->ntupdescs == 0);
/*
@@ -341,6 +355,8 @@ ResourceOwnerDelete(ResourceOwner owner)
pfree(owner->catlistrefs);
if (owner->relrefs)
pfree(owner->relrefs);
+if (owner->planrefs)
+pfree(owner->planrefs);
if (owner->tupdescs)
pfree(owner->tupdescs);
@@ -758,6 +774,86 @@ PrintRelCacheLeakWarning(Relation rel)
RelationGetRelationName(rel));
}
/*
* Make sure there is room for at least one more entry in a ResourceOwner's
* plancache reference array.
*
* This is separate from actually inserting an entry because if we run out
* of memory, it's critical to do so *before* acquiring the resource.
*/
void
ResourceOwnerEnlargePlanCacheRefs(ResourceOwner owner)
{
int newmax;
if (owner->nplanrefs < owner->maxplanrefs)
return; /* nothing to do */
if (owner->planrefs == NULL)
{
newmax = 16;
owner->planrefs = (CachedPlan **)
MemoryContextAlloc(TopMemoryContext, newmax * sizeof(CachedPlan *));
owner->maxplanrefs = newmax;
}
else
{
newmax = owner->maxplanrefs * 2;
owner->planrefs = (CachedPlan **)
repalloc(owner->planrefs, newmax * sizeof(CachedPlan *));
owner->maxplanrefs = newmax;
}
}
/*
* Remember that a plancache reference is owned by a ResourceOwner
*
* Caller must have previously done ResourceOwnerEnlargePlanCacheRefs()
*/
void
ResourceOwnerRememberPlanCacheRef(ResourceOwner owner, CachedPlan *plan)
{
Assert(owner->nplanrefs < owner->maxplanrefs);
owner->planrefs[owner->nplanrefs] = plan;
owner->nplanrefs++;
}
/*
* Forget that a plancache reference is owned by a ResourceOwner
*/
void
ResourceOwnerForgetPlanCacheRef(ResourceOwner owner, CachedPlan *plan)
{
CachedPlan **planrefs = owner->planrefs;
int np1 = owner->nplanrefs - 1;
int i;
for (i = np1; i >= 0; i--)
{
if (planrefs[i] == plan)
{
while (i < np1)
{
planrefs[i] = planrefs[i + 1];
i++;
}
owner->nplanrefs = np1;
return;
}
}
elog(ERROR, "plancache reference %p is not owned by resource owner %s",
plan, owner->name);
}
/*
* Debugging subroutine
*/
static void
PrintPlanCacheLeakWarning(CachedPlan *plan)
{
elog(WARNING, "plancache reference leak: plan %p not closed", plan);
}
/*
* Make sure there is room for at least one more entry in a ResourceOwner's
* tupdesc reference array.