mirror of https://github.com/postgres/postgres.git
pgindent run.
src/backend/optimizer/geqo/geqo_eval.c

@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.63 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.64 2003/08/04 00:43:19 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -48,9 +48,9 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
 /*
  * Because gimme_tree considers both left- and right-sided trees,
  * there is no difference between a tour (a,b,c,d,...) and a tour
- * (b,a,c,d,...) --- the same join orders will be considered.
- * To avoid redundant cost calculations, we simply reject tours where
- * tour[0] > tour[1], assigning them an artificially bad fitness.
+ * (b,a,c,d,...) --- the same join orders will be considered. To avoid
+ * redundant cost calculations, we simply reject tours where tour[0] >
+ * tour[1], assigning them an artificially bad fitness.
  *
  * (It would be better to tweak the GEQO logic to not generate such tours
  * in the first place, but I'm not sure of all the implications in the
@@ -65,8 +65,8 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
  *
  * Since geqo_eval() will be called many times, we can't afford to let
  * all that memory go unreclaimed until end of statement. Note we
- * make the temp context a child of the planner's normal context, so that
- * it will be freed even if we abort via ereport(ERROR).
+ * make the temp context a child of the planner's normal context, so
+ * that it will be freed even if we abort via ereport(ERROR).
  */
 mycontext = AllocSetContextCreate(CurrentMemoryContext,
 "GEQO",
@@ -76,9 +76,9 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
 oldcxt = MemoryContextSwitchTo(mycontext);
 
 /*
- * preserve root->join_rel_list, which gimme_tree changes; without this,
- * it'll be pointing at recycled storage after the MemoryContextDelete
- * below.
+ * preserve root->join_rel_list, which gimme_tree changes; without
+ * this, it'll be pointing at recycled storage after the
+ * MemoryContextDelete below.
  */
 savelist = root->join_rel_list;
 
@@ -151,9 +151,10 @@ gimme_tree(Query *root, List *initial_rels,
 
 /*
  * Construct a RelOptInfo representing the previous joinrel joined
- * to inner_rel. These are always inner joins. Note that we expect
- * the joinrel not to exist in root->join_rel_list yet, and so the
- * paths constructed for it will only include the ones we want.
+ * to inner_rel. These are always inner joins. Note that we
+ * expect the joinrel not to exist in root->join_rel_list yet, and
+ * so the paths constructed for it will only include the ones we
+ * want.
  */
 new_rel = make_join_rel(root, joinrel, inner_rel, JOIN_INNER);
 
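The hunk above only rewraps the comment; the guard it describes amounts to a one-line test. A minimal standalone sketch of the idea (a hypothetical helper, not the verbatim geqo_eval() body; it assumes Gene is an integer gene type and that DBL_MAX serves as the "artificially bad fitness"):

#include <float.h>

typedef int Gene;				/* stand-in for PostgreSQL's Gene typedef */

/*
 * Reject the mirror-image half of the tour space: (b,a,c,d,...) yields
 * exactly the join orders of (a,b,c,d,...), so only the tour with
 * tour[0] < tour[1] gets a real cost estimate.
 */
static double
tour_fitness_guard(const Gene *tour, int num_gene)
{
	if (num_gene >= 2 && tour[0] > tour[1])
		return DBL_MAX;			/* artificially bad fitness */
	return 0.0;					/* placeholder: real code costs the join tree */
}
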
src/backend/optimizer/geqo/geqo_main.c

@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.36 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.37 2003/08/04 00:43:19 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -229,8 +229,8 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
 
 
 /*
- * got the cheapest query tree processed by geqo;
- * first element of the population indicates the best query tree
+ * got the cheapest query tree processed by geqo; first element of the
+ * population indicates the best query tree
  */
 best_tour = (Gene *) pool->data[0].string;
 
src/backend/optimizer/geqo/geqo_misc.c

@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.37 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.38 2003/08/04 00:43:19 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -42,7 +42,7 @@ avg_pool(Pool *pool)
 
 /*
  * Since the pool may contain multiple occurrences of DBL_MAX, divide
- * by pool->size before summing, not after, to avoid overflow. This
+ * by pool->size before summing, not after, to avoid overflow. This
  * loses a little in speed and accuracy, but this routine is only used
  * for debug printouts, so we don't care that much.
  */
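A self-contained sketch of the divide-before-summing trick that comment describes (hypothetical names, not the actual avg_pool() code): dividing each term by n before accumulating keeps the running total finite even when several entries are DBL_MAX sentinels, at the small cost in speed and rounding accuracy the comment concedes.

#include <float.h>

static double
avg_worth(const double *worth, int n)
{
	double		cumulative = 0.0;
	int			i;

	for (i = 0; i < n; i++)
		cumulative += worth[i] / n;	/* divide first, then sum */
	return cumulative;
}
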
src/backend/optimizer/path/allpaths.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.104 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.105 2003/08/04 00:43:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -50,13 +50,13 @@ static void set_function_pathlist(Query *root, RelOptInfo *rel,
 static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
 List *initial_rels);
 static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
 static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
 static void compare_tlist_datatypes(List *tlist, List *colTypes,
- bool *differentTypes);
+ bool *differentTypes);
 static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
- bool *differentTypes);
+ bool *differentTypes);
 static void subquery_push_qual(Query *subquery, Index rti, Node *qual);
 static void recurse_push_qual(Node *setOp, Query *topquery,
 Index rti, Node *qual);
@@ -290,14 +290,14 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
 rel->rows += childrel->rows;
 if (childrel->width > rel->width)
 rel->width = childrel->width;
-
+
 childvars = FastListValue(&childrel->reltargetlist);
 foreach(parentvars, FastListValue(&rel->reltargetlist))
 {
- Var *parentvar = (Var *) lfirst(parentvars);
- Var *childvar = (Var *) lfirst(childvars);
- int parentndx = parentvar->varattno - rel->min_attr;
- int childndx = childvar->varattno - childrel->min_attr;
+ Var *parentvar = (Var *) lfirst(parentvars);
+ Var *childvar = (Var *) lfirst(childvars);
+ int parentndx = parentvar->varattno - rel->min_attr;
+ int childndx = childvar->varattno - childrel->min_attr;
 
 if (childrel->attr_widths[childndx] > rel->attr_widths[parentndx])
 rel->attr_widths[parentndx] = childrel->attr_widths[childndx];
@@ -343,8 +343,8 @@ set_subquery_pathlist(Query *root, RelOptInfo *rel,
  *
  * There are several cases where we cannot push down clauses.
  * Restrictions involving the subquery are checked by
- * subquery_is_pushdown_safe(). Restrictions on individual clauses are
- * checked by qual_is_pushdown_safe().
+ * subquery_is_pushdown_safe(). Restrictions on individual clauses
+ * are checked by qual_is_pushdown_safe().
  *
  * Non-pushed-down clauses will get evaluated as qpquals of the
  * SubqueryScan node.
@@ -725,15 +725,16 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
 vars = pull_var_clause(qual, false);
 foreach(vl, vars)
 {
- Var *var = (Var *) lfirst(vl);
+ Var *var = (Var *) lfirst(vl);
 List *tl;
 TargetEntry *tle = NULL;
+
 Assert(var->varno == rti);
 
 /*
  * We use a bitmapset to avoid testing the same attno more than
- * once. (NB: this only works because subquery outputs can't
- * have negative attnos.)
+ * once. (NB: this only works because subquery outputs can't have
+ * negative attnos.)
  */
 if (bms_is_member(var->varattno, tested))
 continue;
 
src/backend/optimizer/path/costsize.c

@@ -49,7 +49,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.111 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.112 2003/08/04 00:43:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -102,10 +102,10 @@ bool enable_hashjoin = true;
 
 
 static Selectivity estimate_hash_bucketsize(Query *root, Var *var,
- int nbuckets);
- static bool cost_qual_eval_walker(Node *node, QualCost *total);
+ int nbuckets);
+ static bool cost_qual_eval_walker(Node *node, QualCost * total);
 static Selectivity approx_selectivity(Query *root, List *quals,
- JoinType jointype);
+ JoinType jointype);
 static void set_rel_width(Query *root, RelOptInfo *rel);
 static double relation_byte_size(double tuples, int width);
 static double page_size(double tuples, int width);
@@ -358,13 +358,13 @@ cost_index(Path *path, Query *root,
  * Normally the indexquals will be removed from the list of restriction
  * clauses that we have to evaluate as qpquals, so we should subtract
  * their costs from baserestrictcost. But if we are doing a join then
- * some of the indexquals are join clauses and shouldn't be subtracted.
- * Rather than work out exactly how much to subtract, we don't subtract
- * anything.
+ * some of the indexquals are join clauses and shouldn't be
+ * subtracted. Rather than work out exactly how much to subtract, we
+ * don't subtract anything.
  *
  * XXX For a lossy index, not all the quals will be removed and so we
- * really shouldn't subtract their costs; but detecting that seems more
- * expensive than it's worth.
+ * really shouldn't subtract their costs; but detecting that seems
+ * more expensive than it's worth.
  */
 startup_cost += baserel->baserestrictcost.startup;
 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -433,8 +433,8 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel)
 /*
  * Cost of path is cost of evaluating the subplan, plus cost of
  * evaluating any restriction clauses that will be attached to the
- * SubqueryScan node, plus cpu_tuple_cost to account for selection
- * and projection overhead.
+ * SubqueryScan node, plus cpu_tuple_cost to account for selection and
+ * projection overhead.
  */
 path->startup_cost = baserel->subplan->startup_cost;
 path->total_cost = baserel->subplan->total_cost;
@@ -597,8 +597,9 @@ cost_material(Path *path,
 }
 
 /*
- * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
- * so that it doesn't appear worthwhile to materialize a bare seqscan.
+ * Also charge a small amount per extracted tuple. We use
+ * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
+ * a bare seqscan.
  */
 run_cost += cpu_tuple_cost * tuples;
 
@@ -631,17 +632,17 @@ cost_agg(Path *path, Query *root,
  * additional cpu_operator_cost per grouping column per input tuple
  * for grouping comparisons.
  *
- * We will produce a single output tuple if not grouping,
- * and a tuple per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise.
  *
  * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
  * input path is already sorted appropriately, AGG_SORTED should be
- * preferred (since it has no risk of memory overflow). This will happen
- * as long as the computed total costs are indeed exactly equal --- but
- * if there's roundoff error we might do the wrong thing. So be sure
- * that the computations below form the same intermediate values in the
- * same order.
+ * preferred (since it has no risk of memory overflow). This will
+ * happen as long as the computed total costs are indeed exactly equal
+ * --- but if there's roundoff error we might do the wrong thing. So
+ * be sure that the computations below form the same intermediate
+ * values in the same order.
  */
 if (aggstrategy == AGG_PLAIN)
 {
@@ -724,26 +725,26 @@ cost_nestloop(NestPath *path, Query *root)
 double outer_path_rows = PATH_ROWS(outer_path);
 double inner_path_rows = PATH_ROWS(inner_path);
 double ntuples;
- Selectivity joininfactor;
+ Selectivity joininfactor;
 
 if (!enable_nestloop)
 startup_cost += disable_cost;
 
 /*
- * If we're doing JOIN_IN then we will stop scanning inner tuples for an
- * outer tuple as soon as we have one match. Account for the effects of
- * this by scaling down the cost estimates in proportion to the expected
- * output size. (This assumes that all the quals attached to the join are
- * IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop scanning inner tuples for
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
  *
  * Note: it's probably bogus to use the normal selectivity calculation
  * here when either the outer or inner path is a UniquePath.
  */
 if (path->jointype == JOIN_IN)
 {
- Selectivity qual_selec = approx_selectivity(root, restrictlist,
+ Selectivity qual_selec = approx_selectivity(root, restrictlist,
 path->jointype);
- double qptuples;
+ double qptuples;
 
 qptuples = ceil(qual_selec * outer_path_rows * inner_path_rows);
 if (qptuples > path->path.parent->rows)
@@ -761,8 +762,8 @@ cost_nestloop(NestPath *path, Query *root)
  * before we can start returning tuples, so the join's startup cost is
  * their sum. What's not so clear is whether the inner path's
  * startup_cost must be paid again on each rescan of the inner path.
- * This is not true if the inner path is materialized or is a hashjoin,
- * but probably is true otherwise.
+ * This is not true if the inner path is materialized or is a
+ * hashjoin, but probably is true otherwise.
  */
 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
 run_cost += outer_path->total_cost - outer_path->startup_cost;
@@ -783,14 +784,15 @@ cost_nestloop(NestPath *path, Query *root)
 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
 
 /*
- * Compute number of tuples processed (not number emitted!).
- * If inner path is an indexscan, be sure to use its estimated output row
- * count, which may be lower than the restriction-clause-only row count of
- * its parent. (We don't include this case in the PATH_ROWS macro because
- * it applies *only* to a nestloop's inner relation.) Note: it is correct
- * to use the unadjusted inner_path_rows in the above calculation for
- * joininfactor, since otherwise we'd be double-counting the selectivity
- * of the join clause being used for the index.
+ * Compute number of tuples processed (not number emitted!). If inner
+ * path is an indexscan, be sure to use its estimated output row
+ * count, which may be lower than the restriction-clause-only row
+ * count of its parent. (We don't include this case in the PATH_ROWS
+ * macro because it applies *only* to a nestloop's inner relation.)
+ * Note: it is correct to use the unadjusted inner_path_rows in the
+ * above calculation for joininfactor, since otherwise we'd be
+ * double-counting the selectivity of the join clause being used for
+ * the index.
  */
 if (IsA(inner_path, IndexPath))
 inner_path_rows = ((IndexPath *) inner_path)->rows;
@@ -831,8 +833,8 @@ cost_mergejoin(MergePath *path, Query *root)
 Cost startup_cost = 0;
 Cost run_cost = 0;
 Cost cpu_per_tuple;
- Selectivity merge_selec;
- Selectivity qp_selec;
+ Selectivity merge_selec;
+ Selectivity qp_selec;
 QualCost merge_qual_cost;
 QualCost qp_qual_cost;
 RestrictInfo *firstclause;
@@ -847,7 +849,7 @@ cost_mergejoin(MergePath *path, Query *root)
 double rescanratio;
 Selectivity outerscansel,
 innerscansel;
- Selectivity joininfactor;
+ Selectivity joininfactor;
 Path sort_path; /* dummy for result of cost_sort */
 
 if (!enable_mergejoin)
@@ -856,7 +858,8 @@ cost_mergejoin(MergePath *path, Query *root)
 /*
  * Compute cost and selectivity of the mergequals and qpquals (other
  * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
  *
  * Note: it's probably bogus to use the normal selectivity calculation
  * here when either the outer or inner path is a UniquePath.
@@ -876,29 +879,30 @@ cost_mergejoin(MergePath *path, Query *root)
 qptuples = ceil(mergejointuples * qp_selec);
 
 /*
- * When there are equal merge keys in the outer relation, the mergejoin
- * must rescan any matching tuples in the inner relation. This means
- * re-fetching inner tuples. Our cost model for this is that a re-fetch
- * costs the same as an original fetch, which is probably an overestimate;
- * but on the other hand we ignore the bookkeeping costs of mark/restore.
- * Not clear if it's worth developing a more refined model.
+ * When there are equal merge keys in the outer relation, the
+ * mergejoin must rescan any matching tuples in the inner relation.
+ * This means re-fetching inner tuples. Our cost model for this is
+ * that a re-fetch costs the same as an original fetch, which is
+ * probably an overestimate; but on the other hand we ignore the
+ * bookkeeping costs of mark/restore. Not clear if it's worth
+ * developing a more refined model.
  *
  * The number of re-fetches can be estimated approximately as size of
- * merge join output minus size of inner relation. Assume that the
- * distinct key values are 1, 2, ..., and denote the number of values of
- * each key in the outer relation as m1, m2, ...; in the inner relation,
- * n1, n2, ... Then we have
+ * merge join output minus size of inner relation. Assume that the
+ * distinct key values are 1, 2, ..., and denote the number of values
+ * of each key in the outer relation as m1, m2, ...; in the inner
+ * relation, n1, n2, ... Then we have
  *
- * size of join = m1 * n1 + m2 * n2 + ...
+ * size of join = m1 * n1 + m2 * n2 + ...
  *
- * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
- * = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
- * = size of join - size of inner relation
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
+ * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * relation
  *
  * This equation works correctly for outer tuples having no inner match
  * (nk = 0), but not for inner tuples having no outer match (mk = 0);
  * we are effectively subtracting those from the number of rescanned
- * tuples, when we should not. Can we do better without expensive
+ * tuples, when we should not. Can we do better without expensive
  * selectivity computations?
  */
 if (IsA(outer_path, UniquePath))
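As a quick arithmetic check of the rescan identity quoted in the hunk above — this is an illustration with made-up key frequencies, not planner code — take m = {2, 3} in the outer relation and n = {4, 1} in the inner:

#include <stdio.h>

int
main(void)
{
	int			m[] = {2, 3};	/* occurrences of each key value, outer */
	int			n[] = {4, 1};	/* occurrences of each key value, inner */
	int			joinsize = 0;
	int			rescans = 0;
	int			innersize = 0;

	for (int k = 0; k < 2; k++)
	{
		joinsize += m[k] * n[k];		/* m1*n1 + m2*n2 + ... */
		rescans += (m[k] - 1) * n[k];	/* (m1-1)*n1 + (m2-1)*n2 + ... */
		innersize += n[k];				/* n1 + n2 + ... */
	}
	/* prints: join=11 rescans=6 join-minus-inner=6 */
	printf("join=%d rescans=%d join-minus-inner=%d\n",
		   joinsize, rescans, joinsize - innersize);
	return 0;
}
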
@@ -953,8 +957,9 @@ cost_mergejoin(MergePath *path, Query *root)
 
 /*
  * Readjust scan selectivities to account for above rounding. This is
- * normally an insignificant effect, but when there are only a few rows
- * in the inputs, failing to do this makes for a large percentage error.
+ * normally an insignificant effect, but when there are only a few
+ * rows in the inputs, failing to do this makes for a large percentage
+ * error.
  */
 outerscansel = outer_rows / outer_path_rows;
 innerscansel = inner_rows / inner_path_rows;
@@ -1002,11 +1007,11 @@ cost_mergejoin(MergePath *path, Query *root)
 /* CPU costs */
 
 /*
- * If we're doing JOIN_IN then we will stop outputting inner
- * tuples for an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop outputting inner tuples
+ * for an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
  */
 if (path->jpath.jointype == JOIN_IN &&
 qptuples > path->jpath.path.parent->rows)
@@ -1017,9 +1022,9 @@ cost_mergejoin(MergePath *path, Query *root)
 /*
  * The number of tuple comparisons needed is approximately number of
  * outer rows plus number of inner rows plus number of rescanned
- * tuples (can we refine this?). At each one, we need to evaluate
- * the mergejoin quals. NOTE: JOIN_IN mode does not save any work
- * here, so do NOT include joininfactor.
+ * tuples (can we refine this?). At each one, we need to evaluate the
+ * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
+ * so do NOT include joininfactor.
  */
 startup_cost += merge_qual_cost.startup;
 run_cost += merge_qual_cost.per_tuple *
@@ -1028,7 +1033,7 @@ cost_mergejoin(MergePath *path, Query *root)
 /*
  * For each tuple that gets through the mergejoin proper, we charge
  * cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
  * since not all of the quals may get evaluated at each tuple.) This
  * work is skipped in JOIN_IN mode, so apply the factor.
  */
@@ -1059,8 +1064,8 @@ cost_hashjoin(HashPath *path, Query *root)
 Cost startup_cost = 0;
 Cost run_cost = 0;
 Cost cpu_per_tuple;
- Selectivity hash_selec;
- Selectivity qp_selec;
+ Selectivity hash_selec;
+ Selectivity qp_selec;
 QualCost hash_qual_cost;
 QualCost qp_qual_cost;
 double hashjointuples;
@@ -1076,7 +1081,7 @@ cost_hashjoin(HashPath *path, Query *root)
 int physicalbuckets;
 int numbatches;
 Selectivity innerbucketsize;
- Selectivity joininfactor;
+ Selectivity joininfactor;
 List *hcl;
 List *qpquals;
 
@@ -1086,7 +1091,8 @@ cost_hashjoin(HashPath *path, Query *root)
 /*
  * Compute cost and selectivity of the hashquals and qpquals (other
  * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
  *
  * Note: it's probably bogus to use the normal selectivity calculation
  * here when either the outer or inner path is a UniquePath.
@@ -1114,9 +1120,9 @@ cost_hashjoin(HashPath *path, Query *root)
  * Cost of computing hash function: must do it once per input tuple.
  * We charge one cpu_operator_cost for each column's hash function.
  *
- * XXX when a hashclause is more complex than a single operator,
- * we really should charge the extra eval costs of the left or right
- * side, as appropriate, here. This seems more work than it's worth
+ * XXX when a hashclause is more complex than a single operator, we
+ * really should charge the extra eval costs of the left or right
+ * side, as appropriate, here. This seems more work than it's worth
  * at the moment.
  */
 startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
@@ -1131,13 +1137,13 @@ cost_hashjoin(HashPath *path, Query *root)
 
 /*
  * Determine bucketsize fraction for inner relation. We use the
- * smallest bucketsize estimated for any individual hashclause;
- * this is undoubtedly conservative.
+ * smallest bucketsize estimated for any individual hashclause; this
+ * is undoubtedly conservative.
  *
- * BUT: if inner relation has been unique-ified, we can assume it's
- * good for hashing. This is important both because it's the right
- * answer, and because we avoid contaminating the cache with a value
- * that's wrong for non-unique-ified paths.
+ * BUT: if inner relation has been unique-ified, we can assume it's good
+ * for hashing. This is important both because it's the right answer,
+ * and because we avoid contaminating the cache with a value that's
+ * wrong for non-unique-ified paths.
  */
 if (IsA(inner_path, UniquePath))
 innerbucketsize = 1.0 / virtualbuckets;
@@ -1152,12 +1158,13 @@ cost_hashjoin(HashPath *path, Query *root)
 Assert(IsA(restrictinfo, RestrictInfo));
 
 /*
- * First we have to figure out which side of the hashjoin clause
- * is the inner side.
+ * First we have to figure out which side of the hashjoin
+ * clause is the inner side.
  *
  * Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the bucketsize estimate in the
- * RestrictInfo node to avoid repeated lookups of statistics.
+ * planning a large query, we cache the bucketsize estimate in
+ * the RestrictInfo node to avoid repeated lookups of
+ * statistics.
  */
 if (bms_is_subset(restrictinfo->right_relids,
 inner_path->parent->relids))
@@ -1169,7 +1176,7 @@ cost_hashjoin(HashPath *path, Query *root)
 /* not cached yet */
 thisbucketsize =
 estimate_hash_bucketsize(root,
- (Var *) get_rightop(restrictinfo->clause),
+ (Var *) get_rightop(restrictinfo->clause),
 virtualbuckets);
 restrictinfo->right_bucketsize = thisbucketsize;
 }
@@ -1185,7 +1192,7 @@ cost_hashjoin(HashPath *path, Query *root)
 /* not cached yet */
 thisbucketsize =
 estimate_hash_bucketsize(root,
- (Var *) get_leftop(restrictinfo->clause),
+ (Var *) get_leftop(restrictinfo->clause),
 virtualbuckets);
 restrictinfo->left_bucketsize = thisbucketsize;
 }
@@ -1217,11 +1224,11 @@ cost_hashjoin(HashPath *path, Query *root)
 /* CPU costs */
 
 /*
- * If we're doing JOIN_IN then we will stop comparing inner
- * tuples to an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop comparing inner tuples to
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
  */
 if (path->jpath.jointype == JOIN_IN &&
 qptuples > path->jpath.path.parent->rows)
@@ -1243,7 +1250,7 @@ cost_hashjoin(HashPath *path, Query *root)
 /*
  * For each tuple that gets through the hashjoin proper, we charge
  * cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
  * since not all of the quals may get evaluated at each tuple.)
  */
 startup_cost += qp_qual_cost.startup;
@@ -1254,14 +1261,14 @@ cost_hashjoin(HashPath *path, Query *root)
  * Bias against putting larger relation on inside. We don't want an
  * absolute prohibition, though, since larger relation might have
  * better bucketsize --- and we can't trust the size estimates
- * unreservedly, anyway. Instead, inflate the run cost by the
- * square root of the size ratio. (Why square root? No real good
- * reason, but it seems reasonable...)
+ * unreservedly, anyway. Instead, inflate the run cost by the square
+ * root of the size ratio. (Why square root? No real good reason,
+ * but it seems reasonable...)
  *
- * Note: before 7.4 we implemented this by inflating startup cost;
- * but if there's a disable_cost component in the input paths'
- * startup cost, that unfairly penalizes the hash. Probably it'd
- * be better to keep track of disable penalty separately from cost.
+ * Note: before 7.4 we implemented this by inflating startup cost; but if
+ * there's a disable_cost component in the input paths' startup cost,
+ * that unfairly penalizes the hash. Probably it'd be better to keep
+ * track of disable penalty separately from cost.
  */
 if (innerbytes > outerbytes && outerbytes > 0)
 run_cost *= sqrt(innerbytes / outerbytes);
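To get a feel for how gentle that square-root bias is, here is a tiny illustration with invented numbers (the variable names mirror the comment; this is not the surrounding cost_hashjoin() code): an inner relation four times the size of the outer only doubles the run cost.

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		run_cost = 100.0;
	double		innerbytes = 4e6;
	double		outerbytes = 1e6;

	if (innerbytes > outerbytes && outerbytes > 0)
		run_cost *= sqrt(innerbytes / outerbytes);	/* sqrt(4) = 2 */
	printf("biased run_cost = %.1f\n", run_cost);	/* prints 200.0 */
	return 0;
}
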
@@ -1442,7 +1449,7 @@ estimate_hash_bucketsize(Query *root, Var *var, int nbuckets)
  * and a per-evaluation component.
  */
 void
- cost_qual_eval(QualCost *cost, List *quals)
+ cost_qual_eval(QualCost * cost, List *quals)
 {
 List *l;
 
@@ -1484,7 +1491,7 @@ cost_qual_eval(QualCost *cost, List *quals)
 }
 
 static bool
- cost_qual_eval_walker(Node *node, QualCost *total)
+ cost_qual_eval_walker(Node *node, QualCost * total)
 {
 if (node == NULL)
 return false;
@@ -1502,9 +1509,7 @@ cost_qual_eval_walker(Node *node, QualCost *total)
 IsA(node, OpExpr) ||
 IsA(node, DistinctExpr) ||
 IsA(node, NullIfExpr))
- {
 total->per_tuple += cpu_operator_cost;
- }
 else if (IsA(node, ScalarArrayOpExpr))
 {
 /* should charge more than 1 op cost, but how many? */
@@ -1519,47 +1524,48 @@ cost_qual_eval_walker(Node *node, QualCost *total)
 {
 /*
  * A subplan node in an expression typically indicates that the
- * subplan will be executed on each evaluation, so charge accordingly.
- * (Sub-selects that can be executed as InitPlans have already been
- * removed from the expression.)
+ * subplan will be executed on each evaluation, so charge
+ * accordingly. (Sub-selects that can be executed as InitPlans
+ * have already been removed from the expression.)
  *
  * An exception occurs when we have decided we can implement the
  * subplan by hashing.
  *
  */
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
 Plan *plan = subplan->plan;
+
 if (subplan->useHashTable)
 {
 /*
  * If we are using a hash table for the subquery outputs, then
- * the cost of evaluating the query is a one-time cost.
- * We charge one cpu_operator_cost per tuple for the work of
+ * the cost of evaluating the query is a one-time cost. We
+ * charge one cpu_operator_cost per tuple for the work of
  * loading the hashtable, too.
  */
 total->startup += plan->total_cost +
 cpu_operator_cost * plan->plan_rows;
 
 /*
  * The per-tuple costs include the cost of evaluating the
- * lefthand expressions, plus the cost of probing the hashtable.
- * Recursion into the exprs list will handle the lefthand
- * expressions properly, and will count one cpu_operator_cost
- * for each comparison operator. That is probably too low for
- * the probing cost, but it's hard to make a better estimate,
- * so live with it for now.
+ * lefthand expressions, plus the cost of probing the
+ * hashtable. Recursion into the exprs list will handle the
+ * lefthand expressions properly, and will count one
+ * cpu_operator_cost for each comparison operator. That is
+ * probably too low for the probing cost, but it's hard to
+ * make a better estimate, so live with it for now.
  */
 }
 else
 {
 /*
  * Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output
- * we will actually need to scan. NOTE: this logic should
- * agree with the estimates used by make_subplan() in
+ * evaluation. We need to estimate how much of the output we
+ * will actually need to scan. NOTE: this logic should agree
+ * with the estimates used by make_subplan() in
  * plan/subselect.c.
  */
- Cost plan_run_cost = plan->total_cost - plan->startup_cost;
+ Cost plan_run_cost = plan->total_cost - plan->startup_cost;
 
 if (subplan->subLinkType == EXISTS_SUBLINK)
 {
@@ -1579,23 +1585,20 @@ cost_qual_eval_walker(Node *node, QualCost *total)
 /* assume we need all tuples */
 total->per_tuple += plan_run_cost;
 }
+
 /*
- * Also account for subplan's startup cost.
- * If the subplan is uncorrelated or undirect correlated,
- * AND its topmost node is a Sort or Material node, assume
- * that we'll only need to pay its startup cost once;
- * otherwise assume we pay the startup cost every time.
+ * Also account for subplan's startup cost. If the subplan is
+ * uncorrelated or undirect correlated, AND its topmost node
+ * is a Sort or Material node, assume that we'll only need to
+ * pay its startup cost once; otherwise assume we pay the
+ * startup cost every time.
  */
 if (subplan->parParam == NIL &&
 (IsA(plan, Sort) ||
 IsA(plan, Material)))
- {
 total->startup += plan->startup_cost;
- }
 else
- {
 total->per_tuple += plan->startup_cost;
- }
 }
 }
 
@@ -1745,7 +1748,7 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
 UniquePath *upath;
 
 /*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity. Note that we are only considering
  * clauses that become restriction clauses at this join level; we are
  * not double-counting them because they were not considered in
  * estimating the sizes of the component rels.
@@ -1758,8 +1761,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
 /*
  * Basically, we multiply size of Cartesian product by selectivity.
  *
- * If we are doing an outer join, take that into account: the output
- * must be at least as large as the non-nullable input. (Is there any
+ * If we are doing an outer join, take that into account: the output must
+ * be at least as large as the non-nullable input. (Is there any
  * chance of being even smarter?)
  *
  * For JOIN_IN and variants, the Cartesian product is figured with
@@ -1823,8 +1826,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
 rel->rows = temp;
 
 /*
- * We need not compute the output width here, because build_joinrel_tlist
- * already did.
+ * We need not compute the output width here, because
+ * build_joinrel_tlist already did.
  */
 }
 
@@ -1911,11 +1914,14 @@ set_rel_width(Query *root, RelOptInfo *rel)
 
 Assert(IsA(var, Var));
 
- /* The width probably hasn't been cached yet, but may as well check */
+ /*
+ * The width probably hasn't been cached yet, but may as well
+ * check
+ */
 if (rel->attr_widths[ndx] > 0)
 {
- tuple_width += rel->attr_widths[ndx];
- continue;
+ tuple_width += rel->attr_widths[ndx];
+ continue;
 }
 
 relid = getrelid(var->varno, root->rtable);
@@ -1931,8 +1937,8 @@ set_rel_width(Query *root, RelOptInfo *rel)
 }
 
 /*
- * Not a plain relation, or can't find statistics for it.
- * Estimate using just the type info.
+ * Not a plain relation, or can't find statistics for it. Estimate
+ * using just the type info.
  */
 item_width = get_typavgwidth(var->vartype, var->vartypmod);
 Assert(item_width > 0);
 
src/backend/optimizer/path/indxpath.c

@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.145 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.146 2003/08/04 00:43:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -64,9 +64,9 @@ static List *group_clauses_by_indexkey_for_join(Query *root,
 Relids outer_relids,
 JoinType jointype, bool isouterjoin);
 static bool match_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
 int indexcol, Oid opclass, Expr *clause);
- static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
- int indexcol, Oid opclass, Expr *clause);
+ static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
+ int indexcol, Oid opclass, Expr *clause);
 static Oid indexable_operator(Expr *clause, Oid opclass,
 bool indexkey_on_left);
 static bool pred_test(List *predicate_list, List *restrictinfo_list,
@@ -77,8 +77,8 @@ static bool pred_test_recurse_pred(Expr *predicate, Node *clause);
 static bool pred_test_simple_clause(Expr *predicate, Node *clause);
 static Relids indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index);
 static Path *make_innerjoin_index_path(Query *root,
- RelOptInfo *rel, IndexOptInfo *index,
- List *clausegroups);
+ RelOptInfo *rel, IndexOptInfo *index,
+ List *clausegroups);
 static bool match_index_to_operand(Node *operand, int indexcol,
 RelOptInfo *rel, IndexOptInfo *index);
 static bool match_special_index_operator(Expr *clause, Oid opclass,
@@ -87,7 +87,7 @@ static List *expand_indexqual_condition(Expr *clause, Oid opclass);
 static List *prefix_quals(Node *leftop, Oid opclass,
 Const *prefix, Pattern_Prefix_Status pstatus);
 static List *network_prefix_quals(Node *leftop, Oid expr_op, Oid opclass,
- Datum rightop);
+ Datum rightop);
 static Datum string_to_datum(const char *str, Oid datatype);
 static Const *string_to_const(const char *str, Oid datatype);
 
@@ -114,7 +114,7 @@ static Const *string_to_const(const char *str, Oid datatype);
  * scan this routine deems potentially interesting for the current query.
  *
  * We also determine the set of other relids that participate in join
- * clauses that could be used with each index. The actually best innerjoin
+ * clauses that could be used with each index. The actually best innerjoin
  * path will be generated for each outer relation later on, but knowing the
  * set of potential otherrels allows us to identify equivalent outer relations
  * and avoid repeated computation.
@@ -219,10 +219,11 @@ create_index_paths(Query *root, RelOptInfo *rel)
 
 /*
  * 6. Examine join clauses to see which ones are potentially
- * usable with this index, and generate the set of all other relids
- * that participate in such join clauses. We'll use this set later
- * to recognize outer rels that are equivalent for joining purposes.
- * We compute both per-index and overall-for-relation sets.
+ * usable with this index, and generate the set of all other
+ * relids that participate in such join clauses. We'll use this
+ * set later to recognize outer rels that are equivalent for
+ * joining purposes. We compute both per-index and
+ * overall-for-relation sets.
  */
 join_outerrelids = indexable_outerrelids(rel, index);
 index->outer_relids = join_outerrelids;
@@ -274,7 +275,7 @@ match_index_orclauses(RelOptInfo *rel,
  */
 restrictinfo->subclauseindices =
 match_index_orclause(rel, index,
- ((BoolExpr *) restrictinfo->clause)->args,
+ ((BoolExpr *) restrictinfo->clause)->args,
 restrictinfo->subclauseindices);
 }
 }
@@ -422,6 +423,7 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
 Oid *classes = index->classlist;
 
 FastListInit(&quals);
+
 /*
  * Extract relevant indexclauses in indexkey order. This is
  * essentially just like group_clauses_by_indexkey() except that the
@@ -576,7 +578,7 @@ group_clauses_by_indexkey(RelOptInfo *rel, IndexOptInfo *index)
  *
  * This is much like group_clauses_by_indexkey(), but we consider both
  * join and restriction clauses. Any joinclause that uses only otherrels
- * in the specified outer_relids is fair game. But there must be at least
+ * in the specified outer_relids is fair game. But there must be at least
  * one such joinclause in the final list, otherwise we return NIL indicating
  * that this index isn't interesting as an inner indexscan. (A scan using
  * only restriction clauses shouldn't be created here, because a regular Path
@@ -641,10 +643,10 @@ group_clauses_by_indexkey_for_join(Query *root,
  */
 if (FastListValue(&clausegroup) != NIL)
 {
- List *nl;
+ List *nl;
 
 nl = remove_redundant_join_clauses(root,
- FastListValue(&clausegroup),
+ FastListValue(&clausegroup),
 jointype);
 FastListFromList(&clausegroup, nl);
 }
@@ -736,9 +738,9 @@ match_clause_to_indexcol(RelOptInfo *rel,
 return false;
 
 /*
- * Check for clauses of the form:
- * (indexkey operator constant) or (constant operator indexkey).
- * Anything that is a "pseudo constant" expression will do.
+ * Check for clauses of the form: (indexkey operator constant) or
+ * (constant operator indexkey). Anything that is a "pseudo constant"
+ * expression will do.
  */
 if (match_index_to_operand(leftop, indexcol, rel, index) &&
 is_pseudo_constant_clause(rightop))
@@ -747,8 +749,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
 return true;
 
 /*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
  */
 if (match_special_index_operator(clause, opclass, true))
 return true;
@@ -762,8 +764,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
 return true;
 
 /*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
  */
 if (match_special_index_operator(clause, opclass, false))
 return true;
@@ -824,10 +826,10 @@ match_join_clause_to_indexcol(RelOptInfo *rel,
 return false;
 
 /*
- * Check for an indexqual that could be handled by a nestloop
- * join. We need the index key to be compared against an
- * expression that uses none of the indexed relation's vars and
- * contains no volatile functions.
+ * Check for an indexqual that could be handled by a nestloop join. We
+ * need the index key to be compared against an expression that uses
+ * none of the indexed relation's vars and contains no volatile
+ * functions.
  */
 if (match_index_to_operand(leftop, indexcol, rel, index))
 {
@@ -1174,10 +1176,11 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
  * 1. Find "btree" strategy numbers for the pred_op and clause_op.
  *
  * We must find a btree opclass that contains both operators, else the
- * implication can't be determined. If there are multiple such opclasses,
- * assume we can use any one to determine the logical relationship of the
- * two operators and the correct corresponding test operator. This should
- * work for any logically consistent opclasses.
+ * implication can't be determined. If there are multiple such
+ * opclasses, assume we can use any one to determine the logical
+ * relationship of the two operators and the correct corresponding
+ * test operator. This should work for any logically consistent
+ * opclasses.
  */
 catlist = SearchSysCacheList(AMOPOPID, 1,
 ObjectIdGetDatum(pred_op),
@@ -1269,7 +1272,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
 
 /* And execute it. */
 test_result = ExecEvalExprSwitchContext(test_exprstate,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
 &isNull, NULL);
 
 /* Get back to outer memory context */
@@ -1295,7 +1298,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
 /*
  * indexable_outerrelids
  * Finds all other relids that participate in any indexable join clause
- * for the specified index. Returns a set of relids.
+ * for the specified index. Returns a set of relids.
  *
  * 'rel' is the relation for which 'index' is defined
  */
@@ -1314,16 +1317,16 @@ indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index)
 /*
  * Examine each joinclause in the JoinInfo node's list to see if
  * it matches any key of the index. If so, add the JoinInfo's
- * otherrels to the result. We can skip examining other joinclauses
- * in the same list as soon as we find a match (since by definition
- * they all have the same otherrels).
+ * otherrels to the result. We can skip examining other
+ * joinclauses in the same list as soon as we find a match (since
+ * by definition they all have the same otherrels).
  */
 foreach(j, joininfo->jinfo_restrictinfo)
 {
 RestrictInfo *rinfo = (RestrictInfo *) lfirst(j);
- Expr *clause = rinfo->clause;
- int indexcol = 0;
- Oid *classes = index->classlist;
+ Expr *clause = rinfo->clause;
+ int indexcol = 0;
+ Oid *classes = index->classlist;
 
 do
 {
@@ -1398,11 +1401,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 default:
 return NULL;
 }
+
 /*
  * If there are no indexable joinclauses for this rel, exit quickly.
  */
 if (bms_is_empty(rel->index_outer_relids))
 return NULL;
+
 /*
  * Otherwise, we have to do path selection in the memory context of
  * the given rel, so that any created path can be safely attached to
@@ -1410,10 +1415,11 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
  * issue for normal planning, but it is an issue for GEQO planning.)
  */
 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
+
 /*
- * Intersect the given outer_relids with index_outer_relids
- * to find the set of outer relids actually relevant for this index.
- * If there are none, again we can fail immediately.
+ * Intersect the given outer_relids with index_outer_relids to find
+ * the set of outer relids actually relevant for this index. If there
+ * are none, again we can fail immediately.
  */
 outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
 if (bms_is_empty(outer_relids))
@@ -1422,11 +1428,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 MemoryContextSwitchTo(oldcontext);
 return NULL;
 }
+
 /*
  * Look to see if we already computed the result for this set of
- * relevant outerrels. (We include the isouterjoin status in the
+ * relevant outerrels. (We include the isouterjoin status in the
  * cache lookup key for safety. In practice I suspect this is not
- * necessary because it should always be the same for a given innerrel.)
+ * necessary because it should always be the same for a given
+ * innerrel.)
  */
 foreach(jlist, rel->index_inner_paths)
 {
@@ -1441,15 +1449,15 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 }
 
 /*
- * For each index of the rel, find the best path; then choose the
- * best overall. We cache the per-index results as well as the overall
- * result. (This is useful because different indexes may have different
- * relevant outerrel sets, so different overall outerrel sets might still
- * map to the same computation for a given index.)
+ * For each index of the rel, find the best path; then choose the best
+ * overall. We cache the per-index results as well as the overall
+ * result. (This is useful because different indexes may have
+ * different relevant outerrel sets, so different overall outerrel
+ * sets might still map to the same computation for a given index.)
  */
 foreach(ilist, rel->indexlist)
 {
- IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
 Relids index_outer_relids;
 Path *path = NULL;
 
@@ -1461,6 +1469,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 bms_free(index_outer_relids);
 continue;
 }
+
 /*
  * Look to see if we already computed the result for this index.
  */
@@ -1471,7 +1480,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 info->isouterjoin == isouterjoin)
 {
 path = info->best_innerpath;
- bms_free(index_outer_relids); /* not needed anymore */
+ bms_free(index_outer_relids); /* not needed anymore */
 break;
 }
 }
@@ -1484,9 +1493,9 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 clausegroups = group_clauses_by_indexkey_for_join(root,
 rel,
 index,
- index_outer_relids,
+ index_outer_relids,
 jointype,
- isouterjoin);
+ isouterjoin);
 if (clausegroups)
 {
 /* make the path */
@@ -1548,9 +1557,9 @@ make_innerjoin_index_path(Query *root,
 pathnode->path.parent = rel;
 
 /*
- * There's no point in marking the path with any pathkeys, since
- * it will only ever be used as the inner path of a nestloop, and
- * so its ordering does not matter.
+ * There's no point in marking the path with any pathkeys, since it
+ * will only ever be used as the inner path of a nestloop, and so its
+ * ordering does not matter.
  */
 pathnode->path.pathkeys = NIL;
 
@@ -1582,19 +1591,19 @@ make_innerjoin_index_path(Query *root,
 
 /*
  * We must compute the estimated number of output rows for the
- * indexscan. This is less than rel->rows because of the
- * additional selectivity of the join clauses. Since clausegroups
- * may contain both restriction and join clauses, we have to do a
- * set union to get the full set of clauses that must be
- * considered to compute the correct selectivity. (Without the union
- * operation, we might have some restriction clauses appearing twice,
- * which'd mislead restrictlist_selectivity into double-counting their
- * selectivity. However, since RestrictInfo nodes aren't copied when
- * linking them into different lists, it should be sufficient to use
- * pointer comparison to remove duplicates.)
+ * indexscan. This is less than rel->rows because of the additional
+ * selectivity of the join clauses. Since clausegroups may contain
+ * both restriction and join clauses, we have to do a set union to get
+ * the full set of clauses that must be considered to compute the
+ * correct selectivity. (Without the union operation, we might have
+ * some restriction clauses appearing twice, which'd mislead
+ * restrictlist_selectivity into double-counting their selectivity.
+ * However, since RestrictInfo nodes aren't copied when linking them
+ * into different lists, it should be sufficient to use pointer
+ * comparison to remove duplicates.)
  *
- * Always assume the join type is JOIN_INNER; even if some of the
- * join clauses come from other contexts, that's not our problem.
+ * Always assume the join type is JOIN_INNER; even if some of the join
+ * clauses come from other contexts, that's not our problem.
  */
 allclauses = set_ptrUnion(rel->baserestrictinfo, allclauses);
 pathnode->rows = rel->tuples *
@@ -1656,9 +1665,9 @@ match_index_to_operand(Node *operand,
 else
 {
 /*
- * Index expression; find the correct expression. (This search could
- * be avoided, at the cost of complicating all the callers of this
- * routine; doesn't seem worth it.)
+ * Index expression; find the correct expression. (This search
+ * could be avoided, at the cost of complicating all the callers
+ * of this routine; doesn't seem worth it.)
  */
 List *indexprs;
 int i;
@@ -1677,6 +1686,7 @@ match_index_to_operand(Node *operand,
 if (indexprs == NIL)
 elog(ERROR, "wrong number of index expressions");
 indexkey = (Node *) lfirst(indexprs);
+
 /*
  * Does it match the operand? Again, strip any relabeling.
  */
@@ -1776,12 +1786,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_LIKE_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;
 
 case OID_BYTEA_LIKE_OP:
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;
 
 case OID_TEXT_ICLIKE_OP:
@@ -1789,7 +1799,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_ICLIKE_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;
 
 case OID_TEXT_REGEXEQ_OP:
@@ -1797,7 +1807,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_REGEXEQ_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;
 
 case OID_TEXT_ICREGEXEQ_OP:
@@ -1805,7 +1815,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_ICREGEXEQ_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;
 
 case OID_INET_SUB_OP:
@@ -1831,9 +1841,9 @@ match_special_index_operator(Expr *clause, Oid opclass,
  * want to apply. (A hash index, for example, will not support ">=".)
  * Currently, only btree supports the operators we need.
  *
- * We insist on the opclass being the specific one we expect,
- * else we'd do the wrong thing if someone were to make a reverse-sort
- * opclass with the same operators.
+ * We insist on the opclass being the specific one we expect, else we'd
+ * do the wrong thing if someone were to make a reverse-sort opclass
+ * with the same operators.
  */
 switch (expr_op)
 {
@@ -1896,7 +1906,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
  * The input list is ordered by index key, and so the output list is too.
  * (The latter is not depended on by any part of the planner, so far as I can
  * tell; but some parts of the executor do assume that the indxqual list
- * ultimately delivered to the executor is so ordered. One such place is
+ * ultimately delivered to the executor is so ordered. One such place is
  * _bt_orderkeys() in the btree support. Perhaps that ought to be fixed
  * someday --- tgl 7/00)
  */
@@ -1930,7 +1940,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
 
 } while (clausegroups != NIL && !DoneMatchingIndexKeys(classes));
 
- Assert(clausegroups == NIL); /* else more groups than indexkeys... */
+ Assert(clausegroups == NIL); /* else more groups than indexkeys... */
 
 return FastListValue(&resultquals);
 }
@@ -1953,11 +1963,12 @@ expand_indexqual_condition(Expr *clause, Oid opclass)
 
 switch (expr_op)
 {
- /*
- * LIKE and regex operators are not members of any index
- * opclass, so if we find one in an indexqual list we can
- * assume that it was accepted by match_special_index_operator().
- */
+ /*
+ * LIKE and regex operators are not members of any index
+ * opclass, so if we find one in an indexqual list we can
+ * assume that it was accepted by
+ * match_special_index_operator().
+ */
 case OID_TEXT_LIKE_OP:
 case OID_BPCHAR_LIKE_OP:
 case OID_NAME_LIKE_OP:
@@ -2061,22 +2072,22 @@ prefix_quals(Node *leftop, Oid opclass,
 }
 
 /*
- * If necessary, coerce the prefix constant to the right type.
- * The given prefix constant is either text or bytea type.
+ * If necessary, coerce the prefix constant to the right type. The
+ * given prefix constant is either text or bytea type.
  */
 if (prefix_const->consttype != datatype)
 {
- char *prefix;
+ char *prefix;
 
 switch (prefix_const->consttype)
 {
 case TEXTOID:
 prefix = DatumGetCString(DirectFunctionCall1(textout,
|
||||
prefix_const->constvalue));
|
||||
prefix_const->constvalue));
|
||||
break;
|
||||
case BYTEAOID:
|
||||
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
|
||||
prefix_const->constvalue));
|
||||
prefix_const->constvalue));
|
||||
break;
|
||||
default:
|
||||
elog(ERROR, "unexpected const type: %u",
|
||||

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.79 2003/07/25 00:01:06 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.80 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -300,7 +300,7 @@ sort_inner_and_outer(Query *root,
* We always generate a nestloop path for each available outer path.
* In fact we may generate as many as four: one on the cheapest-total-cost
* inner path, one on the same with materialization, one on the
* cheapest-startup-cost inner path (if different),
* cheapest-startup-cost inner path (if different),
* and one on the best inner-indexscan path (if any).
*
* We also consider mergejoins if mergejoin clauses are available. We have
@ -342,10 +342,10 @@ match_unsorted_outer(Query *root,

/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
* doing a right or full join, we must use *all* the mergeclauses as join
* clauses, else we will not have a valid plan. (Although these two
* flags are currently inverses, keep them separate for clarity and
* possible future changes.)
* doing a right or full join, we must use *all* the mergeclauses as
* join clauses, else we will not have a valid plan. (Although these
* two flags are currently inverses, keep them separate for clarity
* and possible future changes.)
*/
switch (jointype)
{
@ -371,8 +371,8 @@ match_unsorted_outer(Query *root,
}

/*
* If we need to unique-ify the inner path, we will consider only
* the cheapest inner.
* If we need to unique-ify the inner path, we will consider only the
* cheapest inner.
*/
if (jointype == JOIN_UNIQUE_INNER)
{
@ -384,9 +384,10 @@ match_unsorted_outer(Query *root,
else if (nestjoinOK)
{
/*
* If the cheapest inner path is a join or seqscan, we should consider
* materializing it. (This is a heuristic: we could consider it
* always, but for inner indexscans it's probably a waste of time.)
* If the cheapest inner path is a join or seqscan, we should
* consider materializing it. (This is a heuristic: we could
* consider it always, but for inner indexscans it's probably a
* waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, TidPath)))
@ -394,8 +395,8 @@ match_unsorted_outer(Query *root,
create_material_path(innerrel, inner_cheapest_total);

/*
* Get the best innerjoin indexpath (if any) for this outer rel. It's
* the same for all outer paths.
* Get the best innerjoin indexpath (if any) for this outer rel.
* It's the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
@ -414,8 +415,8 @@ match_unsorted_outer(Query *root,
int sortkeycnt;

/*
* If we need to unique-ify the outer path, it's pointless to consider
* any but the cheapest outer.
* If we need to unique-ify the outer path, it's pointless to
* consider any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@ -709,7 +710,7 @@ hash_inner_and_outer(Query *root,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
@ -727,9 +728,9 @@ hash_inner_and_outer(Query *root,
* cheapest-startup-cost outer paths. There's no need to consider
* any but the cheapest-total-cost inner path, however.
*/
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
Path *cheapest_total_inner = innerrel->cheapest_total_path;
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
Path *cheapest_total_inner = innerrel->cheapest_total_path;

/* Unique-ify if need be */
if (jointype == JOIN_UNIQUE_OUTER)
@ -840,7 +841,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.61 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.62 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -19,11 +19,11 @@


static List *make_rels_by_clause_joins(Query *root,
RelOptInfo *old_rel,
List *other_rels);
RelOptInfo *old_rel,
List *other_rels);
static List *make_rels_by_clauseless_joins(Query *root,
RelOptInfo *old_rel,
List *other_rels);
RelOptInfo *old_rel,
List *other_rels);


/*
@ -417,8 +417,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,

/*
* If we are implementing IN clauses as joins, there are some joins
* that are illegal. Check to see if the proposed join is trouble.
* We can skip the work if looking at an outer join, however, because
* that are illegal. Check to see if the proposed join is trouble. We
* can skip the work if looking at an outer join, however, because
* only top-level joins might be affected.
*/
if (jointype == JOIN_INNER)
@ -430,8 +430,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);

/*
* Cannot join if proposed join contains part, but only
* part, of the RHS, *and* it contains rels not in the RHS.
* Cannot join if proposed join contains part, but only part,
* of the RHS, *and* it contains rels not in the RHS.
*/
if (bms_overlap(ininfo->righthand, joinrelids) &&
!bms_is_subset(ininfo->righthand, joinrelids) &&
@ -442,16 +442,17 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}

/*
* No issue unless we are looking at a join of the IN's RHS
* to other stuff.
* No issue unless we are looking at a join of the IN's RHS to
* other stuff.
*/
if (! (bms_is_subset(ininfo->righthand, joinrelids) &&
!bms_equal(ininfo->righthand, joinrelids)))
if (!(bms_is_subset(ininfo->righthand, joinrelids) &&
!bms_equal(ininfo->righthand, joinrelids)))
continue;

/*
* If we already joined IN's RHS to any part of its LHS in either
* input path, then this join is not constrained (the necessary
* work was done at a lower level).
* If we already joined IN's RHS to any part of its LHS in
* either input path, then this join is not constrained (the
* necessary work was done at a lower level).
*/
if (bms_overlap(ininfo->lefthand, rel1->relids) &&
bms_is_subset(ininfo->righthand, rel1->relids))
@ -459,6 +460,7 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
if (bms_overlap(ininfo->lefthand, rel2->relids) &&
bms_is_subset(ininfo->righthand, rel2->relids))
continue;

/*
* JOIN_IN technique will work if outerrel includes LHS and
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
@ -478,22 +480,14 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
if (bms_is_subset(ininfo->lefthand, rel1->relids) &&
bms_equal(ininfo->righthand, rel2->relids))
{
jointype = JOIN_IN;
}
else if (bms_is_subset(ininfo->lefthand, rel2->relids) &&
bms_equal(ininfo->righthand, rel1->relids))
{
jointype = JOIN_REVERSE_IN;
}
else if (bms_equal(ininfo->righthand, rel1->relids))
{
jointype = JOIN_UNIQUE_OUTER;
}
else if (bms_equal(ininfo->righthand, rel2->relids))
{
jointype = JOIN_UNIQUE_INNER;
}
else
{
/* invalid join path */

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.51 2003/06/15 22:51:45 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -99,7 +99,7 @@ create_or_index_paths(Query *root, RelOptInfo *rel)

best_or_subclause_indices(root,
rel,
((BoolExpr *) restrictinfo->clause)->args,
((BoolExpr *) restrictinfo->clause)->args,
restrictinfo->subclauseindices,
pathnode);


@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.51 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -198,8 +198,8 @@ generate_implied_equalities(Query *root)
/*
* Collect info about relids mentioned in each item. For this
* routine we only really care whether there are any at all in
* each item, but process_implied_equality() needs the exact
* sets, so we may as well pull them here.
* each item, but process_implied_equality() needs the exact sets,
* so we may as well pull them here.
*/
relids = (Relids *) palloc(nitems * sizeof(Relids));
have_consts = false;
@ -233,8 +233,8 @@ generate_implied_equalities(Query *root)

/*
* If it's "const = const" then just ignore it altogether.
* There is no place in the restrictinfo structure to store
* it. (If the two consts are in fact unequal, then
* There is no place in the restrictinfo structure to
* store it. (If the two consts are in fact unequal, then
* propagating the comparison to Vars will cause us to
* produce zero rows out, as expected.)
*/
@ -242,12 +242,12 @@ generate_implied_equalities(Query *root)
{
/*
* Tell process_implied_equality to delete the clause,
* not add it, if it's "var = var" and we have constants
* present in the list.
* not add it, if it's "var = var" and we have
* constants present in the list.
*/
bool delete_it = (have_consts &&
i1_is_variable &&
i2_is_variable);
bool delete_it = (have_consts &&
i1_is_variable &&
i2_is_variable);

process_implied_equality(root,
item1->key, item2->key,
@ -751,20 +751,21 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
* element might match none, one, or more of the output columns
* that are visible to the outer query. This means we may have
* multiple possible representations of the sub_pathkey in the
* context of the outer query. Ideally we would generate them all
* and put them all into a pathkey list of the outer query, thereby
* propagating equality knowledge up to the outer query. Right now
* we cannot do so, because the outer query's canonical pathkey
* sets are already frozen when this is called. Instead we prefer
* the one that has the highest "score" (number of canonical pathkey
* peers, plus one if it matches the outer query_pathkeys).
* This is the most likely to be useful in the outer query.
* context of the outer query. Ideally we would generate them all
* and put them all into a pathkey list of the outer query,
* thereby propagating equality knowledge up to the outer query.
* Right now we cannot do so, because the outer query's canonical
* pathkey sets are already frozen when this is called. Instead
* we prefer the one that has the highest "score" (number of
* canonical pathkey peers, plus one if it matches the outer
* query_pathkeys). This is the most likely to be useful in the
* outer query.
*/
foreach(j, sub_pathkey)
{
PathKeyItem *sub_item = (PathKeyItem *) lfirst(j);
Node *sub_key = sub_item->key;
List *k;
Node *sub_key = sub_item->key;
List *k;

foreach(k, subquery->targetList)
{
@ -774,9 +775,9 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
equal(tle->expr, sub_key))
{
/* Found a representation for this sub_key */
Var *outer_var;
Var *outer_var;
PathKeyItem *outer_item;
int score;
int score;

outer_var = makeVar(rel->relid,
tle->resdom->resno,
@ -802,8 +803,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
}

/*
* If we couldn't find a representation of this sub_pathkey,
* we're done (we can't use the ones to its right, either).
* If we couldn't find a representation of this sub_pathkey, we're
* done (we can't use the ones to its right, either).
*/
if (!best_item)
break;
@ -812,8 +813,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
cpathkey = make_canonical_pathkey(root, best_item);

/*
* Eliminate redundant ordering info; could happen if outer
* query equijoins subquery keys...
* Eliminate redundant ordering info; could happen if outer query
* equijoins subquery keys...
*/
if (!ptrMember(cpathkey, retval))
{
@ -920,7 +921,7 @@ make_pathkeys_for_sortclauses(List *sortclauses,
* many times when dealing with a many-relation query.
*
* We have to be careful that the cached values are palloc'd in the same
* context the RestrictInfo node itself is in. This is not currently a
* context the RestrictInfo node itself is in. This is not currently a
* problem for normal planning, but it is an issue for GEQO planning.
*/
void
@ -1090,7 +1091,7 @@ make_pathkeys_for_mergeclauses(Query *root,
else
{
elog(ERROR, "could not identify which side of mergeclause to use");
pathkey = NIL; /* keep compiler quiet */
pathkey = NIL; /* keep compiler quiet */
}

/*

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.14 2003/02/08 20:20:54 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.15 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -27,7 +27,7 @@

static List *TidqualFromRestrictinfo(Relids relids, List *restrictinfo);
static bool isEvaluable(int varno, Node *node);
static Node *TidequalClause(int varno, OpExpr *node);
static Node *TidequalClause(int varno, OpExpr * node);
static List *TidqualFromExpr(int varno, Expr *expr);

static bool
@ -66,7 +66,7 @@ isEvaluable(int varno, Node *node)
* or the left node if the opclause is ....=CTID
*/
static Node *
TidequalClause(int varno, OpExpr *node)
TidequalClause(int varno, OpExpr * node)
{
Node *rnode = NULL,
*arg1,

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.149 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.150 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -40,9 +40,9 @@ static bool use_physical_tlist(RelOptInfo *rel);
static void disuse_physical_tlist(Plan *plan, Path *path);
static Join *create_join_plan(Query *root, JoinPath *best_path);
static Append *create_append_plan(Query *root, AppendPath *best_path);
static Result *create_result_plan(Query *root, ResultPath *best_path);
static Material *create_material_plan(Query *root, MaterialPath *best_path);
static Plan *create_unique_plan(Query *root, UniquePath *best_path);
static Result *create_result_plan(Query *root, ResultPath * best_path);
static Material *create_material_plan(Query *root, MaterialPath * best_path);
static Plan *create_unique_plan(Query *root, UniquePath * best_path);
static SeqScan *create_seqscan_plan(Path *best_path, List *tlist,
List *scan_clauses);
static IndexScan *create_indexscan_plan(Query *root, IndexPath *best_path,
@ -63,9 +63,9 @@ static void fix_indxqual_references(List *indexquals, IndexPath *index_path,
List **fixed_indexquals,
List **recheck_indexquals);
static void fix_indxqual_sublist(List *indexqual,
Relids baserelids, int baserelid,
IndexOptInfo *index,
List **fixed_quals, List **recheck_quals);
Relids baserelids, int baserelid,
IndexOptInfo *index,
List **fixed_quals, List **recheck_quals);
static Node *fix_indxqual_operand(Node *node, int baserelid,
IndexOptInfo *index,
Oid *opclass);
@ -98,9 +98,9 @@ static MergeJoin *make_mergejoin(List *tlist,
Plan *lefttree, Plan *righttree,
JoinType jointype);
static Sort *make_sort(Query *root, List *tlist, Plan *lefttree, int numCols,
AttrNumber *sortColIdx, Oid *sortOperators);
AttrNumber *sortColIdx, Oid *sortOperators);
static Sort *make_sort_from_pathkeys(Query *root, Plan *lefttree,
Relids relids, List *pathkeys);
Relids relids, List *pathkeys);


/*
@ -148,7 +148,7 @@ create_plan(Query *root, Path *best_path)
break;
case T_Material:
plan = (Plan *) create_material_plan(root,
(MaterialPath *) best_path);
(MaterialPath *) best_path);
break;
case T_Unique:
plan = (Plan *) create_unique_plan(root,
@ -192,12 +192,12 @@ create_scan_plan(Query *root, Path *best_path)
Scan *plan;

/*
* For table scans, rather than using the relation targetlist (which is
* only those Vars actually needed by the query), we prefer to generate a
* tlist containing all Vars in order. This will allow the executor to
* optimize away projection of the table tuples, if possible. (Note that
* planner.c may replace the tlist we generate here, forcing projection to
* occur.)
* For table scans, rather than using the relation targetlist (which
* is only those Vars actually needed by the query), we prefer to
* generate a tlist containing all Vars in order. This will allow the
* executor to optimize away projection of the table tuples, if
* possible. (Note that planner.c may replace the tlist we generate
* here, forcing projection to occur.)
*/
if (use_physical_tlist(rel))
{
@ -274,8 +274,8 @@ build_relation_tlist(RelOptInfo *rel)
FastListInit(&tlist);
foreach(v, FastListValue(&rel->reltargetlist))
{
/* Do we really need to copy here? Not sure */
Var *var = (Var *) copyObject(lfirst(v));
/* Do we really need to copy here? Not sure */
Var *var = (Var *) copyObject(lfirst(v));

FastAppend(&tlist, create_tl_element(var, resdomno));
resdomno++;
@ -294,22 +294,24 @@ use_physical_tlist(RelOptInfo *rel)
int i;

/*
* Currently, can't do this for subquery or function scans. (This
* is mainly because we don't have an equivalent of build_physical_tlist
* Currently, can't do this for subquery or function scans. (This is
* mainly because we don't have an equivalent of build_physical_tlist
* for them; worth adding?)
*/
if (rel->rtekind != RTE_RELATION)
return false;

/*
* Can't do it with inheritance cases either (mainly because Append
* doesn't project).
*/
if (rel->reloptkind != RELOPT_BASEREL)
return false;

/*
* Can't do it if any system columns are requested, either. (This could
* possibly be fixed but would take some fragile assumptions in setrefs.c,
* I think.)
* Can't do it if any system columns are requested, either. (This
* could possibly be fixed but would take some fragile assumptions in
* setrefs.c, I think.)
*/
for (i = rel->min_attr; i <= 0; i++)
{
@ -325,7 +327,7 @@ use_physical_tlist(RelOptInfo *rel)
*
* If the plan node immediately above a scan would prefer to get only
* needed Vars and not a physical tlist, it must call this routine to
* undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* and Material nodes want this, so they don't have to store useless columns.
*/
static void
@ -441,7 +443,7 @@ create_append_plan(Query *root, AppendPath *best_path)
* Returns a Plan node.
*/
static Result *
create_result_plan(Query *root, ResultPath *best_path)
create_result_plan(Query *root, ResultPath * best_path)
{
Result *plan;
List *tlist;
@ -473,7 +475,7 @@ create_result_plan(Query *root, ResultPath *best_path)
* Returns a Plan node.
*/
static Material *
create_material_plan(Query *root, MaterialPath *best_path)
create_material_plan(Query *root, MaterialPath * best_path)
{
Material *plan;
Plan *subplan;
@ -498,7 +500,7 @@ create_material_plan(Query *root, MaterialPath *best_path)
* Returns a Plan node.
*/
static Plan *
create_unique_plan(Query *root, UniquePath *best_path)
create_unique_plan(Query *root, UniquePath * best_path)
{
Plan *plan;
Plan *subplan;
@ -509,9 +511,9 @@ create_unique_plan(Query *root, UniquePath *best_path)
subplan = create_plan(root, best_path->subpath);

/*
* If the subplan came from an IN subselect (currently always the case),
* we need to instantiate the correct output targetlist for the subselect,
* rather than using the flattened tlist.
* If the subplan came from an IN subselect (currently always the
* case), we need to instantiate the correct output targetlist for the
* subselect, rather than using the flattened tlist.
*/
sub_targetlist = NIL;
foreach(l, root->in_info_list)
@ -530,8 +532,8 @@ create_unique_plan(Query *root, UniquePath *best_path)
/*
* Transform list of plain Vars into targetlist
*/
List *newtlist = NIL;
int resno = 1;
List *newtlist = NIL;
int resno = 1;

foreach(l, sub_targetlist)
{
@ -547,12 +549,13 @@ create_unique_plan(Query *root, UniquePath *best_path)
newtlist = lappend(newtlist, tle);
resno++;
}

/*
* If the top plan node can't do projections, we need to add a
* Result node to help it along.
*
* Currently, the only non-projection-capable plan type
* we can see here is Append.
* Currently, the only non-projection-capable plan type we can see
* here is Append.
*/
if (IsA(subplan, Append))
subplan = (Plan *) make_result(newtlist, NULL, subplan);
@ -564,16 +567,16 @@ create_unique_plan(Query *root, UniquePath *best_path)

if (best_path->use_hash)
{
int numGroupCols = length(my_tlist);
long numGroups;
int numGroupCols = length(my_tlist);
long numGroups;
AttrNumber *groupColIdx;
int i;
int i;

numGroups = (long) Min(best_path->rows, (double) LONG_MAX);

groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
for (i = 0; i < numGroupCols; i++)
groupColIdx[i] = i+1;
groupColIdx[i] = i + 1;

plan = (Plan *) make_agg(root,
my_tlist,
@ -700,9 +703,7 @@ create_indexscan_plan(Query *root,

FastListInit(&orclauses);
foreach(orclause, indxqual)
{
FastAppend(&orclauses, make_ands_explicit(lfirst(orclause)));
}
indxqual_or_expr = make_orclause(FastListValue(&orclauses));

qpqual = set_difference(scan_clauses, makeList1(indxqual_or_expr));
@ -861,9 +862,9 @@ create_nestloop_plan(Query *root,
/*
* An index is being used to reduce the number of tuples scanned
* in the inner relation. If there are join clauses being used
* with the index, we may remove those join clauses from the list of
* clauses that have to be checked as qpquals at the join node ---
* but only if there's just one indexscan in the inner path
* with the index, we may remove those join clauses from the list
* of clauses that have to be checked as qpquals at the join node
* --- but only if there's just one indexscan in the inner path
* (otherwise, several different sets of clauses are being ORed
* together).
*
@ -873,13 +874,14 @@ create_nestloop_plan(Query *root,
* been put in the same joininfo list.
*
* This would be a waste of time if the indexpath was an ordinary
* indexpath and not a special innerjoin path. We will skip it in
* that case since indexjoinclauses is NIL in an ordinary indexpath.
* indexpath and not a special innerjoin path. We will skip it in
* that case since indexjoinclauses is NIL in an ordinary
* indexpath.
*/
IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
List *indexjoinclauses = innerpath->indexjoinclauses;

if (length(indexjoinclauses) == 1) /* single indexscan? */
if (length(indexjoinclauses) == 1) /* single indexscan? */
{
joinrestrictclauses =
select_nonredundant_join_clauses(root,
@ -947,11 +949,11 @@ create_mergejoin_plan(Query *root,
joinclauses = set_difference(joinclauses, mergeclauses);

/*
* Rearrange mergeclauses, if needed, so that the outer variable
* is always on the left.
* Rearrange mergeclauses, if needed, so that the outer variable is
* always on the left.
*/
mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
best_path->jpath.outerjoinpath->parent->relids);
best_path->jpath.outerjoinpath->parent->relids);

/*
* Create explicit sort nodes for the outer and inner join paths if
@ -964,7 +966,7 @@ create_mergejoin_plan(Query *root,
outer_plan = (Plan *)
make_sort_from_pathkeys(root,
outer_plan,
best_path->jpath.outerjoinpath->parent->relids,
best_path->jpath.outerjoinpath->parent->relids,
best_path->outersortkeys);
}

@ -974,7 +976,7 @@ create_mergejoin_plan(Query *root,
inner_plan = (Plan *)
make_sort_from_pathkeys(root,
inner_plan,
best_path->jpath.innerjoinpath->parent->relids,
best_path->jpath.innerjoinpath->parent->relids,
best_path->innersortkeys);
}

@ -1030,21 +1032,19 @@ create_hashjoin_plan(Query *root,
joinclauses = set_difference(joinclauses, hashclauses);

/*
* Rearrange hashclauses, if needed, so that the outer variable
* is always on the left.
* Rearrange hashclauses, if needed, so that the outer variable is
* always on the left.
*/
hashclauses = get_switched_clauses(best_path->path_hashclauses,
best_path->jpath.outerjoinpath->parent->relids);
best_path->jpath.outerjoinpath->parent->relids);

/*
* Extract the inner hash keys (right-hand operands of the hashclauses)
* to put in the Hash node.
* Extract the inner hash keys (right-hand operands of the
* hashclauses) to put in the Hash node.
*/
innerhashkeys = NIL;
foreach(hcl, hashclauses)
{
innerhashkeys = lappend(innerhashkeys, get_rightop(lfirst(hcl)));
}

/* We don't want any excess columns in the hashed tuples */
disuse_physical_tlist(inner_plan, best_path->jpath.innerjoinpath);
@ -1362,7 +1362,7 @@ order_qual_clauses(Query *root, List *clauses)
FastListInit(&withsubplans);
foreach(l, clauses)
{
Node *clause = lfirst(l);
Node *clause = lfirst(l);

if (contain_subplans(clause))
FastAppend(&withsubplans, clause);
@ -1507,8 +1507,8 @@ make_subqueryscan(List *qptlist,

/*
* Cost is figured here for the convenience of prepunion.c. Note this
* is only correct for the case where qpqual is empty; otherwise caller
* should overwrite cost with a better estimate.
* is only correct for the case where qpqual is empty; otherwise
* caller should overwrite cost with a better estimate.
*/
copy_plan_costsize(plan, subplan);
plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
@ -1709,7 +1709,7 @@ make_sort(Query *root, List *tlist, Plan *lefttree, int numCols,
* once as a sort key column; if so, the extra mentions are redundant.
*
* Caller is assumed to have allocated the arrays large enough for the
* max possible number of columns. Return value is the new column count.
* max possible number of columns. Return value is the new column count.
*/
static int
add_sort_column(AttrNumber colIdx, Oid sortOp,
@ -1777,8 +1777,8 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to an
* available Var in the tlist. If there isn't any, use the
* first one that is an expression in the input's vars.
* available Var in the tlist. If there isn't any, use the first
* one that is an expression in the input's vars.
*
* XXX if we have a choice, is there any way of figuring out which
* might be cheapest to execute? (For example, int4lt is likely
@ -1805,17 +1805,19 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
}
if (!j)
elog(ERROR, "could not find pathkey item to sort");

/*
* Do we need to insert a Result node?
*
* Currently, the only non-projection-capable plan type
* we can see here is Append.
* Currently, the only non-projection-capable plan type we can
* see here is Append.
*/
if (IsA(lefttree, Append))
{
tlist = copyObject(tlist);
lefttree = (Plan *) make_result(tlist, NULL, lefttree);
}

/*
* Add resjunk entry to input's tlist
*/
@ -1827,8 +1829,9 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
tlist = lappend(tlist,
makeTargetEntry(resdom,
(Expr *) pathkey->key));
lefttree->targetlist = tlist; /* just in case NIL before */
lefttree->targetlist = tlist; /* just in case NIL before */
}

/*
* The column might already be selected as a sort key, if the
* pathkeys contain duplicate entries. (This can happen in
@ -1836,7 +1839,7 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
* var, for example.) So enter it only once in the sort arrays.
*/
numsortkeys = add_sort_column(resdom->resno, pathkey->sortop,
numsortkeys, sortColIdx, sortOperators);
numsortkeys, sortColIdx, sortOperators);
}

Assert(numsortkeys > 0);
@ -1881,10 +1884,11 @@ make_sort_from_sortclauses(Query *root, List *tlist,

/*
* Check for the possibility of duplicate order-by clauses --- the
* parser should have removed 'em, but no point in sorting redundantly.
* parser should have removed 'em, but no point in sorting
* redundantly.
*/
numsortkeys = add_sort_column(resdom->resno, sortcl->sortop,
numsortkeys, sortColIdx, sortOperators);
numsortkeys, sortColIdx, sortOperators);
}

Assert(numsortkeys > 0);
@ -1938,10 +1942,11 @@ make_sort_from_groupcols(Query *root,

/*
* Check for the possibility of duplicate group-by clauses --- the
* parser should have removed 'em, but no point in sorting redundantly.
* parser should have removed 'em, but no point in sorting
* redundantly.
*/
numsortkeys = add_sort_column(resdom->resno, grpcl->sortop,
numsortkeys, sortColIdx, sortOperators);
numsortkeys, sortColIdx, sortOperators);
grpno++;
}

@ -1973,7 +1978,7 @@ make_material(List *tlist, Plan *lefttree)
* materialize_finished_plan: stick a Material node atop a completed plan
*
* There are a couple of places where we want to attach a Material node
* after completion of subquery_planner(). This currently requires hackery.
* after completion of subquery_planner(). This currently requires hackery.
* Since subquery_planner has already run SS_finalize_plan on the subplan
* tree, we have to kluge up parameter lists for the Material node.
* Possibly this could be fixed by postponing SS_finalize_plan processing
@ -2032,8 +2037,8 @@ make_agg(Query *root, List *tlist, List *qual,
plan->total_cost = agg_path.total_cost;

/*
* We will produce a single output tuple if not grouping,
* and a tuple per group otherwise.
* We will produce a single output tuple if not grouping, and a tuple
* per group otherwise.
*/
if (aggstrategy == AGG_PLAIN)
plan->plan_rows = 1;
@ -2041,10 +2046,10 @@ make_agg(Query *root, List *tlist, List *qual,
plan->plan_rows = numGroups;

/*
* We also need to account for the cost of evaluation of the qual
* (ie, the HAVING clause) and the tlist. Note that cost_qual_eval
* doesn't charge anything for Aggref nodes; this is okay since
* they are really comparable to Vars.
* We also need to account for the cost of evaluation of the qual (ie,
* the HAVING clause) and the tlist. Note that cost_qual_eval doesn't
* charge anything for Aggref nodes; this is okay since they are
* really comparable to Vars.
*
* See notes in grouping_planner about why this routine and make_group
* are the only ones in this file that worry about tlist eval cost.
@ -2100,13 +2105,13 @@ make_group(Query *root,
/*
* We also need to account for the cost of evaluation of the tlist.
*
* XXX this double-counts the cost of evaluation of any expressions
* used for grouping, since in reality those will have been evaluated
* at a lower plan level and will only be copied by the Group node.
* Worth fixing?
* XXX this double-counts the cost of evaluation of any expressions used
* for grouping, since in reality those will have been evaluated at a
* lower plan level and will only be copied by the Group node. Worth
* fixing?
*
* See notes in grouping_planner about why this routine and make_agg
* are the only ones in this file that worry about tlist eval cost.
* See notes in grouping_planner about why this routine and make_agg are
* the only ones in this file that worry about tlist eval cost.
*/
cost_qual_eval(&qual_cost, tlist);
plan->startup_cost += qual_cost.startup;
@ -2139,15 +2144,15 @@ make_unique(List *tlist, Plan *lefttree, List *distinctList)

/*
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples. (XXX probably
* this is an overestimate.)
* assume all columns get compared at most of the tuples. (XXX
* probably this is an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;

/*
* plan->plan_rows is left as a copy of the input subplan's plan_rows;
* ie, we assume the filter removes nothing. The caller must alter this
* if he has a better idea.
* ie, we assume the filter removes nothing. The caller must alter
* this if he has a better idea.
*/

plan->targetlist = tlist;
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.88 2003/07/28 00:09:15 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.89 2003/08/04 00:43:20 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -36,12 +36,12 @@
|
||||
static void mark_baserels_for_outer_join(Query *root, Relids rels,
|
||||
Relids outerrels);
|
||||
static void distribute_qual_to_rels(Query *root, Node *clause,
|
||||
bool ispusheddown,
|
||||
bool isdeduced,
|
||||
Relids outerjoin_nonnullable,
|
||||
Relids qualscope);
|
||||
bool ispusheddown,
|
||||
bool isdeduced,
|
||||
Relids outerjoin_nonnullable,
|
||||
Relids qualscope);
|
||||
static void add_vars_to_targetlist(Query *root, List *vars,
|
||||
Relids where_needed);
|
||||
Relids where_needed);
|
||||
static bool qual_is_redundant(Query *root, RestrictInfo *restrictinfo,
|
||||
List *restrictlist);
|
||||
static void check_mergejoinable(RestrictInfo *restrictinfo);
|
||||
@ -83,9 +83,7 @@ add_base_rels_to_query(Query *root, Node *jtnode)
|
||||
List *l;
|
||||
|
||||
foreach(l, f->fromlist)
|
||||
{
|
||||
add_base_rels_to_query(root, lfirst(l));
|
||||
}
|
||||
}
|
||||
else if (IsA(jtnode, JoinExpr))
|
||||
{
|
||||
@ -93,13 +91,14 @@ add_base_rels_to_query(Query *root, Node *jtnode)
|
||||
|
||||
add_base_rels_to_query(root, j->larg);
|
||||
add_base_rels_to_query(root, j->rarg);
|
||||
|
||||
/*
|
||||
* Safety check: join RTEs should not be SELECT FOR UPDATE targets
|
||||
*/
|
||||
if (intMember(j->rtindex, root->rowMarks))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("SELECT FOR UPDATE cannot be applied to a join")));
|
||||
errmsg("SELECT FOR UPDATE cannot be applied to a join")));
|
||||
}
|
||||
else
|
||||
elog(ERROR, "unrecognized node type: %d",
|
||||
@ -247,14 +246,14 @@ distribute_quals_to_rels(Query *root, Node *jtnode)
|
||||
* Order of operations here is subtle and critical. First we
|
||||
* recurse to handle sub-JOINs. Their join quals will be placed
|
||||
* without regard for whether this level is an outer join, which
|
||||
* is correct. Then we place our own join quals, which are restricted
|
||||
* by lower outer joins in any case, and are forced to this level if
|
||||
* this is an outer join and they mention the outer side. Finally, if
|
||||
* this is an outer join, we mark baserels contained within the inner
|
||||
* side(s) with our own rel set; this will prevent quals above us in
|
||||
* the join tree that use those rels from being pushed down below this
|
||||
* level. (It's okay for upper quals to be pushed down to the outer
|
||||
* side, however.)
|
||||
* is correct. Then we place our own join quals, which are
|
||||
* restricted by lower outer joins in any case, and are forced to
|
||||
* this level if this is an outer join and they mention the outer
|
||||
* side. Finally, if this is an outer join, we mark baserels
|
||||
* contained within the inner side(s) with our own rel set; this
|
||||
* will prevent quals above us in the join tree that use those
|
||||
* rels from being pushed down below this level. (It's okay for
|
||||
* upper quals to be pushed down to the outer side, however.)
|
||||
*/
|
||||
leftids = distribute_quals_to_rels(root, j->larg);
|
||||
rightids = distribute_quals_to_rels(root, j->rarg);
|
||||
@ -390,9 +389,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
|
||||
restrictinfo->clause = (Expr *) clause;
|
||||
restrictinfo->subclauseindices = NIL;
|
||||
restrictinfo->eval_cost.startup = -1; /* not computed until needed */
|
||||
restrictinfo->eval_cost.startup = -1; /* not computed until
|
||||
* needed */
|
||||
restrictinfo->this_selec = -1; /* not computed until needed */
|
||||
restrictinfo->left_relids = NULL; /* set below, if join clause */
|
||||
restrictinfo->left_relids = NULL; /* set below, if join clause */
|
||||
restrictinfo->right_relids = NULL;
|
||||
restrictinfo->mergejoinoperator = InvalidOid;
|
||||
restrictinfo->left_sortop = InvalidOid;
|
||||
@ -435,10 +435,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
if (isdeduced)
|
||||
{
|
||||
/*
|
||||
* If the qual came from implied-equality deduction, we can evaluate
|
||||
* the qual at its natural semantic level. It is not affected by
|
||||
* any outer-join rules (else we'd not have decided the vars were
|
||||
* equal).
|
||||
* If the qual came from implied-equality deduction, we can
|
||||
* evaluate the qual at its natural semantic level. It is not
|
||||
* affected by any outer-join rules (else we'd not have decided
|
||||
* the vars were equal).
|
||||
*/
|
||||
Assert(bms_equal(relids, qualscope));
|
||||
can_be_equijoin = true;
|
||||
@ -446,12 +446,13 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
else if (bms_overlap(relids, outerjoin_nonnullable))
|
||||
{
|
||||
/*
|
||||
* The qual is attached to an outer join and mentions (some of the)
|
||||
* rels on the nonnullable side. Force the qual to be evaluated
|
||||
* exactly at the level of joining corresponding to the outer join.
|
||||
* We cannot let it get pushed down into the nonnullable side, since
|
||||
* then we'd produce no output rows, rather than the intended single
|
||||
* null-extended row, for any nonnullable-side rows failing the qual.
|
||||
* The qual is attached to an outer join and mentions (some of
|
||||
* the) rels on the nonnullable side. Force the qual to be
|
||||
* evaluated exactly at the level of joining corresponding to the
|
||||
* outer join. We cannot let it get pushed down into the
|
||||
* nonnullable side, since then we'd produce no output rows,
|
||||
* rather than the intended single null-extended row, for any
|
||||
* nonnullable-side rows failing the qual.
|
||||
*
|
||||
* Note: an outer-join qual that mentions only nullable-side rels can
|
||||
* be pushed down into the nullable side without changing the join
|
||||
@ -464,13 +465,14 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
{
|
||||
/*
|
||||
* For a non-outer-join qual, we can evaluate the qual as soon as
|
||||
* (1) we have all the rels it mentions, and (2) we are at or above
|
||||
* any outer joins that can null any of these rels and are below the
|
||||
* syntactic location of the given qual. To enforce the latter, scan
|
||||
* the base rels listed in relids, and merge their outer-join sets
|
||||
* into the clause's own reference list. At the time we are called,
|
||||
* the outerjoinset of each baserel will show exactly those outer
|
||||
* joins that are below the qual in the join tree.
|
||||
* (1) we have all the rels it mentions, and (2) we are at or
|
||||
* above any outer joins that can null any of these rels and are
|
||||
* below the syntactic location of the given qual. To enforce the
|
||||
* latter, scan the base rels listed in relids, and merge their
|
||||
* outer-join sets into the clause's own reference list. At the
|
||||
* time we are called, the outerjoinset of each baserel will show
|
||||
* exactly those outer joins that are below the qual in the join
|
||||
* tree.
|
||||
*/
|
||||
Relids addrelids = NULL;
|
||||
Relids tmprelids;
|
||||
@ -496,9 +498,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
relids = bms_union(relids, addrelids);
|
||||
/* Should still be a subset of current scope ... */
|
||||
Assert(bms_is_subset(relids, qualscope));
|
||||
|
||||
/*
|
||||
* Because application of the qual will be delayed by outer join,
|
||||
* we mustn't assume its vars are equal everywhere.
|
||||
* Because application of the qual will be delayed by outer
|
||||
* join, we mustn't assume its vars are equal everywhere.
|
||||
*/
|
||||
can_be_equijoin = false;
|
||||
}
|
||||
@ -518,6 +521,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
switch (bms_membership(relids))
|
||||
{
|
||||
case BMS_SINGLETON:
|
||||
|
||||
/*
|
||||
* There is only one relation participating in 'clause', so
|
||||
* 'clause' is a restriction clause for that relation.
|
||||
@ -525,28 +529,29 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
rel = find_base_rel(root, bms_singleton_member(relids));
|
||||
|
||||
/*
|
||||
* Check for a "mergejoinable" clause even though it's not a join
|
||||
* clause. This is so that we can recognize that "a.x = a.y"
|
||||
* makes x and y eligible to be considered equal, even when they
|
||||
* belong to the same rel. Without this, we would not recognize
|
||||
* that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to
|
||||
* consider z and q equal after their rels are joined.
|
||||
* Check for a "mergejoinable" clause even though it's not a
|
||||
* join clause. This is so that we can recognize that "a.x =
|
||||
* a.y" makes x and y eligible to be considered equal, even
|
||||
* when they belong to the same rel. Without this, we would
|
||||
* not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q"
|
||||
* allows us to consider z and q equal after their rels are
|
||||
* joined.
|
||||
*/
|
||||
if (can_be_equijoin)
|
||||
check_mergejoinable(restrictinfo);
|
||||
|
||||
/*
|
||||
* If the clause was deduced from implied equality, check to see
|
||||
* whether it is redundant with restriction clauses we already
|
||||
* have for this rel. Note we cannot apply this check to
|
||||
* user-written clauses, since we haven't found the canonical
|
||||
* pathkey sets yet while processing user clauses. (NB: no
|
||||
* comparable check is done in the join-clause case; redundancy
|
||||
* will be detected when the join clause is moved into a join
|
||||
* rel's restriction list.)
|
||||
* If the clause was deduced from implied equality, check to
|
||||
* see whether it is redundant with restriction clauses we
|
||||
* already have for this rel. Note we cannot apply this check
|
||||
* to user-written clauses, since we haven't found the
|
||||
* canonical pathkey sets yet while processing user clauses.
|
||||
* (NB: no comparable check is done in the join-clause case;
|
||||
* redundancy will be detected when the join clause is moved
|
||||
* into a join rel's restriction list.)
|
||||
*/
|
||||
if (!isdeduced ||
|
||||
!qual_is_redundant(root, restrictinfo, rel->baserestrictinfo))
|
||||
!qual_is_redundant(root, restrictinfo, rel->baserestrictinfo))
|
||||
{
|
||||
/* Add clause to rel's restriction list */
|
||||
rel->baserestrictinfo = lappend(rel->baserestrictinfo,
|
||||
@ -554,13 +559,14 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
}
|
||||
break;
|
||||
case BMS_MULTIPLE:
|
||||
|
||||
/*
|
||||
* 'clause' is a join clause, since there is more than one rel in
|
||||
* the relid set. Set additional RestrictInfo fields for
|
||||
* joining. First, does it look like a normal join clause, i.e.,
|
||||
* a binary operator relating expressions that come from distinct
|
||||
* relations? If so we might be able to use it in a join
|
||||
* algorithm.
|
||||
* 'clause' is a join clause, since there is more than one rel
|
||||
* in the relid set. Set additional RestrictInfo fields for
|
||||
* joining. First, does it look like a normal join clause,
|
||||
* i.e., a binary operator relating expressions that come from
|
||||
* distinct relations? If so we might be able to use it in a
|
||||
* join algorithm.
|
||||
*/
|
||||
if (is_opclause(clause) && length(((OpExpr *) clause)->args) == 2)
|
||||
{
|
||||
@ -582,9 +588,9 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
* Now check for hash or mergejoinable operators.
|
||||
*
|
||||
* We don't bother setting the hashjoin info if we're not going
|
||||
* to need it. We do want to know about mergejoinable ops in all
|
||||
* cases, however, because we use mergejoinable ops for other
|
||||
* purposes such as detecting redundant clauses.
|
||||
* to need it. We do want to know about mergejoinable ops in
|
||||
* all cases, however, because we use mergejoinable ops for
|
||||
* other purposes such as detecting redundant clauses.
|
||||
*/
|
||||
check_mergejoinable(restrictinfo);
|
||||
if (enable_hashjoin)
|
||||
@ -597,16 +603,18 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
|
||||
/*
|
||||
* Add vars used in the join clause to targetlists of their
|
||||
* relations, so that they will be emitted by the plan nodes that
|
||||
* scan those relations (else they won't be available at the join
|
||||
* node!).
|
||||
* relations, so that they will be emitted by the plan nodes
|
||||
* that scan those relations (else they won't be available at
|
||||
* the join node!).
|
||||
*/
|
||||
add_vars_to_targetlist(root, vars, relids);
|
||||
break;
|
||||
default:
|
||||
|
||||
/*
|
||||
* 'clause' references no rels, and therefore we have no place to
|
||||
* attach it. Shouldn't get here if callers are working properly.
|
||||
* 'clause' references no rels, and therefore we have no place
|
||||
* to attach it. Shouldn't get here if callers are working
|
||||
* properly.
|
||||
*/
|
||||
elog(ERROR, "cannot cope with variable-free clause");
|
||||
break;
|
||||
@ -634,7 +642,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
|
||||
*
|
||||
* This processing is a consequence of transitivity of mergejoin equality:
|
||||
* if we have mergejoinable clauses A = B and B = C, we can deduce A = C
|
||||
* (where = is an appropriate mergejoinable operator). See path/pathkeys.c
|
||||
* (where = is an appropriate mergejoinable operator). See path/pathkeys.c
|
||||
* for more details.
|
||||
*/
|
||||
void
@ -695,8 +703,8 @@ process_implied_equality(Query *root,
}

/*
* Scan to see if equality is already known. If so, we're done in
* the add case, and done after removing it in the delete case.
* Scan to see if equality is already known. If so, we're done in the
* add case, and done after removing it in the delete case.
*/
foreach(itm, restrictlist)
{
@ -719,7 +727,7 @@ process_implied_equality(Query *root,
{
/* delete it from local restrictinfo list */
rel1->baserestrictinfo = lremove(restrictinfo,
rel1->baserestrictinfo);
rel1->baserestrictinfo);
}
else
{
@ -768,9 +776,9 @@ process_implied_equality(Query *root,
errmsg("equality operator for types %s and %s should be mergejoinable, but isn't",
format_type_be(ltype), format_type_be(rtype))));

clause = make_opclause(oprid(eq_operator), /* opno */
BOOLOID, /* opresulttype */
false, /* opretset */
clause = make_opclause(oprid(eq_operator), /* opno */
BOOLOID, /* opresulttype */
false, /* opretset */
(Expr *) item1,
(Expr *) item2);

@ -797,9 +805,9 @@ process_implied_equality(Query *root,
* too-small selectivity, not to mention wasting time at execution.
*
* Note: quals of the form "var = const" are never considered redundant,
* only those of the form "var = var". This is needed because when we
* only those of the form "var = var". This is needed because when we
* have constants in an implied-equality set, we use a different strategy
* that suppresses all "var = var" deductions. We must therefore keep
* that suppresses all "var = var" deductions. We must therefore keep
* all the "var = const" quals.
*/
static bool
@ -858,7 +866,8 @@ qual_is_redundant(Query *root,
* left side of the new qual. We traverse the old-quals list
* repeatedly to transitively expand the exprs list. If at any point
* we find we can reach the right-side expr of the new qual, we are
* done. We give up when we can't expand the equalexprs list any more.
* done. We give up when we can't expand the equalexprs list any
* more.
*/
equalexprs = makeList1(newleft);
do
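A compilable sketch of the expansion loop begun by the do just above. Integers stand in for expressions, a fixed-size array for the equalexprs list, and the function name is hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the expansion loop described above, with ints for exprs.
 * quals are already-known equalities; we ask whether newleft = newright
 * is implied by them (and hence redundant). */
static bool
is_redundant(int newleft, int newright, int quals[][2], int nquals)
{
    int reach[16];
    int n = 1;
    bool grew;

    reach[0] = newleft;             /* equalexprs = makeList1(newleft) */
    do
    {
        grew = false;
        for (int q = 0; q < nquals; q++)
            for (int r = 0; r < n; r++)
            {
                int other = -1;

                if (quals[q][0] == reach[r]) other = quals[q][1];
                if (quals[q][1] == reach[r]) other = quals[q][0];
                if (other < 0) continue;
                if (other == newright)
                    return true;    /* reached the right side: redundant */
                bool seen = false;
                for (int k = 0; k < n; k++)
                    if (reach[k] == other) seen = true;
                if (!seen && n < 16) { reach[n++] = other; grew = true; }
            }
    } while (grew);                 /* give up when no more expansion */
    return false;
}

int main(void)
{
    int quals[2][2] = {{1, 2}, {2, 3}};   /* a=b and b=c already known */
    printf("a=c redundant? %d\n", is_redundant(1, 3, quals, 2));
    return 0;
}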
@ -945,7 +954,7 @@ check_mergejoinable(RestrictInfo *restrictinfo)
* info fields in the restrictinfo.
*
* Currently, we support hashjoin for binary opclauses where
* the operator is a hashjoinable operator. The arguments can be
* the operator is a hashjoinable operator. The arguments can be
* anything --- as long as there are no volatile functions in them.
*/
static void

@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.76 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.77 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -33,7 +33,7 @@
* which may involve joins but not any fancier features.
*
* Since query_planner does not handle the toplevel processing (grouping,
* sorting, etc) it cannot select the best path by itself. It selects
* sorting, etc) it cannot select the best path by itself. It selects
* two paths: the cheapest path that produces all the required tuples,
* independent of any ordering considerations, and the cheapest path that
* produces the expected fraction of the required tuples in the required
@ -84,7 +84,7 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
if (root->jointree->fromlist == NIL)
{
*cheapest_path = (Path *) create_result_path(NULL, NULL,
(List *) root->jointree->quals);
(List *) root->jointree->quals);
*sorted_path = NULL;
return;
}
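For a query with an empty FROM list (for example, SELECT 2+2), the code above short-circuits to a bare Result path carrying the quals. A schematic standalone rendering under that assumption; the Path struct here is a drastically simplified stand-in for the real one:

#include <stdio.h>
#include <stddef.h>

/* Schematic stand-in; the real Path/Query structs are far richer. */
typedef struct Path { const char *type; void *quals; } Path;

static Path *
create_result_path_stub(void *quals)
{
    static Path p;
    p.type = "Result";
    p.quals = quals;                /* quals evaluated once, no table scan */
    return &p;
}

int main(void)
{
    void *fromlist = NULL;          /* NIL: the query references no tables */
    Path *cheapest_path = NULL;
    Path *sorted_path = NULL;

    if (fromlist == NULL)           /* mirrors the fromlist == NIL test */
    {
        cheapest_path = create_result_path_stub(NULL);
        sorted_path = NULL;         /* nothing to sort */
    }
    printf("path: %s\n", cheapest_path ? cheapest_path->type : "none");
    return sorted_path != NULL;
}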
@ -125,9 +125,9 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
* relations. We also build lists of equijoined keys for pathkey
* construction.
*
* Note: all subplan nodes will have "flat" (var-only) tlists.
* This implies that all expression evaluations are done at the root of
* the plan tree. Once upon a time there was code to try to push
* Note: all subplan nodes will have "flat" (var-only) tlists. This
* implies that all expression evaluations are done at the root of the
* plan tree. Once upon a time there was code to try to push
* expensive function calls down to lower plan nodes, but that's dead
* code and has been for a long time...
*/
@ -223,7 +223,8 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
}

/*
* If we have constant quals, add a toplevel Result step to process them.
* If we have constant quals, add a toplevel Result step to process
* them.
*/
if (constant_quals)
{

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.157 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.158 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,10 +45,10 @@

/* Expression kind codes for preprocess_expression */
#define EXPRKIND_QUAL 0
#define EXPRKIND_TARGET 1
#define EXPRKIND_RTFUNC 2
#define EXPRKIND_TARGET 1
#define EXPRKIND_RTFUNC 2
#define EXPRKIND_LIMIT 3
#define EXPRKIND_ININFO 4
#define EXPRKIND_ININFO 4


static Node *preprocess_expression(Query *parse, Node *expr, int kind);
@ -59,9 +59,9 @@ static bool hash_safe_grouping(Query *parse);
static List *make_subplanTargetList(Query *parse, List *tlist,
AttrNumber **groupColIdx, bool *need_tlist_eval);
static void locate_grouping_columns(Query *parse,
List *tlist,
List *sub_tlist,
AttrNumber *groupColIdx);
List *tlist,
List *sub_tlist,
AttrNumber *groupColIdx);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);


@ -103,9 +103,9 @@ planner(Query *parse, bool isCursor, int cursorOptions)
{
/*
* We have no real idea how many tuples the user will ultimately
* FETCH from a cursor, but it seems a good bet that he
* doesn't want 'em all. Optimize for 10% retrieval (you
* gotta better number? Should this be a SETtable parameter?)
* FETCH from a cursor, but it seems a good bet that he doesn't
* want 'em all. Optimize for 10% retrieval (you gotta better
* number? Should this be a SETtable parameter?)
*/
tuple_fraction = 0.10;
}
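The 10% heuristic above can be made concrete: with the fraction applied, the planner costs the query as if only the first tenth of the result will ever be fetched. A minimal sketch with invented numbers:

#include <stdio.h>

int main(void)
{
    int isCursor = 1;
    double tuple_fraction = 0.0;    /* 0.0 means "plan to fetch everything" */

    if (isCursor)
        tuple_fraction = 0.10;      /* the SETtable-parameter candidate */

    /* with 1,000,000 candidate rows, plan for roughly the first 100,000 */
    double expected_rows = 1000000.0 * (tuple_fraction > 0 ? tuple_fraction : 1.0);
    printf("optimize for ~%.0f rows\n", expected_rows);
    return 0;
}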
@ -121,8 +121,8 @@ planner(Query *parse, bool isCursor, int cursorOptions)
Assert(PlannerQueryLevel == 0);

/*
* If creating a plan for a scrollable cursor, make sure it can
* run backwards on demand. Add a Material node at the top at need.
* If creating a plan for a scrollable cursor, make sure it can run
* backwards on demand. Add a Material node at the top at need.
*/
if (isCursor && (cursorOptions & CURSOR_OPT_SCROLL))
{
@ -181,14 +181,14 @@ subquery_planner(Query *parse, double tuple_fraction)

/*
* Look for IN clauses at the top level of WHERE, and transform them
* into joins. Note that this step only handles IN clauses originally
* at top level of WHERE; if we pull up any subqueries in the next step,
* their INs are processed just before pulling them up.
* into joins. Note that this step only handles IN clauses originally
* at top level of WHERE; if we pull up any subqueries in the next
* step, their INs are processed just before pulling them up.
*/
parse->in_info_list = NIL;
if (parse->hasSubLinks)
parse->jointree->quals = pull_up_IN_clauses(parse,
parse->jointree->quals);
parse->jointree->quals);

/*
* Check to see if any subqueries in the rangetable can be merged into
@ -198,10 +198,11 @@ subquery_planner(Query *parse, double tuple_fraction)
pull_up_subqueries(parse, (Node *) parse->jointree, false);

/*
* Detect whether any rangetable entries are RTE_JOIN kind; if not,
* we can avoid the expense of doing flatten_join_alias_vars(). Also
* check for outer joins --- if none, we can skip reduce_outer_joins().
* This must be done after we have done pull_up_subqueries, of course.
* Detect whether any rangetable entries are RTE_JOIN kind; if not, we
* can avoid the expense of doing flatten_join_alias_vars(). Also
* check for outer joins --- if none, we can skip
* reduce_outer_joins(). This must be done after we have done
* pull_up_subqueries, of course.
*/
parse->hasJoinRTEs = false;
hasOuterJoins = false;
@ -283,19 +284,20 @@ subquery_planner(Query *parse, double tuple_fraction)
parse->havingQual = (Node *) newHaving;

/*
* If we have any outer joins, try to reduce them to plain inner joins.
* This step is most easily done after we've done expression preprocessing.
* If we have any outer joins, try to reduce them to plain inner
* joins. This step is most easily done after we've done expression
* preprocessing.
*/
if (hasOuterJoins)
reduce_outer_joins(parse);

/*
* See if we can simplify the jointree; opportunities for this may come
* from having pulled up subqueries, or from flattening explicit JOIN
* syntax. We must do this after flattening JOIN alias variables, since
* eliminating explicit JOIN nodes from the jointree will cause
* get_relids_for_join() to fail. But it should happen after
* reduce_outer_joins, anyway.
* See if we can simplify the jointree; opportunities for this may
* come from having pulled up subqueries, or from flattening explicit
* JOIN syntax. We must do this after flattening JOIN alias
* variables, since eliminating explicit JOIN nodes from the jointree
* will cause get_relids_for_join() to fail. But it should happen
* after reduce_outer_joins, anyway.
*/
parse->jointree = (FromExpr *)
simplify_jointree(parse, (Node *) parse->jointree);
@ -318,26 +320,26 @@ subquery_planner(Query *parse, double tuple_fraction)
*/
if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1)
{
Cost initplan_cost = 0;
Cost initplan_cost = 0;

/* Prepare extParam/allParam sets for all nodes in tree */
SS_finalize_plan(plan, parse->rtable);

/*
* SS_finalize_plan doesn't handle initPlans, so we have to manually
* attach them to the topmost plan node, and add their extParams to
* the topmost node's, too.
* SS_finalize_plan doesn't handle initPlans, so we have to
* manually attach them to the topmost plan node, and add their
* extParams to the topmost node's, too.
*
* We also add the total_cost of each initPlan to the startup cost
* of the top node. This is a conservative overestimate, since in
* fact each initPlan might be executed later than plan startup, or
* even not at all.
* We also add the total_cost of each initPlan to the startup cost of
* the top node. This is a conservative overestimate, since in
* fact each initPlan might be executed later than plan startup,
* or even not at all.
*/
plan->initPlan = PlannerInitPlan;

foreach(lst, plan->initPlan)
{
SubPlan *initplan = (SubPlan *) lfirst(lst);
SubPlan *initplan = (SubPlan *) lfirst(lst);

plan->extParam = bms_add_members(plan->extParam,
initplan->plan->extParam);
@ -368,7 +370,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
/*
* If the query has any join RTEs, replace join alias variables with
* base-relation variables. We must do this before sublink processing,
* else sublinks expanded out from join aliases wouldn't get processed.
* else sublinks expanded out from join aliases wouldn't get
* processed.
*/
if (parse->hasJoinRTEs)
expr = flatten_join_alias_vars(parse, expr);
@ -403,8 +406,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL));

/*
* XXX do not insert anything here unless you have grokked the comments
* in SS_replace_correlation_vars ...
* XXX do not insert anything here unless you have grokked the
* comments in SS_replace_correlation_vars ...
*/

/* Replace uplevel vars with Param nodes */
@ -498,20 +501,21 @@ inheritance_planner(Query *parse, List *inheritlist)
/* Generate plan */
subplan = grouping_planner(subquery, 0.0 /* retrieve all tuples */ );
subplans = lappend(subplans, subplan);

/*
* It's possible that additional RTEs got added to the rangetable
* due to expansion of inherited source tables (see allpaths.c).
* If so, we must copy 'em back to the main parse tree's rtable.
*
* XXX my goodness this is ugly. Really need to think about ways
* to rein in planner's habit of scribbling on its input.
* XXX my goodness this is ugly. Really need to think about ways to
* rein in planner's habit of scribbling on its input.
*/
subrtlength = length(subquery->rtable);
if (subrtlength > mainrtlength)
{
List *subrt = subquery->rtable;
List *subrt = subquery->rtable;

while (mainrtlength-- > 0) /* wish we had nthcdr() */
while (mainrtlength-- > 0) /* wish we had nthcdr() */
subrt = lnext(subrt);
parse->rtable = nconc(parse->rtable, subrt);
mainrtlength = subrtlength;
@ -684,7 +688,7 @@ grouping_planner(Query *parse, double tuple_fraction)
* from tlist if grouping or aggregation is needed.
*/
sub_tlist = make_subplanTargetList(parse, tlist,
&groupColIdx, &need_tlist_eval);
&groupColIdx, &need_tlist_eval);

/*
* Calculate pathkeys that represent grouping/ordering
@ -700,8 +704,8 @@ grouping_planner(Query *parse, double tuple_fraction)
* Also, it's possible that optimization has eliminated all
* aggregates, and we may as well check for that here.
*
* Note: we do not attempt to detect duplicate aggregates here;
* a somewhat-overestimated count is okay for our present purposes.
* Note: we do not attempt to detect duplicate aggregates here; a
* somewhat-overestimated count is okay for our present purposes.
*/
if (parse->hasAggs)
{
@ -892,8 +896,8 @@ grouping_planner(Query *parse, double tuple_fraction)
&cheapest_path, &sorted_path);

/*
* We couldn't canonicalize group_pathkeys and sort_pathkeys before
* running query_planner(), so do it now.
* We couldn't canonicalize group_pathkeys and sort_pathkeys
* before running query_planner(), so do it now.
*/
group_pathkeys = canonicalize_pathkeys(parse, group_pathkeys);
sort_pathkeys = canonicalize_pathkeys(parse, sort_pathkeys);
@ -903,9 +907,9 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
if (parse->groupClause)
{
List *groupExprs;
double cheapest_path_rows;
int cheapest_path_width;
List *groupExprs;
double cheapest_path_rows;
int cheapest_path_width;

/*
* Beware in this section of the possibility that
@ -919,13 +923,13 @@ grouping_planner(Query *parse, double tuple_fraction)
}
else
{
cheapest_path_rows = 1; /* assume non-set result */
cheapest_path_width = 100; /* arbitrary */
cheapest_path_rows = 1; /* assume non-set result */
cheapest_path_width = 100; /* arbitrary */
}

/*
* Always estimate the number of groups. We can't do this until
* after running query_planner(), either.
* Always estimate the number of groups. We can't do this
* until after running query_planner(), either.
*/
groupExprs = get_sortgrouplist_exprs(parse->groupClause,
parse->targetList);
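The group-count estimate used in the hunk that follows arrives as a double and is clamped into a long before use. A tiny standalone example of the same clamp; the estimate value is made up:

#include <stdio.h>
#include <limits.h>

#define Min(a,b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* estimate_num_groups() returns a double; downstream code wants a
     * long, so the planner clamps exactly as in the hunk below. */
    double dNumGroups = 3.5e18;     /* deliberately huge estimate */
    long numGroups = (long) Min(dNumGroups, (double) LONG_MAX);

    printf("clamped groups: %ld\n", numGroups);
    return 0;
}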
@ -936,12 +940,13 @@ grouping_planner(Query *parse, double tuple_fraction)
numGroups = (long) Min(dNumGroups, (double) LONG_MAX);

/*
* Check can't-do-it conditions, including whether the grouping
* operators are hashjoinable.
* Check can't-do-it conditions, including whether the
* grouping operators are hashjoinable.
*
* Executor doesn't support hashed aggregation with DISTINCT
* aggregates. (Doing so would imply storing *all* the input
* values in the hash table, which seems like a certain loser.)
* aggregates. (Doing so would imply storing *all* the input
* values in the hash table, which seems like a certain
* loser.)
*/
if (!enable_hashagg || !hash_safe_grouping(parse))
use_hashed_grouping = false;
@ -953,32 +958,30 @@ grouping_planner(Query *parse, double tuple_fraction)
{
/*
* Use hashed grouping if (a) we think we can fit the
* hashtable into SortMem, *and* (b) the estimated cost
* is no more than doing it the other way. While avoiding
* hashtable into SortMem, *and* (b) the estimated cost is
* no more than doing it the other way. While avoiding
* the need for sorted input is usually a win, the fact
* that the output won't be sorted may be a loss; so we
* need to do an actual cost comparison.
*
* In most cases we have no good way to estimate the size of
* the transition value needed by an aggregate; arbitrarily
* assume it is 100 bytes. Also set the overhead per hashtable
* entry at 64 bytes.
* the transition value needed by an aggregate;
* arbitrarily assume it is 100 bytes. Also set the
* overhead per hashtable entry at 64 bytes.
*/
int hashentrysize = cheapest_path_width + 64 + numAggs * 100;
int hashentrysize = cheapest_path_width + 64 + numAggs * 100;

if (hashentrysize * dNumGroups <= SortMem * 1024L)
{
/*
* Okay, do the cost comparison. We need to consider
* cheapest_path + hashagg [+ final sort]
* versus either
* cheapest_path [+ sort] + group or agg [+ final sort]
* or
* presorted_path + group or agg [+ final sort]
* where brackets indicate a step that may not be needed.
* We assume query_planner() will have returned a
* presorted path only if it's a winner compared to
* cheapest_path for this purpose.
* cheapest_path + hashagg [+ final sort] versus
* either cheapest_path [+ sort] + group or agg [+
* final sort] or presorted_path + group or agg [+
* final sort] where brackets indicate a step that may
* not be needed. We assume query_planner() will have
* returned a presorted path only if it's a winner
* compared to cheapest_path for this purpose.
*
* These path variables are dummies that just hold cost
* fields; we don't make actual Paths for these steps.
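A worked instance of the memory test above, using invented estimates. The 64-byte per-entry overhead and the 100-byte transition-value guess are taken directly from the comment:

#include <stdio.h>

int main(void)
{
    int cheapest_path_width = 32;   /* estimated bytes per input row */
    int numAggs = 2;                /* two aggregate transition values */
    long SortMem = 1024;            /* kilobytes, the 2003-era GUC */
    double dNumGroups = 2000.0;     /* estimated number of groups */

    /* per-entry: row width + 64 bytes hashtable overhead + 100 per agg */
    int hashentrysize = cheapest_path_width + 64 + numAggs * 100;

    if (hashentrysize * dNumGroups <= SortMem * 1024L)
        printf("hashtable fits: %d bytes/entry * %.0f groups\n",
               hashentrysize, dNumGroups);
    else
        printf("too big; fall back to sorted grouping\n");
    return 0;
}

With these numbers the table needs 592,000 bytes against a 1,048,576-byte budget, so hashed grouping stays in the running and the cost comparison proceeds.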
@ -1065,9 +1068,9 @@ grouping_planner(Query *parse, double tuple_fraction)
/*
* Select the best path and create a plan to execute it.
*
* If we are doing hashed grouping, we will always read all the
* input tuples, so use the cheapest-total path. Otherwise,
* trust query_planner's decision about which to use.
* If we are doing hashed grouping, we will always read all the input
* tuples, so use the cheapest-total path. Otherwise, trust
* query_planner's decision about which to use.
*/
if (sorted_path && !use_hashed_grouping)
{
@ -1081,19 +1084,19 @@ grouping_planner(Query *parse, double tuple_fraction)
}

/*
* create_plan() returns a plan with just a "flat" tlist of required
* Vars. Usually we need to insert the sub_tlist as the tlist of the
* top plan node. However, we can skip that if we determined that
* whatever query_planner chose to return will be good enough.
* create_plan() returns a plan with just a "flat" tlist of
* required Vars. Usually we need to insert the sub_tlist as the
* tlist of the top plan node. However, we can skip that if we
* determined that whatever query_planner chose to return will be
* good enough.
*/
if (need_tlist_eval)
{
/*
* If the top-level plan node is one that cannot do expression
* evaluation, we must insert a Result node to project the desired
* tlist.
* Currently, the only plan node we might see here that falls into
* that category is Append.
* evaluation, we must insert a Result node to project the
* desired tlist. Currently, the only plan node we might see
* here that falls into that category is Append.
*/
if (IsA(result_plan, Append))
{
@ -1108,23 +1111,25 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
result_plan->targetlist = sub_tlist;
}

/*
* Also, account for the cost of evaluation of the sub_tlist.
*
* Up to now, we have only been dealing with "flat" tlists,
* containing just Vars. So their evaluation cost is zero
* according to the model used by cost_qual_eval() (or if you
* prefer, the cost is factored into cpu_tuple_cost). Thus we can
* avoid accounting for tlist cost throughout query_planner() and
* subroutines. But now we've inserted a tlist that might contain
* actual operators, sub-selects, etc --- so we'd better account
* for its cost.
* prefer, the cost is factored into cpu_tuple_cost). Thus we
* can avoid accounting for tlist cost throughout
* query_planner() and subroutines. But now we've inserted a
* tlist that might contain actual operators, sub-selects, etc
* --- so we'd better account for its cost.
*
* Below this point, any tlist eval cost for added-on nodes should
* be accounted for as we create those nodes. Presently, of the
* node types we can add on, only Agg and Group project new tlists
* (the rest just copy their input tuples) --- so make_agg() and
* make_group() are responsible for computing the added cost.
* Below this point, any tlist eval cost for added-on nodes
* should be accounted for as we create those nodes.
* Presently, of the node types we can add on, only Agg and
* Group project new tlists (the rest just copy their input
* tuples) --- so make_agg() and make_group() are responsible
* for computing the added cost.
*/
cost_qual_eval(&tlist_cost, sub_tlist);
result_plan->startup_cost += tlist_cost.startup;
@ -1135,8 +1140,8 @@ grouping_planner(Query *parse, double tuple_fraction)
{
/*
* Since we're using query_planner's tlist and not the one
* make_subplanTargetList calculated, we have to refigure
* any grouping-column indexes make_subplanTargetList computed.
* make_subplanTargetList calculated, we have to refigure any
* grouping-column indexes make_subplanTargetList computed.
*/
locate_grouping_columns(parse, tlist, result_plan->targetlist,
groupColIdx);
@ -1180,6 +1185,7 @@ grouping_planner(Query *parse, double tuple_fraction)
current_pathkeys = group_pathkeys;
}
aggstrategy = AGG_SORTED;

/*
* The AGG node will not change the sort ordering of its
* groups, so current_pathkeys describes the result too.
@ -1205,7 +1211,8 @@ grouping_planner(Query *parse, double tuple_fraction)
else
{
/*
* If there are no Aggs, we shouldn't have any HAVING qual anymore
* If there are no Aggs, we shouldn't have any HAVING qual
* anymore
*/
Assert(parse->havingQual == NULL);

@ -1216,8 +1223,8 @@ grouping_planner(Query *parse, double tuple_fraction)
if (parse->groupClause)
{
/*
* Add an explicit sort if we couldn't make the path come out
* the way the GROUP node needs it.
* Add an explicit sort if we couldn't make the path come
* out the way the GROUP node needs it.
*/
if (!pathkeys_contained_in(group_pathkeys, current_pathkeys))
{
@ -1238,7 +1245,7 @@ grouping_planner(Query *parse, double tuple_fraction)
/* The Group node won't change sort ordering */
}
}
} /* end of if (setOperations) */
} /* end of if (setOperations) */

/*
* If we were not able to make the plan come out in the right order,
@ -1264,6 +1271,7 @@ grouping_planner(Query *parse, double tuple_fraction)
{
result_plan = (Plan *) make_unique(tlist, result_plan,
parse->distinctClause);

/*
* If there was grouping or aggregation, leave plan_rows as-is
* (ie, assume the result was already mostly unique). If not,
@ -1272,13 +1280,13 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
if (!parse->groupClause && !parse->hasAggs)
{
List *distinctExprs;
List *distinctExprs;

distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
parse->targetList);
result_plan->plan_rows = estimate_num_groups(parse,
distinctExprs,
result_plan->plan_rows);
result_plan->plan_rows);
}
}

@ -1443,7 +1451,7 @@ make_subplanTargetList(Query *parse,
false),
(Expr *) groupexpr);
sub_tlist = lappend(sub_tlist, te);
*need_tlist_eval = true; /* it's not flat anymore */
*need_tlist_eval = true; /* it's not flat anymore */
}

/* and save its resno */
@ -1459,7 +1467,7 @@ make_subplanTargetList(Query *parse,
* Locate grouping columns in the tlist chosen by query_planner.
*
* This is only needed if we don't use the sub_tlist chosen by
* make_subplanTargetList. We have to forget the column indexes found
* make_subplanTargetList. We have to forget the column indexes found
* by that routine and re-locate the grouping vars in the real sub_tlist.
*/
static void
@ -1528,7 +1536,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Assert(orig_tlist != NIL);
orig_tle = (TargetEntry *) lfirst(orig_tlist);
orig_tlist = lnext(orig_tlist);
if (orig_tle->resdom->resjunk) /* should not happen */
if (orig_tle->resdom->resjunk) /* should not happen */
elog(ERROR, "resjunk output columns are not implemented");
Assert(new_tle->resdom->resno == orig_tle->resdom->resno);
Assert(new_tle->resdom->restype == orig_tle->resdom->restype);

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.94 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.95 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -46,11 +46,11 @@ static void set_join_references(Join *join, List *rtable);
static void set_uppernode_references(Plan *plan, Index subvarno);
static bool targetlist_has_non_vars(List *tlist);
static List *join_references(List *clauses,
List *rtable,
List *outer_tlist,
List *inner_tlist,
Index acceptable_rel,
bool tlists_have_non_vars);
List *rtable,
List *outer_tlist,
List *inner_tlist,
Index acceptable_rel,
bool tlists_have_non_vars);
static Node *join_references_mutator(Node *node,
join_references_context *context);
static Node *replace_vars_with_subplan_refs(Node *node,
@ -60,7 +60,7 @@ static Node *replace_vars_with_subplan_refs(Node *node,
static Node *replace_vars_with_subplan_refs_mutator(Node *node,
replace_vars_with_subplan_refs_context *context);
static bool fix_opfuncids_walker(Node *node, void *context);
static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
static void set_sa_opfuncid(ScalarArrayOpExpr * opexpr);


/*****************************************************************************
@ -167,12 +167,13 @@ set_plan_references(Plan *plan, List *rtable)
(Node *) ((HashJoin *) plan)->hashclauses);
break;
case T_Hash:

/*
* Hash does not evaluate its targetlist or quals, so don't
* touch those (see comments below). But we do need to fix its
* hashkeys. The hashkeys are a little bizarre because they
* need to match the hashclauses of the parent HashJoin node,
* so we use join_references to fix them.
* touch those (see comments below). But we do need to fix
* its hashkeys. The hashkeys are a little bizarre because
* they need to match the hashclauses of the parent HashJoin
* node, so we use join_references to fix them.
*/
((Hash *) plan)->hashkeys =
join_references(((Hash *) plan)->hashkeys,
@ -180,7 +181,7 @@ set_plan_references(Plan *plan, List *rtable)
NIL,
plan->lefttree->targetlist,
(Index) 0,
targetlist_has_non_vars(plan->lefttree->targetlist));
targetlist_has_non_vars(plan->lefttree->targetlist));
fix_expr_references(plan,
(Node *) ((Hash *) plan)->hashkeys);
break;
@ -196,9 +197,9 @@ set_plan_references(Plan *plan, List *rtable)
* unmodified input tuples). The optimizer is lazy about
* creating really valid targetlists for them. Best to just
* leave the targetlist alone. In particular, we do not want
* to process subplans for them, since we will likely end
* up reprocessing subplans that also appear in lower levels
* of the plan tree!
* to process subplans for them, since we will likely end up
* reprocessing subplans that also appear in lower levels of
* the plan tree!
*/
break;
case T_Agg:
@ -253,7 +254,7 @@ set_plan_references(Plan *plan, List *rtable)

foreach(pl, plan->initPlan)
{
SubPlan *sp = (SubPlan *) lfirst(pl);
SubPlan *sp = (SubPlan *) lfirst(pl);

Assert(IsA(sp, SubPlan));
set_plan_references(sp->plan, sp->rtable);
@ -284,14 +285,14 @@ fix_expr_references_walker(Node *node, void *context)
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
else if (IsA(node, DistinctExpr))
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, ScalarArrayOpExpr))
set_sa_opfuncid((ScalarArrayOpExpr *) node);
else if (IsA(node, NullIfExpr))
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, SubPlan))
{
SubPlan *sp = (SubPlan *) node;
SubPlan *sp = (SubPlan *) node;

set_plan_references(sp->plan, sp->rtable);
}
@ -350,10 +351,10 @@ set_join_references(Join *join, List *rtable)
if (IsA(inner_plan, IndexScan))
{
/*
* An index is being used to reduce the number of tuples scanned
* in the inner relation. If there are join clauses being used
* with the index, we must update their outer-rel var nodes to
* refer to the outer side of the join.
* An index is being used to reduce the number of tuples
* scanned in the inner relation. If there are join clauses
* being used with the index, we must update their outer-rel
* var nodes to refer to the outer side of the join.
*/
IndexScan *innerscan = (IndexScan *) inner_plan;
List *indxqualorig = innerscan->indxqualorig;
@ -369,17 +370,18 @@ set_join_references(Join *join, List *rtable)
outer_tlist,
NIL,
innerrel,
tlists_have_non_vars);
tlists_have_non_vars);
innerscan->indxqual = join_references(innerscan->indxqual,
rtable,
outer_tlist,
NIL,
innerrel,
tlists_have_non_vars);
tlists_have_non_vars);

/*
* We must fix the inner qpqual too, if it has join clauses
* (this could happen if the index is lossy: some indxquals
* may get rechecked as qpquals).
* We must fix the inner qpqual too, if it has join
* clauses (this could happen if the index is lossy: some
* indxquals may get rechecked as qpquals).
*/
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
@ -387,7 +389,7 @@ set_join_references(Join *join, List *rtable)
outer_tlist,
NIL,
innerrel,
tlists_have_non_vars);
tlists_have_non_vars);
}
}
else if (IsA(inner_plan, TidScan))
@ -470,8 +472,8 @@ set_uppernode_references(Plan *plan, Index subvarno)
subplan_targetlist,
tlist_has_non_vars);
output_targetlist = lappend(output_targetlist,
makeTargetEntry(tle->resdom,
(Expr *) newexpr));
makeTargetEntry(tle->resdom,
(Expr *) newexpr));
}
plan->targetlist = output_targetlist;

@ -491,7 +493,7 @@ set_uppernode_references(Plan *plan, Index subvarno)
static bool
targetlist_has_non_vars(List *tlist)
{
List *l;
List *l;

foreach(l, tlist)
{
@ -740,11 +742,11 @@ fix_opfuncids_walker(Node *node, void *context)
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
else if (IsA(node, DistinctExpr))
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, ScalarArrayOpExpr))
set_sa_opfuncid((ScalarArrayOpExpr *) node);
else if (IsA(node, NullIfExpr))
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
return expression_tree_walker(node, fix_opfuncids_walker, context);
}

@ -757,7 +759,7 @@ fix_opfuncids_walker(Node *node, void *context)
* DistinctExpr and NullIfExpr nodes.
*/
void
set_opfuncid(OpExpr *opexpr)
set_opfuncid(OpExpr * opexpr)
{
if (opexpr->opfuncid == InvalidOid)
opexpr->opfuncid = get_opcode(opexpr->opno);
@ -768,7 +770,7 @@ set_opfuncid(OpExpr *opexpr)
* As above, for ScalarArrayOpExpr nodes.
*/
static void
set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
set_sa_opfuncid(ScalarArrayOpExpr * opexpr)
{
if (opexpr->opfuncid == InvalidOid)
opexpr->opfuncid = get_opcode(opexpr->opno);
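Both functions above use the same lazy-initialization pattern: the operator's implementing function is looked up once, on first use, and cached in the node. A standalone imitation follows; the lookup stub is invented, though OID 96 is the int4 equality operator in the stock catalogs:

#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

typedef struct OpExprStub { Oid opno; Oid opfuncid; } OpExprStub;

/* Toy stand-in for get_opcode(): maps an operator to its function. */
static Oid get_opcode_stub(Oid opno) { return opno + 1000; }

/* Same lazy-fill shape as set_opfuncid()/set_sa_opfuncid() above:
 * resolve the implementing function once, then reuse the cached OID. */
static void set_opfuncid_stub(OpExprStub *opexpr)
{
    if (opexpr->opfuncid == InvalidOid)
        opexpr->opfuncid = get_opcode_stub(opexpr->opno);
}

int main(void)
{
    OpExprStub e = {96, InvalidOid};
    set_opfuncid_stub(&e);
    set_opfuncid_stub(&e);          /* second call is a no-op */
    printf("opfuncid = %u\n", e.opfuncid);
    return 0;
}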
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.79 2003/07/25 00:01:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.80 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -71,26 +71,26 @@ typedef struct PlannerParamItem
{
Node *item; /* the Var, Aggref, or Param */
Index abslevel; /* its absolute query level */
} PlannerParamItem;
} PlannerParamItem;


typedef struct finalize_primnode_context
{
Bitmapset *paramids; /* Set of PARAM_EXEC paramids found */
Bitmapset *outer_params; /* Set of accessible outer paramids */
} finalize_primnode_context;
Bitmapset *paramids; /* Set of PARAM_EXEC paramids found */
Bitmapset *outer_params; /* Set of accessible outer paramids */
} finalize_primnode_context;


static List *convert_sublink_opers(List *lefthand, List *operOids,
List *targetlist, int rtindex,
List **righthandIds);
List *targetlist, int rtindex,
List **righthandIds);
static bool subplan_is_hashable(SubLink *slink, SubPlan *node);
static Node *replace_correlation_vars_mutator(Node *node, void *context);
static Node *process_sublinks_mutator(Node *node, bool *isTopQual);
static Bitmapset *finalize_plan(Plan *plan, List *rtable,
Bitmapset *outer_params,
Bitmapset *valid_params);
static bool finalize_primnode(Node *node, finalize_primnode_context *context);
Bitmapset * outer_params,
Bitmapset * valid_params);
static bool finalize_primnode(Node *node, finalize_primnode_context * context);


/*
@ -125,7 +125,7 @@ replace_outer_var(Var *var)
pitem = (PlannerParamItem *) lfirst(ppl);
if (pitem->abslevel == abslevel && IsA(pitem->item, Var))
{
Var *pvar = (Var *) pitem->item;
Var *pvar = (Var *) pitem->item;

if (pvar->varno == var->varno &&
pvar->varattno == var->varattno &&
@ -177,7 +177,7 @@ replace_outer_agg(Aggref *agg)
* Just make a new slot every time.
*/
agg = (Aggref *) copyObject(agg);
IncrementVarSublevelsUp((Node *) agg, - ((int) agg->agglevelsup), 0);
IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0);
Assert(agg->agglevelsup == 0);

pitem = (PlannerParamItem *) palloc(sizeof(PlannerParamItem));
@ -238,7 +238,7 @@ generate_new_param(Oid paramtype, int32 paramtypmod)
static Node *
make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
{
SubPlan *node = makeNode(SubPlan);
SubPlan *node = makeNode(SubPlan);
Query *subquery = (Query *) (slink->subselect);
double tuple_fraction;
Plan *plan;
@ -268,8 +268,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
* in path/costsize.c.
*
* XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or
* materialize its result below. In that case it would've been better to
* specify full retrieval. At present, however, we can only detect
* materialize its result below. In that case it would've been better
* to specify full retrieval. At present, however, we can only detect
* correlation or lack of it after we've made the subplan :-(. Perhaps
* detection of correlation should be done as a separate step.
* Meanwhile, we don't want to be too optimistic about the percentage
@ -323,12 +323,13 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
bms_free(tmpset);

/*
* Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, or
* MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or ARRAY,
* we just produce a Param referring to the result of evaluating the
* initPlan. For MULTIEXPR, we must build an AND or OR-clause of the
* individual comparison operators, using the appropriate lefthand
* side expressions and Params for the initPlan's target items.
* Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY,
* or MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or
* ARRAY, we just produce a Param referring to the result of
* evaluating the initPlan. For MULTIEXPR, we must build an AND or
* OR-clause of the individual comparison operators, using the
* appropriate lefthand side expressions and Params for the initPlan's
* target items.
*/
if (node->parParam == NIL && slink->subLinkType == EXISTS_SUBLINK)
{
@ -368,7 +369,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
}
else if (node->parParam == NIL && slink->subLinkType == MULTIEXPR_SUBLINK)
{
List *exprs;
List *exprs;

/* Convert the lefthand exprs and oper OIDs into executable exprs */
exprs = convert_sublink_opers(lefthand,
@ -378,6 +379,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
&node->paramIds);
node->setParam = listCopy(node->paramIds);
PlannerInitPlan = lappend(PlannerInitPlan, node);

/*
* The executable expressions are returned to become part of the
* outer plan's expression tree; they are not kept in the initplan
@ -402,15 +404,16 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
*/
if (subplan_is_hashable(slink, node))
node->useHashTable = true;

/*
* Otherwise, we have the option to tack a MATERIAL node onto the top
* of the subplan, to reduce the cost of reading it repeatedly. This
* is pointless for a direct-correlated subplan, since we'd have to
* recompute its results each time anyway. For uncorrelated/undirect
* correlated subplans, we add MATERIAL if the subplan's top plan node
* is anything more complicated than a plain sequential scan, and we
* do it even for seqscan if the qual appears selective enough to
* eliminate many tuples.
* Otherwise, we have the option to tack a MATERIAL node onto the
* top of the subplan, to reduce the cost of reading it
* repeatedly. This is pointless for a direct-correlated subplan,
* since we'd have to recompute its results each time anyway. For
* uncorrelated/undirect correlated subplans, we add MATERIAL if
* the subplan's top plan node is anything more complicated than a
* plain sequential scan, and we do it even for seqscan if the
* qual appears selective enough to eliminate many tuples.
*/
else if (node->parParam == NIL)
{
@ -448,9 +451,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
break;
}
if (use_material)
{
node->plan = plan = materialize_finished_plan(plan);
}
}

/* Convert the lefthand exprs and oper OIDs into executable exprs */
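The materialize-or-not policy described above reduces to a small decision table: hash if possible, otherwise materialize an uncorrelated subplan whose top node is costlier than a plain SeqScan. A schematic, compilable rendering with invented node tags, not the planner's actual switch:

#include <stdbool.h>
#include <stdio.h>

typedef enum { SEQSCAN, SORT, HASHJOIN } NodeTag;

int main(void)
{
    bool hashable = false;      /* subplan_is_hashable() said no */
    bool correlated = false;    /* node->parParam == NIL */
    NodeTag top = SORT;         /* top plan node of the subplan */

    bool use_material = false;
    if (!hashable && !correlated)
        use_material = (top != SEQSCAN);    /* a SeqScan may still qualify
                                             * if its qual is selective */
    printf("materialize? %s\n", use_material ? "yes" : "no");
    return 0;
}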
@ -470,7 +471,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)

/*
* The Var or Aggref has already been adjusted to have the
* correct varlevelsup or agglevelsup. We probably don't even
* correct varlevelsup or agglevelsup. We probably don't even
* need to copy it again, but be safe.
*/
args = lappend(args, copyObject(pitem->item));
@ -485,14 +486,14 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)

/*
* convert_sublink_opers: given a lefthand-expressions list and a list of
* operator OIDs, build a list of actually executable expressions. The
* operator OIDs, build a list of actually executable expressions. The
* righthand sides of the expressions are Params or Vars representing the
* results of the sub-select.
*
* If rtindex is 0, we build Params to represent the sub-select outputs.
* The paramids of the Params created are returned in the *righthandIds list.
*
* If rtindex is not 0, we build Vars using that rtindex as varno. The
* If rtindex is not 0, we build Vars using that rtindex as varno. The
* Vars themselves are returned in *righthandIds (this is a bit of a type
* cheat, but we can get away with it).
*/
@ -549,10 +550,10 @@ convert_sublink_opers(List *lefthand, List *operOids,
/*
* Make the expression node.
*
* Note: we use make_op_expr in case runtime type conversion
* function calls must be inserted for this operator! (But we
* are not expecting to have to resolve unknown Params, so
* it's okay to pass a null pstate.)
* Note: we use make_op_expr in case runtime type conversion function
* calls must be inserted for this operator! (But we are not
* expecting to have to resolve unknown Params, so it's okay to
* pass a null pstate.)
*/
result = lappend(result,
make_op_expr(NULL,
@ -584,9 +585,9 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
List *opids;

/*
* The sublink type must be "= ANY" --- that is, an IN operator.
* (We require the operator name to be unqualified, which may be
* overly paranoid, or may not be.) XXX since we also check that the
* The sublink type must be "= ANY" --- that is, an IN operator. (We
* require the operator name to be unqualified, which may be overly
* paranoid, or may not be.) XXX since we also check that the
* operators are hashable, the test on operator name may be redundant?
*/
if (slink->subLinkType != ANY_SUBLINK)
@ -594,33 +595,37 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
if (length(slink->operName) != 1 ||
strcmp(strVal(lfirst(slink->operName)), "=") != 0)
return false;

/*
* The subplan must not have any direct correlation vars --- else we'd
* have to recompute its output each time, so that the hashtable wouldn't
* gain anything.
* have to recompute its output each time, so that the hashtable
* wouldn't gain anything.
*/
if (node->parParam != NIL)
return false;

/*
* The estimated size of the subquery result must fit in SortMem.
* (XXX what about hashtable overhead?)
* The estimated size of the subquery result must fit in SortMem. (XXX
* what about hashtable overhead?)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData)));
if (subquery_size > SortMem * 1024L)
return false;

/*
* The combining operators must be hashable, strict, and self-commutative.
* The need for hashability is obvious, since we want to use hashing.
* Without strictness, behavior in the presence of nulls is too
* unpredictable. (We actually must assume even more than plain
* strictness, see nodeSubplan.c for details.) And commutativity ensures
* that the left and right datatypes are the same; this allows us to
* assume that the combining operators are equality for the righthand
* datatype, so that they can be used to compare righthand tuples as
* well as comparing lefthand to righthand tuples. (This last restriction
* could be relaxed by using two different sets of operators with the
* hash table, but there is no obvious usefulness to that at present.)
* The combining operators must be hashable, strict, and
* self-commutative. The need for hashability is obvious, since we
* want to use hashing. Without strictness, behavior in the presence
* of nulls is too unpredictable. (We actually must assume even more
* than plain strictness, see nodeSubplan.c for details.) And
* commutativity ensures that the left and right datatypes are the
* same; this allows us to assume that the combining operators are
* equality for the righthand datatype, so that they can be used to
* compare righthand tuples as well as comparing lefthand to righthand
* tuples. (This last restriction could be relaxed by using two
* different sets of operators with the hash table, but there is no
* obvious usefulness to that at present.)
*/
foreach(opids, slink->operOids)
{
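A worked instance of the SortMem size test in subplan_is_hashable() above, with invented estimates. sizeof(HeapTupleData) is replaced by a stand-in constant, and MAXALIGN is written out assuming 8-byte alignment:

#include <stdio.h>

#define MAXALIGN(LEN) (((long) (LEN) + 7) & ~7L)   /* 8-byte alignment */

int main(void)
{
    double plan_rows = 10000.0;     /* estimated result rows */
    int plan_width = 36;            /* estimated row width in bytes */
    int tuple_header = 28;          /* stand-in for sizeof(HeapTupleData) */
    long SortMem = 1024;            /* kilobytes */

    double subquery_size = plan_rows *
        (MAXALIGN(plan_width) + MAXALIGN(tuple_header));

    printf("%.0f bytes vs limit %ld\n", subquery_size, SortMem * 1024L);
    printf("hashable? %s\n",
           subquery_size > SortMem * 1024L ? "no" : "yes");
    return 0;
}

Here each row rounds up to 72 aligned bytes, so 10,000 rows need 720,000 bytes against the 1,048,576-byte budget and the hashtable route stays available.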
|
||||
@ -665,25 +670,27 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
|
||||
int rtindex;
|
||||
RangeTblEntry *rte;
|
||||
RangeTblRef *rtr;
|
||||
InClauseInfo *ininfo;
|
||||
InClauseInfo *ininfo;
|
||||
List *exprs;
|
||||
|
||||
/*
|
||||
* The sublink type must be "= ANY" --- that is, an IN operator.
|
||||
* (We require the operator name to be unqualified, which may be
|
||||
* overly paranoid, or may not be.)
|
||||
* The sublink type must be "= ANY" --- that is, an IN operator. (We
|
||||
* require the operator name to be unqualified, which may be overly
|
||||
* paranoid, or may not be.)
|
||||
*/
|
||||
if (sublink->subLinkType != ANY_SUBLINK)
|
||||
return NULL;
|
||||
if (length(sublink->operName) != 1 ||
|
||||
strcmp(strVal(lfirst(sublink->operName)), "=") != 0)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* The sub-select must not refer to any Vars of the parent query.
|
||||
* (Vars of higher levels should be okay, though.)
|
||||
*/
|
||||
if (contain_vars_of_level((Node *) subselect, 1))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* The left-hand expressions must contain some Vars of the current
|
||||
* query, else it's not gonna be a join.
|
||||
@ -691,6 +698,7 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
|
||||
left_varnos = pull_varnos((Node *) sublink->lefthand);
|
||||
if (bms_is_empty(left_varnos))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* The left-hand expressions mustn't be volatile. (Perhaps we should
|
||||
* test the combining operators, too? We'd only need to point the
|
||||
@ -698,13 +706,14 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
|
||||
*/
|
||||
if (contain_volatile_functions((Node *) sublink->lefthand))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Okay, pull up the sub-select into top range table and jointree.
|
||||
*
|
||||
* We rely here on the assumption that the outer query has no references
|
||||
* to the inner (necessarily true, other than the Vars that we build
|
||||
* below). Therefore this is a lot easier than what pull_up_subqueries
|
||||
* has to go through.
|
||||
* below). Therefore this is a lot easier than what
|
||||
* pull_up_subqueries has to go through.
|
||||
*/
|
||||
rte = addRangeTableEntryForSubquery(NULL,
|
||||
subselect,
|
||||
@ -715,6 +724,7 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
|
||||
rtr = makeNode(RangeTblRef);
|
||||
rtr->rtindex = rtindex;
|
||||
parse->jointree->fromlist = lappend(parse->jointree->fromlist, rtr);
|
||||
|
||||
/*
|
||||
* Now build the InClauseInfo node.
|
||||
*/
|
||||
@ -722,6 +732,7 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
|
||||
ininfo->lefthand = left_varnos;
|
||||
ininfo->righthand = bms_make_singleton(rtindex);
|
||||
parse->in_info_list = lcons(ininfo, parse->in_info_list);
|
||||
|
||||
/*
|
||||
* Build the result qual expressions. As a side effect,
|
||||
* ininfo->sub_targetlist is filled with a list of the Vars
|
||||
@ -744,9 +755,9 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
|
||||
* Since we do not recurse into the arguments of uplevel aggregates, they will
|
||||
* get copied to the appropriate subplan args list in the parent query with
|
||||
* uplevel vars not replaced by Params, but only adjusted in level (see
|
||||
* replace_outer_agg). That's exactly what we want for the vars of the parent
|
||||
* replace_outer_agg). That's exactly what we want for the vars of the parent
|
||||
* level --- but if an aggregate's argument contains any further-up variables,
|
||||
* they have to be replaced with Params in their turn. That will happen when
|
||||
* they have to be replaced with Params in their turn. That will happen when
|
||||
* the parent level runs SS_replace_correlation_vars. Therefore it must do
|
||||
* so after expanding its sublinks to subplans. And we don't want any steps
|
||||
* in between, else those steps would never get applied to the aggregate
|
||||
@ -796,7 +807,7 @@ SS_process_sublinks(Node *expr, bool isQual)
|
||||
static Node *
|
||||
process_sublinks_mutator(Node *node, bool *isTopQual)
|
||||
{
|
||||
bool locTopQual;
|
||||
bool locTopQual;
|
||||
|
||||
if (node == NULL)
|
||||
return NULL;
|
||||
@ -806,11 +817,13 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
|
||||
List *lefthand;
|
||||
|
||||
/*
|
||||
* First, recursively process the lefthand-side expressions, if any.
|
||||
* First, recursively process the lefthand-side expressions, if
|
||||
* any.
|
||||
*/
|
||||
locTopQual = false;
|
||||
lefthand = (List *)
|
||||
process_sublinks_mutator((Node *) sublink->lefthand, &locTopQual);
|
||||
|
||||
/*
|
||||
* Now build the SubPlan node and make the expr to return.
|
||||
*/
|
||||
@ -818,9 +831,9 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
|
||||
}
|
||||
|
||||
/*
|
||||
* We should never see a SubPlan expression in the input (since this is
|
||||
* the very routine that creates 'em to begin with). We shouldn't find
|
||||
* ourselves invoked directly on a Query, either.
|
||||
* We should never see a SubPlan expression in the input (since this
|
||||
* is the very routine that creates 'em to begin with). We shouldn't
|
||||
* find ourselves invoked directly on a Query, either.
|
||||
*/
|
||||
Assert(!is_subplan(node));
|
||||
Assert(!IsA(node, Query));
|
||||
@ -854,9 +867,9 @@ SS_finalize_plan(Plan *plan, List *rtable)
|
||||
List *lst;
|
||||
|
||||
/*
|
||||
* First, scan the param list to discover the sets of params that
|
||||
* are available from outer query levels and my own query level.
|
||||
* We do this once to save time in the per-plan recursion steps.
|
||||
* First, scan the param list to discover the sets of params that are
|
||||
* available from outer query levels and my own query level. We do
|
||||
* this once to save time in the per-plan recursion steps.
|
||||
*/
|
||||
paramid = 0;
|
||||
foreach(lst, PlannerParamList)
|
||||
@ -896,7 +909,7 @@ SS_finalize_plan(Plan *plan, List *rtable)
|
||||
*/
|
||||
static Bitmapset *
|
||||
finalize_plan(Plan *plan, List *rtable,
|
||||
Bitmapset *outer_params, Bitmapset *valid_params)
|
||||
Bitmapset * outer_params, Bitmapset * valid_params)
|
||||
{
|
||||
finalize_primnode_context context;
|
||||
List *lst;
|
||||
@ -1038,8 +1051,8 @@ finalize_plan(Plan *plan, List *rtable,
|
||||
plan->allParam = context.paramids;
|
||||
|
||||
/*
|
||||
* For speed at execution time, make sure extParam/allParam are actually
|
||||
* NULL if they are empty sets.
|
||||
* For speed at execution time, make sure extParam/allParam are
|
||||
* actually NULL if they are empty sets.
|
||||
*/
|
||||
if (bms_is_empty(plan->extParam))
|
||||
{
|
||||
@ -1060,7 +1073,7 @@ finalize_plan(Plan *plan, List *rtable,
|
||||
* expression tree to the result set.
|
||||
*/
|
||||
static bool
|
||||
finalize_primnode(Node *node, finalize_primnode_context *context)
|
||||
finalize_primnode(Node *node, finalize_primnode_context * context)
|
||||
{
|
||||
if (node == NULL)
|
||||
return false;
|
||||
@ -1076,12 +1089,12 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
|
||||
}
|
||||
if (is_subplan(node))
|
||||
{
|
||||
SubPlan *subplan = (SubPlan *) node;
|
||||
SubPlan *subplan = (SubPlan *) node;
|
||||
|
||||
/* Add outer-level params needed by the subplan to paramids */
|
||||
context->paramids = bms_join(context->paramids,
|
||||
bms_intersect(subplan->plan->extParam,
|
||||
context->outer_params));
|
||||
bms_intersect(subplan->plan->extParam,
|
||||
context->outer_params));
|
||||
/* fall through to recurse into subplan args */
|
||||
}
|
||||
return expression_tree_walker(node, finalize_primnode,
|
||||
|
@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.8 2003/07/25 00:01:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.9 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -32,28 +32,28 @@


/* These parameters are set by GUC */
int from_collapse_limit;
int join_collapse_limit;
int from_collapse_limit;
int join_collapse_limit;


typedef struct reduce_outer_joins_state
{
Relids relids; /* base relids within this subtree */
bool contains_outer; /* does subtree contain outer join(s)? */
bool contains_outer; /* does subtree contain outer join(s)? */
List *sub_states; /* List of states for subtree components */
} reduce_outer_joins_state;
} reduce_outer_joins_state;

static bool is_simple_subquery(Query *subquery);
static bool has_nullable_targetlist(Query *subquery);
static void resolvenew_in_jointree(Node *jtnode, int varno, List *subtlist);
static reduce_outer_joins_state *reduce_outer_joins_pass1(Node *jtnode);
static void reduce_outer_joins_pass2(Node *jtnode,
reduce_outer_joins_state *state,
Query *parse,
Relids nonnullable_rels);
reduce_outer_joins_state * state,
Query *parse,
Relids nonnullable_rels);
static Relids find_nonnullable_rels(Node *node, bool top_level);
static void fix_in_clause_relids(List *in_info_list, int varno,
Relids subrelids);
Relids subrelids);
static Node *find_jointree_node_for_rel(Node *jtnode, int relid);


@ -84,7 +84,7 @@ pull_up_IN_clauses(Query *parse, Node *node)
return NULL;
if (IsA(node, SubLink))
{
SubLink *sublink = (SubLink *) node;
SubLink *sublink = (SubLink *) node;
Node *subst;

/* Is it a convertible IN clause? If not, return it as-is */
@ -95,12 +95,12 @@ pull_up_IN_clauses(Query *parse, Node *node)
}
if (and_clause(node))
{
List *newclauses = NIL;
List *oldclauses;
List *newclauses = NIL;
List *oldclauses;

foreach(oldclauses, ((BoolExpr *) node)->args)
{
Node *oldclause = lfirst(oldclauses);
Node *oldclause = lfirst(oldclauses);

newclauses = lappend(newclauses,
pull_up_IN_clauses(parse,
@ -172,22 +172,22 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
*/
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subquery,
subquery->jointree->quals);
subquery->jointree->quals);

/*
* Now, recursively pull up the subquery's subqueries, so
* that this routine's processing is complete for its jointree
* and rangetable. NB: if the same subquery is referenced
* from multiple jointree items (which can't happen normally,
* but might after rule rewriting), then we will invoke this
* Now, recursively pull up the subquery's subqueries, so that
* this routine's processing is complete for its jointree and
* rangetable. NB: if the same subquery is referenced from
* multiple jointree items (which can't happen normally, but
* might after rule rewriting), then we will invoke this
* processing multiple times on that subquery. OK because
* nothing will happen after the first time. We do have to be
* careful to copy everything we pull up, however, or risk
* having chunks of structure multiply linked.
*
* Note: 'false' is correct here even if we are within an outer
* join in the upper query; the lower query starts with a clean
* slate for outer-join semantics.
* join in the upper query; the lower query starts with a
* clean slate for outer-join semantics.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subquery, (Node *) subquery->jointree,
@ -207,8 +207,8 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
OffsetVarNodes((Node *) subquery, rtoffset, 0);

/*
* Upper-level vars in subquery are now one level closer to their
* parent than before.
* Upper-level vars in subquery are now one level closer to
* their parent than before.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);

@ -257,13 +257,14 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
parse->rowMarks = nconc(parse->rowMarks, subquery->rowMarks);

/*
* We also have to fix the relid sets of any parent InClauseInfo
* nodes. (This could perhaps be done by ResolveNew, but it
* would clutter that routine's API unreasonably.)
* We also have to fix the relid sets of any parent
* InClauseInfo nodes. (This could perhaps be done by
* ResolveNew, but it would clutter that routine's API
* unreasonably.)
*/
if (parse->in_info_list)
{
Relids subrelids;
Relids subrelids;

subrelids = get_relids_in_jointree((Node *) subquery->jointree);
fix_in_clause_relids(parse->in_info_list, varno, subrelids);
@ -513,14 +514,14 @@ reduce_outer_joins(Query *parse)
reduce_outer_joins_state *state;

/*
* To avoid doing strictness checks on more quals than necessary,
* we want to stop descending the jointree as soon as there are no
* outer joins below our current point. This consideration forces
* a two-pass process. The first pass gathers information about which
* To avoid doing strictness checks on more quals than necessary, we
* want to stop descending the jointree as soon as there are no outer
* joins below our current point. This consideration forces a
* two-pass process. The first pass gathers information about which
* base rels appear below each side of each join clause, and about
* whether there are outer join(s) below each side of each join clause.
* The second pass examines qual clauses and changes join types as
* it descends the tree.
* whether there are outer join(s) below each side of each join
* clause. The second pass examines qual clauses and changes join
* types as it descends the tree.
*/
state = reduce_outer_joins_pass1((Node *) parse->jointree);

@ -608,7 +609,7 @@ reduce_outer_joins_pass1(Node *jtnode)
*/
static void
reduce_outer_joins_pass2(Node *jtnode,
reduce_outer_joins_state *state,
reduce_outer_joins_state * state,
Query *parse,
Relids nonnullable_rels)
{
@ -619,9 +620,7 @@ reduce_outer_joins_pass2(Node *jtnode,
if (jtnode == NULL)
elog(ERROR, "reached empty jointree");
if (IsA(jtnode, RangeTblRef))
{
elog(ERROR, "reached base rel");
}
else if (IsA(jtnode, FromExpr))
{
FromExpr *f = (FromExpr *) jtnode;
@ -701,10 +700,11 @@ reduce_outer_joins_pass2(Node *jtnode,
/*
* If this join is (now) inner, we can add any nonnullability
* constraints its quals provide to those we got from above.
* But if it is outer, we can only pass down the local constraints
* into the nullable side, because an outer join never eliminates
* any rows from its non-nullable side. If it's a FULL join then
* it doesn't eliminate anything from either side.
* But if it is outer, we can only pass down the local
* constraints into the nullable side, because an outer join
* never eliminates any rows from its non-nullable side. If
* it's a FULL join then it doesn't eliminate anything from
* either side.
*/
if (jointype != JOIN_FULL)
{
@ -713,7 +713,8 @@ reduce_outer_joins_pass2(Node *jtnode,
nonnullable_rels);
}
else
local_nonnullable = NULL; /* no use in calculating it */
local_nonnullable = NULL; /* no use in calculating
* it */

if (left_state->contains_outer)
{
@ -747,7 +748,7 @@ reduce_outer_joins_pass2(Node *jtnode,
*
* We don't use expression_tree_walker here because we don't want to
* descend through very many kinds of nodes; only the ones we can be sure
* are strict. We can descend through the top level of implicit AND'ing,
* are strict. We can descend through the top level of implicit AND'ing,
* but not through any explicit ANDs (or ORs) below that, since those are not
* strict constructs. The List case handles the top-level implicit AND list
* as well as lists of arguments to strict operators/functions.
@ -785,7 +786,7 @@ find_nonnullable_rels(Node *node, bool top_level)
}
else if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
OpExpr *expr = (OpExpr *) node;

if (op_strict(expr->opno))
result = find_nonnullable_rels((Node *) expr->args, false);
@ -800,7 +801,7 @@ find_nonnullable_rels(Node *node, bool top_level)
}
else if (IsA(node, RelabelType))
{
RelabelType *expr = (RelabelType *) node;
RelabelType *expr = (RelabelType *) node;

result = find_nonnullable_rels((Node *) expr->arg, top_level);
}
@ -817,7 +818,7 @@ find_nonnullable_rels(Node *node, bool top_level)
}
else if (IsA(node, BooleanTest))
{
BooleanTest *expr = (BooleanTest *) node;
BooleanTest *expr = (BooleanTest *) node;

/*
* Appropriate boolean tests are strict at top level.
@ -894,10 +895,11 @@ simplify_jointree(Query *parse, Node *jtnode)
(childlen + myothers) <= from_collapse_limit)
{
newlist = nconc(newlist, subf->fromlist);

/*
* By now, the quals have been converted to implicit-AND
* lists, so we just need to join the lists. NOTE: we
* put the pulled-up quals first.
* By now, the quals have been converted to
* implicit-AND lists, so we just need to join the
* lists. NOTE: we put the pulled-up quals first.
*/
f->quals = (Node *) nconc((List *) subf->quals,
(List *) f->quals);
@ -917,16 +919,17 @@ simplify_jointree(Query *parse, Node *jtnode)
/* Recursively simplify the children... */
j->larg = simplify_jointree(parse, j->larg);
j->rarg = simplify_jointree(parse, j->rarg);

/*
* If it is an outer join, we must not flatten it. An inner join
* If it is an outer join, we must not flatten it. An inner join
* is semantically equivalent to a FromExpr; we convert it to one,
* allowing it to be flattened into its parent, if the resulting
* FromExpr would have no more than join_collapse_limit members.
*/
if (j->jointype == JOIN_INNER && join_collapse_limit > 1)
{
int leftlen,
rightlen;
int leftlen,
rightlen;

if (j->larg && IsA(j->larg, FromExpr))
leftlen = length(((FromExpr *) j->larg)->fromlist);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.35 2003/05/28 22:32:49 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.36 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -21,12 +21,12 @@
#include "utils/lsyscache.h"

static Expr *flatten_andors(Expr *qual);
static void flatten_andors_and_walker(FastList *out_list, List *andlist);
static void flatten_andors_or_walker(FastList *out_list, List *orlist);
static void flatten_andors_and_walker(FastList * out_list, List *andlist);
static void flatten_andors_or_walker(FastList * out_list, List *orlist);
static List *pull_ands(List *andlist);
static void pull_ands_walker(FastList *out_list, List *andlist);
static void pull_ands_walker(FastList * out_list, List *andlist);
static List *pull_ors(List *orlist);
static void pull_ors_walker(FastList *out_list, List *orlist);
static void pull_ors_walker(FastList * out_list, List *orlist);
static Expr *find_nots(Expr *qual);
static Expr *push_nots(Expr *qual);
static Expr *find_ors(Expr *qual);
@ -328,7 +328,7 @@ flatten_andors(Expr *qual)
}

static void
flatten_andors_and_walker(FastList *out_list, List *andlist)
flatten_andors_and_walker(FastList * out_list, List *andlist)
{
List *arg;

@ -344,7 +344,7 @@ flatten_andors_and_walker(FastList *out_list, List *andlist)
}

static void
flatten_andors_or_walker(FastList *out_list, List *orlist)
flatten_andors_or_walker(FastList * out_list, List *orlist)
{
List *arg;

@ -377,7 +377,7 @@ pull_ands(List *andlist)
}

static void
pull_ands_walker(FastList *out_list, List *andlist)
pull_ands_walker(FastList * out_list, List *andlist)
{
List *arg;

@ -410,7 +410,7 @@ pull_ors(List *orlist)
}

static void
pull_ors_walker(FastList *out_list, List *orlist)
pull_ors_walker(FastList * out_list, List *orlist)
{
List *arg;

@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.62 2003/07/25 00:01:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.63 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -174,12 +174,12 @@ expand_targetlist(List *tlist, int command_type,
*
* When generating a NULL constant for a dropped column, we label
* it INT4 (any other guaranteed-to-exist datatype would do as
* well). We can't label it with the dropped column's datatype
* since that might not exist anymore. It does not really
* matter what we claim the type is, since NULL is NULL --- its
* representation is datatype-independent. This could perhaps
* confuse code comparing the finished plan to the target
* relation, however.
* well). We can't label it with the dropped column's
* datatype since that might not exist anymore. It does not
* really matter what we claim the type is, since NULL is NULL
* --- its representation is datatype-independent. This could
* perhaps confuse code comparing the finished plan to the
* target relation, however.
*/
Oid atttype = att_tup->atttypid;
int32 atttypmod = att_tup->atttypmod;
@ -193,7 +193,7 @@ expand_targetlist(List *tlist, int command_type,
new_expr = (Node *) makeConst(atttype,
att_tup->attlen,
(Datum) 0,
true, /* isnull */
true, /* isnull */
att_tup->attbyval);
new_expr = coerce_to_domain(new_expr,
InvalidOid,
@ -206,8 +206,8 @@ expand_targetlist(List *tlist, int command_type,
new_expr = (Node *) makeConst(INT4OID,
sizeof(int32),
(Datum) 0,
true, /* isnull */
true /* byval */);
true, /* isnull */
true /* byval */ );
/* label resdom with INT4, too */
atttype = INT4OID;
atttypmod = -1;
@ -228,8 +228,8 @@ expand_targetlist(List *tlist, int command_type,
new_expr = (Node *) makeConst(INT4OID,
sizeof(int32),
(Datum) 0,
true, /* isnull */
true /* byval */);
true, /* isnull */
true /* byval */ );
/* label resdom with INT4, too */
atttype = INT4OID;
atttypmod = -1;

@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.101 2003/07/28 00:09:15 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.102 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -427,7 +427,7 @@ generate_setop_tlist(List *colTypes, int flag,
}
else
{
expr = coerce_to_common_type(NULL, /* no UNKNOWNs here */
expr = coerce_to_common_type(NULL, /* no UNKNOWNs here */
expr,
colType,
"UNION/INTERSECT/EXCEPT");
@ -849,11 +849,11 @@ adjust_inherited_attrs_mutator(Node *node,
if (IsA(node, InClauseInfo))
{
/* Copy the InClauseInfo node with correct mutation of subnodes */
InClauseInfo *ininfo;
InClauseInfo *ininfo;

ininfo = (InClauseInfo *) expression_tree_mutator(node,
adjust_inherited_attrs_mutator,
(void *) context);
(void *) context);
/* now fix InClauseInfo's relid sets */
ininfo->lefthand = adjust_relid_set(ininfo->lefthand,
context->old_rt_index,
@ -880,9 +880,9 @@ adjust_inherited_attrs_mutator(Node *node,
adjust_inherited_attrs_mutator((Node *) oldinfo->clause, context);

/*
* We do NOT want to copy the original subclauseindices list, since
* the new rel will have different indices. The list will be rebuilt
* when needed during later planning.
* We do NOT want to copy the original subclauseindices list,
* since the new rel will have different indices. The list will
* be rebuilt when needed during later planning.
*/
newinfo->subclauseindices = NIL;

@ -896,7 +896,7 @@ adjust_inherited_attrs_mutator(Node *node,
context->old_rt_index,
context->new_rt_index);

newinfo->eval_cost.startup = -1; /* reset these too */
newinfo->eval_cost.startup = -1; /* reset these too */
newinfo->this_selec = -1;
newinfo->left_pathkey = NIL; /* and these */
newinfo->right_pathkey = NIL;
@ -925,7 +925,7 @@ adjust_inherited_attrs_mutator(Node *node,
*/
if (is_subplan(node))
{
SubPlan *subplan;
SubPlan *subplan;

/* Copy the node and process subplan args */
node = expression_tree_mutator(node, adjust_inherited_attrs_mutator,
@ -963,7 +963,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid)
*
* The expressions have already been fixed, but we have to make sure that
* the target resnos match the child table (they may not, in the case of
* a column that was added after-the-fact by ALTER TABLE). In some cases
* a column that was added after-the-fact by ALTER TABLE). In some cases
* this can force us to re-order the tlist to preserve resno ordering.
* (We do all this work in special cases so that preptlist.c is fast for
* the typical case.)

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.149 2003/08/03 23:46:37 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.150 2003/08/04 00:43:20 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@ -50,7 +50,7 @@ typedef struct
int nargs;
List *args;
int *usecounts;
} substitute_actual_parameters_context;
} substitute_actual_parameters_context;

static bool contain_agg_clause_walker(Node *node, void *context);
static bool contain_distinct_agg_clause_walker(Node *node, void *context);
@ -62,15 +62,15 @@ static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_nonstrict_functions_walker(Node *node, void *context);
static Node *eval_const_expressions_mutator(Node *node, List *active_fns);
static Expr *simplify_function(Oid funcid, Oid result_type, List *args,
bool allow_inline, List *active_fns);
bool allow_inline, List *active_fns);
static Expr *evaluate_function(Oid funcid, Oid result_type, List *args,
HeapTuple func_tuple);
HeapTuple func_tuple);
static Expr *inline_function(Oid funcid, Oid result_type, List *args,
HeapTuple func_tuple, List *active_fns);
HeapTuple func_tuple, List *active_fns);
static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
int *usecounts);
int *usecounts);
static Node *substitute_actual_parameters_mutator(Node *node,
substitute_actual_parameters_context *context);
substitute_actual_parameters_context * context);
static void sql_inline_error_callback(void *arg);
static Expr *evaluate_expr(Expr *expr, Oid result_type);

@ -110,7 +110,7 @@ make_opclause(Oid opno, Oid opresulttype, bool opretset,
Node *
get_leftop(Expr *clause)
{
OpExpr *expr = (OpExpr *) clause;
OpExpr *expr = (OpExpr *) clause;

if (expr->args != NIL)
return lfirst(expr->args);
@ -127,7 +127,7 @@ get_leftop(Expr *clause)
Node *
get_rightop(Expr *clause)
{
OpExpr *expr = (OpExpr *) clause;
OpExpr *expr = (OpExpr *) clause;

if (expr->args != NIL && lnext(expr->args) != NIL)
return lfirst(lnext(expr->args));
@ -408,7 +408,7 @@ count_agg_clause_walker(Node *node, int *count)
if (contain_agg_clause((Node *) ((Aggref *) node)->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("aggregate function calls may not be nested")));
errmsg("aggregate function calls may not be nested")));

/*
* Having checked that, we need not recurse into the argument.
@ -454,7 +454,7 @@ expression_returns_set_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
OpExpr *expr = (OpExpr *) node;

if (expr->opretset)
return true;
@ -556,7 +556,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
OpExpr *expr = (OpExpr *) node;

if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@ -564,7 +564,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, DistinctExpr))
{
DistinctExpr *expr = (DistinctExpr *) node;
DistinctExpr *expr = (DistinctExpr *) node;

if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@ -572,7 +572,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;

if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@ -580,7 +580,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, NullIfExpr))
{
NullIfExpr *expr = (NullIfExpr *) node;
NullIfExpr *expr = (NullIfExpr *) node;

if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@ -588,7 +588,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, SubLink))
{
SubLink *sublink = (SubLink *) node;
SubLink *sublink = (SubLink *) node;
List *opid;

foreach(opid, sublink->operOids)
@ -639,7 +639,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
OpExpr *expr = (OpExpr *) node;

if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@ -647,7 +647,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, DistinctExpr))
{
DistinctExpr *expr = (DistinctExpr *) node;
DistinctExpr *expr = (DistinctExpr *) node;

if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@ -655,7 +655,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;

if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@ -663,7 +663,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, NullIfExpr))
{
NullIfExpr *expr = (NullIfExpr *) node;
NullIfExpr *expr = (NullIfExpr *) node;

if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@ -671,7 +671,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, SubLink))
{
SubLink *sublink = (SubLink *) node;
SubLink *sublink = (SubLink *) node;
List *opid;

foreach(opid, sublink->operOids)
@ -698,7 +698,7 @@ contain_volatile_functions_walker(Node *node, void *context)
* could produce non-NULL output with a NULL input.
*
* XXX we do not examine sub-selects to see if they contain uses of
* nonstrict functions. It's not real clear if that is correct or not...
* nonstrict functions. It's not real clear if that is correct or not...
* for the current usage it does not matter, since inline_function()
* rejects cases with sublinks.
*/
@ -723,7 +723,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
OpExpr *expr = (OpExpr *) node;

if (!op_strict(expr->opno))
return true;
@ -766,7 +766,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true;
if (IsA(node, SubLink))
{
SubLink *sublink = (SubLink *) node;
SubLink *sublink = (SubLink *) node;
List *opid;

foreach(opid, sublink->operOids)
@ -981,7 +981,7 @@ NumRelids(Node *clause)
* XXX the clause is destructively modified!
*/
void
CommuteClause(OpExpr *clause)
CommuteClause(OpExpr * clause)
{
Oid opoid;
Node *temp;
@ -1062,18 +1062,20 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
(void *) active_fns);

/*
* Code for op/func reduction is pretty bulky, so split it out
* as a separate function.
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
*/
simple = simplify_function(expr->funcid, expr->funcresulttype, args,
true, active_fns);
if (simple) /* successfully simplified it */
return (Node *) simple;

/*
* The expression cannot be simplified any further, so build and
* return a replacement FuncExpr node using the possibly-simplified
* arguments.
* return a replacement FuncExpr node using the
* possibly-simplified arguments.
*/
newexpr = makeNode(FuncExpr);
newexpr->funcid = expr->funcid;
@ -1098,19 +1100,22 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
(void *) active_fns);

/*
* Need to get OID of underlying function. Okay to scribble on
* Need to get OID of underlying function. Okay to scribble on
* input to this extent.
*/
set_opfuncid(expr);

/*
* Code for op/func reduction is pretty bulky, so split it out
* as a separate function.
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype, args,
true, active_fns);
if (simple) /* successfully simplified it */
return (Node *) simple;

/*
* The expression cannot be simplified any further, so build and
* return a replacement OpExpr node using the possibly-simplified
@ -1136,8 +1141,8 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
DistinctExpr *newexpr;

/*
* Reduce constants in the DistinctExpr's arguments. We know args is
* either NIL or a List node, so we can call
* Reduce constants in the DistinctExpr's arguments. We know args
* is either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
@ -1145,9 +1150,9 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
(void *) active_fns);

/*
* We must do our own check for NULLs because
* DistinctExpr has different results for NULL input
* than the underlying operator does.
* We must do our own check for NULLs because DistinctExpr has
* different results for NULL input than the underlying operator
* does.
*/
foreach(arg, args)
{
@ -1175,10 +1180,12 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
/* (NOT okay to try to inline it, though!) */

/*
* Need to get OID of underlying function. Okay to scribble on
* input to this extent.
* Need to get OID of underlying function. Okay to scribble
* on input to this extent.
*/
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
set_opfuncid((OpExpr *) expr); /* rely on struct
* equivalence */

/*
* Code for op/func reduction is pretty bulky, so split it out
* as a separate function.
@ -1191,7 +1198,7 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
* Since the underlying operator is "=", must negate its
* result
*/
Const *csimple = (Const *) simple;
Const *csimple = (Const *) simple;

Assert(IsA(csimple, Const));
csimple->constvalue =
@ -1359,8 +1366,7 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
if (IsA(node, SubPlan))
{
/*
* Return a SubPlan unchanged --- too late to do anything
* with it.
* Return a SubPlan unchanged --- too late to do anything with it.
*
* XXX should we ereport() here instead? Probably this routine
* should never be invoked after SubPlan creation.
@ -1487,16 +1493,16 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
}
if (IsA(node, ArrayExpr))
{
ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExpr *newarray;
bool all_const = true;
ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExpr *newarray;
bool all_const = true;
FastList newelems;
List *element;
List *element;

FastListInit(&newelems);
foreach(element, arrayexpr->elements)
{
Node *e;
Node *e;

e = eval_const_expressions_mutator((Node *) lfirst(element),
active_fns);
@ -1522,19 +1528,20 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
CoalesceExpr *coalesceexpr = (CoalesceExpr *) node;
CoalesceExpr *newcoalesce;
FastList newargs;
List *arg;
List *arg;

FastListInit(&newargs);
foreach(arg, coalesceexpr->args)
{
Node *e;
Node *e;

e = eval_const_expressions_mutator((Node *) lfirst(arg),
active_fns);
/*
* We can remove null constants from the list.
* For a non-null constant, if it has not been preceded by any
* other non-null-constant expressions then that is the result.

/*
* We can remove null constants from the list. For a non-null
* constant, if it has not been preceded by any other
* non-null-constant expressions then that is the result.
*/
if (IsA(e, Const))
{
@ -1555,10 +1562,11 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
{
/*
* We can optimize field selection from a whole-row Var into a
* simple Var. (This case won't be generated directly by the
* parser, because ParseComplexProjection short-circuits it.
* But it can arise while simplifying functions.) If the argument
* isn't a whole-row Var, just fall through to do generic processing.
* simple Var. (This case won't be generated directly by the
* parser, because ParseComplexProjection short-circuits it. But
* it can arise while simplifying functions.) If the argument
* isn't a whole-row Var, just fall through to do generic
* processing.
*/
FieldSelect *fselect = (FieldSelect *) node;
Var *argvar = (Var *) fselect->arg;
@ -1604,12 +1612,12 @@ simplify_function(Oid funcid, Oid result_type, List *args,
Expr *newexpr;

/*
* We have two strategies for simplification: either execute the function
* to deliver a constant result, or expand in-line the body of the
* function definition (which only works for simple SQL-language
* functions, but that is a common case). In either case we need access
* to the function's pg_proc tuple, so fetch it just once to use in both
* attempts.
* We have two strategies for simplification: either execute the
* function to deliver a constant result, or expand in-line the body
* of the function definition (which only works for simple
* SQL-language functions, but that is a common case). In either case
* we need access to the function's pg_proc tuple, so fetch it just
* once to use in both attempts.
*/
func_tuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcid),
@ -1668,15 +1676,15 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
/*
* If the function is strict and has a constant-NULL input, it will
* never be called at all, so we can replace the call by a NULL
* constant, even if there are other inputs that aren't constant,
* and even if the function is not otherwise immutable.
* constant, even if there are other inputs that aren't constant, and
* even if the function is not otherwise immutable.
*/
if (funcform->proisstrict && has_null_input)
return (Expr *) makeNullConst(result_type);

/*
* Otherwise, can simplify only if the function is immutable and
* all inputs are constants. (For a non-strict function, constant NULL
* Otherwise, can simplify only if the function is immutable and all
* inputs are constants. (For a non-strict function, constant NULL
* inputs are treated the same as constant non-NULL inputs.)
*/
if (funcform->provolatile != PROVOLATILE_IMMUTABLE ||
@ -1692,7 +1700,7 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
newexpr->funcid = funcid;
newexpr->funcresulttype = result_type;
newexpr->funcretset = false;
newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
newexpr->args = args;

return evaluate_expr((Expr *) newexpr, result_type);
@ -1712,7 +1720,7 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
* do not re-expand them. Also, if a parameter is used more than once
* in the SQL-function body, we require it not to contain any volatile
* functions (volatiles might deliver inconsistent answers) nor to be
* unreasonably expensive to evaluate. The expensiveness check not only
* unreasonably expensive to evaluate. The expensiveness check not only
* prevents us from doing multiple evaluations of an expensive parameter
* at runtime, but is a safety value to limit growth of an expression due
* to repeated inlining.
@ -1747,7 +1755,7 @@ inline_function(Oid funcid, Oid result_type, List *args,

/*
* Forget it if the function is not SQL-language or has other
* showstopper properties. (The nargs check is just paranoia.)
* showstopper properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@ -1755,7 +1763,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
funcform->pronargs != length(args))
return NULL;

/* Forget it if declared return type is not base, domain, or polymorphic */
/*
* Forget it if declared return type is not base, domain, or
* polymorphic
*/
result_typtype = get_typtype(funcform->prorettype);
if (result_typtype != 'b' &&
result_typtype != 'd')
@ -1788,8 +1799,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
}

/*
* Setup error traceback support for ereport(). This is so that we can
* finger the function that bad information came from.
* Setup error traceback support for ereport(). This is so that we
* can finger the function that bad information came from.
*/
sqlerrcontext.callback = sql_inline_error_callback;
sqlerrcontext.arg = funcform;
@ -1818,9 +1829,9 @@ inline_function(Oid funcid, Oid result_type, List *args,

/*
* We just do parsing and parse analysis, not rewriting, because
* rewriting will not affect table-free-SELECT-only queries, which is all
* that we care about. Also, we can punt as soon as we detect more than
* one command in the function body.
* rewriting will not affect table-free-SELECT-only queries, which is
* all that we care about. Also, we can punt as soon as we detect
* more than one command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
if (length(raw_parsetree_list) != 1)
@ -1863,8 +1874,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
* then it wasn't type-checked at definition time; must do so now.
* (This will raise an error if wrong, but that's okay since the
* function would fail at runtime anyway. Note we do not try this
* until we have verified that no rewriting was needed; that's probably
* not important, but let's be careful.)
* until we have verified that no rewriting was needed; that's
* probably not important, but let's be careful.)
*/
if (polymorphic)
check_sql_fn_retval(result_type, get_typtype(result_type),
@ -1875,9 +1886,9 @@ inline_function(Oid funcid, Oid result_type, List *args,
* set, and it mustn't be more volatile than the surrounding function
* (this is to avoid breaking hacks that involve pretending a function
* is immutable when it really ain't). If the surrounding function is
* declared strict, then the expression must contain only strict constructs
* and must use all of the function parameters (this is overkill, but
* an exact analysis is hard).
* declared strict, then the expression must contain only strict
* constructs and must use all of the function parameters (this is
* overkill, but an exact analysis is hard).
*/
if (expression_returns_set(newexpr))
goto fail;
@ -1886,7 +1897,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
contain_mutable_functions(newexpr))
goto fail;
else if (funcform->provolatile == PROVOLATILE_STABLE &&
contain_volatile_functions(newexpr))
contain_volatile_functions(newexpr))
goto fail;

if (funcform->proisstrict &&
@ -1907,7 +1918,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
i = 0;
foreach(arg, args)
{
Node *param = lfirst(arg);
Node *param = lfirst(arg);

if (usecounts[i] == 0)
{
@ -1932,6 +1943,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
if (eval_cost.startup + eval_cost.per_tuple >
10 * cpu_operator_cost)
goto fail;

/*
* Check volatility last since this is more expensive than the
* above tests
@ -1943,8 +1955,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
}

/*
* Whew --- we can make the substitution. Copy the modified expression
* out of the temporary memory context, and clean up.
* Whew --- we can make the substitution. Copy the modified
* expression out of the temporary memory context, and clean up.
*/
MemoryContextSwitchTo(oldcxt);

@ -1981,7 +1993,7 @@ substitute_actual_parameters(Node *expr, int nargs, List *args,
{
substitute_actual_parameters_context context;

context.nargs = nargs;
context.nargs = nargs;
context.args = args;
context.usecounts = usecounts;

@ -1990,7 +2002,7 @@ substitute_actual_parameters(Node *expr, int nargs, List *args,

static Node *
substitute_actual_parameters_mutator(Node *node,
substitute_actual_parameters_context *context)
substitute_actual_parameters_context * context)
{
if (node == NULL)
return NULL;
@ -2059,10 +2071,10 @@ evaluate_expr(Expr *expr, Oid result_type)
/*
* And evaluate it.
*
* It is OK to use a default econtext because none of the
* ExecEvalExpr() code used in this situation will use econtext. That
* might seem fortuitous, but it's not so unreasonable --- a constant
* expression does not depend on context, by definition, n'est ce pas?
* It is OK to use a default econtext because none of the ExecEvalExpr()
* code used in this situation will use econtext. That might seem
* fortuitous, but it's not so unreasonable --- a constant expression
* does not depend on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExprSwitchContext(exprstate,
GetPerTupleExprContext(estate),
@ -2177,7 +2189,7 @@ evaluate_expr(Expr *expr, Oid result_type)
*
* expression_tree_walker will handle SubPlan nodes by recursing normally
* into the "exprs" and "args" lists (which are expressions belonging to
* the outer plan). It will not touch the completed subplan, however. Since
* the outer plan). It will not touch the completed subplan, however. Since
* there is no link to the original Query, it is not possible to recurse into
* subselects of an already-planned expression tree. This is OK for current
* uses, but may need to be revisited in future.
@ -2283,6 +2295,7 @@ expression_tree_walker(Node *node,
if (expression_tree_walker((Node *) sublink->lefthand,
walker, context))
return true;

/*
* Also invoke the walker on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
@ -2292,7 +2305,7 @@ expression_tree_walker(Node *node,
break;
case T_SubPlan:
{
SubPlan *subplan = (SubPlan *) node;
SubPlan *subplan = (SubPlan *) node;

/* recurse into the exprs list, but not into the Plan */
if (expression_tree_walker((Node *) subplan->exprs,
@ -2457,12 +2470,12 @@ query_tree_walker(Query *query,
/* nothing to do */
break;
case RTE_SUBQUERY:
if (! (flags & QTW_IGNORE_RT_SUBQUERIES))
if (!(flags & QTW_IGNORE_RT_SUBQUERIES))
if (walker(rte->subquery, context))
return true;
break;
case RTE_JOIN:
if (! (flags & QTW_IGNORE_JOINALIASES))
if (!(flags & QTW_IGNORE_JOINALIASES))
if (walker(rte->joinaliasvars, context))
return true;
break;
@ -2622,8 +2635,8 @@ expression_tree_mutator(Node *node,
break;
case T_DistinctExpr:
{
DistinctExpr *expr = (DistinctExpr *) node;
DistinctExpr *newnode;
DistinctExpr *expr = (DistinctExpr *) node;
DistinctExpr *newnode;

FLATCOPY(newnode, expr, DistinctExpr);
MUTATE(newnode->args, expr->args, List *);
@ -2632,8 +2645,8 @@ expression_tree_mutator(Node *node,
break;
case T_ScalarArrayOpExpr:
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExpr *newnode;
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExpr *newnode;

FLATCOPY(newnode, expr, ScalarArrayOpExpr);
MUTATE(newnode->args, expr->args, List *);
@ -2657,6 +2670,7 @@ expression_tree_mutator(Node *node,

FLATCOPY(newnode, sublink, SubLink);
MUTATE(newnode->lefthand, sublink->lefthand, List *);

/*
* Also invoke the mutator on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
@ -2667,8 +2681,8 @@ expression_tree_mutator(Node *node,
break;
case T_SubPlan:
{
SubPlan *subplan = (SubPlan *) node;
SubPlan *newnode;
SubPlan *subplan = (SubPlan *) node;
SubPlan *newnode;

FLATCOPY(newnode, subplan, SubPlan);
/* transform exprs list */
@ -2725,8 +2739,8 @@ expression_tree_mutator(Node *node,
break;
case T_ArrayExpr:
{
ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExpr *newnode;
ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExpr *newnode;

FLATCOPY(newnode, arrayexpr, ArrayExpr);
MUTATE(newnode->elements, arrayexpr->elements, List *);
@ -2745,8 +2759,8 @@ expression_tree_mutator(Node *node,
break;
case T_NullIfExpr:
{
NullIfExpr *expr = (NullIfExpr *) node;
NullIfExpr *newnode;
NullIfExpr *expr = (NullIfExpr *) node;
NullIfExpr *newnode;

FLATCOPY(newnode, expr, NullIfExpr);
MUTATE(newnode->args, expr->args, List *);
@ -2891,7 +2905,7 @@ expression_tree_mutator(Node *node,
* indicated items. (More flag bits may be added as needed.)
*
* Normally the Query node itself is copied, but some callers want it to be
* modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
* modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
* modified substructure is safely copied in any case.
*/
Query *
@ -2905,9 +2919,9 @@ query_tree_mutator(Query *query,

Assert(query != NULL && IsA(query, Query));

if (! (flags & QTW_DONT_COPY_QUERY))
if (!(flags & QTW_DONT_COPY_QUERY))
{
Query *newquery;
Query *newquery;

FLATCOPY(newquery, query, Query);
query = newquery;
@ -2933,7 +2947,7 @@ query_tree_mutator(Query *query,
/* nothing to do, don't bother to make a copy */
break;
case RTE_SUBQUERY:
if (! (flags & QTW_IGNORE_RT_SUBQUERIES))
if (!(flags & QTW_IGNORE_RT_SUBQUERIES))
{
FLATCOPY(newrte, rte, RangeTblEntry);
CHECKFLATCOPY(newrte->subquery, rte->subquery, Query);
@ -2942,7 +2956,7 @@ query_tree_mutator(Query *query,
}
break;
case RTE_JOIN:
if (! (flags & QTW_IGNORE_JOINALIASES))
if (!(flags & QTW_IGNORE_JOINALIASES))
{
FLATCOPY(newrte, rte, RangeTblEntry);
MUTATE(newrte->joinaliasvars, rte->joinaliasvars, List *);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/joininfo.c,v 1.34 2003/02/08 20:20:55 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/joininfo.c,v 1.35 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -73,7 +73,7 @@ make_joininfo_node(RelOptInfo *this_rel, Relids join_relids)
* appropriate rel node if necessary).
*
* Note that the same copy of the restrictinfo node is linked to by all the
* lists it is in. This allows us to exploit caching of information about
* lists it is in. This allows us to exploit caching of information about
* the restriction clause (but we must be careful that the information does
* not depend on context).
*
@ -109,9 +109,10 @@ add_join_clause_to_rels(Query *root,
unjoined_relids);
joininfo->jinfo_restrictinfo = lappend(joininfo->jinfo_restrictinfo,
restrictinfo);

/*
* Can't bms_free(unjoined_relids) because new joininfo node may
* link to it. We could avoid leaking memory by doing bms_copy()
* link to it. We could avoid leaking memory by doing bms_copy()
* in make_joininfo_node, but for now speed seems better.
*/
}
@ -156,13 +157,14 @@ remove_join_clause_from_rels(Query *root,
joininfo = find_joininfo_node(find_base_rel(root, cur_relid),
unjoined_relids);
Assert(joininfo);

/*
* Remove the restrictinfo from the list. Pointer comparison
* is sufficient.
* Remove the restrictinfo from the list. Pointer comparison is
* sufficient.
*/
Assert(ptrMember(restrictinfo, joininfo->jinfo_restrictinfo));
joininfo->jinfo_restrictinfo = lremove(restrictinfo,
joininfo->jinfo_restrictinfo);
joininfo->jinfo_restrictinfo);
bms_free(unjoined_relids);
}
bms_free(tmprelids);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.93 2003/07/25 00:01:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.94 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -159,7 +159,7 @@ set_cheapest(RelOptInfo *parent_rel)

parent_rel->cheapest_startup_path = cheapest_startup_path;
parent_rel->cheapest_total_path = cheapest_total_path;
parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
}

/*
@ -452,7 +452,7 @@ create_result_path(RelOptInfo *rel, Path *subpath, List *constantqual)
ResultPath *pathnode = makeNode(ResultPath);

pathnode->path.pathtype = T_Result;
pathnode->path.parent = rel; /* may be NULL */
pathnode->path.parent = rel; /* may be NULL */

if (subpath)
pathnode->path.pathkeys = subpath->pathkeys;
@ -545,8 +545,8 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath)
pathnode->path.parent = rel;

/*
* Treat the output as always unsorted, since we don't necessarily have
* pathkeys to represent it.
* Treat the output as always unsorted, since we don't necessarily
* have pathkeys to represent it.
*/
pathnode->path.pathkeys = NIL;

@ -591,26 +591,28 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath)
subpath->total_cost,
rel->rows,
rel->width);

/*
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples. (XXX probably
* this is an overestimate.) This should agree with make_unique.
* assume all columns get compared at most of the tuples. (XXX
* probably this is an overestimate.) This should agree with
* make_unique.
*/
sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;

/*
* Is it safe to use a hashed implementation? If so, estimate and
* compare costs. We only try this if we know the targetlist for
* sure (else we can't be sure about the datatypes involved).
* compare costs. We only try this if we know the targetlist for sure
* (else we can't be sure about the datatypes involved).
*/
pathnode->use_hash = false;
if (enable_hashagg && sub_targetlist && hash_safe_tlist(sub_targetlist))
{
/*
* Estimate the overhead per hashtable entry at 64 bytes (same
* as in planner.c).
* Estimate the overhead per hashtable entry at 64 bytes (same as
* in planner.c).
*/
int hashentrysize = rel->width + 64;
int hashentrysize = rel->width + 64;

if (hashentrysize * pathnode->rows <= SortMem * 1024L)
{
@ -647,7 +649,7 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath)
* We assume hashed aggregation will work if the datatype's equality operator
* is marked hashjoinable.
*
* XXX this probably should be somewhere else. See also hash_safe_grouping
* XXX this probably should be somewhere else. See also hash_safe_grouping
* in plan/planner.c.
*/
static bool
@ -788,6 +790,7 @@ create_mergejoin_path(Query *root,
if (innersortkeys &&
pathkeys_contained_in(innersortkeys, inner_path->pathkeys))
innersortkeys = NIL;

/*
* If we are not sorting the inner path, we may need a materialize
* node to ensure it can be marked/restored. (Sort does support

@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.85 2003/07/25 00:01:08 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.86 2003/08/04 00:43:20 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -65,7 +65,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
|
||||
rel->max_attr = RelationGetNumberOfAttributes(relation);
|
||||
|
||||
/*
|
||||
* Make list of indexes. Ignore indexes on system catalogs if told to.
|
||||
* Make list of indexes. Ignore indexes on system catalogs if told
|
||||
* to.
|
||||
*/
|
||||
if (IsIgnoringSystemIndexes() && IsSystemClass(relation->rd_rel))
|
||||
hasindex = false;
|
||||
@ -99,8 +100,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
|
||||
info->ncolumns = ncolumns = index->indnatts;
|
||||
|
||||
/*
|
||||
* Need to make classlist and ordering arrays large enough to put
|
||||
* a terminating 0 at the end of each one.
|
||||
* Need to make classlist and ordering arrays large enough to
|
||||
* put a terminating 0 at the end of each one.
|
||||
*/
|
||||
info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
|
||||
info->classlist = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
|
||||
@ -118,7 +119,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
|
||||
info->amcostestimate = index_cost_estimator(indexRelation);
|
||||
|
||||
/*
|
||||
* Fetch the ordering operators associated with the index, if any.
|
||||
* Fetch the ordering operators associated with the index, if
|
||||
* any.
|
||||
*/
|
||||
amorderstrategy = indexRelation->rd_am->amorderstrategy;
|
||||
if (amorderstrategy != 0)
|
||||
@ -135,8 +137,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
|
||||
/*
|
||||
* Fetch the index expressions and predicate, if any. We must
|
||||
* modify the copies we obtain from the relcache to have the
|
||||
* correct varno for the parent relation, so that they match up
|
||||
* correctly against qual clauses.
|
||||
* correct varno for the parent relation, so that they match
|
||||
* up correctly against qual clauses.
|
||||
*/
|
||||
info->indexprs = RelationGetIndexExpressions(indexRelation);
|
||||
info->indpred = RelationGetIndexPredicate(indexRelation);
|
||||
@ -177,7 +179,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
|
||||
* Exception: if there are any dropped columns, we punt and return NIL.
|
||||
* Ideally we would like to handle the dropped-column case too. However this
|
||||
* creates problems for ExecTypeFromTL, which may be asked to build a tupdesc
|
||||
* for a tlist that includes vars of no-longer-existent types. In theory we
|
||||
* for a tlist that includes vars of no-longer-existent types. In theory we
|
||||
* could dig out the required info from the pg_attribute entries of the
|
||||
* relation, but that data is not readily available to ExecTypeFromTL.
|
||||
* For now, we don't apply the physical-tlist optimization when there are
|
||||
@ -389,11 +391,11 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno)
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);

/*
* Note: ignore partial indexes, since they don't allow us to conclude
* that all attr values are distinct. We don't take any interest in
* expressional indexes either. Also, a multicolumn unique index
* doesn't allow us to conclude that just the specified attr is
* unique.
* Note: ignore partial indexes, since they don't allow us to
* conclude that all attr values are distinct. We don't take any
* interest in expressional indexes either. Also, a multicolumn
* unique index doesn't allow us to conclude that just the
* specified attr is unique.
*/
if (index->unique &&
index->ncolumns == 1 &&
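The rewrapped comment spells out which indexes can prove an attribute unique: only a non-partial, non-expressional, single-column unique index on exactly that attribute qualifies. A toy version of that filter (standalone C; the struct fields are invented stand-ins for IndexOptInfo):

#include <stdbool.h>
#include <stdio.h>

/* invented stand-in for the planner's IndexOptInfo */
typedef struct
{
	bool		unique;
	int			ncolumns;
	int			keyattno;		/* indexed column, 0 for an expression */
	bool		partial;		/* has a predicate? */
} ToyIndex;

/* true only if this index proves values of attno are distinct */
static bool
index_proves_unique(const ToyIndex *idx, int attno)
{
	return idx->unique &&
		idx->ncolumns == 1 &&
		idx->keyattno == attno &&
		!idx->partial;
}

int
main(void)
{
	ToyIndex	multi = {true, 2, 1, false};	/* multicolumn: no proof */
	ToyIndex	single = {true, 1, 1, false};	/* qualifies */

	printf("%d %d\n", index_proves_unique(&multi, 1),
		   index_proves_unique(&single, 1));	/* prints 0 1 */
	return 0;
}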
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/relnode.c,v 1.50 2003/07/25 00:01:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/relnode.c,v 1.51 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -322,7 +322,8 @@ build_join_rel(Query *root,

/*
* Create a new tlist containing just the vars that need to be output
* from this join (ie, are needed for higher joinclauses or final output).
* from this join (ie, are needed for higher joinclauses or final
* output).
*/
build_joinrel_tlist(root, joinrel);

@ -389,8 +390,8 @@ build_joinrel_tlist(Query *root, RelOptInfo *joinrel)

foreach(vars, FastListValue(&baserel->reltargetlist))
{
Var *var = (Var *) lfirst(vars);
int ndx = var->varattno - baserel->min_attr;
Var *var = (Var *) lfirst(vars);
int ndx = var->varattno - baserel->min_attr;

if (bms_nonempty_difference(baserel->attr_needed[ndx], relids))
{
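The test in this hunk, bms_nonempty_difference(attr_needed[ndx], relids), keeps a column in the join's output only if some relation outside the join still needs it. With small relid sets modeled as bitmasks, the check reduces to a single expression (standalone sketch; names invented):

#include <stdio.h>

/* relid sets as bitmasks: bit i set means "relation i" */
typedef unsigned int Relids;

/* true if 'needed' contains any member not in 'joinrelids' */
static int
nonempty_difference(Relids needed, Relids joinrelids)
{
	return (needed & ~joinrelids) != 0;
}

int
main(void)
{
	Relids		joinrelids = (1u << 1) | (1u << 2);	/* join of rels 1 and 2 */
	Relids		needed_above = (1u << 3);	/* column used by rel 3's clause */
	Relids		needed_inside = (1u << 2);	/* column used only inside the join */

	printf("%d\n", nonempty_difference(needed_above, joinrelids));	/* 1: keep */
	printf("%d\n", nonempty_difference(needed_inside, joinrelids));	/* 0: drop */
	return 0;
}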
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.17 2003/06/15 22:51:45 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.18 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -21,9 +21,9 @@


static bool join_clause_is_redundant(Query *root,
RestrictInfo *rinfo,
List *reference_list,
JoinType jointype);
RestrictInfo *rinfo,
List *reference_list,
JoinType jointype);


/*
@ -174,7 +174,7 @@ select_nonredundant_join_clauses(Query *root,
* left and right pathkeys, which uniquely identify the sets of equijoined
* variables in question. All the members of a pathkey set that are in the
* left relation have already been forced to be equal; likewise for those in
* the right relation. So, we need to have only one clause that checks
* the right relation. So, we need to have only one clause that checks
* equality between any set member on the left and any member on the right;
* by transitivity, all the rest are then equal.
*
@ -228,10 +228,9 @@ join_clause_is_redundant(Query *root,
if (redundant)
{
/*
* It looks redundant, now check for "var = const" case.
* If left_relids/right_relids are set, then there are
* definitely vars on both sides; else we must check the
* hard way.
* It looks redundant, now check for "var = const" case. If
* left_relids/right_relids are set, then there are definitely
* vars on both sides; else we must check the hard way.
*/
if (rinfo->left_relids)
return true; /* var = var, so redundant */
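The comment in the select_nonredundant_join_clauses hunk makes a transitivity argument: once one equality clause links the left-side and right-side members of a pathkey set, every further clause between the two sides is implied. A toy demonstration using a union-find over variable ids (standalone C; everything here is invented for the sketch):

#include <stdio.h>

#define NVARS 8

static int	parent[NVARS];

static int
find(int x)
{
	while (parent[x] != x)
		x = parent[x] = parent[parent[x]];	/* path halving */
	return x;
}

/* returns 1 if the clause a = b adds information, 0 if it is redundant */
static int
add_equality(int a, int b)
{
	int			ra = find(a);
	int			rb = find(b);

	if (ra == rb)
		return 0;				/* already provably equal: redundant */
	parent[ra] = rb;
	return 1;
}

int
main(void)
{
	int			i;

	for (i = 0; i < NVARS; i++)
		parent[i] = i;

	add_equality(0, 1);			/* left pathkey set: {v0, v1} already equal */
	add_equality(2, 3);			/* right pathkey set: {v2, v3} already equal */
	printf("v0 = v2 needed: %d\n", add_equality(0, 2));	/* 1 */
	printf("v1 = v3 needed: %d\n", add_equality(1, 3));	/* 0, by transitivity */
	return 0;
}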
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.58 2003/07/25 00:01:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.59 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -211,8 +211,8 @@ get_sortgroupclause_expr(SortClause *sortClause, List *targetList)
List *
get_sortgrouplist_exprs(List *sortClauses, List *targetList)
{
List *result = NIL;
List *l;
List *result = NIL;
List *l;

foreach(l, sortClauses)
{
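get_sortgrouplist_exprs, whose local declarations are re-aligned here, walks the sort clauses and pulls the matching expression out of the query's target list for each one; the lookup is just a reference-number match. A standalone sketch of that lookup (invented names; strings stand in for expression nodes):

#include <stdio.h>

/* invented stand-in for a TargetEntry */
typedef struct
{
	int			ressortgroupref;	/* 0 if not a sort/group column */
	const char *expr;
} ToyTle;

static const char *
lookup_sortgroup_expr(int tleref, const ToyTle *tlist, int ntles)
{
	int			i;

	for (i = 0; i < ntles; i++)
		if (tlist[i].ressortgroupref == tleref)
			return tlist[i].expr;
	return NULL;				/* the real code raises an error here */
}

int
main(void)
{
	ToyTle		tlist[] = {{1, "a.x"}, {0, "a.y + 1"}, {2, "b.z"}};
	int			sortrefs[] = {2, 1};	/* ORDER BY b.z, a.x */
	int			i;

	for (i = 0; i < 2; i++)
		printf("sort key %d: %s\n", i + 1,
			   lookup_sortgroup_expr(sortrefs[i], tlist, 3));
	return 0;
}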
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.51 2003/06/06 15:04:02 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,7 +39,7 @@ typedef struct
{
int min_varlevel;
int sublevels_up;
} find_minimum_var_level_context;
} find_minimum_var_level_context;

typedef struct
{
@ -61,7 +61,7 @@ static bool contain_var_clause_walker(Node *node, void *context);
static bool contain_vars_of_level_walker(Node *node, int *sublevels_up);
static bool contain_vars_above_level_walker(Node *node, int *sublevels_up);
static bool find_minimum_var_level_walker(Node *node,
find_minimum_var_level_context *context);
find_minimum_var_level_context * context);
static bool pull_var_clause_walker(Node *node,
pull_var_clause_context *context);
static Node *flatten_join_alias_vars_mutator(Node *node,
@ -242,12 +242,12 @@ contain_var_clause_walker(Node *node, void *context)
*
* Returns true if any such Var found.
*
* Will recurse into sublinks. Also, may be invoked directly on a Query.
* Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_of_level(Node *node, int levelsup)
{
int sublevels_up = levelsup;
int sublevels_up = levelsup;

return query_or_expression_tree_walker(node,
contain_vars_of_level_walker,
@ -286,20 +286,20 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up)
/*
* contain_vars_above_level
* Recursively scan a clause to discover whether it contains any Var nodes
* above the specified query level. (For example, pass zero to detect
* above the specified query level. (For example, pass zero to detect
* all nonlocal Vars.)
*
* Returns true if any such Var found.
*
* Will recurse into sublinks. Also, may be invoked directly on a Query.
* Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_above_level(Node *node, int levelsup)
{
int sublevels_up = levelsup;
int sublevels_up = levelsup;

return query_or_expression_tree_walker(node,
contain_vars_above_level_walker,
contain_vars_above_level_walker,
(void *) &sublevels_up,
0);
}
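contain_vars_of_level and contain_vars_above_level are thin wrappers over the generic tree-walker machinery: each passes a walker callback plus a sublevels_up counter tracking how deep into sublinks the walk has descended. A miniature version of that pattern over a toy expression tree (standalone C; the node types are invented):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* toy expression tree: a Var leaf or an operator with two children */
typedef enum { T_ToyVar, T_ToyOp } ToyTag;

typedef struct ToyNode
{
	ToyTag		tag;
	int			varlevelsup;	/* meaningful for T_ToyVar only */
	struct ToyNode *left, *right;	/* for T_ToyOp */
} ToyNode;

/* walker: returns true as soon as a qualifying Var is found */
static bool
vars_above_level_walker(ToyNode *node, int *sublevels_up)
{
	if (node == NULL)
		return false;
	if (node->tag == T_ToyVar)
		return node->varlevelsup > *sublevels_up;
	return vars_above_level_walker(node->left, sublevels_up) ||
		vars_above_level_walker(node->right, sublevels_up);
}

int
main(void)
{
	ToyNode		local = {T_ToyVar, 0, NULL, NULL};
	ToyNode		outer = {T_ToyVar, 1, NULL, NULL};	/* one query level up */
	ToyNode		op = {T_ToyOp, 0, &local, &outer};
	int			sublevels_up = 0;

	/* prints 1: the tree references a Var above the current level */
	printf("%d\n", vars_above_level_walker(&op, &sublevels_up));
	return 0;
}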
@ -344,7 +344,7 @@ contain_vars_above_level_walker(Node *node, int *sublevels_up)
*
* -1 is returned if the clause has no variables at all.
*
* Will recurse into sublinks. Also, may be invoked directly on a Query.
* Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
int
find_minimum_var_level(Node *node)
@ -364,13 +364,13 @@ find_minimum_var_level(Node *node)

static bool
find_minimum_var_level_walker(Node *node,
find_minimum_var_level_context *context)
find_minimum_var_level_context * context)
{
if (node == NULL)
return false;
if (IsA(node, Var))
{
int varlevelsup = ((Var *) node)->varlevelsup;
int varlevelsup = ((Var *) node)->varlevelsup;

/* convert levelsup to frame of reference of original query */
varlevelsup -= context->sublevels_up;
@ -381,6 +381,7 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel > varlevelsup)
{
context->min_varlevel = varlevelsup;

/*
* As soon as we find a local variable, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
@ -390,14 +391,16 @@ find_minimum_var_level_walker(Node *node,
}
}
}

/*
* An Aggref must be treated like a Var of its level. Normally we'd get
* the same result from looking at the Vars in the aggregate's argument,
* but this fails in the case of a Var-less aggregate call (COUNT(*)).
* An Aggref must be treated like a Var of its level. Normally we'd
* get the same result from looking at the Vars in the aggregate's
* argument, but this fails in the case of a Var-less aggregate call
* (COUNT(*)).
*/
if (IsA(node, Aggref))
{
int agglevelsup = ((Aggref *) node)->agglevelsup;
int agglevelsup = ((Aggref *) node)->agglevelsup;

/* convert levelsup to frame of reference of original query */
agglevelsup -= context->sublevels_up;
@ -408,6 +411,7 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel > agglevelsup)
{
context->min_varlevel = agglevelsup;

/*
* As soon as we find a local aggregate, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
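find_minimum_var_level_walker keeps the smallest level seen so far and, per the comments above, aborts the walk outright once a level-0 variable or aggregate turns up, since no smaller value is possible. The same early-exit shape in miniature (standalone C; the clause is modeled as a flat list of levels, with aggregates treated as just another level entry):

#include <stdio.h>

/* toy clause: an array of variable levels, terminated by -1 */
static int
find_minimum_var_level(const int *levels)
{
	int			min_varlevel = -1;	/* -1 means: no variables seen */
	int			i;

	for (i = 0; levels[i] >= 0; i++)
	{
		if (min_varlevel < 0 || levels[i] < min_varlevel)
		{
			min_varlevel = levels[i];
			if (min_varlevel == 0)
				break;			/* local var found: can't get lower, abort */
		}
	}
	return min_varlevel;
}

int
main(void)
{
	int			outer_only[] = {2, 1, 3, -1};
	int			with_local[] = {2, 0, 1, -1};

	printf("%d\n", find_minimum_var_level(outer_only));	/* 1 */
	printf("%d\n", find_minimum_var_level(with_local));	/* 0 */
	return 0;
}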
@ -519,6 +523,7 @@ flatten_join_alias_vars_mutator(Node *node,
Assert(var->varattno > 0);
/* Okay, must expand it */
newvar = (Node *) nth(var->varattno - 1, rte->joinaliasvars);

/*
* If we are expanding an alias carried down from an upper query,
* must adjust its varlevelsup fields.
@ -534,11 +539,11 @@ flatten_join_alias_vars_mutator(Node *node,
if (IsA(node, InClauseInfo))
{
/* Copy the InClauseInfo node with correct mutation of subnodes */
InClauseInfo *ininfo;
InClauseInfo *ininfo;

ininfo = (InClauseInfo *) expression_tree_mutator(node,
flatten_join_alias_vars_mutator,
(void *) context);
flatten_join_alias_vars_mutator,
(void *) context);
/* now fix InClauseInfo's relid sets */
if (context->sublevels_up == 0)
{
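Where a walker only inspects, flatten_join_alias_vars_mutator rebuilds the tree, replacing each join-alias Var with the underlying expression recorded for that join. A stripped-down mutator over a toy tree (standalone C; all names are invented, and memory is leaked freely for brevity):

#include <stdio.h>
#include <stdlib.h>

/* toy tree: alias Vars get substituted, everything else is copied */
typedef enum { T_AliasVar, T_Const, T_Plus } ToyTag;

typedef struct ToyNode
{
	ToyTag		tag;
	int			value;			/* Const: value; AliasVar: slot number */
	struct ToyNode *left, *right;	/* Plus only */
} ToyNode;

static ToyNode *
mknode(ToyTag tag, int value, ToyNode *l, ToyNode *r)
{
	ToyNode    *n = malloc(sizeof(ToyNode));

	n->tag = tag;
	n->value = value;
	n->left = l;
	n->right = r;
	return n;
}

/* mutator: returns a new tree with alias slots expanded */
static ToyNode *
flatten_mutator(ToyNode *node, ToyNode **aliasvars)
{
	if (node == NULL)
		return NULL;
	if (node->tag == T_AliasVar)
		return aliasvars[node->value];	/* substitute the underlying expr */
	return mknode(node->tag, node->value,
				  flatten_mutator(node->left, aliasvars),
				  flatten_mutator(node->right, aliasvars));
}

static int
eval(ToyNode *n)
{
	if (n->tag == T_Const)
		return n->value;
	return eval(n->left) + eval(n->right);	/* T_Plus */
}

int
main(void)
{
	/* the join records that alias slot 0 really means the constant 41 */
	ToyNode    *aliasvars[] = {mknode(T_Const, 41, NULL, NULL)};
	ToyNode    *expr = mknode(T_Plus, 0,
							  mknode(T_AliasVar, 0, NULL, NULL),
							  mknode(T_Const, 1, NULL, NULL));

	printf("%d\n", eval(flatten_mutator(expr, aliasvars)));	/* prints 42 */
	return 0;
}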