
Ye-old pgindent run. Same 4-space tabs.

Author: Bruce Momjian
Date:   2000-04-12 17:17:23 +00:00
parent db4518729d
commit 52f77df613
434 changed files with 24799 additions and 21246 deletions

src/backend/optimizer/geqo/geqo_eval.c

@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: geqo_eval.c,v 1.48 2000/02/15 20:49:14 tgl Exp $
* $Id: geqo_eval.c,v 1.49 2000/04/12 17:15:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,8 +99,8 @@ geqo_eval(Query *root, Gene *tour, int num_gene)
/*
* compute fitness
*
* XXX geqo does not currently support optimization for partial
* result retrieval --- how to fix?
* XXX geqo does not currently support optimization for partial result
* retrieval --- how to fix?
*/
fitness = joinrel->cheapest_total_path->total_cost;
@@ -135,7 +135,7 @@ gimme_tree(Query *root, Gene *tour, int rel_count, int num_gene, RelOptInfo *old
/* tour[0] = 3; tour[1] = 1; tour[2] = 2 */
base_rel_index = (int) tour[rel_count];
inner_rel = (RelOptInfo *) nth(base_rel_index-1, root->base_rel_list);
inner_rel = (RelOptInfo *) nth(base_rel_index - 1, root->base_rel_list);
if (rel_count == 0)
{ /* processing first join with
@@ -145,15 +145,15 @@ gimme_tree(Query *root, Gene *tour, int rel_count, int num_gene, RelOptInfo *old
}
else
{ /* tree main part */
List *acceptable_rels = lcons(inner_rel, NIL);
List *acceptable_rels = lcons(inner_rel, NIL);
new_rel = make_rels_by_clause_joins(root, old_rel,
acceptable_rels);
if (! new_rel)
if (!new_rel)
{
new_rel = make_rels_by_clauseless_joins(root, old_rel,
acceptable_rels);
if (! new_rel)
if (!new_rel)
elog(ERROR, "gimme_tree: failed to construct join rel");
}
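
To make the tour handling above concrete, here is an illustrative trace (hypothetical relation numbers, matching the sample comment "tour[0] = 3; tour[1] = 1; tour[2] = 2" in the hunk):

    tour = {3, 1, 2}
    rel_count 0:  inner_rel = base rel #3                  current tree: (R3)
    rel_count 1:  join the tree so far with base rel #1    ((R3 join R1))
    rel_count 2:  join the tree so far with base rel #2    (((R3 join R1) join R2))

gimme_tree() prefers make_rels_by_clause_joins() at each step and falls back to make_rels_by_clauseless_joins() when no join clause links the new rel to the tree built so far; geqo_eval() then takes the cheapest total path cost of the final joinrel as the tour's fitness.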

src/backend/optimizer/path/_deadcode/predmig.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.6 2000/01/26 05:56:36 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.7 2000/04/12 17:15:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -485,7 +485,7 @@ xfunc_form_groups(Query *queryInfo, Stream root, Stream bottom)
}
/* ------------------- UTILITY FUNCTIONS ------------------------- */
/* ------------------- UTILITY FUNCTIONS ------------------------- */
/*
** xfunc_free_stream

src/backend/optimizer/path/allpaths.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.59 2000/02/15 20:49:16 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.60 2000/04/12 17:15:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,8 +24,10 @@
#ifdef GEQO
bool enable_geqo = true;
#else
bool enable_geqo = false;
#endif
int geqo_rels = GEQO_RELS;
@@ -36,6 +38,7 @@ static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed);
#ifdef OPTIMIZER_DEBUG
static void debug_print_rel(Query *root, RelOptInfo *rel);
#endif
@@ -64,6 +67,7 @@ make_one_rel(Query *root)
if (levels_needed == 1)
{
/*
* Single relation, no more processing is required.
*/
@@ -71,6 +75,7 @@ make_one_rel(Query *root)
}
else
{
/*
* Generate join tree.
*/
@@ -100,8 +105,8 @@ set_base_rel_pathlist(Query *root)
/*
* Generate paths and add them to the rel's pathlist.
*
* Note: add_path() will discard any paths that are dominated
* by another available path, keeping only those paths that are
* Note: add_path() will discard any paths that are dominated by
* another available path, keeping only those paths that are
* superior along at least one dimension of cost or sortedness.
*/
@@ -116,9 +121,10 @@ set_base_rel_pathlist(Query *root)
rel->baserestrictinfo,
rel->joininfo);
/* Note: create_or_index_paths depends on create_index_paths
* to have marked OR restriction clauses with relevant indices;
* this is why it doesn't need to be given the list of indices.
/*
* Note: create_or_index_paths depends on create_index_paths to
* have marked OR restriction clauses with relevant indices; this
* is why it doesn't need to be given the list of indices.
*/
create_or_index_paths(root, rel, rel->baserestrictinfo);
@@ -153,11 +159,11 @@ make_one_rel_by_joins(Query *root, int levels_needed)
return geqo(root);
/*
* We employ a simple "dynamic programming" algorithm: we first
* find all ways to build joins of two base relations, then all ways
* to build joins of three base relations (from two-base-rel joins
* and other base rels), then four-base-rel joins, and so on until
* we have considered all ways to join all N relations into one rel.
* We employ a simple "dynamic programming" algorithm: we first find
* all ways to build joins of two base relations, then all ways to
* build joins of three base relations (from two-base-rel joins and
* other base rels), then four-base-rel joins, and so on until we have
* considered all ways to join all N relations into one rel.
*/
for (lev = 2; lev <= levels_needed; lev++)
@@ -185,9 +191,10 @@ make_one_rel_by_joins(Query *root, int levels_needed)
rel = (RelOptInfo *) lfirst(x);
#ifdef NOT_USED
/*
* * for each expensive predicate in each path in each distinct
* rel, * consider doing pullup -- JMH
* * for each expensive predicate in each path in each
* distinct rel, * consider doing pullup -- JMH
*/
if (XfuncMode != XFUNC_NOPULL && XfuncMode != XFUNC_OFF)
xfunc_trypullup(rel);
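
As an illustration of the "dynamic programming" comment above (a hypothetical four-relation query, not taken from the commit), make_one_rel_by_joins() proceeds level by level:

    level 1:  {A} {B} {C} {D}                          base rels, paths made by set_base_rel_pathlist
    level 2:  {A B} {A C} {A D} {B C} {B D} {C D}
    level 3:  {A B C} {A B D} {A C D} {B C D}          each built by joining a smaller joinrel with another rel
    level 4:  {A B C D}                                the single rel returned to make_one_rel

At every level, add_path() keeps only the paths that are not dominated on cost or sortedness, which is what keeps the search tractable.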

src/backend/optimizer/path/clausesel.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.33 2000/03/23 23:35:47 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.34 2000/04/12 17:15:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,17 +28,18 @@
* Data structure for accumulating info about possible range-query
* clause pairs in clauselist_selectivity.
*/
typedef struct RangeQueryClause {
struct RangeQueryClause *next; /* next in linked list */
typedef struct RangeQueryClause
{
struct RangeQueryClause *next; /* next in linked list */
Node *var; /* The common variable of the clauses */
bool have_lobound; /* found a low-bound clause yet? */
bool have_hibound; /* found a high-bound clause yet? */
Selectivity lobound; /* Selectivity of a var > something clause */
Selectivity hibound; /* Selectivity of a var < something clause */
Selectivity lobound; /* Selectivity of a var > something clause */
Selectivity hibound; /* Selectivity of a var < something clause */
} RangeQueryClause;
static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
int flag, bool isLTsel, Selectivity s2);
int flag, bool isLTsel, Selectivity s2);
/****************************************************************************
@@ -59,7 +60,7 @@ restrictlist_selectivity(Query *root,
int varRelid)
{
List *clauselist = get_actual_clauses(restrictinfo_list);
Selectivity result;
Selectivity result;
result = clauselist_selectivity(root, clauselist, varRelid);
freeList(clauselist);
@@ -75,7 +76,7 @@ restrictlist_selectivity(Query *root,
* See clause_selectivity() for the meaning of the varRelid parameter.
*
* Our basic approach is to take the product of the selectivities of the
* subclauses. However, that's only right if the subclauses have independent
* subclauses. However, that's only right if the subclauses have independent
* probabilities, and in reality they are often NOT independent. So,
* we want to be smarter where we can.
@@ -92,7 +93,7 @@ restrictlist_selectivity(Query *root,
* see that hisel is the fraction of the range below the high bound, while
* losel is the fraction above the low bound; so hisel can be interpreted
* directly as a 0..1 value but we need to convert losel to 1-losel before
* interpreting it as a value. Then the available range is 1-losel to hisel.)
* interpreting it as a value. Then the available range is 1-losel to hisel.)
* If the calculation yields zero or negative, however, we chicken out and
* use the default interpretation; that probably means that one or both
* selectivities is a default estimate rather than an actual range value.
@@ -104,9 +105,9 @@ clauselist_selectivity(Query *root,
List *clauses,
int varRelid)
{
Selectivity s1 = 1.0;
RangeQueryClause *rqlist = NULL;
List *clist;
Selectivity s1 = 1.0;
RangeQueryClause *rqlist = NULL;
List *clist;
/*
* Initial scan over clauses. Anything that doesn't look like a
@@ -116,13 +117,13 @@ clauselist_selectivity(Query *root,
foreach(clist, clauses)
{
Node *clause = (Node *) lfirst(clist);
Selectivity s2;
Selectivity s2;
/*
* See if it looks like a restriction clause with a constant.
* (If it's not a constant we can't really trust the selectivity!)
* NB: for consistency of results, this fragment of code had
* better match what clause_selectivity() would do.
* See if it looks like a restriction clause with a constant. (If
* it's not a constant we can't really trust the selectivity!) NB:
* for consistency of results, this fragment of code had better
* match what clause_selectivity() would do.
*/
if (varRelid != 0 || NumRelids(clause) == 1)
{
@@ -147,11 +148,12 @@ clauselist_selectivity(Query *root,
root->rtable),
attno,
constval, flag);
/*
* If we reach here, we have computed the same result
* that clause_selectivity would, so we can just use s2
* if it's the wrong oprrest. But if it's the right
* oprrest, add the clause to rqlist for later processing.
* If we reach here, we have computed the same result that
* clause_selectivity would, so we can just use s2 if it's
* the wrong oprrest. But if it's the right oprrest, add
* the clause to rqlist for later processing.
*/
switch (oprrest)
{
@@ -166,7 +168,7 @@ clauselist_selectivity(Query *root,
s1 = s1 * s2;
break;
}
continue; /* drop to loop bottom */
continue; /* drop to loop bottom */
}
}
/* Not the right form, so treat it generically. */
@@ -179,12 +181,12 @@ clauselist_selectivity(Query *root,
*/
while (rqlist != NULL)
{
RangeQueryClause *rqnext;
RangeQueryClause *rqnext;
if (rqlist->have_lobound && rqlist->have_hibound)
{
/* Successfully matched a pair of range clauses */
Selectivity s2 = rqlist->hibound + rqlist->lobound - 1.0;
Selectivity s2 = rqlist->hibound + rqlist->lobound - 1.0;
/*
* A zero or slightly negative s2 should be converted into a
@@ -199,14 +201,20 @@ clauselist_selectivity(Query *root,
{
if (s2 < -0.01)
{
/* No data available --- use a default estimate that
/*
* No data available --- use a default estimate that
* is small, but not real small.
*/
s2 = 0.01;
}
else
{
/* It's just roundoff error; use a small positive value */
/*
* It's just roundoff error; use a small positive
* value
*/
s2 = 1.0e-10;
}
}
@@ -239,15 +247,15 @@ static void
addRangeClause(RangeQueryClause **rqlist, Node *clause,
int flag, bool isLTsel, Selectivity s2)
{
RangeQueryClause *rqelem;
Node *var;
bool is_lobound;
RangeQueryClause *rqelem;
Node *var;
bool is_lobound;
/* get_relattval sets flag&SEL_RIGHT if the var is on the LEFT. */
if (flag & SEL_RIGHT)
{
var = (Node *) get_leftop((Expr *) clause);
is_lobound = ! isLTsel; /* x < something is high bound */
is_lobound = !isLTsel; /* x < something is high bound */
}
else
{
@@ -257,23 +265,27 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause,
for (rqelem = *rqlist; rqelem; rqelem = rqelem->next)
{
/* We use full equal() here because the "var" might be a function
/*
* We use full equal() here because the "var" might be a function
* of one or more attributes of the same relation...
*/
if (! equal(var, rqelem->var))
if (!equal(var, rqelem->var))
continue;
/* Found the right group to put this clause in */
if (is_lobound)
{
if (! rqelem->have_lobound)
if (!rqelem->have_lobound)
{
rqelem->have_lobound = true;
rqelem->lobound = s2;
}
else
{
/* We have found two similar clauses, such as
* x < y AND x < z. Keep only the more restrictive one.
/*
* We have found two similar clauses, such as x < y AND x
* < z. Keep only the more restrictive one.
*/
if (rqelem->lobound > s2)
rqelem->lobound = s2;
@@ -281,15 +293,17 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause,
}
else
{
if (! rqelem->have_hibound)
if (!rqelem->have_hibound)
{
rqelem->have_hibound = true;
rqelem->hibound = s2;
}
else
{
/* We have found two similar clauses, such as
* x > y AND x > z. Keep only the more restrictive one.
/*
* We have found two similar clauses, such as x > y AND x
* > z. Keep only the more restrictive one.
*/
if (rqelem->hibound > s2)
rqelem->hibound = s2;
@@ -331,7 +345,7 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause,
* nestloop join's inner relation --- varRelid should then be the ID of the
* inner relation.
*
* When varRelid is 0, all variables are treated as variables. This
* When varRelid is 0, all variables are treated as variables. This
* is appropriate for ordinary join clauses and restriction clauses.
*/
Selectivity
@@ -339,12 +353,13 @@ clause_selectivity(Query *root,
Node *clause,
int varRelid)
{
Selectivity s1 = 1.0; /* default for any unhandled clause type */
Selectivity s1 = 1.0; /* default for any unhandled clause type */
if (clause == NULL)
return s1;
if (IsA(clause, Var))
{
/*
* we have a bool Var. This is exactly equivalent to the clause:
* reln.attribute = 't' so we compute the selectivity as if that
@@ -352,7 +367,7 @@ clause_selectivity(Query *root,
* didn't want to have to do system cache look ups to find out all
* of that info.
*/
Index varno = ((Var *) clause)->varno;
Index varno = ((Var *) clause)->varno;
if (varRelid == 0 || varRelid == (int) varno)
s1 = restriction_selectivity(F_EQSEL,
@@ -377,7 +392,7 @@ clause_selectivity(Query *root,
{
/* inverse of the selectivity of the underlying clause */
s1 = 1.0 - clause_selectivity(root,
(Node*) get_notclausearg((Expr*) clause),
(Node *) get_notclausearg((Expr *) clause),
varRelid);
}
else if (and_clause(clause))
@@ -389,18 +404,21 @@ clause_selectivity(Query *root,
}
else if (or_clause(clause))
{
/*
* Selectivities for an 'or' clause are computed as s1+s2 - s1*s2
* to account for the probable overlap of selected tuple sets.
* XXX is this too conservative?
* to account for the probable overlap of selected tuple sets. XXX
* is this too conservative?
*/
List *arg;
List *arg;
s1 = 0.0;
foreach(arg, ((Expr *) clause)->args)
{
Selectivity s2 = clause_selectivity(root,
Selectivity s2 = clause_selectivity(root,
(Node *) lfirst(arg),
varRelid);
s1 = s1 + s2 - s1 * s2;
}
}
@@ -411,17 +429,20 @@ clause_selectivity(Query *root,
if (varRelid != 0)
{
/*
* If we are considering a nestloop join then all clauses
* are restriction clauses, since we are only interested in
* the one relation.
* If we are considering a nestloop join then all clauses are
* restriction clauses, since we are only interested in the
* one relation.
*/
is_join_clause = false;
}
else
{
/*
* Otherwise, it's a join if there's more than one relation used.
* Otherwise, it's a join if there's more than one relation
* used.
*/
is_join_clause = (NumRelids(clause) > 1);
}
@@ -432,8 +453,8 @@ clause_selectivity(Query *root,
RegProcedure oprjoin = get_oprjoin(opno);
/*
* if the oprjoin procedure is missing for whatever reason, use a
* selectivity of 0.5
* if the oprjoin procedure is missing for whatever reason,
* use a selectivity of 0.5
*/
if (!oprjoin)
s1 = (Selectivity) 0.5;
@@ -460,8 +481,8 @@ clause_selectivity(Query *root,
RegProcedure oprrest = get_oprrest(opno);
/*
* if the oprrest procedure is missing for whatever reason, use a
* selectivity of 0.5
* if the oprrest procedure is missing for whatever reason,
* use a selectivity of 0.5
*/
if (!oprrest)
s1 = (Selectivity) 0.5;
@@ -484,6 +505,7 @@ clause_selectivity(Query *root,
}
else if (is_funcclause(clause))
{
/*
* This is not an operator, so we guess at the selectivity. THIS
* IS A HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
@@ -493,6 +515,7 @@ clause_selectivity(Query *root,
}
else if (is_subplan(clause))
{
/*
* Just for the moment! FIX ME! - vadim 02/04/98
*/
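
Two of the estimation rules described in the comments above are easy to check with made-up numbers (the figures below are purely illustrative):

    Range pair:  for "x > 10 AND x < 50", suppose sel(x < 50) = hibound = 0.70 and
                 sel(x > 10) = lobound = 0.45.  Then s2 = hibound + lobound - 1.0
                 = 0.70 + 0.45 - 1.0 = 0.15, i.e. an estimated 15% of the rows fall
                 between the two bounds.  If the two selectivities sum to 1.0 or less,
                 s2 comes out zero or negative and the code above falls back to 0.01
                 (default estimates in play) or 1.0e-10 (mere roundoff).

    OR clause:   sel(a OR b) = s1 + s2 - s1*s2, so with s1 = 0.2 and s2 = 0.3 the
                 estimate is 0.2 + 0.3 - 0.06 = 0.44, a bit less than the plain sum
                 to allow for overlap between the selected tuple sets.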

src/backend/optimizer/path/costsize.c

@@ -19,7 +19,7 @@
*
* Obviously, taking constants for these values is an oversimplification,
* but it's tough enough to get any useful estimates even at this level of
* detail. Note that all of these parameters are user-settable, in case
* detail. Note that all of these parameters are user-settable, in case
* the default values are drastically off for a particular platform.
*
* We compute two separate costs for each path:
@@ -37,12 +37,12 @@
* will be no zero divide.) RelOptInfos, Paths, and Plans themselves never
* account for LIMIT.
*
*
*
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.56 2000/04/09 04:31:36 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.57 2000/04/12 17:15:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,6 +118,7 @@ cost_seqscan(Path *path, RelOptInfo *baserel)
/* disk costs */
if (lfirsti(baserel->relids) < 0)
{
/*
* cost of sequentially scanning a materialized temporary relation
*/
@@ -125,15 +126,17 @@ cost_seqscan(Path *path, RelOptInfo *baserel)
}
else
{
/*
* The cost of reading a page sequentially is 1.0, by definition.
* Note that the Unix kernel will typically do some amount of
* read-ahead optimization, so that this cost is less than the true
* cost of reading a page from disk. We ignore that issue here,
* but must take it into account when estimating the cost of
* read-ahead optimization, so that this cost is less than the
* true cost of reading a page from disk. We ignore that issue
* here, but must take it into account when estimating the cost of
* non-sequential accesses!
*/
run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
run_cost += baserel->pages; /* sequential fetches with cost
* 1.0 */
}
/* CPU costs */
@@ -151,7 +154,7 @@ cost_seqscan(Path *path, RelOptInfo *baserel)
*
* The simplistic model that the cost is random_page_cost is what we want
* to use for large relations; but for small ones that is a serious
* overestimate because of the effects of caching. This routine tries to
* overestimate because of the effects of caching. This routine tries to
* account for that.
*
* Unfortunately we don't have any good way of estimating the effective cache
@@ -221,12 +224,12 @@ cost_index(Path *path, Query *root,
Cost cpu_per_tuple;
Cost indexStartupCost;
Cost indexTotalCost;
Selectivity indexSelectivity;
Selectivity indexSelectivity;
double tuples_fetched;
double pages_fetched;
/* Should only be applied to base relations */
Assert(IsA(baserel, RelOptInfo) && IsA(index, IndexOptInfo));
Assert(IsA(baserel, RelOptInfo) &&IsA(index, IndexOptInfo));
Assert(length(baserel->relids) == 1);
if (!enable_indexscan && !is_injoin)
@@ -234,8 +237,9 @@ cost_index(Path *path, Query *root,
/*
* Call index-access-method-specific code to estimate the processing
* cost for scanning the index, as well as the selectivity of the index
* (ie, the fraction of main-table tuples we will have to retrieve).
* cost for scanning the index, as well as the selectivity of the
* index (ie, the fraction of main-table tuples we will have to
* retrieve).
*/
fmgr(index->amcostestimate, root, baserel, index, indexQuals,
&indexStartupCost, &indexTotalCost, &indexSelectivity);
@@ -249,17 +253,18 @@ cost_index(Path *path, Query *root,
*
* If the number of tuples is much smaller than the number of pages in
* the relation, each tuple will cost a separate nonsequential fetch.
* If it is comparable or larger, then probably we will be able to avoid
* some fetches. We use a growth rate of log(#tuples/#pages + 1) ---
* probably totally bogus, but intuitively it gives the right shape of
* curve at least.
* If it is comparable or larger, then probably we will be able to
* avoid some fetches. We use a growth rate of log(#tuples/#pages +
* 1) --- probably totally bogus, but intuitively it gives the right
* shape of curve at least.
*
* XXX if the relation has recently been "clustered" using this index,
* then in fact the target tuples will be highly nonuniformly distributed,
* and we will be seriously overestimating the scan cost! Currently we
* have no way to know whether the relation has been clustered, nor how
* much it's been modified since the last clustering, so we ignore this
* effect. Would be nice to do better someday.
* then in fact the target tuples will be highly nonuniformly
* distributed, and we will be seriously overestimating the scan cost!
* Currently we have no way to know whether the relation has been
* clustered, nor how much it's been modified since the last
* clustering, so we ignore this effect. Would be nice to do better
* someday.
*/
tuples_fetched = indexSelectivity * baserel->tuples;
@@ -274,8 +279,8 @@ cost_index(Path *path, Query *root,
pages_fetched = tuples_fetched;
/*
* Now estimate one nonsequential access per page fetched,
* plus appropriate CPU costs per tuple.
* Now estimate one nonsequential access per page fetched, plus
* appropriate CPU costs per tuple.
*/
/* disk costs for main table */
@@ -283,16 +288,18 @@ cost_index(Path *path, Query *root,
/* CPU costs */
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
/*
* Normally the indexquals will be removed from the list of restriction
* clauses that we have to evaluate as qpquals, so we should subtract
* their costs from baserestrictcost. For a lossy index, however, we
* will have to recheck all the quals and so mustn't subtract anything.
* Also, if we are doing a join then some of the indexquals are join
* clauses and shouldn't be subtracted. Rather than work out exactly
* how much to subtract, we don't subtract anything in that case either.
* Normally the indexquals will be removed from the list of
* restriction clauses that we have to evaluate as qpquals, so we
* should subtract their costs from baserestrictcost. For a lossy
* index, however, we will have to recheck all the quals and so
* mustn't subtract anything. Also, if we are doing a join then some
* of the indexquals are join clauses and shouldn't be subtracted.
* Rather than work out exactly how much to subtract, we don't
* subtract anything in that case either.
*/
if (! index->lossy && ! is_injoin)
if (!index->lossy && !is_injoin)
cpu_per_tuple -= cost_qual_eval(indexQuals);
run_cost += cpu_per_tuple * tuples_fetched;
@@ -326,7 +333,7 @@ cost_tidscan(Path *path, RelOptInfo *baserel, List *tideval)
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
/*
* cost_sort
* Determines and returns the cost of sorting a relation.
@@ -341,7 +348,7 @@ cost_tidscan(Path *path, RelOptInfo *baserel, List *tideval)
* If the total volume exceeds SortMem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
* merge pass. We expect about ceil(log6(r)) merge passes where r is the
* merge pass. We expect about ceil(log6(r)) merge passes where r is the
* number of initial runs formed (log6 because tuplesort.c uses six-tape
* merging). Since the average initial run should be about twice SortMem,
* we have
@@ -385,8 +392,8 @@ cost_sort(Path *path, List *pathkeys, double tuples, int width)
/*
* CPU costs
*
* Assume about two operator evals per tuple comparison
* and N log2 N comparisons
* Assume about two operator evals per tuple comparison and N log2 N
* comparisons
*/
startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
@@ -408,7 +415,7 @@ cost_sort(Path *path, List *pathkeys, double tuples, int width)
/*
* Note: should we bother to assign a nonzero run_cost to reflect the
* overhead of extracting tuples from the sort result? Probably not
* overhead of extracting tuples from the sort result? Probably not
* worth worrying about.
*/
path->startup_cost = startup_cost;
@@ -440,19 +447,22 @@ cost_nestloop(Path *path,
startup_cost += disable_cost;
/* cost of source data */
/*
* NOTE: we assume that the inner path's startup_cost is paid once, not
* over again on each restart. This is certainly correct if the inner
* path is materialized. Are there any cases where it is wrong?
* NOTE: we assume that the inner path's startup_cost is paid once,
* not over again on each restart. This is certainly correct if the
* inner path is materialized. Are there any cases where it is wrong?
*/
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
run_cost += outer_path->total_cost - outer_path->startup_cost;
run_cost += outer_path->parent->rows *
(inner_path->total_cost - inner_path->startup_cost);
/* Number of tuples processed (not number emitted!). If inner path is
/*
* Number of tuples processed (not number emitted!). If inner path is
* an indexscan, be sure to use its estimated output row count, which
* may be lower than the restriction-clause-only row count of its parent.
* may be lower than the restriction-clause-only row count of its
* parent.
*/
if (IsA(inner_path, IndexPath))
ntuples = ((IndexPath *) inner_path)->rows;
@@ -498,11 +508,12 @@ cost_mergejoin(Path *path,
startup_cost += disable_cost;
/* cost of source data */
/*
* Note we are assuming that each source tuple is fetched just once,
* which is not right in the presence of equal keys. If we had a way of
* estimating the proportion of equal keys, we could apply a correction
* factor...
* which is not right in the presence of equal keys. If we had a way
* of estimating the proportion of equal keys, we could apply a
* correction factor...
*/
if (outersortkeys) /* do we need to sort outer? */
{
@@ -537,10 +548,10 @@ cost_mergejoin(Path *path,
}
/*
* Estimate the number of tuples to be processed in the mergejoin itself
* as one per tuple in the two source relations. This could be a drastic
* underestimate if there are many equal-keyed tuples in either relation,
* but we have no good way of estimating that...
* Estimate the number of tuples to be processed in the mergejoin
* itself as one per tuple in the two source relations. This could be
* a drastic underestimate if there are many equal-keyed tuples in
* either relation, but we have no good way of estimating that...
*/
ntuples = outer_path->parent->rows + inner_path->parent->rows;
@@ -575,9 +586,9 @@ cost_hashjoin(Path *path,
Cost cpu_per_tuple;
double ntuples;
double outerbytes = relation_byte_size(outer_path->parent->rows,
outer_path->parent->width);
outer_path->parent->width);
double innerbytes = relation_byte_size(inner_path->parent->rows,
inner_path->parent->width);
inner_path->parent->width);
long hashtablebytes = SortMem * 1024L;
if (!enable_hashjoin)
@@ -592,7 +603,8 @@ cost_hashjoin(Path *path,
startup_cost += cpu_operator_cost * inner_path->parent->rows;
run_cost += cpu_operator_cost * outer_path->parent->rows;
/* the number of tuple comparisons needed is the number of outer
/*
* the number of tuple comparisons needed is the number of outer
* tuples times the typical hash bucket size, which we estimate
* conservatively as the inner disbursion times the inner tuple count.
*/
@@ -601,9 +613,9 @@ cost_hashjoin(Path *path,
/*
* Estimate the number of tuples that get through the hashing filter
* as one per tuple in the two source relations. This could be a drastic
* underestimate if there are many equal-keyed tuples in either relation,
* but we have no good way of estimating that...
* as one per tuple in the two source relations. This could be a
* drastic underestimate if there are many equal-keyed tuples in
* either relation, but we have no good way of estimating that...
*/
ntuples = outer_path->parent->rows + inner_path->parent->rows;
@@ -614,33 +626,31 @@ cost_hashjoin(Path *path,
/*
* if inner relation is too big then we will need to "batch" the join,
* which implies writing and reading most of the tuples to disk an
* extra time. Charge one cost unit per page of I/O (correct since
* it should be nice and sequential...). Writing the inner rel counts
* as startup cost, all the rest as run cost.
* extra time. Charge one cost unit per page of I/O (correct since it
* should be nice and sequential...). Writing the inner rel counts as
* startup cost, all the rest as run cost.
*/
if (innerbytes > hashtablebytes)
{
double outerpages = page_size(outer_path->parent->rows,
outer_path->parent->width);
double innerpages = page_size(inner_path->parent->rows,
inner_path->parent->width);
double outerpages = page_size(outer_path->parent->rows,
outer_path->parent->width);
double innerpages = page_size(inner_path->parent->rows,
inner_path->parent->width);
startup_cost += innerpages;
run_cost += innerpages + 2 * outerpages;
}
/*
* Bias against putting larger relation on inside. We don't want
* an absolute prohibition, though, since larger relation might have
* Bias against putting larger relation on inside. We don't want an
* absolute prohibition, though, since larger relation might have
* better disbursion --- and we can't trust the size estimates
* unreservedly, anyway. Instead, inflate the startup cost by
* the square root of the size ratio. (Why square root? No real good
* unreservedly, anyway. Instead, inflate the startup cost by the
* square root of the size ratio. (Why square root? No real good
* reason, but it seems reasonable...)
*/
if (innerbytes > outerbytes && outerbytes > 0)
{
startup_cost *= sqrt(innerbytes / outerbytes);
}
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
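
Plugging hypothetical numbers into the two hash-join adjustments above (illustrative only): if the inner relation overflows SortMem and occupies 5,000 pages while the outer occupies 20,000 pages, batching adds 5,000 to startup_cost (writing the inner rel) and 5,000 + 2 * 20,000 = 45,000 to run_cost (reading the inner back, plus writing and re-reading the outer). Independently, if innerbytes is four times outerbytes, startup_cost is multiplied by sqrt(4) = 2, biasing the planner toward putting the smaller input on the inside.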
@@ -656,7 +666,7 @@ cost_hashjoin(Path *path,
Cost
cost_qual_eval(List *quals)
{
Cost total = 0;
Cost total = 0;
cost_qual_eval_walker((Node *) quals, &total);
return total;
@@ -667,10 +677,11 @@ cost_qual_eval_walker(Node *node, Cost *total)
{
if (node == NULL)
return false;
/*
* Our basic strategy is to charge one cpu_operator_cost for each
* operator or function node in the given tree. Vars and Consts
* are charged zero, and so are boolean operators (AND, OR, NOT).
* operator or function node in the given tree. Vars and Consts are
* charged zero, and so are boolean operators (AND, OR, NOT).
* Simplistic, but a lot better than no model at all.
*
* Should we try to account for the possibility of short-circuit
@@ -678,7 +689,7 @@ cost_qual_eval_walker(Node *node, Cost *total)
*/
if (IsA(node, Expr))
{
Expr *expr = (Expr *) node;
Expr *expr = (Expr *) node;
switch (expr->opType)
{
@@ -691,17 +702,19 @@ cost_qual_eval_walker(Node *node, Cost *total)
case NOT_EXPR:
break;
case SUBPLAN_EXPR:
/*
* A subplan node in an expression indicates that the subplan
* will be executed on each evaluation, so charge accordingly.
* (We assume that sub-selects that can be executed as
* InitPlans have already been removed from the expression.)
* A subplan node in an expression indicates that the
* subplan will be executed on each evaluation, so charge
* accordingly. (We assume that sub-selects that can be
* executed as InitPlans have already been removed from
* the expression.)
*
* NOTE: this logic should agree with the estimates used by
* make_subplan() in plan/subselect.c.
* make_subplan() in plan/subselect.c.
*/
{
SubPlan *subplan = (SubPlan *) expr->oper;
SubPlan *subplan = (SubPlan *) expr->oper;
Plan *plan = subplan->plan;
Cost subcost;
@@ -730,13 +743,14 @@ cost_qual_eval_walker(Node *node, Cost *total)
}
/* fall through to examine args of Expr node */
}
/*
* expression_tree_walker doesn't know what to do with RestrictInfo nodes,
* but we just want to recurse through them.
* expression_tree_walker doesn't know what to do with RestrictInfo
* nodes, but we just want to recurse through them.
*/
if (IsA(node, RestrictInfo))
{
RestrictInfo *restrictinfo = (RestrictInfo *) node;
RestrictInfo *restrictinfo = (RestrictInfo *) node;
return cost_qual_eval_walker((Node *) restrictinfo->clause, total);
}
@@ -755,7 +769,7 @@ cost_qual_eval_walker(Node *node, Cost *total)
*
* We set the following fields of the rel node:
* rows: the estimated number of output tuples (after applying
* restriction clauses).
* restriction clauses).
* width: the estimated average output tuple width in bytes.
* baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
*/
@@ -769,9 +783,11 @@ set_baserel_size_estimates(Query *root, RelOptInfo *rel)
restrictlist_selectivity(root,
rel->baserestrictinfo,
lfirsti(rel->relids));
/*
* Force estimate to be at least one row, to make explain output look
* better and to avoid possible divide-by-zero when interpolating cost.
* better and to avoid possible divide-by-zero when interpolating
* cost.
*/
if (rel->rows < 1.0)
rel->rows = 1.0;
@@ -812,10 +828,10 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
temp = outer_rel->rows * inner_rel->rows;
/*
* Apply join restrictivity. Note that we are only considering clauses
* that become restriction clauses at this join level; we are not
* double-counting them because they were not considered in estimating
* the sizes of the component rels.
* Apply join restrictivity. Note that we are only considering
* clauses that become restriction clauses at this join level; we are
* not double-counting them because they were not considered in
* estimating the sizes of the component rels.
*/
temp *= restrictlist_selectivity(root,
restrictlist,
@@ -823,7 +839,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
/*
* Force estimate to be at least one row, to make explain output look
* better and to avoid possible divide-by-zero when interpolating cost.
* better and to avoid possible divide-by-zero when interpolating
* cost.
*/
if (temp < 1.0)
temp = 1.0;
@@ -833,8 +850,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
* We could apply set_rel_width() to compute the output tuple width
* from scratch, but at present it's always just the sum of the input
* widths, so why work harder than necessary? If relnode.c is ever
* taught to remove unneeded columns from join targetlists, go back
* to using set_rel_width here.
* taught to remove unneeded columns from join targetlists, go back to
* using set_rel_width here.
*/
rel->width = outer_rel->width + inner_rel->width;
}
@@ -859,7 +876,7 @@ set_rel_width(Query *root, RelOptInfo *rel)
* compute_attribute_width
* Given a target list entry, find the size in bytes of the attribute.
*
* If a field is variable-length, we make a default assumption. Would be
* If a field is variable-length, we make a default assumption. Would be
* better if VACUUM recorded some stats about the average field width...
* also, we have access to the atttypmod, but fail to use it...
*/
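
As a worked instance of the sort cost model described above (hypothetical figures, and cpu_operator_cost is assumed to sit at its usual default of 0.0025): sorting t = 1,000,000 tuples charges roughly

    2.0 * cpu_operator_cost * t * log2(t) = 2.0 * 0.0025 * 1000000 * 19.93 ≈ 99,700

cost units of startup cost for the comparisons alone. If the data volume exceeds SortMem, the tape-style merge adds about ceil(log6(r)) passes over the data; with r = 36 initial runs that is 2 merge passes, each writing and reading every tuple once.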

src/backend/optimizer/path/indxpath.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.81 2000/03/22 22:08:33 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.82 2000/04/12 17:15:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,62 +46,63 @@
#define is_indexable_operator(clause,opclass,relam,indexkey_on_left) \
(indexable_operator(clause,opclass,relam,indexkey_on_left) != InvalidOid)
typedef enum {
typedef enum
{
Prefix_None, Prefix_Partial, Prefix_Exact
} Prefix_Status;
static void match_index_orclauses(RelOptInfo *rel, IndexOptInfo *index,
List *restrictinfo_list);
List *restrictinfo_list);
static List *match_index_orclause(RelOptInfo *rel, IndexOptInfo *index,
List *or_clauses,
List *other_matching_indices);
List *or_clauses,
List *other_matching_indices);
static bool match_or_subclause_to_indexkey(RelOptInfo *rel,
IndexOptInfo *index,
Expr *clause);
IndexOptInfo *index,
Expr *clause);
static List *group_clauses_by_indexkey(RelOptInfo *rel, IndexOptInfo *index,
int *indexkeys, Oid *classes,
List *restrictinfo_list);
int *indexkeys, Oid *classes,
List *restrictinfo_list);
static List *group_clauses_by_ikey_for_joins(RelOptInfo *rel,
IndexOptInfo *index,
int *indexkeys, Oid *classes,
List *join_cinfo_list,
List *restr_cinfo_list);
IndexOptInfo *index,
int *indexkeys, Oid *classes,
List *join_cinfo_list,
List *restr_cinfo_list);
static bool match_clause_to_indexkey(RelOptInfo *rel, IndexOptInfo *index,
int indexkey, Oid opclass,
Expr *clause, bool join);
int indexkey, Oid opclass,
Expr *clause, bool join);
static bool pred_test(List *predicate_list, List *restrictinfo_list,
List *joininfo_list);
List *joininfo_list);
static bool one_pred_test(Expr *predicate, List *restrictinfo_list);
static bool one_pred_clause_expr_test(Expr *predicate, Node *clause);
static bool one_pred_clause_test(Expr *predicate, Node *clause);
static bool clause_pred_clause_test(Expr *predicate, Node *clause);
static void indexable_joinclauses(RelOptInfo *rel, IndexOptInfo *index,
List *joininfo_list, List *restrictinfo_list,
List **clausegroups, List **outerrelids);
List *joininfo_list, List *restrictinfo_list,
List **clausegroups, List **outerrelids);
static List *index_innerjoin(Query *root, RelOptInfo *rel, IndexOptInfo *index,
List *clausegroup_list, List *outerrelids_list);
List *clausegroup_list, List *outerrelids_list);
static bool useful_for_mergejoin(RelOptInfo *rel, IndexOptInfo *index,
List *joininfo_list);
List *joininfo_list);
static bool useful_for_ordering(Query *root, RelOptInfo *rel,
IndexOptInfo *index,
ScanDirection scandir);
IndexOptInfo *index,
ScanDirection scandir);
static bool match_index_to_operand(int indexkey, Var *operand,
RelOptInfo *rel, IndexOptInfo *index);
RelOptInfo *rel, IndexOptInfo *index);
static bool function_index_operand(Expr *funcOpnd, RelOptInfo *rel,
IndexOptInfo *index);
IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
bool indexkey_on_left);
bool indexkey_on_left);
static Prefix_Status like_fixed_prefix(char *patt, char **prefix);
static Prefix_Status regex_fixed_prefix(char *patt, bool case_insensitive,
char **prefix);
char **prefix);
static List *prefix_quals(Var *leftop, Oid expr_op,
char *prefix, Prefix_Status pstatus);
static char *make_greater_string(const char * str, Oid datatype);
static Oid find_operator(const char * opname, Oid datatype);
static Datum string_to_datum(const char * str, Oid datatype);
static Const *string_to_const(const char * str, Oid datatype);
static bool string_lessthan(const char * str1, const char * str2,
Oid datatype);
char *prefix, Prefix_Status pstatus);
static char *make_greater_string(const char *str, Oid datatype);
static Oid find_operator(const char *opname, Oid datatype);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
static bool string_lessthan(const char *str1, const char *str2,
Oid datatype);
/*
@@ -153,34 +154,34 @@ create_index_paths(Query *root,
List *joinouterrelids;
/*
* If this is a partial index, we can only use it if it passes
* the predicate test.
* If this is a partial index, we can only use it if it passes the
* predicate test.
*/
if (index->indpred != NIL)
if (!pred_test(index->indpred, restrictinfo_list, joininfo_list))
continue;
/*
* 1. Try matching the index against subclauses of restriction 'or'
* clauses (ie, 'or' clauses that reference only this relation).
* The restrictinfo nodes for the 'or' clauses are marked with lists
* of the matching indices. No paths are actually created now;
* that will be done in orindxpath.c after all indexes for the rel
* have been examined. (We need to do it that way because we can
* potentially use a different index for each subclause of an 'or',
* so we can't build a path for an 'or' clause until all indexes have
* been matched against it.)
* 1. Try matching the index against subclauses of restriction
* 'or' clauses (ie, 'or' clauses that reference only this
* relation). The restrictinfo nodes for the 'or' clauses are
* marked with lists of the matching indices. No paths are
* actually created now; that will be done in orindxpath.c after
* all indexes for the rel have been examined. (We need to do it
* that way because we can potentially use a different index for
* each subclause of an 'or', so we can't build a path for an 'or'
* clause until all indexes have been matched against it.)
*
* We don't even think about special handling of 'or' clauses that
* involve more than one relation (ie, are join clauses).
* Can we do anything useful with those?
* involve more than one relation (ie, are join clauses). Can we
* do anything useful with those?
*/
match_index_orclauses(rel, index, restrictinfo_list);
/*
* 2. If the keys of this index match any of the available non-'or'
* restriction clauses, then create a path using those clauses
* as indexquals.
* 2. If the keys of this index match any of the available
* non-'or' restriction clauses, then create a path using those
* clauses as indexquals.
*/
restrictclauses = group_clauses_by_indexkey(rel,
index,
@@ -191,7 +192,7 @@ create_index_paths(Query *root,
if (restrictclauses != NIL)
add_path(rel, (Path *) create_index_path(root, rel, index,
restrictclauses,
NoMovementScanDirection));
NoMovementScanDirection));
/*
* 3. If this index can be used for a mergejoin, then create an
@@ -205,16 +206,17 @@ create_index_paths(Query *root,
if (restrictclauses == NIL)
{
if (useful_for_mergejoin(rel, index, joininfo_list) ||
useful_for_ordering(root, rel, index, ForwardScanDirection))
useful_for_ordering(root, rel, index, ForwardScanDirection))
add_path(rel, (Path *)
create_index_path(root, rel, index,
NIL,
ForwardScanDirection));
}
/*
* Currently, backwards scan is never considered except for the case
* of matching a query result ordering. Possibly should consider
* it in other places?
* Currently, backwards scan is never considered except for the
* case of matching a query result ordering. Possibly should
* consider it in other places?
*/
if (useful_for_ordering(root, rel, index, BackwardScanDirection))
add_path(rel, (Path *)
@@ -223,11 +225,11 @@ create_index_paths(Query *root,
BackwardScanDirection));
/*
* 4. Create an innerjoin index path for each combination of
* other rels used in available join clauses. These paths will
* be considered as the inner side of nestloop joins against
* those sets of other rels. indexable_joinclauses() finds sets
* of clauses that can be used with each combination of outer rels,
* 4. Create an innerjoin index path for each combination of other
* rels used in available join clauses. These paths will be
* considered as the inner side of nestloop joins against those
* sets of other rels. indexable_joinclauses() finds sets of
* clauses that can be used with each combination of outer rels,
* and index_innerjoin builds the paths themselves. We add the
* paths to the rel's innerjoin list, NOT to the result list.
*/
@@ -247,7 +249,7 @@ create_index_paths(Query *root,
/****************************************************************************
* ---- ROUTINES TO PROCESS 'OR' CLAUSES ----
* ---- ROUTINES TO PROCESS 'OR' CLAUSES ----
****************************************************************************/
@@ -280,6 +282,7 @@ match_index_orclauses(RelOptInfo *rel,
if (restriction_is_or_clause(restrictinfo))
{
/*
* Add this index to the subclause index list for each
* subclause that it matches.
@@ -309,7 +312,7 @@ match_index_orclauses(RelOptInfo *rel,
* that have already been matched to subclauses within this
* particular 'or' clause (i.e., a list previously generated by
* this routine), or NIL if this routine has not previously been
* run for this 'or' clause.
* run for this 'or' clause.
*
* Returns a list of the form ((a b c) (d e f) nil (g h) ...) where
* a,b,c are nodes of indices that match the first subclause in
@@ -326,7 +329,8 @@ match_index_orclause(RelOptInfo *rel,
List *index_list;
List *clist;
/* first time through, we create list of same length as OR clause,
/*
* first time through, we create list of same length as OR clause,
* containing an empty sublist for each subclause.
*/
if (!other_matching_indices)
@@ -374,8 +378,8 @@ match_or_subclause_to_indexkey(RelOptInfo *rel,
IndexOptInfo *index,
Expr *clause)
{
int indexkey = index->indexkeys[0];
Oid opclass = index->classlist[0];
int indexkey = index->indexkeys[0];
Oid opclass = index->classlist[0];
if (and_clause((Node *) clause))
{
@@ -400,10 +404,10 @@ match_or_subclause_to_indexkey(RelOptInfo *rel,
* used as indexquals.
*
* In the simplest case this just means making a one-element list of the
* given opclause. However, if the OR subclause is an AND, we have to
* given opclause. However, if the OR subclause is an AND, we have to
* scan it to find the opclause(s) that match the index. (There should
* be at least one, if match_or_subclause_to_indexkey succeeded, but there
* could be more.) Also, we apply expand_indexqual_conditions() to convert
* could be more.) Also, we apply expand_indexqual_conditions() to convert
* any special matching opclauses to indexable operators.
*
* The passed-in clause is not changed.
@@ -413,9 +417,9 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
IndexOptInfo *index,
Expr *orsubclause)
{
List *quals = NIL;
int indexkey = index->indexkeys[0];
Oid opclass = index->classlist[0];
List *quals = NIL;
int indexkey = index->indexkeys[0];
Oid opclass = index->classlist[0];
if (and_clause((Node *) orsubclause))
{
@@ -514,8 +518,9 @@ group_clauses_by_indexkey(RelOptInfo *rel,
clausegroup = lappend(clausegroup, rinfo);
}
/* If no clauses match this key, we're done; we don't want to
* look at keys to its right.
/*
* If no clauses match this key, we're done; we don't want to look
* at keys to its right.
*/
if (clausegroup == NIL)
break;
@@ -533,7 +538,7 @@ group_clauses_by_indexkey(RelOptInfo *rel,
/*
* group_clauses_by_ikey_for_joins
* Generates a list of join clauses that can be used with an index
* Generates a list of join clauses that can be used with an index
* to scan the inner side of a nestloop join.
*
* This is much like group_clauses_by_indexkey(), but we consider both
@@ -593,8 +598,9 @@ group_clauses_by_ikey_for_joins(RelOptInfo *rel,
clausegroup = lappend(clausegroup, rinfo);
}
/* If no clauses match this key, we're done; we don't want to
* look at keys to its right.
/*
* If no clauses match this key, we're done; we don't want to look
* at keys to its right.
*/
if (clausegroup == NIL)
break;
@@ -607,8 +613,8 @@ group_clauses_by_ikey_for_joins(RelOptInfo *rel,
} while (!DoneMatchingIndexKeys(indexkeys, index));
/*
* if no join clause was matched then there ain't clauses for
* joins at all.
* if no join clause was matched then there ain't clauses for joins at
* all.
*/
if (!jfound)
{
@@ -623,8 +629,8 @@ group_clauses_by_ikey_for_joins(RelOptInfo *rel,
/*
* match_clause_to_indexkey()
* Determines whether a restriction or join clause matches
* a key of an index.
* Determines whether a restriction or join clause matches
* a key of an index.
*
* To match, the clause:
@@ -673,43 +679,46 @@ match_clause_to_indexkey(RelOptInfo *rel,
*rightop;
/* Clause must be a binary opclause. */
if (! is_opclause((Node *) clause))
if (!is_opclause((Node *) clause))
return false;
leftop = get_leftop(clause);
rightop = get_rightop(clause);
if (! leftop || ! rightop)
if (!leftop || !rightop)
return false;
if (!join)
{
/*
* Not considering joins, so check for clauses of the form:
* (indexkey operator constant) or (constant operator indexkey).
* We will accept a Param as being constant.
*/
if ((IsA(rightop, Const) || IsA(rightop, Param)) &&
if ((IsA(rightop, Const) ||IsA(rightop, Param)) &&
match_index_to_operand(indexkey, leftop, rel, index))
{
if (is_indexable_operator(clause, opclass, index->relam, true))
return true;
/*
* If we didn't find a member of the index's opclass,
* see whether it is a "special" indexable operator.
* If we didn't find a member of the index's opclass, see
* whether it is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, index->relam,
true))
return true;
return false;
}
if ((IsA(leftop, Const) || IsA(leftop, Param)) &&
if ((IsA(leftop, Const) ||IsA(leftop, Param)) &&
match_index_to_operand(indexkey, rightop, rel, index))
{
if (is_indexable_operator(clause, opclass, index->relam, false))
return true;
/*
* If we didn't find a member of the index's opclass,
* see whether it is a "special" indexable operator.
* If we didn't find a member of the index's opclass, see
* whether it is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, index->relam,
false))
@@ -719,20 +728,21 @@ match_clause_to_indexkey(RelOptInfo *rel,
}
else
{
/*
* Check for an indexqual that could be handled by a nestloop join.
* We need the index key to be compared against an expression
* that uses none of the indexed relation's vars.
* Check for an indexqual that could be handled by a nestloop
* join. We need the index key to be compared against an
* expression that uses none of the indexed relation's vars.
*/
if (match_index_to_operand(indexkey, leftop, rel, index))
{
List *othervarnos = pull_varnos((Node *) rightop);
bool isIndexable;
isIndexable = ! intMember(lfirsti(rel->relids), othervarnos);
isIndexable = !intMember(lfirsti(rel->relids), othervarnos);
freeList(othervarnos);
if (isIndexable &&
is_indexable_operator(clause, opclass, index->relam, true))
is_indexable_operator(clause, opclass, index->relam, true))
return true;
}
else if (match_index_to_operand(indexkey, rightop, rel, index))
@@ -740,10 +750,10 @@ match_clause_to_indexkey(RelOptInfo *rel,
List *othervarnos = pull_varnos((Node *) leftop);
bool isIndexable;
isIndexable = ! intMember(lfirsti(rel->relids), othervarnos);
isIndexable = !intMember(lfirsti(rel->relids), othervarnos);
freeList(othervarnos);
if (isIndexable &&
is_indexable_operator(clause, opclass, index->relam, false))
is_indexable_operator(clause, opclass, index->relam, false))
return true;
}
}
@@ -768,7 +778,7 @@ match_clause_to_indexkey(RelOptInfo *rel,
*
* Returns the OID of the matching operator, or InvalidOid if no match.
* Note that the returned OID will be different from the one in the given
* expression if we used a binary-compatible substitution. Also note that
* expression if we used a binary-compatible substitution. Also note that
* if indexkey_on_left is FALSE (meaning we need to commute), the returned
* OID is *not* commuted; it can be plugged directly into the given clause.
*/
@@ -818,13 +828,14 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
if (HeapTupleIsValid(newop))
{
Oid new_expr_op = oprid(newop);
Oid new_expr_op = oprid(newop);
if (new_expr_op != expr_op)
{
/*
* OK, we found a binary-compatible operator of the same name;
* now does it match the index?
* OK, we found a binary-compatible operator of the same
* name; now does it match the index?
*/
if (indexkey_on_left)
commuted_op = new_expr_op;
@@ -883,12 +894,12 @@ useful_for_mergejoin(RelOptInfo *rel,
{
if (restrictinfo->left_sortop == ordering[0] &&
match_index_to_operand(indexkeys[0],
get_leftop(restrictinfo->clause),
get_leftop(restrictinfo->clause),
rel, index))
return true;
if (restrictinfo->right_sortop == ordering[0] &&
match_index_to_operand(indexkeys[0],
get_rightop(restrictinfo->clause),
get_rightop(restrictinfo->clause),
rel, index))
return true;
}
@@ -1127,7 +1138,7 @@ one_pred_clause_test(Expr *predicate, Node *clause)
*/
static StrategyNumber
BT_implic_table[BTMaxStrategyNumber][BTMaxStrategyNumber] = {
BT_implic_table[BTMaxStrategyNumber][BTMaxStrategyNumber] = {
{2, 2, 0, 0, 0},
{1, 2, 0, 0, 0},
{1, 2, 3, 4, 5},
@@ -1346,13 +1357,13 @@ clause_pred_clause_test(Expr *predicate, Node *clause)
* rel's restrictinfo list. Therefore, every clause in the group references
* the current rel plus the same set of other rels (except for the restrict
* clauses, which only reference the current rel). Therefore, this set
* of clauses could be used as an indexqual if the relation is scanned
* of clauses could be used as an indexqual if the relation is scanned
* as the inner side of a nestloop join when the outer side contains
* (at least) all those "other rels".
*
* XXX Actually, given that we are considering a join that requires an
* outer rel set (A,B,C), we should use all qual clauses that reference
* any subset of these rels, not just the full set or none. This is
* any subset of these rels, not just the full set or none. This is
* doable with a doubly nested loop over joininfo_list; is it worth it?
*
* Returns two parallel lists of the same length: the clause groups,
@@ -1430,10 +1441,11 @@ index_innerjoin(Query *root, RelOptInfo *rel, IndexOptInfo *index,
pathnode->path.pathtype = T_IndexScan;
pathnode->path.parent = rel;
/*
* There's no point in marking the path with any pathkeys, since
* it will only ever be used as the inner path of a nestloop,
* and so its ordering does not matter.
* it will only ever be used as the inner path of a nestloop, and
* so its ordering does not matter.
*/
pathnode->path.pathkeys = NIL;
@@ -1441,7 +1453,8 @@ index_innerjoin(Query *root, RelOptInfo *rel, IndexOptInfo *index,
/* expand special operators to indexquals the executor can handle */
indexquals = expand_indexqual_conditions(indexquals);
/* Note that we are making a pathnode for a single-scan indexscan;
/*
* Note that we are making a pathnode for a single-scan indexscan;
* therefore, both indexid and indexqual should be single-element
* lists.
*/
@@ -1456,14 +1469,15 @@ index_innerjoin(Query *root, RelOptInfo *rel, IndexOptInfo *index,
/*
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
* selectivity of the join clauses. Since clausegroup may contain
* both restriction and join clauses, we have to do a set union to
* get the full set of clauses that must be considered to compute
* the correct selectivity. (We can't just nconc the two lists;
* then we might have some restriction clauses appearing twice,
* which'd mislead restrictlist_selectivity into double-counting
* their selectivity.)
* indexscan. This is less than rel->rows because of the
* additional selectivity of the join clauses. Since clausegroup
* may contain both restriction and join clauses, we have to do a
* set union to get the full set of clauses that must be
* considered to compute the correct selectivity. (We can't just
* nconc the two lists; then we might have some restriction
* clauses appearing twice, which'd mislead
* restrictlist_selectivity into double-counting their
* selectivity.)
*/
pathnode->rows = rel->tuples *
restrictlist_selectivity(root,
@@ -1490,7 +1504,7 @@ index_innerjoin(Query *root, RelOptInfo *rel, IndexOptInfo *index,
* match_index_to_operand()
* Generalized test for a match between an index's key
* and the operand on one side of a restriction or join clause.
* Now check for functional indices as well.
* Now check for functional indices as well.
*/
static bool
match_index_to_operand(int indexkey,
@@ -1500,6 +1514,7 @@ match_index_to_operand(int indexkey,
{
if (index->indproc == InvalidOid)
{
/*
* Normal index.
*/
@@ -1530,7 +1545,7 @@ function_index_operand(Expr *funcOpnd, RelOptInfo *rel, IndexOptInfo *index)
/*
* sanity check, make sure we know what we're dealing with here.
*/
if (funcOpnd == NULL || ! IsA(funcOpnd, Expr) ||
if (funcOpnd == NULL || !IsA(funcOpnd, Expr) ||
funcOpnd->opType != FUNC_EXPR ||
funcOpnd->oper == NULL || indexKeys == NULL)
return false;
@@ -1550,9 +1565,9 @@ function_index_operand(Expr *funcOpnd, RelOptInfo *rel, IndexOptInfo *index)
i = 0;
foreach(arg, funcargs)
{
Var *var = (Var *) lfirst(arg);
Var *var = (Var *) lfirst(arg);
if (! IsA(var, Var))
if (!IsA(var, Var))
return false;
if (indexKeys[i] == 0)
return false;
@@ -1578,10 +1593,10 @@ function_index_operand(Expr *funcOpnd, RelOptInfo *rel, IndexOptInfo *index)
* indexscan machinery. The key idea is that these operators allow us
* to derive approximate indexscan qual clauses, such that any tuples
* that pass the operator clause itself must also satisfy the simpler
* indexscan condition(s). Then we can use the indexscan machinery
* indexscan condition(s). Then we can use the indexscan machinery
* to avoid scanning as much of the table as we'd otherwise have to,
* while applying the original operator as a qpqual condition to ensure
* we deliver only the tuples we want. (In essence, we're using a regular
* we deliver only the tuples we want. (In essence, we're using a regular
* index as if it were a lossy index.)
*
* An example of what we're doing is
@@ -1630,11 +1645,12 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
char *patt;
char *prefix;
/* Currently, all known special operators require the indexkey
* on the left, but this test could be pushed into the switch statement
* if some are added that do not...
/*
* Currently, all known special operators require the indexkey on the
* left, but this test could be pushed into the switch statement if
* some are added that do not...
*/
if (! indexkey_on_left)
if (!indexkey_on_left)
return false;
/* we know these will succeed */
@@ -1643,7 +1659,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
expr_op = ((Oper *) clause->oper)->opno;
/* again, required for all current special ops: */
if (! IsA(rightop, Const) ||
if (!IsA(rightop, Const) ||
((Const *) rightop)->constisnull)
return false;
constvalue = ((Const *) rightop)->constvalue;
@@ -1657,7 +1673,8 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
/* the right-hand const is type text for all of these */
patt = textout((text *) DatumGetPointer(constvalue));
isIndexable = like_fixed_prefix(patt, &prefix) != Prefix_None;
if (prefix) pfree(prefix);
if (prefix)
pfree(prefix);
pfree(patt);
break;
@@ -1668,7 +1685,8 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
/* the right-hand const is type text for all of these */
patt = textout((text *) DatumGetPointer(constvalue));
isIndexable = regex_fixed_prefix(patt, false, &prefix) != Prefix_None;
if (prefix) pfree(prefix);
if (prefix)
pfree(prefix);
pfree(patt);
break;
@@ -1679,13 +1697,14 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
/* the right-hand const is type text for all of these */
patt = textout((text *) DatumGetPointer(constvalue));
isIndexable = regex_fixed_prefix(patt, true, &prefix) != Prefix_None;
if (prefix) pfree(prefix);
if (prefix)
pfree(prefix);
pfree(patt);
break;
}
/* done if the expression doesn't look indexable */
if (! isIndexable)
if (!isIndexable)
return false;
/*
@@ -1699,32 +1718,32 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
case OID_TEXT_LIKE_OP:
case OID_TEXT_REGEXEQ_OP:
case OID_TEXT_ICREGEXEQ_OP:
if (! op_class(find_operator(">=", TEXTOID), opclass, relam) ||
! op_class(find_operator("<", TEXTOID), opclass, relam))
if (!op_class(find_operator(">=", TEXTOID), opclass, relam) ||
!op_class(find_operator("<", TEXTOID), opclass, relam))
isIndexable = false;
break;
case OID_BPCHAR_LIKE_OP:
case OID_BPCHAR_REGEXEQ_OP:
case OID_BPCHAR_ICREGEXEQ_OP:
if (! op_class(find_operator(">=", BPCHAROID), opclass, relam) ||
! op_class(find_operator("<", BPCHAROID), opclass, relam))
if (!op_class(find_operator(">=", BPCHAROID), opclass, relam) ||
!op_class(find_operator("<", BPCHAROID), opclass, relam))
isIndexable = false;
break;
case OID_VARCHAR_LIKE_OP:
case OID_VARCHAR_REGEXEQ_OP:
case OID_VARCHAR_ICREGEXEQ_OP:
if (! op_class(find_operator(">=", VARCHAROID), opclass, relam) ||
! op_class(find_operator("<", VARCHAROID), opclass, relam))
if (!op_class(find_operator(">=", VARCHAROID), opclass, relam) ||
!op_class(find_operator("<", VARCHAROID), opclass, relam))
isIndexable = false;
break;
case OID_NAME_LIKE_OP:
case OID_NAME_REGEXEQ_OP:
case OID_NAME_ICREGEXEQ_OP:
if (! op_class(find_operator(">=", NAMEOID), opclass, relam) ||
! op_class(find_operator("<", NAMEOID), opclass, relam))
if (!op_class(find_operator(">=", NAMEOID), opclass, relam) ||
!op_class(find_operator("<", NAMEOID), opclass, relam))
isIndexable = false;
break;
}
@@ -1736,7 +1755,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
* expand_indexqual_conditions
* Given a list of (implicitly ANDed) indexqual clauses,
* expand any "special" index operators into clauses that the indexscan
* machinery will know what to do with. Clauses that were not
* machinery will know what to do with. Clauses that were not
* recognized by match_special_index_operator() must be passed through
* unchanged.
*/
@@ -1749,6 +1768,7 @@ expand_indexqual_conditions(List *indexquals)
foreach(q, indexquals)
{
Expr *clause = (Expr *) lfirst(q);
/* we know these will succeed */
Var *leftop = get_leftop(clause);
Var *rightop = get_rightop(clause);
@@ -1760,11 +1780,13 @@ expand_indexqual_conditions(List *indexquals)
switch (expr_op)
{
/*
* LIKE and regex operators are not members of any index opclass,
* so if we find one in an indexqual list we can assume that
* it was accepted by match_special_index_operator().
*/
/*
* LIKE and regex operators are not members of any index
* opclass, so if we find one in an indexqual list we can
* assume that it was accepted by
* match_special_index_operator().
*/
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
case OID_VARCHAR_LIKE_OP:
@@ -1776,7 +1798,8 @@ expand_indexqual_conditions(List *indexquals)
resultquals = nconc(resultquals,
prefix_quals(leftop, expr_op,
prefix, pstatus));
if (prefix) pfree(prefix);
if (prefix)
pfree(prefix);
pfree(patt);
break;
@@ -1791,7 +1814,8 @@ expand_indexqual_conditions(List *indexquals)
resultquals = nconc(resultquals,
prefix_quals(leftop, expr_op,
prefix, pstatus));
if (prefix) pfree(prefix);
if (prefix)
pfree(prefix);
pfree(patt);
break;
@@ -1806,7 +1830,8 @@ expand_indexqual_conditions(List *indexquals)
resultquals = nconc(resultquals,
prefix_quals(leftop, expr_op,
prefix, pstatus));
if (prefix) pfree(prefix);
if (prefix)
pfree(prefix);
pfree(patt);
break;
@@ -1833,7 +1858,7 @@ like_fixed_prefix(char *patt, char **prefix)
int pos,
match_pos;
*prefix = match = palloc(strlen(patt)+1);
*prefix = match = palloc(strlen(patt) + 1);
match_pos = 0;
for (pos = 0; patt[pos]; pos++)
@@ -1849,14 +1874,15 @@ like_fixed_prefix(char *patt, char **prefix)
if (patt[pos] == '\0')
break;
}
/*
* NOTE: this code used to think that %% meant a literal %,
* but textlike() itself does not think that, and the SQL92
* spec doesn't say any such thing either.
* NOTE: this code used to think that %% meant a literal %, but
* textlike() itself does not think that, and the SQL92 spec
* doesn't say any such thing either.
*/
match[match_pos++] = patt[pos];
}
match[match_pos] = '\0';
/* in LIKE, an empty pattern is an exact match! */
@@ -1905,7 +1931,7 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
}
/* OK, allocate space for pattern */
*prefix = match = palloc(strlen(patt)+1);
*prefix = match = palloc(strlen(patt) + 1);
match_pos = 0;
/* note start at pos 1 to skip leading ^ */
@@ -1916,9 +1942,11 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
patt[pos] == '*' ||
patt[pos] == '[' ||
patt[pos] == '$' ||
/* XXX I suspect isalpha() is not an adequately locale-sensitive
* test for characters that can vary under case folding?
*/
/*
* XXX I suspect isalpha() is not an adequately locale-sensitive
* test for characters that can vary under case folding?
*/
(case_insensitive && isalpha(patt[pos])))
break;
if (patt[pos] == '\\')
@@ -1932,9 +1960,9 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
match[match_pos] = '\0';
if (patt[pos] == '$' && patt[pos+1] == '\0')
if (patt[pos] == '$' && patt[pos + 1] == '\0')
return Prefix_Exact; /* pattern specifies exact match */
if (match_pos > 0)
return Prefix_Partial;
return Prefix_None;
@@ -2020,7 +2048,8 @@ prefix_quals(Var *leftop, Oid expr_op,
result = lcons(expr, NIL);
/*
* If we can create a string larger than the prefix, say "x < greaterstr".
* If we can create a string larger than the prefix, say "x <
* greaterstr".
*/
greaterstr = make_greater_string(prefix, datatype);
if (greaterstr)
@@ -2058,17 +2087,20 @@ prefix_quals(Var *leftop, Oid expr_op,
* given "foos" and return "foot", will this actually be greater than "fooss"?
*/
static char *
make_greater_string(const char * str, Oid datatype)
make_greater_string(const char *str, Oid datatype)
{
char *workstr;
int len;
/* Make a modifiable copy, which will be our return value if successful */
/*
* Make a modifiable copy, which will be our return value if
* successful
*/
workstr = pstrdup((char *) str);
while ((len = strlen(workstr)) > 0)
{
unsigned char *lastchar = (unsigned char *) (workstr + len - 1);
unsigned char *lastchar = (unsigned char *) (workstr + len - 1);
/*
* Try to generate a larger string by incrementing the last byte.
@@ -2077,14 +2109,15 @@ make_greater_string(const char * str, Oid datatype)
{
(*lastchar)++;
if (string_lessthan(str, workstr, datatype))
return workstr; /* Success! */
return workstr; /* Success! */
}
/*
* Truncate off the last character, which might be more than 1 byte
* in MULTIBYTE case.
* Truncate off the last character, which might be more than 1
* byte in MULTIBYTE case.
*/
#ifdef MULTIBYTE
len = pg_mbcliplen((const unsigned char *) workstr, len, len-1);
len = pg_mbcliplen((const unsigned char *) workstr, len, len - 1);
workstr[len] = '\0';
#else
*lastchar = '\0';
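To make the control flow of this loop easier to see in isolation, here is a toy version (a sketch under simplifying assumptions, not this commit's code): it works on plain C strings and compares with strcmp(), whereas the real routine calls the datatype's own "<" operator so locale rules are honored, and in the MULTIBYTE case it truncates whole characters rather than single bytes.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Toy "string greater than this prefix" search: bump the last byte if
     * possible and check that the result really compares greater; otherwise
     * drop the byte and retry with the shorter prefix.
     */
    static char *
    toy_greater_string(const char *str)
    {
        char       *work = malloc(strlen(str) + 1);
        size_t      len;

        if (work == NULL)
            return NULL;
        strcpy(work, str);

        while ((len = strlen(work)) > 0)
        {
            unsigned char *lastchar = (unsigned char *) (work + len - 1);

            if (*lastchar < 255)
            {
                (*lastchar)++;
                if (strcmp(str, work) < 0)
                    return work;    /* success */
            }
            work[len - 1] = '\0';   /* truncate and try again */
        }
        free(work);
        return NULL;                /* no greater string exists */
    }

    int
    main(void)
    {
        char       *g = toy_greater_string("abc");

        if (g != NULL)
        {
            printf("first string after the prefix \"abc\": \"%s\"\n", g);
            free(g);
        }
        return 0;
    }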
@@ -2102,7 +2135,7 @@ make_greater_string(const char * str, Oid datatype)
/* See if there is a binary op of the given name for the given datatype */
static Oid
find_operator(const char * opname, Oid datatype)
find_operator(const char *opname, Oid datatype)
{
HeapTuple optup;
@@ -2122,10 +2155,12 @@ find_operator(const char * opname, Oid datatype)
* returned value should be pfree'd if no longer needed.
*/
static Datum
string_to_datum(const char * str, Oid datatype)
string_to_datum(const char *str, Oid datatype)
{
/* We cheat a little by assuming that textin() will do for
* bpchar and varchar constants too...
/*
* We cheat a little by assuming that textin() will do for bpchar and
* varchar constants too...
*/
if (datatype == NAMEOID)
return PointerGetDatum(namein((char *) str));
@@ -2137,7 +2172,7 @@ string_to_datum(const char * str, Oid datatype)
* Generate a Const node of the appropriate type from a C string.
*/
static Const *
string_to_const(const char * str, Oid datatype)
string_to_const(const char *str, Oid datatype)
{
Datum conval = string_to_datum(str, datatype);
@@ -2151,7 +2186,7 @@ string_to_const(const char * str, Oid datatype)
* "<" operator function, to ensure we get the right result...
*/
static bool
string_lessthan(const char * str1, const char * str2, Oid datatype)
string_lessthan(const char *str1, const char *str2, Oid datatype)
{
Datum datum1 = string_to_datum(str1, datatype);
Datum datum2 = string_to_datum(str2, datatype);



@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.53 2000/02/18 23:47:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.54 2000/04/12 17:15:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,25 +28,27 @@
#include "utils/lsyscache.h"
static void sort_inner_and_outer(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list);
static void match_unsorted_outer(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list);
#ifdef NOT_USED
static void match_unsorted_inner(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list);
#endif
static void hash_inner_and_outer(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist);
static Path *best_innerjoin(List *join_paths, List *outer_relid);
static Selectivity estimate_disbursion(Query *root, Var *var);
static List *select_mergejoin_clauses(RelOptInfo *joinrel,
RelOptInfo *outerrel,
RelOptInfo *innerrel,
List *restrictlist);
RelOptInfo *outerrel,
RelOptInfo *innerrel,
List *restrictlist);
/*
@@ -79,32 +81,33 @@ add_paths_to_joinrel(Query *root,
restrictlist);
/*
* 1. Consider mergejoin paths where both relations must be
* explicitly sorted.
* 1. Consider mergejoin paths where both relations must be explicitly
* sorted.
*/
sort_inner_and_outer(root, joinrel, outerrel, innerrel,
restrictlist, mergeclause_list);
/*
* 2. Consider paths where the outer relation need not be
* explicitly sorted. This includes both nestloops and
* mergejoins where the outer path is already ordered.
* 2. Consider paths where the outer relation need not be explicitly
* sorted. This includes both nestloops and mergejoins where the outer
* path is already ordered.
*/
match_unsorted_outer(root, joinrel, outerrel, innerrel,
restrictlist, mergeclause_list);
#ifdef NOT_USED
/*
* 3. Consider paths where the inner relation need not be
* explicitly sorted. This includes mergejoins only
* (nestloops were already built in match_unsorted_outer).
* 3. Consider paths where the inner relation need not be explicitly
* sorted. This includes mergejoins only (nestloops were already
* built in match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any
* really significant difference between the inner and outer
* side of a mergejoin, so match_unsorted_inner creates no paths
* that aren't equivalent to those made by match_unsorted_outer
* when add_paths_to_joinrel() is invoked with the two rels given
* in the other order.
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
* significant difference between the inner and outer side of a
* mergejoin, so match_unsorted_inner creates no paths that aren't
* equivalent to those made by match_unsorted_outer when
* add_paths_to_joinrel() is invoked with the two rels given in the
* other order.
*/
match_unsorted_inner(root, joinrel, outerrel, innerrel,
restrictlist, mergeclause_list);
@@ -144,31 +147,31 @@ sort_inner_and_outer(Query *root,
/*
* Each possible ordering of the available mergejoin clauses will
* generate a differently-sorted result path at essentially the
* same cost. We have no basis for choosing one over another at
* this level of joining, but some sort orders may be more useful
* than others for higher-level mergejoins. Generating a path here
* for *every* permutation of mergejoin clauses doesn't seem like
* a winning strategy, however; the cost in planning time is too high.
* generate a differently-sorted result path at essentially the same
* cost. We have no basis for choosing one over another at this level
* of joining, but some sort orders may be more useful than others for
* higher-level mergejoins. Generating a path here for *every*
* permutation of mergejoin clauses doesn't seem like a winning
* strategy, however; the cost in planning time is too high.
*
* For now, we generate one path for each mergejoin clause, listing that
* clause first and the rest in random order. This should allow at least
* a one-clause mergejoin without re-sorting against any other possible
* mergejoin partner path. But if we've not guessed the right ordering
* of secondary clauses, we may end up evaluating clauses as qpquals when
* they could have been done as mergeclauses. We need to figure out a
* better way. (Two possible approaches: look at all the relevant index
* relations to suggest plausible sort orders, or make just one output
* path and somehow mark it as having a sort-order that can be rearranged
* freely.)
* clause first and the rest in random order. This should allow at
* least a one-clause mergejoin without re-sorting against any other
* possible mergejoin partner path. But if we've not guessed the
* right ordering of secondary clauses, we may end up evaluating
* clauses as qpquals when they could have been done as mergeclauses.
* We need to figure out a better way. (Two possible approaches: look
* at all the relevant index relations to suggest plausible sort
* orders, or make just one output path and somehow mark it as having
* a sort-order that can be rearranged freely.)
*/
foreach(i, mergeclause_list)
{
RestrictInfo *restrictinfo = lfirst(i);
List *curclause_list;
List *outerkeys;
List *innerkeys;
List *merge_pathkeys;
RestrictInfo *restrictinfo = lfirst(i);
List *curclause_list;
List *outerkeys;
List *innerkeys;
List *merge_pathkeys;
/* Make a mergeclause list with this guy first. */
if (i != mergeclause_list)
@@ -176,13 +179,14 @@ sort_inner_and_outer(Query *root,
lremove(restrictinfo,
listCopy(mergeclause_list)));
else
curclause_list = mergeclause_list; /* no work at first one... */
curclause_list = mergeclause_list; /* no work at first one... */
/* Build sort pathkeys for both sides.
/*
* Build sort pathkeys for both sides.
*
* Note: it's possible that the cheapest paths will already be
* sorted properly. create_mergejoin_path will detect that case
* and suppress an explicit sort step, so we needn't do so here.
* Note: it's possible that the cheapest paths will already be sorted
* properly. create_mergejoin_path will detect that case and
* suppress an explicit sort step, so we needn't do so here.
*/
outerkeys = make_pathkeys_for_mergeclauses(root,
curclause_list,
@@ -198,8 +202,8 @@ sort_inner_and_outer(Query *root,
/*
* And now we can make the path. We only consider the cheapest-
* total-cost input paths, since we are assuming here that a sort
* is required. We will consider cheapest-startup-cost input paths
* later, and only if they don't need a sort.
* is required. We will consider cheapest-startup-cost input
* paths later, and only if they don't need a sort.
*/
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
@@ -225,7 +229,7 @@ sort_inner_and_outer(Query *root,
* inner path, one on the cheapest-startup-cost inner path (if different),
* and one on the best inner-indexscan path (if any).
*
* We also consider mergejoins if mergejoin clauses are available. We have
* We also consider mergejoins if mergejoin clauses are available. We have
* two ways to generate the inner path for a mergejoin: sort the cheapest
* inner path, or use an inner path that is already suitably ordered for the
* merge. If we have several mergeclauses, it could be that there is no inner
@@ -255,8 +259,8 @@ match_unsorted_outer(Query *root,
List *i;
/*
* Get the best innerjoin indexpath (if any) for this outer rel.
* It's the same for all outer paths.
* Get the best innerjoin indexpath (if any) for this outer rel. It's
* the same for all outer paths.
*/
bestinnerjoin = best_innerjoin(innerrel->innerjoin, outerrel->relids);
@@ -274,8 +278,8 @@ match_unsorted_outer(Query *root,
/*
* The result will have this sort order (even if it is implemented
* as a nestloop, and even if some of the mergeclauses are implemented
* by qpquals rather than as true mergeclauses):
* as a nestloop, and even if some of the mergeclauses are
* implemented by qpquals rather than as true mergeclauses):
*/
merge_pathkeys = build_join_pathkeys(outerpath->pathkeys,
joinrel->targetlist,
@@ -318,11 +322,12 @@ match_unsorted_outer(Query *root,
/* Compute the required ordering of the inner path */
innersortkeys = make_pathkeys_for_mergeclauses(root,
mergeclauses,
innerrel->targetlist);
innerrel->targetlist);
/*
* Generate a mergejoin on the basis of sorting the cheapest inner.
* Since a sort will be needed, only cheapest total cost matters.
* Generate a mergejoin on the basis of sorting the cheapest
* inner. Since a sort will be needed, only cheapest total cost
* matters.
*/
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
@@ -335,11 +340,11 @@ match_unsorted_outer(Query *root,
innersortkeys));
/*
* Look for presorted inner paths that satisfy the mergeclause list
* or any truncation thereof. Here, we consider both cheap startup
* cost and cheap total cost.
* Look for presorted inner paths that satisfy the mergeclause
* list or any truncation thereof. Here, we consider both cheap
* startup cost and cheap total cost.
*/
trialsortkeys = listCopy(innersortkeys); /* modifiable copy */
trialsortkeys = listCopy(innersortkeys); /* modifiable copy */
cheapest_startup_inner = NULL;
cheapest_total_inner = NULL;
num_mergeclauses = length(mergeclauses);
@@ -349,8 +354,9 @@ match_unsorted_outer(Query *root,
Path *innerpath;
List *newclauses = NIL;
/* Look for an inner path ordered well enough to merge with
* the first 'clausecnt' mergeclauses. NB: trialsortkeys list
/*
* Look for an inner path ordered well enough to merge with
* the first 'clausecnt' mergeclauses. NB: trialsortkeys list
* is modified destructively, which is why we made a copy...
*/
trialsortkeys = ltruncate(clausecnt, trialsortkeys);
@@ -391,14 +397,16 @@ match_unsorted_outer(Query *root,
/* Found a cheap (or even-cheaper) sorted path */
if (innerpath != cheapest_total_inner)
{
/* Avoid rebuilding clause list if we already made one;
* saves memory in big join trees...
/*
* Avoid rebuilding clause list if we already made
* one; saves memory in big join trees...
*/
if (newclauses == NIL)
{
if (clausecnt < num_mergeclauses)
newclauses = ltruncate(clausecnt,
listCopy(mergeclauses));
listCopy(mergeclauses));
else
newclauses = mergeclauses;
}
@@ -461,11 +469,12 @@ match_unsorted_inner(Query *root,
/* Compute the required ordering of the outer path */
outersortkeys = make_pathkeys_for_mergeclauses(root,
mergeclauses,
outerrel->targetlist);
outerrel->targetlist);
/*
* Generate a mergejoin on the basis of sorting the cheapest outer.
* Since a sort will be needed, only cheapest total cost matters.
* Generate a mergejoin on the basis of sorting the cheapest
* outer. Since a sort will be needed, only cheapest total cost
* matters.
*/
merge_pathkeys = build_join_pathkeys(outersortkeys,
joinrel->targetlist,
@@ -479,10 +488,11 @@ match_unsorted_inner(Query *root,
mergeclauses,
outersortkeys,
NIL));
/*
* Now generate mergejoins based on already-sufficiently-ordered
* outer paths. There's likely to be some redundancy here with paths
* already generated by merge_unsorted_outer ... but since
* outer paths. There's likely to be some redundancy here with
* paths already generated by merge_unsorted_outer ... but since
* merge_unsorted_outer doesn't consider all permutations of the
* mergeclause list, it may fail to notice that this particular
* innerpath could have been used with this outerpath.
@@ -491,7 +501,8 @@ match_unsorted_inner(Query *root,
outersortkeys,
TOTAL_COST);
if (totalouterpath == NULL)
continue; /* there won't be a startup-cost path either */
continue; /* there won't be a startup-cost path
* either */
merge_pathkeys = build_join_pathkeys(totalouterpath->pathkeys,
joinrel->targetlist,
@@ -552,8 +563,8 @@ hash_inner_and_outer(Query *root,
List *i;
/*
* Scan the join's restrictinfo list to find hashjoinable clauses
* that are usable with this pair of sub-relations. Since we currently
* Scan the join's restrictinfo list to find hashjoinable clauses that
* are usable with this pair of sub-relations. Since we currently
* accept only var-op-var clauses as hashjoinable, we need only check
* the membership of the vars to determine whether a particular clause
* can be used with this pair of sub-relations. This code would need
@@ -568,7 +579,7 @@ hash_inner_and_outer(Query *root,
*right,
*inner;
List *hashclauses;
Selectivity innerdisbursion;
Selectivity innerdisbursion;
if (restrictinfo->hashjoinoperator == InvalidOid)
continue; /* not hashjoinable */
@@ -595,9 +606,9 @@ hash_inner_and_outer(Query *root,
innerdisbursion = estimate_disbursion(root, inner);
/*
* We consider both the cheapest-total-cost and cheapest-startup-cost
* outer paths. There's no need to consider any but the cheapest-
* total-cost inner path, however.
* We consider both the cheapest-total-cost and
* cheapest-startup-cost outer paths. There's no need to consider
* any but the cheapest- total-cost inner path, however.
*/
add_path(joinrel, (Path *)
create_hashjoin_path(joinrel,
@@ -644,7 +655,8 @@ best_innerjoin(List *join_paths, Relids outer_relids)
Assert(IsA(path, IndexPath));
/* path->joinrelids is the set of base rels that must be part of
/*
* path->joinrelids is the set of base rels that must be part of
* outer_relids in order to use this inner path, because those
* rels are used in the index join quals of this inner path.
*/
@@ -661,7 +673,7 @@ best_innerjoin(List *join_paths, Relids outer_relids)
*
* We use a default of 0.1 if we can't figure out anything better.
* This will typically discourage use of a hash rather strongly,
* if the inner relation is large. We do not want to hash unless
* if the inner relation is large. We do not want to hash unless
* we know that the inner rel is well-dispersed (or the alternatives
* seem much worse).
*/
@@ -670,7 +682,7 @@ estimate_disbursion(Query *root, Var *var)
{
Oid relid;
if (! IsA(var, Var))
if (!IsA(var, Var))
return 0.1;
relid = getrelid(var->varno, root->rtable);
@@ -690,7 +702,7 @@ estimate_disbursion(Query *root, Var *var)
* Since we currently allow only plain Vars as the left and right sides
* of mergejoin clauses, this test is relatively simple. This routine
* would need to be upgraded to support more-complex expressions
* as sides of mergejoins. In theory, we could allow arbitrarily complex
* as sides of mergejoins. In theory, we could allow arbitrarily complex
* expressions in mergejoins, so long as one side uses only vars from one
* sub-relation and the other side uses only vars from the other.
*/


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.43 2000/02/07 04:40:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.44 2000/04/12 17:15:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,7 @@
static RelOptInfo *make_join_rel(Query *root, RelOptInfo *rel1,
RelOptInfo *rel2);
RelOptInfo *rel2);
/*
@@ -44,22 +44,23 @@ make_rels_by_joins(Query *root, int level)
/*
* First, consider left-sided and right-sided plans, in which rels of
* exactly level-1 member relations are joined against base relations.
* We prefer to join using join clauses, but if we find a rel of level-1
* members that has no join clauses, we will generate Cartesian-product
* joins against all base rels not already contained in it.
* We prefer to join using join clauses, but if we find a rel of
* level-1 members that has no join clauses, we will generate
* Cartesian-product joins against all base rels not already contained
* in it.
*
* In the first pass (level == 2), we try to join each base rel to each
* base rel that appears later in base_rel_list. (The mirror-image
* joins are handled automatically by make_join_rel.) In later passes,
* we try to join rels of size level-1 from join_rel_list to each
* base rel in base_rel_list.
* joins are handled automatically by make_join_rel.) In later
* passes, we try to join rels of size level-1 from join_rel_list to
* each base rel in base_rel_list.
*
* We assume that the rels already present in join_rel_list appear in
* decreasing order of level (number of members). This should be true
* since we always add new higher-level rels to the front of the list.
*/
if (level == 2)
r = root->base_rel_list; /* level-1 is base rels */
r = root->base_rel_list;/* level-1 is base rels */
else
r = root->join_rel_list;
for (; r != NIL; r = lnext(r))
@@ -68,21 +69,23 @@ make_rels_by_joins(Query *root, int level)
int old_level = length(old_rel->relids);
List *other_rels;
if (old_level != level-1)
if (old_level != level - 1)
break;
if (level == 2)
other_rels = lnext(r); /* only consider remaining base rels */
other_rels = lnext(r); /* only consider remaining base
* rels */
else
other_rels = root->base_rel_list; /* consider all base rels */
other_rels = root->base_rel_list; /* consider all base rels */
if (old_rel->joininfo != NIL)
{
/*
* Note that if all available join clauses for this rel require
* more than one other rel, we will fail to make any joins against
* it here. That's OK; it'll be considered by "bushy plan" join
* code in a higher-level pass.
* Note that if all available join clauses for this rel
* require more than one other rel, we will fail to make any
* joins against it here. That's OK; it'll be considered by
* "bushy plan" join code in a higher-level pass.
*/
make_rels_by_clause_joins(root,
old_rel,
@@ -90,6 +93,7 @@ make_rels_by_joins(Query *root, int level)
}
else
{
/*
* Oops, we have a relation that is not joined to any other
* relation. Cartesian product time.
@@ -103,10 +107,11 @@ make_rels_by_joins(Query *root, int level)
/*
* Now, consider "bushy plans" in which relations of k base rels are
* joined to relations of level-k base rels, for 2 <= k <= level-2.
* The previous loop left r pointing to the first rel of level level-2.
* The previous loop left r pointing to the first rel of level
* level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is
* a suitable join clause, in order to avoid unreasonable growth of
* We only consider bushy-plan joins for pairs of rels where there is a
* suitable join clause, in order to avoid unreasonable growth of
* planning time.
*/
for (; r != NIL; r = lnext(r))
@@ -115,8 +120,9 @@ make_rels_by_joins(Query *root, int level)
int old_level = length(old_rel->relids);
List *r2;
/* We can quit once past the halfway point (make_join_rel took care
* of making the opposite-direction joins)
/*
* We can quit once past the halfway point (make_join_rel took
* care of making the opposite-direction joins)
*/
if (old_level * 2 < level)
break;
@@ -137,8 +143,10 @@ make_rels_by_joins(Query *root, int level)
{
List *i;
/* OK, we can build a rel of the right level from this pair of
* rels. Do so if there is at least one usable join clause.
/*
* OK, we can build a rel of the right level from this
* pair of rels. Do so if there is at least one usable
* join clause.
*/
foreach(i, old_rel->joininfo)
{
@@ -192,7 +200,7 @@ make_rels_by_clause_joins(Query *root,
foreach(j, other_rels)
{
RelOptInfo *other_rel = (RelOptInfo *) lfirst(j);
RelOptInfo *other_rel = (RelOptInfo *) lfirst(j);
if (is_subseti(unjoined_relids, other_rel->relids))
result = make_join_rel(root, old_rel, other_rel);
@@ -251,8 +259,8 @@ make_rels_by_clauseless_joins(Query *root,
static RelOptInfo *
make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2)
{
RelOptInfo *joinrel;
List *restrictlist;
RelOptInfo *joinrel;
List *restrictlist;
/*
* Find or build the join RelOptInfo, and compute the restrictlist


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.38 2000/03/22 22:08:33 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.39 2000/04/12 17:15:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,14 +27,14 @@
static void best_or_subclause_indices(Query *root, RelOptInfo *rel,
List *subclauses, List *indices,
IndexPath *pathnode);
List *subclauses, List *indices,
IndexPath *pathnode);
static void best_or_subclause_index(Query *root, RelOptInfo *rel,
Expr *subclause, List *indices,
List **retIndexQual,
Oid *retIndexid,
Cost *retStartupCost,
Cost *retTotalCost);
Expr *subclause, List *indices,
List **retIndexQual,
Oid *retIndexid,
Cost *retStartupCost,
Cost *retTotalCost);
/*
@@ -61,8 +61,8 @@ create_or_index_paths(Query *root,
/*
* Check to see if this clause is an 'or' clause, and, if so,
* whether or not each of the subclauses within the 'or' clause
* has been matched by an index. The information used was
* saved by create_index_paths().
* has been matched by an index. The information used was saved
* by create_index_paths().
*/
if (restriction_is_or_clause(clausenode) &&
clausenode->subclauseindices)
@@ -80,6 +80,7 @@ create_or_index_paths(Query *root,
}
if (all_indexable)
{
/*
* OK, build an IndexPath for this OR clause, using the
* best available index for each subclause.
@@ -88,19 +89,23 @@ create_or_index_paths(Query *root,
pathnode->path.pathtype = T_IndexScan;
pathnode->path.parent = rel;
/*
* This is an IndexScan, but the overall result will consist
* of tuples extracted in multiple passes (one for each
* subclause of the OR), so the result cannot be claimed
* to have any particular ordering.
* This is an IndexScan, but the overall result will
* consist of tuples extracted in multiple passes (one for
* each subclause of the OR), so the result cannot be
* claimed to have any particular ordering.
*/
pathnode->path.pathkeys = NIL;
/* We don't actually care what order the index scans in ... */
/*
* We don't actually care what order the index scans in
* ...
*/
pathnode->indexscandir = NoMovementScanDirection;
/* This isn't a nestloop innerjoin, so: */
pathnode->joinrelids = NIL; /* no join clauses here */
pathnode->joinrelids = NIL; /* no join clauses here */
pathnode->rows = rel->rows;
best_or_subclause_indices(root,
@@ -125,7 +130,7 @@ create_or_index_paths(Query *root,
* This routine also creates the indexqual and indexid lists that will
* be needed by the executor. The indexqual list has one entry for each
* scan of the base rel, which is a sublist of indexqual conditions to
* apply in that scan. The implicit semantics are AND across each sublist
* apply in that scan. The implicit semantics are AND across each sublist
* of quals, and OR across the toplevel list (note that the executor
* takes care not to return any single tuple more than once). The indexid
* list gives the OID of the index to be used in each scan.
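Put differently, a tuple is returned by the OR scan if it satisfies every qual in at least one of the sublists, and the executor additionally takes care not to return the same tuple twice when several scans match it. Below is a small self-contained sketch of that AND-within, OR-across evaluation; the predicates and sample values are made up and stand in for real index conditions.

    #include <stdbool.h>
    #include <stdio.h>

    typedef bool (*qual_fn) (int x);

    static bool is_positive(int x) { return x > 0; }
    static bool is_even(int x)     { return x % 2 == 0; }
    static bool is_small(int x)    { return x < 10; }

    int
    main(void)
    {
        /* two "index scans": (positive AND even) OR (small) */
        qual_fn     scan1[] = {is_positive, is_even, NULL};
        qual_fn     scan2[] = {is_small, NULL};
        qual_fn    *scans[] = {scan1, scan2, NULL};

        int         sample[] = {4, 7, -3, 12, 15};
        int         nsample = sizeof(sample) / sizeof(sample[0]);
        int         i;

        for (i = 0; i < nsample; i++)
        {
            bool        matches = false;
            int         s;

            for (s = 0; scans[s] && !matches; s++)  /* OR across sublists */
            {
                int         q;

                matches = true;
                for (q = 0; scans[s][q]; q++)       /* AND within a sublist */
                {
                    if (!scans[s][q](sample[i]))
                    {
                        matches = false;
                        break;
                    }
                }
            }
            printf("%d -> %s\n", sample[i],
                   matches ? "returned" : "filtered out");
        }
        return 0;
    }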
@@ -181,7 +186,7 @@ best_or_subclause_indices(Query *root,
pathnode->indexqual = lappend(pathnode->indexqual, best_indexqual);
pathnode->indexid = lappendi(pathnode->indexid, best_indexid);
if (slist == subclauses) /* first scan? */
if (slist == subclauses)/* first scan? */
pathnode->path.startup_cost = best_startup_cost;
pathnode->path.total_cost += best_total_cost;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.20 2000/02/18 23:47:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.21 2000/04/12 17:15:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,7 @@
static PathKeyItem *makePathKeyItem(Node *key, Oid sortop);
static List *make_canonical_pathkey(Query *root, PathKeyItem *item);
static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
AttrNumber varattno);
AttrNumber varattno);
/*--------------------
@@ -42,8 +42,8 @@ static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
* of scanning the relation and the resulting ordering of the tuples.
* Sequential scan Paths have NIL pathkeys, indicating no known ordering.
* Index scans have Path.pathkeys that represent the chosen index's ordering,
* if any. A single-key index would create a pathkey with a single sublist,
* e.g. ( (tab1.indexkey1/sortop1) ). A multi-key index generates a sublist
* if any. A single-key index would create a pathkey with a single sublist,
* e.g. ( (tab1.indexkey1/sortop1) ). A multi-key index generates a sublist
* per key, e.g. ( (tab1.indexkey1/sortop1) (tab1.indexkey2/sortop2) ) which
* shows major sort by indexkey1 (ordering by sortop1) and minor sort by
* indexkey2 with sortop2.
@@ -56,10 +56,10 @@ static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
* ordering operators used.
*
* Things get more interesting when we consider joins. Suppose we do a
* mergejoin between A and B using the mergeclause A.X = B.Y. The output
* mergejoin between A and B using the mergeclause A.X = B.Y. The output
* of the mergejoin is sorted by X --- but it is also sorted by Y. We
* represent this fact by listing both keys in a single pathkey sublist:
* ( (A.X/xsortop B.Y/ysortop) ). This pathkey asserts that the major
* ( (A.X/xsortop B.Y/ysortop) ). This pathkey asserts that the major
* sort order of the Path can be taken to be *either* A.X or B.Y.
* They are equal, so they are both primary sort keys. By doing this,
* we allow future joins to use either var as a pre-sorted key, so upper
@@ -120,12 +120,12 @@ static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
* We did implement pathkeys just as described above, and found that the
* planner spent a huge amount of time comparing pathkeys, because the
* representation of pathkeys as unordered lists made it expensive to decide
* whether two were equal or not. So, we've modified the representation
* whether two were equal or not. So, we've modified the representation
* as described next.
*
* If we scan the WHERE clause for equijoin clauses (mergejoinable clauses)
* during planner startup, we can construct lists of equivalent pathkey items
* for the query. There could be more than two items per equivalence set;
* for the query. There could be more than two items per equivalence set;
* for example, WHERE A.X = B.Y AND B.Y = C.Z AND D.R = E.S creates the
* equivalence sets { A.X B.Y C.Z } and { D.R E.S } (plus associated sortops).
* Any pathkey item that belongs to an equivalence set implies that all the
@@ -147,20 +147,20 @@ static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
* equivalence set, we instantly add all the other vars equivalenced to it,
* whether they appear yet in the pathkey's relation or not. And we also
* mandate that the pathkey sublist appear in the same order as the
* equivalence set it comes from. (In practice, we simply return a pointer
* equivalence set it comes from. (In practice, we simply return a pointer
* to the relevant equivalence set without building any new sublist at all.)
* This makes comparing pathkeys very simple and fast, and saves a lot of
* work and memory space for pathkey construction as well.
*
* Note that pathkey sublists having just one item still exist, and are
* not expected to be equal() to any equivalence set. This occurs when
* not expected to be equal() to any equivalence set. This occurs when
* we describe a sort order that involves a var that's not mentioned in
* any equijoin clause of the WHERE. We could add singleton sets containing
* such vars to the query's list of equivalence sets, but there's little
* point in doing so.
*
* By the way, it's OK and even useful for us to build equivalence sets
* that mention multiple vars from the same relation. For example, if
* that mention multiple vars from the same relation. For example, if
* we have WHERE A.X = A.Y and we are scanning A using an index on X,
* we can legitimately conclude that the path is sorted by Y as well;
* and this could be handy if Y is the variable used in other join clauses
@@ -179,7 +179,7 @@ static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
static PathKeyItem *
makePathKeyItem(Node *key, Oid sortop)
{
PathKeyItem *item = makeNode(PathKeyItem);
PathKeyItem *item = makeNode(PathKeyItem);
item->key = key;
item->sortop = sortop;
@@ -219,11 +219,13 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
/* We might see a clause X=X; don't make a single-element list from it */
if (equal(item1, item2))
return;
/*
* Our plan is to make a two-element set, then sweep through the existing
* equijoin sets looking for matches to item1 or item2. When we find one,
* we remove that set from equi_key_list and union it into our new set.
* When done, we add the new set to the front of equi_key_list.
* Our plan is to make a two-element set, then sweep through the
* existing equijoin sets looking for matches to item1 or item2. When
* we find one, we remove that set from equi_key_list and union it
* into our new set. When done, we add the new set to the front of
* equi_key_list.
*
* This is a standard UNION-FIND problem, for which there exist better
* data structures than simple lists. If this code ever proves to be
@@ -240,8 +242,11 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
{
/* Found a set to merge into our new set */
newset = LispUnion(newset, curset);
/* Remove old set from equi_key_list. NOTE this does not change
* lnext(cursetlink), so the outer foreach doesn't break.
/*
* Remove old set from equi_key_list. NOTE this does not
* change lnext(cursetlink), so the outer foreach doesn't
* break.
*/
root->equi_key_list = lremove(curset, root->equi_key_list);
freeList(curset); /* might as well recycle old cons cells */
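The merging step is easier to see with a toy version (a sketch under simplifying assumptions, not this commit's code): here every column name just carries an integer set id and a merge relabels one side, whereas the planner keeps real Lists of PathKeyItems and unions them as described above. The driver uses the WHERE clause quoted in the comment block earlier in this file.

    #include <stdio.h>
    #include <string.h>

    #define NCOLS 5

    static const char *cols[NCOLS] = {"A.X", "B.Y", "C.Z", "D.R", "E.S"};
    static int  setid[NCOLS] = {0, 1, 2, 3, 4};     /* initially, each column alone */

    static int
    col_index(const char *name)
    {
        int         i;

        for (i = 0; i < NCOLS; i++)
            if (strcmp(cols[i], name) == 0)
                return i;
        return -1;
    }

    /* every mergejoinable "a = b" clause merges the sets containing a and b */
    static void
    add_equijoined_keys(const char *a, const char *b)
    {
        int         from = setid[col_index(a)];
        int         to = setid[col_index(b)];
        int         i;

        for (i = 0; i < NCOLS; i++)     /* merge: relabel one whole set */
            if (setid[i] == from)
                setid[i] = to;
    }

    int
    main(void)
    {
        int         i;

        /* WHERE A.X = B.Y AND B.Y = C.Z AND D.R = E.S */
        add_equijoined_keys("A.X", "B.Y");
        add_equijoined_keys("B.Y", "C.Z");
        add_equijoined_keys("D.R", "E.S");

        for (i = 0; i < NCOLS; i++)
            printf("%s is in equivalence set %d\n", cols[i], setid[i]);
        return 0;
    }

Running it groups {A.X, B.Y, C.Z} into one set and {D.R, E.S} into another, matching the equivalence sets given in the comment.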
@@ -256,7 +261,7 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
* Given a PathKeyItem, find the equi_key_list subset it is a member of,
* if any. If so, return a pointer to that sublist, which is the
* canonical representation (for this query) of that PathKeyItem's
* equivalence set. If it is not found, return a single-element list
* equivalence set. If it is not found, return a single-element list
* containing the PathKeyItem (when the item has no equivalence peers,
* we just allow it to be a standalone list).
*
@@ -293,13 +298,13 @@ canonicalize_pathkeys(Query *root, List *pathkeys)
foreach(i, pathkeys)
{
List *pathkey = (List *) lfirst(i);
PathKeyItem *item;
List *pathkey = (List *) lfirst(i);
PathKeyItem *item;
/*
* It's sufficient to look at the first entry in the sublist;
* if there are more entries, they're already part of an
* equivalence set by definition.
* It's sufficient to look at the first entry in the sublist; if
* there are more entries, they're already part of an equivalence
* set by definition.
*/
Assert(pathkey != NIL);
item = (PathKeyItem *) lfirst(pathkey);
@@ -319,12 +324,12 @@ canonicalize_pathkeys(Query *root, List *pathkeys)
* one is "better" than the other.
*
* A pathkey can be considered better than another if it is a superset:
* it contains all the keys of the other plus more. For example, either
* it contains all the keys of the other plus more. For example, either
* ((A) (B)) or ((A B)) is better than ((A)).
*
* Because we actually only expect to see canonicalized pathkey sublists,
* we don't have to do the full two-way-subset-inclusion test on each
* pair of sublists that is implied by the above statement. Instead we
* pair of sublists that is implied by the above statement. Instead we
* just do an equal(). In the normal case where multi-element sublists
* are pointers into the root's equi_key_list, equal() will be very fast:
* it will recognize pointer equality when the sublists are the same,
@@ -345,23 +350,25 @@ compare_pathkeys(List *keys1, List *keys2)
List *subkey1 = lfirst(key1);
List *subkey2 = lfirst(key2);
/* We will never have two subkeys where one is a subset of the other,
* because of the canonicalization explained above. Either they are
* equal or they ain't.
/*
* We will never have two subkeys where one is a subset of the
* other, because of the canonicalization explained above. Either
* they are equal or they ain't.
*/
if (! equal(subkey1, subkey2))
return PATHKEYS_DIFFERENT; /* no need to keep looking */
if (!equal(subkey1, subkey2))
return PATHKEYS_DIFFERENT; /* no need to keep looking */
}
/* If we reached the end of only one list, the other is longer and
* therefore not a subset. (We assume the additional sublist(s)
* of the other list are not NIL --- no pathkey list should ever have
* a NIL sublist.)
/*
* If we reached the end of only one list, the other is longer and
* therefore not a subset. (We assume the additional sublist(s) of
* the other list are not NIL --- no pathkey list should ever have a
* NIL sublist.)
*/
if (key1 == NIL && key2 == NIL)
return PATHKEYS_EQUAL;
if (key1 != NIL)
return PATHKEYS_BETTER1; /* key1 is longer */
return PATHKEYS_BETTER1;/* key1 is longer */
return PATHKEYS_BETTER2; /* key2 is longer */
}
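Read alongside the header comment above, the comparison rule is simply: walk the two lists in parallel, fail on the first mismatching sublist, and otherwise let the longer list win. A toy rendering with strings standing in for canonicalized sublists (not planner code):

    #include <stdio.h>
    #include <string.h>

    typedef enum
    {
        PATHKEYS_EQUAL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT
    } PathKeysComparison;

    /* pathkey lists are NULL-terminated arrays of canonical sublist names */
    static PathKeysComparison
    compare_pathkeys(const char **keys1, const char **keys2)
    {
        while (*keys1 && *keys2)
        {
            if (strcmp(*keys1, *keys2) != 0)
                return PATHKEYS_DIFFERENT;
            keys1++;
            keys2++;
        }
        if (*keys1 == NULL && *keys2 == NULL)
            return PATHKEYS_EQUAL;
        return *keys1 ? PATHKEYS_BETTER1 : PATHKEYS_BETTER2;
    }

    int
    main(void)
    {
        static const char *names[] = {"EQUAL", "BETTER1", "BETTER2", "DIFFERENT"};
        const char *sorted_by_a[] = {"{A.X}", NULL};
        const char *sorted_by_a_b[] = {"{A.X}", "{B.Y}", NULL};

        /* ((A) (B)) is better than ((A)): it satisfies every ordering ((A)) does */
        printf("%s\n", names[compare_pathkeys(sorted_by_a_b, sorted_by_a)]);
        printf("%s\n", names[compare_pathkeys(sorted_by_a, sorted_by_a)]);
        return 0;
    }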
@@ -375,8 +382,8 @@ pathkeys_contained_in(List *keys1, List *keys2)
{
switch (compare_pathkeys(keys1, keys2))
{
case PATHKEYS_EQUAL:
case PATHKEYS_BETTER2:
case PATHKEYS_EQUAL:
case PATHKEYS_BETTER2:
return true;
default:
break;
@@ -448,7 +455,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
* do that first.
*/
if (matched_path != NULL &&
compare_fractional_path_costs(matched_path, path, fraction) <= 0)
compare_fractional_path_costs(matched_path, path, fraction) <= 0)
continue;
if (pathkeys_contained_in(pathkeys, path->pathkeys))
@@ -469,7 +476,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
* its "ordering" field, and we will return NIL.)
*
* If 'scandir' is BackwardScanDirection, attempt to build pathkeys
* representing a backwards scan of the index. Return NIL if can't do it.
* representing a backwards scan of the index. Return NIL if can't do it.
*/
List *
build_index_pathkeys(Query *root,
@@ -527,7 +534,7 @@ build_index_pathkeys(Query *root,
/* Normal non-functional index */
while (*indexkeys != 0 && *ordering != InvalidOid)
{
Var *relvar = find_indexkey_var(root, rel, *indexkeys);
Var *relvar = find_indexkey_var(root, rel, *indexkeys);
sortop = *ordering;
if (ScanDirectionIsBackward(scandir))
@@ -569,9 +576,9 @@ find_indexkey_var(Query *root, RelOptInfo *rel, AttrNumber varattno)
foreach(temp, rel->targetlist)
{
Var *tle_var = get_expr(lfirst(temp));
Var *tle_var = get_expr(lfirst(temp));
if (IsA(tle_var, Var) && tle_var->varattno == varattno)
if (IsA(tle_var, Var) &&tle_var->varattno == varattno)
return tle_var;
}
@@ -606,11 +613,12 @@ build_join_pathkeys(List *outer_pathkeys,
List *join_rel_tlist,
List *equi_key_list)
{
/*
* This used to be quite a complex bit of code, but now that all
* pathkey sublists start out life canonicalized, we don't have to
* do a darn thing here! The inner-rel vars we used to need to add
* are *already* part of the outer pathkey!
* pathkey sublists start out life canonicalized, we don't have to do
* a darn thing here! The inner-rel vars we used to need to add are
* *already* part of the outer pathkey!
*
* I'd remove the routine entirely, but maybe someday we'll need it...
*/
@@ -644,16 +652,17 @@ make_pathkeys_for_sortclauses(List *sortclauses,
foreach(i, sortclauses)
{
SortClause *sortcl = (SortClause *) lfirst(i);
Node *sortkey;
PathKeyItem *pathkey;
SortClause *sortcl = (SortClause *) lfirst(i);
Node *sortkey;
PathKeyItem *pathkey;
sortkey = get_sortgroupclause_expr(sortcl, tlist);
pathkey = makePathKeyItem(sortkey, sortcl->sortop);
/*
* The pathkey becomes a one-element sublist, for now;
* canonicalize_pathkeys() might replace it with a longer
* sublist later.
* canonicalize_pathkeys() might replace it with a longer sublist
* later.
*/
pathkeys = lappend(pathkeys, lcons(pathkey, NIL));
}
@@ -691,28 +700,28 @@ find_mergeclauses_for_pathkeys(List *pathkeys, List *restrictinfos)
foreach(i, pathkeys)
{
List *pathkey = lfirst(i);
RestrictInfo *matched_restrictinfo = NULL;
List *j;
List *pathkey = lfirst(i);
RestrictInfo *matched_restrictinfo = NULL;
List *j;
/*
* We can match any of the keys in this pathkey sublist,
* since they're all equivalent. And we can match against
* either left or right side of any mergejoin clause we haven't
* used yet. For the moment we use a dumb "greedy" algorithm
* with no backtracking. Is it worth being any smarter to
* make a longer list of usable mergeclauses? Probably not.
* We can match any of the keys in this pathkey sublist, since
* they're all equivalent. And we can match against either left
* or right side of any mergejoin clause we haven't used yet. For
* the moment we use a dumb "greedy" algorithm with no
* backtracking. Is it worth being any smarter to make a longer
* list of usable mergeclauses? Probably not.
*/
foreach(j, pathkey)
{
PathKeyItem *keyitem = lfirst(j);
Node *key = keyitem->key;
Oid keyop = keyitem->sortop;
List *k;
PathKeyItem *keyitem = lfirst(j);
Node *key = keyitem->key;
Oid keyop = keyitem->sortop;
List *k;
foreach(k, restrictinfos)
{
RestrictInfo *restrictinfo = lfirst(k);
RestrictInfo *restrictinfo = lfirst(k);
Assert(restrictinfo->mergejoinoperator != InvalidOid);
@@ -720,7 +729,7 @@ find_mergeclauses_for_pathkeys(List *pathkeys, List *restrictinfos)
equal(key, get_leftop(restrictinfo->clause))) ||
(keyop == restrictinfo->right_sortop &&
equal(key, get_rightop(restrictinfo->clause)))) &&
! member(restrictinfo, mergeclauses))
!member(restrictinfo, mergeclauses))
{
matched_restrictinfo = restrictinfo;
break;
@@ -732,11 +741,12 @@ find_mergeclauses_for_pathkeys(List *pathkeys, List *restrictinfos)
/*
* If we didn't find a mergeclause, we're done --- any additional
* sort-key positions in the pathkeys are useless. (But we can
* sort-key positions in the pathkeys are useless. (But we can
* still mergejoin if we found at least one mergeclause.)
*/
if (! matched_restrictinfo)
if (!matched_restrictinfo)
break;
/*
* If we did find a usable mergeclause for this sort-key position,
* add it to result list.
@@ -756,7 +766,7 @@ find_mergeclauses_for_pathkeys(List *pathkeys, List *restrictinfos)
* 'mergeclauses' is a list of RestrictInfos for mergejoin clauses
* that will be used in a merge join.
* 'tlist' is a relation target list for either the inner or outer
* side of the proposed join rel. (Not actually needed anymore)
* side of the proposed join rel. (Not actually needed anymore)
*
* Returns a pathkeys list that can be applied to the indicated relation.
*
@@ -785,24 +795,26 @@ make_pathkeys_for_mergeclauses(Query *root,
/*
* Find the key and sortop needed for this mergeclause.
*
* Both sides of the mergeclause should appear in one of the
* query's pathkey equivalence classes, so it doesn't matter
* which one we use here.
* Both sides of the mergeclause should appear in one of the query's
* pathkey equivalence classes, so it doesn't matter which one we
* use here.
*/
key = (Node *) get_leftop(restrictinfo->clause);
sortop = restrictinfo->left_sortop;
/*
* Find pathkey sublist for this sort item. We expect to find
* the canonical set including the mergeclause's left and right
* sides; if we get back just the one item, something is rotten.
* Find pathkey sublist for this sort item. We expect to find the
* canonical set including the mergeclause's left and right sides;
* if we get back just the one item, something is rotten.
*/
item = makePathKeyItem(key, sortop);
pathkey = make_canonical_pathkey(root, item);
Assert(length(pathkey) > 1);
/*
* Since the item we just made is not in the returned canonical set,
* we can free it --- this saves a useful amount of storage in a
* big join tree.
* Since the item we just made is not in the returned canonical
* set, we can free it --- this saves a useful amount of storage
* in a big join tree.
*/
pfree(item);


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.5 2000/02/15 20:49:17 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.6 2000/04/12 17:15:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,30 +37,34 @@
#include "utils/lsyscache.h"
static void create_tidscan_joinpaths(RelOptInfo *rel);
static List *TidqualFromRestrictinfo(List *relids, List *restrictinfo);
static bool isEvaluable(int varno, Node *node);
static Node *TidequalClause(int varno, Expr *node);
static List *TidqualFromExpr(int varno, Expr *expr);
static List *TidqualFromRestrictinfo(List *relids, List *restrictinfo);
static bool isEvaluable(int varno, Node *node);
static Node *TidequalClause(int varno, Expr *node);
static List *TidqualFromExpr(int varno, Expr *expr);
static
bool isEvaluable(int varno, Node *node)
bool
isEvaluable(int varno, Node *node)
{
List *lst;
Expr *expr;
List *lst;
Expr *expr;
if (IsA(node, Const)) return true;
if (IsA(node, Param)) return true;
if (IsA(node, Const))
return true;
if (IsA(node, Param))
return true;
if (IsA(node, Var))
{
Var *var = (Var *)node;
Var *var = (Var *) node;
if (var->varno == varno)
return false;
return true;
}
if (!is_funcclause(node)) return false;
expr = (Expr *)node;
foreach (lst, expr->args)
if (!is_funcclause(node))
return false;
expr = (Expr *) node;
foreach(lst, expr->args)
{
if (!isEvaluable(varno, lfirst(lst)))
return false;
@@ -72,53 +76,60 @@ bool isEvaluable(int varno, Node *node)
/*
* The 2nd parameter should be an opclause
* Extract the right node if the opclause is CTID= ....
* or the left node if the opclause is ....=CTID
* or the left node if the opclause is ....=CTID
*/
static
Node *TidequalClause(int varno, Expr *node)
Node *
TidequalClause(int varno, Expr *node)
{
Node *rnode = 0, *arg1, *arg2, *arg;
Oper *oper;
Var *var;
Const *aconst;
Param *param;
Expr *expr;
Node *rnode = 0,
*arg1,
*arg2,
*arg;
Oper *oper;
Var *var;
Const *aconst;
Param *param;
Expr *expr;
if (!node->oper) return rnode;
if (!node->args) return rnode;
if (length(node->args) != 2) return rnode;
oper = (Oper *) node->oper;
if (!node->oper)
return rnode;
if (!node->args)
return rnode;
if (length(node->args) != 2)
return rnode;
oper = (Oper *) node->oper;
if (oper->opno != TIDEqualOperator)
return rnode;
arg1 = lfirst(node->args);
arg2 = lsecond(node->args);
arg = (Node *)0;
arg = (Node *) 0;
if (IsA(arg1, Var))
{
var = (Var *) arg1;
if (var->varno == varno &&
var->varattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID)
var->varattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID)
arg = arg2;
else if (var->varnoold == varno &&
var->varoattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID)
var->varoattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID)
arg = arg2;
}
if ((!arg) && IsA(arg2, Var))
{
var = (Var *) arg2;
if (var->varno == varno &&
var->varattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID)
var->varattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID)
arg = arg1;
}
if (!arg)
return rnode;
switch (nodeTag(arg))
{
case T_Const:
case T_Const:
aconst = (Const *) arg;
if (aconst->consttype != TIDOID)
return rnode;
@@ -126,27 +137,29 @@ Node *TidequalClause(int varno, Expr *node)
return rnode;
rnode = arg;
break;
case T_Param:
case T_Param:
param = (Param *) arg;
if (param->paramtype != TIDOID)
return rnode;
rnode = arg;
break;
case T_Var:
case T_Var:
var = (Var *) arg;
if (var->varno == varno ||
var->vartype != TIDOID)
var->vartype != TIDOID)
return rnode;
rnode = arg;
break;
case T_Expr:
case T_Expr:
expr = (Expr *) arg;
if (expr->typeOid != TIDOID) return rnode;
if (expr->opType != FUNC_EXPR) return rnode;
if (isEvaluable(varno, (Node *)expr))
if (expr->typeOid != TIDOID)
return rnode;
if (expr->opType != FUNC_EXPR)
return rnode;
if (isEvaluable(varno, (Node *) expr))
rnode = arg;
break;
default:
default:
break;
}
return rnode;
@@ -160,43 +173,43 @@ Node *TidequalClause(int varno, Expr *node)
* When the expr node is an and_clause,we return the list of
* CTID values if we could extract the CTID values from a member
* node.
*/
*/
static
List *TidqualFromExpr(int varno, Expr *expr)
List *
TidqualFromExpr(int varno, Expr *expr)
{
List *rlst = NIL, *lst, *frtn;
Node *node = (Node *) expr, *rnode;
List *rlst = NIL,
*lst,
*frtn;
Node *node = (Node *) expr,
*rnode;
if (is_opclause(node))
{
rnode = TidequalClause(varno, expr);
if (rnode)
{
rlst = lcons(rnode, rlst);
}
}
else if (and_clause(node))
{
foreach (lst, expr->args)
foreach(lst, expr->args)
{
node = lfirst(lst);
if (!IsA(node, Expr))
if (!IsA(node, Expr))
continue;
rlst = TidqualFromExpr(varno, (Expr *)node);
rlst = TidqualFromExpr(varno, (Expr *) node);
if (rlst)
break;
}
}
else if (or_clause(node))
{
foreach (lst, expr->args)
foreach(lst, expr->args)
{
node = lfirst(lst);
if (IsA(node, Expr) &&
(frtn = TidqualFromExpr(varno, (Expr *)node)) )
{
(frtn = TidqualFromExpr(varno, (Expr *) node)))
rlst = nconc(rlst, frtn);
}
else
{
if (rlst)
@@ -207,24 +220,26 @@ List *TidqualFromExpr(int varno, Expr *expr)
}
}
return rlst;
}
}
static List *
TidqualFromRestrictinfo(List *relids, List *restrictinfo)
{
List *lst, *rlst = NIL;
int varno;
Node *node;
Expr *expr;
List *lst,
*rlst = NIL;
int varno;
Node *node;
Expr *expr;
if (length(relids) != 1)
return NIL;
varno = lfirsti(relids);
foreach (lst, restrictinfo)
foreach(lst, restrictinfo)
{
node = lfirst(lst);
if (!IsA(node, RestrictInfo)) continue;
expr = ((RestrictInfo *)node)->clause;
if (!IsA(node, RestrictInfo))
continue;
expr = ((RestrictInfo *) node)->clause;
rlst = TidqualFromExpr(varno, expr);
if (rlst)
break;
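The AND/OR rules above can be restated as: under AND, one arm that yields a CTID is enough, while under OR every arm must yield one or the whole clause is useless for a TID scan. Below is a self-contained toy model of that extraction; the node type is made up and not the real Expr tree, and the real code additionally insists that the non-CTID side of the equality be evaluable without the relation, as isEvaluable() checks.

    #include <stdio.h>

    typedef enum {E_TIDEQ, E_OTHER, E_AND, E_OR} ExprKind;

    typedef struct Expr
    {
        ExprKind    kind;
        const char *tid;            /* E_TIDEQ: the ctid literal, e.g. "(0,1)" */
        struct Expr *left;
        struct Expr *right;         /* E_AND / E_OR operands */
    } Expr;

    /* append usable "ctid = value" literals into out[]; return how many */
    static int
    tidquals(const Expr *e, const char **out, int max)
    {
        int         n;

        switch (e->kind)
        {
            case E_TIDEQ:
                if (max < 1)
                    return 0;
                out[0] = e->tid;
                return 1;
            case E_AND:
                n = tidquals(e->left, out, max);
                if (n > 0)
                    return n;       /* one arm with a TID is enough */
                return tidquals(e->right, out, max);
            case E_OR:
                {
                    int         m;

                    n = tidquals(e->left, out, max);
                    if (n == 0)
                        return 0;   /* this arm spoils the OR */
                    m = tidquals(e->right, out + n, max - n);
                    if (m == 0)
                        return 0;
                    return n + m;
                }
            default:                /* E_OTHER: no TID here */
                break;
        }
        return 0;
    }

    int
    main(void)
    {
        Expr        t1 = {E_TIDEQ, "(0,1)", NULL, NULL};
        Expr        t2 = {E_TIDEQ, "(0,2)", NULL, NULL};
        Expr        other = {E_OTHER, NULL, NULL, NULL};
        Expr        orexpr = {E_OR, NULL, &t1, &t2};
        Expr        andexpr = {E_AND, NULL, &other, &t1};
        Expr        badOr = {E_OR, NULL, &other, &t2};
        const char *out[4];
        int         n,
                    i;

        n = tidquals(&orexpr, out, 4);  /* both arms give a TID: 2 quals */
        printf("ctid='(0,1)' OR ctid='(0,2)'  -> %d tid(s)\n", n);
        for (i = 0; i < n; i++)
            printf("    %s\n", out[i]);

        n = tidquals(&andexpr, out, 4); /* one arm is enough under AND */
        printf("other AND ctid='(0,1)'        -> %d tid(s)\n", n);

        n = tidquals(&badOr, out, 4);   /* a TID-less arm spoils the OR */
        printf("other OR ctid='(0,2)'         -> %d tid(s)\n", n);
        return 0;
    }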
@@ -241,20 +256,20 @@ TidqualFromRestrictinfo(List *relids, List *restrictinfo)
static void
create_tidscan_joinpaths(RelOptInfo *rel)
{
List *rlst = NIL,
*lst;
List *rlst = NIL,
*lst;
foreach (lst, rel->joininfo)
foreach(lst, rel->joininfo)
{
JoinInfo *joininfo = (JoinInfo *) lfirst(lst);
List *restinfo,
*tideval;
List *restinfo,
*tideval;
restinfo = joininfo->jinfo_restrictinfo;
tideval = TidqualFromRestrictinfo(rel->relids, restinfo);
if (length(tideval) == 1)
{
TidPath *pathnode = makeNode(TidPath);
TidPath *pathnode = makeNode(TidPath);
pathnode->path.pathtype = T_TidScan;
pathnode->path.parent = rel;
@@ -278,9 +293,9 @@ create_tidscan_joinpaths(RelOptInfo *rel)
void
create_tidscan_paths(Query *root, RelOptInfo *rel)
{
List *tideval = TidqualFromRestrictinfo(rel->relids,
rel->baserestrictinfo);
List *tideval = TidqualFromRestrictinfo(rel->relids,
rel->baserestrictinfo);
if (tideval)
add_path(rel, (Path *) create_tidscan_path(rel, tideval));
create_tidscan_joinpaths(rel);


@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.88 2000/04/04 01:21:47 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.89 2000/04/12 17:15:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,15 +32,15 @@
static List *switch_outer(List *clauses);
static int set_tlist_sort_info(List *tlist, List *pathkeys);
static int set_tlist_sort_info(List *tlist, List *pathkeys);
static Scan *create_scan_node(Query *root, Path *best_path, List *tlist);
static Join *create_join_node(Query *root, JoinPath *best_path, List *tlist);
static SeqScan *create_seqscan_node(Path *best_path, List *tlist,
List *scan_clauses);
static IndexScan *create_indexscan_node(Query *root, IndexPath *best_path,
					  List *tlist, List *scan_clauses);
static TidScan *create_tidscan_node(TidPath *best_path, List *tlist,
					List *scan_clauses);
static NestLoop *create_nestloop_node(NestPath *best_path, List *tlist,
List *clauses, Plan *outer_node, List *outer_tlist,
Plan *inner_node, List *inner_tlist);
@@ -52,16 +52,16 @@ static HashJoin *create_hashjoin_node(HashPath *best_path, List *tlist,
Plan *inner_node, List *inner_tlist);
static List *fix_indxqual_references(List *indexquals, IndexPath *index_path);
static List *fix_indxqual_sublist(List *indexqual, int baserelid, Oid relam,
					 Form_pg_index index);
static Node *fix_indxqual_operand(Node *node, int baserelid,
					 Form_pg_index index,
					 Oid *opclass);
static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
			   List *indxid, List *indxqual,
			   List *indxqualorig,
			   ScanDirection indexscandir);
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
			 List *tideval);
static NestLoop *make_nestloop(List *qptlist, List *qpqual, Plan *lefttree,
Plan *righttree);
static HashJoin *make_hashjoin(List *tlist, List *qpqual,
@@ -166,8 +166,8 @@ create_scan_node(Query *root, Path *best_path, List *tlist)
case T_TidScan:
node = (Scan *) create_tidscan_node((TidPath *) best_path,
												tlist,
												scan_clauses);
break;
default:
@@ -242,6 +242,7 @@ create_join_node(Query *root, JoinPath *best_path, List *tlist)
}
#ifdef NOT_USED
/*
* * Expensive function pullups may have pulled local predicates *
* into this path node. Put them in the qpqual of the plan node. *
@@ -250,7 +251,7 @@ create_join_node(Query *root, JoinPath *best_path, List *tlist)
if (get_loc_restrictinfo(best_path) != NIL)
set_qpqual((Plan) retval,
nconc(get_qpqual((Plan) retval),
				  get_actual_clauses(get_loc_restrictinfo(best_path))));
#endif
return retval;
@@ -345,17 +346,17 @@ create_indexscan_node(Query *root,
* for lossy indices the indxqual predicates need to be double-checked
* after the index fetches the best-guess tuples.
*
* Since the indexquals were generated from the restriction clauses
* given by scan_clauses, there will normally be some duplications
* between the lists. We get rid of the duplicates, then add back
* if lossy.
* Since the indexquals were generated from the restriction clauses given
* by scan_clauses, there will normally be some duplications between
* the lists. We get rid of the duplicates, then add back if lossy.
*/
if (length(indxqual) > 1)
{
/*
* Build an expression representation of the indexqual, expanding
* the implicit OR and AND semantics of the first- and second-level
* lists.
* the implicit OR and AND semantics of the first- and
* second-level lists.
*/
List *orclauses = NIL;
List *orclause;
@@ -374,8 +375,11 @@ create_indexscan_node(Query *root,
}
else if (indxqual != NIL)
{
/* Here, we can simply treat the first sublist as an independent
* set of qual expressions, since there is no top-level OR behavior.
/*
* Here, we can simply treat the first sublist as an independent
* set of qual expressions, since there is no top-level OR
* behavior.
*/
List *indxqual_list = lfirst(indxqual);
@@ -387,8 +391,9 @@ create_indexscan_node(Query *root,
else
qpqual = scan_clauses;
/* The executor needs a copy with the indexkey on the left of each clause
* and with index attr numbers substituted for table ones.
/*
* The executor needs a copy with the indexkey on the left of each
* clause and with index attr numbers substituted for table ones.
*/
fixed_indxqual = fix_indxqual_references(indxqual, best_path);
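
The comment block above describes how the index scan's qpqual is formed: quals that the index already enforces are dropped from scan_clauses, then added back for a lossy index so the fetched tuples can be rechecked. A toy, self-contained sketch of that set-difference-plus-recheck idea (string quals and all names below are invented for the example, not the real set_difference() data structures):

#include <stdio.h>
#include <string.h>

#define MAXQ 8

static int contains(const char **set, int n, const char *q)
{
    for (int i = 0; i < n; i++)
        if (strcmp(set[i], q) == 0)
            return 1;
    return 0;
}

int main(void)
{
    const char *scan_clauses[] = {"a = 1", "b > 2", "c = 3"};
    const char *indexquals[]   = {"a = 1"};
    const char *qpqual[MAXQ];
    int nqp = 0, lossy = 1;

    /* qpqual := scan_clauses minus the quals the index handles */
    for (int i = 0; i < 3; i++)
        if (!contains(indexquals, 1, scan_clauses[i]))
            qpqual[nqp++] = scan_clauses[i];

    /* a lossy index can return false matches, so recheck its quals too */
    if (lossy)
        qpqual[nqp++] = indexquals[0];

    for (int i = 0; i < nqp; i++)
        printf("recheck: %s\n", qpqual[i]);
    return 0;
}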
@@ -410,11 +415,11 @@ create_indexscan_node(Query *root,
static TidScan *
make_tidscan(List *qptlist,
List *qpqual,
			 Index scanrelid,
List *tideval)
{
	TidScan    *node = makeNode(TidScan);
	Plan	   *plan = &node->scan.plan;
/* cost should be inserted by caller */
plan->state = (EState *) NULL;
@@ -423,7 +428,8 @@ make_tidscan(List *qptlist,
plan->lefttree = NULL;
plan->righttree = NULL;
node->scan.scanrelid = scanrelid;
node->tideval = copyObject(tideval); /* XXX do we really need a copy? */
node->tideval = copyObject(tideval); /* XXX do we really need a
* copy? */
node->needRescan = false;
node->scan.scanstate = (CommonScanState *) NULL;
@@ -438,8 +444,8 @@ make_tidscan(List *qptlist,
static TidScan *
create_tidscan_node(TidPath *best_path, List *tlist, List *scan_clauses)
{
	TidScan    *scan_node;
	Index		scan_relid;
/* there should be exactly one base rel involved... */
Assert(length(best_path->path.parent->relids) == 1);
@@ -452,7 +458,7 @@ create_tidscan_node(TidPath *best_path, List *tlist, List *scan_clauses)
best_path->tideval);
if (best_path->unjoined_relids)
		scan_node->needRescan = true;
copy_path_costsize(&scan_node->scan.plan, &best_path->path);
@@ -467,7 +473,7 @@ create_tidscan_node(TidPath *best_path, List *tlist, List *scan_clauses)
* once we have changed a Var node to refer to a subplan output rather than
* the original relation, it is no longer equal() to an unmodified Var node
* for the same var. So, we cannot easily compare reference-adjusted qual
* clauses to clauses that have not been adjusted. Fortunately, that
* clauses to clauses that have not been adjusted. Fortunately, that
* doesn't seem to be necessary; all the decisions are made before we do
* the reference adjustments.
*
@@ -493,6 +499,7 @@ create_nestloop_node(NestPath *best_path,
if (IsA(inner_node, IndexScan))
{
/*
* An index is being used to reduce the number of tuples scanned
* in the inner relation. If there are join clauses being used
@@ -522,12 +529,13 @@ create_nestloop_node(NestPath *best_path,
{
Index innerrel = innerscan->scan.scanrelid;
/* Remove redundant tests from my clauses, if possible.
* Note we must compare against indxqualorig not the "fixed"
* indxqual (which has index attnos instead of relation attnos,
* and may have been commuted as well).
/*
* Remove redundant tests from my clauses, if possible. Note
* we must compare against indxqualorig not the "fixed"
* indxqual (which has index attnos instead of relation
* attnos, and may have been commuted as well).
*/
				if (length(indxqualorig) == 1)	/* single indexscan? */
clauses = set_difference(clauses, lfirst(indxqualorig));
/* only refs to outer vars get changed in the inner indexqual */
@@ -549,17 +557,20 @@ create_nestloop_node(NestPath *best_path,
}
else if (IsA(inner_node, TidScan))
{
		List	   *inner_tideval = ((TidScan *) inner_node)->tideval;
		TidScan    *innerscan = (TidScan *) inner_node;
((TidScan *) inner_node)->tideval = join_references(inner_tideval, outer_tlist, inner_tlist, innerscan->scan.scanrelid);
}
}
else if (IsA_Join(inner_node))
{
/*
* Materialize the inner join for speed reasons.
*
* XXX It is probably *not* always fastest to materialize an inner
* join --- how can we estimate whether this is a good thing to do?
* join --- how can we estimate whether this is a good thing to
* do?
*/
inner_node = (Plan *) make_noname(inner_tlist,
NIL,
@@ -595,9 +606,9 @@ create_mergejoin_node(MergePath *best_path,
mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
/*
* Remove the mergeclauses from the list of join qual clauses,
* leaving the list of quals that must be checked as qpquals.
* Set those clauses to contain INNER/OUTER var references.
* Remove the mergeclauses from the list of join qual clauses, leaving
* the list of quals that must be checked as qpquals. Set those
* clauses to contain INNER/OUTER var references.
*/
qpqual = join_references(set_difference(clauses, mergeclauses),
outer_tlist,
@@ -655,16 +666,16 @@ create_hashjoin_node(HashPath *best_path,
/*
* NOTE: there will always be exactly one hashclause in the list
* best_path->path_hashclauses (cf. hash_inner_and_outer()).
* We represent it as a list anyway, for convenience with routines
* that want to work on lists of clauses.
* best_path->path_hashclauses (cf. hash_inner_and_outer()). We
* represent it as a list anyway, for convenience with routines that
* want to work on lists of clauses.
*/
hashclauses = get_actual_clauses(best_path->path_hashclauses);
/*
* Remove the hashclauses from the list of join qual clauses,
* leaving the list of quals that must be checked as qpquals.
* Set those clauses to contain INNER/OUTER var references.
* Remove the hashclauses from the list of join qual clauses, leaving
* the list of quals that must be checked as qpquals. Set those
* clauses to contain INNER/OUTER var references.
*/
qpqual = join_references(set_difference(clauses, hashclauses),
outer_tlist,
@@ -779,7 +790,7 @@ fix_indxqual_references(List *indexquals, IndexPath *index_path)
*
* For each qual clause, commute if needed to put the indexkey operand on the
* left, and then change its varno. (We do not need to change the other side
* of the clause.) Also change the operator if necessary.
* of the clause.) Also change the operator if necessary.
*/
static List *
fix_indxqual_sublist(List *indexqual, int baserelid, Oid relam,
@@ -803,14 +814,16 @@ fix_indxqual_sublist(List *indexqual, int baserelid, Oid relam,
length(clause->args) != 2)
elog(ERROR, "fix_indxqual_sublist: indexqual clause is not binary opclause");
/* Which side is the indexkey on?
/*
* Which side is the indexkey on?
*
* get_relattval sets flag&SEL_RIGHT if the indexkey is on the LEFT.
*/
get_relattval((Node *) clause, baserelid,
&relid, &attno, &constval, &flag);
/* Make a copy that will become the fixed clause.
/*
* Make a copy that will become the fixed clause.
*
* We used to try to do a shallow copy here, but that fails if there
* is a subplan in the arguments of the opclause. So just do a
@@ -822,18 +835,19 @@ fix_indxqual_sublist(List *indexqual, int baserelid, Oid relam,
if ((flag & SEL_RIGHT) == 0)
CommuteClause(newclause);
/* Now, determine which index attribute this is,
* change the indexkey operand as needed,
* and get the index opclass.
/*
* Now, determine which index attribute this is, change the
* indexkey operand as needed, and get the index opclass.
*/
lfirst(newclause->args) = fix_indxqual_operand(lfirst(newclause->args),
baserelid,
index,
&opclass);
/* Substitute the appropriate operator if the expression operator
* is merely binary-compatible with the index. This shouldn't fail,
* since indxpath.c found it before...
/*
* Substitute the appropriate operator if the expression operator
* is merely binary-compatible with the index. This shouldn't
* fail, since indxpath.c found it before...
*/
newopno = indexable_operator(newclause, opclass, relam, true);
if (newopno == InvalidOid)
@@ -861,12 +875,14 @@ fix_indxqual_operand(Node *node, int baserelid, Form_pg_index index,
if (index->indkey[pos] == varatt)
{
Node *newnode = copyObject(node);
((Var *) newnode)->varattno = pos + 1;
*opclass = index->indclass[pos];
return newnode;
}
}
}
/*
* Oops, this Var isn't the indexkey!
*/
@@ -876,13 +892,13 @@ fix_indxqual_operand(Node *node, int baserelid, Form_pg_index index,
/*
* Else, it must be a func expression representing a functional index.
*
* Currently, there is no need for us to do anything here for
* functional indexes. If nodeIndexscan.c sees a func clause as the left
* or right-hand toplevel operand of an indexqual, it assumes that that is
* a reference to the functional index's value and makes the appropriate
* substitution. (It would be cleaner to make the substitution here, I
* think --- suspect this issue if a join clause involving a function call
* misbehaves...)
* Currently, there is no need for us to do anything here for functional
* indexes. If nodeIndexscan.c sees a func clause as the left or
* right-hand toplevel operand of an indexqual, it assumes that that
* is a reference to the functional index's value and makes the
* appropriate substitution. (It would be cleaner to make the
* substitution here, I think --- suspect this issue if a join clause
* involving a function call misbehaves...)
*/
/* indclass[0] is the only class of a functional index */
@@ -915,6 +931,7 @@ switch_outer(List *clauses)
Assert(op && IsA(op, Var));
if (var_is_outer(op))
{
/*
* Duplicate just enough of the structure to allow commuting
* the clause without changing the original list. Could use
@@ -954,21 +971,21 @@ set_tlist_sort_info(List *tlist, List *pathkeys)
foreach(i, pathkeys)
{
		List	   *keysublist = (List *) lfirst(i);
		PathKeyItem *pathkey = NULL;
		Resdom	   *resdom = NULL;
		List	   *j;
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to
* an available Var in the tlist.
* sublist. For now, we take the first one that corresponds to an
* available Var in the tlist.
*
* XXX if we have a choice, is there any way of figuring out which
* might be cheapest to execute? (For example, int4lt is likely
* much cheaper to execute than numericlt, but both might appear in
* the same pathkey sublist...) Not clear that we ever will have
* a choice in practice, so it may not matter.
* much cheaper to execute than numericlt, but both might appear
* in the same pathkey sublist...) Not clear that we ever will
* have a choice in practice, so it may not matter.
*/
foreach(j, keysublist)
{
@@ -982,12 +999,12 @@ set_tlist_sort_info(List *tlist, List *pathkeys)
elog(ERROR, "set_tlist_sort_info: cannot find tlist item to sort");
/*
* The resdom might be already marked as a sort key, if the pathkeys
* contain duplicate entries. (This can happen in scenarios where
* multiple mergejoinable clauses mention the same var, for example.)
* In that case the current pathkey is essentially a no-op, because
* only one value can be seen within any subgroup where it would be
* consulted. We can ignore it.
* The resdom might be already marked as a sort key, if the
* pathkeys contain duplicate entries. (This can happen in
* scenarios where multiple mergejoinable clauses mention the same
* var, for example.) In that case the current pathkey is
* essentially a no-op, because only one value can be seen within
* any subgroup where it would be consulted. We can ignore it.
*/
if (resdom->reskey == 0)
{
@@ -1195,7 +1212,9 @@ make_hash(List *tlist, Var *hashkey, Plan *lefttree)
Plan *plan = &node->plan;
copy_plan_costsize(plan, lefttree);
/* For plausibility, make startup & total costs equal total cost of
/*
* For plausibility, make startup & total costs equal total cost of
* input plan; this only affects EXPLAIN display not decisions.
*/
plan->startup_cost = plan->total_cost;
@@ -1237,7 +1256,7 @@ make_sort(List *tlist, Oid nonameid, Plan *lefttree, int keycount)
Plan *plan = &node->plan;
Path sort_path; /* dummy for result of cost_sort */
	copy_plan_costsize(plan, lefttree); /* only care about copying size */
cost_sort(&sort_path, NIL, lefttree->plan_rows, lefttree->plan_width);
plan->startup_cost = sort_path.startup_cost + lefttree->total_cost;
plan->total_cost = sort_path.total_cost + lefttree->total_cost;
@@ -1262,9 +1281,11 @@ make_material(List *tlist,
Plan *plan = &node->plan;
copy_plan_costsize(plan, lefttree);
/* For plausibility, make startup & total costs equal total cost of
* input plan; this only affects EXPLAIN display not decisions.
* XXX shouldn't we charge some additional cost for materialization?
/*
* For plausibility, make startup & total costs equal total cost of
* input plan; this only affects EXPLAIN display not decisions. XXX
* shouldn't we charge some additional cost for materialization?
*/
plan->startup_cost = plan->total_cost;
plan->state = (EState *) NULL;
@@ -1285,18 +1306,21 @@ make_agg(List *tlist, List *qual, Plan *lefttree)
Plan *plan = &node->plan;
copy_plan_costsize(plan, lefttree);
/*
* Charge one cpu_operator_cost per aggregate function per input tuple.
* Charge one cpu_operator_cost per aggregate function per input
* tuple.
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows *
(length(pull_agg_clause((Node *) tlist)) +
length(pull_agg_clause((Node *) qual)));
/*
* We will produce a single output tuple if the input is not a Group,
* and a tuple per group otherwise. For now, estimate the number of
* groups as 10% of the number of tuples --- bogus, but how to do better?
* (Note we assume the input Group node is in "tuplePerGroup" mode,
* so it didn't reduce its row count already.)
* groups as 10% of the number of tuples --- bogus, but how to do
* better? (Note we assume the input Group node is in "tuplePerGroup"
* mode, so it didn't reduce its row count already.)
*/
if (IsA(lefttree, Group))
plan->plan_rows *= 0.1;
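
The costing comments above are easy to check with a small worked example. The sketch below just redoes that arithmetic: it charges one cpu_operator_cost per aggregate per input row and guesses output rows at 10% of the input when grouping. The 0.0025 figure and the row counts are assumptions chosen for illustration, not values taken from this commit:

#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;   /* assumed planner default */
    double input_rows = 100000.0;
    int    n_agg_exprs = 3;              /* aggregates found in tlist + qual */
    int    input_is_group = 1;

    double added_cost = cpu_operator_cost * input_rows * n_agg_exprs;

    /* one row out per group; groups guessed at 10% of input when grouping */
    double output_rows = input_is_group ? input_rows * 0.1 : 1.0;

    printf("added cost %.1f, estimated output rows %.0f\n",
           added_cost, output_rows);
    return 0;
}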
@@ -1326,19 +1350,21 @@ make_group(List *tlist,
Plan *plan = &node->plan;
copy_plan_costsize(plan, lefttree);
/*
* Charge one cpu_operator_cost per comparison per input tuple.
* We assume all columns get compared at most of the tuples.
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples.
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * ngrp;
/*
* If tuplePerGroup (which is named exactly backwards) is true,
* we will return all the input tuples, so the input node's row count
* is OK. Otherwise, we'll return only one tuple from each group.
* For now, estimate the number of groups as 10% of the number of
* tuples --- bogus, but how to do better?
* If tuplePerGroup (which is named exactly backwards) is true, we
* will return all the input tuples, so the input node's row count is
* OK. Otherwise, we'll return only one tuple from each group. For
* now, estimate the number of groups as 10% of the number of tuples
* --- bogus, but how to do better?
*/
if (! tuplePerGroup)
if (!tuplePerGroup)
plan->plan_rows *= 0.1;
plan->state = (EState *) NULL;
@@ -1369,11 +1395,13 @@ make_unique(List *tlist, Plan *lefttree, List *distinctList)
List *slitem;
copy_plan_costsize(plan, lefttree);
/*
* Charge one cpu_operator_cost per comparison per input tuple.
* We assume all columns get compared at most of the tuples.
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples.
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
/*
* As for Group, we make the unsupported assumption that there will be
* 10% as many tuples out as in.
@@ -1388,14 +1416,17 @@ make_unique(List *tlist, Plan *lefttree, List *distinctList)
node->nonameid = _NONAME_RELATION_ID_;
node->keycount = 0;
/* convert SortClause list into array of attr indexes, as wanted by exec */
/*
* convert SortClause list into array of attr indexes, as wanted by
* exec
*/
Assert(numCols > 0);
uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
foreach(slitem, distinctList)
{
		SortClause *sortcl = (SortClause *) lfirst(slitem);
		TargetEntry *tle = get_sortgroupclause_tle(sortcl, tlist);
uniqColIdx[keyno++] = tle->resdom->resno;
}
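
The loop above converts the SortClause list for DISTINCT into a plain array of column positions, which is the form the executor wants. A minimal standalone sketch of that conversion, using invented names and a string target list instead of TargetEntry nodes:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *tlist[] = {"id", "name", "price"};   /* resno = index + 1 */
    const char *distinct_cols[] = {"name", "id"};
    int uniqColIdx[2];
    int numCols = 2;

    /* for each DISTINCT column, record its 1-based position in the tlist */
    for (int k = 0; k < numCols; k++)
        for (int i = 0; i < 3; i++)
            if (strcmp(distinct_cols[k], tlist[i]) == 0)
                uniqColIdx[k] = i + 1;

    for (int k = 0; k < numCols; k++)
        printf("unique key %d -> column %d\n", k, uniqColIdx[k]);
    return 0;
}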

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.45 2000/02/15 20:49:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.46 2000/04/12 17:15:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,7 @@
static void add_restrict_and_join_to_rel(Query *root, Node *clause);
static void add_join_info_to_rels(Query *root, RestrictInfo *restrictinfo,
					  Relids join_relids);
static void add_vars_to_targetlist(Query *root, List *vars);
static void check_mergejoinable(RestrictInfo *restrictinfo);
static void check_hashjoinable(RestrictInfo *restrictinfo);
@@ -83,7 +83,7 @@ add_vars_to_targetlist(Query *root, List *vars)
*
* If we have a range variable in the FROM clause that does not appear
* in the target list nor qualifications, we must add it to the base
* relation list so that it will be joined. For instance, "select f.x
* relation list so that it will be joined. For instance, "select f.x
* from foo f, foo f2" is a join of f and f2. Note that if we have
* "select foo.x from foo f", it also gets turned into a join (between
* foo as foo and foo as f).
@@ -106,13 +106,15 @@ add_missing_rels_to_query(Query *root)
{
RelOptInfo *rel = get_base_rel(root, varno);
/* If the rel isn't otherwise referenced, give it a dummy
/*
* If the rel isn't otherwise referenced, give it a dummy
* targetlist consisting of its own OID.
*/
if (rel->targetlist == NIL)
{
Var *var = makeVar(varno, ObjectIdAttributeNumber,
OIDOID, -1, 0);
add_var_to_tlist(rel, var);
}
}
@@ -142,14 +144,14 @@ add_restrict_and_join_to_rels(Query *root, List *clauses)
List *clause;
foreach(clause, clauses)
add_restrict_and_join_to_rel(root, (Node*) lfirst(clause));
add_restrict_and_join_to_rel(root, (Node *) lfirst(clause));
}
/*
* add_restrict_and_join_to_rel
* Add clause information to either the 'RestrictInfo' or 'JoinInfo' field
* (depending on whether the clause is a join) of each base relation
* mentioned in the clause. A RestrictInfo node is created and added to
* mentioned in the clause. A RestrictInfo node is created and added to
* the appropriate list for each rel. Also, if the clause uses a
* mergejoinable operator, enter the left- and right-side expressions
* into the query's lists of equijoined vars.
@@ -175,6 +177,7 @@ add_restrict_and_join_to_rel(Query *root, Node *clause)
if (length(relids) == 1)
{
/*
* There is only one relation participating in 'clause', so
* 'clause' must be a restriction clause for that relation.
@@ -183,21 +186,24 @@ add_restrict_and_join_to_rel(Query *root, Node *clause)
rel->baserestrictinfo = lcons(restrictinfo,
rel->baserestrictinfo);
/*
* Check for a "mergejoinable" clause even though it's not a join
* clause. This is so that we can recognize that "a.x = a.y" makes
* x and y eligible to be considered equal, even when they belong
* to the same rel. Without this, we would not recognize that
* "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to consider
* z and q equal after their rels are joined.
* clause. This is so that we can recognize that "a.x = a.y"
* makes x and y eligible to be considered equal, even when they
* belong to the same rel. Without this, we would not recognize
* that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to
* consider z and q equal after their rels are joined.
*/
check_mergejoinable(restrictinfo);
}
else
{
/*
* 'clause' is a join clause, since there is more than one atom in
* the relid list. Set additional RestrictInfo fields for joining.
* the relid list. Set additional RestrictInfo fields for
* joining.
*
* We need the merge info whether or not mergejoin is enabled (for
* constructing equijoined-var lists), but we don't bother setting
@@ -206,16 +212,19 @@ add_restrict_and_join_to_rel(Query *root, Node *clause)
check_mergejoinable(restrictinfo);
if (enable_hashjoin)
check_hashjoinable(restrictinfo);
/*
* Add clause to the join lists of all the relevant
* relations. (If, perchance, 'clause' contains NO vars, then
* nothing will happen...)
* Add clause to the join lists of all the relevant relations.
* (If, perchance, 'clause' contains NO vars, then nothing will
* happen...)
*/
add_join_info_to_rels(root, restrictinfo, relids);
/*
* Add vars used in the join clause to targetlists of member relations,
* so that they will be emitted by the plan nodes that scan those
* relations (else they won't be available at the join node!).
* Add vars used in the join clause to targetlists of member
* relations, so that they will be emitted by the plan nodes that
* scan those relations (else they won't be available at the join
* node!).
*/
add_vars_to_targetlist(root, vars);
}
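
The restructured comments above spell out the classification rule in add_restrict_and_join_to_rel(): a clause that references exactly one relation becomes a base restriction for that relation, while a clause referencing several becomes join info attached to each of them. A tiny illustration of just that rule (the sample quals and counts are made up):

#include <stdio.h>

int main(void)
{
    struct { const char *clause; int nrelids; } quals[] = {
        {"a.x = 1",   1},
        {"a.x = b.y", 2},
        {"a.x = a.y", 1},   /* still a restriction, but mergejoin-checked */
    };

    for (int i = 0; i < 3; i++)
        printf("%-12s -> %s\n", quals[i].clause,
               quals[i].nrelids == 1 ? "baserestrictinfo" : "joininfo");
    return 0;
}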
@@ -267,7 +276,7 @@ add_join_info_to_rels(Query *root, RestrictInfo *restrictinfo,
joininfo = find_joininfo_node(get_base_rel(root, cur_relid),
unjoined_relids);
joininfo->jinfo_restrictinfo = lcons(restrictinfo,
										 joininfo->jinfo_restrictinfo);
}
}
@@ -296,16 +305,16 @@ check_mergejoinable(RestrictInfo *restrictinfo)
leftOp,
rightOp;
if (! is_opclause((Node *) clause))
if (!is_opclause((Node *) clause))
return;
left = get_leftop(clause);
right = get_rightop(clause);
/* caution: is_opclause accepts more than I do, so check it */
if (! right)
if (!right)
return; /* unary opclauses need not apply */
if (!IsA(left, Var) || !IsA(right, Var))
if (!IsA(left, Var) ||!IsA(right, Var))
return;
opno = ((Oper *) clause->oper)->opno;
@@ -339,16 +348,16 @@ check_hashjoinable(RestrictInfo *restrictinfo)
*right;
Oid opno;
if (! is_opclause((Node *) clause))
if (!is_opclause((Node *) clause))
return;
left = get_leftop(clause);
right = get_rightop(clause);
/* caution: is_opclause accepts more than I do, so check it */
if (! right)
if (!right)
return; /* unary opclauses need not apply */
if (!IsA(left, Var) || !IsA(right, Var))
if (!IsA(left, Var) ||!IsA(right, Var))
return;
opno = ((Oper *) clause->oper)->opno;
@@ -356,7 +365,5 @@ check_hashjoinable(RestrictInfo *restrictinfo)
if (op_hashjoinable(opno,
left->vartype,
right->vartype))
{
restrictinfo->hashjoinoperator = opno;
}
}
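
check_mergejoinable() and check_hashjoinable() above both apply the same shape test before consulting the operator catalogs: a binary operator clause with a plain Var on each side, whose operator is flagged as join-capable. A self-contained sketch of that test with invented node types (not the real parse-node representation):

#include <stdio.h>
#include <stdbool.h>

typedef enum { NODE_VAR, NODE_CONST, NODE_FUNC } NodeKind;

typedef struct {
    bool     is_op;              /* is this an operator clause at all? */
    int      nargs;              /* 1 = unary, 2 = binary */
    NodeKind left, right;
    bool     op_mergejoinable;   /* stand-in for the catalog lookup */
} Clause;

static bool clause_is_mergejoinable(const Clause *c)
{
    if (!c->is_op || c->nargs != 2)
        return false;            /* unary opclauses need not apply */
    if (c->left != NODE_VAR || c->right != NODE_VAR)
        return false;
    return c->op_mergejoinable;
}

int main(void)
{
    Clause eq_vars  = {true, 2, NODE_VAR, NODE_VAR,   true};
    Clause eq_const = {true, 2, NODE_VAR, NODE_CONST, true};
    printf("a.x = b.y -> %d\n", clause_is_mergejoinable(&eq_vars));
    printf("a.x = 1   -> %d\n", clause_is_mergejoinable(&eq_const));
    return 0;
}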

View File

@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.54 2000/03/24 21:40:43 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.55 2000/04/12 17:15:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
static Plan *subplanner(Query *root, List *flat_tlist, List *qual,
			 double tuple_fraction);
/*--------------------
@@ -55,12 +55,12 @@ static Plan *subplanner(Query *root, List *flat_tlist, List *qual,
* Query field and not a passed parameter is that the low-level routines
* in indxpath.c need to see it.) The pathkeys value passed to query_planner
* has not yet been "canonicalized", since the necessary info does not get
* computed until subplanner() scans the qual clauses. We canonicalize it
* computed until subplanner() scans the qual clauses. We canonicalize it
* inside subplanner() as soon as that task is done. The output value
* will be in canonical form as well.
*
* tuple_fraction is interpreted as follows:
* 0 (or less): expect all tuples to be retrieved (normal case)
* 0 (or less): expect all tuples to be retrieved (normal case)
* 0 < tuple_fraction < 1: expect the given fraction of tuples available
* from the plan to be retrieved
* tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
@@ -91,7 +91,7 @@ query_planner(Query *root,
if (root->commandType != CMD_SELECT)
elog(ERROR, "Empty range table for non-SELECT query");
		root->query_pathkeys = NIL;		/* signal unordered result */
/* Make childless Result node to evaluate given tlist. */
return (Plan *) make_result(tlist, (Node *) qual, (Plan *) NULL);
@@ -115,8 +115,8 @@ query_planner(Query *root,
*
* All subplan nodes will have "flat" (var-only) tlists.
*
* This implies that all expression evaluations are done at the root
* of the plan tree. Once upon a time there was code to try to push
* This implies that all expression evaluations are done at the root of
* the plan tree. Once upon a time there was code to try to push
* expensive function calls down to lower plan nodes, but that's dead
* code and has been for a long time...
*/
@@ -132,9 +132,10 @@ query_planner(Query *root,
*/
if (constant_qual)
{
/*
* The result node will also be responsible for evaluating
* the originally requested tlist.
* The result node will also be responsible for evaluating the
* originally requested tlist.
*/
subplan = (Plan *) make_result(tlist,
(Node *) constant_qual,
@@ -142,9 +143,11 @@ query_planner(Query *root,
}
else
{
/*
* Replace the toplevel plan node's flattened target list with the
* targetlist given by my caller, so that expressions are evaluated.
* targetlist given by my caller, so that expressions are
* evaluated.
*/
subplan->targetlist = tlist;
}
@@ -180,8 +183,9 @@ subplanner(Query *root,
* Initialize the targetlist and qualification, adding entries to
* base_rel_list as relation references are found (e.g., in the
* qualification, the targetlist, etc.). Restrict and join clauses
* are added to appropriate lists belonging to the mentioned relations,
* and we also build lists of equijoined keys for pathkey construction.
* are added to appropriate lists belonging to the mentioned
* relations, and we also build lists of equijoined keys for pathkey
* construction.
*/
root->base_rel_list = NIL;
root->join_rel_list = NIL;
@@ -192,9 +196,9 @@ subplanner(Query *root,
add_missing_rels_to_query(root);
/*
* We should now have all the pathkey equivalence sets built,
* so it's now possible to convert the requested query_pathkeys
* to canonical form.
* We should now have all the pathkey equivalence sets built, so it's
* now possible to convert the requested query_pathkeys to canonical
* form.
*/
root->query_pathkeys = canonicalize_pathkeys(root, root->query_pathkeys);
@@ -203,20 +207,22 @@ subplanner(Query *root,
*/
final_rel = make_one_rel(root);
if (! final_rel)
if (!final_rel)
{
/*
* We expect to end up here for a trivial INSERT ... VALUES query
* (which will have a target relation, so it gets past query_planner's
* check for empty range table; but the target rel is unreferenced
* and not marked inJoinSet, so we find there is nothing to join).
*
* (which will have a target relation, so it gets past
* query_planner's check for empty range table; but the target rel
* is unreferenced and not marked inJoinSet, so we find there is
* nothing to join).
*
* It's also possible to get here if the query was rewritten by the
* rule processor (creating rangetable entries not marked inJoinSet)
* but the rules either did nothing or were simplified to nothing
* by constant-expression folding. So, don't complain.
* rule processor (creating rangetable entries not marked
* inJoinSet) but the rules either did nothing or were simplified
* to nothing by constant-expression folding. So, don't complain.
*/
		root->query_pathkeys = NIL;		/* signal unordered result */
/* Make childless Result node to evaluate given tlist. */
return (Plan *) make_result(flat_tlist, (Node *) qual, (Plan *) NULL);
@@ -246,16 +252,16 @@ subplanner(Query *root,
#endif
/*
* Now that we have an estimate of the final rel's size, we can convert
* a tuple_fraction specified as an absolute count (ie, a LIMIT option)
* into a fraction of the total tuples.
* Now that we have an estimate of the final rel's size, we can
* convert a tuple_fraction specified as an absolute count (ie, a
* LIMIT option) into a fraction of the total tuples.
*/
if (tuple_fraction >= 1.0)
tuple_fraction /= final_rel->rows;
/*
* Determine the cheapest path, independently of any ordering
* considerations. We do, however, take into account whether the
* considerations. We do, however, take into account whether the
* whole plan is expected to be evaluated or not.
*/
if (tuple_fraction <= 0.0 || tuple_fraction >= 1.0)
@@ -271,8 +277,8 @@ subplanner(Query *root,
/*
* Select the best path and create a subplan to execute it.
*
* If no special sort order is wanted, or if the cheapest path is
* already appropriately ordered, we use the cheapest path found above.
* If no special sort order is wanted, or if the cheapest path is already
* appropriately ordered, we use the cheapest path found above.
*/
if (root->query_pathkeys == NIL ||
pathkeys_contained_in(root->query_pathkeys,
@@ -284,7 +290,8 @@ subplanner(Query *root,
/*
* Otherwise, look to see if we have an already-ordered path that is
* cheaper than doing an explicit sort on the cheapest-total-cost path.
* cheaper than doing an explicit sort on the cheapest-total-cost
* path.
*/
cheapestpath = final_rel->cheapest_total_path;
presortedpath =
@@ -310,11 +317,11 @@ subplanner(Query *root,
}
/*
* Nothing for it but to sort the cheapest-total-cost path --- but we let
* the caller do that. union_planner has to be able to add a sort node
* anyway, so no need for extra code here. (Furthermore, the given
* pathkeys might involve something we can't compute here, such as an
* aggregate function...)
* Nothing for it but to sort the cheapest-total-cost path --- but we
* let the caller do that. union_planner has to be able to add a sort
* node anyway, so no need for extra code here. (Furthermore, the
* given pathkeys might involve something we can't compute here, such
* as an aggregate function...)
*/
root->query_pathkeys = cheapestpath->pathkeys;
return create_plan(root, cheapestpath);
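
Two decisions from the subplanner() hunks above are easier to see with numbers: an absolute LIMIT-style count is divided by the estimated result size to get a fraction, and that fraction then decides whether total cost or startup cost should dominate the path choice. The figures below are invented purely for illustration:

#include <stdio.h>

int main(void)
{
    double final_rel_rows = 50000.0;
    double tuple_fraction = 20.0;        /* came in as an absolute "LIMIT 20" */

    if (tuple_fraction >= 1.0)
        tuple_fraction /= final_rel_rows;   /* now a fraction of the result */

    /* whole-result retrieval favours total cost; partial favours startup */
    const char *pick =
        (tuple_fraction <= 0.0 || tuple_fraction >= 1.0)
        ? "cheapest_total_path"
        : "path with best startup cost plus fraction of run cost";

    printf("fraction = %g, choose %s\n", tuple_fraction, pick);
    return 0;
}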

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.78 2000/03/21 05:12:01 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.79 2000/04/12 17:15:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,10 +38,10 @@
static List *make_subplanTargetList(Query *parse, List *tlist,
						AttrNumber **groupColIdx);
static Plan *make_groupplan(List *group_tlist, bool tuplePerGroup,
			   List *groupClause, AttrNumber *grpColIdx,
			   bool is_presorted, Plan *subplan);
static Plan *make_sortplan(List *tlist, List *sortcls, Plan *plannode);
/*****************************************************************************
@@ -64,7 +64,7 @@ planner(Query *parse)
transformKeySetQuery(parse);
/* primary planning entry point (may recurse for subplans) */
result_plan = subquery_planner(parse, -1.0 /* default case */);
result_plan = subquery_planner(parse, -1.0 /* default case */ );
Assert(PlannerQueryLevel == 1);
@@ -110,21 +110,22 @@ planner(Query *parse)
Plan *
subquery_planner(Query *parse, double tuple_fraction)
{
/*
* A HAVING clause without aggregates is equivalent to a WHERE clause
* (except it can only refer to grouped fields). If there are no
* aggs anywhere in the query, then we don't want to create an Agg
* plan node, so merge the HAVING condition into WHERE. (We used to
* (except it can only refer to grouped fields). If there are no aggs
* anywhere in the query, then we don't want to create an Agg plan
* node, so merge the HAVING condition into WHERE. (We used to
* consider this an error condition, but it seems to be legal SQL.)
*/
if (parse->havingQual != NULL && ! parse->hasAggs)
if (parse->havingQual != NULL && !parse->hasAggs)
{
if (parse->qual == NULL)
parse->qual = parse->havingQual;
else
parse->qual = (Node *) make_andclause(lappend(lcons(parse->qual,
NIL),
											  parse->havingQual));
parse->havingQual = NULL;
}
@@ -144,8 +145,8 @@ subquery_planner(Query *parse, double tuple_fraction)
/*
* Canonicalize the qual, and convert it to implicit-AND format.
*
* XXX Is there any value in re-applying eval_const_expressions
* after canonicalize_qual?
* XXX Is there any value in re-applying eval_const_expressions after
* canonicalize_qual?
*/
parse->qual = (Node *) canonicalize_qual((Expr *) parse->qual, true);
#ifdef OPTIMIZER_DEBUG
@@ -169,15 +170,17 @@ subquery_planner(Query *parse, double tuple_fraction)
if (parse->groupClause != NIL)
{
/*
* Check for ungrouped variables passed to subplans.
* Note we do NOT do this for subplans in WHERE; it's legal
* there because WHERE is evaluated pre-GROUP.
* Check for ungrouped variables passed to subplans. Note we
* do NOT do this for subplans in WHERE; it's legal there
* because WHERE is evaluated pre-GROUP.
*
* An interesting fine point: if we reassigned a HAVING qual
* into WHERE above, then we will accept references to ungrouped
* vars from subplans in the HAVING qual. This is not entirely
* consistent, but it doesn't seem particularly harmful...
* An interesting fine point: if we reassigned a HAVING qual into
* WHERE above, then we will accept references to ungrouped
* vars from subplans in the HAVING qual. This is not
* entirely consistent, but it doesn't seem particularly
* harmful...
*/
check_subplans_for_ungrouped_vars((Node *) parse->targetList,
parse);
@@ -218,8 +221,8 @@ subquery_planner(Query *parse, double tuple_fraction)
* tuple_fraction is the fraction of tuples we expect will be retrieved
*
* tuple_fraction is interpreted as follows:
* < 0: determine fraction by inspection of query (normal case)
* 0: expect all tuples to be retrieved
* < 0: determine fraction by inspection of query (normal case)
* 0: expect all tuples to be retrieved
* 0 < tuple_fraction < 1: expect the given fraction of tuples available
* from the plan to be retrieved
* tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
@@ -251,13 +254,18 @@ union_planner(Query *parse,
parse->commandType,
parse->resultRelation,
parse->rtable);
/*
* We leave current_pathkeys NIL indicating we do not know sort order.
* Actually, for a normal UNION we have done an explicit sort; ought
* to change interface to plan_union_queries to pass that info back!
* We leave current_pathkeys NIL indicating we do not know sort
* order. Actually, for a normal UNION we have done an explicit
* sort; ought to change interface to plan_union_queries to pass
* that info back!
*/
/* Calculate pathkeys that represent grouping/ordering requirements */
/*
* Calculate pathkeys that represent grouping/ordering
* requirements
*/
group_pathkeys = make_pathkeys_for_sortclauses(parse->groupClause,
tlist);
sort_pathkeys = make_pathkeys_for_sortclauses(parse->sortClause,
@@ -280,13 +288,13 @@ union_planner(Query *parse,
rt_index);
/*
* Fix up outer target list. NOTE: unlike the case for non-inherited
* query, we pass the unfixed tlist to subplans, which do their own
* fixing. But we still want to fix the outer target list afterwards.
* I *think* this is correct --- doing the fix before recursing is
* definitely wrong, because preprocess_targetlist() will do the
* wrong thing if invoked twice on the same list. Maybe that is a bug?
* tgl 6/6/99
* Fix up outer target list. NOTE: unlike the case for
* non-inherited query, we pass the unfixed tlist to subplans,
* which do their own fixing. But we still want to fix the outer
* target list afterwards. I *think* this is correct --- doing the
* fix before recursing is definitely wrong, because
* preprocess_targetlist() will do the wrong thing if invoked
* twice on the same list. Maybe that is a bug? tgl 6/6/99
*/
tlist = preprocess_targetlist(tlist,
parse->commandType,
@@ -295,12 +303,16 @@ union_planner(Query *parse,
if (parse->rowMark != NULL)
elog(ERROR, "SELECT FOR UPDATE is not supported for inherit queries");
/*
* We leave current_pathkeys NIL indicating we do not know sort order
* of the Append-ed results.
* We leave current_pathkeys NIL indicating we do not know sort
* order of the Append-ed results.
*/
/* Calculate pathkeys that represent grouping/ordering requirements */
/*
* Calculate pathkeys that represent grouping/ordering
* requirements
*/
group_pathkeys = make_pathkeys_for_sortclauses(parse->groupClause,
tlist);
sort_pathkeys = make_pathkeys_for_sortclauses(parse->sortClause,
@@ -358,7 +370,10 @@ union_planner(Query *parse,
*/
sub_tlist = make_subplanTargetList(parse, tlist, &groupColIdx);
/* Calculate pathkeys that represent grouping/ordering requirements */
/*
* Calculate pathkeys that represent grouping/ordering
* requirements
*/
group_pathkeys = make_pathkeys_for_sortclauses(parse->groupClause,
tlist);
sort_pathkeys = make_pathkeys_for_sortclauses(parse->sortClause,
@@ -368,11 +383,12 @@ union_planner(Query *parse,
* Figure out whether we need a sorted result from query_planner.
*
* If we have a GROUP BY clause, then we want a result sorted
* properly for grouping. Otherwise, if there is an ORDER BY clause,
* we want to sort by the ORDER BY clause. (Note: if we have both,
* and ORDER BY is a superset of GROUP BY, it would be tempting to
* request sort by ORDER BY --- but that might just leave us failing
* to exploit an available sort order at all. Needs more thought...)
* properly for grouping. Otherwise, if there is an ORDER BY
* clause, we want to sort by the ORDER BY clause. (Note: if we
* have both, and ORDER BY is a superset of GROUP BY, it would be
* tempting to request sort by ORDER BY --- but that might just
* leave us failing to exploit an available sort order at all.
* Needs more thought...)
*/
if (parse->groupClause)
parse->query_pathkeys = group_pathkeys;
@@ -382,15 +398,16 @@ union_planner(Query *parse,
parse->query_pathkeys = NIL;
/*
* Figure out whether we expect to retrieve all the tuples that the
* plan can generate, or to stop early due to a LIMIT or other
* factors. If the caller passed a value >= 0, believe that value,
* else do our own examination of the query context.
* Figure out whether we expect to retrieve all the tuples that
* the plan can generate, or to stop early due to a LIMIT or other
* factors. If the caller passed a value >= 0, believe that
* value, else do our own examination of the query context.
*/
if (tuple_fraction < 0.0)
{
/* Initial assumption is we need all the tuples */
tuple_fraction = 0.0;
/*
* Check for a LIMIT clause.
*/
@@ -430,33 +447,37 @@ union_planner(Query *parse,
}
else
{
/*
* COUNT is a PARAM ... don't know exactly what the limit
* will be, but for lack of a better idea assume 10%
* of the plan's result is wanted.
* COUNT is a PARAM ... don't know exactly what the
* limit will be, but for lack of a better idea assume
* 10% of the plan's result is wanted.
*/
tuple_fraction = 0.10;
}
}
/*
* Check for a retrieve-into-portal, ie DECLARE CURSOR.
*
* We have no real idea how many tuples the user will ultimately
* FETCH from a cursor, but it seems a good bet that he doesn't
* want 'em all. Optimize for 10% retrieval (you gotta better
* number?)
* FETCH from a cursor, but it seems a good bet that he
* doesn't want 'em all. Optimize for 10% retrieval (you
* gotta better number?)
*/
if (parse->isPortal)
tuple_fraction = 0.10;
}
/*
* Adjust tuple_fraction if we see that we are going to apply
* grouping/aggregation/etc. This is not overridable by the
* caller, since it reflects plan actions that this routine
* will certainly take, not assumptions about context.
* caller, since it reflects plan actions that this routine will
* certainly take, not assumptions about context.
*/
if (parse->groupClause)
{
/*
* In GROUP BY mode, we have the little problem that we don't
* really know how many input tuples will be needed to make a
@@ -464,33 +485,42 @@ union_planner(Query *parse,
* input count. For lack of a better idea, assume 25% of the
* input data will be processed if there is any output limit.
* However, if the caller gave us a fraction rather than an
* absolute count, we can keep using that fraction (which amounts
* to assuming that all the groups are about the same size).
* absolute count, we can keep using that fraction (which
* amounts to assuming that all the groups are about the same
* size).
*/
if (tuple_fraction >= 1.0)
tuple_fraction = 0.25;
/*
* If both GROUP BY and ORDER BY are specified, we will need
* two levels of sort --- and, therefore, certainly need to
* read all the input tuples --- unless ORDER BY is a subset
* of GROUP BY. (Although we are comparing non-canonicalized
* pathkeys here, it should be OK since they will both contain
* only single-element sublists at this point. See pathkeys.c.)
* only single-element sublists at this point. See
* pathkeys.c.)
*/
if (parse->groupClause && parse->sortClause &&
! pathkeys_contained_in(sort_pathkeys, group_pathkeys))
!pathkeys_contained_in(sort_pathkeys, group_pathkeys))
tuple_fraction = 0.0;
}
else if (parse->hasAggs)
{
/* Ungrouped aggregate will certainly want all the input tuples. */
/*
* Ungrouped aggregate will certainly want all the input
* tuples.
*/
tuple_fraction = 0.0;
}
else if (parse->distinctClause)
{
/*
* SELECT DISTINCT, like GROUP, will absorb an unpredictable
* number of input tuples per output tuple. Handle the same way.
* number of input tuples per output tuple. Handle the same
* way.
*/
if (tuple_fraction >= 1.0)
tuple_fraction = 0.25;
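
The tuple_fraction adjustments above amount to a small decision table: cursors guess 10% retrieval, GROUP BY and DISTINCT cap an absolute limit at 25% of the input, and an extra sort level or an ungrouped aggregate forces full retrieval. A condensed toy version of that logic follows; the percentages come from the comments, but the function shape and argument names are invented:

#include <stdio.h>
#include <stdbool.h>

static double adjust_fraction(double f, bool is_portal, bool has_group,
                              bool has_aggs, bool has_distinct,
                              bool order_not_subset_of_group)
{
    if (f < 0.0)
        f = is_portal ? 0.10 : 0.0;      /* cursor: guess 10%, else want all */
    if (has_group)
    {
        if (f >= 1.0)
            f = 0.25;                    /* absolute limit -> 25% of input */
        if (order_not_subset_of_group)
            f = 0.0;                     /* two sort levels: read everything */
    }
    else if (has_aggs)
        f = 0.0;                         /* ungrouped aggregate reads it all */
    else if (has_distinct && f >= 1.0)
        f = 0.25;
    return f;
}

int main(void)
{
    printf("cursor:          %g\n",
           adjust_fraction(-1.0, true, false, false, false, false));
    printf("GROUP BY + LIMIT: %g\n",
           adjust_fraction(100.0, false, true, false, false, false));
    return 0;
}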
@@ -502,14 +532,15 @@ union_planner(Query *parse,
(List *) parse->qual,
tuple_fraction);
/* query_planner returns actual sort order (which is not
/*
* query_planner returns actual sort order (which is not
* necessarily what we requested) in query_pathkeys.
*/
current_pathkeys = parse->query_pathkeys;
}
/* query_planner returns NULL if it thinks plan is bogus */
if (! result_plan)
if (!result_plan)
elog(ERROR, "union_planner: failed to create plan");
/*
@@ -539,9 +570,9 @@ union_planner(Query *parse,
/*
* If there are aggregates then the Group node should just return
* the same set of vars as the subplan did (but we can exclude
* any GROUP BY expressions). If there are no aggregates
* then the Group node had better compute the final tlist.
* the same set of vars as the subplan did (but we can exclude any
* GROUP BY expressions). If there are no aggregates then the
* Group node had better compute the final tlist.
*/
if (parse->hasAggs)
group_tlist = flatten_tlist(result_plan->targetlist);
@@ -549,8 +580,8 @@ union_planner(Query *parse,
group_tlist = tlist;
/*
* Figure out whether the path result is already ordered the way we
* need it --- if so, no need for an explicit sort step.
* Figure out whether the path result is already ordered the way
* we need it --- if so, no need for an explicit sort step.
*/
if (pathkeys_contained_in(group_pathkeys, current_pathkeys))
{
@@ -559,7 +590,9 @@ union_planner(Query *parse,
}
else
{
/* We will need to do an explicit sort by the GROUP BY clause.
/*
* We will need to do an explicit sort by the GROUP BY clause.
* make_groupplan will do the work, but set current_pathkeys
* to indicate the resulting order.
*/
@@ -594,10 +627,8 @@ union_planner(Query *parse,
*/
if (parse->sortClause)
{
if (! pathkeys_contained_in(sort_pathkeys, current_pathkeys))
{
if (!pathkeys_contained_in(sort_pathkeys, current_pathkeys))
result_plan = make_sortplan(tlist, parse->sortClause, result_plan);
}
}
/*
@@ -633,7 +664,7 @@ union_planner(Query *parse,
* we want to pass this targetlist to the subplan:
* a,b,c,d,a+b
* where the a+b target will be used by the Sort/Group steps, and the
* other targets will be used for computing the final results. (In the
* other targets will be used for computing the final results. (In the
* above example we could theoretically suppress the a and b targets and
* use only a+b, but it's not really worth the trouble.)
*
@@ -675,8 +706,9 @@ make_subplanTargetList(Query *parse,
/*
* If grouping, create sub_tlist entries for all GROUP BY expressions
* (GROUP BY items that are simple Vars should be in the list already),
* and make an array showing where the group columns are in the sub_tlist.
* (GROUP BY items that are simple Vars should be in the list
* already), and make an array showing where the group columns are in
* the sub_tlist.
*/
numCols = length(parse->groupClause);
if (numCols > 0)
@@ -690,10 +722,10 @@ make_subplanTargetList(Query *parse,
foreach(gl, parse->groupClause)
{
GroupClause *grpcl = (GroupClause *) lfirst(gl);
Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
TargetEntry *te = NULL;
List *sl;
GroupClause *grpcl = (GroupClause *) lfirst(gl);
Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
TargetEntry *te = NULL;
List *sl;
/* Find or make a matching sub_tlist entry */
foreach(sl, sub_tlist)
@@ -702,7 +734,7 @@ make_subplanTargetList(Query *parse,
if (equal(groupexpr, te->expr))
break;
}
if (! sl)
if (!sl)
{
te = makeTargetEntry(makeResdom(length(sub_tlist) + 1,
exprType(groupexpr),
@@ -739,8 +771,9 @@ make_groupplan(List *group_tlist,
{
int numCols = length(groupClause);
if (! is_presorted)
if (!is_presorted)
{
/*
* The Sort node always just takes a copy of the subplan's tlist
* plus ordering information. (This might seem inefficient if the
@@ -755,14 +788,14 @@ make_groupplan(List *group_tlist,
foreach(gl, groupClause)
{
GroupClause *grpcl = (GroupClause *) lfirst(gl);
TargetEntry *te = nth(grpColIdx[keyno]-1, sort_tlist);
Resdom *resdom = te->resdom;
GroupClause *grpcl = (GroupClause *) lfirst(gl);
TargetEntry *te = nth(grpColIdx[keyno] - 1, sort_tlist);
Resdom *resdom = te->resdom;
/*
* Check for the possibility of duplicate group-by clauses --- the
* parser should have removed 'em, but the Sort executor will get
* terribly confused if any get through!
* Check for the possibility of duplicate group-by clauses ---
* the parser should have removed 'em, but the Sort executor
* will get terribly confused if any get through!
*/
if (resdom->reskey == 0)
{
@@ -808,8 +841,8 @@ make_sortplan(List *tlist, List *sortcls, Plan *plannode)
/*
* Check for the possibility of duplicate order-by clauses --- the
* parser should have removed 'em, but the executor will get terribly
* confused if any get through!
* parser should have removed 'em, but the executor will get
* terribly confused if any get through!
*/
if (resdom->reskey == 0)
{
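
Both make_groupplan() and make_sortplan() above guard against duplicate sort keys by assigning a key number only while resdom->reskey is still zero. A toy model of that guard, with plain column numbers standing in for target-list entries (all data invented):

#include <stdio.h>

int main(void)
{
    int reskey[4] = {0, 0, 0, 0};   /* per-column sort-key slot, 0 = unset */
    int order_by[] = {2, 1, 2};     /* note the duplicate column 2 */
    int keyno = 1;

    for (int i = 0; i < 3; i++)
    {
        int col = order_by[i];
        if (reskey[col] == 0)       /* skip columns that already have a key */
            reskey[col] = keyno++;
    }

    for (int col = 1; col <= 3; col++)
        printf("column %d -> sort key %d\n", col, reskey[col]);
    return 0;
}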

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.61 2000/04/04 01:21:47 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.62 2000/04/12 17:15:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,13 +24,15 @@
#include "optimizer/tlist.h"
#include "optimizer/var.h"
typedef struct {
typedef struct
{
List *outer_tlist;
List *inner_tlist;
Index acceptable_rel;
} join_references_context;
typedef struct {
typedef struct
{
Index subvarno;
List *subplanTargetList;
} replace_vars_with_subplan_refs_context;
@@ -38,12 +40,12 @@ typedef struct {
static void set_join_references(Join *join);
static void set_uppernode_references(Plan *plan, Index subvarno);
static Node *join_references_mutator(Node *node,
						join_references_context *context);
static Node *replace_vars_with_subplan_refs(Node *node,
							   Index subvarno,
							   List *subplanTargetList);
static Node *replace_vars_with_subplan_refs_mutator(Node *node,
						replace_vars_with_subplan_refs_context *context);
static bool fix_opids_walker(Node *node, void *context);
/*****************************************************************************
@@ -56,7 +58,7 @@ static bool fix_opids_walker(Node *node, void *context);
* set_plan_references
* This is the final processing pass of the planner/optimizer. The plan
* tree is complete; we just have to adjust some representational details
* for the convenience of the executor. We update Vars in upper plan nodes
* for the convenience of the executor. We update Vars in upper plan nodes
* to refer to the outputs of their subplans, and we compute regproc OIDs
* for operators (ie, we look up the function that implements each op).
* We must also build lists of all the subplan nodes present in each
@@ -74,7 +76,8 @@ set_plan_references(Plan *plan)
if (plan == NULL)
return;
/* We must rebuild the plan's list of subplan nodes, since we are
/*
* We must rebuild the plan's list of subplan nodes, since we are
* copying/mutating its expression trees.
*/
plan->subPlan = NIL;
@@ -92,10 +95,10 @@ set_plan_references(Plan *plan)
fix_opids((Node *) ((IndexScan *) plan)->indxqualorig);
plan->subPlan =
nconc(plan->subPlan,
					  pull_subplans((Node *) ((IndexScan *) plan)->indxqual));
plan->subPlan =
nconc(plan->subPlan,
					  pull_subplans((Node *) ((IndexScan *) plan)->indxqualorig));
break;
case T_NestLoop:
set_join_references((Join *) plan);
@@ -105,24 +108,26 @@ set_plan_references(Plan *plan)
fix_opids((Node *) ((MergeJoin *) plan)->mergeclauses);
plan->subPlan =
nconc(plan->subPlan,
					  pull_subplans((Node *) ((MergeJoin *) plan)->mergeclauses));
break;
case T_HashJoin:
set_join_references((Join *) plan);
fix_opids((Node *) ((HashJoin *) plan)->hashclauses);
plan->subPlan =
nconc(plan->subPlan,
					  pull_subplans((Node *) ((HashJoin *) plan)->hashclauses));
break;
case T_Material:
case T_Sort:
case T_Unique:
case T_Hash:
/* These plan types don't actually bother to evaluate their
/*
* These plan types don't actually bother to evaluate their
* targetlists or quals (because they just return their
* unmodified input tuples). The optimizer is lazy about
* creating really valid targetlists for them. Best to
* just leave the targetlist alone.
* creating really valid targetlists for them. Best to just
* leave the targetlist alone.
*/
break;
case T_Agg:
@@ -130,7 +135,9 @@ set_plan_references(Plan *plan)
set_uppernode_references(plan, (Index) 0);
break;
case T_Result:
/* Result may or may not have a subplan; no need to fix up
/*
* Result may or may not have a subplan; no need to fix up
* subplan references if it hasn't got one...
*
* XXX why does Result use a different subvarno from Agg/Group?
@@ -144,9 +151,7 @@ set_plan_references(Plan *plan)
break;
case T_Append:
foreach(pl, ((Append *) plan)->appendplans)
{
set_plan_references((Plan *) lfirst(pl));
}
break;
case T_TidScan:
/* nothing special */
@@ -158,8 +163,8 @@ set_plan_references(Plan *plan)
}
/*
* For all plan types, fix operators in targetlist and qual expressions,
* and find subplans therein.
* For all plan types, fix operators in targetlist and qual
* expressions, and find subplans therein.
*/
fix_opids((Node *) plan->targetlist);
fix_opids((Node *) plan->qual);
@@ -176,20 +181,21 @@ set_plan_references(Plan *plan)
* NOTE: it is essential that we recurse into subplans AFTER we set
* subplan references in this plan's tlist and quals. If we did the
* reference-adjustments bottom-up, then we would fail to match this
* plan's var nodes against the already-modified nodes of the subplans.
* plan's var nodes against the already-modified nodes of the
* subplans.
*/
set_plan_references(plan->lefttree);
set_plan_references(plan->righttree);
foreach(pl, plan->initPlan)
{
		SubPlan    *sp = (SubPlan *) lfirst(pl);
Assert(IsA(sp, SubPlan));
set_plan_references(sp->plan);
}
foreach(pl, plan->subPlan)
{
		SubPlan    *sp = (SubPlan *) lfirst(pl);
Assert(IsA(sp, SubPlan));
set_plan_references(sp->plan);
@@ -325,9 +331,10 @@ join_references_mutator(Node *node,
newvar->varattno = resdom->resno;
return (Node *) newvar;
}
/*
* Var not in either tlist --- either raise an error,
* or return the Var unmodified.
* Var not in either tlist --- either raise an error, or return
* the Var unmodified.
*/
if (var->varno != context->acceptable_rel)
elog(ERROR, "join_references: variable not in subplan target lists");
@@ -370,7 +377,7 @@ replace_vars_with_subplan_refs(Node *node,
static Node *
replace_vars_with_subplan_refs_mutator(Node *node,
						replace_vars_with_subplan_refs_context *context)
{
if (node == NULL)
return NULL;
@@ -414,7 +421,7 @@ fix_opids(Node *node)
}
static bool
fix_opids_walker (Node *node, void *context)
fix_opids_walker(Node *node, void *context)
{
if (node == NULL)
return false;
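
The reference fix-up described in the setrefs.c hunks above rewrites an upper plan node's Vars so they point at the positions where the child plan already emits those values, rather than at the original base relation. A very small standalone model of that lookup (invented structs, not the real Var/TargetEntry nodes):

#include <stdio.h>

typedef struct { int varno; int varattno; } MiniVar;

int main(void)
{
    /* child target list: which (relation, column) each output slot carries */
    MiniVar child_tlist[] = {{1, 3}, {2, 1}, {1, 7}};
    int nchild = 3;

    MiniVar upper = {1, 7};     /* upper node still refers to base rel 1 */

    for (int pos = 0; pos < nchild; pos++)
        if (child_tlist[pos].varno == upper.varno &&
            child_tlist[pos].varattno == upper.varattno)
        {
            /* point at the child's output slot instead (1-based resno) */
            printf("rewrite to child output column %d\n", pos + 1);
            return 0;
        }

    printf("variable not in child target list\n");
    return 0;
}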

View File

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.34 2000/04/04 01:21:47 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.35 2000/04/12 17:15:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,19 +82,19 @@ replace_var(Var *var)
varlevel = PlannerQueryLevel - var->varlevelsup;
/*
* If there's already a PlannerParamVar entry for this same Var,
* just use it. NOTE: in situations involving UNION or inheritance,
* it is possible for the same varno/varlevel to refer to different RTEs
* in different parts of the parsetree, so that different fields might
* end up sharing the same Param number. As long as we check the vartype
* as well, I believe that this sort of aliasing will cause no trouble.
* The correct field should get stored into the Param slot at execution
* in each part of the tree.
* If there's already a PlannerParamVar entry for this same Var, just
* use it. NOTE: in situations involving UNION or inheritance, it is
* possible for the same varno/varlevel to refer to different RTEs in
* different parts of the parsetree, so that different fields might
* end up sharing the same Param number. As long as we check the
* vartype as well, I believe that this sort of aliasing will cause no
* trouble. The correct field should get stored into the Param slot at
* execution in each part of the tree.
*/
i = 0;
foreach(ppv, PlannerParamVar)
{
Var *pvar = lfirst(ppv);
Var *pvar = lfirst(ppv);
if (pvar->varno == var->varno &&
pvar->varattno == var->varattno &&
@@ -104,7 +104,7 @@ replace_var(Var *var)
i++;
}
if (! ppv)
if (!ppv)
{
/* Nope, so make a new one */
i = new_param(var, varlevel);
@@ -137,23 +137,25 @@ make_subplan(SubLink *slink)
PlannerQueryLevel++; /* we become child */
/*
* For an EXISTS subplan, tell lower-level planner to expect that
* only the first tuple will be retrieved. For ALL and ANY subplans,
* we will be able to stop evaluating if the test condition fails,
* so very often not all the tuples will be retrieved; for lack of a
* better idea, specify 50% retrieval. For EXPR and MULTIEXPR subplans,
* use default behavior (we're only expecting one row out, anyway).
* For an EXISTS subplan, tell lower-level planner to expect that only
* the first tuple will be retrieved. For ALL and ANY subplans, we
* will be able to stop evaluating if the test condition fails, so
* very often not all the tuples will be retrieved; for lack of a
* better idea, specify 50% retrieval. For EXPR and MULTIEXPR
* subplans, use default behavior (we're only expecting one row out,
* anyway).
*
* NOTE: if you change these numbers, also change cost_qual_eval_walker()
* in path/costsize.c.
*
* XXX If an ALL/ANY subplan is uncorrelated, we may decide to materialize
* its result below. In that case it would've been better to specify
* full retrieval. At present, however, we can only detect correlation
* or lack of it after we've made the subplan :-(. Perhaps detection
* of correlation should be done as a separate step. Meanwhile, we don't
* want to be too optimistic about the percentage of tuples retrieved,
* for fear of selecting a plan that's bad for the materialization case.
* XXX If an ALL/ANY subplan is uncorrelated, we may decide to
* materialize its result below. In that case it would've been better
* to specify full retrieval. At present, however, we can only detect
* correlation or lack of it after we've made the subplan :-(. Perhaps
* detection of correlation should be done as a separate step.
* Meanwhile, we don't want to be too optimistic about the percentage
* of tuples retrieved, for fear of selecting a plan that's bad for
* the materialization case.
*/
if (slink->subLinkType == EXISTS_SUBLINK)
tuple_fraction = 1.0; /* just like a LIMIT 1 */
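As a rough standalone sketch of the retrieval-fraction policy spelled out in the comment above (EXISTS acts like LIMIT 1, ALL/ANY assume about 50% retrieval, EXPR/MULTIEXPR keep the default), with an invented enum and function name rather than PostgreSQL's actual SubLinkType handling:

#include <stdio.h>

/* Invented stand-ins for the sublink kinds discussed in the comment. */
typedef enum { EX_EXISTS, EX_ALL, EX_ANY, EX_EXPR, EX_MULTIEXPR } SubLinkKind;

/* Fraction of subquery tuples the planner should expect to fetch;
 * -1.0 means "leave the default behavior alone". */
static double choose_tuple_fraction(SubLinkKind kind)
{
    switch (kind)
    {
        case EX_EXISTS:
            return 1.0;     /* just like a LIMIT 1 */
        case EX_ALL:
        case EX_ANY:
            return 0.5;     /* the "50% retrieval" guess from the comment */
        default:
            return -1.0;    /* EXPR/MULTIEXPR: one row expected anyway */
    }
}

int main(void)
{
    printf("EXISTS -> %.1f\n", choose_tuple_fraction(EX_EXISTS));
    printf("ANY    -> %.1f\n", choose_tuple_fraction(EX_ANY));
    printf("EXPR   -> %.1f\n", choose_tuple_fraction(EX_EXPR));
    return 0;
}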
@@ -167,8 +169,8 @@ make_subplan(SubLink *slink)
/*
* Assign subPlan, extParam and locParam to plan nodes. At the moment,
* SS_finalize_plan doesn't handle initPlan-s and so we assign them
* to the topmost plan node and take care about its extParam too.
* SS_finalize_plan doesn't handle initPlan-s and so we assign them to
* the topmost plan node and take care about its extParam too.
*/
(void) SS_finalize_plan(plan);
plan->initPlan = PlannerInitPlan;
@@ -206,8 +208,8 @@ make_subplan(SubLink *slink)
/*
* Un-correlated or undirect correlated plans of EXISTS, EXPR, or
* MULTIEXPR types can be used as initPlans. For EXISTS or EXPR,
* we just produce a Param referring to the result of evaluating the
* MULTIEXPR types can be used as initPlans. For EXISTS or EXPR, we
* just produce a Param referring to the result of evaluating the
* initPlan. For MULTIEXPR, we must build an AND or OR-clause of the
* individual comparison operators, using the appropriate lefthand
* side expressions and Params for the initPlan's target items.
@@ -228,6 +230,7 @@ make_subplan(SubLink *slink)
else if (node->parParam == NIL && slink->subLinkType == EXPR_SUBLINK)
{
TargetEntry *te = lfirst(plan->targetlist);
/* need a var node just to pass to new_param()... */
Var *var = makeVar(0, 0, te->resdom->restype,
te->resdom->restypmod, 0);
@@ -247,14 +250,15 @@ make_subplan(SubLink *slink)
int i = 0;
/*
* Convert oper list of Opers into a list of Exprs, using
* lefthand arguments and Params representing inside results.
* Convert oper list of Opers into a list of Exprs, using lefthand
* arguments and Params representing inside results.
*/
foreach(lst, slink->oper)
{
Oper *oper = (Oper *) lfirst(lst);
Node *lefthand = nth(i, slink->lefthand);
TargetEntry *te = nth(i, plan->targetlist);
/* need a var node just to pass to new_param()... */
Var *var = makeVar(0, 0, te->resdom->restype,
te->resdom->restypmod, 0);
@@ -273,7 +277,9 @@ make_subplan(SubLink *slink)
tup = get_operator_tuple(oper->opno);
Assert(HeapTupleIsValid(tup));
opform = (Form_pg_operator) GETSTRUCT(tup);
/* Note: we use make_operand in case runtime type conversion
/*
* Note: we use make_operand in case runtime type conversion
* function calls must be inserted for this operator!
*/
left = make_operand("", lefthand,
@@ -304,15 +310,16 @@ make_subplan(SubLink *slink)
int i = 0;
/*
* We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types to
* initPlans, even when they are uncorrelated or undirect correlated,
* because we need to scan the output of the subplan for each outer
* tuple. However, we have the option to tack a MATERIAL node onto
* the top of an uncorrelated/undirect correlated subplan, which lets
* us do the work of evaluating the subplan only once. We do this
* if the subplan's top plan node is anything more complicated than
* a plain sequential scan, and we do it even for seqscan if the
* qual appears selective enough to eliminate many tuples.
* We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types
* to initPlans, even when they are uncorrelated or undirect
* correlated, because we need to scan the output of the subplan
* for each outer tuple. However, we have the option to tack a
* MATERIAL node onto the top of an uncorrelated/undirect
* correlated subplan, which lets us do the work of evaluating the
* subplan only once. We do this if the subplan's top plan node
* is anything more complicated than a plain sequential scan, and
* we do it even for seqscan if the qual appears selective enough
* to eliminate many tuples.
*/
if (node->parParam == NIL)
{
@@ -336,10 +343,12 @@ make_subplan(SubLink *slink)
break;
case T_Material:
case T_Sort:
/* Don't add another Material node if there's one already,
* nor if the top node is a Sort, since Sort materializes
* its output anyway. (I doubt either case can happen in
* practice for a subplan, but...)
/*
* Don't add another Material node if there's one
* already, nor if the top node is a Sort, since Sort
* materializes its output anyway. (I doubt either
* case can happen in practice for a subplan, but...)
*/
use_material = false;
break;
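The comment and switch above decide when an uncorrelated ANY/ALL subplan is worth capping with a Material node. A compact sketch of that decision under a toy node-tag enum; the seqscan-selectivity test is reduced to a boolean parameter, and none of these names are the backend's:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the plan node tags mentioned in the comment. */
typedef enum { N_SEQSCAN, N_INDEXSCAN, N_MATERIAL, N_SORT, N_HASHJOIN } PlanTag;

/* Should an uncorrelated ANY/ALL subplan get a Material node on top?
 * Anything fancier than a plain seqscan is worth materializing; Material
 * and Sort already materialize their output; a seqscan qualifies only if
 * its qual looks selective enough to discard many tuples. */
static bool want_material(PlanTag top, bool seqscan_qual_is_selective)
{
    switch (top)
    {
        case N_SEQSCAN:
            return seqscan_qual_is_selective;
        case N_MATERIAL:
        case N_SORT:
            return false;
        default:
            return true;
    }
}

int main(void)
{
    printf("hashjoin      -> %d\n", want_material(N_HASHJOIN, false));
    printf("plain seqscan -> %d\n", want_material(N_SEQSCAN, false));
    printf("sort          -> %d\n", want_material(N_SORT, false));
    return 0;
}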
@@ -359,7 +368,7 @@ make_subplan(SubLink *slink)
/*
* Make expression of SUBPLAN type
*/
expr->typeOid = BOOLOID; /* bogus, but we don't really care */
expr->typeOid = BOOLOID;/* bogus, but we don't really care */
expr->opType = SUBPLAN_EXPR;
expr->oper = (Node *) node;
@@ -371,17 +380,20 @@ make_subplan(SubLink *slink)
Var *var = nth(lfirsti(lst), PlannerParamVar);
var = (Var *) copyObject(var);
/* Must fix absolute-level varlevelsup from the
* PlannerParamVar entry. But since var is at current
* subplan level, this is easy:
/*
* Must fix absolute-level varlevelsup from the
* PlannerParamVar entry. But since var is at current subplan
* level, this is easy:
*/
var->varlevelsup = 0;
args = lappend(args, var);
}
expr->args = args;
/*
* Convert oper list of Opers into a list of Exprs, using
* lefthand arguments and Consts representing inside results.
* Convert oper list of Opers into a list of Exprs, using lefthand
* arguments and Consts representing inside results.
*/
foreach(lst, slink->oper)
{
@@ -395,8 +407,8 @@ make_subplan(SubLink *slink)
*right;
/*
* XXX really ought to fill in constlen and constbyval correctly,
* but right now ExecEvalExpr won't look at them...
* XXX really ought to fill in constlen and constbyval
* correctly, but right now ExecEvalExpr won't look at them...
*/
con = makeConst(te->resdom->restype, 0, 0, true, 0, 0, 0);
@@ -404,7 +416,9 @@ make_subplan(SubLink *slink)
tup = get_operator_tuple(oper->opno);
Assert(HeapTupleIsValid(tup));
opform = (Form_pg_operator) GETSTRUCT(tup);
/* Note: we use make_operand in case runtime type conversion
/*
* Note: we use make_operand in case runtime type conversion
* function calls must be inserted for this operator!
*/
left = make_operand("", lefthand,
@@ -450,9 +464,10 @@ set_unioni(List *l1, List *l2)
* check in make_subplan to see whether a subselect has any subselects.
*/
typedef struct finalize_primnode_results {
List *subplans; /* List of subplans found in expr */
List *paramids; /* List of PARAM_EXEC paramids found */
typedef struct finalize_primnode_results
{
List *subplans; /* List of subplans found in expr */
List *paramids; /* List of PARAM_EXEC paramids found */
} finalize_primnode_results;
static bool
@@ -464,16 +479,16 @@ finalize_primnode(Node *node, finalize_primnode_results *results)
{
if (((Param *) node)->paramkind == PARAM_EXEC)
{
int paramid = (int) ((Param *) node)->paramid;
int paramid = (int) ((Param *) node)->paramid;
if (! intMember(paramid, results->paramids))
if (!intMember(paramid, results->paramids))
results->paramids = lconsi(paramid, results->paramids);
}
return false; /* no more to do here */
}
if (is_subplan(node))
{
SubPlan *subplan = (SubPlan *) ((Expr *) node)->oper;
SubPlan *subplan = (SubPlan *) ((Expr *) node)->oper;
List *lst;
/* Add subplan to subplans list */
@@ -486,7 +501,7 @@ finalize_primnode(Node *node, finalize_primnode_results *results)
/* note varlevelsup is absolute level number */
if (var->varlevelsup < PlannerQueryLevel &&
! intMember(paramid, results->paramids))
!intMember(paramid, results->paramids))
results->paramids = lconsi(paramid, results->paramids);
}
/* fall through to recurse into subplan args */
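finalize_primnode is a walker that collects PARAM_EXEC parameter numbers (and subplans) as it descends an expression tree, adding each id only once. The same collect-unique-ids-while-walking pattern, sketched over a simplified tree type that stands in for the backend's Node/List structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified expression node: either a parameter leaf, or an operator
 * with two children.  A stand-in for the backend's Node tree. */
typedef struct Expr
{
    bool is_param;
    int  paramid;               /* valid when is_param */
    struct Expr *left, *right;  /* valid when !is_param */
} Expr;

static bool member(int id, const int *ids, int n)
{
    for (int i = 0; i < n; i++)
        if (ids[i] == id)
            return true;
    return false;
}

/* Walk the tree, adding each distinct paramid once, the way
 * finalize_primnode does for PARAM_EXEC params.  Returns the new count. */
static int collect_params(const Expr *node, int *ids, int n)
{
    if (node == NULL)
        return n;
    if (node->is_param)
    {
        if (!member(node->paramid, ids, n))
            ids[n++] = node->paramid;
        return n;
    }
    n = collect_params(node->left, ids, n);
    return collect_params(node->right, ids, n);
}

int main(void)
{
    Expr p1 = {true, 1, NULL, NULL}, p2 = {true, 2, NULL, NULL};
    Expr dup = {true, 1, NULL, NULL};
    Expr inner = {false, 0, &p1, &p2};
    Expr root = {false, 0, &inner, &dup};
    int ids[8];
    int n = collect_params(&root, ids, 0);

    printf("%d distinct params\n", n);   /* prints 2 */
    return 0;
}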
@@ -533,7 +548,7 @@ Node *
SS_process_sublinks(Node *expr)
{
/* No setup needed for tree walk, so away we go */
return process_sublinks_mutator(expr, NULL);
return process_sublinks_mutator(expr, NULL);
}
static Node *
@@ -543,25 +558,26 @@ process_sublinks_mutator(Node *node, void *context)
return NULL;
if (IsA(node, SubLink))
{
SubLink *sublink = (SubLink *) node;
SubLink *sublink = (SubLink *) node;
/* First, scan the lefthand-side expressions, if any.
* This is a tad klugy since we modify the input SubLink node,
* but that should be OK (make_subplan does it too!)
/*
* First, scan the lefthand-side expressions, if any. This is a
* tad klugy since we modify the input SubLink node, but that
* should be OK (make_subplan does it too!)
*/
sublink->lefthand = (List *)
process_sublinks_mutator((Node *) sublink->lefthand, context);
/* Now build the SubPlan node and make the expr to return */
return make_subplan(sublink);
}
/*
* Note that we will never see a SubPlan expression in the input
* (since this is the very routine that creates 'em to begin with).
* So the code in expression_tree_mutator() that might do
* inappropriate things with SubPlans or SubLinks will not be
* exercised.
* (since this is the very routine that creates 'em to begin with). So
* the code in expression_tree_mutator() that might do inappropriate
* things with SubPlans or SubLinks will not be exercised.
*/
Assert(! is_subplan(node));
Assert(!is_subplan(node));
return expression_tree_mutator(node,
process_sublinks_mutator,
@@ -581,12 +597,13 @@ SS_finalize_plan(Plan *plan)
results.subplans = NIL; /* initialize lists to NIL */
results.paramids = NIL;
/*
* When we call finalize_primnode, results.paramids lists are
* automatically merged together. But when recursing to self,
* we have to do it the hard way. We want the paramids list
* to include params in subplans as well as at this level.
* (We don't care about finding subplans of subplans, though.)
* automatically merged together. But when recursing to self, we have
* to do it the hard way. We want the paramids list to include params
* in subplans as well as at this level. (We don't care about finding
* subplans of subplans, though.)
*/
/* Find params and subplans in targetlist and qual */
@@ -604,13 +621,15 @@ SS_finalize_plan(Plan *plan)
case T_Append:
foreach(lst, ((Append *) plan)->appendplans)
results.paramids = set_unioni(results.paramids,
SS_finalize_plan((Plan *) lfirst(lst)));
SS_finalize_plan((Plan *) lfirst(lst)));
break;
case T_IndexScan:
finalize_primnode((Node *) ((IndexScan *) plan)->indxqual,
&results);
/* we need not look at indxqualorig, since it will have the
/*
* we need not look at indxqualorig, since it will have the
* same param references as indxqual, and we aren't really
* concerned yet about having a complete subplan list.
*/
@@ -633,7 +652,7 @@ SS_finalize_plan(Plan *plan)
case T_TidScan:
finalize_primnode((Node *) ((TidScan *) plan)->tideval,
&results);
&results);
break;
case T_Agg:



@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.23 2000/02/27 19:45:44 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.24 2000/04/12 17:15:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@ static Expr *and_normalize(List *andlist);
static Expr *qual_cleanup(Expr *qual);
static List *remove_duplicates(List *list);
static void count_bool_nodes(Expr *qual, double *nodes,
double *cnfnodes, double *dnfnodes);
double *cnfnodes, double *dnfnodes);
/*****************************************************************************
*
@@ -71,7 +71,7 @@ static void count_bool_nodes(Expr *qual, double *nodes,
*
* If 'removeAndFlag' is true then it removes explicit AND at the top level,
* producing a list of implicitly-ANDed conditions. Otherwise, a regular
* boolean expression is returned. Since most callers pass 'true', we
* boolean expression is returned. Since most callers pass 'true', we
* prefer to declare the result as List *, not Expr *.
*
* XXX This code could be much smarter, at the cost of also being slower,
@@ -95,12 +95,14 @@ canonicalize_qual(Expr *qual, bool removeAndFlag)
if (qual == NULL)
return NIL;
/* Flatten AND and OR groups throughout the tree.
* This improvement is always worthwhile, so do it unconditionally.
/*
* Flatten AND and OR groups throughout the tree. This improvement is
* always worthwhile, so do it unconditionally.
*/
qual = flatten_andors(qual);
/* Push down NOTs. We do this only in the top-level boolean
/*
* Push down NOTs. We do this only in the top-level boolean
* expression, without examining arguments of operators/functions.
* Even so, it might not be a win if we are unable to find negators
* for all the operators involved; perhaps we should compare before-
@@ -109,21 +111,24 @@ canonicalize_qual(Expr *qual, bool removeAndFlag)
newqual = find_nots(qual);
/*
* Choose whether to convert to CNF, or DNF, or leave well enough alone.
* Choose whether to convert to CNF, or DNF, or leave well enough
* alone.
*
* We make an approximate estimate of the number of bottom-level nodes
* that will appear in the CNF and DNF forms of the query.
*/
count_bool_nodes(newqual, &nodes, &cnfnodes, &dnfnodes);
/*
* First heuristic is to forget about *both* normal forms if there are
* a huge number of terms in the qual clause. This would only happen
* with machine-generated queries, presumably; and most likely such
* a query is already in either CNF or DNF.
* with machine-generated queries, presumably; and most likely such a
* query is already in either CNF or DNF.
*/
cnfok = dnfok = true;
if (nodes >= 500.0)
cnfok = dnfok = false;
/*
* Second heuristic is to forget about either CNF or DNF if it shows
* unreasonable growth compared to the original form of the qual,
@@ -134,15 +139,17 @@ canonicalize_qual(Expr *qual, bool removeAndFlag)
cnfok = false;
if (dnfnodes >= 4.0 * nodes)
dnfok = false;
/*
* Third heuristic is to prefer DNF if top level is already an OR,
* and only one relation is mentioned, and DNF is no larger than
* the CNF representation. (Pretty shaky; can we improve on this?)
* Third heuristic is to prefer DNF if top level is already an OR, and
* only one relation is mentioned, and DNF is no larger than the CNF
* representation. (Pretty shaky; can we improve on this?)
*/
if (cnfok && dnfok && dnfnodes <= cnfnodes &&
or_clause((Node *) newqual) &&
NumRelids((Node *) newqual) == 1)
cnfok = false;
/*
* Otherwise, we prefer CNF.
*
@@ -150,20 +157,26 @@ canonicalize_qual(Expr *qual, bool removeAndFlag)
*/
if (cnfok)
{
/* Normalize into conjunctive normal form, and clean up the result. */
/*
* Normalize into conjunctive normal form, and clean up the
* result.
*/
newqual = qual_cleanup(find_ors(newqual));
}
else if (dnfok)
{
/* Normalize into disjunctive normal form, and clean up the result. */
/*
* Normalize into disjunctive normal form, and clean up the
* result.
*/
newqual = qual_cleanup(find_ands(newqual));
}
/* Convert to implicit-AND list if requested */
if (removeAndFlag)
{
newqual = (Expr *) make_ands_implicit(newqual);
}
return (List *) newqual;
}
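A condensed sketch of the three heuristics above, reusing the constants that appear in this hunk (give up on both normal forms past roughly 500 nodes, reject a form that grows to 4x the original estimate, prefer DNF for a single-relation OR that is no larger than the CNF estimate). The function and parameter names are invented for the illustration:

#include <stdbool.h>
#include <stdio.h>

/* Which normal form to aim for, mirroring the decision in canonicalize_qual. */
typedef enum { KEEP_AS_IS, USE_CNF, USE_DNF } QualForm;

static QualForm choose_form(double nodes, double cnfnodes, double dnfnodes,
                            bool top_is_or, int num_relids)
{
    bool cnfok = true, dnfok = true;

    /* Heuristic 1: huge quals are probably machine-generated and already
     * in one normal form, so leave them alone entirely. */
    if (nodes >= 500.0)
        cnfok = dnfok = false;

    /* Heuristic 2: reject a form that blows up past 4x the original size. */
    if (cnfnodes >= 4.0 * nodes)
        cnfok = false;
    if (dnfnodes >= 4.0 * nodes)
        dnfok = false;

    /* Heuristic 3: prefer DNF for a single-relation OR that is no larger
     * than the CNF representation. */
    if (cnfok && dnfok && dnfnodes <= cnfnodes && top_is_or && num_relids == 1)
        cnfok = false;

    if (cnfok)
        return USE_CNF;
    if (dnfok)
        return USE_DNF;
    return KEEP_AS_IS;
}

int main(void)
{
    /* An OR of two 2-way ANDs over one relation: the count_bool_nodes-style
     * estimates come out as nodes = cnf = dnf = 4, and the top node is an
     * OR, so heuristic 3 picks DNF. */
    printf("%d\n", choose_form(4.0, 4.0, 4.0, true, 1));        /* 2 = USE_DNF */
    printf("%d\n", choose_form(600.0, 700.0, 900.0, false, 2)); /* 0 = KEEP_AS_IS */
    return 0;
}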
@@ -177,7 +190,7 @@ canonicalize_qual(Expr *qual, bool removeAndFlag)
*
* If 'removeAndFlag' is true then it removes explicit AND at the top level,
* producing a list of implicitly-ANDed conditions. Otherwise, a regular
* boolean expression is returned. Since most callers pass 'true', we
* boolean expression is returned. Since most callers pass 'true', we
* prefer to declare the result as List *, not Expr *.
*/
List *
@@ -188,11 +201,14 @@ cnfify(Expr *qual, bool removeAndFlag)
if (qual == NULL)
return NIL;
/* Flatten AND and OR groups throughout the tree.
* This improvement is always worthwhile.
/*
* Flatten AND and OR groups throughout the tree. This improvement is
* always worthwhile.
*/
newqual = flatten_andors(qual);
/* Push down NOTs. We do this only in the top-level boolean
/*
* Push down NOTs. We do this only in the top-level boolean
* expression, without examining arguments of operators/functions.
*/
newqual = find_nots(newqual);
@@ -202,9 +218,7 @@ cnfify(Expr *qual, bool removeAndFlag)
newqual = qual_cleanup(newqual);
if (removeAndFlag)
{
newqual = (Expr *) make_ands_implicit(newqual);
}
return (List *) newqual;
}
@@ -227,11 +241,14 @@ dnfify(Expr *qual)
if (qual == NULL)
return NULL;
/* Flatten AND and OR groups throughout the tree.
* This improvement is always worthwhile.
/*
* Flatten AND and OR groups throughout the tree. This improvement is
* always worthwhile.
*/
newqual = flatten_andors(qual);
/* Push down NOTs. We do this only in the top-level boolean
/*
* Push down NOTs. We do this only in the top-level boolean
* expression, without examining arguments of operators/functions.
*/
newqual = find_nots(newqual);
@@ -280,13 +297,13 @@ flatten_andors(Expr *qual)
foreach(arg, qual->args)
{
Expr *subexpr = flatten_andors((Expr *) lfirst(arg));
Expr *subexpr = flatten_andors((Expr *) lfirst(arg));
/*
* Note: we can destructively nconc the subexpression's arglist
* because we know the recursive invocation of flatten_andors
* will have built a new arglist not shared with any other expr.
* Otherwise we'd need a listCopy here.
* Note: we can destructively nconc the subexpression's
* arglist because we know the recursive invocation of
* flatten_andors will have built a new arglist not shared
* with any other expr. Otherwise we'd need a listCopy here.
*/
if (and_clause((Node *) subexpr))
out_list = nconc(out_list, subexpr->args);
@@ -302,13 +319,13 @@ flatten_andors(Expr *qual)
foreach(arg, qual->args)
{
Expr *subexpr = flatten_andors((Expr *) lfirst(arg));
Expr *subexpr = flatten_andors((Expr *) lfirst(arg));
/*
* Note: we can destructively nconc the subexpression's arglist
* because we know the recursive invocation of flatten_andors
* will have built a new arglist not shared with any other expr.
* Otherwise we'd need a listCopy here.
* Note: we can destructively nconc the subexpression's
* arglist because we know the recursive invocation of
* flatten_andors will have built a new arglist not shared
* with any other expr. Otherwise we'd need a listCopy here.
*/
if (or_clause((Node *) subexpr))
out_list = nconc(out_list, subexpr->args);
@@ -354,13 +371,13 @@ pull_ors(List *orlist)
foreach(arg, orlist)
{
Expr *subexpr = (Expr *) lfirst(arg);
Expr *subexpr = (Expr *) lfirst(arg);
/*
* Note: we can destructively nconc the subexpression's arglist
* because we know the recursive invocation of pull_ors
* will have built a new arglist not shared with any other expr.
* Otherwise we'd need a listCopy here.
* because we know the recursive invocation of pull_ors will have
* built a new arglist not shared with any other expr. Otherwise
* we'd need a listCopy here.
*/
if (or_clause((Node *) subexpr))
out_list = nconc(out_list, pull_ors(subexpr->args));
@@ -385,13 +402,13 @@ pull_ands(List *andlist)
foreach(arg, andlist)
{
Expr *subexpr = (Expr *) lfirst(arg);
Expr *subexpr = (Expr *) lfirst(arg);
/*
* Note: we can destructively nconc the subexpression's arglist
* because we know the recursive invocation of pull_ands
* will have built a new arglist not shared with any other expr.
* Otherwise we'd need a listCopy here.
* because we know the recursive invocation of pull_ands will have
* built a new arglist not shared with any other expr. Otherwise
* we'd need a listCopy here.
*/
if (and_clause((Node *) subexpr))
out_list = nconc(out_list, pull_ands(subexpr->args));
@@ -407,7 +424,7 @@ pull_ands(List *andlist)
* For 'NOT' clauses, apply push_not() to try to push down the 'NOT'.
* For all other clause types, simply recurse.
*
* Returns the modified qualification. AND/OR flatness is preserved.
* Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_nots(Expr *qual)
@@ -468,7 +485,8 @@ static Expr *
push_nots(Expr *qual)
{
if (qual == NULL)
return make_notclause(qual); /* XXX is this right? Or possible? */
return make_notclause(qual); /* XXX is this right? Or
* possible? */
/*
* Negate an operator clause if possible: ("NOT" (< A B)) => (> A B)
@@ -486,6 +504,7 @@ push_nots(Expr *qual)
InvalidOid,
oper->opresulttype,
0, NULL);
return make_opclause(op, get_leftop(qual), get_rightop(qual));
}
else
@@ -496,7 +515,7 @@ push_nots(Expr *qual)
/*--------------------
* Apply DeMorgan's Laws:
* ("NOT" ("AND" A B)) => ("OR" ("NOT" A) ("NOT" B))
* ("NOT" ("OR" A B)) => ("AND" ("NOT" A) ("NOT" B))
* ("NOT" ("OR" A B)) => ("AND" ("NOT" A) ("NOT" B))
* i.e., swap AND for OR and negate all the subclauses.
*--------------------
*/
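A worked miniature of the DeMorgan rewrites listed above, over a three-constructor toy expression type. Operator negation (turning "<" into ">=" via a negator) is left out because it needs catalog lookups; everything here is invented for the illustration:

#include <stdio.h>
#include <stdlib.h>

/* Toy boolean expression: a named leaf, or AND/OR/NOT over children. */
typedef enum { E_LEAF, E_AND, E_OR, E_NOT } EKind;
typedef struct E
{
    EKind kind;
    const char *name;        /* leaf only */
    struct E *l, *r;         /* AND/OR use both, NOT uses l */
} E;

static E *mk(EKind k, const char *name, E *l, E *r)
{
    E *e = malloc(sizeof(E));
    e->kind = k; e->name = name; e->l = l; e->r = r;
    return e;
}

/* Return an expression equivalent to NOT e, with the NOT pushed inward
 * using DeMorgan's laws, the way push_nots does. */
static E *push_not(E *e)
{
    switch (e->kind)
    {
        case E_AND:  /* NOT (A AND B)  =>  (NOT A) OR (NOT B) */
            return mk(E_OR, NULL, push_not(e->l), push_not(e->r));
        case E_OR:   /* NOT (A OR B)   =>  (NOT A) AND (NOT B) */
            return mk(E_AND, NULL, push_not(e->l), push_not(e->r));
        case E_NOT:  /* NOT NOT A      =>  A */
            return e->l;
        default:     /* leaf: keep an explicit NOT */
            return mk(E_NOT, NULL, e, NULL);
    }
}

static void show(const E *e)
{
    switch (e->kind)
    {
        case E_LEAF: printf("%s", e->name); break;
        case E_NOT:  printf("NOT "); show(e->l); break;
        case E_AND:  printf("("); show(e->l); printf(" AND "); show(e->r); printf(")"); break;
        case E_OR:   printf("("); show(e->l); printf(" OR ");  show(e->r); printf(")"); break;
    }
}

int main(void)
{
    E *q = mk(E_AND, NULL, mk(E_LEAF, "A", NULL, NULL), mk(E_LEAF, "B", NULL, NULL));

    show(push_not(q));          /* prints (NOT A OR NOT B) */
    printf("\n");
    return 0;
}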
@@ -518,6 +537,7 @@ push_nots(Expr *qual)
}
else if (not_clause((Node *) qual))
{
/*
* Another 'not' cancels this 'not', so eliminate the 'not' and
* stop negating this branch. But search the subexpression for
@@ -527,6 +547,7 @@ push_nots(Expr *qual)
}
else
{
/*
* We don't know how to negate anything else, place a 'not' at
* this level.
@@ -544,7 +565,7 @@ push_nots(Expr *qual)
* Note that 'or' clauses will always be turned into 'and' clauses
* if they contain any 'and' subclauses.
*
* Returns the modified qualification. AND/OR flatness is preserved.
* Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_ors(Expr *qual)
@@ -601,17 +622,17 @@ or_normalize(List *orlist)
return lfirst(orlist); /* single-expression OR (can this happen?) */
/*
* If we have a choice of AND clauses, pick the one with the
* most subclauses. Because we initialized num_subclauses = 1,
* any AND clauses with only one arg will be ignored as useless.
* If we have a choice of AND clauses, pick the one with the most
* subclauses. Because we initialized num_subclauses = 1, any AND
* clauses with only one arg will be ignored as useless.
*/
foreach(temp, orlist)
{
Expr *clause = lfirst(temp);
Expr *clause = lfirst(temp);
if (and_clause((Node *) clause))
{
int nclauses = length(clause->args);
int nclauses = length(clause->args);
if (nclauses > num_subclauses)
{
@@ -622,12 +643,13 @@ or_normalize(List *orlist)
}
/* if there's no suitable AND clause, we can't transform the OR */
if (! distributable)
if (!distributable)
return make_orclause(orlist);
/* Caution: lremove destructively modifies the input orlist.
* This should be OK, since or_normalize is only called with
* freshly constructed lists that are not referenced elsewhere.
/*
* Caution: lremove destructively modifies the input orlist. This
* should be OK, since or_normalize is only called with freshly
* constructed lists that are not referenced elsewhere.
*/
orlist = lremove(distributable, orlist);
@@ -635,11 +657,12 @@ or_normalize(List *orlist)
{
Expr *andclause = lfirst(temp);
/* pull_ors is needed here in case andclause has a top-level OR.
* Then we recursively apply or_normalize, since there might
* be an AND subclause in the resulting OR-list.
* Note: we rely on pull_ors to build a fresh list,
* and not damage the given orlist.
/*
* pull_ors is needed here in case andclause has a top-level OR.
* Then we recursively apply or_normalize, since there might be an
* AND subclause in the resulting OR-list. Note: we rely on
* pull_ors to build a fresh list, and not damage the given
* orlist.
*/
andclause = or_normalize(pull_ors(lcons(andclause, orlist)));
andclauses = lappend(andclauses, andclause);
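The loop above distributes the chosen AND clause across the remaining OR arms; the underlying rewrite is (A AND B) OR C => (A OR C) AND (B OR C). A minimal sketch of that single distribution step on string atoms, leaving out list handling and the pick-the-largest-AND refinement:

#include <stdio.h>

/* One distribution step:
 *     (a1 AND a2 AND ...) OR rest...
 * becomes
 *     (a1 OR rest...) AND (a2 OR rest...) AND ...
 * Atoms are plain strings here; the real code works on expression nodes. */
static void distribute(const char **andargs, int nand,
                       const char **rest, int nrest)
{
    for (int i = 0; i < nand; i++)
    {
        printf("(%s", andargs[i]);
        for (int j = 0; j < nrest; j++)
            printf(" OR %s", rest[j]);
        printf(")%s", i + 1 < nand ? " AND " : "\n");
    }
}

int main(void)
{
    const char *andargs[] = {"A", "B"};
    const char *rest[] = {"C"};

    distribute(andargs, 2, rest, 1);    /* prints (A OR C) AND (B OR C) */
    return 0;
}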
@@ -658,7 +681,7 @@ or_normalize(List *orlist)
* Note that 'and' clauses will always be turned into 'or' clauses
* if they contain any 'or' subclauses.
*
* Returns the modified qualification. AND/OR flatness is preserved.
* Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_ands(Expr *qual)
@@ -712,20 +735,21 @@ and_normalize(List *andlist)
if (andlist == NIL)
return NULL; /* probably can't happen */
if (lnext(andlist) == NIL)
return lfirst(andlist); /* single-expression AND (can this happen?) */
return lfirst(andlist); /* single-expression AND (can this
* happen?) */
/*
* If we have a choice of OR clauses, pick the one with the
* most subclauses. Because we initialized num_subclauses = 1,
* any OR clauses with only one arg will be ignored as useless.
* If we have a choice of OR clauses, pick the one with the most
* subclauses. Because we initialized num_subclauses = 1, any OR
* clauses with only one arg will be ignored as useless.
*/
foreach(temp, andlist)
{
Expr *clause = lfirst(temp);
Expr *clause = lfirst(temp);
if (or_clause((Node *) clause))
{
int nclauses = length(clause->args);
int nclauses = length(clause->args);
if (nclauses > num_subclauses)
{
@@ -736,12 +760,13 @@ and_normalize(List *andlist)
}
/* if there's no suitable OR clause, we can't transform the AND */
if (! distributable)
if (!distributable)
return make_andclause(andlist);
/* Caution: lremove destructively modifies the input andlist.
* This should be OK, since and_normalize is only called with
* freshly constructed lists that are not referenced elsewhere.
/*
* Caution: lremove destructively modifies the input andlist. This
* should be OK, since and_normalize is only called with freshly
* constructed lists that are not referenced elsewhere.
*/
andlist = lremove(distributable, andlist);
@@ -749,11 +774,12 @@ and_normalize(List *andlist)
{
Expr *orclause = lfirst(temp);
/* pull_ands is needed here in case orclause has a top-level AND.
* Then we recursively apply and_normalize, since there might
* be an OR subclause in the resulting AND-list.
* Note: we rely on pull_ands to build a fresh list,
* and not damage the given andlist.
/*
* pull_ands is needed here in case orclause has a top-level AND.
* Then we recursively apply and_normalize, since there might be
* an OR subclause in the resulting AND-list. Note: we rely on
* pull_ands to build a fresh list, and not damage the given
* andlist.
*/
orclause = and_normalize(pull_ands(lcons(orclause, andlist)));
orclauses = lappend(orclauses, orclause);
@@ -767,7 +793,7 @@ and_normalize(List *andlist)
* qual_cleanup
* Fix up a qualification by removing duplicate entries (which could be
* created during normalization, if identical subexpressions from different
* parts of the tree are brought together). Also, check for AND and OR
* parts of the tree are brought together). Also, check for AND and OR
* clauses with only one remaining subexpression, and simplify.
*
* Returns the modified qualification.
@@ -828,7 +854,7 @@ remove_duplicates(List *list)
foreach(i, list)
{
if (! member(lfirst(i), result))
if (!member(lfirst(i), result))
result = lappend(result, lfirst(i));
}
return result;
@@ -855,7 +881,9 @@ count_bool_nodes(Expr *qual,
double *dnfnodes)
{
List *temp;
double subnodes, subcnfnodes, subdnfnodes;
double subnodes,
subcnfnodes,
subdnfnodes;
if (and_clause((Node *) qual))
{
@@ -864,13 +892,15 @@ count_bool_nodes(Expr *qual,
foreach(temp, qual->args)
{
count_bool_nodes(lfirst(temp),
count_bool_nodes(lfirst(temp),
&subnodes, &subcnfnodes, &subdnfnodes);
*nodes += subnodes;
*cnfnodes += subcnfnodes;
*dnfnodes *= subdnfnodes;
}
/* we could get dnfnodes < cnfnodes here, if all the sub-nodes are
/*
* we could get dnfnodes < cnfnodes here, if all the sub-nodes are
* simple ones with count 1. Make sure dnfnodes isn't too small.
*/
if (*dnfnodes < *cnfnodes)
@@ -883,13 +913,15 @@ count_bool_nodes(Expr *qual,
foreach(temp, qual->args)
{
count_bool_nodes(lfirst(temp),
count_bool_nodes(lfirst(temp),
&subnodes, &subcnfnodes, &subdnfnodes);
*nodes += subnodes;
*cnfnodes *= subcnfnodes;
*dnfnodes += subdnfnodes;
}
/* we could get cnfnodes < dnfnodes here, if all the sub-nodes are
/*
* we could get cnfnodes < dnfnodes here, if all the sub-nodes are
* simple ones with count 1. Make sure cnfnodes isn't too small.
*/
if (*cnfnodes < *dnfnodes)


@@ -4,7 +4,7 @@
* Routines to preprocess the parse tree target list
*
* This module takes care of altering the query targetlist as needed for
* INSERT, UPDATE, and DELETE queries. For INSERT and UPDATE queries,
* INSERT, UPDATE, and DELETE queries. For INSERT and UPDATE queries,
* the targetlist must contain an entry for each attribute of the target
* relation in the correct order. For both UPDATE and DELETE queries,
* we need a junk targetlist entry holding the CTID attribute --- the
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.35 2000/03/09 05:00:24 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.36 2000/04/12 17:15:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
static List *expand_targetlist(List *tlist, int command_type,
Index result_relation, List *range_table);
Index result_relation, List *range_table);
/*
@@ -46,6 +46,7 @@ preprocess_targetlist(List *tlist,
Index result_relation,
List *range_table)
{
/*
* for heap_formtuple to work, the targetlist must match the exact
* order of the attributes. We also need to fill in any missing
@@ -56,11 +57,11 @@ preprocess_targetlist(List *tlist,
result_relation, range_table);
/*
* for "update" and "delete" queries, add ctid of the result
* relation into the target list so that the ctid will propagate
* through execution and ExecutePlan() will be able to identify
* the right tuple to replace or delete. This extra field is
* marked "junk" so that it is not stored back into the tuple.
* for "update" and "delete" queries, add ctid of the result relation
* into the target list so that the ctid will propagate through
* execution and ExecutePlan() will be able to identify the right
* tuple to replace or delete. This extra field is marked "junk" so
* that it is not stored back into the tuple.
*/
if (command_type == CMD_UPDATE || command_type == CMD_DELETE)
{
@@ -78,7 +79,8 @@ preprocess_targetlist(List *tlist,
var = makeVar(result_relation, SelfItemPointerAttributeNumber,
TIDOID, -1, 0);
/* For an UPDATE, expand_targetlist already created a fresh tlist.
/*
* For an UPDATE, expand_targetlist already created a fresh tlist.
* For DELETE, better do a listCopy so that we don't destructively
* modify the original tlist (is this really necessary?).
*/
@@ -117,11 +119,11 @@ expand_targetlist(List *tlist, int command_type,
List *temp;
/*
* Keep a map of which tlist items we have transferred to new list.
* +1 here keeps palloc from complaining if old_tlist_len=0.
* Keep a map of which tlist items we have transferred to new list. +1
* here keeps palloc from complaining if old_tlist_len=0.
*/
tlistentry_used = (bool *) palloc((old_tlist_len+1) * sizeof(bool));
memset(tlistentry_used, 0, (old_tlist_len+1) * sizeof(bool));
tlistentry_used = (bool *) palloc((old_tlist_len + 1) * sizeof(bool));
memset(tlistentry_used, 0, (old_tlist_len + 1) * sizeof(bool));
/*
* Scan the tuple description in the relation's relcache entry to make
@@ -133,9 +135,9 @@ expand_targetlist(List *tlist, int command_type,
for (attrno = 1; attrno <= numattrs; attrno++)
{
Form_pg_attribute att_tup = rel->rd_att->attrs[attrno-1];
char *attrname = NameStr(att_tup->attname);
TargetEntry *new_tle = NULL;
Form_pg_attribute att_tup = rel->rd_att->attrs[attrno - 1];
char *attrname = NameStr(att_tup->attname);
TargetEntry *new_tle = NULL;
/*
* We match targetlist entries to attributes using the resname.
@@ -143,22 +145,22 @@ expand_targetlist(List *tlist, int command_type,
old_tlist_index = 0;
foreach(temp, tlist)
{
TargetEntry *old_tle = (TargetEntry *) lfirst(temp);
Resdom *resdom = old_tle->resdom;
TargetEntry *old_tle = (TargetEntry *) lfirst(temp);
Resdom *resdom = old_tle->resdom;
if (! tlistentry_used[old_tlist_index] &&
if (!tlistentry_used[old_tlist_index] &&
strcmp(resdom->resname, attrname) == 0 &&
! resdom->resjunk)
!resdom->resjunk)
{
/*
* We can recycle the old TLE+resdom if right resno; else
* make a new one to avoid modifying the old tlist structure.
* (Is preserving old tlist actually necessary?)
* make a new one to avoid modifying the old tlist
* structure. (Is preserving old tlist actually
* necessary?)
*/
if (resdom->resno == attrno)
{
new_tle = old_tle;
}
else
{
resdom = (Resdom *) copyObject((Node *) resdom);
@@ -173,14 +175,15 @@ expand_targetlist(List *tlist, int command_type,
if (new_tle == NULL)
{
/*
* Didn't find a matching tlist entry, so make one.
*
* For INSERT, generate a constant of the default value for
* the attribute type, or NULL if no default value.
* For INSERT, generate a constant of the default value for the
* attribute type, or NULL if no default value.
*
* For UPDATE, generate a Var reference to the existing value
* of the attribute, so that it gets copied to the new tuple.
* For UPDATE, generate a Var reference to the existing value of
* the attribute, so that it gets copied to the new tuple.
*/
Oid atttype = att_tup->atttypid;
int32 atttypmod = att_tup->atttypmod;
@@ -188,92 +191,96 @@ expand_targetlist(List *tlist, int command_type,
switch (command_type)
{
case CMD_INSERT:
{
{
#ifdef _DROP_COLUMN_HACK__
Datum typedefault;
Datum typedefault;
#else
Datum typedefault = get_typdefault(atttype);
#endif /* _DROP_COLUMN_HACK__ */
int typlen;
Const *temp_const;
Datum typedefault = get_typdefault(atttype);
#endif /* _DROP_COLUMN_HACK__ */
int typlen;
Const *temp_const;
#ifdef _DROP_COLUMN_HACK__
if (COLUMN_IS_DROPPED(att_tup))
typedefault = PointerGetDatum(NULL);
else
typedefault = get_typdefault(atttype);
#endif /* _DROP_COLUMN_HACK__ */
if (typedefault == PointerGetDatum(NULL))
typlen = 0;
else
{
/*
* Since this is an append or replace, the size of
* any set attribute is the size of the OID used to
* represent it.
*/
if (att_tup->attisset)
typlen = get_typlen(OIDOID);
if (COLUMN_IS_DROPPED(att_tup))
typedefault = PointerGetDatum(NULL);
else
typlen = get_typlen(atttype);
typedefault = get_typdefault(atttype);
#endif /* _DROP_COLUMN_HACK__ */
if (typedefault == PointerGetDatum(NULL))
typlen = 0;
else
{
/*
* Since this is an append or replace, the
* size of any set attribute is the size of
* the OID used to represent it.
*/
if (att_tup->attisset)
typlen = get_typlen(OIDOID);
else
typlen = get_typlen(atttype);
}
temp_const = makeConst(atttype,
typlen,
typedefault,
(typedefault == PointerGetDatum(NULL)),
false,
false, /* not a set */
false);
new_tle = makeTargetEntry(makeResdom(attrno,
atttype,
-1,
pstrdup(attrname),
0,
(Oid) 0,
false),
(Node *) temp_const);
break;
}
temp_const = makeConst(atttype,
typlen,
typedefault,
(typedefault == PointerGetDatum(NULL)),
false,
false, /* not a set */
false);
new_tle = makeTargetEntry(makeResdom(attrno,
atttype,
-1,
pstrdup(attrname),
0,
(Oid) 0,
false),
(Node *) temp_const);
break;
}
case CMD_UPDATE:
{
Var *temp_var;
#ifdef _DROP_COLUMN_HACK__
Node *temp_node = (Node *) NULL;
if (COLUMN_IS_DROPPED(att_tup))
{
temp_node = (Node *)makeConst(atttype, 0,
PointerGetDatum(NULL),
true,
false,
false, /* not a set */
false);
}
else
#endif /* _DROP_COLUMN_HACK__ */
temp_var = makeVar(result_relation, attrno, atttype,
atttypmod, 0);
#ifdef _DROP_COLUMN_HACK__
if (!temp_node)
temp_node = (Node *) temp_var;
#endif /* _DROP_COLUMN_HACK__ */
Var *temp_var;
new_tle = makeTargetEntry(makeResdom(attrno,
atttype,
atttypmod,
pstrdup(attrname),
0,
(Oid) 0,
false),
#ifdef _DROP_COLUMN_HACK__
temp_node);
Node *temp_node = (Node *) NULL;
if (COLUMN_IS_DROPPED(att_tup))
{
temp_node = (Node *) makeConst(atttype, 0,
PointerGetDatum(NULL),
true,
false,
false, /* not a set */
false);
}
else
#endif /* _DROP_COLUMN_HACK__ */
temp_var = makeVar(result_relation, attrno, atttype,
atttypmod, 0);
#ifdef _DROP_COLUMN_HACK__
if (!temp_node)
temp_node = (Node *) temp_var;
#endif /* _DROP_COLUMN_HACK__ */
new_tle = makeTargetEntry(makeResdom(attrno,
atttype,
atttypmod,
pstrdup(attrname),
0,
(Oid) 0,
false),
#ifdef _DROP_COLUMN_HACK__
temp_node);
#else
(Node *) temp_var);
#endif /* _DROP_COLUMN_HACK__ */
break;
}
(Node *) temp_var);
#endif /* _DROP_COLUMN_HACK__ */
break;
}
default:
elog(ERROR, "expand_targetlist: unexpected command_type");
break;
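The switch above fills in target-list entries the query did not supply: INSERT falls back to the type default (or NULL), UPDATE re-reads the existing column value. A toy rendering of that fill-in rule; the column table and "values" are made up, and none of this touches real catalogs:

#include <stdio.h>
#include <string.h>

/* Invented three-column target table with invented defaults. */
typedef enum { Q_INSERT, Q_UPDATE } QKind;

static const char *columns[]  = {"id", "name", "price"};
static const char *defaults[] = {"nextval('seq')", "NULL", "0.00"};

static int mentioned(const char *col, const char **given, int ngiven)
{
    for (int i = 0; i < ngiven; i++)
        if (strcmp(col, given[i]) == 0)
            return 1;
    return 0;
}

/* For every column the query did not mention, INSERT supplies the default
 * (or NULL) while UPDATE keeps the old value via a Var reference. */
static void expand(QKind kind, const char **given, int ngiven)
{
    for (int i = 0; i < 3; i++)
    {
        if (mentioned(columns[i], given, ngiven))
            printf("%-5s <- user expression\n", columns[i]);
        else if (kind == Q_INSERT)
            printf("%-5s <- %s (default)\n", columns[i], defaults[i]);
        else
            printf("%-5s <- old value (Var reference)\n", columns[i]);
    }
}

int main(void)
{
    const char *given[] = {"name"};

    puts("INSERT:"); expand(Q_INSERT, given, 1);
    puts("UPDATE:"); expand(Q_UPDATE, given, 1);
    return 0;
}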
@@ -285,18 +292,19 @@ expand_targetlist(List *tlist, int command_type,
/*
* Copy all unprocessed tlist entries to the end of the new tlist,
* making sure they are marked resjunk = true. Typical junk entries
* include ORDER BY or GROUP BY expressions (are these actually possible
* in an INSERT or UPDATE?), system attribute references, etc.
* making sure they are marked resjunk = true. Typical junk entries
* include ORDER BY or GROUP BY expressions (are these actually
* possible in an INSERT or UPDATE?), system attribute references,
* etc.
*/
old_tlist_index = 0;
foreach(temp, tlist)
{
TargetEntry *old_tle = (TargetEntry *) lfirst(temp);
TargetEntry *old_tle = (TargetEntry *) lfirst(temp);
if (! tlistentry_used[old_tlist_index])
if (!tlistentry_used[old_tlist_index])
{
Resdom *resdom;
Resdom *resdom;
resdom = (Resdom *) copyObject((Node *) old_tle->resdom);
resdom->resno = attrno++;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.47 2000/03/21 05:12:06 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.48 2000/04/12 17:15:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,7 +26,8 @@
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
typedef struct {
typedef struct
{
Index rt_index;
int sublevels_up;
Oid old_relid;
@@ -40,11 +41,11 @@ static RangeTblEntry *new_rangetable_entry(Oid new_relid,
RangeTblEntry *old_entry);
static void fix_parsetree_attnums(Index rt_index, Oid old_relid,
Oid new_relid, Query *parsetree);
static bool fix_parsetree_attnums_walker (Node *node,
fix_parsetree_attnums_context *context);
static bool fix_parsetree_attnums_walker(Node *node,
fix_parsetree_attnums_context *context);
static Append *make_append(List *appendplans, List *unionrtables,
Index rt_index,
List *inheritrtable, List *tlist);
Index rt_index,
List *inheritrtable, List *tlist);
/*
@@ -122,11 +123,11 @@ plan_union_queries(Query *parse)
/* Is this a simple one */
if (!union_all_found ||
!union_found ||
/* A trailing UNION negates the effect of earlier UNION ALLs */
/* A trailing UNION negates the effect of earlier UNION ALLs */
!last_union_all_flag)
{
List *hold_unionClause = parse->unionClause;
double tuple_fraction = -1.0; /* default processing */
double tuple_fraction = -1.0; /* default processing */
/* we will do sorting later, so don't do it now */
if (!union_all_found ||
@@ -134,6 +135,7 @@ plan_union_queries(Query *parse)
{
parse->sortClause = NIL;
parse->distinctClause = NIL;
/*
* force lower-level planning to assume that all tuples will
* be retrieved, even if it sees a LIMIT in the query node.
@@ -149,8 +151,9 @@ plan_union_queries(Query *parse)
{
Query *union_query = lfirst(ulist);
/* use subquery_planner here because the union'd queries
* have not been preprocessed yet. My goodness this is messy...
/*
* use subquery_planner here because the union'd queries have
* not been preprocessed yet. My goodness this is messy...
*/
union_plans = lappend(union_plans,
subquery_planner(union_query,
@@ -178,8 +181,8 @@ plan_union_queries(Query *parse)
* Recursion, but UNION only. The last one is a UNION, so it will
* not come here in recursion.
*
* XXX is it OK to pass default -1 to union_planner in this path,
* or should we force a tuple_fraction value?
* XXX is it OK to pass default -1 to union_planner in this path, or
* should we force a tuple_fraction value?
*/
union_plans = lcons(union_planner(parse, -1.0), NIL);
union_rts = lcons(parse->rtable, NIL);
@@ -189,11 +192,12 @@ plan_union_queries(Query *parse)
{
Query *union_all_query = lfirst(ulist);
/* use subquery_planner here because the union'd queries
* have not been preprocessed yet. My goodness this is messy...
/*
* use subquery_planner here because the union'd queries have
* not been preprocessed yet. My goodness this is messy...
*/
union_plans = lappend(union_plans,
subquery_planner(union_all_query, -1.0));
subquery_planner(union_all_query, -1.0));
union_rts = lappend(union_rts, union_all_query->rtable);
}
}
@@ -201,9 +205,11 @@ plan_union_queries(Query *parse)
/* We have already split UNION and UNION ALL and we made it consistent */
if (!last_union_all_flag)
{
/* Need SELECT DISTINCT behavior to implement UNION.
* Put back the held sortClause, add any missing columns to the
* sort clause, and set distinctClause properly.
/*
* Need SELECT DISTINCT behavior to implement UNION. Put back the
* held sortClause, add any missing columns to the sort clause,
* and set distinctClause properly.
*/
List *slitem;
@@ -215,7 +221,7 @@ plan_union_queries(Query *parse)
SortClause *scl = (SortClause *) lfirst(slitem);
TargetEntry *tle = get_sortgroupclause_tle(scl, parse->targetList);
if (! tle->resdom->resjunk)
if (!tle->resdom->resjunk)
parse->distinctClause = lappend(parse->distinctClause,
copyObject(scl));
}
@@ -226,9 +232,10 @@ plan_union_queries(Query *parse)
parse->distinctClause = NIL;
}
/* Make sure we don't try to apply the first query's grouping stuff
* to the Append node, either. Basically we don't want union_planner
* to do anything when we return control, except add the top sort/unique
/*
* Make sure we don't try to apply the first query's grouping stuff to
* the Append node, either. Basically we don't want union_planner to
* do anything when we return control, except add the top sort/unique
* nodes for DISTINCT processing if this wasn't UNION ALL, or the top
* sort node if it was UNION ALL with a user-provided sort clause.
*/
@@ -259,13 +266,13 @@ plan_union_queries(Query *parse)
*
* If grouping, aggregation, or sorting is specified in the parent plan,
* the subplans should not do any of those steps --- we must do those
* operations just once above the APPEND node. The given tlist has been
* operations just once above the APPEND node. The given tlist has been
* modified appropriately to remove group/aggregate expressions, but the
* Query node still has the relevant fields set. We remove them in the
* copies used for subplans (see plan_inherit_query).
*
* NOTE: this can be invoked recursively if more than one inheritance wildcard
* is present. At each level of recursion, the first wildcard remaining in
* is present. At each level of recursion, the first wildcard remaining in
* the rangetable is expanded.
*/
Append *
@@ -282,8 +289,8 @@ plan_inherit_queries(Query *parse, List *tlist, Index rt_index)
/*
* Remove the flag for this relation, since we're about to handle it.
* XXX destructive change to parent parse tree, but necessary to prevent
* infinite recursion.
* XXX destructive change to parent parse tree, but necessary to
* prevent infinite recursion.
*/
rt_entry->inh = false;
@@ -313,42 +320,44 @@ plan_inherit_query(Relids relids,
List *union_plans = NIL;
List *union_rtentries = NIL;
List *save_tlist = root->targetList;
double tuple_fraction;
double tuple_fraction;
List *i;
/*
* Avoid making copies of the root's tlist, which we aren't going to
* use anyway (we are going to make copies of the passed tlist, instead).
* use anyway (we are going to make copies of the passed tlist,
* instead).
*/
root->targetList = NIL;
/*
* If we are going to need sorting or grouping at the top level,
* force lower-level planners to assume that all tuples will be
* retrieved.
* If we are going to need sorting or grouping at the top level, force
* lower-level planners to assume that all tuples will be retrieved.
*/
if (root->distinctClause || root->sortClause ||
root->groupClause || root->hasAggs)
tuple_fraction = 0.0; /* will need all tuples from each subplan */
tuple_fraction = 0.0; /* will need all tuples from each subplan */
else
tuple_fraction = -1.0; /* default behavior is OK (I think) */
tuple_fraction = -1.0; /* default behavior is OK (I think) */
foreach(i, relids)
{
int relid = lfirsti(i);
/*
* Make a modifiable copy of the original query,
* and replace the target rangetable entry with a new one
* identifying this child table.
* Make a modifiable copy of the original query, and replace the
* target rangetable entry with a new one identifying this child
* table.
*/
Query *new_root = copyObject(root);
RangeTblEntry *new_rt_entry = new_rangetable_entry(relid,
rt_entry);
rt_store(rt_index, new_root->rtable, new_rt_entry);
/*
* Insert (a modifiable copy of) the desired simplified tlist
* into the subquery
* Insert (a modifiable copy of) the desired simplified tlist into
* the subquery
*/
new_root->targetList = copyObject(tlist);
@@ -360,14 +369,15 @@ plan_inherit_query(Relids relids,
new_root->sortClause = NIL;
new_root->groupClause = NIL;
new_root->havingQual = NULL;
new_root->hasAggs = false; /* shouldn't be any left ... */
new_root->hasAggs = false; /* shouldn't be any left ... */
/*
* Update attribute numbers in case child has different ordering
* of columns than parent (as can happen after ALTER TABLE).
*
* XXX This is a crock, and it doesn't really work. It'd be better
* to fix ALTER TABLE to preserve consistency of attribute numbering.
* to fix ALTER TABLE to preserve consistency of attribute
* numbering.
*/
fix_parsetree_attnums(rt_index,
rt_entry->relid,
@@ -397,23 +407,24 @@ find_all_inheritors(Oid parentrel)
List *unexamined_relids = lconsi(parentrel, NIL);
/*
* While the queue of unexamined relids is nonempty, remove the
* first element, mark it examined, and find its direct descendants.
* NB: cannot use foreach(), since we modify the queue inside loop.
* While the queue of unexamined relids is nonempty, remove the first
* element, mark it examined, and find its direct descendants. NB:
* cannot use foreach(), since we modify the queue inside loop.
*/
while (unexamined_relids != NIL)
{
Oid currentrel = lfirsti(unexamined_relids);
List *currentchildren;
Oid currentrel = lfirsti(unexamined_relids);
List *currentchildren;
unexamined_relids = lnext(unexamined_relids);
examined_relids = lappendi(examined_relids, currentrel);
currentchildren = find_inheritance_children(currentrel);
/*
* Add to the queue only those children not already seen.
* This could probably be simplified to a plain nconc,
* because our inheritance relationships should always be a
* strict tree, no? Should never find any matches, ISTM...
* Add to the queue only those children not already seen. This
* could probably be simplified to a plain nconc, because our
* inheritance relationships should always be a strict tree, no?
* Should never find any matches, ISTM...
*/
currentchildren = set_differencei(currentchildren, examined_relids);
unexamined_relids = LispUnioni(unexamined_relids, currentchildren);
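find_all_inheritors, as the comment says, is a breadth-first walk: pop one relid off the queue, record it as examined, and enqueue any children not already seen. The same loop over a hard-coded parent/child table, with invented helpers in place of find_inheritance_children and the List primitives:

#include <stdio.h>

/* Toy inheritance catalog: child_of[i][0] inherits from child_of[i][1]. */
static const int child_of[][2] = {
    {2, 1}, {3, 1}, {4, 2}, {5, 2}
};
#define NEDGES (int)(sizeof(child_of) / sizeof(child_of[0]))

static int seen(int relid, const int *ids, int n)
{
    for (int i = 0; i < n; i++)
        if (ids[i] == relid)
            return 1;
    return 0;
}

/* Breadth-first search like find_all_inheritors: examined[] plays the role
 * of examined_relids, queue[] the role of unexamined_relids. */
static int all_inheritors(int parent, int *examined)
{
    int queue[16];
    int qhead = 0, qtail = 0, nexam = 0;

    queue[qtail++] = parent;
    while (qhead < qtail)
    {
        int current = queue[qhead++];

        examined[nexam++] = current;
        for (int i = 0; i < NEDGES; i++)
            if (child_of[i][1] == current &&
                !seen(child_of[i][0], examined, nexam) &&
                !seen(child_of[i][0], queue + qhead, qtail - qhead))
                queue[qtail++] = child_of[i][0];
    }
    return nexam;
}

int main(void)
{
    int rels[16];
    int n = all_inheritors(1, rels);

    for (int i = 0; i < n; i++)
        printf("%d ", rels[i]);         /* prints 1 2 3 4 5 */
    printf("\n");
    return 0;
}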
@@ -477,7 +488,7 @@ new_rangetable_entry(Oid new_relid, RangeTblEntry *old_entry)
* 'old_relid' in 'parsetree' with the attribute numbers from
* 'new_relid'.
*
* The parsetree is MODIFIED IN PLACE. This is OK only because
* The parsetree is MODIFIED IN PLACE. This is OK only because
* plan_inherit_query made a copy of the tree for us to hack upon.
*/
static void
@@ -495,6 +506,7 @@ fix_parsetree_attnums(Index rt_index,
context.old_relid = old_relid;
context.new_relid = new_relid;
context.sublevels_up = 0;
/*
* We must scan both the targetlist and qual, but we know the
* havingQual is empty, so we can ignore it.
@@ -504,7 +516,7 @@ fix_parsetree_attnums(Index rt_index,
}
/*
* Adjust varnos for child tables. This routine makes it possible for
* Adjust varnos for child tables. This routine makes it possible for
* child tables to have different column positions for the "same" attribute
* as a parent, which helps ALTER TABLE ADD COLUMN. Unfortunately this isn't
* nearly enough to make it work transparently; there are other places where
@@ -513,8 +525,8 @@ fix_parsetree_attnums(Index rt_index,
* ALTER TABLE...
*/
static bool
fix_parsetree_attnums_walker (Node *node,
fix_parsetree_attnums_context *context)
fix_parsetree_attnums_walker(Node *node,
fix_parsetree_attnums_context *context)
{
if (node == NULL)
return false;
@@ -534,9 +546,10 @@ fix_parsetree_attnums_walker (Node *node,
}
if (IsA(node, SubLink))
{
/*
* Standard expression_tree_walker will not recurse into subselect,
* but here we must do so.
* Standard expression_tree_walker will not recurse into
* subselect, but here we must do so.
*/
SubLink *sub = (SubLink *) node;
@@ -588,9 +601,9 @@ make_append(List *appendplans,
node->plan.plan_width = 0;
foreach(subnode, appendplans)
{
Plan *subplan = (Plan *) lfirst(subnode);
Plan *subplan = (Plan *) lfirst(subnode);
if (subnode == appendplans) /* first node? */
if (subnode == appendplans) /* first node? */
node->plan.startup_cost = subplan->startup_cost;
node->plan.total_cost += subplan->total_cost;
node->plan.plan_rows += subplan->plan_rows;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.64 2000/04/04 01:21:46 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.65 2000/04/12 17:15:24 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -46,9 +46,9 @@ static bool pull_agg_clause_walker(Node *node, List **listptr);
static bool contain_subplans_walker(Node *node, void *context);
static bool pull_subplans_walker(Node *node, List **listptr);
static bool check_subplans_for_ungrouped_vars_walker(Node *node,
Query *context);
static int is_single_func(Node *node);
static Node *eval_const_expressions_mutator (Node *node, void *context);
Query *context);
static int is_single_func(Node *node);
static Node *eval_const_expressions_mutator(Node *node, void *context);
static Expr *simplify_op_or_func(Expr *expr, List *args);
@@ -340,18 +340,19 @@ make_ands_explicit(List *andclauses)
List *
make_ands_implicit(Expr *clause)
{
/*
* NB: because the parser sets the qual field to NULL in a query that
* has no WHERE clause, we must consider a NULL input clause as TRUE,
* even though one might more reasonably think it FALSE. Grumble.
* If this causes trouble, consider changing the parser's behavior.
* even though one might more reasonably think it FALSE. Grumble. If
* this causes trouble, consider changing the parser's behavior.
*/
if (clause == NULL)
return NIL; /* NULL -> NIL list == TRUE */
else if (and_clause((Node *) clause))
return clause->args;
else if (IsA(clause, Const) &&
! ((Const *) clause)->constisnull &&
!((Const *) clause)->constisnull &&
DatumGetInt32(((Const *) clause)->constvalue))
return NIL; /* constant TRUE input -> NIL list */
else
@@ -381,7 +382,8 @@ contain_agg_clause_walker(Node *node, void *context)
if (node == NULL)
return false;
if (IsA(node, Aggref))
return true; /* abort the tree traversal and return true */
return true; /* abort the tree traversal and return
* true */
return expression_tree_walker(node, contain_agg_clause_walker, context);
}
@@ -411,12 +413,14 @@ pull_agg_clause_walker(Node *node, List **listptr)
if (IsA(node, Aggref))
{
*listptr = lappend(*listptr, node);
/*
* Complain if the aggregate's argument contains any aggregates;
* nested agg functions are semantically nonsensical.
*/
if (contain_agg_clause(((Aggref *) node)->target))
elog(ERROR, "Aggregate function calls may not be nested");
/*
* Having checked that, we need not recurse into the argument.
*/
@@ -454,7 +458,8 @@ contain_subplans_walker(Node *node, void *context)
if (node == NULL)
return false;
if (is_subplan(node) || IsA(node, SubLink))
return true; /* abort the tree traversal and return true */
return true; /* abort the tree traversal and return
* true */
return expression_tree_walker(node, contain_subplans_walker, context);
}
@@ -462,7 +467,7 @@ contain_subplans_walker(Node *node, void *context)
* pull_subplans
* Recursively pulls all subplans from an expression tree.
*
* Returns list of subplan nodes found. Note the nodes themselves are not
* Returns list of subplan nodes found. Note the nodes themselves are not
* copied, only referenced.
*/
List *
@@ -507,7 +512,11 @@ void
check_subplans_for_ungrouped_vars(Node *clause,
Query *query)
{
/* No special setup needed; context for walker is just the Query pointer */
/*
* No special setup needed; context for walker is just the Query
* pointer
*/
check_subplans_for_ungrouped_vars_walker(clause, query);
}
@@ -517,17 +526,19 @@ check_subplans_for_ungrouped_vars_walker(Node *node,
{
if (node == NULL)
return false;
/*
* We can ignore Vars other than in subplan args lists,
* since the parser already checked 'em.
* We can ignore Vars other than in subplan args lists, since the
* parser already checked 'em.
*/
if (is_subplan(node))
{
/*
* The args list of the subplan node represents attributes from
* outside passed into the sublink.
*/
List *t;
List *t;
foreach(t, ((Expr *) node)->args)
{
@@ -539,10 +550,10 @@ check_subplans_for_ungrouped_vars_walker(Node *node,
/*
* We do not care about args that are not local variables;
* params or outer-level vars are not our responsibility to
* check. (The outer-level query passing them to us needs
* to worry, instead.)
* check. (The outer-level query passing them to us needs to
* worry, instead.)
*/
if (! IsA(thisarg, Var))
if (!IsA(thisarg, Var))
continue;
var = (Var *) thisarg;
if (var->varlevelsup > 0)
@@ -554,8 +565,8 @@ check_subplans_for_ungrouped_vars_walker(Node *node,
contained_in_group_clause = false;
foreach(gl, context->groupClause)
{
GroupClause *gcl = lfirst(gl);
Node *groupexpr;
GroupClause *gcl = lfirst(gl);
Node *groupexpr;
groupexpr = get_sortgroupclause_expr(gcl,
context->targetList);
@@ -569,14 +580,14 @@ check_subplans_for_ungrouped_vars_walker(Node *node,
if (!contained_in_group_clause)
{
/* Found an ungrouped argument. Complain. */
RangeTblEntry *rte;
char *attname;
RangeTblEntry *rte;
char *attname;
Assert(var->varno > 0 &&
var->varno <= length(context->rtable));
rte = rt_fetch(var->varno, context->rtable);
attname = get_attname(rte->relid, var->varattno);
if (! attname)
if (!attname)
elog(ERROR, "cache lookup of attribute %d in relation %u failed",
var->varattno, rte->relid);
elog(ERROR, "Sub-SELECT uses un-GROUPed attribute %s.%s from outer query",
@@ -585,7 +596,7 @@ check_subplans_for_ungrouped_vars_walker(Node *node,
}
}
return expression_tree_walker(node,
check_subplans_for_ungrouped_vars_walker,
check_subplans_for_ungrouped_vars_walker,
(void *) context);
}
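The walker above insists that every local Var handed into a sub-SELECT also appears in the outer query's GROUP BY list. A stripped-down sketch of that membership test using (relation, attribute) pairs; the real code compares whole expression trees and raises the un-GROUPed attribute error shown above:

#include <stdio.h>

/* A grouped-query check in miniature: every outer-relation column that a
 * sub-SELECT receives must appear in the outer GROUP BY list, otherwise
 * the reference is ambiguous.  Columns are (relation, attribute) pairs
 * here. */
typedef struct { int varno; int varattno; } Col;

static int in_group_by(Col c, const Col *group, int ngroup)
{
    for (int i = 0; i < ngroup; i++)
        if (group[i].varno == c.varno && group[i].varattno == c.varattno)
            return 1;
    return 0;
}

int main(void)
{
    Col group_by[] = {{1, 2}};              /* GROUP BY rel1.att2       */
    Col passed_in[] = {{1, 2}, {1, 3}};     /* args handed to a subplan */

    for (int i = 0; i < 2; i++)
        printf("rel %d att %d: %s\n",
               passed_in[i].varno, passed_in[i].varattno,
               in_group_by(passed_in[i], group_by, 1)
                   ? "ok" : "would raise the un-GROUPed attribute error");
    return 0;
}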
@@ -697,7 +708,7 @@ NumRelids(Node *clause)
* is referenced in the clause). The routine checks that the
* expression is of the form (var op something) or (something op var)
* where the var is an attribute of the specified relation, or
* a function of a var of the specified relation. If so, it
* a function of a var of the specified relation. If so, it
* returns the following info:
* the found relation number (same as targetrelid unless that is 0)
* the found var number (or InvalidAttrNumber if a function)
@@ -707,7 +718,7 @@ NumRelids(Node *clause)
* specifically 0 for the relid and attno, 0 for the constant value.
*
* Note that negative attno values are *not* invalid, but represent
* system attributes such as OID. It's sufficient to check for relid=0
* system attributes such as OID. It's sufficient to check for relid=0
* to determine whether the routine succeeded.
*/
void
@@ -785,15 +796,13 @@ default_results:
*flag |= SEL_CONSTANT;
}
else
{
*constval = 0;
}
}
/*
* is_single_func
* If the given expression is a function of a single relation,
* return the relation number; else return 0
* If the given expression is a function of a single relation,
* return the relation number; else return 0
*/
static int
is_single_func(Node *node)
@@ -804,7 +813,7 @@ is_single_func(Node *node)
if (length(varnos) == 1)
{
int funcvarno = lfirsti(varnos);
int funcvarno = lfirsti(varnos);
freeList(varnos);
return funcvarno;
@@ -922,7 +931,7 @@ CommuteClause(Expr *clause)
* expression tree, for example "2 + 2" => "4". More interestingly,
* we can reduce certain boolean expressions even when they contain
* non-constant subexpressions: "x OR true" => "true" no matter what
* the subexpression x is. (XXX We assume that no such subexpression
* the subexpression x is. (XXX We assume that no such subexpression
* will have important side-effects, which is not necessarily a good
* assumption in the presence of user-defined functions; do we need a
* pg_proc flag that prevents discarding the execution of a function?)
@@ -954,7 +963,7 @@ eval_const_expressions(Node *node)
}
static Node *
eval_const_expressions_mutator (Node *node, void *context)
eval_const_expressions_mutator(Node *node, void *context)
{
if (node == NULL)
return NULL;
@@ -963,21 +972,22 @@ eval_const_expressions_mutator (Node *node, void *context)
Expr *expr = (Expr *) node;
List *args;
Const *const_input;
Expr *newexpr;
Expr *newexpr;
/*
* Reduce constants in the Expr's arguments. We know args is
* either NIL or a List node, so we can call expression_tree_mutator
* directly rather than recursing to self.
* either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
eval_const_expressions_mutator,
(void *) context);
switch (expr->opType)
{
case OP_EXPR:
case FUNC_EXPR:
/*
* Code for op/func case is pretty bulky, so split it out
* as a separate function.
@@ -985,123 +995,131 @@ eval_const_expressions_mutator (Node *node, void *context)
newexpr = simplify_op_or_func(expr, args);
if (newexpr) /* successfully simplified it */
return (Node *) newexpr;
/* else fall out to build new Expr node with simplified args */
/*
* else fall out to build new Expr node with simplified
* args
*/
break;
case OR_EXPR:
{
/*
* OR arguments are handled as follows:
* non constant: keep
* FALSE: drop (does not affect result)
* TRUE: force result to TRUE
* NULL: keep only one
* We keep one NULL input because ExecEvalOr returns
* NULL when no input is TRUE and at least one is NULL.
*/
List *newargs = NIL;
List *arg;
bool haveNull = false;
bool forceTrue = false;
foreach(arg, args)
{
if (! IsA(lfirst(arg), Const))
/*
* OR arguments are handled as follows: non constant:
* keep FALSE: drop (does not affect result) TRUE:
* force result to TRUE NULL: keep only one We keep
* one NULL input because ExecEvalOr returns NULL when
* no input is TRUE and at least one is NULL.
*/
List *newargs = NIL;
List *arg;
bool haveNull = false;
bool forceTrue = false;
foreach(arg, args)
{
newargs = lappend(newargs, lfirst(arg));
continue;
if (!IsA(lfirst(arg), Const))
{
newargs = lappend(newargs, lfirst(arg));
continue;
}
const_input = (Const *) lfirst(arg);
if (const_input->constisnull)
haveNull = true;
else if (DatumGetInt32(const_input->constvalue))
forceTrue = true;
/* otherwise, we can drop the constant-false input */
}
const_input = (Const *) lfirst(arg);
if (const_input->constisnull)
haveNull = true;
else if (DatumGetInt32(const_input->constvalue))
forceTrue = true;
/* otherwise, we can drop the constant-false input */
/*
* We could return TRUE before falling out of the
* loop, but this coding method will be easier to
* adapt if we ever add a notion of non-removable
* functions. We'd need to check all the inputs for
* non-removability.
*/
if (forceTrue)
return MAKEBOOLCONST(true, false);
if (haveNull)
newargs = lappend(newargs, MAKEBOOLCONST(false, true));
/* If all the inputs are FALSE, result is FALSE */
if (newargs == NIL)
return MAKEBOOLCONST(false, false);
/* If only one nonconst-or-NULL input, it's the result */
if (lnext(newargs) == NIL)
return (Node *) lfirst(newargs);
/* Else we still need an OR node */
return (Node *) make_orclause(newargs);
}
/*
* We could return TRUE before falling out of the loop,
* but this coding method will be easier to adapt if
* we ever add a notion of non-removable functions.
* We'd need to check all the inputs for non-removability.
*/
if (forceTrue)
return MAKEBOOLCONST(true, false);
if (haveNull)
newargs = lappend(newargs, MAKEBOOLCONST(false, true));
/* If all the inputs are FALSE, result is FALSE */
if (newargs == NIL)
return MAKEBOOLCONST(false, false);
/* If only one nonconst-or-NULL input, it's the result */
if (lnext(newargs) == NIL)
return (Node *) lfirst(newargs);
/* Else we still need an OR node */
return (Node *) make_orclause(newargs);
}
case AND_EXPR:
{
/*
* AND arguments are handled as follows:
* non constant: keep
* TRUE: drop (does not affect result)
* FALSE: force result to FALSE
* NULL: keep only one
* We keep one NULL input because ExecEvalAnd returns
* NULL when no input is FALSE and at least one is NULL.
*/
List *newargs = NIL;
List *arg;
bool haveNull = false;
bool forceFalse = false;
foreach(arg, args)
{
if (! IsA(lfirst(arg), Const))
/*
* AND arguments are handled as follows: non constant:
* keep TRUE: drop (does not affect result) FALSE:
* force result to FALSE NULL: keep only one We keep
* one NULL input because ExecEvalAnd returns NULL
* when no input is FALSE and at least one is NULL.
*/
List *newargs = NIL;
List *arg;
bool haveNull = false;
bool forceFalse = false;
foreach(arg, args)
{
newargs = lappend(newargs, lfirst(arg));
continue;
if (!IsA(lfirst(arg), Const))
{
newargs = lappend(newargs, lfirst(arg));
continue;
}
const_input = (Const *) lfirst(arg);
if (const_input->constisnull)
haveNull = true;
else if (!DatumGetInt32(const_input->constvalue))
forceFalse = true;
/* otherwise, we can drop the constant-true input */
}
const_input = (Const *) lfirst(arg);
if (const_input->constisnull)
haveNull = true;
else if (! DatumGetInt32(const_input->constvalue))
forceFalse = true;
/* otherwise, we can drop the constant-true input */
/*
* We could return FALSE before falling out of the
* loop, but this coding method will be easier to
* adapt if we ever add a notion of non-removable
* functions. We'd need to check all the inputs for
* non-removability.
*/
if (forceFalse)
return MAKEBOOLCONST(false, false);
if (haveNull)
newargs = lappend(newargs, MAKEBOOLCONST(false, true));
/* If all the inputs are TRUE, result is TRUE */
if (newargs == NIL)
return MAKEBOOLCONST(true, false);
/* If only one nonconst-or-NULL input, it's the result */
if (lnext(newargs) == NIL)
return (Node *) lfirst(newargs);
/* Else we still need an AND node */
return (Node *) make_andclause(newargs);
}
/*
* We could return FALSE before falling out of the loop,
* but this coding method will be easier to adapt if
* we ever add a notion of non-removable functions.
* We'd need to check all the inputs for non-removability.
*/
if (forceFalse)
return MAKEBOOLCONST(false, false);
if (haveNull)
newargs = lappend(newargs, MAKEBOOLCONST(false, true));
/* If all the inputs are TRUE, result is TRUE */
if (newargs == NIL)
return MAKEBOOLCONST(true, false);
/* If only one nonconst-or-NULL input, it's the result */
if (lnext(newargs) == NIL)
return (Node *) lfirst(newargs);
/* Else we still need an AND node */
return (Node *) make_andclause(newargs);
}
case NOT_EXPR:
Assert(length(args) == 1);
if (! IsA(lfirst(args), Const))
if (!IsA(lfirst(args), Const))
break;
const_input = (Const *) lfirst(args);
/* NOT NULL => NULL */
if (const_input->constisnull)
return MAKEBOOLCONST(false, true);
/* otherwise pretty easy */
return MAKEBOOLCONST(! DatumGetInt32(const_input->constvalue),
return MAKEBOOLCONST(!DatumGetInt32(const_input->constvalue),
false);
case SUBPLAN_EXPR:
/*
* Safety measure per notes at head of this routine:
* return a SubPlan unchanged. Too late to do anything
* return a SubPlan unchanged. Too late to do anything
* with it. The arglist simplification above was wasted
* work (the list probably only contains Var nodes anyway).
* work (the list probably only contains Var nodes
* anyway).
*/
return (Node *) expr;
default:
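/*
 * Standalone sketch with invented names, not backend code, of the
 * three-valued OR folding rules documented above: constant FALSE inputs
 * are dropped, a constant TRUE forces the whole OR to TRUE, non-constant
 * inputs are kept, and at most one NULL survives because OR yields NULL
 * only when no input is TRUE and at least one input is NULL.  AND folding
 * is the mirror image (drop TRUE, force FALSE).
 */
#include <stdbool.h>
#include <stddef.h>

typedef enum
{
	IN_FALSE, IN_TRUE, IN_NULL, IN_NONCONST
} BoolInput;

typedef struct
{
	bool		force_true;		/* whole OR reduces to constant TRUE */
	bool		keep_one_null;	/* append a single NULL to the survivors */
	size_t		n_kept;			/* non-constant inputs that must be kept */
} OrFoldResult;

static OrFoldResult
fold_or_inputs(const BoolInput *inputs, size_t n)
{
	OrFoldResult result = {false, false, 0};
	size_t		i;

	for (i = 0; i < n; i++)
	{
		switch (inputs[i])
		{
			case IN_NONCONST:
				result.n_kept++;	/* cannot evaluate until runtime */
				break;
			case IN_NULL:
				result.keep_one_null = true;
				break;
			case IN_TRUE:
				result.force_true = true;	/* dominates everything else */
				break;
			case IN_FALSE:
				break;			/* dropped: cannot affect the result */
		}
	}
	return result;
}

/*
 * The caller then rebuilds the OR: constant TRUE if force_true, constant
 * FALSE if nothing survived, the single surviving input if only one did,
 * otherwise a new OR over the survivors (plus one NULL if keep_one_null).
 */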
@@ -1112,25 +1130,26 @@ eval_const_expressions_mutator (Node *node, void *context)
/*
* If we break out of the above switch on opType, then the
* expression cannot be simplified any further, so build
* and return a replacement Expr node using the
* possibly-simplified arguments and the original oper node.
* Can't use make_clause() here because we want to be sure
* the typeOid field is preserved...
* expression cannot be simplified any further, so build and
* return a replacement Expr node using the possibly-simplified
* arguments and the original oper node. Can't use make_clause()
* here because we want to be sure the typeOid field is
* preserved...
*/
newexpr = makeNode(Expr);
newexpr->typeOid = expr->typeOid;
newexpr->opType = expr->opType;
newexpr->oper = expr->oper;
newexpr->args = args;
return (Node *) newexpr;
newexpr->typeOid = expr->typeOid;
newexpr->opType = expr->opType;
newexpr->oper = expr->oper;
newexpr->args = args;
return (Node *) newexpr;
}
if (IsA(node, RelabelType))
{
/*
* If we can simplify the input to a constant, then we don't need
* the RelabelType node anymore: just change the type field of
* the Const node. Otherwise, copy the RelabelType node.
* the RelabelType node anymore: just change the type field of the
* Const node. Otherwise, copy the RelabelType node.
*/
RelabelType *relabel = (RelabelType *) node;
Node *arg;
@@ -1138,13 +1157,15 @@ eval_const_expressions_mutator (Node *node, void *context)
arg = eval_const_expressions_mutator(relabel->arg, context);
if (arg && IsA(arg, Const))
{
Const *con = (Const *) arg;
Const *con = (Const *) arg;
con->consttype = relabel->resulttype;
/*
* relabel's resulttypmod is discarded, which is OK for now;
* if the type actually needs a runtime length coercion then
* there should be a function call to do it just above this node.
* there should be a function call to do it just above this
* node.
*/
return (Node *) con;
}
@@ -1160,15 +1181,15 @@ eval_const_expressions_mutator (Node *node, void *context)
}
if (IsA(node, CaseExpr))
{
/*
* CASE expressions can be simplified if there are constant condition
* clauses:
* FALSE (or NULL): drop the alternative
* TRUE: drop all remaining alternatives
* If the first non-FALSE alternative is a constant TRUE, we can
* simplify the entire CASE to that alternative's expression.
* If there are no non-FALSE alternatives, we simplify the entire
* CASE to the default result (ELSE result).
* CASE expressions can be simplified if there are constant
* condition clauses: FALSE (or NULL): drop the alternative TRUE:
* drop all remaining alternatives If the first non-FALSE
* alternative is a constant TRUE, we can simplify the entire CASE
* to that alternative's expression. If there are no non-FALSE
* alternatives, we simplify the entire CASE to the default result
* (ELSE result).
*/
CaseExpr *caseexpr = (CaseExpr *) node;
CaseExpr *newcase;
@@ -1181,26 +1202,29 @@ eval_const_expressions_mutator (Node *node, void *context)
{
/* Simplify this alternative's condition and result */
CaseWhen *casewhen = (CaseWhen *)
expression_tree_mutator((Node *) lfirst(arg),
eval_const_expressions_mutator,
(void *) context);
expression_tree_mutator((Node *) lfirst(arg),
eval_const_expressions_mutator,
(void *) context);
Assert(IsA(casewhen, CaseWhen));
if (casewhen->expr == NULL ||
! IsA(casewhen->expr, Const))
!IsA(casewhen->expr, Const))
{
newargs = lappend(newargs, casewhen);
continue;
}
const_input = (Const *) casewhen->expr;
if (const_input->constisnull ||
! DatumGetInt32(const_input->constvalue))
!DatumGetInt32(const_input->constvalue))
continue; /* drop alternative with FALSE condition */
/*
* Found a TRUE condition. If it's the first (un-dropped)
* Found a TRUE condition. If it's the first (un-dropped)
* alternative, the CASE reduces to just this alternative.
*/
if (newargs == NIL)
return casewhen->result;
/*
* Otherwise, add it to the list, and drop all the rest.
*/
@@ -1211,7 +1235,11 @@ eval_const_expressions_mutator (Node *node, void *context)
/* Simplify the default result */
defresult = eval_const_expressions_mutator(caseexpr->defresult,
context);
/* If no non-FALSE alternatives, CASE reduces to the default result */
/*
* If no non-FALSE alternatives, CASE reduces to the default
* result
*/
if (newargs == NIL)
return defresult;
/* Otherwise we need a new CASE node */
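/*
 * Standalone sketch with invented names of the CASE reduction rules
 * described above: WHEN conditions that are constant FALSE or NULL are
 * dropped, the first constant TRUE truncates the list (and supplies the
 * answer if no non-constant condition precedes it), and if every
 * alternative is dropped the CASE collapses to the ELSE result.
 */
#include <stddef.h>
#include <stdbool.h>

typedef enum
{
	COND_FALSE, COND_NULL, COND_TRUE, COND_NONCONST
} WhenCond;

/*
 * Returns the index of the alternative the CASE reduces to, n if it
 * reduces to the ELSE result, or -1 if it cannot be collapsed to a
 * single expression because a non-constant condition survives.
 */
static int
reduce_case(const WhenCond *conds, size_t n)
{
	bool		saw_nonconst = false;
	size_t		i;

	for (i = 0; i < n; i++)
	{
		if (conds[i] == COND_FALSE || conds[i] == COND_NULL)
			continue;			/* drop this alternative */
		if (conds[i] == COND_NONCONST)
		{
			saw_nonconst = true;	/* must keep it; keep scanning */
			continue;
		}
		/* constant TRUE: everything after it is dead */
		return saw_nonconst ? -1 : (int) i;
	}
	if (saw_nonconst)
		return -1;				/* cannot collapse to a single expression */
	return (int) n;				/* every WHEN dropped: use the ELSE result */
}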
@@ -1224,21 +1252,21 @@ eval_const_expressions_mutator (Node *node, void *context)
}
if (IsA(node, Iter))
{
/*
* The argument of an Iter is normally a function call.
* We must not try to eliminate the function, but we
* can try to simplify its arguments. If, by chance,
* the arg is NOT a function then we go ahead and try to
* simplify it (by falling into expression_tree_mutator).
* Is that the right thing?
* The argument of an Iter is normally a function call. We must
* not try to eliminate the function, but we can try to simplify
* its arguments. If, by chance, the arg is NOT a function then
* we go ahead and try to simplify it (by falling into
* expression_tree_mutator). Is that the right thing?
*/
Iter *iter = (Iter *) node;
if (is_funcclause(iter->iterexpr))
{
Expr *func = (Expr *) iter->iterexpr;
Expr *newfunc;
Iter *newiter;
Expr *func = (Expr *) iter->iterexpr;
Expr *newfunc;
Iter *newiter;
newfunc = makeNode(Expr);
newfunc->typeOid = func->typeOid;
@@ -1254,12 +1282,13 @@ eval_const_expressions_mutator (Node *node, void *context)
return (Node *) newiter;
}
}
/*
* For any node type not handled above, we recurse using
* expression_tree_mutator, which will copy the node unchanged
* but try to simplify its arguments (if any) using this routine.
* For example: we cannot eliminate an ArrayRef node, but we
* might be able to simplify constant expressions in its subscripts.
* expression_tree_mutator, which will copy the node unchanged but try
* to simplify its arguments (if any) using this routine. For example:
* we cannot eliminate an ArrayRef node, but we might be able to
* simplify constant expressions in its subscripts.
*/
return expression_tree_mutator(node, eval_const_expressions_mutator,
(void *) context);
@@ -1289,31 +1318,32 @@ simplify_op_or_func(Expr *expr, List *args)
HeapTuple func_tuple;
Form_pg_proc funcform;
Type resultType;
Expr *newexpr;
Expr *newexpr;
Datum const_val;
bool const_is_null;
bool isDone;
/*
* For an operator or function, we cannot simplify unless all the inputs
* are constants. (XXX possible future improvement: if the op/func is
* strict and at least one input is NULL, we could simplify to NULL.
* But we do not currently have any way to know if the op/func is strict
* or not. For now, a NULL input is treated the same as any other
* constant node.)
* For an operator or function, we cannot simplify unless all the
* inputs are constants. (XXX possible future improvement: if the
* op/func is strict and at least one input is NULL, we could simplify
* to NULL. But we do not currently have any way to know if the
* op/func is strict or not. For now, a NULL input is treated the
* same as any other constant node.)
*/
foreach(arg, args)
{
if (! IsA(lfirst(arg), Const))
if (!IsA(lfirst(arg), Const))
return NULL;
}
/*
* Get the function procedure's OID and look to see
* whether it is marked proiscachable.
* Get the function procedure's OID and look to see whether it is
* marked proiscachable.
*/
if (expr->opType == OP_EXPR)
{
Oper *oper = (Oper *) expr->oper;
Oper *oper = (Oper *) expr->oper;
replace_opid(oper); /* OK to scribble on input to this extent */
funcid = oper->opid;
@@ -1321,7 +1351,7 @@ simplify_op_or_func(Expr *expr, List *args)
}
else
{
Func *func = (Func *) expr->oper;
Func *func = (Func *) expr->oper;
funcid = func->funcid;
result_typeid = func->functype;
@@ -1333,21 +1363,23 @@ simplify_op_or_func(Expr *expr, List *args)
if (!HeapTupleIsValid(func_tuple))
elog(ERROR, "Function OID %u does not exist", funcid);
funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
if (! funcform->proiscachable)
if (!funcform->proiscachable)
return NULL;
/*
* Also check to make sure it doesn't return a set.
*/
if (funcform->proretset)
return NULL;
/*
* OK, looks like we can simplify this operator/function.
*
* We use the executor's routine ExecEvalExpr() to avoid duplication of
* code and ensure we get the same result as the executor would get.
*
* Build a new Expr node containing the already-simplified arguments.
* The only other setup needed here is the replace_opid() that we already
* Build a new Expr node containing the already-simplified arguments. The
* only other setup needed here is the replace_opid() that we already
* did for the OP_EXPR case.
*/
newexpr = makeNode(Expr);
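/*
 * Minimal sketch with invented names of the three preconditions checked
 * above before an operator or function call may be folded to a constant:
 * every argument is already a Const, the pg_proc entry is marked
 * proiscachable, and it does not return a set.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
call_is_foldable(const bool *arg_is_const, size_t nargs,
				 bool proiscachable, bool proretset)
{
	size_t		i;

	for (i = 0; i < nargs; i++)
	{
		if (!arg_is_const[i])
			return false;		/* non-constant input: leave the call alone */
	}
	if (!proiscachable)
		return false;			/* result may depend on more than its inputs */
	if (proretset)
		return false;			/* set-returning functions are never folded */
	return true;				/* safe to evaluate once at plan time */
}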
@@ -1355,21 +1387,23 @@ simplify_op_or_func(Expr *expr, List *args)
newexpr->opType = expr->opType;
newexpr->oper = expr->oper;
newexpr->args = args;
/*
* It is OK to pass econtext = NULL because none of the ExecEvalExpr()
* code used in this situation will use econtext. That might seem
* fortuitous, but it's not so unreasonable --- a constant expression does
* not depend on context, by definition, n'est ce pas?
* fortuitous, but it's not so unreasonable --- a constant expression
* does not depend on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExpr((Node *) newexpr, NULL,
&const_is_null, &isDone);
Assert(isDone); /* if this isn't set, we blew it... */
pfree(newexpr);
/*
* Make the constant result node.
*
* XXX would it be better to take the result type from the
* pg_proc tuple, rather than the Oper or Func node?
* XXX would it be better to take the result type from the pg_proc tuple,
* rather than the Oper or Func node?
*/
resultType = typeidType(result_typeid);
return (Expr *) makeConst(result_typeid, typeLen(resultType),
@@ -1426,8 +1460,8 @@ simplify_op_or_func(Expr *expr, List *args)
*
* The walker routine should return "false" to continue the tree walk, or
* "true" to abort the walk and immediately return "true" to the top-level
* caller. This can be used to short-circuit the traversal if the walker
* has found what it came for. "false" is returned to the top-level caller
* caller. This can be used to short-circuit the traversal if the walker
* has found what it came for. "false" is returned to the top-level caller
* iff no invocation of the walker returned "true".
*
* The node types handled by expression_tree_walker include all those
@@ -1454,16 +1488,16 @@ simplify_op_or_func(Expr *expr, List *args)
*/
bool
expression_tree_walker(Node *node, bool (*walker) (), void *context)
expression_tree_walker(Node *node, bool (*walker) (), void *context)
{
List *temp;
/*
* The walker has already visited the current node,
* and so we need only recurse into any sub-nodes it has.
* The walker has already visited the current node, and so we need
* only recurse into any sub-nodes it has.
*
* We assume that the walker is not interested in List nodes per se,
* so when we expect a List we just recurse directly to self without
* We assume that the walker is not interested in List nodes per se, so
* when we expect a List we just recurse directly to self without
* bothering to call the walker.
*/
if (node == NULL)
@@ -1478,7 +1512,7 @@ expression_tree_walker(Node *node, bool (*walker) (), void *context)
break;
case T_Expr:
{
Expr *expr = (Expr *) node;
Expr *expr = (Expr *) node;
if (expr->opType == SUBPLAN_EXPR)
{
@@ -1500,6 +1534,7 @@ expression_tree_walker(Node *node, bool (*walker) (), void *context)
case T_ArrayRef:
{
ArrayRef *aref = (ArrayRef *) node;
/* recurse directly for upper/lower array index lists */
if (expression_tree_walker((Node *) aref->refupperindexpr,
walker, context))
@@ -1519,10 +1554,12 @@ expression_tree_walker(Node *node, bool (*walker) (), void *context)
case T_CaseExpr:
{
CaseExpr *caseexpr = (CaseExpr *) node;
/* we assume walker doesn't care about CaseWhens, either */
foreach(temp, caseexpr->args)
{
CaseWhen *when = (CaseWhen *) lfirst(temp);
Assert(IsA(when, CaseWhen));
if (walker(when->expr, context))
return true;
@@ -1538,12 +1575,14 @@ expression_tree_walker(Node *node, bool (*walker) (), void *context)
break;
case T_SubLink:
{
SubLink *sublink = (SubLink *) node;
SubLink *sublink = (SubLink *) node;
/* If the SubLink has already been processed by subselect.c,
* it will have lefthand=NIL, and we only need to look at
* the oper list. Otherwise we only need to look at lefthand
* (the Oper nodes in the oper list are deemed uninteresting).
/*
* If the SubLink has already been processed by
* subselect.c, it will have lefthand=NIL, and we only
* need to look at the oper list. Otherwise we only need
* to look at lefthand (the Oper nodes in the oper list
* are deemed uninteresting).
*/
if (sublink->lefthand)
return walker((Node *) sublink->lefthand, context);
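/*
 * Sketch of a typical walker obeying the contract documented above:
 * return false to keep walking, true to abort the walk.  It assumes the
 * backend declarations already included by this file; the function itself
 * is an invented example, not part of this commit.
 */
static bool
contains_aggref_walker(Node *node, void *context)
{
	if (node == NULL)
		return false;
	if (IsA(node, Aggref))
		return true;			/* found one: short-circuit the traversal */
	return expression_tree_walker(node, contains_aggref_walker, context);
}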
@@ -1628,18 +1667,19 @@ expression_tree_walker(Node *node, bool (*walker) (), void *context)
*/
Node *
expression_tree_mutator(Node *node, Node * (*mutator) (), void *context)
expression_tree_mutator(Node *node, Node *(*mutator) (), void *context)
{
/*
* The mutator has already decided not to modify the current node,
* but we must call the mutator for any sub-nodes.
* The mutator has already decided not to modify the current node, but
* we must call the mutator for any sub-nodes.
*/
#define FLATCOPY(newnode, node, nodetype) \
( (newnode) = makeNode(nodetype), \
memcpy((newnode), (node), sizeof(nodetype)) )
#define CHECKFLATCOPY(newnode, node, nodetype) \
#define CHECKFLATCOPY(newnode, node, nodetype) \
( AssertMacro(IsA((node), nodetype)), \
(newnode) = makeNode(nodetype), \
memcpy((newnode), (node), sizeof(nodetype)) )
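/*
 * Sketch of a typical mutator in the same style: nodes of interest are
 * copied and modified, everything else falls through to
 * expression_tree_mutator().  It assumes the backend declarations already
 * included by this file; the function is an invented example, not part of
 * this commit.
 */
static Node *
increment_varlevelsup_mutator(Node *node, void *context)
{
	if (node == NULL)
		return NULL;
	if (IsA(node, Var))
	{
		Var		   *var = (Var *) copyObject(node);

		var->varlevelsup++;		/* adjust the copy, never the original */
		return (Node *) var;
	}
	return expression_tree_mutator(node, increment_varlevelsup_mutator,
								   context);
}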
@@ -1659,31 +1699,41 @@ expression_tree_mutator(Node *node, Node * (*mutator) (), void *context)
return (Node *) copyObject(node);
case T_Expr:
{
Expr *expr = (Expr *) node;
Expr *newnode;
Expr *expr = (Expr *) node;
Expr *newnode;
FLATCOPY(newnode, expr, Expr);
if (expr->opType == SUBPLAN_EXPR)
{
SubLink *oldsublink = ((SubPlan *) expr->oper)->sublink;
SubPlan *newsubplan;
SubLink *oldsublink = ((SubPlan *) expr->oper)->sublink;
SubPlan *newsubplan;
/* flat-copy the oper node, which is a SubPlan */
CHECKFLATCOPY(newsubplan, expr->oper, SubPlan);
newnode->oper = (Node *) newsubplan;
/* likewise its SubLink node */
CHECKFLATCOPY(newsubplan->sublink, oldsublink, SubLink);
/* transform args list (params to be passed to subplan) */
/*
* transform args list (params to be passed to
* subplan)
*/
MUTATE(newnode->args, expr->args, List *);
/* transform sublink's oper list as well */
MUTATE(newsubplan->sublink->oper, oldsublink->oper, List*);
/* but not the subplan itself, which is referenced as-is */
MUTATE(newsubplan->sublink->oper, oldsublink->oper, List *);
/*
* but not the subplan itself, which is referenced
* as-is
*/
}
else
{
/* for other Expr node types, just transform args list,
* linking to original oper node (OK?)
/*
* for other Expr node types, just transform args
* list, linking to original oper node (OK?)
*/
MUTATE(newnode->args, expr->args, List *);
}
@@ -1692,8 +1742,8 @@ expression_tree_mutator(Node *node, Node * (*mutator) (), void *context)
break;
case T_Aggref:
{
Aggref *aggref = (Aggref *) node;
Aggref *newnode;
Aggref *aggref = (Aggref *) node;
Aggref *newnode;
FLATCOPY(newnode, aggref, Aggref);
MUTATE(newnode->target, aggref->target, Node *);
@@ -1702,8 +1752,8 @@ expression_tree_mutator(Node *node, Node * (*mutator) (), void *context)
break;
case T_Iter:
{
Iter *iter = (Iter *) node;
Iter *newnode;
Iter *iter = (Iter *) node;
Iter *newnode;
FLATCOPY(newnode, iter, Iter);
MUTATE(newnode->iterexpr, iter->iterexpr, Node *);
@@ -1763,12 +1813,14 @@ expression_tree_mutator(Node *node, Node * (*mutator) (), void *context)
break;
case T_SubLink:
{
/* A "bare" SubLink (note we will not come here if we found
* a SUBPLAN_EXPR node above it). Transform the lefthand side,
* but not the oper list nor the subquery.
/*
* A "bare" SubLink (note we will not come here if we
* found a SUBPLAN_EXPR node above it). Transform the
* lefthand side, but not the oper list nor the subquery.
*/
SubLink *sublink = (SubLink *) node;
SubLink *newnode;
SubLink *sublink = (SubLink *) node;
SubLink *newnode;
FLATCOPY(newnode, sublink, SubLink);
MUTATE(newnode->lefthand, sublink->lefthand, List *);
@@ -1777,9 +1829,12 @@ expression_tree_mutator(Node *node, Node * (*mutator) (), void *context)
break;
case T_List:
{
/* We assume the mutator isn't interested in the list nodes
* per se, so just invoke it on each list element.
* NOTE: this would fail badly on a list with integer elements!
/*
* We assume the mutator isn't interested in the list
* nodes per se, so just invoke it on each list element.
* NOTE: this would fail badly on a list with integer
* elements!
*/
List *resultlist = NIL;
List *temp;
@@ -1795,9 +1850,13 @@ expression_tree_mutator(Node *node, Node * (*mutator) (), void *context)
break;
case T_TargetEntry:
{
/* We mutate the expression, but not the resdom, by default. */
TargetEntry *targetentry = (TargetEntry *) node;
TargetEntry *newnode;
/*
* We mutate the expression, but not the resdom, by
* default.
*/
TargetEntry *targetentry = (TargetEntry *) node;
TargetEntry *newnode;
FLATCOPY(newnode, targetentry, TargetEntry);
MUTATE(newnode->expr, targetentry->expr, Node *);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.62 2000/03/22 22:08:35 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.63 2000/04/12 17:15:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,6 +42,7 @@ compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
return -1;
if (path1->startup_cost > path2->startup_cost)
return +1;
/*
* If paths have the same startup cost (not at all unlikely),
* order them by total cost.
@@ -57,6 +58,7 @@ compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
return -1;
if (path1->total_cost > path2->total_cost)
return +1;
/*
* If paths have the same total cost, order them by startup cost.
*/
@@ -172,7 +174,8 @@ set_cheapest(RelOptInfo *parent_rel)
void
add_path(RelOptInfo *parent_rel, Path *new_path)
{
bool accept_new = true; /* unless we find a superior old path */
bool accept_new = true; /* unless we find a superior old
* path */
List *p1_prev = NIL;
List *p1;
@@ -184,36 +187,39 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
foreach(p1, parent_rel->pathlist)
{
Path *old_path = (Path *) lfirst(p1);
bool remove_old = false; /* unless new proves superior */
bool remove_old = false; /* unless new proves superior */
int costcmp;
costcmp = compare_path_costs(new_path, old_path, TOTAL_COST);
/*
* If the two paths compare differently for startup and total cost,
* then we want to keep both, and we can skip the (much slower)
* comparison of pathkeys. If they compare the same, proceed with
* the pathkeys comparison. Note this test relies on the fact that
* compare_path_costs will only return 0 if both costs are equal
* (and, therefore, there's no need to call it twice in that case).
* If the two paths compare differently for startup and total
* cost, then we want to keep both, and we can skip the (much
* slower) comparison of pathkeys. If they compare the same,
* proceed with the pathkeys comparison. Note this test relies on
* the fact that compare_path_costs will only return 0 if both
* costs are equal (and, therefore, there's no need to call it
* twice in that case).
*/
if (costcmp == 0 ||
costcmp == compare_path_costs(new_path, old_path, STARTUP_COST))
costcmp == compare_path_costs(new_path, old_path, STARTUP_COST))
{
switch (compare_pathkeys(new_path->pathkeys, old_path->pathkeys))
{
case PATHKEYS_EQUAL:
if (costcmp < 0)
remove_old = true; /* new dominates old */
remove_old = true; /* new dominates old */
else
accept_new = false; /* old equals or dominates new */
accept_new = false; /* old equals or dominates
* new */
break;
case PATHKEYS_BETTER1:
if (costcmp <= 0)
remove_old = true; /* new dominates old */
remove_old = true; /* new dominates old */
break;
case PATHKEYS_BETTER2:
if (costcmp >= 0)
accept_new = false; /* old dominates new */
accept_new = false; /* old dominates new */
break;
case PATHKEYS_DIFFERENT:
/* keep both paths, since they have different ordering */
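/*
 * Standalone sketch with invented names of the domination test performed
 * in the switch above: given how the two paths compare on cost (when
 * startup and total cost agree on a winner, or are exactly equal) and on
 * pathkeys, decide whether the new path makes the old one obsolete, or
 * vice versa, or whether both are worth keeping.
 */
typedef enum
{
	KEYS_EQUAL, KEYS_BETTER_NEW, KEYS_BETTER_OLD, KEYS_DIFFERENT
} KeysCmp;

typedef enum
{
	KEEP_BOTH, DROP_OLD, DROP_NEW
} PathVerdict;

static PathVerdict
judge_paths(int costcmp, KeysCmp keyscmp)
{
	switch (keyscmp)
	{
		case KEYS_EQUAL:
			/* same ordering: cheaper path wins, ties favor the old path */
			return (costcmp < 0) ? DROP_OLD : DROP_NEW;
		case KEYS_BETTER_NEW:
			return (costcmp <= 0) ? DROP_OLD : KEEP_BOTH;
		case KEYS_BETTER_OLD:
			return (costcmp >= 0) ? DROP_NEW : KEEP_BOTH;
		case KEYS_DIFFERENT:
			return KEEP_BOTH;	/* different orderings may each be useful */
	}
	return KEEP_BOTH;			/* not reached */
}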
@@ -241,7 +247,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
* scanning the pathlist; we will not add new_path, and we assume
* new_path cannot dominate any other elements of the pathlist.
*/
if (! accept_new)
if (!accept_new)
break;
}
@@ -315,12 +321,14 @@ create_index_path(Query *root,
if (pathnode->path.pathkeys == NIL)
{
/* No ordering available from index, is that OK? */
if (! ScanDirectionIsNoMovement(indexscandir))
if (!ScanDirectionIsNoMovement(indexscandir))
elog(ERROR, "create_index_path: failed to create ordered index scan");
}
else
{
/* The index is ordered, and build_index_pathkeys defaulted to
/*
* The index is ordered, and build_index_pathkeys defaulted to
* forward scan, so make sure we mark the pathnode properly.
*/
if (ScanDirectionIsNoMovement(indexscandir))
@@ -341,11 +349,11 @@ create_index_path(Query *root,
pathnode->indexscandir = indexscandir;
/*
* This routine is only used to generate "standalone" indexpaths,
* not nestloop inner indexpaths. So joinrelids is always NIL
* and the number of rows is the same as the parent rel's estimate.
* This routine is only used to generate "standalone" indexpaths, not
* nestloop inner indexpaths. So joinrelids is always NIL and the
* number of rows is the same as the parent rel's estimate.
*/
pathnode->joinrelids = NIL; /* no join clauses here */
pathnode->joinrelids = NIL; /* no join clauses here */
pathnode->rows = rel->rows;
cost_index(&pathnode->path, root, rel, index, indexquals, false);
@@ -359,20 +367,23 @@ create_index_path(Query *root,
* pathnode.
*
*/
TidPath *
TidPath *
create_tidscan_path(RelOptInfo *rel, List *tideval)
{
TidPath *pathnode = makeNode(TidPath);
TidPath *pathnode = makeNode(TidPath);
pathnode->path.pathtype = T_TidScan;
pathnode->path.parent = rel;
pathnode->path.pathkeys = NIL;
pathnode->tideval = copyObject(tideval); /* is copy really necessary? */
pathnode->tideval = copyObject(tideval); /* is copy really
* necessary? */
pathnode->unjoined_relids = NIL;
cost_tidscan(&pathnode->path, rel, tideval);
/* divide selectivity for each clause to get an equal selectivity
* as IndexScan does OK ?
/*
* divide selectivity for each clause to get an equal selectivity as
* IndexScan does OK ?
*/
return pathnode;
@@ -485,7 +496,7 @@ create_mergejoin_path(RelOptInfo *joinrel,
* 'innerdisbursion' is an estimate of the disbursion of the inner hash key
*
*/
HashPath *
HashPath *
create_hashjoin_path(RelOptInfo *joinrel,
Path *outer_path,
Path *inner_path,

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.49 2000/02/18 09:30:09 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.50 2000/04/12 17:15:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,7 +50,7 @@ relation_info(Query *root, Index relid,
Form_pg_class relation;
relationTuple = SearchSysCacheTuple(RELOID,
ObjectIdGetDatum(relationObjectId),
ObjectIdGetDatum(relationObjectId),
0, 0, 0);
if (!HeapTupleIsValid(relationTuple))
elog(ERROR, "relation_info: Relation %u not found",
@@ -81,7 +81,7 @@ find_secondary_indexes(Query *root, Index relid)
Oid indrelid = getrelid(relid, root->rtable);
Relation relation;
HeapScanDesc scan;
ScanKeyData indexKey;
ScanKeyData indexKey;
HeapTuple indexTuple;
/* Scan pg_index for tuples describing indexes of this rel */
@@ -97,27 +97,28 @@ find_secondary_indexes(Query *root, Index relid)
while (HeapTupleIsValid(indexTuple = heap_getnext(scan, 0)))
{
Form_pg_index index = (Form_pg_index) GETSTRUCT(indexTuple);
IndexOptInfo *info = makeNode(IndexOptInfo);
int i;
Relation indexRelation;
Oid relam;
uint16 amorderstrategy;
Form_pg_index index = (Form_pg_index) GETSTRUCT(indexTuple);
IndexOptInfo *info = makeNode(IndexOptInfo);
int i;
Relation indexRelation;
Oid relam;
uint16 amorderstrategy;
/*
* Need to make these arrays large enough to be sure there is a
* terminating 0 at the end of each one.
*/
info->classlist = (Oid *) palloc(sizeof(Oid) * (INDEX_MAX_KEYS+1));
info->indexkeys = (int *) palloc(sizeof(int) * (INDEX_MAX_KEYS+1));
info->ordering = (Oid *) palloc(sizeof(Oid) * (INDEX_MAX_KEYS+1));
info->classlist = (Oid *) palloc(sizeof(Oid) * (INDEX_MAX_KEYS + 1));
info->indexkeys = (int *) palloc(sizeof(int) * (INDEX_MAX_KEYS + 1));
info->ordering = (Oid *) palloc(sizeof(Oid) * (INDEX_MAX_KEYS + 1));
/* Extract info from the pg_index tuple */
info->indexoid = index->indexrelid;
info->indproc = index->indproc; /* functional index ?? */
if (VARSIZE(&index->indpred) != 0) /* partial index ?? */
info->indproc = index->indproc; /* functional index ?? */
if (VARSIZE(&index->indpred) != 0) /* partial index ?? */
{
char *predString = fmgr(F_TEXTOUT, &index->indpred);
info->indpred = (List *) stringToNode(predString);
pfree(predString);
}
@@ -143,26 +144,25 @@ find_secondary_indexes(Query *root, Index relid)
index_close(indexRelation);
/*
* Fetch the ordering operators associated with the index,
* if any.
* Fetch the ordering operators associated with the index, if any.
*/
MemSet(info->ordering, 0, sizeof(Oid) * (INDEX_MAX_KEYS+1));
MemSet(info->ordering, 0, sizeof(Oid) * (INDEX_MAX_KEYS + 1));
if (amorderstrategy != 0)
{
for (i = 0; i < INDEX_MAX_KEYS && index->indclass[i]; i++)
{
HeapTuple amopTuple;
Form_pg_amop amop;
HeapTuple amopTuple;
Form_pg_amop amop;
amopTuple =
SearchSysCacheTuple(AMOPSTRATEGY,
ObjectIdGetDatum(relam),
ObjectIdGetDatum(index->indclass[i]),
ObjectIdGetDatum(index->indclass[i]),
UInt16GetDatum(amorderstrategy),
0);
if (!HeapTupleIsValid(amopTuple))
elog(ERROR, "find_secondary_indexes: no amop %u %u %d",
relam, index->indclass[i], (int) amorderstrategy);
relam, index->indclass[i], (int) amorderstrategy);
amop = (Form_pg_amop) GETSTRUCT(amopTuple);
info->ordering[i] = amop->amopopr;
}

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/relnode.c,v 1.25 2000/02/18 23:47:31 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/relnode.c,v 1.26 2000/04/12 17:15:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,15 +24,15 @@
static List *new_join_tlist(List *tlist, int first_resdomno);
static List *build_joinrel_restrictlist(RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel);
RelOptInfo *outer_rel,
RelOptInfo *inner_rel);
static void build_joinrel_joinlist(RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel);
RelOptInfo *outer_rel,
RelOptInfo *inner_rel);
static List *subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
List *joininfo_list);
List *joininfo_list);
static void subbuild_joinrel_joinlist(RelOptInfo *joinrel,
List *joininfo_list);
List *joininfo_list);
/*
@@ -50,7 +50,10 @@ get_base_rel(Query *root, int relid)
{
rel = (RelOptInfo *) lfirst(baserels);
/* We know length(rel->relids) == 1 for all members of base_rel_list */
/*
* We know length(rel->relids) == 1 for all members of
* base_rel_list
*/
if (lfirsti(rel->relids) == relid)
return rel;
}
@@ -75,18 +78,20 @@ get_base_rel(Query *root, int relid)
if (relid < 0)
{
/*
* If the relation is a materialized relation, assume
* constants for sizes.
* If the relation is a materialized relation, assume constants
* for sizes.
*/
rel->pages = _NONAME_RELATION_PAGES_;
rel->tuples = _NONAME_RELATION_TUPLES_;
}
else
{
/*
* Otherwise, retrieve relation statistics from the
* system catalogs.
* Otherwise, retrieve relation statistics from the system
* catalogs.
*/
relation_info(root, relid,
&rel->indexed, &rel->pages, &rel->tuples);
@@ -162,6 +167,7 @@ get_join_rel(Query *root,
if (joinrel)
{
/*
* Yes, so we only need to figure the restrictlist for this
* particular pair of component relations.
@@ -198,13 +204,13 @@ get_join_rel(Query *root,
* of the outer and inner join relations and then merging the results
* together.
*
* NOTE: the tlist order for a join rel will depend on which pair
* of outer and inner rels we first try to build it from. But the
* NOTE: the tlist order for a join rel will depend on which pair of
* outer and inner rels we first try to build it from. But the
* contents should be the same regardless.
*
* XXX someday: consider pruning vars from the join's targetlist
* if they are needed only to evaluate restriction clauses of this
* join, and will never be accessed at higher levels of the plantree.
* XXX someday: consider pruning vars from the join's targetlist if they
* are needed only to evaluate restriction clauses of this join, and
* will never be accessed at higher levels of the plantree.
*/
new_outer_tlist = new_join_tlist(outer_rel->targetlist, 1);
new_inner_tlist = new_join_tlist(inner_rel->targetlist,
@@ -212,9 +218,9 @@ get_join_rel(Query *root,
joinrel->targetlist = nconc(new_outer_tlist, new_inner_tlist);
/*
* Construct restrict and join clause lists for the new joinrel.
* (The caller might or might not need the restrictlist, but
* I need it anyway for set_joinrel_size_estimates().)
* Construct restrict and join clause lists for the new joinrel. (The
* caller might or might not need the restrictlist, but I need it
* anyway for set_joinrel_size_estimates().)
*/
restrictlist = build_joinrel_restrictlist(joinrel, outer_rel, inner_rel);
if (restrictlist_ptr)
@@ -246,7 +252,7 @@ get_join_rel(Query *root,
*
* XXX the above comment refers to code that is long dead and gone;
* we don't keep track of joinlists for individual targetlist entries
* anymore. For now, all vars present in either input tlist will be
* anymore. For now, all vars present in either input tlist will be
* emitted in the join's tlist.
*
* 'tlist' is the target list of one of the join relations
@@ -286,16 +292,16 @@ new_join_tlist(List *tlist,
* the join lists need only be computed once for any join RelOptInfo.
* The join lists are fully determined by the set of rels making up the
* joinrel, so we should get the same results (up to ordering) from any
* candidate pair of sub-relations. But the restriction list is whatever
* candidate pair of sub-relations. But the restriction list is whatever
* is not handled in the sub-relations, so it depends on which
* sub-relations are considered.
*
* If a join clause from an input relation refers to base rels still not
* present in the joinrel, then it is still a join clause for the joinrel;
* we put it into an appropriate JoinInfo list for the joinrel. Otherwise,
* we put it into an appropriate JoinInfo list for the joinrel. Otherwise,
* the clause is now a restrict clause for the joined relation, and we
* return it to the caller of build_joinrel_restrictlist() to be stored in
* join paths made from this pair of sub-relations. (It will not need to
* join paths made from this pair of sub-relations. (It will not need to
* be considered further up the join tree.)
*
* 'joinrel' is a join relation node
@@ -304,11 +310,11 @@ new_join_tlist(List *tlist,
*
* build_joinrel_restrictlist() returns a list of relevant restrictinfos,
* whereas build_joinrel_joinlist() stores its results in the joinrel's
* joininfo lists. One or the other must accept each given clause!
* joininfo lists. One or the other must accept each given clause!
*
* NB: Formerly, we made deep(!) copies of each input RestrictInfo to pass
* up to the join relation. I believe this is no longer necessary, because
* RestrictInfo nodes are no longer context-dependent. Instead, just include
* RestrictInfo nodes are no longer context-dependent. Instead, just include
* the original nodes in the lists made for the join relation.
*/
static List *
@@ -316,9 +322,10 @@ build_joinrel_restrictlist(RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel)
{
/*
* We must eliminate duplicates, since we will see the
* same clauses arriving from both input relations...
* We must eliminate duplicates, since we will see the same clauses
* arriving from both input relations...
*/
return LispUnion(subbuild_joinrel_restrictlist(joinrel,
outer_rel->joininfo),
@@ -348,6 +355,7 @@ subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
if (is_subseti(joininfo->unjoined_relids, joinrel->relids))
{
/*
* Clauses in this JoinInfo list become restriction clauses
* for the joinrel, since they refer to no outside rels.
@@ -360,9 +368,10 @@ subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
}
else
{
/*
* These clauses are still join clauses at this level,
* so we ignore them in this routine.
* These clauses are still join clauses at this level, so we
* ignore them in this routine.
*/
}
}
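/*
 * Standalone sketch of the partitioning rule described above: a join
 * clause whose remaining ("unjoined") relids all belong to the new joinrel
 * becomes a restriction clause; otherwise it stays a join clause.  Relid
 * sets are modeled as bitmasks purely for illustration; the real code uses
 * integer lists and is_subseti().
 */
#include <stdbool.h>

typedef unsigned int RelidSet;	/* bit i set => base rel i is present */

static bool
clause_becomes_restriction(RelidSet unjoined_relids, RelidSet joinrel_relids)
{
	/* every unjoined relid appears in the joinrel => no outside rels left */
	return (unjoined_relids & ~joinrel_relids) == 0;
}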
@@ -385,18 +394,20 @@ subbuild_joinrel_joinlist(RelOptInfo *joinrel,
joinrel->relids);
if (new_unjoined_relids == NIL)
{
/*
* Clauses in this JoinInfo list become restriction clauses
* for the joinrel, since they refer to no outside rels.
* So we can ignore them in this routine.
* for the joinrel, since they refer to no outside rels. So we
* can ignore them in this routine.
*/
}
else
{
/*
* These clauses are still join clauses at this level,
* so find or make the appropriate JoinInfo item for the joinrel,
* and add the clauses to it (eliminating duplicates).
* These clauses are still join clauses at this level, so find
* or make the appropriate JoinInfo item for the joinrel, and
* add the clauses to it (eliminating duplicates).
*/
JoinInfo *new_joininfo;

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.43 2000/01/27 18:11:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.44 2000/04/12 17:15:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,7 +27,7 @@
/*
* tlistentry_member
* Finds the (first) member of the given tlist whose expression is
* equal() to the given expression. Result is NULL if no such member.
* equal() to the given expression. Result is NULL if no such member.
*/
TargetEntry *
tlistentry_member(Node *node, List *targetlist)
@@ -36,7 +36,7 @@ tlistentry_member(Node *node, List *targetlist)
foreach(temp, targetlist)
{
TargetEntry *tlentry = (TargetEntry *) lfirst(temp);
TargetEntry *tlentry = (TargetEntry *) lfirst(temp);
if (equal(node, tlentry->expr))
return tlentry;
@@ -87,12 +87,12 @@ tlist_member(Node *node, List *targetlist)
void
add_var_to_tlist(RelOptInfo *rel, Var *var)
{
if (! tlistentry_member((Node *) var, rel->targetlist))
if (!tlistentry_member((Node *) var, rel->targetlist))
{
/* XXX is copyObject necessary here? */
rel->targetlist = lappend(rel->targetlist,
create_tl_element((Var *) copyObject(var),
length(rel->targetlist) + 1));
create_tl_element((Var *) copyObject(var),
length(rel->targetlist) + 1));
}
}
@@ -189,7 +189,7 @@ add_to_flat_tlist(List *tlist, List *vars)
{
Var *var = lfirst(v);
if (! tlistentry_member((Node *) var, tlist))
if (!tlistentry_member((Node *) var, tlist))
{
Resdom *r;

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.25 2000/01/26 05:56:40 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.26 2000/04/12 17:15:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -20,7 +20,8 @@
#include "optimizer/var.h"
typedef struct {
typedef struct
{
List *varlist;
bool includeUpperVars;
} pull_var_clause_context;
@@ -28,7 +29,7 @@ typedef struct {
static bool pull_varnos_walker(Node *node, List **listptr);
static bool contain_var_clause_walker(Node *node, void *context);
static bool pull_var_clause_walker(Node *node,
pull_var_clause_context *context);
pull_var_clause_context *context);
/*
@@ -54,7 +55,8 @@ pull_varnos_walker(Node *node, List **listptr)
return false;
if (IsA(node, Var))
{
Var *var = (Var *) node;
Var *var = (Var *) node;
if (var->varlevelsup == 0 && !intMember(var->varno, *listptr))
*listptr = lconsi(var->varno, *listptr);
return false;
@@ -83,7 +85,8 @@ contain_var_clause_walker(Node *node, void *context)
if (IsA(node, Var))
{
if (((Var *) node)->varlevelsup == 0)
return true; /* abort the tree traversal and return true */
return true; /* abort the tree traversal and return
* true */
return false;
}
return expression_tree_walker(node, contain_var_clause_walker, context);
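/*
 * Sketch of a walker that collects matches through a context struct, in
 * the same style as pull_var_clause_context above.  It assumes the backend
 * declarations already included by this file; the struct and function are
 * invented examples, not part of this commit.
 */
typedef struct
{
	List	   *varlist;
	Index		target_varno;
} collect_rel_vars_context;

static bool
collect_rel_vars_walker(Node *node, collect_rel_vars_context *context)
{
	if (node == NULL)
		return false;
	if (IsA(node, Var))
	{
		Var		   *var = (Var *) node;

		if (var->varlevelsup == 0 && var->varno == context->target_varno)
			context->varlist = lappend(context->varlist, var);
		return false;			/* a Var has no sub-nodes to visit */
	}
	return expression_tree_walker(node, collect_rel_vars_walker,
								  (void *) context);
}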
@@ -94,7 +97,7 @@ contain_var_clause_walker(Node *node, void *context)
* Recursively pulls all var nodes from an expression clause.
*
* Upper-level vars (with varlevelsup > 0) are included only
* if includeUpperVars is true. Most callers probably want
* if includeUpperVars is true. Most callers probably want
* to ignore upper-level vars.
*
* Returns list of varnodes found. Note the varnodes themselves are not
@@ -103,7 +106,7 @@ contain_var_clause_walker(Node *node, void *context)
List *
pull_var_clause(Node *clause, bool includeUpperVars)
{
pull_var_clause_context context;
pull_var_clause_context context;
context.varlist = NIL;
context.includeUpperVars = includeUpperVars;