pgindent run.
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.104 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.105 2003/08/04 00:43:20 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -50,13 +50,13 @@ static void set_function_pathlist(Query *root, RelOptInfo *rel,
 static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
 List *initial_rels);
 static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
 static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
 static void compare_tlist_datatypes(List *tlist, List *colTypes,
- bool *differentTypes);
+ bool *differentTypes);
 static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
- bool *differentTypes);
+ bool *differentTypes);
 static void subquery_push_qual(Query *subquery, Index rti, Node *qual);
 static void recurse_push_qual(Node *setOp, Query *topquery,
 Index rti, Node *qual);
@@ -290,14 +290,14 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
 rel->rows += childrel->rows;
 if (childrel->width > rel->width)
 rel->width = childrel->width;

 childvars = FastListValue(&childrel->reltargetlist);
 foreach(parentvars, FastListValue(&rel->reltargetlist))
 {
- Var *parentvar = (Var *) lfirst(parentvars);
- Var *childvar = (Var *) lfirst(childvars);
- int parentndx = parentvar->varattno - rel->min_attr;
- int childndx = childvar->varattno - childrel->min_attr;
+ Var *parentvar = (Var *) lfirst(parentvars);
+ Var *childvar = (Var *) lfirst(childvars);
+ int parentndx = parentvar->varattno - rel->min_attr;
+ int childndx = childvar->varattno - childrel->min_attr;

 if (childrel->attr_widths[childndx] > rel->attr_widths[parentndx])
 rel->attr_widths[parentndx] = childrel->attr_widths[childndx];
@@ -343,8 +343,8 @@ set_subquery_pathlist(Query *root, RelOptInfo *rel,
 *
 * There are several cases where we cannot push down clauses.
 * Restrictions involving the subquery are checked by
- * subquery_is_pushdown_safe(). Restrictions on individual clauses are
- * checked by qual_is_pushdown_safe().
+ * subquery_is_pushdown_safe(). Restrictions on individual clauses
+ * are checked by qual_is_pushdown_safe().
 *
 * Non-pushed-down clauses will get evaluated as qpquals of the
 * SubqueryScan node.
@@ -725,15 +725,16 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
 vars = pull_var_clause(qual, false);
 foreach(vl, vars)
 {
- Var *var = (Var *) lfirst(vl);
+ Var *var = (Var *) lfirst(vl);
 List *tl;
 TargetEntry *tle = NULL;
+
 Assert(var->varno == rti);

 /*
 * We use a bitmapset to avoid testing the same attno more than
- * once. (NB: this only works because subquery outputs can't
- * have negative attnos.)
+ * once. (NB: this only works because subquery outputs can't have
+ * negative attnos.)
 */
 if (bms_is_member(var->varattno, tested))
 continue;
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.111 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.112 2003/08/04 00:43:20 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -102,10 +102,10 @@ bool enable_hashjoin = true;


 static Selectivity estimate_hash_bucketsize(Query *root, Var *var,
- int nbuckets);
- static bool cost_qual_eval_walker(Node *node, QualCost *total);
+ int nbuckets);
+ static bool cost_qual_eval_walker(Node *node, QualCost * total);
 static Selectivity approx_selectivity(Query *root, List *quals,
- JoinType jointype);
+ JoinType jointype);
 static void set_rel_width(Query *root, RelOptInfo *rel);
 static double relation_byte_size(double tuples, int width);
 static double page_size(double tuples, int width);
@@ -358,13 +358,13 @@ cost_index(Path *path, Query *root,
 * Normally the indexquals will be removed from the list of restriction
 * clauses that we have to evaluate as qpquals, so we should subtract
 * their costs from baserestrictcost. But if we are doing a join then
- * some of the indexquals are join clauses and shouldn't be subtracted.
- * Rather than work out exactly how much to subtract, we don't subtract
- * anything.
+ * some of the indexquals are join clauses and shouldn't be
+ * subtracted. Rather than work out exactly how much to subtract, we
+ * don't subtract anything.
 *
 * XXX For a lossy index, not all the quals will be removed and so we
- * really shouldn't subtract their costs; but detecting that seems more
- * expensive than it's worth.
+ * really shouldn't subtract their costs; but detecting that seems
+ * more expensive than it's worth.
 */
 startup_cost += baserel->baserestrictcost.startup;
 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -433,8 +433,8 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel)
 /*
 * Cost of path is cost of evaluating the subplan, plus cost of
 * evaluating any restriction clauses that will be attached to the
- * SubqueryScan node, plus cpu_tuple_cost to account for selection
- * and projection overhead.
+ * SubqueryScan node, plus cpu_tuple_cost to account for selection and
+ * projection overhead.
 */
 path->startup_cost = baserel->subplan->startup_cost;
 path->total_cost = baserel->subplan->total_cost;
@@ -597,8 +597,9 @@ cost_material(Path *path,
 }

 /*
- * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
- * so that it doesn't appear worthwhile to materialize a bare seqscan.
+ * Also charge a small amount per extracted tuple. We use
+ * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
+ * a bare seqscan.
 */
 run_cost += cpu_tuple_cost * tuples;

@@ -631,17 +632,17 @@ cost_agg(Path *path, Query *root,
 * additional cpu_operator_cost per grouping column per input tuple
 * for grouping comparisons.
 *
- * We will produce a single output tuple if not grouping,
- * and a tuple per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise.
 *
 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
 * input path is already sorted appropriately, AGG_SORTED should be
- * preferred (since it has no risk of memory overflow). This will happen
- * as long as the computed total costs are indeed exactly equal --- but
- * if there's roundoff error we might do the wrong thing. So be sure
- * that the computations below form the same intermediate values in the
- * same order.
+ * preferred (since it has no risk of memory overflow). This will
+ * happen as long as the computed total costs are indeed exactly equal
+ * --- but if there's roundoff error we might do the wrong thing. So
+ * be sure that the computations below form the same intermediate
+ * values in the same order.
 */
 if (aggstrategy == AGG_PLAIN)
 {
@@ -724,26 +725,26 @@ cost_nestloop(NestPath *path, Query *root)
 double outer_path_rows = PATH_ROWS(outer_path);
 double inner_path_rows = PATH_ROWS(inner_path);
 double ntuples;
- Selectivity joininfactor;
+ Selectivity joininfactor;

 if (!enable_nestloop)
 startup_cost += disable_cost;

 /*
- * If we're doing JOIN_IN then we will stop scanning inner tuples for an
- * outer tuple as soon as we have one match. Account for the effects of
- * this by scaling down the cost estimates in proportion to the expected
- * output size. (This assumes that all the quals attached to the join are
- * IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop scanning inner tuples for
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
 *
 * Note: it's probably bogus to use the normal selectivity calculation
 * here when either the outer or inner path is a UniquePath.
 */
 if (path->jointype == JOIN_IN)
 {
- Selectivity qual_selec = approx_selectivity(root, restrictlist,
+ Selectivity qual_selec = approx_selectivity(root, restrictlist,
 path->jointype);
- double qptuples;
+ double qptuples;

 qptuples = ceil(qual_selec * outer_path_rows * inner_path_rows);
 if (qptuples > path->path.parent->rows)
@@ -761,8 +762,8 @@ cost_nestloop(NestPath *path, Query *root)
 * before we can start returning tuples, so the join's startup cost is
 * their sum. What's not so clear is whether the inner path's
 * startup_cost must be paid again on each rescan of the inner path.
- * This is not true if the inner path is materialized or is a hashjoin,
- * but probably is true otherwise.
+ * This is not true if the inner path is materialized or is a
+ * hashjoin, but probably is true otherwise.
 */
 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
 run_cost += outer_path->total_cost - outer_path->startup_cost;
@@ -783,14 +784,15 @@ cost_nestloop(NestPath *path, Query *root)
 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;

 /*
- * Compute number of tuples processed (not number emitted!).
- * If inner path is an indexscan, be sure to use its estimated output row
- * count, which may be lower than the restriction-clause-only row count of
- * its parent. (We don't include this case in the PATH_ROWS macro because
- * it applies *only* to a nestloop's inner relation.) Note: it is correct
- * to use the unadjusted inner_path_rows in the above calculation for
- * joininfactor, since otherwise we'd be double-counting the selectivity
- * of the join clause being used for the index.
+ * Compute number of tuples processed (not number emitted!). If inner
+ * path is an indexscan, be sure to use its estimated output row
+ * count, which may be lower than the restriction-clause-only row
+ * count of its parent. (We don't include this case in the PATH_ROWS
+ * macro because it applies *only* to a nestloop's inner relation.)
+ * Note: it is correct to use the unadjusted inner_path_rows in the
+ * above calculation for joininfactor, since otherwise we'd be
+ * double-counting the selectivity of the join clause being used for
+ * the index.
 */
 if (IsA(inner_path, IndexPath))
 inner_path_rows = ((IndexPath *) inner_path)->rows;
@@ -831,8 +833,8 @@ cost_mergejoin(MergePath *path, Query *root)
 Cost startup_cost = 0;
 Cost run_cost = 0;
 Cost cpu_per_tuple;
- Selectivity merge_selec;
- Selectivity qp_selec;
+ Selectivity merge_selec;
+ Selectivity qp_selec;
 QualCost merge_qual_cost;
 QualCost qp_qual_cost;
 RestrictInfo *firstclause;
@@ -847,7 +849,7 @@ cost_mergejoin(MergePath *path, Query *root)
 double rescanratio;
 Selectivity outerscansel,
 innerscansel;
- Selectivity joininfactor;
+ Selectivity joininfactor;
 Path sort_path; /* dummy for result of cost_sort */

 if (!enable_mergejoin)
@@ -856,7 +858,8 @@ cost_mergejoin(MergePath *path, Query *root)
 /*
 * Compute cost and selectivity of the mergequals and qpquals (other
 * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
 *
 * Note: it's probably bogus to use the normal selectivity calculation
 * here when either the outer or inner path is a UniquePath.
@@ -876,29 +879,30 @@ cost_mergejoin(MergePath *path, Query *root)
 qptuples = ceil(mergejointuples * qp_selec);

 /*
- * When there are equal merge keys in the outer relation, the mergejoin
- * must rescan any matching tuples in the inner relation. This means
- * re-fetching inner tuples. Our cost model for this is that a re-fetch
- * costs the same as an original fetch, which is probably an overestimate;
- * but on the other hand we ignore the bookkeeping costs of mark/restore.
- * Not clear if it's worth developing a more refined model.
+ * When there are equal merge keys in the outer relation, the
+ * mergejoin must rescan any matching tuples in the inner relation.
+ * This means re-fetching inner tuples. Our cost model for this is
+ * that a re-fetch costs the same as an original fetch, which is
+ * probably an overestimate; but on the other hand we ignore the
+ * bookkeeping costs of mark/restore. Not clear if it's worth
+ * developing a more refined model.
 *
 * The number of re-fetches can be estimated approximately as size of
- * merge join output minus size of inner relation. Assume that the
- * distinct key values are 1, 2, ..., and denote the number of values of
- * each key in the outer relation as m1, m2, ...; in the inner relation,
- * n1, n2, ... Then we have
+ * merge join output minus size of inner relation. Assume that the
+ * distinct key values are 1, 2, ..., and denote the number of values
+ * of each key in the outer relation as m1, m2, ...; in the inner
+ * relation, n1, n2, ... Then we have
 *
- * size of join = m1 * n1 + m2 * n2 + ...
+ * size of join = m1 * n1 + m2 * n2 + ...
 *
- * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
- * = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
- * = size of join - size of inner relation
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
+ * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * relation
 *
 * This equation works correctly for outer tuples having no inner match
 * (nk = 0), but not for inner tuples having no outer match (mk = 0);
 * we are effectively subtracting those from the number of rescanned
- * tuples, when we should not. Can we do better without expensive
+ * tuples, when we should not. Can we do better without expensive
 * selectivity computations?
 */
 if (IsA(outer_path, UniquePath))
@@ -953,8 +957,9 @@ cost_mergejoin(MergePath *path, Query *root)

 /*
 * Readjust scan selectivities to account for above rounding. This is
- * normally an insignificant effect, but when there are only a few rows
- * in the inputs, failing to do this makes for a large percentage error.
+ * normally an insignificant effect, but when there are only a few
+ * rows in the inputs, failing to do this makes for a large percentage
+ * error.
 */
 outerscansel = outer_rows / outer_path_rows;
 innerscansel = inner_rows / inner_path_rows;
@@ -1002,11 +1007,11 @@ cost_mergejoin(MergePath *path, Query *root)
 /* CPU costs */

 /*
- * If we're doing JOIN_IN then we will stop outputting inner
- * tuples for an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop outputting inner tuples
+ * for an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
 */
 if (path->jpath.jointype == JOIN_IN &&
 qptuples > path->jpath.path.parent->rows)
@@ -1017,9 +1022,9 @@ cost_mergejoin(MergePath *path, Query *root)
 /*
 * The number of tuple comparisons needed is approximately number of
 * outer rows plus number of inner rows plus number of rescanned
- * tuples (can we refine this?). At each one, we need to evaluate
- * the mergejoin quals. NOTE: JOIN_IN mode does not save any work
- * here, so do NOT include joininfactor.
+ * tuples (can we refine this?). At each one, we need to evaluate the
+ * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
+ * so do NOT include joininfactor.
 */
 startup_cost += merge_qual_cost.startup;
 run_cost += merge_qual_cost.per_tuple *
@@ -1028,7 +1033,7 @@ cost_mergejoin(MergePath *path, Query *root)
 /*
 * For each tuple that gets through the mergejoin proper, we charge
 * cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
 * since not all of the quals may get evaluated at each tuple.) This
 * work is skipped in JOIN_IN mode, so apply the factor.
 */
@@ -1059,8 +1064,8 @@ cost_hashjoin(HashPath *path, Query *root)
 Cost startup_cost = 0;
 Cost run_cost = 0;
 Cost cpu_per_tuple;
- Selectivity hash_selec;
- Selectivity qp_selec;
+ Selectivity hash_selec;
+ Selectivity qp_selec;
 QualCost hash_qual_cost;
 QualCost qp_qual_cost;
 double hashjointuples;
@@ -1076,7 +1081,7 @@ cost_hashjoin(HashPath *path, Query *root)
 int physicalbuckets;
 int numbatches;
 Selectivity innerbucketsize;
- Selectivity joininfactor;
+ Selectivity joininfactor;
 List *hcl;
 List *qpquals;

@@ -1086,7 +1091,8 @@ cost_hashjoin(HashPath *path, Query *root)
 /*
 * Compute cost and selectivity of the hashquals and qpquals (other
 * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
 *
 * Note: it's probably bogus to use the normal selectivity calculation
 * here when either the outer or inner path is a UniquePath.
@@ -1114,9 +1120,9 @@ cost_hashjoin(HashPath *path, Query *root)
 * Cost of computing hash function: must do it once per input tuple.
 * We charge one cpu_operator_cost for each column's hash function.
 *
- * XXX when a hashclause is more complex than a single operator,
- * we really should charge the extra eval costs of the left or right
- * side, as appropriate, here. This seems more work than it's worth
+ * XXX when a hashclause is more complex than a single operator, we
+ * really should charge the extra eval costs of the left or right
+ * side, as appropriate, here. This seems more work than it's worth
 * at the moment.
 */
 startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
@@ -1131,13 +1137,13 @@ cost_hashjoin(HashPath *path, Query *root)

 /*
 * Determine bucketsize fraction for inner relation. We use the
- * smallest bucketsize estimated for any individual hashclause;
- * this is undoubtedly conservative.
+ * smallest bucketsize estimated for any individual hashclause; this
+ * is undoubtedly conservative.
 *
- * BUT: if inner relation has been unique-ified, we can assume it's
- * good for hashing. This is important both because it's the right
- * answer, and because we avoid contaminating the cache with a value
- * that's wrong for non-unique-ified paths.
+ * BUT: if inner relation has been unique-ified, we can assume it's good
+ * for hashing. This is important both because it's the right answer,
+ * and because we avoid contaminating the cache with a value that's
+ * wrong for non-unique-ified paths.
 */
 if (IsA(inner_path, UniquePath))
 innerbucketsize = 1.0 / virtualbuckets;
@@ -1152,12 +1158,13 @@ cost_hashjoin(HashPath *path, Query *root)
 Assert(IsA(restrictinfo, RestrictInfo));

 /*
- * First we have to figure out which side of the hashjoin clause
- * is the inner side.
+ * First we have to figure out which side of the hashjoin
+ * clause is the inner side.
 *
 * Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the bucketsize estimate in the
- * RestrictInfo node to avoid repeated lookups of statistics.
+ * planning a large query, we cache the bucketsize estimate in
+ * the RestrictInfo node to avoid repeated lookups of
+ * statistics.
 */
 if (bms_is_subset(restrictinfo->right_relids,
 inner_path->parent->relids))
@@ -1169,7 +1176,7 @@ cost_hashjoin(HashPath *path, Query *root)
 /* not cached yet */
 thisbucketsize =
 estimate_hash_bucketsize(root,
- (Var *) get_rightop(restrictinfo->clause),
+ (Var *) get_rightop(restrictinfo->clause),
 virtualbuckets);
 restrictinfo->right_bucketsize = thisbucketsize;
 }
@@ -1185,7 +1192,7 @@ cost_hashjoin(HashPath *path, Query *root)
 /* not cached yet */
 thisbucketsize =
 estimate_hash_bucketsize(root,
- (Var *) get_leftop(restrictinfo->clause),
+ (Var *) get_leftop(restrictinfo->clause),
 virtualbuckets);
 restrictinfo->left_bucketsize = thisbucketsize;
 }
@@ -1217,11 +1224,11 @@ cost_hashjoin(HashPath *path, Query *root)
 /* CPU costs */

 /*
- * If we're doing JOIN_IN then we will stop comparing inner
- * tuples to an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop comparing inner tuples to
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
 */
 if (path->jpath.jointype == JOIN_IN &&
 qptuples > path->jpath.path.parent->rows)
@@ -1243,7 +1250,7 @@ cost_hashjoin(HashPath *path, Query *root)
 /*
 * For each tuple that gets through the hashjoin proper, we charge
 * cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
 * since not all of the quals may get evaluated at each tuple.)
 */
 startup_cost += qp_qual_cost.startup;
@@ -1254,14 +1261,14 @@ cost_hashjoin(HashPath *path, Query *root)
 * Bias against putting larger relation on inside. We don't want an
 * absolute prohibition, though, since larger relation might have
 * better bucketsize --- and we can't trust the size estimates
- * unreservedly, anyway. Instead, inflate the run cost by the
- * square root of the size ratio. (Why square root? No real good
- * reason, but it seems reasonable...)
+ * unreservedly, anyway. Instead, inflate the run cost by the square
+ * root of the size ratio. (Why square root? No real good reason,
+ * but it seems reasonable...)
 *
- * Note: before 7.4 we implemented this by inflating startup cost;
- * but if there's a disable_cost component in the input paths'
- * startup cost, that unfairly penalizes the hash. Probably it'd
- * be better to keep track of disable penalty separately from cost.
+ * Note: before 7.4 we implemented this by inflating startup cost; but if
+ * there's a disable_cost component in the input paths' startup cost,
+ * that unfairly penalizes the hash. Probably it'd be better to keep
+ * track of disable penalty separately from cost.
 */
 if (innerbytes > outerbytes && outerbytes > 0)
 run_cost *= sqrt(innerbytes / outerbytes);
@@ -1442,7 +1449,7 @@ estimate_hash_bucketsize(Query *root, Var *var, int nbuckets)
 * and a per-evaluation component.
 */
 void
-cost_qual_eval(QualCost *cost, List *quals)
+cost_qual_eval(QualCost * cost, List *quals)
 {
 List *l;

@@ -1484,7 +1491,7 @@ cost_qual_eval(QualCost *cost, List *quals)
 }

 static bool
-cost_qual_eval_walker(Node *node, QualCost *total)
+cost_qual_eval_walker(Node *node, QualCost * total)
 {
 if (node == NULL)
 return false;
@@ -1502,9 +1509,7 @@ cost_qual_eval_walker(Node *node, QualCost *total)
 IsA(node, OpExpr) ||
 IsA(node, DistinctExpr) ||
 IsA(node, NullIfExpr))
-{
 total->per_tuple += cpu_operator_cost;
-}
 else if (IsA(node, ScalarArrayOpExpr))
 {
 /* should charge more than 1 op cost, but how many? */
@@ -1519,47 +1524,48 @@ cost_qual_eval_walker(Node *node, QualCost *total)
 {
 /*
 * A subplan node in an expression typically indicates that the
- * subplan will be executed on each evaluation, so charge accordingly.
- * (Sub-selects that can be executed as InitPlans have already been
- * removed from the expression.)
+ * subplan will be executed on each evaluation, so charge
+ * accordingly. (Sub-selects that can be executed as InitPlans
+ * have already been removed from the expression.)
 *
 * An exception occurs when we have decided we can implement the
 * subplan by hashing.
 *
 */
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
 Plan *plan = subplan->plan;

 if (subplan->useHashTable)
 {
 /*
 * If we are using a hash table for the subquery outputs, then
- * the cost of evaluating the query is a one-time cost.
- * We charge one cpu_operator_cost per tuple for the work of
+ * the cost of evaluating the query is a one-time cost. We
+ * charge one cpu_operator_cost per tuple for the work of
 * loading the hashtable, too.
 */
 total->startup += plan->total_cost +
 cpu_operator_cost * plan->plan_rows;
+
 /*
 * The per-tuple costs include the cost of evaluating the
- * lefthand expressions, plus the cost of probing the hashtable.
- * Recursion into the exprs list will handle the lefthand
- * expressions properly, and will count one cpu_operator_cost
- * for each comparison operator. That is probably too low for
- * the probing cost, but it's hard to make a better estimate,
- * so live with it for now.
+ * lefthand expressions, plus the cost of probing the
+ * hashtable. Recursion into the exprs list will handle the
+ * lefthand expressions properly, and will count one
+ * cpu_operator_cost for each comparison operator. That is
+ * probably too low for the probing cost, but it's hard to
+ * make a better estimate, so live with it for now.
 */
 }
 else
 {
 /*
 * Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output
- * we will actually need to scan. NOTE: this logic should
- * agree with the estimates used by make_subplan() in
+ * evaluation. We need to estimate how much of the output we
+ * will actually need to scan. NOTE: this logic should agree
+ * with the estimates used by make_subplan() in
 * plan/subselect.c.
 */
- Cost plan_run_cost = plan->total_cost - plan->startup_cost;
+ Cost plan_run_cost = plan->total_cost - plan->startup_cost;

 if (subplan->subLinkType == EXISTS_SUBLINK)
 {
@@ -1579,23 +1585,20 @@ cost_qual_eval_walker(Node *node, QualCost *total)
 /* assume we need all tuples */
 total->per_tuple += plan_run_cost;
 }
+
 /*
- * Also account for subplan's startup cost.
- * If the subplan is uncorrelated or undirect correlated,
- * AND its topmost node is a Sort or Material node, assume
- * that we'll only need to pay its startup cost once;
- * otherwise assume we pay the startup cost every time.
+ * Also account for subplan's startup cost. If the subplan is
+ * uncorrelated or undirect correlated, AND its topmost node
+ * is a Sort or Material node, assume that we'll only need to
+ * pay its startup cost once; otherwise assume we pay the
+ * startup cost every time.
 */
 if (subplan->parParam == NIL &&
 (IsA(plan, Sort) ||
 IsA(plan, Material)))
-{
 total->startup += plan->startup_cost;
-}
 else
-{
 total->per_tuple += plan->startup_cost;
-}
 }
 }

@@ -1745,7 +1748,7 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
 UniquePath *upath;

 /*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity. Note that we are only considering
 * clauses that become restriction clauses at this join level; we are
 * not double-counting them because they were not considered in
 * estimating the sizes of the component rels.
@@ -1758,8 +1761,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
 /*
 * Basically, we multiply size of Cartesian product by selectivity.
 *
- * If we are doing an outer join, take that into account: the output
- * must be at least as large as the non-nullable input. (Is there any
+ * If we are doing an outer join, take that into account: the output must
+ * be at least as large as the non-nullable input. (Is there any
 * chance of being even smarter?)
 *
 * For JOIN_IN and variants, the Cartesian product is figured with
@@ -1823,8 +1826,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
 rel->rows = temp;

 /*
- * We need not compute the output width here, because build_joinrel_tlist
- * already did.
+ * We need not compute the output width here, because
+ * build_joinrel_tlist already did.
 */
 }

@@ -1911,11 +1914,14 @@ set_rel_width(Query *root, RelOptInfo *rel)

 Assert(IsA(var, Var));

- /* The width probably hasn't been cached yet, but may as well check */
+ /*
+ * The width probably hasn't been cached yet, but may as well
+ * check
+ */
 if (rel->attr_widths[ndx] > 0)
 {
- tuple_width += rel->attr_widths[ndx];
- continue;
+ tuple_width += rel->attr_widths[ndx];
+ continue;
 }

 relid = getrelid(var->varno, root->rtable);
@@ -1931,8 +1937,8 @@ set_rel_width(Query *root, RelOptInfo *rel)
 }

 /*
- * Not a plain relation, or can't find statistics for it.
- * Estimate using just the type info.
+ * Not a plain relation, or can't find statistics for it. Estimate
+ * using just the type info.
 */
 item_width = get_typavgwidth(var->vartype, var->vartypmod);
 Assert(item_width > 0);
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.145 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.146 2003/08/04 00:43:20 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -64,9 +64,9 @@ static List *group_clauses_by_indexkey_for_join(Query *root,
 Relids outer_relids,
 JoinType jointype, bool isouterjoin);
 static bool match_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
 int indexcol, Oid opclass, Expr *clause);
- static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
- int indexcol, Oid opclass, Expr *clause);
+ static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
+ int indexcol, Oid opclass, Expr *clause);
 static Oid indexable_operator(Expr *clause, Oid opclass,
 bool indexkey_on_left);
 static bool pred_test(List *predicate_list, List *restrictinfo_list,
@@ -77,8 +77,8 @@ static bool pred_test_recurse_pred(Expr *predicate, Node *clause);
 static bool pred_test_simple_clause(Expr *predicate, Node *clause);
 static Relids indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index);
 static Path *make_innerjoin_index_path(Query *root,
- RelOptInfo *rel, IndexOptInfo *index,
- List *clausegroups);
+ RelOptInfo *rel, IndexOptInfo *index,
+ List *clausegroups);
 static bool match_index_to_operand(Node *operand, int indexcol,
 RelOptInfo *rel, IndexOptInfo *index);
 static bool match_special_index_operator(Expr *clause, Oid opclass,
@@ -87,7 +87,7 @@ static List *expand_indexqual_condition(Expr *clause, Oid opclass);
 static List *prefix_quals(Node *leftop, Oid opclass,
 Const *prefix, Pattern_Prefix_Status pstatus);
 static List *network_prefix_quals(Node *leftop, Oid expr_op, Oid opclass,
- Datum rightop);
+ Datum rightop);
 static Datum string_to_datum(const char *str, Oid datatype);
 static Const *string_to_const(const char *str, Oid datatype);

@@ -114,7 +114,7 @@ static Const *string_to_const(const char *str, Oid datatype);
 * scan this routine deems potentially interesting for the current query.
 *
 * We also determine the set of other relids that participate in join
- * clauses that could be used with each index. The actually best innerjoin
+ * clauses that could be used with each index. The actually best innerjoin
 * path will be generated for each outer relation later on, but knowing the
 * set of potential otherrels allows us to identify equivalent outer relations
 * and avoid repeated computation.
@@ -219,10 +219,11 @@ create_index_paths(Query *root, RelOptInfo *rel)

 /*
 * 6. Examine join clauses to see which ones are potentially
- * usable with this index, and generate the set of all other relids
- * that participate in such join clauses. We'll use this set later
- * to recognize outer rels that are equivalent for joining purposes.
- * We compute both per-index and overall-for-relation sets.
+ * usable with this index, and generate the set of all other
+ * relids that participate in such join clauses. We'll use this
+ * set later to recognize outer rels that are equivalent for
+ * joining purposes. We compute both per-index and
+ * overall-for-relation sets.
 */
 join_outerrelids = indexable_outerrelids(rel, index);
 index->outer_relids = join_outerrelids;
@@ -274,7 +275,7 @@ match_index_orclauses(RelOptInfo *rel,
 */
 restrictinfo->subclauseindices =
 match_index_orclause(rel, index,
- ((BoolExpr *) restrictinfo->clause)->args,
+ ((BoolExpr *) restrictinfo->clause)->args,
 restrictinfo->subclauseindices);
 }
 }
@@ -422,6 +423,7 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
 Oid *classes = index->classlist;

 FastListInit(&quals);
+
 /*
 * Extract relevant indexclauses in indexkey order. This is
 * essentially just like group_clauses_by_indexkey() except that the
@@ -576,7 +578,7 @@ group_clauses_by_indexkey(RelOptInfo *rel, IndexOptInfo *index)
 *
 * This is much like group_clauses_by_indexkey(), but we consider both
 * join and restriction clauses. Any joinclause that uses only otherrels
- * in the specified outer_relids is fair game. But there must be at least
+ * in the specified outer_relids is fair game. But there must be at least
 * one such joinclause in the final list, otherwise we return NIL indicating
 * that this index isn't interesting as an inner indexscan. (A scan using
 * only restriction clauses shouldn't be created here, because a regular Path
@@ -641,10 +643,10 @@ group_clauses_by_indexkey_for_join(Query *root,
 */
 if (FastListValue(&clausegroup) != NIL)
 {
- List *nl;
+ List *nl;

 nl = remove_redundant_join_clauses(root,
- FastListValue(&clausegroup),
+ FastListValue(&clausegroup),
 jointype);
 FastListFromList(&clausegroup, nl);
 }
@@ -736,9 +738,9 @@ match_clause_to_indexcol(RelOptInfo *rel,
 return false;

 /*
- * Check for clauses of the form:
- * (indexkey operator constant) or (constant operator indexkey).
- * Anything that is a "pseudo constant" expression will do.
+ * Check for clauses of the form: (indexkey operator constant) or
+ * (constant operator indexkey). Anything that is a "pseudo constant"
+ * expression will do.
 */
 if (match_index_to_operand(leftop, indexcol, rel, index) &&
 is_pseudo_constant_clause(rightop))
@@ -747,8 +749,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
 return true;

 /*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
 */
 if (match_special_index_operator(clause, opclass, true))
 return true;
@@ -762,8 +764,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
 return true;

 /*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
 */
 if (match_special_index_operator(clause, opclass, false))
 return true;
@@ -824,10 +826,10 @@ match_join_clause_to_indexcol(RelOptInfo *rel,
 return false;

 /*
- * Check for an indexqual that could be handled by a nestloop
- * join. We need the index key to be compared against an
- * expression that uses none of the indexed relation's vars and
- * contains no volatile functions.
+ * Check for an indexqual that could be handled by a nestloop join. We
+ * need the index key to be compared against an expression that uses
+ * none of the indexed relation's vars and contains no volatile
+ * functions.
 */
 if (match_index_to_operand(leftop, indexcol, rel, index))
 {
@@ -1174,10 +1176,11 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
 * 1. Find "btree" strategy numbers for the pred_op and clause_op.
 *
 * We must find a btree opclass that contains both operators, else the
- * implication can't be determined. If there are multiple such opclasses,
- * assume we can use any one to determine the logical relationship of the
- * two operators and the correct corresponding test operator. This should
- * work for any logically consistent opclasses.
+ * implication can't be determined. If there are multiple such
+ * opclasses, assume we can use any one to determine the logical
+ * relationship of the two operators and the correct corresponding
+ * test operator. This should work for any logically consistent
+ * opclasses.
 */
 catlist = SearchSysCacheList(AMOPOPID, 1,
 ObjectIdGetDatum(pred_op),
@@ -1269,7 +1272,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)

 /* And execute it. */
 test_result = ExecEvalExprSwitchContext(test_exprstate,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
 &isNull, NULL);

 /* Get back to outer memory context */
@@ -1295,7 +1298,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
 /*
 * indexable_outerrelids
 * Finds all other relids that participate in any indexable join clause
- * for the specified index. Returns a set of relids.
+ * for the specified index. Returns a set of relids.
 *
 * 'rel' is the relation for which 'index' is defined
 */
@@ -1314,16 +1317,16 @@ indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index)
 /*
 * Examine each joinclause in the JoinInfo node's list to see if
 * it matches any key of the index. If so, add the JoinInfo's
- * otherrels to the result. We can skip examining other joinclauses
- * in the same list as soon as we find a match (since by definition
- * they all have the same otherrels).
+ * otherrels to the result. We can skip examining other
+ * joinclauses in the same list as soon as we find a match (since
+ * by definition they all have the same otherrels).
 */
 foreach(j, joininfo->jinfo_restrictinfo)
 {
 RestrictInfo *rinfo = (RestrictInfo *) lfirst(j);
- Expr *clause = rinfo->clause;
- int indexcol = 0;
- Oid *classes = index->classlist;
+ Expr *clause = rinfo->clause;
+ int indexcol = 0;
+ Oid *classes = index->classlist;

 do
 {
@@ -1398,11 +1401,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 default:
 return NULL;
 }
+
 /*
 * If there are no indexable joinclauses for this rel, exit quickly.
 */
 if (bms_is_empty(rel->index_outer_relids))
 return NULL;
+
 /*
 * Otherwise, we have to do path selection in the memory context of
 * the given rel, so that any created path can be safely attached to
@@ -1410,10 +1415,11 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 * issue for normal planning, but it is an issue for GEQO planning.)
 */
 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
+
 /*
- * Intersect the given outer_relids with index_outer_relids
- * to find the set of outer relids actually relevant for this index.
- * If there are none, again we can fail immediately.
+ * Intersect the given outer_relids with index_outer_relids to find
+ * the set of outer relids actually relevant for this index. If there
+ * are none, again we can fail immediately.
 */
 outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
 if (bms_is_empty(outer_relids))
@@ -1422,11 +1428,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 MemoryContextSwitchTo(oldcontext);
 return NULL;
 }
+
 /*
 * Look to see if we already computed the result for this set of
- * relevant outerrels. (We include the isouterjoin status in the
+ * relevant outerrels. (We include the isouterjoin status in the
 * cache lookup key for safety. In practice I suspect this is not
- * necessary because it should always be the same for a given innerrel.)
+ * necessary because it should always be the same for a given
+ * innerrel.)
 */
 foreach(jlist, rel->index_inner_paths)
 {
@@ -1441,15 +1449,15 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 }

 /*
- * For each index of the rel, find the best path; then choose the
- * best overall. We cache the per-index results as well as the overall
- * result. (This is useful because different indexes may have different
- * relevant outerrel sets, so different overall outerrel sets might still
- * map to the same computation for a given index.)
+ * For each index of the rel, find the best path; then choose the best
+ * overall. We cache the per-index results as well as the overall
+ * result. (This is useful because different indexes may have
+ * different relevant outerrel sets, so different overall outerrel
+ * sets might still map to the same computation for a given index.)
 */
 foreach(ilist, rel->indexlist)
 {
- IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
 Relids index_outer_relids;
 Path *path = NULL;

@@ -1461,6 +1469,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 bms_free(index_outer_relids);
 continue;
 }
+
 /*
 * Look to see if we already computed the result for this index.
 */
@@ -1471,7 +1480,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 info->isouterjoin == isouterjoin)
 {
 path = info->best_innerpath;
- bms_free(index_outer_relids); /* not needed anymore */
+ bms_free(index_outer_relids); /* not needed anymore */
 break;
 }
 }
@@ -1484,9 +1493,9 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
 clausegroups = group_clauses_by_indexkey_for_join(root,
 rel,
 index,
- index_outer_relids,
+ index_outer_relids,
 jointype,
- isouterjoin);
+ isouterjoin);
 if (clausegroups)
 {
 /* make the path */
@@ -1548,9 +1557,9 @@ make_innerjoin_index_path(Query *root,
 pathnode->path.parent = rel;

 /*
- * There's no point in marking the path with any pathkeys, since
- * it will only ever be used as the inner path of a nestloop, and
- * so its ordering does not matter.
+ * There's no point in marking the path with any pathkeys, since it
+ * will only ever be used as the inner path of a nestloop, and so its
+ * ordering does not matter.
 */
 pathnode->path.pathkeys = NIL;

@@ -1582,19 +1591,19 @@ make_innerjoin_index_path(Query *root,

 /*
 * We must compute the estimated number of output rows for the
- * indexscan. This is less than rel->rows because of the
- * additional selectivity of the join clauses. Since clausegroups
- * may contain both restriction and join clauses, we have to do a
- * set union to get the full set of clauses that must be
- * considered to compute the correct selectivity. (Without the union
- * operation, we might have some restriction clauses appearing twice,
- * which'd mislead restrictlist_selectivity into double-counting their
- * selectivity. However, since RestrictInfo nodes aren't copied when
- * linking them into different lists, it should be sufficient to use
- * pointer comparison to remove duplicates.)
+ * indexscan. This is less than rel->rows because of the additional
+ * selectivity of the join clauses. Since clausegroups may contain
+ * both restriction and join clauses, we have to do a set union to get
+ * the full set of clauses that must be considered to compute the
+ * correct selectivity. (Without the union operation, we might have
+ * some restriction clauses appearing twice, which'd mislead
+ * restrictlist_selectivity into double-counting their selectivity.
+ * However, since RestrictInfo nodes aren't copied when linking them
+ * into different lists, it should be sufficient to use pointer
+ * comparison to remove duplicates.)
 *
- * Always assume the join type is JOIN_INNER; even if some of the
- * join clauses come from other contexts, that's not our problem.
+ * Always assume the join type is JOIN_INNER; even if some of the join
+ * clauses come from other contexts, that's not our problem.
 */
 allclauses = set_ptrUnion(rel->baserestrictinfo, allclauses);
 pathnode->rows = rel->tuples *
@@ -1656,9 +1665,9 @@ match_index_to_operand(Node *operand,
 else
 {
 /*
- * Index expression; find the correct expression. (This search could
- * be avoided, at the cost of complicating all the callers of this
- * routine; doesn't seem worth it.)
+ * Index expression; find the correct expression. (This search
+ * could be avoided, at the cost of complicating all the callers
+ * of this routine; doesn't seem worth it.)
 */
 List *indexprs;
 int i;
@@ -1677,6 +1686,7 @@ match_index_to_operand(Node *operand,
 if (indexprs == NIL)
 elog(ERROR, "wrong number of index expressions");
 indexkey = (Node *) lfirst(indexprs);
+
 /*
 * Does it match the operand? Again, strip any relabeling.
 */
@@ -1776,12 +1786,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_LIKE_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;

 case OID_BYTEA_LIKE_OP:
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;

 case OID_TEXT_ICLIKE_OP:
@@ -1789,7 +1799,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_ICLIKE_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;

 case OID_TEXT_REGEXEQ_OP:
@@ -1797,7 +1807,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_REGEXEQ_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;

 case OID_TEXT_ICREGEXEQ_OP:
@@ -1805,7 +1815,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
 case OID_NAME_ICREGEXEQ_OP:
 /* the right-hand const is type text for all of these */
 isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
 break;

 case OID_INET_SUB_OP:
@@ -1831,9 +1841,9 @@
 * want to apply. (A hash index, for example, will not support ">=".)
 * Currently, only btree supports the operators we need.
 *
- * We insist on the opclass being the specific one we expect,
- * else we'd do the wrong thing if someone were to make a reverse-sort
- * opclass with the same operators.
+ * We insist on the opclass being the specific one we expect, else we'd
+ * do the wrong thing if someone were to make a reverse-sort opclass
+ * with the same operators.
 */
 switch (expr_op)
 {
@@ -1896,7 +1906,7 @@
 * The input list is ordered by index key, and so the output list is too.
 * (The latter is not depended on by any part of the planner, so far as I can
 * tell; but some parts of the executor do assume that the indxqual list
- * ultimately delivered to the executor is so ordered. One such place is
+ * ultimately delivered to the executor is so ordered. One such place is
 * _bt_orderkeys() in the btree support. Perhaps that ought to be fixed
 * someday --- tgl 7/00)
 */
@@ -1930,7 +1940,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)

 } while (clausegroups != NIL && !DoneMatchingIndexKeys(classes));

- Assert(clausegroups == NIL); /* else more groups than indexkeys... */
+ Assert(clausegroups == NIL); /* else more groups than indexkeys... */

 return FastListValue(&resultquals);
 }
@@ -1953,11 +1963,12 @@ expand_indexqual_condition(Expr *clause, Oid opclass)

 switch (expr_op)
 {
- /*
- * LIKE and regex operators are not members of any index
- * opclass, so if we find one in an indexqual list we can
- * assume that it was accepted by match_special_index_operator().
- */
+ /*
+ * LIKE and regex operators are not members of any index
+ * opclass, so if we find one in an indexqual list we can
+ * assume that it was accepted by
+ * match_special_index_operator().
+ */
 case OID_TEXT_LIKE_OP:
 case OID_BPCHAR_LIKE_OP:
 case OID_NAME_LIKE_OP:
@@ -2061,22 +2072,22 @@ prefix_quals(Node *leftop, Oid opclass,
 }

 /*
- * If necessary, coerce the prefix constant to the right type.
- * The given prefix constant is either text or bytea type.
+ * If necessary, coerce the prefix constant to the right type. The
+ * given prefix constant is either text or bytea type.
 */
 if (prefix_const->consttype != datatype)
 {
- char *prefix;
+ char *prefix;

 switch (prefix_const->consttype)
 {
 case TEXTOID:
 prefix = DatumGetCString(DirectFunctionCall1(textout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
 break;
 case BYTEAOID:
 prefix = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
 break;
 default:
 elog(ERROR, "unexpected const type: %u",
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.79 2003/07/25 00:01:06 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.80 2003/08/04 00:43:20 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -300,7 +300,7 @@ sort_inner_and_outer(Query *root,
|
||||
* We always generate a nestloop path for each available outer path.
|
||||
* In fact we may generate as many as four: one on the cheapest-total-cost
|
||||
* inner path, one on the same with materialization, one on the
|
||||
* cheapest-startup-cost inner path (if different),
|
||||
* cheapest-startup-cost inner path (if different),
|
||||
* and one on the best inner-indexscan path (if any).
|
||||
*
|
||||
* We also consider mergejoins if mergejoin clauses are available. We have
|
||||
@ -342,10 +342,10 @@ match_unsorted_outer(Query *root,

/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
* doing a right or full join, we must use *all* the mergeclauses as join
* clauses, else we will not have a valid plan. (Although these two
* flags are currently inverses, keep them separate for clarity and
* possible future changes.)
* doing a right or full join, we must use *all* the mergeclauses as
* join clauses, else we will not have a valid plan. (Although these
* two flags are currently inverses, keep them separate for clarity
* and possible future changes.)
*/
switch (jointype)
{
@ -371,8 +371,8 @@ match_unsorted_outer(Query *root,
}

/*
* If we need to unique-ify the inner path, we will consider only
* the cheapest inner.
* If we need to unique-ify the inner path, we will consider only the
* cheapest inner.
*/
if (jointype == JOIN_UNIQUE_INNER)
{
@ -384,9 +384,10 @@ match_unsorted_outer(Query *root,
else if (nestjoinOK)
{
/*
* If the cheapest inner path is a join or seqscan, we should consider
* materializing it. (This is a heuristic: we could consider it
* always, but for inner indexscans it's probably a waste of time.)
* If the cheapest inner path is a join or seqscan, we should
* consider materializing it. (This is a heuristic: we could
* consider it always, but for inner indexscans it's probably a
* waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, TidPath)))
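Condensed from the hunk above, the heuristic amounts to: materialize the cheapest inner unless it is an indexscan or TID scan, whose rescans are already cheap enough that materialization is likely wasted work.

    /* Condensed from match_unsorted_outer(); all names appear in the
     * surrounding diff.
     */
    Path *matpath = NULL;

    if (!(IsA(inner_cheapest_total, IndexPath) ||
          IsA(inner_cheapest_total, TidPath)))
        matpath = (Path *)
            create_material_path(innerrel, inner_cheapest_total);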
@ -394,8 +395,8 @@ match_unsorted_outer(Query *root,
create_material_path(innerrel, inner_cheapest_total);

/*
* Get the best innerjoin indexpath (if any) for this outer rel. It's
* the same for all outer paths.
* Get the best innerjoin indexpath (if any) for this outer rel.
* It's the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
@ -414,8 +415,8 @@ match_unsorted_outer(Query *root,
int sortkeycnt;

/*
* If we need to unique-ify the outer path, it's pointless to consider
* any but the cheapest outer.
* If we need to unique-ify the outer path, it's pointless to
* consider any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@ -709,7 +710,7 @@ hash_inner_and_outer(Query *root,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
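Both this hunk and the matching one in select_mergejoin_clauses() orient a clause with the same bitmapset test; condensed (the first branch is the symmetric counterpart of the one shown in the diff):

    /* A clause "l_expr op r_expr" is usable only if each side's relids
     * fall entirely within one input rel; which subset test succeeds
     * tells us which side is the inner one.
     */
    if (bms_is_subset(restrictinfo->left_relids, outerrel->relids) &&
        bms_is_subset(restrictinfo->right_relids, innerrel->relids))
    {
        /* righthand side is inner */
    }
    else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
             bms_is_subset(restrictinfo->right_relids, outerrel->relids))
    {
        /* lefthand side is inner */
    }
    else
        continue;               /* no good for these input relations */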
@ -727,9 +728,9 @@ hash_inner_and_outer(Query *root,
* cheapest-startup-cost outer paths. There's no need to consider
* any but the cheapest-total-cost inner path, however.
*/
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
Path *cheapest_total_inner = innerrel->cheapest_total_path;
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
Path *cheapest_total_inner = innerrel->cheapest_total_path;

/* Unique-ify if need be */
if (jointype == JOIN_UNIQUE_OUTER)
@ -840,7 +841,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.61 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.62 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -19,11 +19,11 @@


static List *make_rels_by_clause_joins(Query *root,
RelOptInfo *old_rel,
List *other_rels);
RelOptInfo *old_rel,
List *other_rels);
static List *make_rels_by_clauseless_joins(Query *root,
RelOptInfo *old_rel,
List *other_rels);
RelOptInfo *old_rel,
List *other_rels);


/*
@ -417,8 +417,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,

/*
* If we are implementing IN clauses as joins, there are some joins
* that are illegal. Check to see if the proposed join is trouble.
* We can skip the work if looking at an outer join, however, because
* that are illegal. Check to see if the proposed join is trouble. We
* can skip the work if looking at an outer join, however, because
* only top-level joins might be affected.
*/
if (jointype == JOIN_INNER)
@ -430,8 +430,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);

/*
* Cannot join if proposed join contains part, but only
* part, of the RHS, *and* it contains rels not in the RHS.
* Cannot join if proposed join contains part, but only part,
* of the RHS, *and* it contains rels not in the RHS.
*/
if (bms_overlap(ininfo->righthand, joinrelids) &&
!bms_is_subset(ininfo->righthand, joinrelids) &&
@ -442,16 +442,17 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}

/*
* No issue unless we are looking at a join of the IN's RHS
* to other stuff.
* No issue unless we are looking at a join of the IN's RHS to
* other stuff.
*/
if (! (bms_is_subset(ininfo->righthand, joinrelids) &&
!bms_equal(ininfo->righthand, joinrelids)))
if (!(bms_is_subset(ininfo->righthand, joinrelids) &&
!bms_equal(ininfo->righthand, joinrelids)))
continue;

/*
* If we already joined IN's RHS to any part of its LHS in either
* input path, then this join is not constrained (the necessary
* work was done at a lower level).
* If we already joined IN's RHS to any part of its LHS in
* either input path, then this join is not constrained (the
* necessary work was done at a lower level).
*/
if (bms_overlap(ininfo->lefthand, rel1->relids) &&
bms_is_subset(ininfo->righthand, rel1->relids))
@ -459,6 +460,7 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
if (bms_overlap(ininfo->lefthand, rel2->relids) &&
bms_is_subset(ininfo->righthand, rel2->relids))
continue;

/*
* JOIN_IN technique will work if outerrel includes LHS and
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
@ -478,22 +480,14 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
if (bms_is_subset(ininfo->lefthand, rel1->relids) &&
bms_equal(ininfo->righthand, rel2->relids))
{
jointype = JOIN_IN;
}
else if (bms_is_subset(ininfo->lefthand, rel2->relids) &&
bms_equal(ininfo->righthand, rel1->relids))
{
jointype = JOIN_REVERSE_IN;
}
else if (bms_equal(ininfo->righthand, rel1->relids))
{
jointype = JOIN_UNIQUE_OUTER;
}
else if (bms_equal(ininfo->righthand, rel2->relids))
{
jointype = JOIN_UNIQUE_INNER;
}
else
{
/* invalid join path */
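A self-contained toy model of the jointype selection chain above, with relid sets as plain bitmasks (not PostgreSQL source); it models a query like SELECT ... FROM a WHERE a.x IN (SELECT b.y FROM b), where lefthand = {a} and righthand = {b}:

    #include <stdio.h>

    typedef unsigned int Relids;            /* toy stand-in for Bitmapset */
    #define SUBSET(x, y)  (((x) & ~(y)) == 0)

    int
    main(void)
    {
        Relids      lefthand = 1u << 0;     /* {a} */
        Relids      righthand = 1u << 1;    /* {b} */
        Relids      rel1 = 1u << 0;         /* joining {a} ... */
        Relids      rel2 = 1u << 1;         /* ... to {b} */
        const char *jointype;

        if (SUBSET(lefthand, rel1) && righthand == rel2)
            jointype = "JOIN_IN";
        else if (SUBSET(lefthand, rel2) && righthand == rel1)
            jointype = "JOIN_REVERSE_IN";
        else if (righthand == rel1)
            jointype = "JOIN_UNIQUE_OUTER"; /* unique-ify that input first */
        else if (righthand == rel2)
            jointype = "JOIN_UNIQUE_INNER";
        else
            jointype = "invalid join path";

        printf("%s\n", jointype);           /* prints JOIN_IN */
        return 0;
    }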
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.51 2003/06/15 22:51:45 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -99,7 +99,7 @@ create_or_index_paths(Query *root, RelOptInfo *rel)

best_or_subclause_indices(root,
rel,
((BoolExpr *) restrictinfo->clause)->args,
((BoolExpr *) restrictinfo->clause)->args,
restrictinfo->subclauseindices,
pathnode);

@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.51 2003/07/25 00:01:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -198,8 +198,8 @@ generate_implied_equalities(Query *root)
/*
* Collect info about relids mentioned in each item. For this
* routine we only really care whether there are any at all in
* each item, but process_implied_equality() needs the exact
* sets, so we may as well pull them here.
* each item, but process_implied_equality() needs the exact sets,
* so we may as well pull them here.
*/
relids = (Relids *) palloc(nitems * sizeof(Relids));
have_consts = false;
@ -233,8 +233,8 @@ generate_implied_equalities(Query *root)

/*
* If it's "const = const" then just ignore it altogether.
* There is no place in the restrictinfo structure to store
* it. (If the two consts are in fact unequal, then
* There is no place in the restrictinfo structure to
* store it. (If the two consts are in fact unequal, then
* propagating the comparison to Vars will cause us to
* produce zero rows out, as expected.)
*/
@ -242,12 +242,12 @@ generate_implied_equalities(Query *root)
{
/*
* Tell process_implied_equality to delete the clause,
* not add it, if it's "var = var" and we have constants
* present in the list.
* not add it, if it's "var = var" and we have
* constants present in the list.
*/
bool delete_it = (have_consts &&
i1_is_variable &&
i2_is_variable);
bool delete_it = (have_consts &&
i1_is_variable &&
i2_is_variable);

process_implied_equality(root,
item1->key, item2->key,
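A concrete (hypothetical) case for the delete-vs-add decision: WHERE a.x = b.y AND a.x = 5 puts {a.x, b.y, 5} into one equivalence set, so:

    /* The constant lets us enforce a.x = 5 and b.y = 5 at scan level,
     * making the join clause a.x = b.y redundant; it is deleted rather
     * than added.  Hypothetical values for the flags in the hunk above:
     */
    bool delete_it = (have_consts &&        /* a constant (5) is in the set */
                      i1_is_variable &&     /* item1 is a.x */
                      i2_is_variable);      /* item2 is b.y */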
@ -751,20 +751,21 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
* element might match none, one, or more of the output columns
* that are visible to the outer query. This means we may have
* multiple possible representations of the sub_pathkey in the
* context of the outer query. Ideally we would generate them all
* and put them all into a pathkey list of the outer query, thereby
* propagating equality knowledge up to the outer query. Right now
* we cannot do so, because the outer query's canonical pathkey
* sets are already frozen when this is called. Instead we prefer
* the one that has the highest "score" (number of canonical pathkey
* peers, plus one if it matches the outer query_pathkeys).
* This is the most likely to be useful in the outer query.
* context of the outer query. Ideally we would generate them all
* and put them all into a pathkey list of the outer query,
* thereby propagating equality knowledge up to the outer query.
* Right now we cannot do so, because the outer query's canonical
* pathkey sets are already frozen when this is called. Instead
* we prefer the one that has the highest "score" (number of
* canonical pathkey peers, plus one if it matches the outer
* query_pathkeys). This is the most likely to be useful in the
* outer query.
*/
foreach(j, sub_pathkey)
{
PathKeyItem *sub_item = (PathKeyItem *) lfirst(j);
Node *sub_key = sub_item->key;
List *k;
Node *sub_key = sub_item->key;
List *k;

foreach(k, subquery->targetList)
{
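The "score" described in that comment reduces to roughly the following; count_canonical_peers() and pathkey_matches_query_pathkeys() are invented names for the two measurements, not functions shown in this diff:

    /* Sketch: prefer the outer representation with the most canonical
     * pathkey peers, plus a bonus if it matches the outer query's
     * requested ordering.
     */
    score = count_canonical_peers(root, outer_item);
    if (pathkey_matches_query_pathkeys(root, outer_item))
        score++;
    if (score > best_score)
    {
        best_item = outer_item;
        best_score = score;
    }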
@ -774,9 +775,9 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
equal(tle->expr, sub_key))
{
/* Found a representation for this sub_key */
Var *outer_var;
Var *outer_var;
PathKeyItem *outer_item;
int score;
int score;

outer_var = makeVar(rel->relid,
tle->resdom->resno,
@ -802,8 +803,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
}

/*
* If we couldn't find a representation of this sub_pathkey,
* we're done (we can't use the ones to its right, either).
* If we couldn't find a representation of this sub_pathkey, we're
* done (we can't use the ones to its right, either).
*/
if (!best_item)
break;
@ -812,8 +813,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
cpathkey = make_canonical_pathkey(root, best_item);

/*
* Eliminate redundant ordering info; could happen if outer
* query equijoins subquery keys...
* Eliminate redundant ordering info; could happen if outer query
* equijoins subquery keys...
*/
if (!ptrMember(cpathkey, retval))
{
@ -920,7 +921,7 @@ make_pathkeys_for_sortclauses(List *sortclauses,
* many times when dealing with a many-relation query.
*
* We have to be careful that the cached values are palloc'd in the same
* context the RestrictInfo node itself is in. This is not currently a
* context the RestrictInfo node itself is in. This is not currently a
* problem for normal planning, but it is an issue for GEQO planning.
*/
void
@ -1090,7 +1091,7 @@ make_pathkeys_for_mergeclauses(Query *root,
else
{
elog(ERROR, "could not identify which side of mergeclause to use");
pathkey = NIL; /* keep compiler quiet */
pathkey = NIL; /* keep compiler quiet */
}

/*

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.14 2003/02/08 20:20:54 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.15 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -27,7 +27,7 @@

static List *TidqualFromRestrictinfo(Relids relids, List *restrictinfo);
static bool isEvaluable(int varno, Node *node);
static Node *TidequalClause(int varno, OpExpr *node);
static Node *TidequalClause(int varno, OpExpr * node);
static List *TidqualFromExpr(int varno, Expr *expr);

static bool
@ -66,7 +66,7 @@ isEvaluable(int varno, Node *node)
* or the left node if the opclause is ....=CTID
*/
static Node *
TidequalClause(int varno, OpExpr *node)
TidequalClause(int varno, OpExpr * node)
{
Node *rnode = NULL,
*arg1,