
Standard pgindent run for 8.1.

Author: Bruce Momjian
Date:   2005-10-15 02:49:52 +00:00
Parent: 790c01d280
Commit: 1dc3498251

770 changed files with 34334 additions and 32507 deletions

File: src/backend/optimizer/path/allpaths.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.136 2005/08/22 17:34:58 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.137 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -62,7 +62,7 @@ static void compare_tlist_datatypes(List *tlist, List *colTypes,
static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
bool *differentTypes);
static void subquery_push_qual(Query *subquery,
RangeTblEntry *rte, Index rti, Node *qual);
RangeTblEntry *rte, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
RangeTblEntry *rte, Index rti, Node *qual);
@ -105,7 +105,7 @@ make_one_rel(PlannerInfo *root)
if (brel == NULL)
continue;
Assert(brel->relid == rti); /* sanity check on array */
Assert(brel->relid == rti); /* sanity check on array */
/* ignore RTEs that are "other rels" */
if (brel->reloptkind != RELOPT_BASEREL)
@ -134,9 +134,9 @@ set_base_rel_pathlists(PlannerInfo *root)
Index rti;
/*
* Note: because we call expand_inherited_rtentry inside the loop,
* it's quite possible for the base_rel_array to be enlarged while
* the loop runs. Hence don't try to optimize the loop.
* Note: because we call expand_inherited_rtentry inside the loop, it's
* quite possible for the base_rel_array to be enlarged while the loop
* runs. Hence don't try to optimize the loop.
*/
for (rti = 1; rti < root->base_rel_array_size; rti++)
{
@ -255,8 +255,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
ListCell *il;
/*
* XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE;
* can we do better?
* XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; can
* we do better?
*/
if (list_member_int(root->parse->rowMarks, parentRTindex))
ereport(ERROR,
@ -270,8 +270,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
rel->width = 0;
/*
* Generate access paths for each table in the tree (parent AND
* children), and pick the cheapest path for each table.
* Generate access paths for each table in the tree (parent AND children),
* and pick the cheapest path for each table.
*/
foreach(il, inheritlist)
{
@ -286,18 +286,17 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
childOID = childrte->relid;
/*
* Make a RelOptInfo for the child so we can do planning.
* Mark it as an "other rel" since it will not be part of the
* main join tree.
* Make a RelOptInfo for the child so we can do planning. Mark it as
* an "other rel" since it will not be part of the main join tree.
*/
childrel = build_other_rel(root, childRTindex);
/*
* Copy the parent's targetlist and restriction quals to the
* child, with attribute-number adjustment as needed. We don't
* bother to copy the join quals, since we can't do any joining of
* the individual tables. Also, we just zap attr_needed rather
* than trying to adjust it; it won't be looked at in the child.
* Copy the parent's targetlist and restriction quals to the child,
* with attribute-number adjustment as needed. We don't bother to
* copy the join quals, since we can't do any joining of the
* individual tables. Also, we just zap attr_needed rather than
* trying to adjust it; it won't be looked at in the child.
*/
childrel->reltargetlist = (List *)
adjust_inherited_attrs((Node *) rel->reltargetlist,
@ -320,13 +319,14 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
*/
if (constraint_exclusion)
{
List *constraint_pred;
List *constraint_pred;
constraint_pred = get_relation_constraints(childOID, childrel);
/*
* We do not currently enforce that CHECK constraints contain
* only immutable functions, so it's necessary to check here.
* We daren't draw conclusions from plan-time evaluation of
* We do not currently enforce that CHECK constraints contain only
* immutable functions, so it's necessary to check here. We
* daren't draw conclusions from plan-time evaluation of
* non-immutable functions.
*/
if (!contain_mutable_functions((Node *) constraint_pred))
@ -351,9 +351,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
subpaths = lappend(subpaths, childrel->cheapest_total_path);
/*
* Propagate size information from the child back to the parent.
* For simplicity, we use the largest widths from any child as the
* parent estimates.
* Propagate size information from the child back to the parent. For
* simplicity, we use the largest widths from any child as the parent
* estimates.
*/
rel->rows += childrel->rows;
if (childrel->width > rel->width)
@ -377,9 +377,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
}
/*
* Finally, build Append path and install it as the only access path
* for the parent rel. (Note: this is correct even if we have zero
* or one live subpath due to constraint exclusion.)
* Finally, build Append path and install it as the only access path for
* the parent rel. (Note: this is correct even if we have zero or one
* live subpath due to constraint exclusion.)
*/
add_path(rel, (Path *) create_append_path(rel, subpaths));
@ -430,18 +430,18 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* If there are any restriction clauses that have been attached to the
* subquery relation, consider pushing them down to become WHERE or
* HAVING quals of the subquery itself. This transformation is useful
* because it may allow us to generate a better plan for the subquery
* than evaluating all the subquery output rows and then filtering them.
* subquery relation, consider pushing them down to become WHERE or HAVING
* quals of the subquery itself. This transformation is useful because it
* may allow us to generate a better plan for the subquery than evaluating
* all the subquery output rows and then filtering them.
*
* There are several cases where we cannot push down clauses.
* Restrictions involving the subquery are checked by
* subquery_is_pushdown_safe(). Restrictions on individual clauses
* are checked by qual_is_pushdown_safe().
* There are several cases where we cannot push down clauses. Restrictions
* involving the subquery are checked by subquery_is_pushdown_safe().
* Restrictions on individual clauses are checked by
* qual_is_pushdown_safe().
*
* Non-pushed-down clauses will get evaluated as qpquals of the
* SubqueryScan node.
* Non-pushed-down clauses will get evaluated as qpquals of the SubqueryScan
* node.
*
* XXX Are there any cases where we want to make a policy decision not to
* push down a pushable qual, because it'd result in a worse plan?
@ -475,10 +475,10 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
pfree(differentTypes);
/*
* We can safely pass the outer tuple_fraction down to the subquery
* if the outer level has no joining, aggregation, or sorting to do.
* Otherwise we'd better tell the subquery to plan for full retrieval.
* (XXX This could probably be made more intelligent ...)
* We can safely pass the outer tuple_fraction down to the subquery if the
* outer level has no joining, aggregation, or sorting to do. Otherwise
* we'd better tell the subquery to plan for full retrieval. (XXX This
* could probably be made more intelligent ...)
*/
if (parse->hasAggs ||
parse->groupClause ||
@ -540,8 +540,8 @@ make_fromexpr_rel(PlannerInfo *root, FromExpr *from)
/*
* Count the number of child jointree nodes. This is the depth of the
* dynamic-programming algorithm we must employ to consider all ways
* of joining the child nodes.
* dynamic-programming algorithm we must employ to consider all ways of
* joining the child nodes.
*/
levels_needed = list_length(from->fromlist);
@ -603,11 +603,11 @@ make_one_rel_by_joins(PlannerInfo *root, int levels_needed, List *initial_rels)
RelOptInfo *rel;
/*
* We employ a simple "dynamic programming" algorithm: we first find
* all ways to build joins of two jointree items, then all ways to
* build joins of three items (from two-item joins and single items),
* then four-item joins, and so on until we have considered all ways
* to join all the items into one rel.
* We employ a simple "dynamic programming" algorithm: we first find all
* ways to build joins of two jointree items, then all ways to build joins
* of three items (from two-item joins and single items), then four-item
* joins, and so on until we have considered all ways to join all the
* items into one rel.
*
* joinitems[j] is a list of all the j-item rels. Initially we set
* joinitems[1] to represent all the single-jointree-item relations.
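
A standalone sketch of the level-by-level enumeration the comment above describes. It is illustrative only, not PostgreSQL source: each "rel" is reduced to a counter, relation identity and join-clause pruning are ignored, so the figures are merely an upper bound on the combinations the planner would examine.

/*
 * Illustrative sketch only -- not the PostgreSQL implementation.  Level 1
 * holds the single relations; level j is built by pairing an entry from
 * level k with one from level j-k, mirroring the joinitems[] scheme.
 */
#include <stdio.h>

#define NREL 4

int main(void)
{
    long nitems[NREL + 1];      /* nitems[j] ~ |joinitems[j]| */
    int  lev, k;

    nitems[1] = NREL;           /* joinitems[1]: the base relations */
    for (lev = 2; lev <= NREL; lev++)
    {
        long combos = 0;

        /* pair every k-item rel with every (lev-k)-item rel */
        for (k = 1; k <= lev / 2; k++)
            combos += nitems[k] * nitems[lev - k];

        nitems[lev] = combos;   /* upper bound; the real code prunes heavily */
        printf("level %d: %ld candidate combinations\n", lev, combos);
    }
    return 0;
}
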
@ -823,8 +823,8 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
return false;
/*
* Examine all Vars used in clause; since it's a restriction clause,
* all such Vars must refer to subselect output columns.
* Examine all Vars used in clause; since it's a restriction clause, all
* such Vars must refer to subselect output columns.
*/
vars = pull_var_clause(qual, false);
foreach(vl, vars)
@ -835,9 +835,9 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
Assert(var->varno == rti);
/*
* We use a bitmapset to avoid testing the same attno more than
* once. (NB: this only works because subquery outputs can't have
* negative attnos.)
* We use a bitmapset to avoid testing the same attno more than once.
* (NB: this only works because subquery outputs can't have negative
* attnos.)
*/
if (bms_is_member(var->varattno, tested))
continue;
@ -893,11 +893,10 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
else
{
/*
* We need to replace Vars in the qual (which must refer to
* outputs of the subquery) with copies of the subquery's
* targetlist expressions. Note that at this point, any uplevel
* Vars in the qual should have been replaced with Params, so they
* need no work.
* We need to replace Vars in the qual (which must refer to outputs of
* the subquery) with copies of the subquery's targetlist expressions.
* Note that at this point, any uplevel Vars in the qual should have
* been replaced with Params, so they need no work.
*
* This step also ensures that when we are pushing into a setop tree,
* each component query gets its own copy of the qual.
@ -907,9 +906,9 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
CMD_SELECT, 0);
/*
* Now attach the qual to the proper place: normally WHERE, but
* if the subquery uses grouping or aggregation, put it in HAVING
* (since the qual really refers to the group-result rows).
* Now attach the qual to the proper place: normally WHERE, but if the
* subquery uses grouping or aggregation, put it in HAVING (since the
* qual really refers to the group-result rows).
*/
if (subquery->hasAggs || subquery->groupClause || subquery->havingQual)
subquery->havingQual = make_and_qual(subquery->havingQual, qual);
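
The placement rule in the comment above can be shown with a tiny standalone sketch; the struct below is a hypothetical stand-in, not the Query node. A pushed-down qual lands in HAVING when the subquery aggregates or groups, otherwise in WHERE.

/*
 * Minimal sketch of the WHERE-vs-HAVING decision described above.
 * Not PostgreSQL code; types and names are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct subquery                 /* hypothetical stand-in for Query */
{
    bool hasAggs;
    bool hasGroupClause;
    bool hasHavingQual;
};

static const char *
qual_destination(const struct subquery *sq)
{
    if (sq->hasAggs || sq->hasGroupClause || sq->hasHavingQual)
        return "HAVING";        /* qual refers to group-result rows */
    return "WHERE";             /* plain per-row restriction */
}

int main(void)
{
    struct subquery plain = {false, false, false};
    struct subquery grouped = {false, true, false};

    printf("plain subquery   -> %s\n", qual_destination(&plain));
    printf("grouped subquery -> %s\n", qual_destination(&grouped));
    return 0;
}
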
@ -919,8 +918,8 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
/*
* We need not change the subquery's hasAggs or hasSublinks flags,
* since we can't be pushing down any aggregates that weren't
* there before, and we don't push down subselects at all.
* since we can't be pushing down any aggregates that weren't there
* before, and we don't push down subselects at all.
*/
}
}

File: src/backend/optimizer/path/clausesel.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.74 2005/10/11 16:44:40 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.75 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -82,7 +82,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* hisel + losel + null_frac - 1.)
*
* If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
* and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
* and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
* yields an impossible (negative) result.
*
* A free side-effect is that we can recognize redundant inequalities such
@ -102,9 +102,9 @@ clauselist_selectivity(PlannerInfo *root,
ListCell *l;
/*
* Initial scan over clauses. Anything that doesn't look like a
* potential rangequery clause gets multiplied into s1 and forgotten.
* Anything that does gets inserted into an rqlist entry.
* Initial scan over clauses. Anything that doesn't look like a potential
* rangequery clause gets multiplied into s1 and forgotten. Anything that
* does gets inserted into an rqlist entry.
*/
foreach(l, clauses)
{
@ -127,10 +127,10 @@ clauselist_selectivity(PlannerInfo *root,
rinfo = NULL;
/*
* See if it looks like a restriction clause with a pseudoconstant
* on one side. (Anything more complicated than that might not
* behave in the simple way we are expecting.) Most of the tests
* here can be done more efficiently with rinfo than without.
* See if it looks like a restriction clause with a pseudoconstant on
* one side. (Anything more complicated than that might not behave in
* the simple way we are expecting.) Most of the tests here can be
* done more efficiently with rinfo than without.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
@ -142,10 +142,10 @@ clauselist_selectivity(PlannerInfo *root,
{
ok = (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) &&
(is_pseudo_constant_clause_relids(lsecond(expr->args),
rinfo->right_relids) ||
rinfo->right_relids) ||
(varonleft = false,
is_pseudo_constant_clause_relids(linitial(expr->args),
rinfo->left_relids)));
is_pseudo_constant_clause_relids(linitial(expr->args),
rinfo->left_relids)));
}
else
{
@ -159,8 +159,8 @@ clauselist_selectivity(PlannerInfo *root,
{
/*
* If it's not a "<" or ">" operator, just merge the
* selectivity in generically. But if it's the right
* oprrest, add the clause to rqlist for later processing.
* selectivity in generically. But if it's the right oprrest,
* add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
{
@ -199,8 +199,8 @@ clauselist_selectivity(PlannerInfo *root,
/*
* Exact equality to the default value probably means the
* selectivity function punted. This is not airtight but
* should be good enough.
* selectivity function punted. This is not airtight but should
* be good enough.
*/
if (rqlist->hibound == DEFAULT_INEQ_SEL ||
rqlist->lobound == DEFAULT_INEQ_SEL)
@ -289,8 +289,8 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause,
for (rqelem = *rqlist; rqelem; rqelem = rqelem->next)
{
/*
* We use full equal() here because the "var" might be a function
* of one or more attributes of the same relation...
* We use full equal() here because the "var" might be a function of
* one or more attributes of the same relation...
*/
if (!equal(var, rqelem->var))
continue;
@ -423,17 +423,16 @@ clause_selectivity(PlannerInfo *root,
rinfo = (RestrictInfo *) clause;
/*
* If possible, cache the result of the selectivity calculation
* for the clause. We can cache if varRelid is zero or the clause
* contains only vars of that relid --- otherwise varRelid will
* affect the result, so mustn't cache. We also have to be
* careful about the jointype. It's OK to cache when jointype is
* JOIN_INNER or one of the outer join types (any given outer-join
* clause should always be examined with the same jointype, so
* result won't change). It's not OK to cache when jointype is one
* of the special types associated with IN processing, because the
* same clause may be examined with different jointypes and the
* result should vary.
* If possible, cache the result of the selectivity calculation for
* the clause. We can cache if varRelid is zero or the clause
* contains only vars of that relid --- otherwise varRelid will affect
* the result, so mustn't cache. We also have to be careful about the
* jointype. It's OK to cache when jointype is JOIN_INNER or one of
* the outer join types (any given outer-join clause should always be
* examined with the same jointype, so result won't change). It's not
* OK to cache when jointype is one of the special types associated
* with IN processing, because the same clause may be examined with
* different jointypes and the result should vary.
*/
if (varRelid == 0 ||
bms_is_subset_singleton(rinfo->clause_relids, varRelid))
@ -477,8 +476,8 @@ clause_selectivity(PlannerInfo *root,
Var *var = (Var *) clause;
/*
* We probably shouldn't ever see an uplevel Var here, but if we
* do, return the default selectivity...
* We probably shouldn't ever see an uplevel Var here, but if we do,
* return the default selectivity...
*/
if (var->varlevelsup == 0 &&
(varRelid == 0 || varRelid == (int) var->varno))
@ -488,23 +487,23 @@ clause_selectivity(PlannerInfo *root,
if (rte->rtekind == RTE_SUBQUERY)
{
/*
* XXX not smart about subquery references... any way to
* do better?
* XXX not smart about subquery references... any way to do
* better?
*/
s1 = 0.5;
}
else
{
/*
* A Var at the top of a clause must be a bool Var. This
* is equivalent to the clause reln.attribute = 't', so we
* A Var at the top of a clause must be a bool Var. This is
* equivalent to the clause reln.attribute = 't', so we
* compute the selectivity as if that is what we have.
*/
s1 = restriction_selectivity(root,
BooleanEqualOperator,
list_make2(var,
makeBoolConst(true,
false)),
makeBoolConst(true,
false)),
varRelid);
}
}
@ -534,7 +533,7 @@ clause_selectivity(PlannerInfo *root,
{
/* inverse of the selectivity of the underlying clause */
s1 = 1.0 - clause_selectivity(root,
(Node *) get_notclausearg((Expr *) clause),
(Node *) get_notclausearg((Expr *) clause),
varRelid,
jointype);
}
@ -576,17 +575,16 @@ clause_selectivity(PlannerInfo *root,
{
/*
* If we are considering a nestloop join then all clauses are
* restriction clauses, since we are only interested in the
* one relation.
* restriction clauses, since we are only interested in the one
* relation.
*/
is_join_clause = false;
}
else
{
/*
* Otherwise, it's a join if there's more than one relation
* used. We can optimize this calculation if an rinfo was
* passed.
* Otherwise, it's a join if there's more than one relation used.
* We can optimize this calculation if an rinfo was passed.
*/
if (rinfo)
is_join_clause = (bms_membership(rinfo->clause_relids) ==
@ -613,8 +611,8 @@ clause_selectivity(PlannerInfo *root,
else if (is_funcclause(clause))
{
/*
* This is not an operator, so we guess at the selectivity. THIS
* IS A HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
* This is not an operator, so we guess at the selectivity. THIS IS A
* HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
* SELECTIVITIES THEMSELVES. -- JMH 7/9/92
*/
s1 = (Selectivity) 0.3333333;

File: src/backend/optimizer/path/costsize.c

@ -49,7 +49,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.148 2005/10/05 17:19:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.149 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -121,8 +121,8 @@ clamp_row_est(double nrows)
{
/*
* Force estimate to be at least one row, to make explain output look
* better and to avoid possible divide-by-zero when interpolating
* costs. Make it an integer, too.
* better and to avoid possible divide-by-zero when interpolating costs.
* Make it an integer, too.
*/
if (nrows < 1.0)
nrows = 1.0;
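
A standalone sketch of the clamping behaviour this comment describes; the rint() rounding is an assumption matching "Make it an integer, too", and none of this is the actual function body.

/*
 * Sketch of a row-estimate clamp: never below one row, kept integral.
 * Illustrative only.
 */
#include <math.h>
#include <stdio.h>

static double
clamp_row_est_sketch(double nrows)
{
    if (nrows < 1.0)
        nrows = 1.0;            /* avoid zero rows / divide-by-zero */
    else
        nrows = rint(nrows);    /* keep the estimate integral (assumption) */
    return nrows;
}

int main(void)
{
    printf("%.1f %.1f %.1f\n",
           clamp_row_est_sketch(0.0),     /* -> 1.0 */
           clamp_row_est_sketch(0.3),     /* -> 1.0 */
           clamp_row_est_sketch(12.7));   /* -> 13.0 */
    return 0;
}
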
@ -155,12 +155,11 @@ cost_seqscan(Path *path, PlannerInfo *root,
/*
* disk costs
*
* The cost of reading a page sequentially is 1.0, by definition. Note
* that the Unix kernel will typically do some amount of read-ahead
* optimization, so that this cost is less than the true cost of
* reading a page from disk. We ignore that issue here, but must take
* it into account when estimating the cost of non-sequential
* accesses!
* The cost of reading a page sequentially is 1.0, by definition. Note that
* the Unix kernel will typically do some amount of read-ahead
* optimization, so that this cost is less than the true cost of reading a
* page from disk. We ignore that issue here, but must take it into
* account when estimating the cost of non-sequential accesses!
*/
run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
@ -276,10 +275,10 @@ cost_index(IndexPath *path, PlannerInfo *root,
startup_cost += disable_cost;
/*
* Call index-access-method-specific code to estimate the processing
* cost for scanning the index, as well as the selectivity of the
* index (ie, the fraction of main-table tuples we will have to
* retrieve) and its correlation to the main-table tuple order.
* Call index-access-method-specific code to estimate the processing cost
* for scanning the index, as well as the selectivity of the index (ie,
* the fraction of main-table tuples we will have to retrieve) and its
* correlation to the main-table tuple order.
*/
OidFunctionCall7(index->amcostestimate,
PointerGetDatum(root),
@ -292,8 +291,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
/*
* Save amcostestimate's results for possible use in bitmap scan planning.
* We don't bother to save indexStartupCost or indexCorrelation, because
* a bitmap scan doesn't care about either.
* We don't bother to save indexStartupCost or indexCorrelation, because a
* bitmap scan doesn't care about either.
*/
path->indextotalcost = indexTotalCost;
path->indexselectivity = indexSelectivity;
@ -366,19 +365,18 @@ cost_index(IndexPath *path, PlannerInfo *root,
}
/*
* min_IO_cost corresponds to the perfectly correlated case
* (csquared=1), max_IO_cost to the perfectly uncorrelated case
* (csquared=0). Note that we just charge random_page_cost per page
* in the uncorrelated case, rather than using
* cost_nonsequential_access, since we've already accounted for
* caching effects by using the Mackert model.
* min_IO_cost corresponds to the perfectly correlated case (csquared=1),
* max_IO_cost to the perfectly uncorrelated case (csquared=0). Note that
* we just charge random_page_cost per page in the uncorrelated case,
* rather than using cost_nonsequential_access, since we've already
* accounted for caching effects by using the Mackert model.
*/
min_IO_cost = ceil(indexSelectivity * T);
max_IO_cost = pages_fetched * random_page_cost;
/*
* Now interpolate based on estimated index order correlation to get
* total disk I/O cost for main table accesses.
* Now interpolate based on estimated index order correlation to get total
* disk I/O cost for main table accesses.
*/
csquared = indexCorrelation * indexCorrelation;
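
Worked example of the interpolation step: the two endpoints and the use of csquared = correlation squared come from the comment above, while the exact blend formula used here is an assumption for illustration.

/*
 * Interpolate main-table I/O cost between the perfectly correlated and
 * perfectly uncorrelated extremes.  Numbers are illustrative only.
 */
#include <stdio.h>

int main(void)
{
    double min_IO_cost = 100.0;      /* perfectly correlated: csquared = 1 */
    double max_IO_cost = 4000.0;     /* perfectly uncorrelated: csquared = 0 */
    double correlation = 0.5;
    double csquared = correlation * correlation;

    /* assumed blend: slide from the worst case toward the best case */
    double io_cost = max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

    printf("estimated main-table I/O cost: %.1f\n", io_cost);   /* 3025.0 */
    return 0;
}
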
@ -390,9 +388,9 @@ cost_index(IndexPath *path, PlannerInfo *root,
* Normally the indexquals will be removed from the list of restriction
* clauses that we have to evaluate as qpquals, so we should subtract
* their costs from baserestrictcost. But if we are doing a join then
* some of the indexquals are join clauses and shouldn't be
* subtracted. Rather than work out exactly how much to subtract, we
* don't subtract anything.
* some of the indexquals are join clauses and shouldn't be subtracted.
* Rather than work out exactly how much to subtract, we don't subtract
* anything.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@ -467,9 +465,9 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* For small numbers of pages we should charge random_page_cost apiece,
* while if nearly all the table's pages are being read, it's more
* appropriate to charge 1.0 apiece. The effect is nonlinear, too.
* For lack of a better idea, interpolate like this to determine the
* cost per page.
* appropriate to charge 1.0 apiece. The effect is nonlinear, too. For
* lack of a better idea, interpolate like this to determine the cost per
* page.
*/
if (pages_fetched >= 2.0)
cost_per_page = random_page_cost -
@ -482,10 +480,10 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* Estimate CPU costs per tuple.
*
* Often the indexquals don't need to be rechecked at each tuple ...
* but not always, especially not if there are enough tuples involved
* that the bitmaps become lossy. For the moment, just assume they
* will be rechecked always.
* Often the indexquals don't need to be rechecked at each tuple ... but not
* always, especially not if there are enough tuples involved that the
* bitmaps become lossy. For the moment, just assume they will be
* rechecked always.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@ -527,7 +525,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
* Estimate the cost of a BitmapAnd node
*
* Note that this considers only the costs of index scanning and bitmap
* creation, not the eventual heap access. In that sense the object isn't
* creation, not the eventual heap access. In that sense the object isn't
* truly a Path, but it has enough path-like properties (costs in particular)
* to warrant treating it as one.
*/
@ -535,24 +533,24 @@ void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
Cost totalCost;
Selectivity selec;
Selectivity selec;
ListCell *l;
/*
* We estimate AND selectivity on the assumption that the inputs
* are independent. This is probably often wrong, but we don't
* have the info to do better.
* We estimate AND selectivity on the assumption that the inputs are
* independent. This is probably often wrong, but we don't have the info
* to do better.
*
* The runtime cost of the BitmapAnd itself is estimated at 100x
* cpu_operator_cost for each tbm_intersect needed. Probably too
* small, definitely too simplistic?
* cpu_operator_cost for each tbm_intersect needed. Probably too small,
* definitely too simplistic?
*/
totalCost = 0.0;
selec = 1.0;
foreach(l, path->bitmapquals)
{
Path *subpath = (Path *) lfirst(l);
Cost subCost;
Path *subpath = (Path *) lfirst(l);
Cost subCost;
Selectivity subselec;
cost_bitmap_tree_node(subpath, &subCost, &subselec);
@ -578,25 +576,25 @@ void
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
Cost totalCost;
Selectivity selec;
Selectivity selec;
ListCell *l;
/*
* We estimate OR selectivity on the assumption that the inputs
* are non-overlapping, since that's often the case in "x IN (list)"
* type situations. Of course, we clamp to 1.0 at the end.
* We estimate OR selectivity on the assumption that the inputs are
* non-overlapping, since that's often the case in "x IN (list)" type
* situations. Of course, we clamp to 1.0 at the end.
*
* The runtime cost of the BitmapOr itself is estimated at 100x
* cpu_operator_cost for each tbm_union needed. Probably too
* small, definitely too simplistic? We are aware that the tbm_unions
* are optimized out when the inputs are BitmapIndexScans.
* cpu_operator_cost for each tbm_union needed. Probably too small,
* definitely too simplistic? We are aware that the tbm_unions are
* optimized out when the inputs are BitmapIndexScans.
*/
totalCost = 0.0;
selec = 0.0;
foreach(l, path->bitmapquals)
{
Path *subpath = (Path *) lfirst(l);
Cost subCost;
Path *subpath = (Path *) lfirst(l);
Cost subCost;
Selectivity subselec;
cost_bitmap_tree_node(subpath, &subCost, &subselec);
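
A numeric illustration of the two combination rules quoted above: AND selectivities multiply under the independence assumption, OR selectivities add under the non-overlap assumption and are clamped to 1.0. Standalone sketch, not planner code.

/*
 * Combine two bitmap-input selectivities the way the comments describe.
 */
#include <stdio.h>

int main(void)
{
    double s1 = 0.20, s2 = 0.10;    /* selectivities of two bitmap inputs */

    double and_selec = s1 * s2;     /* BitmapAnd: assume independent inputs */

    double or_selec = s1 + s2;      /* BitmapOr: assume non-overlapping inputs */
    if (or_selec > 1.0)
        or_selec = 1.0;             /* ... but never above 1.0 */

    printf("AND: %.3f  OR: %.3f\n", and_selec, or_selec);  /* 0.020  0.300 */
    return 0;
}
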
@ -661,10 +659,9 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel)
Assert(baserel->rtekind == RTE_SUBQUERY);
/*
* Cost of path is cost of evaluating the subplan, plus cost of
* evaluating any restriction clauses that will be attached to the
* SubqueryScan node, plus cpu_tuple_cost to account for selection and
* projection overhead.
* Cost of path is cost of evaluating the subplan, plus cost of evaluating
* any restriction clauses that will be attached to the SubqueryScan node,
* plus cpu_tuple_cost to account for selection and projection overhead.
*/
path->startup_cost = baserel->subplan->startup_cost;
path->total_cost = baserel->subplan->total_cost;
@ -694,8 +691,8 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
/*
* For now, estimate function's cost at one operator eval per function
* call. Someday we should revive the function cost estimate columns
* in pg_proc...
* call. Someday we should revive the function cost estimate columns in
* pg_proc...
*/
cpu_per_tuple = cpu_operator_cost;
@ -758,9 +755,8 @@ cost_sort(Path *path, PlannerInfo *root,
startup_cost += disable_cost;
/*
* We want to be sure the cost of a sort is never estimated as zero,
* even if passed-in tuple count is zero. Besides, mustn't do
* log(0)...
* We want to be sure the cost of a sort is never estimated as zero, even
* if passed-in tuple count is zero. Besides, mustn't do log(0)...
*/
if (tuples < 2.0)
tuples = 2.0;
@ -790,8 +786,8 @@ cost_sort(Path *path, PlannerInfo *root,
}
/*
* Also charge a small amount (arbitrarily set equal to operator cost)
* per extracted tuple.
* Also charge a small amount (arbitrarily set equal to operator cost) per
* extracted tuple.
*/
run_cost += cpu_operator_cost * tuples;
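
To see why the "tuples = 2.0" guard above matters, here is a standalone toy cost: an in-memory sort is commonly modelled as roughly tuples * log2(tuples) comparisons, which misbehaves near zero. The constants are made up; only the clamp and the per-extracted-tuple charge come from the comments above.

/*
 * Toy sort-cost arithmetic.  Illustrative only; not the planner's model.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;
    double inputs[] = {0.0, 1.0, 1000.0};

    for (int i = 0; i < 3; i++)
    {
        double tuples = inputs[i];

        if (tuples < 2.0)
            tuples = 2.0;                       /* mustn't do log(0) */

        double compare_cost = 2.0 * cpu_operator_cost
                              * tuples * log2(tuples);
        double extract_cost = cpu_operator_cost * tuples;

        printf("tuples=%6.0f  sort cpu cost ~ %.4f\n",
               inputs[i], compare_cost + extract_cost);
    }
    return 0;
}
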
@ -828,17 +824,16 @@ cost_material(Path *path,
/*
* Charge a very small amount per inserted tuple, to reflect bookkeeping
* costs. We use cpu_tuple_cost/10 for this. This is needed to break
* the tie that would otherwise exist between nestloop with A outer,
* costs. We use cpu_tuple_cost/10 for this. This is needed to break the
* tie that would otherwise exist between nestloop with A outer,
* materialized B inner and nestloop with B outer, materialized A inner.
* The extra cost ensures we'll prefer materializing the smaller rel.
*/
startup_cost += cpu_tuple_cost * 0.1 * tuples;
/*
* Also charge a small amount per extracted tuple. We use
* cpu_tuple_cost so that it doesn't appear worthwhile to materialize
* a bare seqscan.
* Also charge a small amount per extracted tuple. We use cpu_tuple_cost
* so that it doesn't appear worthwhile to materialize a bare seqscan.
*/
run_cost += cpu_tuple_cost * tuples;
@ -865,23 +860,22 @@ cost_agg(Path *path, PlannerInfo *root,
Cost total_cost;
/*
* We charge one cpu_operator_cost per aggregate function per input
* tuple, and another one per output tuple (corresponding to transfn
* and finalfn calls respectively). If we are grouping, we charge an
* additional cpu_operator_cost per grouping column per input tuple
* for grouping comparisons.
* We charge one cpu_operator_cost per aggregate function per input tuple,
* and another one per output tuple (corresponding to transfn and finalfn
* calls respectively). If we are grouping, we charge an additional
* cpu_operator_cost per grouping column per input tuple for grouping
* comparisons.
*
* We will produce a single output tuple if not grouping, and a tuple per
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
* same total CPU cost, but AGG_SORTED has lower startup cost. If the
* input path is already sorted appropriately, AGG_SORTED should be
* preferred (since it has no risk of memory overflow). This will
* happen as long as the computed total costs are indeed exactly equal
* --- but if there's roundoff error we might do the wrong thing. So
* be sure that the computations below form the same intermediate
* values in the same order.
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the same
* total CPU cost, but AGG_SORTED has lower startup cost. If the input
* path is already sorted appropriately, AGG_SORTED should be preferred
* (since it has no risk of memory overflow). This will happen as long as
* the computed total costs are indeed exactly equal --- but if there's
* roundoff error we might do the wrong thing. So be sure that the
* computations below form the same intermediate values in the same order.
*/
if (aggstrategy == AGG_PLAIN)
{
@ -937,8 +931,8 @@ cost_group(Path *path, PlannerInfo *root,
total_cost = input_total_cost;
/*
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples.
* Charge one cpu_operator_cost per comparison per input tuple. We assume
* all columns get compared at most of the tuples.
*/
total_cost += cpu_operator_cost * input_tuples * numGroupCols;
@ -968,10 +962,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
Selectivity joininfactor;
/*
* If inner path is an indexscan, be sure to use its estimated output
* row count, which may be lower than the restriction-clause-only row
* count of its parent. (We don't include this case in the PATH_ROWS
* macro because it applies *only* to a nestloop's inner relation.)
* If inner path is an indexscan, be sure to use its estimated output row
* count, which may be lower than the restriction-clause-only row count of
* its parent. (We don't include this case in the PATH_ROWS macro because
* it applies *only* to a nestloop's inner relation.)
*/
if (IsA(inner_path, IndexPath))
inner_path_rows = ((IndexPath *) inner_path)->rows;
@ -982,11 +976,11 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
startup_cost += disable_cost;
/*
* If we're doing JOIN_IN then we will stop scanning inner tuples for
* an outer tuple as soon as we have one match. Account for the
* effects of this by scaling down the cost estimates in proportion to
* the JOIN_IN selectivity. (This assumes that all the quals attached
* to the join are IN quals, which should be true.)
* If we're doing JOIN_IN then we will stop scanning inner tuples for an
* outer tuple as soon as we have one match. Account for the effects of
* this by scaling down the cost estimates in proportion to the JOIN_IN
* selectivity. (This assumes that all the quals attached to the join are
* IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(path, root);
@ -996,9 +990,9 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
* NOTE: clearly, we must pay both outer and inner paths' startup_cost
* before we can start returning tuples, so the join's startup cost is
* their sum. What's not so clear is whether the inner path's
* startup_cost must be paid again on each rescan of the inner path.
* This is not true if the inner path is materialized or is a
* hashjoin, but probably is true otherwise.
* startup_cost must be paid again on each rescan of the inner path. This
* is not true if the inner path is materialized or is a hashjoin, but
* probably is true otherwise.
*/
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
run_cost += outer_path->total_cost - outer_path->startup_cost;
@ -1077,12 +1071,11 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/*
* Compute cost and selectivity of the mergequals and qpquals (other
* restriction clauses) separately. We use approx_selectivity here
* for speed --- in most cases, any errors won't affect the result
* much.
* restriction clauses) separately. We use approx_selectivity here for
* speed --- in most cases, any errors won't affect the result much.
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
* Note: it's probably bogus to use the normal selectivity calculation here
* when either the outer or inner path is a UniquePath.
*/
merge_selec = approx_selectivity(root, mergeclauses,
path->jpath.jointype);
@ -1095,31 +1088,30 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
/*
* When there are equal merge keys in the outer relation, the
* mergejoin must rescan any matching tuples in the inner relation.
* This means re-fetching inner tuples. Our cost model for this is
* that a re-fetch costs the same as an original fetch, which is
* probably an overestimate; but on the other hand we ignore the
* bookkeeping costs of mark/restore. Not clear if it's worth
* developing a more refined model.
* When there are equal merge keys in the outer relation, the mergejoin
* must rescan any matching tuples in the inner relation. This means
* re-fetching inner tuples. Our cost model for this is that a re-fetch
* costs the same as an original fetch, which is probably an overestimate;
* but on the other hand we ignore the bookkeeping costs of mark/restore.
* Not clear if it's worth developing a more refined model.
*
* The number of re-fetches can be estimated approximately as size of
* merge join output minus size of inner relation. Assume that the
* distinct key values are 1, 2, ..., and denote the number of values
* of each key in the outer relation as m1, m2, ...; in the inner
* relation, n1, n2, ... Then we have
* The number of re-fetches can be estimated approximately as size of merge
* join output minus size of inner relation. Assume that the distinct key
* values are 1, 2, ..., and denote the number of values of each key in
* the outer relation as m1, m2, ...; in the inner relation, n1, n2, ...
* Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
* number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
* n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
* number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 * n1
* + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
* relation
*
* This equation works correctly for outer tuples having no inner match
* (nk = 0), but not for inner tuples having no outer match (mk = 0);
* we are effectively subtracting those from the number of rescanned
* tuples, when we should not. Can we do better without expensive
* selectivity computations?
* This equation works correctly for outer tuples having no inner match (nk =
* 0), but not for inner tuples having no outer match (mk = 0); we are
* effectively subtracting those from the number of rescanned tuples, when
* we should not. Can we do better without expensive selectivity
* computations?
*/
if (IsA(outer_path, UniquePath))
rescannedtuples = 0;
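
Worked example of the rescanned-tuples estimate derived in the comment above, for two distinct key values (m[] are the outer-side counts, n[] the inner-side counts); plain arithmetic, nothing planner-specific.

/*
 * rescanned tuples = size of join - size of inner relation
 *                  = (m1-1)*n1 + (m2-1)*n2 + ...
 */
#include <stdio.h>

int main(void)
{
    int m[] = {2, 1};           /* outer relation: 2 rows of key 1, 1 of key 2 */
    int n[] = {3, 2};           /* inner relation: 3 rows of key 1, 2 of key 2 */
    int joinsize = 0, innersize = 0;

    for (int k = 0; k < 2; k++)
    {
        joinsize += m[k] * n[k];    /* size of join = m1*n1 + m2*n2 + ... */
        innersize += n[k];          /* size of inner relation */
    }

    int rescanned = joinsize - innersize;

    printf("join=%d inner=%d rescanned=%d\n", joinsize, innersize, rescanned);
    /* prints: join=8 inner=5 rescanned=3 */
    return 0;
}
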
@ -1140,9 +1132,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
* inputs that will actually need to be scanned. We use only the first
* (most significant) merge clause for this purpose.
*
* Since this calculation is somewhat expensive, and will be the same for
* all mergejoin paths associated with the merge clause, we cache the
* results in the RestrictInfo node.
* Since this calculation is somewhat expensive, and will be the same for all
* mergejoin paths associated with the merge clause, we cache the results
* in the RestrictInfo node.
*/
if (mergeclauses && path->jpath.jointype != JOIN_FULL)
{
@ -1181,9 +1173,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/*
* Readjust scan selectivities to account for above rounding. This is
* normally an insignificant effect, but when there are only a few
* rows in the inputs, failing to do this makes for a large percentage
* error.
* normally an insignificant effect, but when there are only a few rows in
* the inputs, failing to do this makes for a large percentage error.
*/
outerscansel = outer_rows / outer_path_rows;
innerscansel = inner_rows / inner_path_rows;
@ -1231,20 +1222,20 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/* CPU costs */
/*
* If we're doing JOIN_IN then we will stop outputting inner tuples
* for an outer tuple as soon as we have one match. Account for the
* effects of this by scaling down the cost estimates in proportion to
* the expected output size. (This assumes that all the quals
* attached to the join are IN quals, which should be true.)
* If we're doing JOIN_IN then we will stop outputting inner tuples for an
* outer tuple as soon as we have one match. Account for the effects of
* this by scaling down the cost estimates in proportion to the expected
* output size. (This assumes that all the quals attached to the join are
* IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(&path->jpath, root);
/*
* The number of tuple comparisons needed is approximately number of
* outer rows plus number of inner rows plus number of rescanned
* tuples (can we refine this?). At each one, we need to evaluate the
* mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
* so do NOT include joininfactor.
* The number of tuple comparisons needed is approximately number of outer
* rows plus number of inner rows plus number of rescanned tuples (can we
* refine this?). At each one, we need to evaluate the mergejoin quals.
* NOTE: JOIN_IN mode does not save any work here, so do NOT include
* joininfactor.
*/
startup_cost += merge_qual_cost.startup;
run_cost += merge_qual_cost.per_tuple *
@ -1253,9 +1244,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
* clauses that are to be applied at the join. (This is pessimistic
* since not all of the quals may get evaluated at each tuple.) This
* work is skipped in JOIN_IN mode, so apply the factor.
* clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.) This work is
* skipped in JOIN_IN mode, so apply the factor.
*/
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@ -1290,9 +1281,9 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
double outer_path_rows = PATH_ROWS(outer_path);
double inner_path_rows = PATH_ROWS(inner_path);
double outerbytes = relation_byte_size(outer_path_rows,
outer_path->parent->width);
outer_path->parent->width);
double innerbytes = relation_byte_size(inner_path_rows,
inner_path->parent->width);
inner_path->parent->width);
int num_hashclauses = list_length(hashclauses);
int numbuckets;
int numbatches;
@ -1306,12 +1297,11 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* Compute cost and selectivity of the hashquals and qpquals (other
* restriction clauses) separately. We use approx_selectivity here
* for speed --- in most cases, any errors won't affect the result
* much.
* restriction clauses) separately. We use approx_selectivity here for
* speed --- in most cases, any errors won't affect the result much.
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
* Note: it's probably bogus to use the normal selectivity calculation here
* when either the outer or inner path is a UniquePath.
*/
hash_selec = approx_selectivity(root, hashclauses,
path->jpath.jointype);
@ -1329,13 +1319,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
startup_cost += inner_path->total_cost;
/*
* Cost of computing hash function: must do it once per input tuple.
* We charge one cpu_operator_cost for each column's hash function.
* Cost of computing hash function: must do it once per input tuple. We
* charge one cpu_operator_cost for each column's hash function.
*
* XXX when a hashclause is more complex than a single operator, we
* really should charge the extra eval costs of the left or right
* side, as appropriate, here. This seems more work than it's worth
* at the moment.
* XXX when a hashclause is more complex than a single operator, we really
* should charge the extra eval costs of the left or right side, as
* appropriate, here. This seems more work than it's worth at the moment.
*/
startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
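
Back-of-the-envelope version of the charge the comment above describes: one cpu_operator_cost per hash clause per input tuple, counted as startup cost for the hashed inner side and as run cost for the probing outer side. The row counts are illustrative.

/*
 * Hash-computation charge, as a standalone arithmetic example.
 */
#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;
    int    num_hashclauses = 2;
    double inner_rows = 10000.0;    /* tuples loaded into the hash table */
    double outer_rows = 50000.0;    /* tuples that probe the hash table */

    double startup_cost = cpu_operator_cost * num_hashclauses * inner_rows;
    double run_cost     = cpu_operator_cost * num_hashclauses * outer_rows;

    printf("hashing charge: startup=%.1f run=%.1f\n", startup_cost, run_cost);
    /* prints: startup=50.0 run=250.0 */
    return 0;
}
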
@ -1345,17 +1334,17 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
inner_path->parent->width,
&numbuckets,
&numbatches);
virtualbuckets = (double) numbuckets * (double) numbatches;
virtualbuckets = (double) numbuckets *(double) numbatches;
/*
* Determine bucketsize fraction for inner relation. We use the
* smallest bucketsize estimated for any individual hashclause; this
* is undoubtedly conservative.
* Determine bucketsize fraction for inner relation. We use the smallest
* bucketsize estimated for any individual hashclause; this is undoubtedly
* conservative.
*
* BUT: if inner relation has been unique-ified, we can assume it's good
* for hashing. This is important both because it's the right answer,
* and because we avoid contaminating the cache with a value that's
* wrong for non-unique-ified paths.
* BUT: if inner relation has been unique-ified, we can assume it's good for
* hashing. This is important both because it's the right answer, and
* because we avoid contaminating the cache with a value that's wrong for
* non-unique-ified paths.
*/
if (IsA(inner_path, UniquePath))
innerbucketsize = 1.0 / virtualbuckets;
@ -1370,13 +1359,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
Assert(IsA(restrictinfo, RestrictInfo));
/*
* First we have to figure out which side of the hashjoin
* clause is the inner side.
* First we have to figure out which side of the hashjoin clause
* is the inner side.
*
* Since we tend to visit the same clauses over and over when
* planning a large query, we cache the bucketsize estimate in
* the RestrictInfo node to avoid repeated lookups of
* statistics.
* planning a large query, we cache the bucketsize estimate in the
* RestrictInfo node to avoid repeated lookups of statistics.
*/
if (bms_is_subset(restrictinfo->right_relids,
inner_path->parent->relids))
@ -1388,7 +1376,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
get_rightop(restrictinfo->clause),
get_rightop(restrictinfo->clause),
virtualbuckets);
restrictinfo->right_bucketsize = thisbucketsize;
}
@ -1404,7 +1392,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
get_leftop(restrictinfo->clause),
get_leftop(restrictinfo->clause),
virtualbuckets);
restrictinfo->left_bucketsize = thisbucketsize;
}
@ -1417,10 +1405,10 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* If inner relation is too big then we will need to "batch" the join,
* which implies writing and reading most of the tuples to disk an
* extra time. Charge one cost unit per page of I/O (correct since it
* should be nice and sequential...). Writing the inner rel counts as
* startup cost, all the rest as run cost.
* which implies writing and reading most of the tuples to disk an extra
* time. Charge one cost unit per page of I/O (correct since it should be
* nice and sequential...). Writing the inner rel counts as startup cost,
* all the rest as run cost.
*/
if (numbatches > 1)
{
@ -1436,21 +1424,21 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* CPU costs */
/*
* If we're doing JOIN_IN then we will stop comparing inner tuples to
* an outer tuple as soon as we have one match. Account for the
* effects of this by scaling down the cost estimates in proportion to
* the expected output size. (This assumes that all the quals
* attached to the join are IN quals, which should be true.)
* If we're doing JOIN_IN then we will stop comparing inner tuples to an
* outer tuple as soon as we have one match. Account for the effects of
* this by scaling down the cost estimates in proportion to the expected
* output size. (This assumes that all the quals attached to the join are
* IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(&path->jpath, root);
/*
* The number of tuple comparisons needed is the number of outer
* tuples times the typical number of tuples in a hash bucket, which
* is the inner relation size times its bucketsize fraction. At each
* one, we need to evaluate the hashjoin quals. (Note: charging the
* full qual eval cost at each tuple is pessimistic, since we don't
* evaluate the quals unless the hash values match exactly.)
* The number of tuple comparisons needed is the number of outer tuples
* times the typical number of tuples in a hash bucket, which is the inner
* relation size times its bucketsize fraction. At each one, we need to
* evaluate the hashjoin quals. (Note: charging the full qual eval cost
* at each tuple is pessimistic, since we don't evaluate the quals unless
* the hash values match exactly.)
*/
startup_cost += hash_qual_cost.startup;
run_cost += hash_qual_cost.per_tuple *
@ -1460,8 +1448,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
* clauses that are to be applied at the join. (This is pessimistic
* since not all of the quals may get evaluated at each tuple.)
* clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@ -1469,16 +1457,16 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* Bias against putting larger relation on inside. We don't want an
* absolute prohibition, though, since larger relation might have
* better bucketsize --- and we can't trust the size estimates
* unreservedly, anyway. Instead, inflate the run cost by the square
* root of the size ratio. (Why square root? No real good reason,
* but it seems reasonable...)
* absolute prohibition, though, since larger relation might have better
* bucketsize --- and we can't trust the size estimates unreservedly,
* anyway. Instead, inflate the run cost by the square root of the size
* ratio. (Why square root? No real good reason, but it seems
* reasonable...)
*
* Note: before 7.4 we implemented this by inflating startup cost; but if
* there's a disable_cost component in the input paths' startup cost,
* that unfairly penalizes the hash. Probably it'd be better to keep
* track of disable penalty separately from cost.
* there's a disable_cost component in the input paths' startup cost, that
* unfairly penalizes the hash. Probably it'd be better to keep track of
* disable penalty separately from cost.
*/
if (innerbytes > outerbytes && outerbytes > 0)
run_cost *= sqrt(innerbytes / outerbytes);
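
A tiny illustration of the square-root bias just described: when the inner relation is larger (in bytes) than the outer, the run cost is inflated by sqrt(inner/outer). Standalone example only.

/*
 * Inflate run cost by the square root of the inner/outer size ratio.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double run_cost = 1000.0;
    double innerbytes = 4.0e6;
    double outerbytes = 1.0e6;

    if (innerbytes > outerbytes && outerbytes > 0)
        run_cost *= sqrt(innerbytes / outerbytes);  /* x2 for a 4:1 ratio */

    printf("biased run cost: %.1f\n", run_cost);    /* 2000.0 */
    return 0;
}
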
@ -1545,13 +1533,13 @@ cost_qual_eval_walker(Node *node, QualCost *total)
return false;
/*
* Our basic strategy is to charge one cpu_operator_cost for each
* operator or function node in the given tree. Vars and Consts are
* charged zero, and so are boolean operators (AND, OR, NOT).
* Simplistic, but a lot better than no model at all.
* Our basic strategy is to charge one cpu_operator_cost for each operator
* or function node in the given tree. Vars and Consts are charged zero,
* and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
* better than no model at all.
*
* Should we try to account for the possibility of short-circuit
* evaluation of AND/OR?
* Should we try to account for the possibility of short-circuit evaluation
* of AND/OR?
*/
if (IsA(node, FuncExpr) ||
IsA(node, OpExpr) ||
@ -1572,12 +1560,12 @@ cost_qual_eval_walker(Node *node, QualCost *total)
{
/*
* A subplan node in an expression typically indicates that the
* subplan will be executed on each evaluation, so charge
* accordingly. (Sub-selects that can be executed as InitPlans
* have already been removed from the expression.)
* subplan will be executed on each evaluation, so charge accordingly.
* (Sub-selects that can be executed as InitPlans have already been
* removed from the expression.)
*
* An exception occurs when we have decided we can implement the
* subplan by hashing.
* An exception occurs when we have decided we can implement the subplan
* by hashing.
*
*/
SubPlan *subplan = (SubPlan *) node;
@ -1586,32 +1574,31 @@ cost_qual_eval_walker(Node *node, QualCost *total)
if (subplan->useHashTable)
{
/*
* If we are using a hash table for the subquery outputs, then
* the cost of evaluating the query is a one-time cost. We
* charge one cpu_operator_cost per tuple for the work of
* loading the hashtable, too.
* If we are using a hash table for the subquery outputs, then the
* cost of evaluating the query is a one-time cost. We charge one
* cpu_operator_cost per tuple for the work of loading the
* hashtable, too.
*/
total->startup += plan->total_cost +
cpu_operator_cost * plan->plan_rows;
/*
* The per-tuple costs include the cost of evaluating the
* lefthand expressions, plus the cost of probing the
* hashtable. Recursion into the exprs list will handle the
* lefthand expressions properly, and will count one
* cpu_operator_cost for each comparison operator. That is
* probably too low for the probing cost, but it's hard to
* make a better estimate, so live with it for now.
* The per-tuple costs include the cost of evaluating the lefthand
* expressions, plus the cost of probing the hashtable. Recursion
* into the exprs list will handle the lefthand expressions
* properly, and will count one cpu_operator_cost for each
* comparison operator. That is probably too low for the probing
* cost, but it's hard to make a better estimate, so live with it
* for now.
*/
}
else
{
/*
* Otherwise we will be rescanning the subplan output on each
* evaluation. We need to estimate how much of the output we
* will actually need to scan. NOTE: this logic should agree
* with the estimates used by make_subplan() in
* plan/subselect.c.
* evaluation. We need to estimate how much of the output we will
* actually need to scan. NOTE: this logic should agree with the
* estimates used by make_subplan() in plan/subselect.c.
*/
Cost plan_run_cost = plan->total_cost - plan->startup_cost;
@ -1636,10 +1623,10 @@ cost_qual_eval_walker(Node *node, QualCost *total)
/*
* Also account for subplan's startup cost. If the subplan is
* uncorrelated or undirect correlated, AND its topmost node
* is a Sort or Material node, assume that we'll only need to
* pay its startup cost once; otherwise assume we pay the
* startup cost every time.
* uncorrelated or undirect correlated, AND its topmost node is a
* Sort or Material node, assume that we'll only need to pay its
* startup cost once; otherwise assume we pay the startup cost
* every time.
*/
if (subplan->parParam == NIL &&
(IsA(plan, Sort) ||
@ -1761,9 +1748,9 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
/*
* Compute joinclause selectivity. Note that we are only considering
* clauses that become restriction clauses at this join level; we are
* not double-counting them because they were not considered in
* estimating the sizes of the component rels.
* clauses that become restriction clauses at this join level; we are not
* double-counting them because they were not considered in estimating the
* sizes of the component rels.
*/
selec = clauselist_selectivity(root,
restrictlist,
@ -1773,13 +1760,13 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
/*
* Basically, we multiply size of Cartesian product by selectivity.
*
* If we are doing an outer join, take that into account: the output must
* be at least as large as the non-nullable input. (Is there any
* chance of being even smarter?)
* If we are doing an outer join, take that into account: the output must be
* at least as large as the non-nullable input. (Is there any chance of
* being even smarter?)
*
* For JOIN_IN and variants, the Cartesian product is figured with
* respect to a unique-ified input, and then we can clamp to the size
* of the other input.
* For JOIN_IN and variants, the Cartesian product is figured with respect to
* a unique-ified input, and then we can clamp to the size of the other
* input.
*/
switch (jointype)
{
@ -1848,12 +1835,11 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root)
return 1.0;
/*
* Return 1.0 if the inner side is already known unique. The case
* where the inner path is already a UniquePath probably cannot happen
* in current usage, but check it anyway for completeness. The
* interesting case is where we've determined the inner relation
* itself is unique, which we can check by looking at the rows
* estimate for its UniquePath.
* Return 1.0 if the inner side is already known unique. The case where
* the inner path is already a UniquePath probably cannot happen in
* current usage, but check it anyway for completeness. The interesting
* case is where we've determined the inner relation itself is unique,
* which we can check by looking at the rows estimate for its UniquePath.
*/
if (IsA(path->innerjoinpath, UniquePath))
return 1.0;
@ -1866,10 +1852,9 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root)
/*
* Compute same result set_joinrel_size_estimates would compute for
* JOIN_INNER. Note that we use the input rels' absolute size
* estimates, not PATH_ROWS() which might be less; if we used
* PATH_ROWS() we'd be double-counting the effects of any join clauses
* used in input scans.
* JOIN_INNER. Note that we use the input rels' absolute size estimates,
* not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
* double-counting the effects of any join clauses used in input scans.
*/
selec = clauselist_selectivity(root,
path->joinrestrictinfo,
@ -1908,8 +1893,8 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
/*
* Estimate number of rows the function itself will return.
*
* XXX no idea how to do this yet; but we can at least check whether
* function returns set or not...
* XXX no idea how to do this yet; but we can at least check whether function
* returns set or not...
*/
if (expression_returns_set(rte->funcexpr))
rel->tuples = 1000;
@ -1957,8 +1942,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
ndx = var->varattno - rel->min_attr;
/*
* The width probably hasn't been cached yet, but may as well
* check
* The width probably hasn't been cached yet, but may as well check
*/
if (rel->attr_widths[ndx] > 0)
{

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.190 2005/09/24 22:54:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -48,9 +48,9 @@
static List *find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
List *clauses, List *outer_clauses,
bool istoplevel, bool isjoininner,
Relids outer_relids);
List *clauses, List *outer_clauses,
bool istoplevel, bool isjoininner,
Relids outer_relids);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths);
static int bitmap_path_comparator(const void *a, const void *b);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths);
@ -62,25 +62,25 @@ static Oid indexable_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Relids indexable_outerrelids(RelOptInfo *rel);
static bool matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel,
Relids outer_relids);
Relids outer_relids);
static List *find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
Relids outer_relids, bool isouterjoin);
Relids outer_relids, bool isouterjoin);
static ScanDirection match_variant_ordering(PlannerInfo *root,
IndexOptInfo *index,
List *restrictclauses);
IndexOptInfo *index,
List *restrictclauses);
static List *identify_ignorable_ordering_cols(PlannerInfo *root,
IndexOptInfo *index,
List *restrictclauses);
IndexOptInfo *index,
List *restrictclauses);
static bool match_index_to_query_keys(PlannerInfo *root,
IndexOptInfo *index,
ScanDirection indexscandir,
List *ignorables);
IndexOptInfo *index,
ScanDirection indexscandir,
List *ignorables);
static bool match_boolean_index_clause(Node *clause, int indexcol,
IndexOptInfo *index);
IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Expr *expand_boolean_index_clause(Node *clause, int indexcol,
IndexOptInfo *index);
IndexOptInfo *index);
static List *expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass);
static List *prefix_quals(Node *leftop, Oid opclass,
Const *prefix, Pattern_Prefix_Status pstatus);
@ -153,8 +153,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
true, false, NULL);
/*
* We can submit them all to add_path. (This generates access paths for
* plain IndexScan plans.) However, for the next step we will only want
* We can submit them all to add_path. (This generates access paths for
* plain IndexScan plans.) However, for the next step we will only want
* the ones that have some selectivity; we must discard anything that was
* generated solely for ordering purposes.
*/
@ -180,8 +180,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
bitindexpaths = list_concat(bitindexpaths, indexpaths);
/*
* If we found anything usable, generate a BitmapHeapPath for the
* most promising combination of bitmap index paths.
* If we found anything usable, generate a BitmapHeapPath for the most
* promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
@ -254,19 +254,19 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
bool index_is_ordered;
/*
* Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK; otherwise, if we
* are at top level we know it's not OK (since predOK is exactly
* whether its predicate could be proven from the toplevel clauses).
* Otherwise, we have to test whether the added clauses are
* sufficient to imply the predicate. If so, we could use
* the index in the current context.
* Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK; otherwise, if we are
* at top level we know it's not OK (since predOK is exactly whether
* its predicate could be proven from the toplevel clauses).
* Otherwise, we have to test whether the added clauses are sufficient
* to imply the predicate. If so, we could use the index in the
* current context.
*
* We set useful_predicate to true iff the predicate was proven
* using the current set of clauses. This is needed to prevent
* matching a predOK index to an arm of an OR, which would be
* a legal but pointlessly inefficient plan. (A better plan will
* be generated by just scanning the predOK index alone, no OR.)
* We set useful_predicate to true iff the predicate was proven using the
* current set of clauses. This is needed to prevent matching a
* predOK index to an arm of an OR, which would be a legal but
* pointlessly inefficient plan. (A better plan will be generated by
* just scanning the predOK index alone, no OR.)
*/
useful_predicate = false;
if (index->indpred != NIL)
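For intuition, the test being described asks whether the index predicate follows from the clauses available in the current context, and useful_predicate records whether the current clauses (not just the outer ones) were needed for the proof. A grossly simplified standalone sketch, treating clauses as opaque strings and "implication" as literal containment, which is far weaker than predicate_implied_by:

#include <stdio.h>
#include <string.h>

/* "implied" here just means every predicate clause appears verbatim */
static int
implied_by(const char *pred[], int npred,
           const char *clauses[], int nclauses)
{
    int         i,
                j;

    for (i = 0; i < npred; i++)
    {
        int         found = 0;

        for (j = 0; j < nclauses; j++)
            if (strcmp(pred[i], clauses[j]) == 0)
            {
                found = 1;
                break;
            }
        if (!found)
            return 0;
    }
    return 1;
}

int
main(void)
{
    const char *indpred[] = {"deleted = false"};
    const char *current[] = {"deleted = false", "id > 100"};
    const char *outer_only[] = {"id > 100"};

    printf("provable from current clauses: %d\n", implied_by(indpred, 1, current, 2));
    printf("provable from outer clauses:   %d\n", implied_by(indpred, 1, outer_only, 1));
    return 0;
}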
@ -282,7 +282,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
else
{
if (istoplevel)
continue; /* no point in trying to prove it */
continue; /* no point in trying to prove it */
/* Form all_clauses if not done already */
if (all_clauses == NIL)
@ -290,7 +290,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
outer_clauses);
if (!predicate_implied_by(index->indpred, all_clauses))
continue; /* can't use it at all */
continue; /* can't use it at all */
if (!predicate_implied_by(index->indpred, outer_clauses))
useful_predicate = true;
@ -309,17 +309,17 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
&found_clause);
/*
* Not all index AMs support scans with no restriction clauses.
* We can't generate a scan over an index with amoptionalkey = false
* Not all index AMs support scans with no restriction clauses. We
* can't generate a scan over an index with amoptionalkey = false
* unless there's at least one restriction clause.
*/
if (restrictclauses == NIL && !index->amoptionalkey)
continue;
/*
* 2. Compute pathkeys describing index's ordering, if any, then
* see how many of them are actually useful for this query. This
* is not relevant unless we are at top level.
* 2. Compute pathkeys describing index's ordering, if any, then see
* how many of them are actually useful for this query. This is not
* relevant unless we are at top level.
*/
index_is_ordered = OidIsValid(index->ordering[0]);
if (istoplevel && index_is_ordered && !isjoininner)
@ -335,9 +335,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
/*
* 3. Generate an indexscan path if there are relevant restriction
* clauses in the current clauses, OR the index ordering is
* potentially useful for later merging or final output ordering,
* OR the index has a predicate that was proven by the current
* clauses.
* potentially useful for later merging or final output ordering, OR
* the index has a predicate that was proven by the current clauses.
*/
if (found_clause || useful_pathkeys != NIL || useful_predicate)
{
@ -352,16 +351,15 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
}
/*
* 4. If the index is ordered, and there is a requested query
* ordering that we failed to match, consider variant ways of
* achieving the ordering. Again, this is only interesting
* at top level.
* 4. If the index is ordered, and there is a requested query ordering
* that we failed to match, consider variant ways of achieving the
* ordering. Again, this is only interesting at top level.
*/
if (istoplevel && index_is_ordered && !isjoininner &&
root->query_pathkeys != NIL &&
pathkeys_useful_for_ordering(root, useful_pathkeys) == 0)
{
ScanDirection scandir;
ScanDirection scandir;
scandir = match_variant_ordering(root, index, restrictclauses);
if (!ScanDirectionIsNoMovement(scandir))
@ -409,9 +407,9 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
foreach(l, clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
List *pathlist;
Path *bitmapqual;
ListCell *j;
List *pathlist;
Path *bitmapqual;
ListCell *j;
Assert(IsA(rinfo, RestrictInfo));
/* Ignore RestrictInfos that aren't ORs */
@ -419,19 +417,19 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
continue;
/*
* We must be able to match at least one index to each of the arms
* of the OR, else we can't use it.
* We must be able to match at least one index to each of the arms of
* the OR, else we can't use it.
*/
pathlist = NIL;
foreach(j, ((BoolExpr *) rinfo->orclause)->args)
{
Node *orarg = (Node *) lfirst(j);
List *indlist;
Node *orarg = (Node *) lfirst(j);
List *indlist;
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
{
List *andargs = ((BoolExpr *) orarg)->args;
List *andargs = ((BoolExpr *) orarg)->args;
indlist = find_usable_indexes(root, rel,
andargs,
@ -458,25 +456,28 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
isjoininner,
outer_relids);
}
/*
* If nothing matched this arm, we can't do anything
* with this OR clause.
* If nothing matched this arm, we can't do anything with this OR
* clause.
*/
if (indlist == NIL)
{
pathlist = NIL;
break;
}
/*
* OK, pick the most promising AND combination,
* and add it to pathlist.
* OK, pick the most promising AND combination, and add it to
* pathlist.
*/
bitmapqual = choose_bitmap_and(root, rel, indlist);
pathlist = lappend(pathlist, bitmapqual);
}
/*
* If we have a match for every arm, then turn them
* into a BitmapOrPath, and add to result list.
* If we have a match for every arm, then turn them into a
* BitmapOrPath, and add to result list.
*/
if (pathlist != NIL)
{
@ -494,7 +495,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
* given path set. We want to choose a good tradeoff between selectivity
* given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
@ -511,7 +512,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
int i;
ListCell *l;
Assert(npaths > 0); /* else caller error */
Assert(npaths > 0); /* else caller error */
if (npaths == 1)
return (Path *) linitial(paths); /* easy case */
@ -519,24 +520,23 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
* OR clauses. As a compromise, we sort the paths by selectivity.
* We always take the first, and sequentially add on paths that result
* in a lower estimated cost.
* OR clauses. As a compromise, we sort the paths by selectivity. We
* always take the first, and sequentially add on paths that result in a
* lower estimated cost.
*
* We also make some effort to detect directly redundant input paths,
* as can happen if there are multiple possibly usable indexes. For
* this we look only at plain IndexPath inputs, not at sub-OR clauses.
* And we consider an index redundant if all its index conditions were
* already used by earlier indexes. (We could use predicate_implied_by
* to have a more intelligent, but much more expensive, check --- but in
* most cases simple pointer equality should suffice, since after all the
* index conditions are all coming from the same RestrictInfo lists.)
* We also make some effort to detect directly redundant input paths, as can
* happen if there are multiple possibly usable indexes. For this we look
* only at plain IndexPath inputs, not at sub-OR clauses. And we consider
* an index redundant if all its index conditions were already used by
* earlier indexes. (We could use predicate_implied_by to have a more
* intelligent, but much more expensive, check --- but in most cases
* simple pointer equality should suffice, since after all the index
* conditions are all coming from the same RestrictInfo lists.)
*
* XXX is there any risk of throwing away a useful partial index here
* because we don't explicitly look at indpred? At least in simple
* cases, the partial index will sort before competing non-partial
* indexes and so it makes the right choice, but perhaps we need to
* work harder.
* XXX is there any risk of throwing away a useful partial index here because
* we don't explicitly look at indpred? At least in simple cases, the
* partial index will sort before competing non-partial indexes and so it
* makes the right choice, but perhaps we need to work harder.
*
* Note: outputting the selected sub-paths in selectivity order is a good
* thing even if we weren't using that as part of the selection method,
@ -559,13 +559,13 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
qualsofar = list_copy(((IndexPath *) patharray[0])->indexclauses);
else
qualsofar = NIL;
lastcell = list_head(paths); /* for quick deletions */
lastcell = list_head(paths); /* for quick deletions */
for (i = 1; i < npaths; i++)
{
Path *newpath = patharray[i];
List *newqual = NIL;
Cost newcost;
Path *newpath = patharray[i];
List *newqual = NIL;
Cost newcost;
if (IsA(newpath, IndexPath))
{
@ -599,12 +599,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
static int
bitmap_path_comparator(const void *a, const void *b)
{
Path *pa = *(Path * const *) a;
Path *pb = *(Path * const *) b;
Path *pa = *(Path *const *) a;
Path *pb = *(Path *const *) b;
Cost acost;
Cost bcost;
Selectivity aselec;
Selectivity bselec;
Selectivity aselec;
Selectivity bselec;
cost_bitmap_tree_node(pa, &acost, &aselec);
cost_bitmap_tree_node(pb, &bcost, &bselec);
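The strategy described above (sort candidates by selectivity, keep the first, and AND in further paths only while the estimated total cost keeps dropping) looks like this in miniature. The cost model below is invented for illustration and is much cruder than the real bitmap costing:

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    const char *name;
    double      selec;          /* fraction of the table selected */
    double      cost;           /* cost to build this index's bitmap */
} CandPath;

static int
cand_comparator(const void *a, const void *b)
{
    const CandPath *pa = (const CandPath *) a;
    const CandPath *pb = (const CandPath *) b;

    if (pa->selec != pb->selec)
        return (pa->selec < pb->selec) ? -1 : 1;
    if (pa->cost != pb->cost)
        return (pa->cost < pb->cost) ? -1 : 1;
    return 0;
}

/* crude stand-in for bitmap-AND cost plus the heap visits it implies */
static double
total_cost(double selec, double bitmap_cost, double table_pages)
{
    return bitmap_cost + selec * table_pages;
}

int
main(void)
{
    CandPath    paths[] = {
        {"idx_b", 0.20, 10.0},
        {"idx_a", 0.01, 25.0},
        {"idx_c", 0.05, 60.0},
    };
    int         npaths = 3;
    double      table_pages = 10000.0;
    double      selec,
                cost;
    int         i;

    qsort(paths, npaths, sizeof(CandPath), cand_comparator);

    selec = paths[0].selec;
    cost = total_cost(selec, paths[0].cost, table_pages);
    printf("start with %s, cost %.1f\n", paths[0].name, cost);

    for (i = 1; i < npaths; i++)
    {
        double      newselec = selec * paths[i].selec;  /* assume independence */
        double      newcost = total_cost(newselec,
                                         cost - selec * table_pages + paths[i].cost,
                                         table_pages);

        if (newcost < cost)
        {
            selec = newselec;
            cost = newcost;
            printf("AND in %s, cost now %.1f\n", paths[i].name, cost);
        }
        else
            printf("skip %s (cost would become %.1f)\n", paths[i].name, newcost);
    }
    return 0;
}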
@ -660,7 +660,7 @@ bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths)
*
* We can use clauses from either the current clauses or outer_clauses lists,
* but *found_clause is set TRUE only if we used at least one clause from
* the "current clauses" list. See find_usable_indexes() for motivation.
* the "current clauses" list. See find_usable_indexes() for motivation.
*
* outer_relids determines what Vars will be allowed on the other side
* of a possible index qual; see match_clause_to_indexcol().
@ -770,7 +770,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* to the caller-specified outer_relids relations (which had better not
* include the relation whose index is being tested). outer_relids should
* be NULL when checking simple restriction clauses, and the outer side
* of the join when building a join inner scan. Other than that, the
* of the join when building a join inner scan. Other than that, the
* only thing we don't like is volatile functions.
*
* Note: in most cases we already know that the clause as a whole uses
@ -836,8 +836,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;
/*
* If we didn't find a member of the index's opclass, see whether
* it is a "special" indexable operator.
* If we didn't find a member of the index's opclass, see whether it
* is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, true))
return true;
@ -852,8 +852,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;
/*
* If we didn't find a member of the index's opclass, see whether
* it is a "special" indexable operator.
* If we didn't find a member of the index's opclass, see whether it
* is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, false))
return true;
@ -914,14 +914,14 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* Note: if Postgres tried to optimize queries by forming equivalence
* classes over equi-joined attributes (i.e., if it recognized that a
* qualification such as "where a.b=c.d and a.b=5" could make use of
* an index on c.d), then we could use that equivalence class info
* here with joininfo lists to do more complete tests for the usability
* of a partial index. For now, the test only uses restriction
* clauses (those in baserestrictinfo). --Nels, Dec '92
* qualification such as "where a.b=c.d and a.b=5" could make use of an
* index on c.d), then we could use that equivalence class info here with
* joininfo lists to do more complete tests for the usability of a partial
* index. For now, the test only uses restriction clauses (those in
* baserestrictinfo). --Nels, Dec '92
*
* XXX as of 7.1, equivalence class info *is* available. Consider
* improving this code as foreseen by Nels.
* XXX as of 7.1, equivalence class info *is* available. Consider improving
* this code as foreseen by Nels.
*/
foreach(ilist, rel->indexlist)
@ -943,7 +943,7 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
* for the specified table. Returns a set of relids.
* for the specified table. Returns a set of relids.
*/
static Relids
indexable_outerrelids(RelOptInfo *rel)
@ -958,7 +958,7 @@ indexable_outerrelids(RelOptInfo *rel)
foreach(l, rel->joininfo)
{
RestrictInfo *joininfo = (RestrictInfo *) lfirst(l);
Relids other_rels;
Relids other_rels;
other_rels = bms_difference(joininfo->required_relids, rel->relids);
if (matches_any_index(joininfo, rel, other_rels))
@ -986,7 +986,7 @@ matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel, Relids outer_relids)
{
foreach(l, ((BoolExpr *) rinfo->orclause)->args)
{
Node *orarg = (Node *) lfirst(l);
Node *orarg = (Node *) lfirst(l);
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
@ -1092,17 +1092,17 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
return NULL;
/*
* Otherwise, we have to do path selection in the memory context of
* the given rel, so that any created path can be safely attached to
* the rel's cache of best inner paths. (This is not currently an
* issue for normal planning, but it is an issue for GEQO planning.)
* Otherwise, we have to do path selection in the memory context of the
* given rel, so that any created path can be safely attached to the rel's
* cache of best inner paths. (This is not currently an issue for normal
* planning, but it is an issue for GEQO planning.)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
/*
* Intersect the given outer_relids with index_outer_relids to find
* the set of outer relids actually relevant for this rel. If there
* are none, again we can fail immediately.
* Intersect the given outer_relids with index_outer_relids to find the
* set of outer relids actually relevant for this rel. If there are none,
* again we can fail immediately.
*/
outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
if (bms_is_empty(outer_relids))
@ -1113,11 +1113,10 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
}
/*
* Look to see if we already computed the result for this set of
* relevant outerrels. (We include the isouterjoin status in the
* cache lookup key for safety. In practice I suspect this is not
* necessary because it should always be the same for a given
* innerrel.)
* Look to see if we already computed the result for this set of relevant
* outerrels. (We include the isouterjoin status in the cache lookup key
* for safety. In practice I suspect this is not necessary because it
* should always be the same for a given innerrel.)
*/
foreach(l, rel->index_inner_paths)
{
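The lookup being described keys on the relevant outer relids plus the outer-join flag. A standalone sketch with a toy bitmask standing in for Relids (invented structure, not the real index_inner_paths cache):

#include <stdio.h>

typedef unsigned int Relids;    /* toy bitmask of relation ids */

typedef struct CacheEntry
{
    Relids      other_relids;
    int         isouterjoin;
    const char *best_path;      /* stand-in for the cached Path */
    struct CacheEntry *next;
} CacheEntry;

static const char *
lookup_inner_path(CacheEntry *cache, Relids other_relids, int isouterjoin)
{
    CacheEntry *e;

    for (e = cache; e != NULL; e = e->next)
        if (e->other_relids == other_relids &&
            e->isouterjoin == isouterjoin)
            return e->best_path;
    return NULL;                /* not computed yet */
}

int
main(void)
{
    CacheEntry  e2 = {0x0C, 0, "indexscan on t.b", NULL};
    CacheEntry  e1 = {0x06, 1, "indexscan on t.a", &e2};
    const char *hit = lookup_inner_path(&e1, 0x0C, 0);

    printf("cache result: %s\n", hit ? hit : "miss");
    return 0;
}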
@ -1160,8 +1159,8 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
bitindexpaths = list_concat(bitindexpaths, list_copy(indexpaths));
/*
* If we found anything usable, generate a BitmapHeapPath for the
* most promising combination of bitmap index paths.
* If we found anything usable, generate a BitmapHeapPath for the most
* promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
@ -1218,12 +1217,11 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
ListCell *l;
/*
* We can always use plain restriction clauses for the rel. We
* scan these first because we want them first in the clause
* list for the convenience of remove_redundant_join_clauses,
* which can never remove non-join clauses and hence won't be able
* to get rid of a non-join clause if it appears after a join
* clause it is redundant with.
* We can always use plain restriction clauses for the rel. We scan these
* first because we want them first in the clause list for the convenience
* of remove_redundant_join_clauses, which can never remove non-join
* clauses and hence won't be able to get rid of a non-join clause if it
* appears after a join clause it is redundant with.
*/
foreach(l, rel->baserestrictinfo)
{
@ -1305,7 +1303,7 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
*
* If able to match the requested query pathkeys, returns either
* ForwardScanDirection or BackwardScanDirection to indicate the proper index
* scan direction. If no match, returns NoMovementScanDirection.
* scan direction. If no match, returns NoMovementScanDirection.
*/
static ScanDirection
match_variant_ordering(PlannerInfo *root,
@ -1318,8 +1316,8 @@ match_variant_ordering(PlannerInfo *root,
* Forget the whole thing if not a btree index; our check for ignorable
* columns assumes we are dealing with btree opclasses. (It'd be possible
* to factor out just the try for backwards indexscan, but considering
* that we presently have no orderable indexes except btrees anyway,
* it's hardly worth contorting this code for that case.)
* that we presently have no orderable indexes except btrees anyway, it's
* hardly worth contorting this code for that case.)
*
* Note: if you remove this, you probably need to put in a check on
* amoptionalkey to prevent possible clauseless scan on an index that
@ -1327,17 +1325,19 @@ match_variant_ordering(PlannerInfo *root,
*/
if (index->relam != BTREE_AM_OID)
return NoMovementScanDirection;
/*
* Figure out which index columns can be optionally ignored because
* they have an equality constraint. This is the same set for either
* forward or backward scan, so we do it just once.
* Figure out which index columns can be optionally ignored because they
* have an equality constraint. This is the same set for either forward
* or backward scan, so we do it just once.
*/
ignorables = identify_ignorable_ordering_cols(root, index,
restrictclauses);
/*
* Try to match to forward scan, then backward scan. However, we can
* skip the forward-scan case if there are no ignorable columns,
* because find_usable_indexes() would have found the match already.
* Try to match to forward scan, then backward scan. However, we can skip
* the forward-scan case if there are no ignorable columns, because
* find_usable_indexes() would have found the match already.
*/
if (ignorables &&
match_index_to_query_keys(root, index, ForwardScanDirection,
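The point of the "ignorable" columns is that an index on (a, b, c) can still deliver rows ordered by (a, c) when b is pinned by an equality clause. A standalone sketch of that matching loop (invented representation, not the pathkey machinery):

#include <stdio.h>
#include <string.h>

static int
ordering_matches(const char *indexcols[], int nindexcols,
                 const int ignorable[],    /* 1 = equality-constrained */
                 const char *querykeys[], int nquerykeys)
{
    int         i = 0;
    int         q;

    for (q = 0; q < nquerykeys; q++)
    {
        /* skip index columns pinned by an equality clause */
        while (i < nindexcols && ignorable[i])
            i++;
        if (i >= nindexcols || strcmp(indexcols[i], querykeys[q]) != 0)
            return 0;
        i++;
    }
    return 1;
}

int
main(void)
{
    const char *indexcols[] = {"a", "b", "c"};
    int         ignorable[] = {0, 1, 0};    /* suppose b = constant */
    const char *querykeys[] = {"a", "c"};

    printf("ORDER BY a, c usable: %s\n",
           ordering_matches(indexcols, 3, ignorable, querykeys, 2) ? "yes" : "no");
    return 0;
}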
@ -1365,24 +1365,24 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
List *restrictclauses)
{
List *result = NIL;
int indexcol = 0; /* note this is 0-based */
int indexcol = 0; /* note this is 0-based */
ListCell *l;
/* restrictclauses is either NIL or has a sublist per column */
foreach(l, restrictclauses)
{
List *sublist = (List *) lfirst(l);
Oid opclass = index->classlist[indexcol];
ListCell *l2;
List *sublist = (List *) lfirst(l);
Oid opclass = index->classlist[indexcol];
ListCell *l2;
foreach(l2, sublist)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l2);
OpExpr *clause = (OpExpr *) rinfo->clause;
Oid clause_op;
int op_strategy;
bool varonleft;
bool ispc;
Oid clause_op;
int op_strategy;
bool varonleft;
bool ispc;
/* We know this clause passed match_clause_to_indexcol */
@ -1393,11 +1393,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
index))
{
/*
* The clause means either col = TRUE or col = FALSE;
* we do not care which, it's an equality constraint
* either way.
* The clause means either col = TRUE or col = FALSE; we
* do not care which, it's an equality constraint either
* way.
*/
result = lappend_int(result, indexcol+1);
result = lappend_int(result, indexcol + 1);
break;
}
}
@ -1426,12 +1426,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
op_strategy = get_op_opclass_strategy(clause_op, opclass);
/*
* You might expect to see Assert(op_strategy != 0) here,
* but you won't: the clause might contain a special indexable
* operator rather than an ordinary opclass member. Currently
* none of the special operators are very likely to expand to
* an equality operator; we do not bother to check, but just
* assume no match.
* You might expect to see Assert(op_strategy != 0) here, but you
* won't: the clause might contain a special indexable operator
* rather than an ordinary opclass member. Currently none of the
* special operators are very likely to expand to an equality
* operator; we do not bother to check, but just assume no match.
*/
if (op_strategy != BTEqualStrategyNumber)
continue;
@ -1445,7 +1444,7 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
rinfo->left_relids);
if (ispc)
{
result = lappend_int(result, indexcol+1);
result = lappend_int(result, indexcol + 1);
break;
}
}
@ -1480,8 +1479,8 @@ match_index_to_query_keys(PlannerInfo *root,
index_pathkeys = build_index_pathkeys(root, index, indexscandir);
/*
* Can we match to the query's requested pathkeys? The inner loop
* skips over ignorable index columns while trying to match.
* Can we match to the query's requested pathkeys? The inner loop skips
* over ignorable index columns while trying to match.
*/
index_cell = list_head(index_pathkeys);
index_col = 0;
@ -1492,13 +1491,14 @@ match_index_to_query_keys(PlannerInfo *root,
for (;;)
{
List *isubkey;
List *isubkey;
if (index_cell == NULL)
return false;
isubkey = (List *) lfirst(index_cell);
index_cell = lnext(index_cell);
index_col++; /* index_col is now 1-based */
/*
* Since we are dealing with canonicalized pathkeys, pointer
* comparison is sufficient to determine a match.
@ -1561,9 +1561,9 @@ match_index_to_operand(Node *operand,
int indkey;
/*
* Ignore any RelabelType node above the operand. This is needed to
* be able to apply indexscanning in binary-compatible-operator cases.
* Note: we can assume there is at most one RelabelType node;
* Ignore any RelabelType node above the operand. This is needed to be
* able to apply indexscanning in binary-compatible-operator cases. Note:
* we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
*/
if (operand && IsA(operand, RelabelType))
@ -1583,9 +1583,9 @@ match_index_to_operand(Node *operand,
else
{
/*
* Index expression; find the correct expression. (This search
* could be avoided, at the cost of complicating all the callers
* of this routine; doesn't seem worth it.)
* Index expression; find the correct expression. (This search could
* be avoided, at the cost of complicating all the callers of this
* routine; doesn't seem worth it.)
*/
ListCell *indexpr_item;
int i;
@ -1645,7 +1645,7 @@ match_index_to_operand(Node *operand,
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
* that support boolean equality). We can transform a plain reference
* that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
@ -1696,14 +1696,15 @@ match_boolean_index_clause(Node *clause,
indexcol, index))
return true;
}
/*
* Since we only consider clauses at top level of WHERE, we can convert
* indexkey IS TRUE and indexkey IS FALSE to index searches as well.
* The different meaning for NULL isn't important.
* indexkey IS TRUE and indexkey IS FALSE to index searches as well. The
* different meaning for NULL isn't important.
*/
else if (clause && IsA(clause, BooleanTest))
{
BooleanTest *btest = (BooleanTest *) clause;
BooleanTest *btest = (BooleanTest *) clause;
if (btest->booltesttype == IS_TRUE ||
btest->booltesttype == IS_FALSE)
@ -1737,8 +1738,8 @@ match_special_index_operator(Expr *clause, Oid opclass,
/*
* Currently, all known special operators require the indexkey on the
* left, but this test could be pushed into the switch statement if
* some are added that do not...
* left, but this test could be pushed into the switch statement if some
* are added that do not...
*/
if (!indexkey_on_left)
return false;
@ -1760,12 +1761,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_LIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICLIKE_OP:
@ -1773,7 +1774,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICLIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_REGEXEQ_OP:
@ -1781,7 +1782,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_REGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICREGEXEQ_OP:
@ -1789,7 +1790,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICREGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_INET_SUB_OP:
@ -1815,9 +1816,9 @@ match_special_index_operator(Expr *clause, Oid opclass,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
* We insist on the opclass being the specific one we expect, else we'd
* do the wrong thing if someone were to make a reverse-sort opclass
* with the same operators.
* We insist on the opclass being the specific one we expect, else we'd do
* the wrong thing if someone were to make a reverse-sort opclass with the
* same operators.
*/
switch (expr_op)
{
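The point of these special operators is that a pattern with a fixed prefix can be rewritten into ordinary btree range quals. A standalone sketch of that rewrite, simplified in two ways: no escape-character handling, and the upper bound is formed by a crude last-character increment rather than the more careful logic the backend uses:

#include <stdio.h>
#include <string.h>

/* copy the leading part of a LIKE pattern up to the first wildcard */
static int
fixed_prefix(const char *pattern, char *prefix, size_t prefixsize)
{
    size_t      n = 0;

    while (pattern[n] && pattern[n] != '%' && pattern[n] != '_' &&
           n + 1 < prefixsize)
    {
        prefix[n] = pattern[n];
        n++;
    }
    prefix[n] = '\0';
    return n > 0;
}

int
main(void)
{
    char        prefix[64];

    if (fixed_prefix("abc%def", prefix, sizeof(prefix)))
    {
        char        upper[64];

        strcpy(upper, prefix);
        upper[strlen(upper) - 1]++;     /* crude "next string" upper bound */
        printf("col LIKE 'abc%%def' becomes: col >= '%s' AND col < '%s'\n",
               prefix, upper);
    }
    return 0;
}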
@ -1906,7 +1907,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
/* First check for boolean cases */
if (IsBooleanOpclass(curClass))
{
Expr *boolqual;
Expr *boolqual;
boolqual = expand_boolean_index_clause((Node *) rinfo->clause,
indexcol,
@ -1960,7 +1961,7 @@ expand_boolean_index_clause(Node *clause,
/* NOT clause? */
if (not_clause(clause))
{
Node *arg = (Node *) get_notclausearg((Expr *) clause);
Node *arg = (Node *) get_notclausearg((Expr *) clause);
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@ -1971,8 +1972,8 @@ expand_boolean_index_clause(Node *clause,
}
if (clause && IsA(clause, BooleanTest))
{
BooleanTest *btest = (BooleanTest *) clause;
Node *arg = (Node *) btest->arg;
BooleanTest *btest = (BooleanTest *) clause;
Node *arg = (Node *) btest->arg;
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@ -2007,6 +2008,7 @@ static List *
expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;
/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
@ -2020,10 +2022,9 @@ expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
switch (expr_op)
{
/*
* LIKE and regex operators are not members of any index
* opclass, so if we find one in an indexqual list we can
* assume that it was accepted by
* match_special_index_operator().
* LIKE and regex operators are not members of any index opclass,
* so if we find one in an indexqual list we can assume that it
* was accepted by match_special_index_operator().
*/
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
@ -2128,8 +2129,8 @@ prefix_quals(Node *leftop, Oid opclass,
}
/*
* If necessary, coerce the prefix constant to the right type. The
* given prefix constant is either text or bytea type.
* If necessary, coerce the prefix constant to the right type. The given
* prefix constant is either text or bytea type.
*/
if (prefix_const->consttype != datatype)
{
@ -2139,11 +2140,11 @@ prefix_quals(Node *leftop, Oid opclass,
{
case TEXTOID:
prefix = DatumGetCString(DirectFunctionCall1(textout,
prefix_const->constvalue));
prefix_const->constvalue));
break;
case BYTEAOID:
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
prefix_const->constvalue));
prefix_const->constvalue));
break;
default:
elog(ERROR, "unexpected const type: %u",
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.95 2005/06/05 22:32:55 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.96 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -65,9 +65,9 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* Find potential mergejoin clauses. We can skip this if we are not
* interested in doing a mergejoin. However, mergejoin is currently
* our only way of implementing full outer joins, so override
* mergejoin disable if it's a full join.
* interested in doing a mergejoin. However, mergejoin is currently our
* only way of implementing full outer joins, so override mergejoin
* disable if it's a full join.
*/
if (enable_mergejoin || jointype == JOIN_FULL)
mergeclause_list = select_mergejoin_clauses(joinrel,
@ -95,23 +95,22 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 3. Consider paths where the inner relation need not be explicitly
* sorted. This includes mergejoins only (nestloops were already
* built in match_unsorted_outer).
* sorted. This includes mergejoins only (nestloops were already built in
* match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
* significant difference between the inner and outer side of a
* mergejoin, so match_unsorted_inner creates no paths that aren't
* equivalent to those made by match_unsorted_outer when
* add_paths_to_joinrel() is invoked with the two rels given in the
* other order.
* significant difference between the inner and outer side of a mergejoin,
* so match_unsorted_inner creates no paths that aren't equivalent to
* those made by match_unsorted_outer when add_paths_to_joinrel() is
* invoked with the two rels given in the other order.
*/
match_unsorted_inner(root, joinrel, outerrel, innerrel,
restrictlist, mergeclause_list, jointype);
#endif
/*
* 4. Consider paths where both outer and inner relations must be
* hashed before being joined.
* 4. Consider paths where both outer and inner relations must be hashed
* before being joined.
*/
if (enable_hashjoin)
hash_inner_and_outer(root, joinrel, outerrel, innerrel,
@ -174,11 +173,11 @@ sort_inner_and_outer(PlannerInfo *root,
/*
* We only consider the cheapest-total-cost input paths, since we are
* assuming here that a sort is required. We will consider
* cheapest-startup-cost input paths later, and only if they don't
* need a sort.
* cheapest-startup-cost input paths later, and only if they don't need a
* sort.
*
* If unique-ification is requested, do it and then handle as a plain
* inner join.
* If unique-ification is requested, do it and then handle as a plain inner
* join.
*/
outer_path = outerrel->cheapest_total_path;
inner_path = innerrel->cheapest_total_path;
@ -194,31 +193,29 @@ sort_inner_and_outer(PlannerInfo *root,
}
/*
* Each possible ordering of the available mergejoin clauses will
* generate a differently-sorted result path at essentially the same
* cost. We have no basis for choosing one over another at this level
* of joining, but some sort orders may be more useful than others for
* higher-level mergejoins, so it's worth considering multiple
* orderings.
* Each possible ordering of the available mergejoin clauses will generate
* a differently-sorted result path at essentially the same cost. We have
* no basis for choosing one over another at this level of joining, but
* some sort orders may be more useful than others for higher-level
* mergejoins, so it's worth considering multiple orderings.
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
* redundant. Therefore, what we do is convert the mergeclause list
* to a list of canonical pathkeys, and then consider different
* orderings of the pathkeys.
* redundant. Therefore, what we do is convert the mergeclause list to a
* list of canonical pathkeys, and then consider different orderings of
* the pathkeys.
*
* Generating a path for *every* permutation of the pathkeys doesn't seem
* like a winning strategy; the cost in planning time is too high. For
* now, we generate one path for each pathkey, listing that pathkey
* first and the rest in random order. This should allow at least a
* one-clause mergejoin without re-sorting against any other possible
* mergejoin partner path. But if we've not guessed the right
* ordering of secondary keys, we may end up evaluating clauses as
* qpquals when they could have been done as mergeclauses. We need to
* figure out a better way. (Two possible approaches: look at all the
* relevant index relations to suggest plausible sort orders, or make
* just one output path and somehow mark it as having a sort-order
* that can be rearranged freely.)
* now, we generate one path for each pathkey, listing that pathkey first
* and the rest in random order. This should allow at least a one-clause
* mergejoin without re-sorting against any other possible mergejoin
* partner path. But if we've not guessed the right ordering of secondary
* keys, we may end up evaluating clauses as qpquals when they could have
* been done as mergeclauses. We need to figure out a better way. (Two
* possible approaches: look at all the relevant index relations to
* suggest plausible sort orders, or make just one output path and somehow
* mark it as having a sort-order that can be rearranged freely.)
*/
all_pathkeys = make_pathkeys_for_mergeclauses(root,
mergeclause_list,
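Rather than trying every permutation of the merge pathkeys, the scheme described above emits one ordering per pathkey, with that key promoted to the front (the order of the remaining keys is arbitrary). In miniature (names invented):

#include <stdio.h>

int
main(void)
{
    const char *pathkeys[] = {"a.x", "a.y", "a.z"};
    int         nkeys = 3;
    int         i,
                j;

    for (i = 0; i < nkeys; i++)
    {
        printf("ordering %d:", i + 1);
        printf(" %s", pathkeys[i]);         /* chosen key goes first */
        for (j = 0; j < nkeys; j++)
            if (j != i)
                printf(" %s", pathkeys[j]); /* remaining keys keep list order */
        printf("\n");
    }
    return 0;
}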
@ -243,26 +240,25 @@ sort_inner_and_outer(PlannerInfo *root,
/*
* Select mergeclause(s) that match this sort ordering. If we had
* redundant merge clauses then we will get a subset of the
* original clause list. There had better be some match,
* however...
* redundant merge clauses then we will get a subset of the original
* clause list. There had better be some match, however...
*/
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
cur_pathkeys,
mergeclause_list);
mergeclause_list);
Assert(cur_mergeclauses != NIL);
/* Forget it if can't use all the clauses in right/full join */
if (useallclauses &&
list_length(cur_mergeclauses) != list_length(mergeclause_list))
list_length(cur_mergeclauses) != list_length(mergeclause_list))
continue;
/*
* Build sort pathkeys for both sides.
*
* Note: it's possible that the cheapest paths will already be sorted
* properly. create_mergejoin_path will detect that case and
* suppress an explicit sort step, so we needn't do so here.
* properly. create_mergejoin_path will detect that case and suppress
* an explicit sort step, so we needn't do so here.
*/
outerkeys = make_pathkeys_for_mergeclauses(root,
cur_mergeclauses,
@ -343,10 +339,10 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
* doing a right or full join, we must use *all* the mergeclauses as
* join clauses, else we will not have a valid plan. (Although these
* two flags are currently inverses, keep them separate for clarity
* and possible future changes.)
* doing a right or full join, we must use *all* the mergeclauses as join
* clauses, else we will not have a valid plan. (Although these two flags
* are currently inverses, keep them separate for clarity and possible
* future changes.)
*/
switch (jointype)
{
@ -385,10 +381,9 @@ match_unsorted_outer(PlannerInfo *root,
else if (nestjoinOK)
{
/*
* If the cheapest inner path is a join or seqscan, we should
* consider materializing it. (This is a heuristic: we could
* consider it always, but for inner indexscans it's probably a
* waste of time.)
* If the cheapest inner path is a join or seqscan, we should consider
* materializing it. (This is a heuristic: we could consider it
* always, but for inner indexscans it's probably a waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, BitmapHeapPath) ||
@ -397,8 +392,8 @@ match_unsorted_outer(PlannerInfo *root,
create_material_path(innerrel, inner_cheapest_total);
/*
* Get the best innerjoin indexpath (if any) for this outer rel.
* It's the same for all outer paths.
* Get the best innerjoin indexpath (if any) for this outer rel. It's
* the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
@ -417,8 +412,8 @@ match_unsorted_outer(PlannerInfo *root,
int sortkeycnt;
/*
* If we need to unique-ify the outer path, it's pointless to
* consider any but the cheapest outer.
* If we need to unique-ify the outer path, it's pointless to consider
* any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@ -429,9 +424,9 @@ match_unsorted_outer(PlannerInfo *root,
}
/*
* The result will have this sort order (even if it is implemented
* as a nestloop, and even if some of the mergeclauses are
* implemented by qpquals rather than as true mergeclauses):
* The result will have this sort order (even if it is implemented as
* a nestloop, and even if some of the mergeclauses are implemented by
* qpquals rather than as true mergeclauses):
*/
merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
outerpath->pathkeys);
@ -516,9 +511,9 @@ match_unsorted_outer(PlannerInfo *root,
innerrel);
/*
* Generate a mergejoin on the basis of sorting the cheapest
* inner. Since a sort will be needed, only cheapest total cost
* matters. (But create_mergejoin_path will do the right thing if
* Generate a mergejoin on the basis of sorting the cheapest inner.
* Since a sort will be needed, only cheapest total cost matters.
* (But create_mergejoin_path will do the right thing if
* inner_cheapest_total is already correctly sorted.)
*/
add_path(joinrel, (Path *)
@ -538,10 +533,10 @@ match_unsorted_outer(PlannerInfo *root,
continue;
/*
* Look for presorted inner paths that satisfy the innersortkey
* list --- or any truncation thereof, if we are allowed to build
* a mergejoin using a subset of the merge clauses. Here, we
* consider both cheap startup cost and cheap total cost. Ignore
* Look for presorted inner paths that satisfy the innersortkey list
* --- or any truncation thereof, if we are allowed to build a
* mergejoin using a subset of the merge clauses. Here, we consider
* both cheap startup cost and cheap total cost. Ignore
* inner_cheapest_total, since we already made a path with it.
*/
num_sortkeys = list_length(innersortkeys);
@ -559,8 +554,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Look for an inner path ordered well enough for the first
* 'sortkeycnt' innersortkeys. NB: trialsortkeys list is
* modified destructively, which is why we made a copy...
* 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
* destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
@ -611,8 +606,8 @@ match_unsorted_outer(PlannerInfo *root,
if (innerpath != cheapest_total_inner)
{
/*
* Avoid rebuilding clause list if we already made
* one; saves memory in big join trees...
* Avoid rebuilding clause list if we already made one;
* saves memory in big join trees...
*/
if (newclauses == NIL)
{
@ -620,8 +615,8 @@ match_unsorted_outer(PlannerInfo *root,
{
newclauses =
find_mergeclauses_for_pathkeys(root,
trialsortkeys,
mergeclauses);
trialsortkeys,
mergeclauses);
Assert(newclauses != NIL);
}
else
@ -697,8 +692,8 @@ hash_inner_and_outer(PlannerInfo *root,
* We need to build only one hashpath for any given pair of outer and
* inner relations; all of the hashable clauses will be used as keys.
*
* Scan the join's restrictinfo list to find hashjoinable clauses that
* are usable with this pair of sub-relations.
* Scan the join's restrictinfo list to find hashjoinable clauses that are
* usable with this pair of sub-relations.
*/
hashclauses = NIL;
foreach(l, restrictlist)
@ -725,7 +720,7 @@ hash_inner_and_outer(PlannerInfo *root,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
@ -739,9 +734,9 @@ hash_inner_and_outer(PlannerInfo *root,
if (hashclauses)
{
/*
* We consider both the cheapest-total-cost and
* cheapest-startup-cost outer paths. There's no need to consider
* any but the cheapest-total-cost inner path, however.
* We consider both the cheapest-total-cost and cheapest-startup-cost
* outer paths. There's no need to consider any but the
* cheapest-total-cost inner path, however.
*/
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
@ -807,15 +802,15 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
/*
* If processing an outer join, only use its own join clauses in
* the merge. For inner joins we need not be so picky.
* If processing an outer join, only use its own join clauses in the
* merge. For inner joins we need not be so picky.
*
* Furthermore, if it is a right/full join then *all* the explicit
* join clauses must be mergejoinable, else the executor will
* fail. If we are asked for a right join then just return NIL to
* indicate no mergejoin is possible (we can handle it as a left
* join instead). If we are asked for a full join then emit an
* error, because there is no fallback.
* Furthermore, if it is a right/full join then *all* the explicit join
* clauses must be mergejoinable, else the executor will fail. If we
* are asked for a right join then just return NIL to indicate no
* mergejoin is possible (we can handle it as a left join instead). If
* we are asked for a full join then emit an error, because there is
* no fallback.
*/
if (isouterjoin)
{
@ -847,8 +842,8 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/*
* Check if clause is usable with these input rels. All the vars
* needed on each side of the clause must be available from one or
* the other of the input rels.
* needed on each side of the clause must be available from one or the
* other of the input rels.
*/
if (bms_is_subset(restrictinfo->left_relids, outerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, innerrel->relids))
@ -856,7 +851,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
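Both the hashclause scan above and this mergeclause selection use the same sidedness test: every Var on one side of the clause must come from the outer rel and every Var on the other side from the inner rel. A standalone sketch with toy bitmasks in place of the Relids sets:

#include <stdio.h>

typedef unsigned int Relids;    /* toy bitmask of relation ids */

static int
is_subset(Relids a, Relids b)
{
    return (a & ~b) == 0;
}

int
main(void)
{
    Relids      outer = 0x3;            /* rels {1,2} */
    Relids      inner = 0x4;            /* rel {3} */
    Relids      left_relids = 0x1;      /* clause's left side uses rel 1 */
    Relids      right_relids = 0x4;     /* clause's right side uses rel 3 */

    if (is_subset(left_relids, outer) && is_subset(right_relids, inner))
        printf("usable: right-hand side is the inner rel\n");
    else if (is_subset(left_relids, inner) && is_subset(right_relids, outer))
        printf("usable: left-hand side is the inner rel\n");
    else
        printf("not usable with this pair of input rels\n");
    return 0;
}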
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.75 2005/07/28 22:27:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.76 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -49,17 +49,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
/*
* First, consider left-sided and right-sided plans, in which rels of
* exactly level-1 member relations are joined against initial
* relations. We prefer to join using join clauses, but if we find a
* rel of level-1 members that has no join clauses, we will generate
* Cartesian-product joins against all initial rels not already
* contained in it.
* exactly level-1 member relations are joined against initial relations.
* We prefer to join using join clauses, but if we find a rel of level-1
* members that has no join clauses, we will generate Cartesian-product
* joins against all initial rels not already contained in it.
*
* In the first pass (level == 2), we try to join each initial rel to
* each initial rel that appears later in joinrels[1]. (The
* mirror-image joins are handled automatically by make_join_rel.) In
* later passes, we try to join rels of size level-1 from
* joinrels[level-1] to each initial rel in joinrels[1].
* In the first pass (level == 2), we try to join each initial rel to each
* initial rel that appears later in joinrels[1]. (The mirror-image joins
* are handled automatically by make_join_rel.) In later passes, we try
* to join rels of size level-1 from joinrels[level-1] to each initial rel
* in joinrels[1].
*/
foreach(r, joinrels[level - 1])
{
@ -76,23 +75,22 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
if (old_rel->joininfo != NIL)
{
/*
* Note that if all available join clauses for this rel
* require more than one other rel, we will fail to make any
* joins against it here. In most cases that's OK; it'll be
* considered by "bushy plan" join code in a higher-level pass
* where we have those other rels collected into a join rel.
* Note that if all available join clauses for this rel require
* more than one other rel, we will fail to make any joins against
* it here. In most cases that's OK; it'll be considered by
* "bushy plan" join code in a higher-level pass where we have
* those other rels collected into a join rel.
*/
new_rels = make_rels_by_clause_joins(root,
old_rel,
other_rels);
/*
* An exception occurs when there is a clauseless join inside
* an IN (sub-SELECT) construct. Here, the members of the
* subselect all have join clauses (against the stuff outside
* the IN), but they *must* be joined to each other before we
* can make use of those join clauses. So do the clauseless
* join bit.
* An exception occurs when there is a clauseless join inside an
* IN (sub-SELECT) construct. Here, the members of the subselect
* all have join clauses (against the stuff outside the IN), but
* they *must* be joined to each other before we can make use of
* those join clauses. So do the clauseless join bit.
*
* See also the last-ditch case below.
*/
@ -115,30 +113,29 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
/*
* At levels above 2 we will generate the same joined relation in
* multiple ways --- for example (a join b) join c is the same
* RelOptInfo as (b join c) join a, though the second case will
* add a different set of Paths to it. To avoid making extra work
* for subsequent passes, do not enter the same RelOptInfo into
* our output list multiple times.
* RelOptInfo as (b join c) join a, though the second case will add a
* different set of Paths to it. To avoid making extra work for
* subsequent passes, do not enter the same RelOptInfo into our output
* list multiple times.
*/
result_rels = list_concat_unique_ptr(result_rels, new_rels);
}
/*
* Now, consider "bushy plans" in which relations of k initial rels
* are joined to relations of level-k initial rels, for 2 <= k <=
* level-2.
* Now, consider "bushy plans" in which relations of k initial rels are
* joined to relations of level-k initial rels, for 2 <= k <= level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is a
* suitable join clause, in order to avoid unreasonable growth of
* planning time.
* suitable join clause, in order to avoid unreasonable growth of planning
* time.
*/
for (k = 2;; k++)
{
int other_level = level - k;
/*
* Since make_join_rel(x, y) handles both x,y and y,x cases, we
* only need to go as far as the halfway point.
* Since make_join_rel(x, y) handles both x,y and y,x cases, we only
* need to go as far as the halfway point.
*/
if (k > other_level)
break;
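The overall search is level-at-a-time: joinrels of a given size are built first by combining a size level-1 rel with a single rel, and then, for levels high enough, from "bushy" (k, level-k) splits. A standalone sketch that only prints which combinations each level considers (not the planner code):

#include <stdio.h>

int
main(void)
{
    int         nrels = 5;      /* number of base relations */
    int         level,
                k;

    for (level = 2; level <= nrels; level++)
    {
        printf("level %d: combine %d-way rels with single rels\n",
               level, level - 1);
        for (k = 2; k <= level - 2; k++)
        {
            if (k > level - k)
                break;          /* (x,y) and (y,x) give the same joinrel */
            printf("level %d: bushy join of %d-way and %d-way rels\n",
                   level, k, level - k);
        }
    }
    return 0;
}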
@ -165,8 +162,8 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
{
/*
* OK, we can build a rel of the right level from this
* pair of rels. Do so if there is at least one
* usable join clause.
* pair of rels. Do so if there is at least one usable
* join clause.
*/
if (have_relevant_joinclause(old_rel, new_rel))
{
@ -185,16 +182,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
}
/*
* Last-ditch effort: if we failed to find any usable joins so far,
* force a set of cartesian-product joins to be generated. This
* handles the special case where all the available rels have join
* clauses but we cannot use any of the joins yet. An example is
* Last-ditch effort: if we failed to find any usable joins so far, force
* a set of cartesian-product joins to be generated. This handles the
* special case where all the available rels have join clauses but we
* cannot use any of the joins yet. An example is
*
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
* The join clause will be usable at level 3, but at level 2 we have no
* choice but to make cartesian joins. We consider only left-sided
* and right-sided cartesian joins in this case (no bushy).
* choice but to make cartesian joins. We consider only left-sided and
* right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
{
@ -318,8 +315,8 @@ make_rels_by_clauseless_joins(PlannerInfo *root,
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
/*
* As long as given other_rels are distinct, don't need to
* test to see if jrel is already part of output list.
* As long as given other_rels are distinct, don't need to test to
* see if jrel is already part of output list.
*/
if (jrel)
result = lcons(jrel, result);
@ -393,10 +390,10 @@ make_jointree_rel(PlannerInfo *root, Node *jtnode)
elog(ERROR, "invalid join order");
/*
* Since we are only going to consider this one way to do it,
* we're done generating Paths for this joinrel and can now select
* the cheapest. In fact we *must* do so now, since next level up
* will need it!
* Since we are only going to consider this one way to do it, we're
* done generating Paths for this joinrel and can now select the
* cheapest. In fact we *must* do so now, since next level up will
* need it!
*/
set_cheapest(rel);
@ -439,10 +436,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
joinrelids = bms_union(rel1->relids, rel2->relids);
/*
* If we are implementing IN clauses as joins, there are some joins
* that are illegal. Check to see if the proposed join is trouble. We
* can skip the work if looking at an outer join, however, because
* only top-level joins might be affected.
* If we are implementing IN clauses as joins, there are some joins that
* are illegal. Check to see if the proposed join is trouble. We can skip
* the work if looking at an outer join, however, because only top-level
* joins might be affected.
*/
if (jointype == JOIN_INNER)
{
@ -454,8 +451,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* This IN clause is not relevant unless its RHS overlaps the
* proposed join. (Check this first as a fast path for
* dismissing most irrelevant INs quickly.)
* proposed join. (Check this first as a fast path for dismissing
* most irrelevant INs quickly.)
*/
if (!bms_overlap(ininfo->righthand, joinrelids))
continue;
@ -468,10 +465,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
continue;
/*
* Cannot join if proposed join contains rels not in the RHS
* *and* contains only part of the RHS. We must build the
* complete RHS (subselect's join) before it can be joined to
* rels outside the subselect.
* Cannot join if proposed join contains rels not in the RHS *and*
* contains only part of the RHS. We must build the complete RHS
* (subselect's join) before it can be joined to rels outside the
* subselect.
*/
if (!bms_is_subset(ininfo->righthand, joinrelids))
{
@ -480,13 +477,12 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
/*
* At this point we are considering a join of the IN's RHS to some
* other rel(s).
*
* If we already joined IN's RHS to any other rels in either input
* path, then this join is not constrained (the necessary work was
* done at the lower level where that join occurred).
*/
if (bms_is_subset(ininfo->righthand, rel1->relids) &&
!bms_equal(ininfo->righthand, rel1->relids))
@ -500,12 +496,11 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
* RHS/LHS.
*
* JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS; conversely
* JOIN_UNIQUE_INNER will work if innerrel is exactly RHS.
*
* But none of these will work if we already found another IN that
* needs to trigger here.
*/
if (jointype != JOIN_INNER)
{
@ -532,8 +527,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
/*
* Find or build the join RelOptInfo, and compute the restrictlist that
* goes with this particular joining.
*/
joinrel = build_join_rel(root, joinrelids, rel1, rel2, jointype,
&restrictlist);
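Aside, not part of this commit: the clauseless-join fallback discussed near the top of this file exists because a clause can only be enforced once every relation it mentions is present in the candidate join relation. A minimal standalone C sketch of that test, using a plain bitmask in place of PostgreSQL's Relids machinery; relid_set, REL_A/REL_B/REL_C and clause_is_usable are invented names for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t relid_set;        /* one bit per base relation (toy stand-in for Relids) */

#define REL_A ((relid_set) 1 << 0)
#define REL_B ((relid_set) 1 << 1)
#define REL_C ((relid_set) 1 << 2)

/* A clause can be applied at a join level only when every relation it
 * references is already part of the proposed join relation. */
static bool
clause_is_usable(relid_set clause_relids, relid_set joinrel_relids)
{
    return (clause_relids & ~joinrel_relids) == 0;
}

int
main(void)
{
    relid_set clause = REL_A | REL_B | REL_C;    /* (a.f1 + b.f2 + c.f3) = 0 */

    printf("usable at {a,b}:   %d\n", clause_is_usable(clause, REL_A | REL_B));
    printf("usable at {a,b,c}: %d\n", clause_is_usable(clause, REL_A | REL_B | REL_C));
    return 0;
}

For the query in that comment the clause references {a, b, c}, so it first becomes usable at level 3, which is why level 2 has to fall back to cartesian-product joins.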


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.75 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -99,14 +99,14 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
if (restriction_is_or_clause(rinfo))
{
/*
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
* indexquals. We pass outer_relids = NULL so that sub-clauses
* that are actually joins will be ignored.
*/
List *orpaths;
ListCell *k;
orpaths = generate_bitmap_or_paths(root, rel,
list_make1(rinfo),
@ -116,7 +116,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/* Locate the cheapest OR path */
foreach(k, orpaths)
{
BitmapOrPath *path = (BitmapOrPath *) lfirst(k);
Assert(IsA(path, BitmapOrPath));
if (bestpath == NULL ||
@ -134,8 +134,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
return false;
/*
* Convert the path's indexclauses structure to a RestrictInfo tree. We
* include any partial-index predicates so as to get a reasonable
* representation of what the path is actually scanning.
*/
newrinfos = make_restrictinfo_from_bitmapqual((Path *) bestpath,
@ -155,12 +155,12 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
rel->baserestrictinfo = list_concat(rel->baserestrictinfo, newrinfos);
/*
* Adjust the original OR clause's cached selectivity to compensate for
* the selectivity of the added (but redundant) lower-level qual. This
* should result in the join rel getting approximately the same rows
* estimate as it would have gotten without all these shenanigans. (XXX
* major hack alert ... this depends on the assumption that the
* selectivity will stay cached ...)
*/
or_selec = clause_selectivity(root, (Node *) or_rinfo,
0, JOIN_INNER);
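Aside, not from the commit: the compensation described in the comment above is plain arithmetic. If a redundant qual with selectivity s_new is added to the relation, scaling the cached OR-clause selectivity by 1/s_new keeps the overall row estimate roughly where it was. A hypothetical numeric sketch, with made-up numbers and variable names and assuming independently multiplied selectivities:

#include <stdio.h>

int
main(void)
{
    double ntuples   = 100000.0;   /* hypothetical base-relation row count */
    double or_selec  = 0.01;       /* cached selectivity of the original OR clause */
    double new_selec = 0.012;      /* selectivity of the added, redundant index qual */

    /* Counting both quals double-counts the restriction: */
    double naive = ntuples * or_selec * new_selec;

    /* Scaling the cached OR selectivity by 1/new_selec restores the
     * original estimate, which is the effect the comment describes: */
    double adjusted = ntuples * (or_selec / new_selec) * new_selec;

    printf("naive = %.1f rows, adjusted = %.1f rows\n", naive, adjusted);
    return 0;
}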


@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.73 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -33,17 +33,17 @@
static PathKeyItem *makePathKeyItem(Node *key, Oid sortop, bool checkType);
static void generate_outer_join_implications(PlannerInfo *root,
List *equi_key_set,
Relids *relids);
static void sub_generate_join_implications(PlannerInfo *root,
List *equi_key_set, Relids *relids,
Node *item1, Oid sortop1,
Relids item1_relids);
static void process_implied_const_eq(PlannerInfo *root,
List *equi_key_set, Relids *relids,
Node *item1, Oid sortop1,
Relids item1_relids,
bool delete_it);
static List *make_canonical_pathkey(PlannerInfo *root, PathKeyItem *item);
static Var *find_indexkey_var(PlannerInfo *root, RelOptInfo *rel,
AttrNumber varattno);
@ -59,12 +59,11 @@ makePathKeyItem(Node *key, Oid sortop, bool checkType)
PathKeyItem *item = makeNode(PathKeyItem);
/*
* Some callers pass expressions that are not necessarily of the same type
* as the sort operator expects as input (for example when dealing with an
* index that uses binary-compatible operators). We must relabel these
* with the correct type so that the key expressions will be seen as
* equal() to expressions that have been correctly labeled.
*/
if (checkType)
{
@ -116,20 +115,19 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo)
return;
/*
* Our plan is to make a two-element set, then sweep through the existing
* equijoin sets looking for matches to item1 or item2. When we find one,
* we remove that set from equi_key_list and union it into our new set.
* When done, we add the new set to the front of equi_key_list.
*
* It may well be that the two items we're given are already known to be
* equijoin-equivalent, in which case we don't need to change our data
* structure. If we find both of them in the same equivalence set to
* start with, we can quit immediately.
*
* This is a standard UNION-FIND problem, for which there exist better data
* structures than simple lists. If this code ever proves to be a
* bottleneck then it could be sped up --- but for now, simple is
* beautiful.
*/
newset = NIL;
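Aside, not from the commit: the "better data structure" the comment alludes to is the classic disjoint-set forest. A minimal hypothetical sketch with path compression, for illustration only; add_equijoined_keys itself merges simple lists as described above.

#include <stdio.h>

#define NITEMS 8

static int parent[NITEMS];

static void
init_sets(void)
{
    for (int i = 0; i < NITEMS; i++)
        parent[i] = i;               /* every item starts in its own set */
}

static int
find(int x)
{
    if (parent[x] != x)
        parent[x] = find(parent[x]); /* path compression */
    return parent[x];
}

static void
unite(int a, int b)
{
    parent[find(a)] = find(b);
}

int
main(void)
{
    init_sets();
    unite(0, 1);                     /* a.x = b.y */
    unite(1, 2);                     /* b.y = c.z */
    printf("%s\n", find(0) == find(2) ? "same equivalence set" : "different sets");
    return 0;
}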
@ -148,8 +146,7 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo)
if (item1here || item2here)
{
/*
* If find both in same equivalence set, no need to do any more
*/
if (item1here && item2here)
{
@ -228,18 +225,18 @@ generate_implied_equalities(PlannerInfo *root)
int i1;
/*
* A set containing only two items cannot imply any equalities beyond
* the one that created the set, so we can skip it --- unless outer
* joins appear in the query.
*/
if (nitems < 3 && !root->hasOuterJoins)
continue;
/*
* Collect info about relids mentioned in each item. For this routine
* we only really care whether there are any at all in each item, but
* process_implied_equality() needs the exact sets, so we may as well
* pull them here.
*/
relids = (Relids *) palloc(nitems * sizeof(Relids));
have_consts = false;
@ -258,9 +255,9 @@ generate_implied_equalities(PlannerInfo *root)
* Match each item in the set with all that appear after it (it's
* sufficient to generate A=B, need not process B=A too).
*
* A set containing only two items cannot imply any equalities beyond the
* one that created the set, so we can skip this processing in that
* case.
*/
if (nitems >= 3)
{
@ -346,7 +343,7 @@ generate_implied_equalities(PlannerInfo *root)
* the time it gets here, the restriction will look like
* COALESCE(LEFTVAR, RIGHTVAR) = CONSTANT
* and we will have a join clause LEFTVAR = RIGHTVAR that we can match the
* COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
* and RIGHTVAR = CONSTANT into the input relations, since any rows not
* meeting these conditions cannot contribute to the join result.
*
@ -397,8 +394,8 @@ generate_outer_join_implications(PlannerInfo *root,
*/
static void
sub_generate_join_implications(PlannerInfo *root,
List *equi_key_set, Relids *relids,
Node *item1, Oid sortop1, Relids item1_relids)
{
ListCell *l;
@ -410,34 +407,36 @@ sub_generate_join_implications(PlannerInfo *root,
foreach(l, root->left_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
Node *leftop = get_leftop(rinfo->clause);
if (equal(leftop, item1) && rinfo->left_sortop == sortop1)
{
/*
* Match, so find constant member(s) of set and generate implied
* INNERVAR = CONSTANT
*/
Node *rightop = get_rightop(rinfo->clause);
process_implied_const_eq(root, equi_key_set, relids,
rightop,
rinfo->right_sortop,
rinfo->right_relids,
false);
/*
* We can remove explicit tests of this outer-join qual, too,
* since we now have tests forcing each of its sides to the same
* value.
*/
process_implied_equality(root,
leftop, rightop,
rinfo->left_sortop, rinfo->right_sortop,
rinfo->left_relids, rinfo->right_relids,
true);
/*
* And recurse to see if we can deduce anything from INNERVAR =
* CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
rightop,
@ -450,34 +449,36 @@ sub_generate_join_implications(PlannerInfo *root,
foreach(l, root->right_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
Node *rightop = get_rightop(rinfo->clause);
if (equal(rightop, item1) && rinfo->right_sortop == sortop1)
{
/*
* Match, so find constant member(s) of set and generate implied
* INNERVAR = CONSTANT
*/
Node *leftop = get_leftop(rinfo->clause);
process_implied_const_eq(root, equi_key_set, relids,
leftop,
rinfo->left_sortop,
rinfo->left_relids,
false);
/*
* We can remove explicit tests of this outer-join qual, too,
* since we now have tests forcing each of its sides to the same
* value.
*/
process_implied_equality(root,
leftop, rightop,
rinfo->left_sortop, rinfo->right_sortop,
rinfo->left_relids, rinfo->right_relids,
true);
/*
* And recurse to see if we can deduce anything from INNERVAR =
* CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
leftop,
@ -492,8 +493,8 @@ sub_generate_join_implications(PlannerInfo *root,
if (IsA(item1, CoalesceExpr))
{
CoalesceExpr *cexpr = (CoalesceExpr *) item1;
Node *cfirst;
Node *csecond;
if (list_length(cexpr->args) != 2)
return;
@ -501,26 +502,26 @@ sub_generate_join_implications(PlannerInfo *root,
csecond = (Node *) lsecond(cexpr->args);
/*
* Examine each mergejoinable full-join clause, looking for a clause
* of the form "x = y" matching the COALESCE(x,y) expression
*/
foreach(l, root->full_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
Node *leftop = get_leftop(rinfo->clause);
Node *rightop = get_rightop(rinfo->clause);
/*
* We can assume the COALESCE() inputs are in the same order as
* the join clause, since both were automatically generated in the
* cases we care about.
*
* XXX currently this may fail to match in cross-type cases because
* the COALESCE will contain typecast operations while the join
* clause may not (if there is a cross-type mergejoin operator
* available for the two column types). Is it OK to strip implicit
* coercions from the COALESCE arguments? What of the sortops in
* such cases?
*/
if (equal(leftop, cfirst) &&
equal(rightop, csecond) &&
@ -548,10 +549,11 @@ sub_generate_join_implications(PlannerInfo *root,
sortop1,
item1_relids,
true);
/*
* We can remove explicit tests of this outer-join qual, too,
* since we now have tests forcing each of its sides to the
* same value.
*/
process_implied_equality(root,
leftop, rightop,
@ -560,9 +562,10 @@ sub_generate_join_implications(PlannerInfo *root,
rinfo->left_relids,
rinfo->right_relids,
true);
/*
* And recurse to see if we can deduce anything from LEFTVAR =
* CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
leftop,
@ -700,19 +703,19 @@ canonicalize_pathkeys(PlannerInfo *root, List *pathkeys)
List *cpathkey;
/*
* It's sufficient to look at the first entry in the sublist; if there
* are more entries, they're already part of an equivalence set by
* definition.
*/
Assert(pathkey != NIL);
item = (PathKeyItem *) linitial(pathkey);
cpathkey = make_canonical_pathkey(root, item);
/*
* Eliminate redundant ordering requests --- ORDER BY A,A is the same
* as ORDER BY A. We want to check this only after we have
* canonicalized the keys, so that equivalent-key knowledge is used
* when deciding if an item is redundant.
*/
new_pathkeys = list_append_unique_ptr(new_pathkeys, cpathkey);
}
@ -769,8 +772,8 @@ compare_pathkeys(List *keys1, List *keys2)
List *subkey2 = (List *) lfirst(key2);
/*
* XXX would like to check that we've been given canonicalized input,
* but PlannerInfo not accessible here...
*/
#ifdef NOT_USED
Assert(list_member_ptr(root->equi_key_list, subkey1));
@ -778,10 +781,10 @@ compare_pathkeys(List *keys1, List *keys2)
#endif
/*
* We will never have two subkeys where one is a subset of the other,
* because of the canonicalization process. Either they are equal or
* they ain't. Furthermore, we only need pointer comparison to detect
* equality.
*/
if (subkey1 != subkey2)
return PATHKEYS_DIFFERENT; /* no need to keep looking */
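Aside, not from the commit: pointer comparison is sufficient here only because canonicalization guarantees a single shared object per distinct key set. A tiny hypothetical sketch of that interning idea, using strings as stand-ins for pathkey sublists; canonicalize and the fixed-size table are invented for illustration.

#include <stdio.h>
#include <string.h>

#define MAX_KEYS 16

static const char *canon[MAX_KEYS];
static int ncanon = 0;

/* Return one shared instance per distinct key value ("canonicalization").
 * No overflow handling; fine for this toy example. */
static const char *
canonicalize(const char *key)
{
    for (int i = 0; i < ncanon; i++)
        if (strcmp(canon[i], key) == 0)
            return canon[i];     /* already known: reuse the shared instance */
    canon[ncanon] = key;         /* first occurrence becomes the canonical one */
    return canon[ncanon++];
}

int
main(void)
{
    const char *k1 = canonicalize("t1.x");
    const char *k2 = canonicalize("t1.x");

    /* Because both map to the same shared object, a pointer test suffices. */
    printf("%s\n", k1 == k2 ? "equal (same pointer)" : "different");
    return 0;
}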
@ -789,9 +792,9 @@ compare_pathkeys(List *keys1, List *keys2)
/*
* If we reached the end of only one list, the other is longer and
* therefore not a subset. (We assume the additional sublist(s) of the
* other list are not NIL --- no pathkey list should ever have a NIL
* sublist.)
*/
if (key1 == NULL && key2 == NULL)
return PATHKEYS_EQUAL;
@ -840,8 +843,8 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
Path *path = (Path *) lfirst(l);
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
* that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
@ -879,11 +882,11 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
Path *path = (Path *) lfirst(l);
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
* that first.
*/
if (matched_path != NULL &&
compare_fractional_path_costs(matched_path, path, fraction) <= 0)
continue;
if (pathkeys_contained_in(pathkeys, path->pathkeys))
@ -954,8 +957,8 @@ build_index_pathkeys(PlannerInfo *root,
cpathkey = make_canonical_pathkey(root, item);
/*
* Eliminate redundant ordering info; could happen if query is such
* that index keys are equijoined...
*/
retval = list_append_unique_ptr(retval, cpathkey);
@ -1003,7 +1006,7 @@ find_indexkey_var(PlannerInfo *root, RelOptInfo *rel, AttrNumber varattno)
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
* result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
@ -1033,19 +1036,18 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* The sub_pathkey could contain multiple elements (representing
* knowledge that multiple items are effectively equal). Each element
* might match none, one, or more of the output columns that are
* visible to the outer query. This means we may have multiple
* possible representations of the sub_pathkey in the context of the
* outer query. Ideally we would generate them all and put them all
* into a pathkey list of the outer query, thereby propagating
* equality knowledge up to the outer query. Right now we cannot do
* so, because the outer query's canonical pathkey sets are already
* frozen when this is called. Instead we prefer the one that has the
* highest "score" (number of canonical pathkey peers, plus one if it
* matches the outer query_pathkeys). This is the most likely to be
* useful in the outer query.
*/
foreach(j, sub_pathkey)
{
@ -1144,13 +1146,13 @@ build_join_pathkeys(PlannerInfo *root,
return NIL;
/*
* This used to be quite a complex bit of code, but now that all pathkey
* sublists start out life canonicalized, we don't have to do a darn thing
* here! The inner-rel vars we used to need to add are *already* part of
* the outer pathkey!
*
* We do, however, need to truncate the pathkeys list, since it may contain
* pathkeys that were useful for forming this joinrel but are
* uninteresting to higher levels.
*/
return truncate_useless_pathkeys(root, joinrel, outer_pathkeys);
@ -1289,22 +1291,20 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* We can match a pathkey against either left or right side of any
* mergejoin clause. (We examine both sides since we aren't told if
* the given pathkeys are for inner or outer input path; no confusion
* is possible.) Furthermore, if there are multiple matching clauses,
* take them all. In plain inner-join scenarios we expect only one
* match, because redundant-mergeclause elimination will have removed
* any redundant mergeclauses from the input list. However, in
* outer-join scenarios there might be multiple matches. An example is
*
* select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and a.v1 =
* b.v2;
*
* Given the pathkeys ((a.v1), (a.v2)) it is okay to return all three
* clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and indeed
* we *must* do so or we will be unable to form a valid plan.
*/
foreach(j, restrictinfos)
{
@ -1325,15 +1325,15 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* If we didn't find a mergeclause, we're done --- any additional
* sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
break;
/*
* If we did find usable mergeclause(s) for this sort-key position,
* add them to result list.
*/
mergeclauses = list_concat(mergeclauses, matched_restrictinfos);
}
@ -1390,14 +1390,13 @@ make_pathkeys_for_mergeclauses(PlannerInfo *root,
}
/*
* When we are given multiple merge clauses, it's possible that some
* clauses refer to the same vars as earlier clauses. There's no
* reason for us to specify sort keys like (A,B,A) when (A,B) will do
* --- and adding redundant sort keys makes add_path think that this
* sort order is different from ones that are really the same, so
* don't do it. Since we now have a canonicalized pathkey, a simple
* ptrMember test is sufficient to detect redundant keys.
*/
pathkeys = list_append_unique_ptr(pathkeys, pathkey);
}
@ -1447,8 +1446,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
cache_mergeclause_pathkeys(root, restrictinfo);
/*
* We can compare canonical pathkey sublists by simple pointer
* equality; see compare_pathkeys.
*/
if (pathkey == restrictinfo->left_pathkey ||
pathkey == restrictinfo->right_pathkey)
@ -1460,8 +1459,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
/*
* If we didn't find a mergeclause, we're done --- any additional
* sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched)
useful++;


@ -11,7 +11,7 @@
* WHERE ctid IN (tid1, tid2, ...)
*
* There is currently no special support for joins involving CTID; in
* particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.25 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -50,7 +50,7 @@ static List *TidQualFromRestrictinfo(int varno, List *restrictinfo);
*
* If it is, return the pseudoconstant subnode; if not, return NULL.
*
* We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/