Standard pgindent run for 8.1.
--- a/src/backend/optimizer/geqo/geqo_erx.c
+++ b/src/backend/optimizer/geqo/geqo_erx.c
@@ -3,7 +3,7 @@
  * geqo_erx.c
  *	 edge recombination crossover [ER]
  *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.19 2003/11/29 22:39:49 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -55,8 +55,8 @@ alloc_edge_table(int num_gene)
 	Edge	   *edge_table;
 
 	/*
-	 * palloc one extra location so that nodes numbered 1..n can be
-	 * indexed directly; 0 will not be used
+	 * palloc one extra location so that nodes numbered 1..n can be indexed
+	 * directly; 0 will not be used
 	 */
 
 	edge_table = (Edge *) palloc((num_gene + 1) * sizeof(Edge));
@@ -94,8 +94,7 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table)
 	int			i,
 				index1,
 				index2;
-	int			edge_total;		/* total number of unique edges in two
-								 * genes */
+	int			edge_total;		/* total number of unique edges in two genes */
 
 	/* at first clear the edge table's old data */
 	for (i = 1; i <= num_gene; i++)
@@ -111,15 +110,15 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table)
 	for (index1 = 0; index1 < num_gene; index1++)
 	{
 		/*
-		 * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this
-		 * operaton maps n back to 1
+		 * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+		 * maps n back to 1
 		 */
 
 		index2 = (index1 + 1) % num_gene;
 
 		/*
-		 * edges are bidirectional, i.e. 1->2 is same as 2->1 call
-		 * gimme_edge twice per edge
+		 * edges are bidirectional, i.e. 1->2 is same as 2->1 call gimme_edge
+		 * twice per edge
 		 */
 
 		edge_total += gimme_edge(tour1[index1], tour1[index2], edge_table);
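
The hunk above reflows the comments in gimme_edge_table(), which walks each
parent tour as a circular list and records every edge in both directions. A
minimal standalone sketch of that traversal follows; the Gene typedef and the
edge accounting are simplified stand-ins for the real geqo_erx.c definitions,
not the actual code.

    /* Sketch: a circular tour of n genes has exactly n undirected edges;
     * visit each in both directions, as gimme_edge_table() does. */
    typedef int Gene;

    static int
    count_tour_edges(const Gene *tour, int num_gene)
    {
        int     index1;
        int     visits = 0;

        (void) tour;            /* the real code indexes the edge table with these genes */
        for (index1 = 0; index1 < num_gene; index1++)
        {
            int     index2 = (index1 + 1) % num_gene;   /* modulo wraps n back to 1 */

            /* the real code calls gimme_edge() twice here, once per direction,
             * since edges are bidirectional (1->2 is the same as 2->1) */
            visits += 2;
            (void) index2;
        }
        return visits / 2;      /* number of distinct undirected edges: n */
    }
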
@@ -320,10 +319,10 @@ gimme_gene(Edge edge, Edge *edge_table)
 		 */
 
 		/*
-		 * The test for minimum_count can probably be removed at some
-		 * point but comments should probably indicate exactly why it is
-		 * guaranteed that the test will always succeed the first time
-		 * around. If it can fail then the code is in error
+		 * The test for minimum_count can probably be removed at some point
+		 * but comments should probably indicate exactly why it is guaranteed
+		 * that the test will always succeed the first time around. If it can
+		 * fail then the code is in error
 		 */
 
 
@@ -379,8 +378,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
 
 
 	/*
-	 * how many edges remain? how many gene with four total (initial)
-	 * edges remain?
+	 * how many edges remain? how many gene with four total (initial) edges
+	 * remain?
 	 */
 
 	for (i = 1; i <= num_gene; i++)
@@ -395,8 +394,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
 	}
 
 	/*
-	 * random decision of the gene with remaining edges and whose
-	 * total_edges == 4
+	 * random decision of the gene with remaining edges and whose total_edges
+	 * == 4
 	 */
 
 	if (four_count != 0)
@@ -444,15 +443,15 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
 	}
 
 	/*
-	 * edge table seems to be empty; this happens sometimes on the last
-	 * point due to the fact that the first point is removed from the
-	 * table even though only one of its edges has been determined
+	 * edge table seems to be empty; this happens sometimes on the last point
+	 * due to the fact that the first point is removed from the table even
+	 * though only one of its edges has been determined
 	 */
 
 	else
-	{							/* occurs only at the last point in the
-								 * tour; simply look for the point which
-								 * is not yet used */
+	{							/* occurs only at the last point in the tour;
+								 * simply look for the point which is not yet
+								 * used */
 
 		for (i = 1; i <= num_gene; i++)
 			if (edge_table[i].unused_edges >= 0)
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.76 2005/06/09 04:18:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.77 2005/10/15 02:49:19 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -52,15 +52,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
 	struct HTAB *savehash;
 
 	/*
-	 * Because gimme_tree considers both left- and right-sided trees,
-	 * there is no difference between a tour (a,b,c,d,...) and a tour
-	 * (b,a,c,d,...) --- the same join orders will be considered. To avoid
-	 * redundant cost calculations, we simply reject tours where tour[0] >
-	 * tour[1], assigning them an artificially bad fitness.
+	 * Because gimme_tree considers both left- and right-sided trees, there is
+	 * no difference between a tour (a,b,c,d,...) and a tour (b,a,c,d,...) ---
+	 * the same join orders will be considered. To avoid redundant cost
+	 * calculations, we simply reject tours where tour[0] > tour[1], assigning
+	 * them an artificially bad fitness.
 	 *
 	 * init_tour() is aware of this rule and so we should never reject a tour
-	 * during the initial filling of the pool. It seems difficult to
-	 * persuade the recombination logic never to break the rule, however.
+	 * during the initial filling of the pool. It seems difficult to persuade
+	 * the recombination logic never to break the rule, however.
 	 */
 	if (num_gene >= 2 && tour[0] > tour[1])
 		return DBL_MAX;
@@ -69,10 +69,10 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
 	 * Create a private memory context that will hold all temp storage
 	 * allocated inside gimme_tree().
 	 *
-	 * Since geqo_eval() will be called many times, we can't afford to let
-	 * all that memory go unreclaimed until end of statement. Note we
-	 * make the temp context a child of the planner's normal context, so
-	 * that it will be freed even if we abort via ereport(ERROR).
+	 * Since geqo_eval() will be called many times, we can't afford to let all
+	 * that memory go unreclaimed until end of statement. Note we make the
+	 * temp context a child of the planner's normal context, so that it will
+	 * be freed even if we abort via ereport(ERROR).
 	 */
 	mycontext = AllocSetContextCreate(CurrentMemoryContext,
 									  "GEQO",
@@ -84,15 +84,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
 	/*
 	 * gimme_tree will add entries to root->join_rel_list, which may or may
 	 * not already contain some entries. The newly added entries will be
-	 * recycled by the MemoryContextDelete below, so we must ensure that
-	 * the list is restored to its former state before exiting. We can
-	 * do this by truncating the list to its original length. NOTE this
-	 * assumes that any added entries are appended at the end!
+	 * recycled by the MemoryContextDelete below, so we must ensure that the
+	 * list is restored to its former state before exiting. We can do this by
+	 * truncating the list to its original length. NOTE this assumes that any
+	 * added entries are appended at the end!
 	 *
-	 * We also must take care not to mess up the outer join_rel_hash,
-	 * if there is one. We can do this by just temporarily setting the
-	 * link to NULL. (If we are dealing with enough join rels, which we
-	 * very likely are, a new hash table will get built and used locally.)
+	 * We also must take care not to mess up the outer join_rel_hash, if there is
+	 * one. We can do this by just temporarily setting the link to NULL. (If
+	 * we are dealing with enough join rels, which we very likely are, a new
+	 * hash table will get built and used locally.)
 	 */
 	savelength = list_length(evaldata->root->join_rel_list);
 	savehash = evaldata->root->join_rel_hash;
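
The reflowed comment above is a save/run/restore recipe: note the current
length of root->join_rel_list and the join_rel_hash pointer before calling
gimme_tree(), then truncate the list back and restore the hash afterward. A
hedged sketch of that sequence, assuming the PlannerInfo fields named in the
comment and the list_length()/list_truncate() helpers from pg_list.h:

    /* Sketch of the save/run/restore pattern the comment describes. */
    savelength = list_length(evaldata->root->join_rel_list);
    savehash = evaldata->root->join_rel_hash;
    evaldata->root->join_rel_hash = NULL;   /* let a private hash be built */

    joinrel = gimme_tree(tour, num_gene, evaldata);

    /* new entries are appended at the end, so truncating restores the list */
    evaldata->root->join_rel_list =
        list_truncate(evaldata->root->join_rel_list, savelength);
    evaldata->root->join_rel_hash = savehash;
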
@@ -170,23 +170,22 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
 	 * Push each relation onto the stack in the specified order. After
 	 * pushing each relation, see whether the top two stack entries are
 	 * joinable according to the desirable_join() heuristics. If so, join
-	 * them into one stack entry, and try again to combine with the next
-	 * stack entry down (if any). When the stack top is no longer
-	 * joinable, continue to the next input relation. After we have
-	 * pushed the last input relation, the heuristics are disabled and we
-	 * force joining all the remaining stack entries.
+	 * them into one stack entry, and try again to combine with the next stack
+	 * entry down (if any). When the stack top is no longer joinable,
+	 * continue to the next input relation. After we have pushed the last
+	 * input relation, the heuristics are disabled and we force joining all
+	 * the remaining stack entries.
 	 *
 	 * If desirable_join() always returns true, this produces a straight
-	 * left-to-right join just like the old code. Otherwise we may
-	 * produce a bushy plan or a left/right-sided plan that really
-	 * corresponds to some tour other than the one given. To the extent
-	 * that the heuristics are helpful, however, this will be a better
-	 * plan than the raw tour.
+	 * left-to-right join just like the old code. Otherwise we may produce a
+	 * bushy plan or a left/right-sided plan that really corresponds to some
+	 * tour other than the one given. To the extent that the heuristics are
+	 * helpful, however, this will be a better plan than the raw tour.
 	 *
-	 * Also, when a join attempt fails (because of IN-clause constraints), we
-	 * may be able to recover and produce a workable plan, where the old
-	 * code just had to give up. This case acts the same as a false
-	 * result from desirable_join().
+	 * Also, when a join attempt fails (because of IN-clause constraints), we may
+	 * be able to recover and produce a workable plan, where the old code just
+	 * had to give up. This case acts the same as a false result from
+	 * desirable_join().
 	 */
 	for (rel_count = 0; rel_count < num_gene; rel_count++)
 	{
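
The comment block above is the heart of gimme_tree(): push relations in tour
order, and collapse the top two stack entries whenever desirable_join() allows
it (or unconditionally once the input is exhausted). A compact sketch of that
control flow, with get_base_rel() standing in for however the real code maps a
gene to its RelOptInfo:

    /* Sketch of the push-then-collapse loop described above. */
    for (rel_count = 0; rel_count < num_gene; rel_count++)
    {
        /* push the next relation of the tour */
        stack[stack_depth++] = get_base_rel(evaldata, tour[rel_count]);

        while (stack_depth >= 2)
        {
            RelOptInfo *outer_rel = stack[stack_depth - 2];
            RelOptInfo *inner_rel = stack[stack_depth - 1];

            /* heuristics may veto the join until the input is exhausted */
            if (rel_count < num_gene - 1 &&
                !desirable_join(evaldata->root, outer_rel, inner_rel))
                break;

            joinrel = make_join_rel(evaldata->root, outer_rel, inner_rel,
                                    JOIN_INNER);
            if (joinrel == NULL)    /* failed join attempt acts like a veto */
                break;

            stack_depth--;          /* replace the two entries with their join */
            stack[stack_depth - 1] = joinrel;
        }
    }
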
@@ -199,8 +198,8 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
 		stack_depth++;
 
 		/*
-		 * While it's feasible, pop the top two stack entries and replace
-		 * with their join.
+		 * While it's feasible, pop the top two stack entries and replace with
+		 * their join.
 		 */
 		while (stack_depth >= 2)
 		{
@@ -208,20 +207,18 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
 			RelOptInfo *inner_rel = stack[stack_depth - 1];
 
 			/*
-			 * Don't pop if heuristics say not to join now. However, once
-			 * we have exhausted the input, the heuristics can't prevent
-			 * popping.
+			 * Don't pop if heuristics say not to join now. However, once we
+			 * have exhausted the input, the heuristics can't prevent popping.
 			 */
 			if (rel_count < num_gene - 1 &&
 				!desirable_join(evaldata->root, outer_rel, inner_rel))
 				break;
 
 			/*
-			 * Construct a RelOptInfo representing the join of these two
-			 * input relations. These are always inner joins. Note that
-			 * we expect the joinrel not to exist in root->join_rel_list
-			 * yet, and so the paths constructed for it will only include
-			 * the ones we want.
+			 * Construct a RelOptInfo representing the join of these two input
+			 * relations. These are always inner joins. Note that we expect
+			 * the joinrel not to exist in root->join_rel_list yet, and so the
+			 * paths constructed for it will only include the ones we want.
 			 */
 			joinrel = make_join_rel(evaldata->root, outer_rel, inner_rel,
 									JOIN_INNER);
@@ -266,9 +263,9 @@ desirable_join(PlannerInfo *root,
 		return true;
 
 	/*
-	 * Join if the rels are members of the same IN sub-select. This is
-	 * needed to improve the odds that we will find a valid solution in a
-	 * case where an IN sub-select has a clauseless join.
+	 * Join if the rels are members of the same IN sub-select. This is needed
+	 * to improve the odds that we will find a valid solution in a case where
+	 * an IN sub-select has a clauseless join.
 	 */
 	foreach(l, root->in_info_list)
 	{
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.50 2005/06/08 23:02:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.51 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -106,10 +106,9 @@ geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
 	random_init_pool(pool, &evaldata);
 
 	/* sort the pool according to cheapest path as fitness */
-	sort_pool(pool);			/* we have to do it only one time, since
-								 * all kids replace the worst individuals
-								 * in future (-> geqo_pool.c:spread_chromo
-								 * ) */
+	sort_pool(pool);			/* we have to do it only one time, since all
+								 * kids replace the worst individuals in
+								 * future (-> geqo_pool.c:spread_chromo ) */
 
 #ifdef GEQO_DEBUG
 	elog(DEBUG1, "GEQO selected %d pool entries, best %.2f, worst %.2f",
--- a/src/backend/optimizer/geqo/geqo_misc.c
+++ b/src/backend/optimizer/geqo/geqo_misc.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.42 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.43 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -41,10 +41,10 @@ avg_pool(Pool *pool)
 		elog(ERROR, "pool_size is zero");
 
 	/*
-	 * Since the pool may contain multiple occurrences of DBL_MAX, divide
-	 * by pool->size before summing, not after, to avoid overflow. This
-	 * loses a little in speed and accuracy, but this routine is only used
-	 * for debug printouts, so we don't care that much.
+	 * Since the pool may contain multiple occurrences of DBL_MAX, divide by
+	 * pool->size before summing, not after, to avoid overflow. This loses a
+	 * little in speed and accuracy, but this routine is only used for debug
+	 * printouts, so we don't care that much.
 	 */
 	for (i = 0; i < pool->size; i++)
 		cumulative += pool->data[i].worth / pool->size;
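
The point of the reflowed comment above: with several DBL_MAX entries in the
pool, summing first would overflow to infinity, while dividing each term by
pool->size first keeps every partial sum finite. A minimal illustration of the
trick, same loop shape as avg_pool() with simplified surroundings:

    /* Sketch: divide before summing so DBL_MAX entries can't overflow. */
    double      cumulative = 0.0;

    for (i = 0; i < pool->size; i++)
        cumulative += pool->data[i].worth / pool->size;     /* stays finite */
    return cumulative;      /* the average, traded for a little accuracy */
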
--- a/src/backend/optimizer/geqo/geqo_pool.c
+++ b/src/backend/optimizer/geqo/geqo_pool.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.26 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.27 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -96,13 +96,12 @@ random_init_pool(Pool *pool, GeqoEvalData *evaldata)
 	int			bad = 0;
 
 	/*
-	 * We immediately discard any invalid individuals (those that
-	 * geqo_eval returns DBL_MAX for), thereby not wasting pool space on
-	 * them.
+	 * We immediately discard any invalid individuals (those that geqo_eval
+	 * returns DBL_MAX for), thereby not wasting pool space on them.
 	 *
-	 * If we fail to make any valid individuals after 10000 tries, give up;
-	 * this probably means something is broken, and we shouldn't just let
-	 * ourselves get stuck in an infinite loop.
+	 * If we fail to make any valid individuals after 10000 tries, give up; this
+	 * probably means something is broken, and we shouldn't just let ourselves
+	 * get stuck in an infinite loop.
 	 */
 	i = 0;
 	while (i < pool->size)
@@ -223,8 +222,8 @@ spread_chromo(Chromosome *chromo, Pool *pool)
 
 
 	/*
-	 * these 2 cases move the search indices since a new location has
-	 * not yet been found.
+	 * these 2 cases move the search indices since a new location has not
+	 * yet been found.
 	 */
 
 	else if (chromo->worth < pool->data[mid].worth)
@@ -242,8 +241,7 @@ spread_chromo(Chromosome *chromo, Pool *pool)
 	/* now we have index for chromo */
 
 	/*
-	 * move every gene from index on down one position to make room for
-	 * chromo
+	 * move every gene from index on down one position to make room for chromo
 	 */
 
 	/*
--- a/src/backend/optimizer/geqo/geqo_recombination.c
+++ b/src/backend/optimizer/geqo/geqo_recombination.c
@@ -3,7 +3,7 @@
 * geqo_recombination.c
 *	 misc recombination procedures
 *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.14 2004/08/29 05:06:43 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.15 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -62,8 +62,8 @@ init_tour(Gene *tour, int num_gene)
 	}
 
 	/*
-	 * Since geqo_eval() will reject tours where tour[0] > tour[1], we may
-	 * as well switch the two to make it a valid tour.
+	 * Since geqo_eval() will reject tours where tour[0] > tour[1], we may as
+	 * well switch the two to make it a valid tour.
 	 */
 	if (num_gene >= 2 && tour[0] > tour[1])
 	{
@@ -86,8 +86,8 @@ alloc_city_table(int num_gene)
 	City	   *city_table;
 
 	/*
-	 * palloc one extra location so that nodes numbered 1..n can be
-	 * indexed directly; 0 will not be used
+	 * palloc one extra location so that nodes numbered 1..n can be indexed
+	 * directly; 0 will not be used
 	 */
 	city_table = (City *) palloc((num_gene + 1) * sizeof(City));
 
--- a/src/backend/optimizer/geqo/geqo_selection.c
+++ b/src/backend/optimizer/geqo/geqo_selection.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.19 2005/06/14 14:21:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -86,13 +86,14 @@ linear(int pool_size, double bias)		/* bias is y-intercept of linear
 
 	/*
 	 * If geqo_rand() returns exactly 1.0 then we will get exactly max from
-	 * this equation, whereas we need 0 <= index < max. Also it seems possible
-	 * that roundoff error might deliver values slightly outside the range;
-	 * in particular avoid passing a value slightly less than 0 to sqrt().
-	 * If we get a bad value just try again.
+	 * this equation, whereas we need 0 <= index < max. Also it seems
+	 * possible that roundoff error might deliver values slightly outside the
+	 * range; in particular avoid passing a value slightly less than 0 to
+	 * sqrt(). If we get a bad value just try again.
 	 */
-	do {
-		double sqrtval;
+	do
+	{
+		double		sqrtval;
 
 		sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * geqo_rand();
 		if (sqrtval > 0.0)
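
For context, the equation the comment above refers to is the linear-bias
selection formula in linear(): a uniform random number is mapped to an index
skewed toward the fitter (lower) end of the sorted pool. A hedged
reconstruction of the surrounding loop; the exact expression is assumed from
geqo_selection.c of this era, not quoted from the hunk:

    /* Sketch: retry until the linear-bias formula yields 0 <= index < max. */
    do
    {
        double      sqrtval;

        sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * geqo_rand();
        if (sqrtval > 0.0)
            sqrtval = sqrt(sqrtval);
        /* with bias = 2.0 the best entry is roughly twice as likely to be
         * drawn as one in the middle of the pool */
        index = max * (bias - sqrtval) / 2.0 / (bias - 1.0);
    } while (index < 0.0 || index >= max);
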
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.136 2005/08/22 17:34:58 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.137 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -62,7 +62,7 @@ static void compare_tlist_datatypes(List *tlist, List *colTypes,
 static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
 					  bool *differentTypes);
 static void subquery_push_qual(Query *subquery,
-				RangeTblEntry *rte, Index rti, Node *qual);
+				   RangeTblEntry *rte, Index rti, Node *qual);
 static void recurse_push_qual(Node *setOp, Query *topquery,
 				  RangeTblEntry *rte, Index rti, Node *qual);
 
@@ -105,7 +105,7 @@ make_one_rel(PlannerInfo *root)
 		if (brel == NULL)
 			continue;
 
-		Assert(brel->relid == rti); /* sanity check on array */
+		Assert(brel->relid == rti);		/* sanity check on array */
 
 		/* ignore RTEs that are "other rels" */
 		if (brel->reloptkind != RELOPT_BASEREL)
@@ -134,9 +134,9 @@ set_base_rel_pathlists(PlannerInfo *root)
 	Index		rti;
 
 	/*
-	 * Note: because we call expand_inherited_rtentry inside the loop,
-	 * it's quite possible for the base_rel_array to be enlarged while
-	 * the loop runs. Hence don't try to optimize the loop.
+	 * Note: because we call expand_inherited_rtentry inside the loop, it's
+	 * quite possible for the base_rel_array to be enlarged while the loop
+	 * runs. Hence don't try to optimize the loop.
 	 */
 	for (rti = 1; rti < root->base_rel_array_size; rti++)
 	{
@@ -255,8 +255,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	ListCell   *il;
 
 	/*
-	 * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE;
-	 * can we do better?
+	 * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; can
+	 * we do better?
 	 */
 	if (list_member_int(root->parse->rowMarks, parentRTindex))
 		ereport(ERROR,
@@ -270,8 +270,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	rel->width = 0;
 
 	/*
-	 * Generate access paths for each table in the tree (parent AND
-	 * children), and pick the cheapest path for each table.
+	 * Generate access paths for each table in the tree (parent AND children),
+	 * and pick the cheapest path for each table.
 	 */
 	foreach(il, inheritlist)
 	{
@@ -286,18 +286,17 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 		childOID = childrte->relid;
 
 		/*
-		 * Make a RelOptInfo for the child so we can do planning.
-		 * Mark it as an "other rel" since it will not be part of the
-		 * main join tree.
+		 * Make a RelOptInfo for the child so we can do planning. Mark it as
+		 * an "other rel" since it will not be part of the main join tree.
 		 */
 		childrel = build_other_rel(root, childRTindex);
 
 		/*
-		 * Copy the parent's targetlist and restriction quals to the
-		 * child, with attribute-number adjustment as needed. We don't
-		 * bother to copy the join quals, since we can't do any joining of
-		 * the individual tables. Also, we just zap attr_needed rather
-		 * than trying to adjust it; it won't be looked at in the child.
+		 * Copy the parent's targetlist and restriction quals to the child,
+		 * with attribute-number adjustment as needed. We don't bother to
+		 * copy the join quals, since we can't do any joining of the
+		 * individual tables. Also, we just zap attr_needed rather than
+		 * trying to adjust it; it won't be looked at in the child.
 		 */
 		childrel->reltargetlist = (List *)
 			adjust_inherited_attrs((Node *) rel->reltargetlist,
@@ -320,13 +319,14 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 		 */
 		if (constraint_exclusion)
 		{
-			List *constraint_pred;
+			List	   *constraint_pred;
 
 			constraint_pred = get_relation_constraints(childOID, childrel);
 
 			/*
-			 * We do not currently enforce that CHECK constraints contain
-			 * only immutable functions, so it's necessary to check here.
-			 * We daren't draw conclusions from plan-time evaluation of
+			 * We do not currently enforce that CHECK constraints contain only
+			 * immutable functions, so it's necessary to check here. We
+			 * daren't draw conclusions from plan-time evaluation of
 			 * non-immutable functions.
 			 */
 			if (!contain_mutable_functions((Node *) constraint_pred))
@@ -351,9 +351,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 		subpaths = lappend(subpaths, childrel->cheapest_total_path);
 
 		/*
-		 * Propagate size information from the child back to the parent.
-		 * For simplicity, we use the largest widths from any child as the
-		 * parent estimates.
+		 * Propagate size information from the child back to the parent. For
+		 * simplicity, we use the largest widths from any child as the parent
+		 * estimates.
 		 */
 		rel->rows += childrel->rows;
 		if (childrel->width > rel->width)
@@ -377,9 +377,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	}
 
 	/*
-	 * Finally, build Append path and install it as the only access path
-	 * for the parent rel. (Note: this is correct even if we have zero
-	 * or one live subpath due to constraint exclusion.)
+	 * Finally, build Append path and install it as the only access path for
+	 * the parent rel. (Note: this is correct even if we have zero or one
+	 * live subpath due to constraint exclusion.)
 	 */
 	add_path(rel, (Path *) create_append_path(rel, subpaths));
 
@@ -430,18 +430,18 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
 
 	/*
 	 * If there are any restriction clauses that have been attached to the
-	 * subquery relation, consider pushing them down to become WHERE or
-	 * HAVING quals of the subquery itself. This transformation is useful
-	 * because it may allow us to generate a better plan for the subquery
-	 * than evaluating all the subquery output rows and then filtering them.
+	 * subquery relation, consider pushing them down to become WHERE or HAVING
+	 * quals of the subquery itself. This transformation is useful because it
+	 * may allow us to generate a better plan for the subquery than evaluating
+	 * all the subquery output rows and then filtering them.
 	 *
-	 * There are several cases where we cannot push down clauses.
-	 * Restrictions involving the subquery are checked by
-	 * subquery_is_pushdown_safe(). Restrictions on individual clauses
-	 * are checked by qual_is_pushdown_safe().
+	 * There are several cases where we cannot push down clauses. Restrictions
+	 * involving the subquery are checked by subquery_is_pushdown_safe().
+	 * Restrictions on individual clauses are checked by
+	 * qual_is_pushdown_safe().
 	 *
-	 * Non-pushed-down clauses will get evaluated as qpquals of the
-	 * SubqueryScan node.
+	 * Non-pushed-down clauses will get evaluated as qpquals of the SubqueryScan
+	 * node.
 	 *
 	 * XXX Are there any cases where we want to make a policy decision not to
 	 * push down a pushable qual, because it'd result in a worse plan?
@@ -475,10 +475,10 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	pfree(differentTypes);
 
 	/*
-	 * We can safely pass the outer tuple_fraction down to the subquery
-	 * if the outer level has no joining, aggregation, or sorting to do.
-	 * Otherwise we'd better tell the subquery to plan for full retrieval.
-	 * (XXX This could probably be made more intelligent ...)
+	 * We can safely pass the outer tuple_fraction down to the subquery if the
+	 * outer level has no joining, aggregation, or sorting to do. Otherwise
+	 * we'd better tell the subquery to plan for full retrieval. (XXX This
+	 * could probably be made more intelligent ...)
 	 */
 	if (parse->hasAggs ||
 		parse->groupClause ||
@@ -540,8 +540,8 @@ make_fromexpr_rel(PlannerInfo *root, FromExpr *from)
 
 	/*
 	 * Count the number of child jointree nodes. This is the depth of the
-	 * dynamic-programming algorithm we must employ to consider all ways
-	 * of joining the child nodes.
+	 * dynamic-programming algorithm we must employ to consider all ways of
+	 * joining the child nodes.
 	 */
 	levels_needed = list_length(from->fromlist);
 
@@ -603,11 +603,11 @@ make_one_rel_by_joins(PlannerInfo *root, int levels_needed, List *initial_rels)
 	RelOptInfo *rel;
 
 	/*
-	 * We employ a simple "dynamic programming" algorithm: we first find
-	 * all ways to build joins of two jointree items, then all ways to
-	 * build joins of three items (from two-item joins and single items),
-	 * then four-item joins, and so on until we have considered all ways
-	 * to join all the items into one rel.
+	 * We employ a simple "dynamic programming" algorithm: we first find all
+	 * ways to build joins of two jointree items, then all ways to build joins
+	 * of three items (from two-item joins and single items), then four-item
+	 * joins, and so on until we have considered all ways to join all the
+	 * items into one rel.
 	 *
 	 * joinitems[j] is a list of all the j-item rels. Initially we set
 	 * joinitems[1] to represent all the single-jointree-item relations.
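
The comment above describes the classic System R dynamic programming. A sketch
of the loop shape it implies — joinitems[1] seeded with the single-item
relations and each later level built from the levels below it; the worker name
make_rels_by_joins() is an assumption about this file's internals, not quoted
from the hunk:

    /* Sketch of the dynamic-programming enumeration described above. */
    joinitems[1] = initial_rels;    /* all single-jointree-item relations */

    for (lev = 2; lev <= levels_needed; lev++)
    {
        /* build all lev-item rels from the smaller levels already computed */
        joinitems[lev] = make_rels_by_joins(root, lev, joinitems);
    }
    /* joinitems[levels_needed] now holds every way to join all the items */
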
@@ -823,8 +823,8 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
 		return false;
 
 	/*
-	 * Examine all Vars used in clause; since it's a restriction clause,
-	 * all such Vars must refer to subselect output columns.
+	 * Examine all Vars used in clause; since it's a restriction clause, all
+	 * such Vars must refer to subselect output columns.
 	 */
 	vars = pull_var_clause(qual, false);
 	foreach(vl, vars)
@@ -835,9 +835,9 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
 		Assert(var->varno == rti);
 
 		/*
-		 * We use a bitmapset to avoid testing the same attno more than
-		 * once. (NB: this only works because subquery outputs can't have
-		 * negative attnos.)
+		 * We use a bitmapset to avoid testing the same attno more than once.
+		 * (NB: this only works because subquery outputs can't have negative
+		 * attnos.)
 		 */
 		if (bms_is_member(var->varattno, tested))
 			continue;
@@ -893,11 +893,10 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
 	else
 	{
 		/*
-		 * We need to replace Vars in the qual (which must refer to
-		 * outputs of the subquery) with copies of the subquery's
-		 * targetlist expressions. Note that at this point, any uplevel
-		 * Vars in the qual should have been replaced with Params, so they
-		 * need no work.
+		 * We need to replace Vars in the qual (which must refer to outputs of
+		 * the subquery) with copies of the subquery's targetlist expressions.
+		 * Note that at this point, any uplevel Vars in the qual should have
+		 * been replaced with Params, so they need no work.
 		 *
 		 * This step also ensures that when we are pushing into a setop tree,
 		 * each component query gets its own copy of the qual.
@@ -907,9 +906,9 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
 										   CMD_SELECT, 0);
 
 		/*
-		 * Now attach the qual to the proper place: normally WHERE, but
-		 * if the subquery uses grouping or aggregation, put it in HAVING
-		 * (since the qual really refers to the group-result rows).
+		 * Now attach the qual to the proper place: normally WHERE, but if the
+		 * subquery uses grouping or aggregation, put it in HAVING (since the
+		 * qual really refers to the group-result rows).
 		 */
 		if (subquery->hasAggs || subquery->groupClause || subquery->havingQual)
 			subquery->havingQual = make_and_qual(subquery->havingQual, qual);
@@ -919,8 +918,8 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
 
 		/*
 		 * We need not change the subquery's hasAggs or hasSublinks flags,
-		 * since we can't be pushing down any aggregates that weren't
-		 * there before, and we don't push down subselects at all.
+		 * since we can't be pushing down any aggregates that weren't there
+		 * before, and we don't push down subselects at all.
 		 */
 	}
 }
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.74 2005/10/11 16:44:40 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.75 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -82,7 +82,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
 * hisel + losel + null_frac - 1.)
 *
 * If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
- * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL.  The same applies if the equation
 * yields an impossible (negative) result.
 *
 * A free side-effect is that we can recognize redundant inequalities such
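
A quick worked instance of the equation in the comment above, taking null_frac
as zero: if the "x < hi" clause alone is estimated at hisel = 0.7 and the
"x > lo" clause at losel = 0.5, the pair is treated as a range with
selectivity 0.7 + 0.5 - 1.0 = 0.2. The field names below follow the rqlist
bookkeeping visible elsewhere in this diff:

    /* Worked example of the range-selectivity equation (null_frac = 0):
     *   hibound = 0.7  ("x < hi" keeps 70% of the rows)
     *   lobound = 0.5  ("x > lo" keeps 50% of the rows)
     *   s2 = 0.7 + 0.5 - 1.0 = 0.2
     * A non-positive s2, or either bound equal to DEFAULT_INEQ_SEL, falls
     * back to DEFAULT_RANGE_INEQ_SEL instead. */
    s2 = rqlist->hibound + rqlist->lobound - 1.0;
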
@@ -102,9 +102,9 @@ clauselist_selectivity(PlannerInfo *root,
 	ListCell   *l;
 
 	/*
-	 * Initial scan over clauses. Anything that doesn't look like a
-	 * potential rangequery clause gets multiplied into s1 and forgotten.
-	 * Anything that does gets inserted into an rqlist entry.
+	 * Initial scan over clauses. Anything that doesn't look like a potential
+	 * rangequery clause gets multiplied into s1 and forgotten. Anything that
+	 * does gets inserted into an rqlist entry.
 	 */
 	foreach(l, clauses)
 	{
@@ -127,10 +127,10 @@ clauselist_selectivity(PlannerInfo *root,
 			rinfo = NULL;
 
 		/*
-		 * See if it looks like a restriction clause with a pseudoconstant
-		 * on one side. (Anything more complicated than that might not
-		 * behave in the simple way we are expecting.) Most of the tests
-		 * here can be done more efficiently with rinfo than without.
+		 * See if it looks like a restriction clause with a pseudoconstant on
+		 * one side. (Anything more complicated than that might not behave in
+		 * the simple way we are expecting.) Most of the tests here can be
+		 * done more efficiently with rinfo than without.
 		 */
 		if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
 		{
@@ -142,10 +142,10 @@ clauselist_selectivity(PlannerInfo *root,
 			{
 				ok = (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) &&
 					(is_pseudo_constant_clause_relids(lsecond(expr->args),
-												  rinfo->right_relids) ||
+													  rinfo->right_relids) ||
 					 (varonleft = false,
-					is_pseudo_constant_clause_relids(linitial(expr->args),
-												 rinfo->left_relids)));
+					  is_pseudo_constant_clause_relids(linitial(expr->args),
+													   rinfo->left_relids)));
 			}
 			else
 			{
@@ -159,8 +159,8 @@ clauselist_selectivity(PlannerInfo *root,
 		{
 			/*
 			 * If it's not a "<" or ">" operator, just merge the
-			 * selectivity in generically. But if it's the right
-			 * oprrest, add the clause to rqlist for later processing.
+			 * selectivity in generically. But if it's the right oprrest,
+			 * add the clause to rqlist for later processing.
 			 */
 			switch (get_oprrest(expr->opno))
 			{
@@ -199,8 +199,8 @@ clauselist_selectivity(PlannerInfo *root,
 
 			/*
 			 * Exact equality to the default value probably means the
-			 * selectivity function punted. This is not airtight but
-			 * should be good enough.
+			 * selectivity function punted. This is not airtight but should
+			 * be good enough.
 			 */
 			if (rqlist->hibound == DEFAULT_INEQ_SEL ||
 				rqlist->lobound == DEFAULT_INEQ_SEL)
@@ -289,8 +289,8 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause,
 	for (rqelem = *rqlist; rqelem; rqelem = rqelem->next)
 	{
 		/*
-		 * We use full equal() here because the "var" might be a function
-		 * of one or more attributes of the same relation...
+		 * We use full equal() here because the "var" might be a function of
+		 * one or more attributes of the same relation...
 		 */
 		if (!equal(var, rqelem->var))
 			continue;
@@ -423,17 +423,16 @@ clause_selectivity(PlannerInfo *root,
 		rinfo = (RestrictInfo *) clause;
 
 		/*
-		 * If possible, cache the result of the selectivity calculation
-		 * for the clause. We can cache if varRelid is zero or the clause
-		 * contains only vars of that relid --- otherwise varRelid will
-		 * affect the result, so mustn't cache. We also have to be
-		 * careful about the jointype. It's OK to cache when jointype is
-		 * JOIN_INNER or one of the outer join types (any given outer-join
-		 * clause should always be examined with the same jointype, so
-		 * result won't change). It's not OK to cache when jointype is one
-		 * of the special types associated with IN processing, because the
-		 * same clause may be examined with different jointypes and the
-		 * result should vary.
+		 * If possible, cache the result of the selectivity calculation for
+		 * the clause. We can cache if varRelid is zero or the clause
+		 * contains only vars of that relid --- otherwise varRelid will affect
+		 * the result, so mustn't cache. We also have to be careful about the
+		 * jointype. It's OK to cache when jointype is JOIN_INNER or one of
+		 * the outer join types (any given outer-join clause should always be
+		 * examined with the same jointype, so result won't change). It's not
+		 * OK to cache when jointype is one of the special types associated
+		 * with IN processing, because the same clause may be examined with
+		 * different jointypes and the result should vary.
 		 */
 		if (varRelid == 0 ||
 			bms_is_subset_singleton(rinfo->clause_relids, varRelid))
@@ -477,8 +476,8 @@ clause_selectivity(PlannerInfo *root,
 			Var		   *var = (Var *) clause;
 
 			/*
-			 * We probably shouldn't ever see an uplevel Var here, but if we
-			 * do, return the default selectivity...
+			 * We probably shouldn't ever see an uplevel Var here, but if we do,
+			 * return the default selectivity...
 			 */
 			if (var->varlevelsup == 0 &&
 				(varRelid == 0 || varRelid == (int) var->varno))
@@ -488,23 +487,23 @@ clause_selectivity(PlannerInfo *root,
 				if (rte->rtekind == RTE_SUBQUERY)
 				{
 					/*
-					 * XXX not smart about subquery references... any way to
-					 * do better?
+					 * XXX not smart about subquery references... any way to do
+					 * better?
 					 */
 					s1 = 0.5;
 				}
 				else
 				{
 					/*
-					 * A Var at the top of a clause must be a bool Var. This
-					 * is equivalent to the clause reln.attribute = 't', so we
+					 * A Var at the top of a clause must be a bool Var. This is
+					 * equivalent to the clause reln.attribute = 't', so we
 					 * compute the selectivity as if that is what we have.
 					 */
 					s1 = restriction_selectivity(root,
 												 BooleanEqualOperator,
 												 list_make2(var,
-														makeBoolConst(true,
-																  false)),
+															makeBoolConst(true,
+																		  false)),
 												 varRelid);
 				}
 			}
@@ -534,7 +533,7 @@ clause_selectivity(PlannerInfo *root,
 	{
 		/* inverse of the selectivity of the underlying clause */
 		s1 = 1.0 - clause_selectivity(root,
-								  (Node *) get_notclausearg((Expr *) clause),
+							  (Node *) get_notclausearg((Expr *) clause),
 									  varRelid,
 									  jointype);
 	}
@@ -576,17 +575,16 @@ clause_selectivity(PlannerInfo *root,
 		{
 			/*
 			 * If we are considering a nestloop join then all clauses are
-			 * restriction clauses, since we are only interested in the
-			 * one relation.
+			 * restriction clauses, since we are only interested in the one
+			 * relation.
 			 */
 			is_join_clause = false;
 		}
 		else
 		{
 			/*
-			 * Otherwise, it's a join if there's more than one relation
-			 * used. We can optimize this calculation if an rinfo was
-			 * passed.
+			 * Otherwise, it's a join if there's more than one relation used.
+			 * We can optimize this calculation if an rinfo was passed.
 			 */
 			if (rinfo)
 				is_join_clause = (bms_membership(rinfo->clause_relids) ==
@@ -613,8 +611,8 @@ clause_selectivity(PlannerInfo *root,
 	else if (is_funcclause(clause))
 	{
 		/*
-		 * This is not an operator, so we guess at the selectivity. THIS
-		 * IS A HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
+		 * This is not an operator, so we guess at the selectivity. THIS IS A
+		 * HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
 		 * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
 		 */
 		s1 = (Selectivity) 0.3333333;
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.148 2005/10/05 17:19:19 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.149 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -121,8 +121,8 @@ clamp_row_est(double nrows)
 {
 	/*
 	 * Force estimate to be at least one row, to make explain output look
-	 * better and to avoid possible divide-by-zero when interpolating
-	 * costs. Make it an integer, too.
+	 * better and to avoid possible divide-by-zero when interpolating costs.
+	 * Make it an integer, too.
 	 */
 	if (nrows < 1.0)
 		nrows = 1.0;
@@ -155,12 +155,11 @@ cost_seqscan(Path *path, PlannerInfo *root,
 	/*
 	 * disk costs
 	 *
-	 * The cost of reading a page sequentially is 1.0, by definition. Note
-	 * that the Unix kernel will typically do some amount of read-ahead
-	 * optimization, so that this cost is less than the true cost of
-	 * reading a page from disk. We ignore that issue here, but must take
-	 * it into account when estimating the cost of non-sequential
-	 * accesses!
+	 * The cost of reading a page sequentially is 1.0, by definition. Note that
+	 * the Unix kernel will typically do some amount of read-ahead
+	 * optimization, so that this cost is less than the true cost of reading a
+	 * page from disk. We ignore that issue here, but must take it into
+	 * account when estimating the cost of non-sequential accesses!
 	 */
 	run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
 
@@ -276,10 +275,10 @@ cost_index(IndexPath *path, PlannerInfo *root,
 		startup_cost += disable_cost;
 
 	/*
-	 * Call index-access-method-specific code to estimate the processing
-	 * cost for scanning the index, as well as the selectivity of the
-	 * index (ie, the fraction of main-table tuples we will have to
-	 * retrieve) and its correlation to the main-table tuple order.
+	 * Call index-access-method-specific code to estimate the processing cost
+	 * for scanning the index, as well as the selectivity of the index (ie,
+	 * the fraction of main-table tuples we will have to retrieve) and its
+	 * correlation to the main-table tuple order.
 	 */
 	OidFunctionCall7(index->amcostestimate,
 					 PointerGetDatum(root),
@@ -292,8 +291,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
 
 	/*
 	 * Save amcostestimate's results for possible use in bitmap scan planning.
-	 * We don't bother to save indexStartupCost or indexCorrelation, because
-	 * a bitmap scan doesn't care about either.
+	 * We don't bother to save indexStartupCost or indexCorrelation, because a
+	 * bitmap scan doesn't care about either.
 	 */
 	path->indextotalcost = indexTotalCost;
 	path->indexselectivity = indexSelectivity;
@@ -366,19 +365,18 @@ cost_index(IndexPath *path, PlannerInfo *root,
 	}
 
 	/*
-	 * min_IO_cost corresponds to the perfectly correlated case
-	 * (csquared=1), max_IO_cost to the perfectly uncorrelated case
-	 * (csquared=0). Note that we just charge random_page_cost per page
-	 * in the uncorrelated case, rather than using
-	 * cost_nonsequential_access, since we've already accounted for
-	 * caching effects by using the Mackert model.
+	 * min_IO_cost corresponds to the perfectly correlated case (csquared=1),
+	 * max_IO_cost to the perfectly uncorrelated case (csquared=0). Note that
+	 * we just charge random_page_cost per page in the uncorrelated case,
+	 * rather than using cost_nonsequential_access, since we've already
+	 * accounted for caching effects by using the Mackert model.
 	 */
 	min_IO_cost = ceil(indexSelectivity * T);
 	max_IO_cost = pages_fetched * random_page_cost;
 
 	/*
-	 * Now interpolate based on estimated index order correlation to get
-	 * total disk I/O cost for main table accesses.
+	 * Now interpolate based on estimated index order correlation to get total
+	 * disk I/O cost for main table accesses.
 	 */
 	csquared = indexCorrelation * indexCorrelation;
 
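
The interpolation the two comments above set up can be written in one line:
csquared = 1 (perfectly correlated index order) selects min_IO_cost,
csquared = 0 selects max_IO_cost, and everything in between blends linearly.
A sketch of the step that presumably follows in cost_index():

    /* Sketch: blend the two I/O bounds by the squared correlation. */
    csquared = indexCorrelation * indexCorrelation;
    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
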
@@ -390,9 +388,9 @@ cost_index(IndexPath *path, PlannerInfo *root,
 	 * Normally the indexquals will be removed from the list of restriction
 	 * clauses that we have to evaluate as qpquals, so we should subtract
 	 * their costs from baserestrictcost. But if we are doing a join then
-	 * some of the indexquals are join clauses and shouldn't be
-	 * subtracted. Rather than work out exactly how much to subtract, we
-	 * don't subtract anything.
+	 * some of the indexquals are join clauses and shouldn't be subtracted.
+	 * Rather than work out exactly how much to subtract, we don't subtract
+	 * anything.
 	 */
 	startup_cost += baserel->baserestrictcost.startup;
 	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -467,9 +465,9 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 	/*
 	 * For small numbers of pages we should charge random_page_cost apiece,
 	 * while if nearly all the table's pages are being read, it's more
-	 * appropriate to charge 1.0 apiece. The effect is nonlinear, too.
-	 * For lack of a better idea, interpolate like this to determine the
-	 * cost per page.
+	 * appropriate to charge 1.0 apiece. The effect is nonlinear, too. For
+	 * lack of a better idea, interpolate like this to determine the cost per
+	 * page.
 	 */
 	if (pages_fetched >= 2.0)
 		cost_per_page = random_page_cost -
@@ -482,10 +480,10 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 	/*
 	 * Estimate CPU costs per tuple.
 	 *
-	 * Often the indexquals don't need to be rechecked at each tuple ...
-	 * but not always, especially not if there are enough tuples involved
-	 * that the bitmaps become lossy. For the moment, just assume they
-	 * will be rechecked always.
+	 * Often the indexquals don't need to be rechecked at each tuple ... but not
+	 * always, especially not if there are enough tuples involved that the
+	 * bitmaps become lossy. For the moment, just assume they will be
+	 * rechecked always.
 	 */
 	startup_cost += baserel->baserestrictcost.startup;
 	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -527,7 +525,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
 * Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
- * creation, not the eventual heap access. In that sense the object isn't
+ * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.
 */
@@ -535,24 +533,24 @@ void
 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
 {
 	Cost		totalCost;
-	Selectivity selec;
+	Selectivity selec;
 	ListCell   *l;
 
 	/*
-	 * We estimate AND selectivity on the assumption that the inputs
-	 * are independent. This is probably often wrong, but we don't
-	 * have the info to do better.
+	 * We estimate AND selectivity on the assumption that the inputs are
+	 * independent. This is probably often wrong, but we don't have the info
+	 * to do better.
 	 *
 	 * The runtime cost of the BitmapAnd itself is estimated at 100x
-	 * cpu_operator_cost for each tbm_intersect needed. Probably too
-	 * small, definitely too simplistic?
+	 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
+	 * definitely too simplistic?
 	 */
 	totalCost = 0.0;
 	selec = 1.0;
 	foreach(l, path->bitmapquals)
 	{
-		Path	   *subpath = (Path *) lfirst(l);
-		Cost		subCost;
+		Path	   *subpath = (Path *) lfirst(l);
+		Cost		subCost;
 		Selectivity subselec;
 
 		cost_bitmap_tree_node(subpath, &subCost, &subselec);
@@ -578,25 +576,25 @@ void
 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
 {
 	Cost		totalCost;
-	Selectivity selec;
+	Selectivity selec;
 	ListCell   *l;
 
 	/*
-	 * We estimate OR selectivity on the assumption that the inputs
-	 * are non-overlapping, since that's often the case in "x IN (list)"
-	 * type situations. Of course, we clamp to 1.0 at the end.
+	 * We estimate OR selectivity on the assumption that the inputs are
+	 * non-overlapping, since that's often the case in "x IN (list)" type
+	 * situations. Of course, we clamp to 1.0 at the end.
 	 *
 	 * The runtime cost of the BitmapOr itself is estimated at 100x
-	 * cpu_operator_cost for each tbm_union needed. Probably too
-	 * small, definitely too simplistic? We are aware that the tbm_unions
-	 * are optimized out when the inputs are BitmapIndexScans.
+	 * cpu_operator_cost for each tbm_union needed. Probably too small,
+	 * definitely too simplistic? We are aware that the tbm_unions are
+	 * optimized out when the inputs are BitmapIndexScans.
 	 */
 	totalCost = 0.0;
 	selec = 0.0;
 	foreach(l, path->bitmapquals)
 	{
-		Path	   *subpath = (Path *) lfirst(l);
-		Cost		subCost;
+		Path	   *subpath = (Path *) lfirst(l);
+		Cost		subCost;
 		Selectivity subselec;
 
 		cost_bitmap_tree_node(subpath, &subCost, &subselec);
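
The two hunks above state the combination rules in prose; in code they differ
by a single operator. Under the independence assumption the AND selectivities
multiply, and under the disjointness assumption the OR selectivities add (with
the caller clamping to 1.0). A combined sketch, not the two actual functions:

    /* Sketch of both combination rules; one loop shown for each. */
    selec = 1.0;                    /* BitmapAnd: start at "everything" */
    foreach(l, path->bitmapquals)
    {
        cost_bitmap_tree_node((Path *) lfirst(l), &subCost, &subselec);
        selec *= subselec;          /* independent inputs: P(A) * P(B) */
        totalCost += subCost + 100.0 * cpu_operator_cost;
    }

    selec = 0.0;                    /* BitmapOr: start at "nothing" */
    foreach(l, path->bitmapquals)
    {
        cost_bitmap_tree_node((Path *) lfirst(l), &subCost, &subselec);
        selec += subselec;          /* disjoint inputs: P(A) + P(B), clamped later */
        totalCost += subCost + 100.0 * cpu_operator_cost;
    }
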
@@ -661,10 +659,9 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel)
 	Assert(baserel->rtekind == RTE_SUBQUERY);
 
 	/*
-	 * Cost of path is cost of evaluating the subplan, plus cost of
-	 * evaluating any restriction clauses that will be attached to the
-	 * SubqueryScan node, plus cpu_tuple_cost to account for selection and
-	 * projection overhead.
+	 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
+	 * any restriction clauses that will be attached to the SubqueryScan node,
+	 * plus cpu_tuple_cost to account for selection and projection overhead.
 	 */
 	path->startup_cost = baserel->subplan->startup_cost;
 	path->total_cost = baserel->subplan->total_cost;
@@ -694,8 +691,8 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
 
 	/*
 	 * For now, estimate function's cost at one operator eval per function
-	 * call. Someday we should revive the function cost estimate columns
-	 * in pg_proc...
+	 * call. Someday we should revive the function cost estimate columns in
+	 * pg_proc...
 	 */
 	cpu_per_tuple = cpu_operator_cost;
 
@@ -758,9 +755,8 @@ cost_sort(Path *path, PlannerInfo *root,
 		startup_cost += disable_cost;
 
 	/*
-	 * We want to be sure the cost of a sort is never estimated as zero,
-	 * even if passed-in tuple count is zero. Besides, mustn't do
-	 * log(0)...
+	 * We want to be sure the cost of a sort is never estimated as zero, even
+	 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
 	 */
 	if (tuples < 2.0)
 		tuples = 2.0;
@@ -790,8 +786,8 @@ cost_sort(Path *path, PlannerInfo *root,
 	}
 
 	/*
-	 * Also charge a small amount (arbitrarily set equal to operator cost)
-	 * per extracted tuple.
+	 * Also charge a small amount (arbitrarily set equal to operator cost) per
+	 * extracted tuple.
 	 */
 	run_cost += cpu_operator_cost * tuples;
 
@@ -828,17 +824,16 @@ cost_material(Path *path,
 
 	/*
 	 * Charge a very small amount per inserted tuple, to reflect bookkeeping
-	 * costs. We use cpu_tuple_cost/10 for this. This is needed to break
-	 * the tie that would otherwise exist between nestloop with A outer,
+	 * costs. We use cpu_tuple_cost/10 for this. This is needed to break the
+	 * tie that would otherwise exist between nestloop with A outer,
 	 * materialized B inner and nestloop with B outer, materialized A inner.
 	 * The extra cost ensures we'll prefer materializing the smaller rel.
 	 */
 	startup_cost += cpu_tuple_cost * 0.1 * tuples;
 
 	/*
-	 * Also charge a small amount per extracted tuple. We use
-	 * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
-	 * a bare seqscan.
+	 * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
+	 * so that it doesn't appear worthwhile to materialize a bare seqscan.
 	 */
 	run_cost += cpu_tuple_cost * tuples;
 
@@ -865,23 +860,22 @@ cost_agg(Path *path, PlannerInfo *root,
 	Cost		total_cost;
 
 	/*
-	 * We charge one cpu_operator_cost per aggregate function per input
-	 * tuple, and another one per output tuple (corresponding to transfn
-	 * and finalfn calls respectively). If we are grouping, we charge an
-	 * additional cpu_operator_cost per grouping column per input tuple
-	 * for grouping comparisons.
+	 * We charge one cpu_operator_cost per aggregate function per input tuple,
+	 * and another one per output tuple (corresponding to transfn and finalfn
+	 * calls respectively). If we are grouping, we charge an additional
+	 * cpu_operator_cost per grouping column per input tuple for grouping
+	 * comparisons.
 	 *
 	 * We will produce a single output tuple if not grouping, and a tuple per
 	 * group otherwise. We charge cpu_tuple_cost for each output tuple.
 	 *
-	 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
-	 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
-	 * input path is already sorted appropriately, AGG_SORTED should be
-	 * preferred (since it has no risk of memory overflow). This will
-	 * happen as long as the computed total costs are indeed exactly equal
-	 * --- but if there's roundoff error we might do the wrong thing. So
-	 * be sure that the computations below form the same intermediate
-	 * values in the same order.
+	 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the same
+	 * total CPU cost, but AGG_SORTED has lower startup cost. If the input
+	 * path is already sorted appropriately, AGG_SORTED should be preferred
+	 * (since it has no risk of memory overflow). This will happen as long as
+	 * the computed total costs are indeed exactly equal --- but if there's
+	 * roundoff error we might do the wrong thing. So be sure that the
+	 * computations below form the same intermediate values in the same order.
 	 */
 	if (aggstrategy == AGG_PLAIN)
 	{
@@ -937,8 +931,8 @@ cost_group(Path *path, PlannerInfo *root,
 	total_cost = input_total_cost;
 
 	/*
-	 * Charge one cpu_operator_cost per comparison per input tuple. We
-	 * assume all columns get compared at most of the tuples.
+	 * Charge one cpu_operator_cost per comparison per input tuple. We assume
+	 * all columns get compared at most of the tuples.
 	 */
 	total_cost += cpu_operator_cost * input_tuples * numGroupCols;
 
@@ -968,10 +962,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
 	Selectivity joininfactor;
 
 	/*
-	 * If inner path is an indexscan, be sure to use its estimated output
-	 * row count, which may be lower than the restriction-clause-only row
-	 * count of its parent. (We don't include this case in the PATH_ROWS
-	 * macro because it applies *only* to a nestloop's inner relation.)
+	 * If inner path is an indexscan, be sure to use its estimated output row
+	 * count, which may be lower than the restriction-clause-only row count of
+	 * its parent. (We don't include this case in the PATH_ROWS macro because
+	 * it applies *only* to a nestloop's inner relation.)
 	 */
 	if (IsA(inner_path, IndexPath))
 		inner_path_rows = ((IndexPath *) inner_path)->rows;
@@ -982,11 +976,11 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
 		startup_cost += disable_cost;
 
 	/*
-	 * If we're doing JOIN_IN then we will stop scanning inner tuples for
-	 * an outer tuple as soon as we have one match. Account for the
-	 * effects of this by scaling down the cost estimates in proportion to
-	 * the JOIN_IN selectivity. (This assumes that all the quals attached
-	 * to the join are IN quals, which should be true.)
+	 * If we're doing JOIN_IN then we will stop scanning inner tuples for an
+	 * outer tuple as soon as we have one match. Account for the effects of
+	 * this by scaling down the cost estimates in proportion to the JOIN_IN
+	 * selectivity. (This assumes that all the quals attached to the join are
+	 * IN quals, which should be true.)
 	 */
 	joininfactor = join_in_selectivity(path, root);
 
@@ -996,9 +990,9 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
 	 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
 	 * before we can start returning tuples, so the join's startup cost is
 	 * their sum. What's not so clear is whether the inner path's
-	 * startup_cost must be paid again on each rescan of the inner path.
-	 * This is not true if the inner path is materialized or is a
-	 * hashjoin, but probably is true otherwise.
+	 * startup_cost must be paid again on each rescan of the inner path. This
+	 * is not true if the inner path is materialized or is a hashjoin, but
|
||||
* probably is true otherwise.
|
||||
*/
|
||||
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
|
||||
run_cost += outer_path->total_cost - outer_path->startup_cost;
|
||||
@@ -1077,12 +1071,11 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)

/*
 * Compute cost and selectivity of the mergequals and qpquals (other
 * restriction clauses) separately. We use approx_selectivity here
 * for speed --- in most cases, any errors won't affect the result
 * much.
 * restriction clauses) separately. We use approx_selectivity here for
 * speed --- in most cases, any errors won't affect the result much.
 *
 * Note: it's probably bogus to use the normal selectivity calculation
 * here when either the outer or inner path is a UniquePath.
 * Note: it's probably bogus to use the normal selectivity calculation here
 * when either the outer or inner path is a UniquePath.
 */
merge_selec = approx_selectivity(root, mergeclauses,
path->jpath.jointype);
@@ -1095,31 +1088,30 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);

/*
 * When there are equal merge keys in the outer relation, the
 * mergejoin must rescan any matching tuples in the inner relation.
 * This means re-fetching inner tuples. Our cost model for this is
 * that a re-fetch costs the same as an original fetch, which is
 * probably an overestimate; but on the other hand we ignore the
 * bookkeeping costs of mark/restore. Not clear if it's worth
 * developing a more refined model.
 * When there are equal merge keys in the outer relation, the mergejoin
 * must rescan any matching tuples in the inner relation. This means
 * re-fetching inner tuples. Our cost model for this is that a re-fetch
 * costs the same as an original fetch, which is probably an overestimate;
 * but on the other hand we ignore the bookkeeping costs of mark/restore.
 * Not clear if it's worth developing a more refined model.
 *
 * The number of re-fetches can be estimated approximately as size of
 * merge join output minus size of inner relation. Assume that the
 * distinct key values are 1, 2, ..., and denote the number of values
 * of each key in the outer relation as m1, m2, ...; in the inner
 * relation, n1, n2, ... Then we have
 * The number of re-fetches can be estimated approximately as size of merge
 * join output minus size of inner relation. Assume that the distinct key
 * values are 1, 2, ..., and denote the number of values of each key in
 * the outer relation as m1, m2, ...; in the inner relation, n1, n2, ...
 * Then we have
 *
 * size of join = m1 * n1 + m2 * n2 + ...
 *
 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
 * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 * n1
 * + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
 * relation
 *
 * This equation works correctly for outer tuples having no inner match
 * (nk = 0), but not for inner tuples having no outer match (mk = 0);
 * we are effectively subtracting those from the number of rescanned
 * tuples, when we should not. Can we do better without expensive
 * selectivity computations?
 * This equation works correctly for outer tuples having no inner match (nk =
 * 0), but not for inner tuples having no outer match (mk = 0); we are
 * effectively subtracting those from the number of rescanned tuples, when
 * we should not. Can we do better without expensive selectivity
 * computations?
 */
if (IsA(outer_path, UniquePath))
rescannedtuples = 0;
@@ -1140,9 +1132,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
 * inputs that will actually need to be scanned. We use only the first
 * (most significant) merge clause for this purpose.
 *
 * Since this calculation is somewhat expensive, and will be the same for
 * all mergejoin paths associated with the merge clause, we cache the
 * results in the RestrictInfo node.
 * Since this calculation is somewhat expensive, and will be the same for all
 * mergejoin paths associated with the merge clause, we cache the results
 * in the RestrictInfo node.
 */
if (mergeclauses && path->jpath.jointype != JOIN_FULL)
{
@@ -1181,9 +1173,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)

/*
 * Readjust scan selectivities to account for above rounding. This is
 * normally an insignificant effect, but when there are only a few
 * rows in the inputs, failing to do this makes for a large percentage
 * error.
 * normally an insignificant effect, but when there are only a few rows in
 * the inputs, failing to do this makes for a large percentage error.
 */
outerscansel = outer_rows / outer_path_rows;
innerscansel = inner_rows / inner_path_rows;
@@ -1231,20 +1222,20 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/* CPU costs */

/*
 * If we're doing JOIN_IN then we will stop outputting inner tuples
 * for an outer tuple as soon as we have one match. Account for the
 * effects of this by scaling down the cost estimates in proportion to
 * the expected output size. (This assumes that all the quals
 * attached to the join are IN quals, which should be true.)
 * If we're doing JOIN_IN then we will stop outputting inner tuples for an
 * outer tuple as soon as we have one match. Account for the effects of
 * this by scaling down the cost estimates in proportion to the expected
 * output size. (This assumes that all the quals attached to the join are
 * IN quals, which should be true.)
 */
joininfactor = join_in_selectivity(&path->jpath, root);

/*
 * The number of tuple comparisons needed is approximately number of
 * outer rows plus number of inner rows plus number of rescanned
 * tuples (can we refine this?). At each one, we need to evaluate the
 * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
 * so do NOT include joininfactor.
 * The number of tuple comparisons needed is approximately number of outer
 * rows plus number of inner rows plus number of rescanned tuples (can we
 * refine this?). At each one, we need to evaluate the mergejoin quals.
 * NOTE: JOIN_IN mode does not save any work here, so do NOT include
 * joininfactor.
 */
startup_cost += merge_qual_cost.startup;
run_cost += merge_qual_cost.per_tuple *
@@ -1253,9 +1244,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/*
 * For each tuple that gets through the mergejoin proper, we charge
 * cpu_tuple_cost plus the cost of evaluating additional restriction
 * clauses that are to be applied at the join. (This is pessimistic
 * since not all of the quals may get evaluated at each tuple.) This
 * work is skipped in JOIN_IN mode, so apply the factor.
 * clauses that are to be applied at the join. (This is pessimistic since
 * not all of the quals may get evaluated at each tuple.) This work is
 * skipped in JOIN_IN mode, so apply the factor.
 */
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@@ -1290,9 +1281,9 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
double outer_path_rows = PATH_ROWS(outer_path);
double inner_path_rows = PATH_ROWS(inner_path);
double outerbytes = relation_byte_size(outer_path_rows,
outer_path->parent->width);
outer_path->parent->width);
double innerbytes = relation_byte_size(inner_path_rows,
inner_path->parent->width);
inner_path->parent->width);
int num_hashclauses = list_length(hashclauses);
int numbuckets;
int numbatches;
@@ -1306,12 +1297,11 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)

/*
 * Compute cost and selectivity of the hashquals and qpquals (other
 * restriction clauses) separately. We use approx_selectivity here
 * for speed --- in most cases, any errors won't affect the result
 * much.
 * restriction clauses) separately. We use approx_selectivity here for
 * speed --- in most cases, any errors won't affect the result much.
 *
 * Note: it's probably bogus to use the normal selectivity calculation
 * here when either the outer or inner path is a UniquePath.
 * Note: it's probably bogus to use the normal selectivity calculation here
 * when either the outer or inner path is a UniquePath.
 */
hash_selec = approx_selectivity(root, hashclauses,
path->jpath.jointype);
@@ -1329,13 +1319,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
startup_cost += inner_path->total_cost;

/*
 * Cost of computing hash function: must do it once per input tuple.
 * We charge one cpu_operator_cost for each column's hash function.
 * Cost of computing hash function: must do it once per input tuple. We
 * charge one cpu_operator_cost for each column's hash function.
 *
 * XXX when a hashclause is more complex than a single operator, we
 * really should charge the extra eval costs of the left or right
 * side, as appropriate, here. This seems more work than it's worth
 * at the moment.
 * XXX when a hashclause is more complex than a single operator, we really
 * should charge the extra eval costs of the left or right side, as
 * appropriate, here. This seems more work than it's worth at the moment.
 */
startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
@@ -1345,17 +1334,17 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
inner_path->parent->width,
&numbuckets,
&numbatches);
virtualbuckets = (double) numbuckets * (double) numbatches;
virtualbuckets = (double) numbuckets *(double) numbatches;

/*
 * Determine bucketsize fraction for inner relation. We use the
 * smallest bucketsize estimated for any individual hashclause; this
 * is undoubtedly conservative.
 * Determine bucketsize fraction for inner relation. We use the smallest
 * bucketsize estimated for any individual hashclause; this is undoubtedly
 * conservative.
 *
 * BUT: if inner relation has been unique-ified, we can assume it's good
 * for hashing. This is important both because it's the right answer,
 * and because we avoid contaminating the cache with a value that's
 * wrong for non-unique-ified paths.
 * BUT: if inner relation has been unique-ified, we can assume it's good for
 * hashing. This is important both because it's the right answer, and
 * because we avoid contaminating the cache with a value that's wrong for
 * non-unique-ified paths.
 */
if (IsA(inner_path, UniquePath))
innerbucketsize = 1.0 / virtualbuckets;
@@ -1370,13 +1359,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
Assert(IsA(restrictinfo, RestrictInfo));

/*
 * First we have to figure out which side of the hashjoin
 * clause is the inner side.
 * First we have to figure out which side of the hashjoin clause
 * is the inner side.
 *
 * Since we tend to visit the same clauses over and over when
 * planning a large query, we cache the bucketsize estimate in
 * the RestrictInfo node to avoid repeated lookups of
 * statistics.
 * planning a large query, we cache the bucketsize estimate in the
 * RestrictInfo node to avoid repeated lookups of statistics.
 */
if (bms_is_subset(restrictinfo->right_relids,
inner_path->parent->relids))
@@ -1388,7 +1376,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
get_rightop(restrictinfo->clause),
get_rightop(restrictinfo->clause),
virtualbuckets);
restrictinfo->right_bucketsize = thisbucketsize;
}
@@ -1404,7 +1392,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
get_leftop(restrictinfo->clause),
get_leftop(restrictinfo->clause),
virtualbuckets);
restrictinfo->left_bucketsize = thisbucketsize;
}
@@ -1417,10 +1405,10 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)

/*
 * If inner relation is too big then we will need to "batch" the join,
 * which implies writing and reading most of the tuples to disk an
 * extra time. Charge one cost unit per page of I/O (correct since it
 * should be nice and sequential...). Writing the inner rel counts as
 * startup cost, all the rest as run cost.
 * which implies writing and reading most of the tuples to disk an extra
 * time. Charge one cost unit per page of I/O (correct since it should be
 * nice and sequential...). Writing the inner rel counts as startup cost,
 * all the rest as run cost.
 */
if (numbatches > 1)
{
@@ -1436,21 +1424,21 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* CPU costs */

/*
 * If we're doing JOIN_IN then we will stop comparing inner tuples to
 * an outer tuple as soon as we have one match. Account for the
 * effects of this by scaling down the cost estimates in proportion to
 * the expected output size. (This assumes that all the quals
 * attached to the join are IN quals, which should be true.)
 * If we're doing JOIN_IN then we will stop comparing inner tuples to an
 * outer tuple as soon as we have one match. Account for the effects of
 * this by scaling down the cost estimates in proportion to the expected
 * output size. (This assumes that all the quals attached to the join are
 * IN quals, which should be true.)
 */
joininfactor = join_in_selectivity(&path->jpath, root);

/*
 * The number of tuple comparisons needed is the number of outer
 * tuples times the typical number of tuples in a hash bucket, which
 * is the inner relation size times its bucketsize fraction. At each
 * one, we need to evaluate the hashjoin quals. (Note: charging the
 * full qual eval cost at each tuple is pessimistic, since we don't
 * evaluate the quals unless the hash values match exactly.)
 * The number of tuple comparisons needed is the number of outer tuples
 * times the typical number of tuples in a hash bucket, which is the inner
 * relation size times its bucketsize fraction. At each one, we need to
 * evaluate the hashjoin quals. (Note: charging the full qual eval cost
 * at each tuple is pessimistic, since we don't evaluate the quals unless
 * the hash values match exactly.)
 */
startup_cost += hash_qual_cost.startup;
run_cost += hash_qual_cost.per_tuple *
@@ -1460,8 +1448,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
 * For each tuple that gets through the hashjoin proper, we charge
 * cpu_tuple_cost plus the cost of evaluating additional restriction
 * clauses that are to be applied at the join. (This is pessimistic
 * since not all of the quals may get evaluated at each tuple.)
 * clauses that are to be applied at the join. (This is pessimistic since
 * not all of the quals may get evaluated at each tuple.)
 */
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@@ -1469,16 +1457,16 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)

/*
 * Bias against putting larger relation on inside. We don't want an
 * absolute prohibition, though, since larger relation might have
 * better bucketsize --- and we can't trust the size estimates
 * unreservedly, anyway. Instead, inflate the run cost by the square
 * root of the size ratio. (Why square root? No real good reason,
 * but it seems reasonable...)
 * absolute prohibition, though, since larger relation might have better
 * bucketsize --- and we can't trust the size estimates unreservedly,
 * anyway. Instead, inflate the run cost by the square root of the size
 * ratio. (Why square root? No real good reason, but it seems
 * reasonable...)
 *
 * Note: before 7.4 we implemented this by inflating startup cost; but if
 * there's a disable_cost component in the input paths' startup cost,
 * that unfairly penalizes the hash. Probably it'd be better to keep
 * track of disable penalty separately from cost.
 * there's a disable_cost component in the input paths' startup cost, that
 * unfairly penalizes the hash. Probably it'd be better to keep track of
 * disable penalty separately from cost.
 */
if (innerbytes > outerbytes && outerbytes > 0)
run_cost *= sqrt(innerbytes / outerbytes);
@@ -1545,13 +1533,13 @@ cost_qual_eval_walker(Node *node, QualCost *total)
return false;

/*
 * Our basic strategy is to charge one cpu_operator_cost for each
 * operator or function node in the given tree. Vars and Consts are
 * charged zero, and so are boolean operators (AND, OR, NOT).
 * Simplistic, but a lot better than no model at all.
 * Our basic strategy is to charge one cpu_operator_cost for each operator
 * or function node in the given tree. Vars and Consts are charged zero,
 * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
 * better than no model at all.
 *
 * Should we try to account for the possibility of short-circuit
 * evaluation of AND/OR?
 * Should we try to account for the possibility of short-circuit evaluation
 * of AND/OR?
 */
if (IsA(node, FuncExpr) ||
IsA(node, OpExpr) ||
@@ -1572,12 +1560,12 @@ cost_qual_eval_walker(Node *node, QualCost *total)
{
/*
 * A subplan node in an expression typically indicates that the
 * subplan will be executed on each evaluation, so charge
 * accordingly. (Sub-selects that can be executed as InitPlans
 * have already been removed from the expression.)
 * subplan will be executed on each evaluation, so charge accordingly.
 * (Sub-selects that can be executed as InitPlans have already been
 * removed from the expression.)
 *
 * An exception occurs when we have decided we can implement the
 * subplan by hashing.
 * An exception occurs when we have decided we can implement the subplan
 * by hashing.
 *
 */
SubPlan *subplan = (SubPlan *) node;
@@ -1586,32 +1574,31 @@ cost_qual_eval_walker(Node *node, QualCost *total)
if (subplan->useHashTable)
{
/*
 * If we are using a hash table for the subquery outputs, then
 * the cost of evaluating the query is a one-time cost. We
 * charge one cpu_operator_cost per tuple for the work of
 * loading the hashtable, too.
 * If we are using a hash table for the subquery outputs, then the
 * cost of evaluating the query is a one-time cost. We charge one
 * cpu_operator_cost per tuple for the work of loading the
 * hashtable, too.
 */
total->startup += plan->total_cost +
cpu_operator_cost * plan->plan_rows;

/*
 * The per-tuple costs include the cost of evaluating the
 * lefthand expressions, plus the cost of probing the
 * hashtable. Recursion into the exprs list will handle the
 * lefthand expressions properly, and will count one
 * cpu_operator_cost for each comparison operator. That is
 * probably too low for the probing cost, but it's hard to
 * make a better estimate, so live with it for now.
 * The per-tuple costs include the cost of evaluating the lefthand
 * expressions, plus the cost of probing the hashtable. Recursion
 * into the exprs list will handle the lefthand expressions
 * properly, and will count one cpu_operator_cost for each
 * comparison operator. That is probably too low for the probing
 * cost, but it's hard to make a better estimate, so live with it
 * for now.
 */
}
else
{
/*
 * Otherwise we will be rescanning the subplan output on each
 * evaluation. We need to estimate how much of the output we
 * will actually need to scan. NOTE: this logic should agree
 * with the estimates used by make_subplan() in
 * plan/subselect.c.
 * evaluation. We need to estimate how much of the output we will
 * actually need to scan. NOTE: this logic should agree with the
 * estimates used by make_subplan() in plan/subselect.c.
 */
Cost plan_run_cost = plan->total_cost - plan->startup_cost;

@@ -1636,10 +1623,10 @@ cost_qual_eval_walker(Node *node, QualCost *total)

/*
 * Also account for subplan's startup cost. If the subplan is
 * uncorrelated or undirect correlated, AND its topmost node
 * is a Sort or Material node, assume that we'll only need to
 * pay its startup cost once; otherwise assume we pay the
 * startup cost every time.
 * uncorrelated or undirect correlated, AND its topmost node is a
 * Sort or Material node, assume that we'll only need to pay its
 * startup cost once; otherwise assume we pay the startup cost
 * every time.
 */
if (subplan->parParam == NIL &&
(IsA(plan, Sort) ||
@@ -1761,9 +1748,9 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,

/*
 * Compute joinclause selectivity. Note that we are only considering
 * clauses that become restriction clauses at this join level; we are
 * not double-counting them because they were not considered in
 * estimating the sizes of the component rels.
 * clauses that become restriction clauses at this join level; we are not
 * double-counting them because they were not considered in estimating the
 * sizes of the component rels.
 */
selec = clauselist_selectivity(root,
restrictlist,
@@ -1773,13 +1760,13 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
/*
 * Basically, we multiply size of Cartesian product by selectivity.
 *
 * If we are doing an outer join, take that into account: the output must
 * be at least as large as the non-nullable input. (Is there any
 * chance of being even smarter?)
 * If we are doing an outer join, take that into account: the output must be
 * at least as large as the non-nullable input. (Is there any chance of
 * being even smarter?)
 *
 * For JOIN_IN and variants, the Cartesian product is figured with
 * respect to a unique-ified input, and then we can clamp to the size
 * of the other input.
 * For JOIN_IN and variants, the Cartesian product is figured with respect to
 * a unique-ified input, and then we can clamp to the size of the other
 * input.
 */
switch (jointype)
{
@@ -1848,12 +1835,11 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root)
return 1.0;

/*
 * Return 1.0 if the inner side is already known unique. The case
 * where the inner path is already a UniquePath probably cannot happen
 * in current usage, but check it anyway for completeness. The
 * interesting case is where we've determined the inner relation
 * itself is unique, which we can check by looking at the rows
 * estimate for its UniquePath.
 * Return 1.0 if the inner side is already known unique. The case where
 * the inner path is already a UniquePath probably cannot happen in
 * current usage, but check it anyway for completeness. The interesting
 * case is where we've determined the inner relation itself is unique,
 * which we can check by looking at the rows estimate for its UniquePath.
 */
if (IsA(path->innerjoinpath, UniquePath))
return 1.0;
@@ -1866,10 +1852,9 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root)

/*
 * Compute same result set_joinrel_size_estimates would compute for
 * JOIN_INNER. Note that we use the input rels' absolute size
 * estimates, not PATH_ROWS() which might be less; if we used
 * PATH_ROWS() we'd be double-counting the effects of any join clauses
 * used in input scans.
 * JOIN_INNER. Note that we use the input rels' absolute size estimates,
 * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
 * double-counting the effects of any join clauses used in input scans.
 */
selec = clauselist_selectivity(root,
path->joinrestrictinfo,
@@ -1908,8 +1893,8 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
/*
 * Estimate number of rows the function itself will return.
 *
 * XXX no idea how to do this yet; but we can at least check whether
 * function returns set or not...
 * XXX no idea how to do this yet; but we can at least check whether function
 * returns set or not...
 */
if (expression_returns_set(rte->funcexpr))
rel->tuples = 1000;
@@ -1957,8 +1942,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
ndx = var->varattno - rel->min_attr;

/*
 * The width probably hasn't been cached yet, but may as well
 * check
 * The width probably hasn't been cached yet, but may as well check
 */
if (rel->attr_widths[ndx] > 0)
{

@@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
 * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.190 2005/09/24 22:54:36 tgl Exp $
 * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191 2005/10/15 02:49:19 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -48,9 +48,9 @@

static List *find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
List *clauses, List *outer_clauses,
bool istoplevel, bool isjoininner,
Relids outer_relids);
List *clauses, List *outer_clauses,
bool istoplevel, bool isjoininner,
Relids outer_relids);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths);
static int bitmap_path_comparator(const void *a, const void *b);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths);
@@ -62,25 +62,25 @@ static Oid indexable_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Relids indexable_outerrelids(RelOptInfo *rel);
static bool matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel,
Relids outer_relids);
Relids outer_relids);
static List *find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
Relids outer_relids, bool isouterjoin);
Relids outer_relids, bool isouterjoin);
static ScanDirection match_variant_ordering(PlannerInfo *root,
IndexOptInfo *index,
List *restrictclauses);
IndexOptInfo *index,
List *restrictclauses);
static List *identify_ignorable_ordering_cols(PlannerInfo *root,
IndexOptInfo *index,
List *restrictclauses);
IndexOptInfo *index,
List *restrictclauses);
static bool match_index_to_query_keys(PlannerInfo *root,
IndexOptInfo *index,
ScanDirection indexscandir,
List *ignorables);
IndexOptInfo *index,
ScanDirection indexscandir,
List *ignorables);
static bool match_boolean_index_clause(Node *clause, int indexcol,
IndexOptInfo *index);
IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Expr *expand_boolean_index_clause(Node *clause, int indexcol,
IndexOptInfo *index);
IndexOptInfo *index);
static List *expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass);
static List *prefix_quals(Node *leftop, Oid opclass,
Const *prefix, Pattern_Prefix_Status pstatus);
@@ -153,8 +153,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
true, false, NULL);

/*
 * We can submit them all to add_path. (This generates access paths for
 * plain IndexScan plans.) However, for the next step we will only want
 * We can submit them all to add_path. (This generates access paths for
 * plain IndexScan plans.) However, for the next step we will only want
 * the ones that have some selectivity; we must discard anything that was
 * generated solely for ordering purposes.
 */
@@ -180,8 +180,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
bitindexpaths = list_concat(bitindexpaths, indexpaths);

/*
 * If we found anything usable, generate a BitmapHeapPath for the
 * most promising combination of bitmap index paths.
 * If we found anything usable, generate a BitmapHeapPath for the most
 * promising combination of bitmap index paths.
 */
if (bitindexpaths != NIL)
{
@@ -254,19 +254,19 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
bool index_is_ordered;

/*
 * Ignore partial indexes that do not match the query. If a partial
 * index is marked predOK then we know it's OK; otherwise, if we
 * are at top level we know it's not OK (since predOK is exactly
 * whether its predicate could be proven from the toplevel clauses).
 * Otherwise, we have to test whether the added clauses are
 * sufficient to imply the predicate. If so, we could use
 * the index in the current context.
 * Ignore partial indexes that do not match the query. If a partial
 * index is marked predOK then we know it's OK; otherwise, if we are
 * at top level we know it's not OK (since predOK is exactly whether
 * its predicate could be proven from the toplevel clauses).
 * Otherwise, we have to test whether the added clauses are sufficient
 * to imply the predicate. If so, we could use the index in the
 * current context.
 *
 * We set useful_predicate to true iff the predicate was proven
 * using the current set of clauses. This is needed to prevent
 * matching a predOK index to an arm of an OR, which would be
 * a legal but pointlessly inefficient plan. (A better plan will
 * be generated by just scanning the predOK index alone, no OR.)
 * We set useful_predicate to true iff the predicate was proven using the
 * current set of clauses. This is needed to prevent matching a
 * predOK index to an arm of an OR, which would be a legal but
 * pointlessly inefficient plan. (A better plan will be generated by
 * just scanning the predOK index alone, no OR.)
 */
useful_predicate = false;
if (index->indpred != NIL)
@@ -282,7 +282,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
else
{
if (istoplevel)
continue; /* no point in trying to prove it */
continue; /* no point in trying to prove it */

/* Form all_clauses if not done already */
if (all_clauses == NIL)
@@ -290,7 +290,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
outer_clauses);

if (!predicate_implied_by(index->indpred, all_clauses))
continue; /* can't use it at all */
continue; /* can't use it at all */

if (!predicate_implied_by(index->indpred, outer_clauses))
useful_predicate = true;
@@ -309,17 +309,17 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
&found_clause);

/*
 * Not all index AMs support scans with no restriction clauses.
 * We can't generate a scan over an index with amoptionalkey = false
 * Not all index AMs support scans with no restriction clauses. We
 * can't generate a scan over an index with amoptionalkey = false
 * unless there's at least one restriction clause.
 */
if (restrictclauses == NIL && !index->amoptionalkey)
continue;

/*
 * 2. Compute pathkeys describing index's ordering, if any, then
 * see how many of them are actually useful for this query. This
 * is not relevant unless we are at top level.
 * 2. Compute pathkeys describing index's ordering, if any, then see
 * how many of them are actually useful for this query. This is not
 * relevant unless we are at top level.
 */
index_is_ordered = OidIsValid(index->ordering[0]);
if (istoplevel && index_is_ordered && !isjoininner)
@@ -335,9 +335,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
/*
 * 3. Generate an indexscan path if there are relevant restriction
 * clauses in the current clauses, OR the index ordering is
 * potentially useful for later merging or final output ordering,
 * OR the index has a predicate that was proven by the current
 * clauses.
 * potentially useful for later merging or final output ordering, OR
 * the index has a predicate that was proven by the current clauses.
 */
if (found_clause || useful_pathkeys != NIL || useful_predicate)
{
@@ -352,16 +351,15 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
}

/*
 * 4. If the index is ordered, and there is a requested query
 * ordering that we failed to match, consider variant ways of
 * achieving the ordering. Again, this is only interesting
 * at top level.
 * 4. If the index is ordered, and there is a requested query ordering
 * that we failed to match, consider variant ways of achieving the
 * ordering. Again, this is only interesting at top level.
 */
if (istoplevel && index_is_ordered && !isjoininner &&
root->query_pathkeys != NIL &&
pathkeys_useful_for_ordering(root, useful_pathkeys) == 0)
{
ScanDirection scandir;
ScanDirection scandir;

scandir = match_variant_ordering(root, index, restrictclauses);
if (!ScanDirectionIsNoMovement(scandir))
@@ -409,9 +407,9 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
foreach(l, clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
List *pathlist;
Path *bitmapqual;
ListCell *j;
List *pathlist;
Path *bitmapqual;
ListCell *j;

Assert(IsA(rinfo, RestrictInfo));
/* Ignore RestrictInfos that aren't ORs */
@@ -419,19 +417,19 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
continue;

/*
 * We must be able to match at least one index to each of the arms
 * of the OR, else we can't use it.
 * We must be able to match at least one index to each of the arms of
 * the OR, else we can't use it.
 */
pathlist = NIL;
foreach(j, ((BoolExpr *) rinfo->orclause)->args)
{
Node *orarg = (Node *) lfirst(j);
List *indlist;
Node *orarg = (Node *) lfirst(j);
List *indlist;

/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
{
List *andargs = ((BoolExpr *) orarg)->args;
List *andargs = ((BoolExpr *) orarg)->args;

indlist = find_usable_indexes(root, rel,
andargs,
@@ -458,25 +456,28 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
isjoininner,
outer_relids);
}

/*
 * If nothing matched this arm, we can't do anything
 * with this OR clause.
 * If nothing matched this arm, we can't do anything with this OR
 * clause.
 */
if (indlist == NIL)
{
pathlist = NIL;
break;
}

/*
 * OK, pick the most promising AND combination,
 * and add it to pathlist.
 * OK, pick the most promising AND combination, and add it to
 * pathlist.
 */
bitmapqual = choose_bitmap_and(root, rel, indlist);
pathlist = lappend(pathlist, bitmapqual);
}

/*
 * If we have a match for every arm, then turn them
 * into a BitmapOrPath, and add to result list.
 * If we have a match for every arm, then turn them into a
 * BitmapOrPath, and add to result list.
 */
if (pathlist != NIL)
{
@@ -494,7 +495,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
 * Given a nonempty list of bitmap paths, AND them into one path.
 *
 * This is a nontrivial decision since we can legally use any subset of the
 * given path set. We want to choose a good tradeoff between selectivity
 * given path set. We want to choose a good tradeoff between selectivity
 * and cost of computing the bitmap.
 *
 * The result is either a single one of the inputs, or a BitmapAndPath
@@ -511,7 +512,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
int i;
ListCell *l;

Assert(npaths > 0); /* else caller error */
Assert(npaths > 0); /* else caller error */
if (npaths == 1)
return (Path *) linitial(paths); /* easy case */

@@ -519,24 +520,23 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
 * In theory we should consider every nonempty subset of the given paths.
 * In practice that seems like overkill, given the crude nature of the
 * estimates, not to mention the possible effects of higher-level AND and
 * OR clauses. As a compromise, we sort the paths by selectivity.
 * We always take the first, and sequentially add on paths that result
 * in a lower estimated cost.
 * OR clauses. As a compromise, we sort the paths by selectivity. We
 * always take the first, and sequentially add on paths that result in a
 * lower estimated cost.
 *
 * We also make some effort to detect directly redundant input paths,
 * as can happen if there are multiple possibly usable indexes. For
 * this we look only at plain IndexPath inputs, not at sub-OR clauses.
 * And we consider an index redundant if all its index conditions were
 * already used by earlier indexes. (We could use predicate_implied_by
 * to have a more intelligent, but much more expensive, check --- but in
 * most cases simple pointer equality should suffice, since after all the
 * index conditions are all coming from the same RestrictInfo lists.)
 * We also make some effort to detect directly redundant input paths, as can
 * happen if there are multiple possibly usable indexes. For this we look
 * only at plain IndexPath inputs, not at sub-OR clauses. And we consider
 * an index redundant if all its index conditions were already used by
 * earlier indexes. (We could use predicate_implied_by to have a more
 * intelligent, but much more expensive, check --- but in most cases
 * simple pointer equality should suffice, since after all the index
 * conditions are all coming from the same RestrictInfo lists.)
 *
 * XXX is there any risk of throwing away a useful partial index here
 * because we don't explicitly look at indpred? At least in simple
 * cases, the partial index will sort before competing non-partial
 * indexes and so it makes the right choice, but perhaps we need to
 * work harder.
 * XXX is there any risk of throwing away a useful partial index here because
 * we don't explicitly look at indpred? At least in simple cases, the
 * partial index will sort before competing non-partial indexes and so it
 * makes the right choice, but perhaps we need to work harder.
 *
 * Note: outputting the selected sub-paths in selectivity order is a good
 * thing even if we weren't using that as part of the selection method,
@@ -559,13 +559,13 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
qualsofar = list_copy(((IndexPath *) patharray[0])->indexclauses);
else
qualsofar = NIL;
lastcell = list_head(paths); /* for quick deletions */
lastcell = list_head(paths); /* for quick deletions */

for (i = 1; i < npaths; i++)
{
Path *newpath = patharray[i];
List *newqual = NIL;
Cost newcost;
Path *newpath = patharray[i];
List *newqual = NIL;
Cost newcost;

if (IsA(newpath, IndexPath))
{
@@ -599,12 +599,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
static int
bitmap_path_comparator(const void *a, const void *b)
{
Path *pa = *(Path * const *) a;
Path *pb = *(Path * const *) b;
Path *pa = *(Path *const *) a;
Path *pb = *(Path *const *) b;
Cost acost;
Cost bcost;
Selectivity aselec;
Selectivity bselec;
Selectivity aselec;
Selectivity bselec;

cost_bitmap_tree_node(pa, &acost, &aselec);
cost_bitmap_tree_node(pb, &bcost, &bselec);
@@ -660,7 +660,7 @@ bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths)
 *
 * We can use clauses from either the current clauses or outer_clauses lists,
 * but *found_clause is set TRUE only if we used at least one clause from
 * the "current clauses" list. See find_usable_indexes() for motivation.
 * the "current clauses" list. See find_usable_indexes() for motivation.
 *
 * outer_relids determines what Vars will be allowed on the other side
 * of a possible index qual; see match_clause_to_indexcol().
@@ -770,7 +770,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
 * to the caller-specified outer_relids relations (which had better not
 * include the relation whose index is being tested). outer_relids should
 * be NULL when checking simple restriction clauses, and the outer side
 * of the join when building a join inner scan. Other than that, the
 * of the join when building a join inner scan. Other than that, the
 * only thing we don't like is volatile functions.
 *
 * Note: in most cases we already know that the clause as a whole uses
@@ -836,8 +836,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;

/*
 * If we didn't find a member of the index's opclass, see whether
 * it is a "special" indexable operator.
 * If we didn't find a member of the index's opclass, see whether it
 * is a "special" indexable operator.
 */
if (match_special_index_operator(clause, opclass, true))
return true;
@@ -852,8 +852,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;

/*
 * If we didn't find a member of the index's opclass, see whether
 * it is a "special" indexable operator.
 * If we didn't find a member of the index's opclass, see whether it
 * is a "special" indexable operator.
 */
if (match_special_index_operator(clause, opclass, false))
return true;
@@ -914,14 +914,14 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
 * Note: if Postgres tried to optimize queries by forming equivalence
 * classes over equi-joined attributes (i.e., if it recognized that a
 * qualification such as "where a.b=c.d and a.b=5" could make use of
 * an index on c.d), then we could use that equivalence class info
 * here with joininfo lists to do more complete tests for the usability
 * of a partial index. For now, the test only uses restriction
 * clauses (those in baserestrictinfo). --Nels, Dec '92
 * qualification such as "where a.b=c.d and a.b=5" could make use of an
 * index on c.d), then we could use that equivalence class info here with
 * joininfo lists to do more complete tests for the usability of a partial
 * index. For now, the test only uses restriction clauses (those in
 * baserestrictinfo). --Nels, Dec '92
 *
 * XXX as of 7.1, equivalence class info *is* available. Consider
 * improving this code as foreseen by Nels.
 * XXX as of 7.1, equivalence class info *is* available. Consider improving
 * this code as foreseen by Nels.
 */

foreach(ilist, rel->indexlist)
@@ -943,7 +943,7 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
 * indexable_outerrelids
 * Finds all other relids that participate in any indexable join clause
 * for the specified table. Returns a set of relids.
 * for the specified table. Returns a set of relids.
 */
static Relids
indexable_outerrelids(RelOptInfo *rel)
@@ -958,7 +958,7 @@ indexable_outerrelids(RelOptInfo *rel)
foreach(l, rel->joininfo)
{
RestrictInfo *joininfo = (RestrictInfo *) lfirst(l);
Relids other_rels;
Relids other_rels;

other_rels = bms_difference(joininfo->required_relids, rel->relids);
if (matches_any_index(joininfo, rel, other_rels))
@@ -986,7 +986,7 @@ matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel, Relids outer_relids)
{
foreach(l, ((BoolExpr *) rinfo->orclause)->args)
{
Node *orarg = (Node *) lfirst(l);
Node *orarg = (Node *) lfirst(l);

/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
@@ -1092,17 +1092,17 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
return NULL;

/*
 * Otherwise, we have to do path selection in the memory context of
 * the given rel, so that any created path can be safely attached to
 * the rel's cache of best inner paths. (This is not currently an
 * issue for normal planning, but it is an issue for GEQO planning.)
 * Otherwise, we have to do path selection in the memory context of the
 * given rel, so that any created path can be safely attached to the rel's
 * cache of best inner paths. (This is not currently an issue for normal
 * planning, but it is an issue for GEQO planning.)
 */
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));

/*
 * Intersect the given outer_relids with index_outer_relids to find
 * the set of outer relids actually relevant for this rel. If there
 * are none, again we can fail immediately.
 * Intersect the given outer_relids with index_outer_relids to find the
 * set of outer relids actually relevant for this rel. If there are none,
 * again we can fail immediately.
 */
outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
if (bms_is_empty(outer_relids))
@@ -1113,11 +1113,10 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
}

/*
 * Look to see if we already computed the result for this set of
 * relevant outerrels. (We include the isouterjoin status in the
 * cache lookup key for safety. In practice I suspect this is not
 * necessary because it should always be the same for a given
 * innerrel.)
 * Look to see if we already computed the result for this set of relevant
 * outerrels. (We include the isouterjoin status in the cache lookup key
 * for safety. In practice I suspect this is not necessary because it
 * should always be the same for a given innerrel.)
 */
foreach(l, rel->index_inner_paths)
{
@@ -1160,8 +1159,8 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
bitindexpaths = list_concat(bitindexpaths, list_copy(indexpaths));

/*
 * If we found anything usable, generate a BitmapHeapPath for the
 * most promising combination of bitmap index paths.
 * If we found anything usable, generate a BitmapHeapPath for the most
 * promising combination of bitmap index paths.
 */
if (bitindexpaths != NIL)
{
@@ -1218,12 +1217,11 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
ListCell *l;

/*
 * We can always use plain restriction clauses for the rel. We
 * scan these first because we want them first in the clause
 * list for the convenience of remove_redundant_join_clauses,
 * which can never remove non-join clauses and hence won't be able
 * to get rid of a non-join clause if it appears after a join
 * clause it is redundant with.
 * We can always use plain restriction clauses for the rel. We scan these
 * first because we want them first in the clause list for the convenience
 * of remove_redundant_join_clauses, which can never remove non-join
 * clauses and hence won't be able to get rid of a non-join clause if it
 * appears after a join clause it is redundant with.
 */
foreach(l, rel->baserestrictinfo)
{
@@ -1305,7 +1303,7 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
 *
 * If able to match the requested query pathkeys, returns either
 * ForwardScanDirection or BackwardScanDirection to indicate the proper index
 * scan direction. If no match, returns NoMovementScanDirection.
 * scan direction. If no match, returns NoMovementScanDirection.
 */
static ScanDirection
match_variant_ordering(PlannerInfo *root,
@@ -1318,8 +1316,8 @@ match_variant_ordering(PlannerInfo *root,
 * Forget the whole thing if not a btree index; our check for ignorable
 * columns assumes we are dealing with btree opclasses. (It'd be possible
 * to factor out just the try for backwards indexscan, but considering
 * that we presently have no orderable indexes except btrees anyway,
 * it's hardly worth contorting this code for that case.)
 * that we presently have no orderable indexes except btrees anyway, it's
 * hardly worth contorting this code for that case.)
 *
 * Note: if you remove this, you probably need to put in a check on
 * amoptionalkey to prevent possible clauseless scan on an index that
@@ -1327,17 +1325,19 @@ match_variant_ordering(PlannerInfo *root,
 */
if (index->relam != BTREE_AM_OID)
return NoMovementScanDirection;

/*
 * Figure out which index columns can be optionally ignored because
 * they have an equality constraint. This is the same set for either
 * forward or backward scan, so we do it just once.
 * Figure out which index columns can be optionally ignored because they
 * have an equality constraint. This is the same set for either forward
 * or backward scan, so we do it just once.
 */
ignorables = identify_ignorable_ordering_cols(root, index,
restrictclauses);

/*
 * Try to match to forward scan, then backward scan. However, we can
 * skip the forward-scan case if there are no ignorable columns,
 * because find_usable_indexes() would have found the match already.
 * Try to match to forward scan, then backward scan. However, we can skip
 * the forward-scan case if there are no ignorable columns, because
 * find_usable_indexes() would have found the match already.
 */
if (ignorables &&
match_index_to_query_keys(root, index, ForwardScanDirection,
@@ -1365,24 +1365,24 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
List *restrictclauses)
{
List *result = NIL;
int indexcol = 0; /* note this is 0-based */
int indexcol = 0; /* note this is 0-based */
ListCell *l;

/* restrictclauses is either NIL or has a sublist per column */
foreach(l, restrictclauses)
{
List *sublist = (List *) lfirst(l);
Oid opclass = index->classlist[indexcol];
ListCell *l2;
List *sublist = (List *) lfirst(l);
Oid opclass = index->classlist[indexcol];
ListCell *l2;

foreach(l2, sublist)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l2);
OpExpr *clause = (OpExpr *) rinfo->clause;
Oid clause_op;
int op_strategy;
bool varonleft;
bool ispc;
Oid clause_op;
int op_strategy;
bool varonleft;
bool ispc;

/* We know this clause passed match_clause_to_indexcol */

@@ -1393,11 +1393,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
index))
{
/*
 * The clause means either col = TRUE or col = FALSE;
 * we do not care which, it's an equality constraint
 * either way.
 * The clause means either col = TRUE or col = FALSE; we
 * do not care which, it's an equality constraint either
 * way.
 */
result = lappend_int(result, indexcol+1);
result = lappend_int(result, indexcol + 1);
break;
}
}
@@ -1426,12 +1426,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
op_strategy = get_op_opclass_strategy(clause_op, opclass);

/*
 * You might expect to see Assert(op_strategy != 0) here,
 * but you won't: the clause might contain a special indexable
 * operator rather than an ordinary opclass member. Currently
 * none of the special operators are very likely to expand to
 * an equality operator; we do not bother to check, but just
 * assume no match.
 * You might expect to see Assert(op_strategy != 0) here, but you
 * won't: the clause might contain a special indexable operator
 * rather than an ordinary opclass member. Currently none of the
 * special operators are very likely to expand to an equality
 * operator; we do not bother to check, but just assume no match.
 */
if (op_strategy != BTEqualStrategyNumber)
continue;
@@ -1445,7 +1444,7 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
rinfo->left_relids);
if (ispc)
{
result = lappend_int(result, indexcol+1);
result = lappend_int(result, indexcol + 1);
break;
}
}
@@ -1480,8 +1479,8 @@ match_index_to_query_keys(PlannerInfo *root,
index_pathkeys = build_index_pathkeys(root, index, indexscandir);

/*
 * Can we match to the query's requested pathkeys? The inner loop
 * skips over ignorable index columns while trying to match.
 * Can we match to the query's requested pathkeys? The inner loop skips
 * over ignorable index columns while trying to match.
 */
index_cell = list_head(index_pathkeys);
index_col = 0;
@@ -1492,13 +1491,14 @@ match_index_to_query_keys(PlannerInfo *root,

for (;;)
{
List *isubkey;
List *isubkey;

if (index_cell == NULL)
return false;
isubkey = (List *) lfirst(index_cell);
|
||||
index_cell = lnext(index_cell);
|
||||
index_col++; /* index_col is now 1-based */
|
||||
|
||||
/*
|
||||
* Since we are dealing with canonicalized pathkeys, pointer
|
||||
* comparison is sufficient to determine a match.
|
||||
@@ -1561,9 +1561,9 @@ match_index_to_operand(Node *operand,
|
||||
int indkey;
|
||||
|
||||
/*
|
||||
* Ignore any RelabelType node above the operand. This is needed to
|
||||
* be able to apply indexscanning in binary-compatible-operator cases.
|
||||
* Note: we can assume there is at most one RelabelType node;
|
||||
* Ignore any RelabelType node above the operand. This is needed to be
|
||||
* able to apply indexscanning in binary-compatible-operator cases. Note:
|
||||
* we can assume there is at most one RelabelType node;
|
||||
* eval_const_expressions() will have simplified if more than one.
|
||||
*/
|
||||
if (operand && IsA(operand, RelabelType))
|
||||
@@ -1583,9 +1583,9 @@ match_index_to_operand(Node *operand,
|
||||
else
|
||||
{
|
||||
/*
|
||||
* Index expression; find the correct expression. (This search
|
||||
* could be avoided, at the cost of complicating all the callers
|
||||
* of this routine; doesn't seem worth it.)
|
||||
* Index expression; find the correct expression. (This search could
|
||||
* be avoided, at the cost of complicating all the callers of this
|
||||
* routine; doesn't seem worth it.)
|
||||
*/
|
||||
ListCell *indexpr_item;
|
||||
int i;
|
||||
@@ -1645,7 +1645,7 @@ match_index_to_operand(Node *operand,
|
||||
*
|
||||
* Another thing that we do with this machinery is to provide special
|
||||
* smarts for "boolean" indexes (that is, indexes on boolean columns
|
||||
* that support boolean equality). We can transform a plain reference
|
||||
* that support boolean equality). We can transform a plain reference
|
||||
* to the indexkey into "indexkey = true", or "NOT indexkey" into
|
||||
* "indexkey = false", so as to make the expression indexable using the
|
||||
* regular index operators. (As of Postgres 8.1, we must do this here
|
||||
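Aside on the boolean-index machinery described in the hunk above: a minimal sketch of the rewrite it performs, assuming a hypothetical constructor make_bool_eq_clause(); match_index_to_operand(), not_clause() and get_notclausearg() are the real helpers used elsewhere in this file.

/*
 * Sketch only: treat a bare "indexkey" as "indexkey = true" and
 * "NOT indexkey" as "indexkey = false", so the boolean opclass's
 * ordinary '=' operator can serve as the indexqual.
 */
static Expr *
sketch_boolean_transform(Node *clause, int indexcol, IndexOptInfo *index)
{
	/* bare reference to the index key */
	if (match_index_to_operand(clause, indexcol, index))
		return make_bool_eq_clause((Expr *) clause, true);	/* hypothetical */

	/* negated reference to the index key */
	if (not_clause(clause))
	{
		Node	   *arg = (Node *) get_notclausearg((Expr *) clause);

		if (match_index_to_operand(arg, indexcol, index))
			return make_bool_eq_clause((Expr *) arg, false);	/* hypothetical */
	}
	return NULL;				/* not a boolean-index case */
}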
@@ -1696,14 +1696,15 @@ match_boolean_index_clause(Node *clause,
indexcol, index))
return true;
}

/*
* Since we only consider clauses at top level of WHERE, we can convert
* indexkey IS TRUE and indexkey IS FALSE to index searches as well.
* The different meaning for NULL isn't important.
* indexkey IS TRUE and indexkey IS FALSE to index searches as well. The
* different meaning for NULL isn't important.
*/
else if (clause && IsA(clause, BooleanTest))
{
BooleanTest *btest = (BooleanTest *) clause;
BooleanTest *btest = (BooleanTest *) clause;

if (btest->booltesttype == IS_TRUE ||
btest->booltesttype == IS_FALSE)
@@ -1737,8 +1738,8 @@ match_special_index_operator(Expr *clause, Oid opclass,

/*
* Currently, all known special operators require the indexkey on the
* left, but this test could be pushed into the switch statement if
* some are added that do not...
* left, but this test could be pushed into the switch statement if some
* are added that do not...
*/
if (!indexkey_on_left)
return false;
@@ -1760,12 +1761,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_LIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;

case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;

case OID_TEXT_ICLIKE_OP:
@@ -1773,7 +1774,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICLIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;

case OID_TEXT_REGEXEQ_OP:
@@ -1781,7 +1782,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_REGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;

case OID_TEXT_ICREGEXEQ_OP:
@@ -1789,7 +1790,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICREGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;

case OID_INET_SUB_OP:
@@ -1815,9 +1816,9 @@ match_special_index_operator(Expr *clause, Oid opclass,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
* We insist on the opclass being the specific one we expect, else we'd
* do the wrong thing if someone were to make a reverse-sort opclass
* with the same operators.
* We insist on the opclass being the specific one we expect, else we'd do
* the wrong thing if someone were to make a reverse-sort opclass with the
* same operators.
*/
switch (expr_op)
{
@@ -1906,7 +1907,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
/* First check for boolean cases */
if (IsBooleanOpclass(curClass))
{
Expr *boolqual;
Expr *boolqual;

boolqual = expand_boolean_index_clause((Node *) rinfo->clause,
indexcol,
@@ -1960,7 +1961,7 @@ expand_boolean_index_clause(Node *clause,
/* NOT clause? */
if (not_clause(clause))
{
Node *arg = (Node *) get_notclausearg((Expr *) clause);
Node *arg = (Node *) get_notclausearg((Expr *) clause);

/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@@ -1971,8 +1972,8 @@ expand_boolean_index_clause(Node *clause,
}
if (clause && IsA(clause, BooleanTest))
{
BooleanTest *btest = (BooleanTest *) clause;
Node *arg = (Node *) btest->arg;
BooleanTest *btest = (BooleanTest *) clause;
Node *arg = (Node *) btest->arg;

/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@@ -2007,6 +2008,7 @@ static List *
expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;

/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
@@ -2020,10 +2022,9 @@ expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
switch (expr_op)
{
/*
* LIKE and regex operators are not members of any index
* opclass, so if we find one in an indexqual list we can
* assume that it was accepted by
* match_special_index_operator().
* LIKE and regex operators are not members of any index opclass,
* so if we find one in an indexqual list we can assume that it
* was accepted by match_special_index_operator().
*/
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
@@ -2128,8 +2129,8 @@ prefix_quals(Node *leftop, Oid opclass,
}

/*
* If necessary, coerce the prefix constant to the right type. The
* given prefix constant is either text or bytea type.
* If necessary, coerce the prefix constant to the right type. The given
* prefix constant is either text or bytea type.
*/
if (prefix_const->consttype != datatype)
{
@@ -2139,11 +2140,11 @@ prefix_quals(Node *leftop, Oid opclass,
{
case TEXTOID:
prefix = DatumGetCString(DirectFunctionCall1(textout,
prefix_const->constvalue));
prefix_const->constvalue));
break;
case BYTEAOID:
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
prefix_const->constvalue));
prefix_const->constvalue));
break;
default:
elog(ERROR, "unexpected const type: %u",
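Aside: the prefix_quals() hunks above turn a fixed pattern prefix into ordinary btree range quals, e.g. col LIKE 'abc%' becomes col >= 'abc' AND col < 'abd'. A toy, self-contained sketch of the "next string" step, assuming a single-byte encoding, a non-empty prefix, and no byte overflow (the real code must handle all of those):

#include <stdlib.h>
#include <string.h>

/* least string greater than every string that starts with "prefix" */
static char *
sketch_greaterstr(const char *prefix)
{
	char	   *res = strdup(prefix);

	res[strlen(res) - 1]++;		/* bump the last byte */
	return res;					/* caller must free() */
}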

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.95 2005/06/05 22:32:55 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.96 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,9 +65,9 @@ add_paths_to_joinrel(PlannerInfo *root,

/*
* Find potential mergejoin clauses. We can skip this if we are not
* interested in doing a mergejoin. However, mergejoin is currently
* our only way of implementing full outer joins, so override
* mergejoin disable if it's a full join.
* interested in doing a mergejoin. However, mergejoin is currently our
* only way of implementing full outer joins, so override mergejoin
* disable if it's a full join.
*/
if (enable_mergejoin || jointype == JOIN_FULL)
mergeclause_list = select_mergejoin_clauses(joinrel,
@@ -95,23 +95,22 @@ add_paths_to_joinrel(PlannerInfo *root,

/*
* 3. Consider paths where the inner relation need not be explicitly
* sorted. This includes mergejoins only (nestloops were already
* built in match_unsorted_outer).
* sorted. This includes mergejoins only (nestloops were already built in
* match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
* significant difference between the inner and outer side of a
* mergejoin, so match_unsorted_inner creates no paths that aren't
* equivalent to those made by match_unsorted_outer when
* add_paths_to_joinrel() is invoked with the two rels given in the
* other order.
* significant difference between the inner and outer side of a mergejoin,
* so match_unsorted_inner creates no paths that aren't equivalent to
* those made by match_unsorted_outer when add_paths_to_joinrel() is
* invoked with the two rels given in the other order.
*/
match_unsorted_inner(root, joinrel, outerrel, innerrel,
restrictlist, mergeclause_list, jointype);
#endif

/*
* 4. Consider paths where both outer and inner relations must be
* hashed before being joined.
* 4. Consider paths where both outer and inner relations must be hashed
* before being joined.
*/
if (enable_hashjoin)
hash_inner_and_outer(root, joinrel, outerrel, innerrel,
@@ -174,11 +173,11 @@ sort_inner_and_outer(PlannerInfo *root,
/*
* We only consider the cheapest-total-cost input paths, since we are
* assuming here that a sort is required. We will consider
* cheapest-startup-cost input paths later, and only if they don't
* need a sort.
* cheapest-startup-cost input paths later, and only if they don't need a
* sort.
*
* If unique-ification is requested, do it and then handle as a plain
* inner join.
* If unique-ification is requested, do it and then handle as a plain inner
* join.
*/
outer_path = outerrel->cheapest_total_path;
inner_path = innerrel->cheapest_total_path;
@@ -194,31 +193,29 @@ sort_inner_and_outer(PlannerInfo *root,
}

/*
* Each possible ordering of the available mergejoin clauses will
* generate a differently-sorted result path at essentially the same
* cost. We have no basis for choosing one over another at this level
* of joining, but some sort orders may be more useful than others for
* higher-level mergejoins, so it's worth considering multiple
* orderings.
* Each possible ordering of the available mergejoin clauses will generate
* a differently-sorted result path at essentially the same cost. We have
* no basis for choosing one over another at this level of joining, but
* some sort orders may be more useful than others for higher-level
* mergejoins, so it's worth considering multiple orderings.
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
* redundant. Therefore, what we do is convert the mergeclause list
* to a list of canonical pathkeys, and then consider different
* orderings of the pathkeys.
* redundant. Therefore, what we do is convert the mergeclause list to a
* list of canonical pathkeys, and then consider different orderings of
* the pathkeys.
*
* Generating a path for *every* permutation of the pathkeys doesn't seem
* like a winning strategy; the cost in planning time is too high. For
* now, we generate one path for each pathkey, listing that pathkey
* first and the rest in random order. This should allow at least a
* one-clause mergejoin without re-sorting against any other possible
* mergejoin partner path. But if we've not guessed the right
* ordering of secondary keys, we may end up evaluating clauses as
* qpquals when they could have been done as mergeclauses. We need to
* figure out a better way. (Two possible approaches: look at all the
* relevant index relations to suggest plausible sort orders, or make
* just one output path and somehow mark it as having a sort-order
* that can be rearranged freely.)
* now, we generate one path for each pathkey, listing that pathkey first
* and the rest in random order. This should allow at least a one-clause
* mergejoin without re-sorting against any other possible mergejoin
* partner path. But if we've not guessed the right ordering of secondary
* keys, we may end up evaluating clauses as qpquals when they could have
* been done as mergeclauses. We need to figure out a better way. (Two
* possible approaches: look at all the relevant index relations to
* suggest plausible sort orders, or make just one output path and somehow
* mark it as having a sort-order that can be rearranged freely.)
*/
all_pathkeys = make_pathkeys_for_mergeclauses(root,
mergeclause_list,
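Aside on the one-ordering-per-pathkey heuristic above: each candidate ordering simply rotates a different pathkey to the front. A sketch using the pg_list.h primitives; all_pathkeys and orderings are illustrative locals, not the committed variables:

List	   *orderings = NIL;
int			npathkeys = list_length(all_pathkeys);
int			i;

for (i = 0; i < npathkeys; i++)
{
	/* rotate so that pathkey i comes first, the rest keep their order */
	List	   *front = list_copy_tail(all_pathkeys, i);
	List	   *back = list_truncate(list_copy(all_pathkeys), i);

	orderings = lappend(orderings, list_concat(front, back));
}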
@@ -243,26 +240,25 @@ sort_inner_and_outer(PlannerInfo *root,

/*
* Select mergeclause(s) that match this sort ordering. If we had
* redundant merge clauses then we will get a subset of the
* original clause list. There had better be some match,
* however...
* redundant merge clauses then we will get a subset of the original
* clause list. There had better be some match, however...
*/
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
cur_pathkeys,
mergeclause_list);
mergeclause_list);
Assert(cur_mergeclauses != NIL);

/* Forget it if can't use all the clauses in right/full join */
if (useallclauses &&
list_length(cur_mergeclauses) != list_length(mergeclause_list))
list_length(cur_mergeclauses) != list_length(mergeclause_list))
continue;

/*
* Build sort pathkeys for both sides.
*
* Note: it's possible that the cheapest paths will already be sorted
* properly. create_mergejoin_path will detect that case and
* suppress an explicit sort step, so we needn't do so here.
* properly. create_mergejoin_path will detect that case and suppress
* an explicit sort step, so we needn't do so here.
*/
outerkeys = make_pathkeys_for_mergeclauses(root,
cur_mergeclauses,
@@ -343,10 +339,10 @@ match_unsorted_outer(PlannerInfo *root,

/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
* doing a right or full join, we must use *all* the mergeclauses as
* join clauses, else we will not have a valid plan. (Although these
* two flags are currently inverses, keep them separate for clarity
* and possible future changes.)
* doing a right or full join, we must use *all* the mergeclauses as join
* clauses, else we will not have a valid plan. (Although these two flags
* are currently inverses, keep them separate for clarity and possible
* future changes.)
*/
switch (jointype)
{
@@ -385,10 +381,9 @@ match_unsorted_outer(PlannerInfo *root,
else if (nestjoinOK)
{
/*
* If the cheapest inner path is a join or seqscan, we should
* consider materializing it. (This is a heuristic: we could
* consider it always, but for inner indexscans it's probably a
* waste of time.)
* If the cheapest inner path is a join or seqscan, we should consider
* materializing it. (This is a heuristic: we could consider it
* always, but for inner indexscans it's probably a waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, BitmapHeapPath) ||
@@ -397,8 +392,8 @@ match_unsorted_outer(PlannerInfo *root,
create_material_path(innerrel, inner_cheapest_total);

/*
* Get the best innerjoin indexpath (if any) for this outer rel.
* It's the same for all outer paths.
* Get the best innerjoin indexpath (if any) for this outer rel. It's
* the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
@@ -417,8 +412,8 @@ match_unsorted_outer(PlannerInfo *root,
int sortkeycnt;

/*
* If we need to unique-ify the outer path, it's pointless to
* consider any but the cheapest outer.
* If we need to unique-ify the outer path, it's pointless to consider
* any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -429,9 +424,9 @@ match_unsorted_outer(PlannerInfo *root,
}

/*
* The result will have this sort order (even if it is implemented
* as a nestloop, and even if some of the mergeclauses are
* implemented by qpquals rather than as true mergeclauses):
* The result will have this sort order (even if it is implemented as
* a nestloop, and even if some of the mergeclauses are implemented by
* qpquals rather than as true mergeclauses):
*/
merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
outerpath->pathkeys);
@@ -516,9 +511,9 @@ match_unsorted_outer(PlannerInfo *root,
innerrel);

/*
* Generate a mergejoin on the basis of sorting the cheapest
* inner. Since a sort will be needed, only cheapest total cost
* matters. (But create_mergejoin_path will do the right thing if
* Generate a mergejoin on the basis of sorting the cheapest inner.
* Since a sort will be needed, only cheapest total cost matters.
* (But create_mergejoin_path will do the right thing if
* inner_cheapest_total is already correctly sorted.)
*/
add_path(joinrel, (Path *)
@@ -538,10 +533,10 @@ match_unsorted_outer(PlannerInfo *root,
continue;

/*
* Look for presorted inner paths that satisfy the innersortkey
* list --- or any truncation thereof, if we are allowed to build
* a mergejoin using a subset of the merge clauses. Here, we
* consider both cheap startup cost and cheap total cost. Ignore
* Look for presorted inner paths that satisfy the innersortkey list
* --- or any truncation thereof, if we are allowed to build a
* mergejoin using a subset of the merge clauses. Here, we consider
* both cheap startup cost and cheap total cost. Ignore
* inner_cheapest_total, since we already made a path with it.
*/
num_sortkeys = list_length(innersortkeys);
@@ -559,8 +554,8 @@ match_unsorted_outer(PlannerInfo *root,

/*
* Look for an inner path ordered well enough for the first
* 'sortkeycnt' innersortkeys. NB: trialsortkeys list is
* modified destructively, which is why we made a copy...
* 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
* destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
@@ -611,8 +606,8 @@ match_unsorted_outer(PlannerInfo *root,
if (innerpath != cheapest_total_inner)
{
/*
* Avoid rebuilding clause list if we already made
* one; saves memory in big join trees...
* Avoid rebuilding clause list if we already made one;
* saves memory in big join trees...
*/
if (newclauses == NIL)
{
@@ -620,8 +615,8 @@ match_unsorted_outer(PlannerInfo *root,
{
newclauses =
find_mergeclauses_for_pathkeys(root,
trialsortkeys,
mergeclauses);
trialsortkeys,
mergeclauses);
Assert(newclauses != NIL);
}
else
@@ -697,8 +692,8 @@ hash_inner_and_outer(PlannerInfo *root,
* We need to build only one hashpath for any given pair of outer and
* inner relations; all of the hashable clauses will be used as keys.
*
* Scan the join's restrictinfo list to find hashjoinable clauses that
* are usable with this pair of sub-relations.
* Scan the join's restrictinfo list to find hashjoinable clauses that are
* usable with this pair of sub-relations.
*/
hashclauses = NIL;
foreach(l, restrictlist)
@@ -725,7 +720,7 @@ hash_inner_and_outer(PlannerInfo *root,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
@@ -739,9 +734,9 @@ hash_inner_and_outer(PlannerInfo *root,
if (hashclauses)
{
/*
* We consider both the cheapest-total-cost and
* cheapest-startup-cost outer paths. There's no need to consider
* any but the cheapest-total-cost inner path, however.
* We consider both the cheapest-total-cost and cheapest-startup-cost
* outer paths. There's no need to consider any but the
* cheapest-total-cost inner path, however.
*/
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
@@ -807,15 +802,15 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);

/*
* If processing an outer join, only use its own join clauses in
* the merge. For inner joins we need not be so picky.
* If processing an outer join, only use its own join clauses in the
* merge. For inner joins we need not be so picky.
*
* Furthermore, if it is a right/full join then *all* the explicit
* join clauses must be mergejoinable, else the executor will
* fail. If we are asked for a right join then just return NIL to
* indicate no mergejoin is possible (we can handle it as a left
* join instead). If we are asked for a full join then emit an
* error, because there is no fallback.
* Furthermore, if it is a right/full join then *all* the explicit join
* clauses must be mergejoinable, else the executor will fail. If we
* are asked for a right join then just return NIL to indicate no
* mergejoin is possible (we can handle it as a left join instead). If
* we are asked for a full join then emit an error, because there is
* no fallback.
*/
if (isouterjoin)
{
@@ -847,8 +842,8 @@ select_mergejoin_clauses(RelOptInfo *joinrel,

/*
* Check if clause is usable with these input rels. All the vars
* needed on each side of the clause must be available from one or
* the other of the input rels.
* needed on each side of the clause must be available from one or the
* other of the input rels.
*/
if (bms_is_subset(restrictinfo->left_relids, outerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, innerrel->relids))
@@ -856,7 +851,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.75 2005/07/28 22:27:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.76 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,17 +49,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)

/*
* First, consider left-sided and right-sided plans, in which rels of
* exactly level-1 member relations are joined against initial
* relations. We prefer to join using join clauses, but if we find a
* rel of level-1 members that has no join clauses, we will generate
* Cartesian-product joins against all initial rels not already
* contained in it.
* exactly level-1 member relations are joined against initial relations.
* We prefer to join using join clauses, but if we find a rel of level-1
* members that has no join clauses, we will generate Cartesian-product
* joins against all initial rels not already contained in it.
*
* In the first pass (level == 2), we try to join each initial rel to
* each initial rel that appears later in joinrels[1]. (The
* mirror-image joins are handled automatically by make_join_rel.) In
* later passes, we try to join rels of size level-1 from
* joinrels[level-1] to each initial rel in joinrels[1].
* In the first pass (level == 2), we try to join each initial rel to each
* initial rel that appears later in joinrels[1]. (The mirror-image joins
* are handled automatically by make_join_rel.) In later passes, we try
* to join rels of size level-1 from joinrels[level-1] to each initial rel
* in joinrels[1].
*/
foreach(r, joinrels[level - 1])
{
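Aside: the level-by-level search this function implements has the overall shape below (schematic only; make_one_rel_by_joins() in allpaths.c is the real driver loop):

/* joinrels[n] holds all n-way join relations found so far */
for (lev = 2; lev <= levels_needed; lev++)
{
	/* left/right-sided plans: joinrels[lev - 1] x joinrels[1] */
	/* bushy plans:            joinrels[k] x joinrels[lev - k] */
	joinrels[lev] = make_rels_by_joins(root, lev, joinrels);
}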
@@ -76,23 +75,22 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
if (old_rel->joininfo != NIL)
{
/*
* Note that if all available join clauses for this rel
* require more than one other rel, we will fail to make any
* joins against it here. In most cases that's OK; it'll be
* considered by "bushy plan" join code in a higher-level pass
* where we have those other rels collected into a join rel.
* Note that if all available join clauses for this rel require
* more than one other rel, we will fail to make any joins against
* it here. In most cases that's OK; it'll be considered by
* "bushy plan" join code in a higher-level pass where we have
* those other rels collected into a join rel.
*/
new_rels = make_rels_by_clause_joins(root,
old_rel,
other_rels);

/*
* An exception occurs when there is a clauseless join inside
* an IN (sub-SELECT) construct. Here, the members of the
* subselect all have join clauses (against the stuff outside
* the IN), but they *must* be joined to each other before we
* can make use of those join clauses. So do the clauseless
* join bit.
* An exception occurs when there is a clauseless join inside an
* IN (sub-SELECT) construct. Here, the members of the subselect
* all have join clauses (against the stuff outside the IN), but
* they *must* be joined to each other before we can make use of
* those join clauses. So do the clauseless join bit.
*
* See also the last-ditch case below.
*/
@@ -115,30 +113,29 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
/*
* At levels above 2 we will generate the same joined relation in
* multiple ways --- for example (a join b) join c is the same
* RelOptInfo as (b join c) join a, though the second case will
* add a different set of Paths to it. To avoid making extra work
* for subsequent passes, do not enter the same RelOptInfo into
* our output list multiple times.
* RelOptInfo as (b join c) join a, though the second case will add a
* different set of Paths to it. To avoid making extra work for
* subsequent passes, do not enter the same RelOptInfo into our output
* list multiple times.
*/
result_rels = list_concat_unique_ptr(result_rels, new_rels);
}

/*
* Now, consider "bushy plans" in which relations of k initial rels
* are joined to relations of level-k initial rels, for 2 <= k <=
* level-2.
* Now, consider "bushy plans" in which relations of k initial rels are
* joined to relations of level-k initial rels, for 2 <= k <= level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is a
* suitable join clause, in order to avoid unreasonable growth of
* planning time.
* suitable join clause, in order to avoid unreasonable growth of planning
* time.
*/
for (k = 2;; k++)
{
int other_level = level - k;

/*
* Since make_join_rel(x, y) handles both x,y and y,x cases, we
* only need to go as far as the halfway point.
* Since make_join_rel(x, y) handles both x,y and y,x cases, we only
* need to go as far as the halfway point.
*/
if (k > other_level)
break;
@@ -165,8 +162,8 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
{
/*
* OK, we can build a rel of the right level from this
* pair of rels. Do so if there is at least one
* usable join clause.
* pair of rels. Do so if there is at least one usable
* join clause.
*/
if (have_relevant_joinclause(old_rel, new_rel))
{
@@ -185,16 +182,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
}

/*
* Last-ditch effort: if we failed to find any usable joins so far,
* force a set of cartesian-product joins to be generated. This
* handles the special case where all the available rels have join
* clauses but we cannot use any of the joins yet. An example is
* Last-ditch effort: if we failed to find any usable joins so far, force
* a set of cartesian-product joins to be generated. This handles the
* special case where all the available rels have join clauses but we
* cannot use any of the joins yet. An example is
*
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
* The join clause will be usable at level 3, but at level 2 we have no
* choice but to make cartesian joins. We consider only left-sided
* and right-sided cartesian joins in this case (no bushy).
* choice but to make cartesian joins. We consider only left-sided and
* right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
{
@@ -318,8 +315,8 @@ make_rels_by_clauseless_joins(PlannerInfo *root,
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);

/*
* As long as given other_rels are distinct, don't need to
* test to see if jrel is already part of output list.
* As long as given other_rels are distinct, don't need to test to
* see if jrel is already part of output list.
*/
if (jrel)
result = lcons(jrel, result);
@@ -393,10 +390,10 @@ make_jointree_rel(PlannerInfo *root, Node *jtnode)
elog(ERROR, "invalid join order");

/*
* Since we are only going to consider this one way to do it,
* we're done generating Paths for this joinrel and can now select
* the cheapest. In fact we *must* do so now, since next level up
* will need it!
* Since we are only going to consider this one way to do it, we're
* done generating Paths for this joinrel and can now select the
* cheapest. In fact we *must* do so now, since next level up will
* need it!
*/
set_cheapest(rel);

@@ -439,10 +436,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
joinrelids = bms_union(rel1->relids, rel2->relids);

/*
* If we are implementing IN clauses as joins, there are some joins
* that are illegal. Check to see if the proposed join is trouble. We
* can skip the work if looking at an outer join, however, because
* only top-level joins might be affected.
* If we are implementing IN clauses as joins, there are some joins that
* are illegal. Check to see if the proposed join is trouble. We can skip
* the work if looking at an outer join, however, because only top-level
* joins might be affected.
*/
if (jointype == JOIN_INNER)
{
@@ -454,8 +451,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,

/*
* This IN clause is not relevant unless its RHS overlaps the
* proposed join. (Check this first as a fast path for
* dismissing most irrelevant INs quickly.)
* proposed join. (Check this first as a fast path for dismissing
* most irrelevant INs quickly.)
*/
if (!bms_overlap(ininfo->righthand, joinrelids))
continue;
@@ -468,10 +465,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
continue;

/*
* Cannot join if proposed join contains rels not in the RHS
* *and* contains only part of the RHS. We must build the
* complete RHS (subselect's join) before it can be joined to
* rels outside the subselect.
* Cannot join if proposed join contains rels not in the RHS *and*
* contains only part of the RHS. We must build the complete RHS
* (subselect's join) before it can be joined to rels outside the
* subselect.
*/
if (!bms_is_subset(ininfo->righthand, joinrelids))
{
@@ -480,13 +477,12 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
}

/*
* At this point we are considering a join of the IN's RHS to
* some other rel(s).
* At this point we are considering a join of the IN's RHS to some
* other rel(s).
*
* If we already joined IN's RHS to any other rels in either
* input path, then this join is not constrained (the
* necessary work was done at the lower level where that join
* occurred).
* If we already joined IN's RHS to any other rels in either input
* path, then this join is not constrained (the necessary work was
* done at the lower level where that join occurred).
*/
if (bms_is_subset(ininfo->righthand, rel1->relids) &&
!bms_equal(ininfo->righthand, rel1->relids))
@@ -500,12 +496,11 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
* RHS/LHS.
*
* JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS;
* conversely JOIN_UNIQUE_INNER will work if innerrel is
* exactly RHS.
* JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS; conversely
* JOIN_UNIQUE_INNER will work if innerrel is exactly RHS.
*
* But none of these will work if we already found another IN
* that needs to trigger here.
* But none of these will work if we already found another IN that
* needs to trigger here.
*/
if (jointype != JOIN_INNER)
{
@@ -532,8 +527,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
}

/*
* Find or build the join RelOptInfo, and compute the restrictlist
* that goes with this particular joining.
* Find or build the join RelOptInfo, and compute the restrictlist that
* goes with this particular joining.
*/
joinrel = build_join_rel(root, joinrelids, rel1, rel2, jointype,
&restrictlist);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.74 2005/07/28 20:26:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.75 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,14 +99,14 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
if (restriction_is_or_clause(rinfo))
{
/*
* Use the generate_bitmap_or_paths() machinery to estimate
* the value of each OR clause. We can use regular
* restriction clauses along with the OR clause contents to
* generate indexquals. We pass outer_relids = NULL so that
* sub-clauses that are actually joins will be ignored.
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
* indexquals. We pass outer_relids = NULL so that sub-clauses
* that are actually joins will be ignored.
*/
List *orpaths;
ListCell *k;
List *orpaths;
ListCell *k;

orpaths = generate_bitmap_or_paths(root, rel,
list_make1(rinfo),
@@ -116,7 +116,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/* Locate the cheapest OR path */
foreach(k, orpaths)
{
BitmapOrPath *path = (BitmapOrPath *) lfirst(k);
BitmapOrPath *path = (BitmapOrPath *) lfirst(k);

Assert(IsA(path, BitmapOrPath));
if (bestpath == NULL ||
@@ -134,8 +134,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
return false;

/*
* Convert the path's indexclauses structure to a RestrictInfo tree.
* We include any partial-index predicates so as to get a reasonable
* Convert the path's indexclauses structure to a RestrictInfo tree. We
* include any partial-index predicates so as to get a reasonable
* representation of what the path is actually scanning.
*/
newrinfos = make_restrictinfo_from_bitmapqual((Path *) bestpath,
@@ -155,12 +155,12 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
rel->baserestrictinfo = list_concat(rel->baserestrictinfo, newrinfos);

/*
* Adjust the original OR clause's cached selectivity to compensate
* for the selectivity of the added (but redundant) lower-level qual.
* This should result in the join rel getting approximately the same
* rows estimate as it would have gotten without all these
* shenanigans. (XXX major hack alert ... this depends on the
* assumption that the selectivity will stay cached ...)
* Adjust the original OR clause's cached selectivity to compensate for
* the selectivity of the added (but redundant) lower-level qual. This
* should result in the join rel getting approximately the same rows
* estimate as it would have gotten without all these shenanigans. (XXX
* major hack alert ... this depends on the assumption that the
* selectivity will stay cached ...)
*/
or_selec = clause_selectivity(root, (Node *) or_rinfo,
0, JOIN_INNER);
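Aside: the compensation described above just keeps the product of the two estimates equal to the OR clause's original selectivity. In hypothetical variable names (none of these appear in the committed code):

/*
 * The added qual is redundant with the OR clause, so its selectivity
 * would otherwise be double-counted.  Divide it back out:
 *
 *		added_selec * (or_clause_selec / added_selec) == or_clause_selec
 */
Selectivity adjusted_or_selec = or_clause_selec / added_selec;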

@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.72 2005/08/27 22:13:43 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.73 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,17 +33,17 @@

static PathKeyItem *makePathKeyItem(Node *key, Oid sortop, bool checkType);
static void generate_outer_join_implications(PlannerInfo *root,
List *equi_key_set,
Relids *relids);
List *equi_key_set,
Relids *relids);
static void sub_generate_join_implications(PlannerInfo *root,
List *equi_key_set, Relids *relids,
Node *item1, Oid sortop1,
Relids item1_relids);
List *equi_key_set, Relids *relids,
Node *item1, Oid sortop1,
Relids item1_relids);
static void process_implied_const_eq(PlannerInfo *root,
List *equi_key_set, Relids *relids,
Node *item1, Oid sortop1,
Relids item1_relids,
bool delete_it);
List *equi_key_set, Relids *relids,
Node *item1, Oid sortop1,
Relids item1_relids,
bool delete_it);
static List *make_canonical_pathkey(PlannerInfo *root, PathKeyItem *item);
static Var *find_indexkey_var(PlannerInfo *root, RelOptInfo *rel,
AttrNumber varattno);
@@ -59,12 +59,11 @@ makePathKeyItem(Node *key, Oid sortop, bool checkType)
PathKeyItem *item = makeNode(PathKeyItem);

/*
* Some callers pass expressions that are not necessarily of the same
* type as the sort operator expects as input (for example when
* dealing with an index that uses binary-compatible operators). We
* must relabel these with the correct type so that the key
* expressions will be seen as equal() to expressions that have been
* correctly labeled.
* Some callers pass expressions that are not necessarily of the same type
* as the sort operator expects as input (for example when dealing with an
* index that uses binary-compatible operators). We must relabel these
* with the correct type so that the key expressions will be seen as
* equal() to expressions that have been correctly labeled.
*/
if (checkType)
{
@@ -116,20 +115,19 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo)
return;

/*
* Our plan is to make a two-element set, then sweep through the
* existing equijoin sets looking for matches to item1 or item2. When
* we find one, we remove that set from equi_key_list and union it
* into our new set. When done, we add the new set to the front of
* equi_key_list.
* Our plan is to make a two-element set, then sweep through the existing
* equijoin sets looking for matches to item1 or item2. When we find one,
* we remove that set from equi_key_list and union it into our new set.
* When done, we add the new set to the front of equi_key_list.
*
* It may well be that the two items we're given are already known to be
* equijoin-equivalent, in which case we don't need to change our data
* structure. If we find both of them in the same equivalence set to
* start with, we can quit immediately.
*
* This is a standard UNION-FIND problem, for which there exist better
* data structures than simple lists. If this code ever proves to be
* a bottleneck then it could be sped up --- but for now, simple is
* This is a standard UNION-FIND problem, for which there exist better data
* structures than simple lists. If this code ever proves to be a
* bottleneck then it could be sped up --- but for now, simple is
* beautiful.
*/
newset = NIL;
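Aside on the UNION-FIND remark above: the textbook structure keeps one parent pointer per element, making union and find nearly constant-time. A self-contained sketch with illustrative names (nothing here is part of the commit):

static int	uf_parent[1024];	/* illustrative fixed capacity; callers
								 * must first set uf_parent[i] = i for
								 * every element */

/* find the set representative, halving paths as we go */
static int
uf_find(int x)
{
	while (uf_parent[x] != x)
	{
		uf_parent[x] = uf_parent[uf_parent[x]];
		x = uf_parent[x];
	}
	return x;
}

/* merge the sets containing a and b */
static void
uf_union(int a, int b)
{
	uf_parent[uf_find(a)] = uf_find(b);
}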
@@ -148,8 +146,7 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo)
|
||||
if (item1here || item2here)
|
||||
{
|
||||
/*
|
||||
* If find both in same equivalence set, no need to do any
|
||||
* more
|
||||
* If find both in same equivalence set, no need to do any more
|
||||
*/
|
||||
if (item1here && item2here)
|
||||
{
|
||||
@@ -228,18 +225,18 @@ generate_implied_equalities(PlannerInfo *root)
|
||||
int i1;
|
||||
|
||||
/*
|
||||
* A set containing only two items cannot imply any equalities
|
||||
* beyond the one that created the set, so we can skip it ---
|
||||
* unless outer joins appear in the query.
|
||||
* A set containing only two items cannot imply any equalities beyond
|
||||
* the one that created the set, so we can skip it --- unless outer
|
||||
* joins appear in the query.
|
||||
*/
|
||||
if (nitems < 3 && !root->hasOuterJoins)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Collect info about relids mentioned in each item. For this
|
||||
* routine we only really care whether there are any at all in
|
||||
* each item, but process_implied_equality() needs the exact sets,
|
||||
* so we may as well pull them here.
|
||||
* Collect info about relids mentioned in each item. For this routine
|
||||
* we only really care whether there are any at all in each item, but
|
||||
* process_implied_equality() needs the exact sets, so we may as well
|
||||
* pull them here.
|
||||
*/
|
||||
relids = (Relids *) palloc(nitems * sizeof(Relids));
|
||||
have_consts = false;
|
||||
@@ -258,9 +255,9 @@ generate_implied_equalities(PlannerInfo *root)
|
||||
* Match each item in the set with all that appear after it (it's
|
||||
* sufficient to generate A=B, need not process B=A too).
|
||||
*
|
||||
* A set containing only two items cannot imply any equalities
|
||||
* beyond the one that created the set, so we can skip this
|
||||
* processing in that case.
|
||||
* A set containing only two items cannot imply any equalities beyond the
|
||||
* one that created the set, so we can skip this processing in that
|
||||
* case.
|
||||
*/
|
||||
if (nitems >= 3)
|
||||
{
|
||||
@@ -346,7 +343,7 @@ generate_implied_equalities(PlannerInfo *root)
|
||||
* the time it gets here, the restriction will look like
|
||||
* COALESCE(LEFTVAR, RIGHTVAR) = CONSTANT
|
||||
* and we will have a join clause LEFTVAR = RIGHTVAR that we can match the
|
||||
* COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
|
||||
* COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
|
||||
* and RIGHTVAR = CONSTANT into the input relations, since any rows not
|
||||
* meeting these conditions cannot contribute to the join result.
|
||||
*
|
||||
@@ -397,8 +394,8 @@ generate_outer_join_implications(PlannerInfo *root,
|
||||
*/
|
||||
static void
|
||||
sub_generate_join_implications(PlannerInfo *root,
|
||||
List *equi_key_set, Relids *relids,
|
||||
Node *item1, Oid sortop1, Relids item1_relids)
|
||||
List *equi_key_set, Relids *relids,
|
||||
Node *item1, Oid sortop1, Relids item1_relids)
|
||||
|
||||
{
|
||||
ListCell *l;
|
||||
@@ -410,34 +407,36 @@ sub_generate_join_implications(PlannerInfo *root,
|
||||
foreach(l, root->left_join_clauses)
|
||||
{
|
||||
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
|
||||
Node *leftop = get_leftop(rinfo->clause);
|
||||
Node *leftop = get_leftop(rinfo->clause);
|
||||
|
||||
if (equal(leftop, item1) && rinfo->left_sortop == sortop1)
|
||||
{
|
||||
/*
|
||||
* Match, so find constant member(s) of set and generate
|
||||
* implied INNERVAR = CONSTANT
|
||||
* Match, so find constant member(s) of set and generate implied
|
||||
* INNERVAR = CONSTANT
|
||||
*/
|
||||
Node *rightop = get_rightop(rinfo->clause);
|
||||
Node *rightop = get_rightop(rinfo->clause);
|
||||
|
||||
process_implied_const_eq(root, equi_key_set, relids,
|
||||
rightop,
|
||||
rinfo->right_sortop,
|
||||
rinfo->right_relids,
|
||||
false);
|
||||
|
||||
/*
|
||||
* We can remove explicit tests of this outer-join qual, too,
|
||||
* since we now have tests forcing each of its sides
|
||||
* to the same value.
|
||||
* since we now have tests forcing each of its sides to the same
|
||||
* value.
|
||||
*/
|
||||
process_implied_equality(root,
|
||||
leftop, rightop,
|
||||
rinfo->left_sortop, rinfo->right_sortop,
|
||||
rinfo->left_relids, rinfo->right_relids,
|
||||
true);
|
||||
|
||||
/*
|
||||
* And recurse to see if we can deduce anything from
|
||||
* INNERVAR = CONSTANT
|
||||
* And recurse to see if we can deduce anything from INNERVAR =
|
||||
* CONSTANT
|
||||
*/
|
||||
sub_generate_join_implications(root, equi_key_set, relids,
|
||||
rightop,
|
||||
@@ -450,34 +449,36 @@ sub_generate_join_implications(PlannerInfo *root,
|
||||
foreach(l, root->right_join_clauses)
|
||||
{
|
||||
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
|
||||
Node *rightop = get_rightop(rinfo->clause);
|
||||
Node *rightop = get_rightop(rinfo->clause);
|
||||
|
||||
if (equal(rightop, item1) && rinfo->right_sortop == sortop1)
|
||||
{
|
||||
/*
|
||||
* Match, so find constant member(s) of set and generate
|
||||
* implied INNERVAR = CONSTANT
|
||||
* Match, so find constant member(s) of set and generate implied
|
||||
* INNERVAR = CONSTANT
|
||||
*/
|
||||
Node *leftop = get_leftop(rinfo->clause);
|
||||
Node *leftop = get_leftop(rinfo->clause);
|
||||
|
||||
process_implied_const_eq(root, equi_key_set, relids,
|
||||
leftop,
|
||||
rinfo->left_sortop,
|
||||
rinfo->left_relids,
|
||||
false);
|
||||
|
||||
/*
|
||||
* We can remove explicit tests of this outer-join qual, too,
|
||||
* since we now have tests forcing each of its sides
|
||||
* to the same value.
|
||||
* since we now have tests forcing each of its sides to the same
|
||||
* value.
|
||||
*/
|
||||
process_implied_equality(root,
|
||||
leftop, rightop,
|
||||
rinfo->left_sortop, rinfo->right_sortop,
|
||||
rinfo->left_relids, rinfo->right_relids,
|
||||
true);
|
||||
|
||||
/*
|
||||
* And recurse to see if we can deduce anything from
|
||||
* INNERVAR = CONSTANT
|
||||
* And recurse to see if we can deduce anything from INNERVAR =
|
||||
* CONSTANT
|
||||
*/
|
||||
sub_generate_join_implications(root, equi_key_set, relids,
|
||||
leftop,
|
||||
@@ -492,8 +493,8 @@ sub_generate_join_implications(PlannerInfo *root,
if (IsA(item1, CoalesceExpr))
{
CoalesceExpr *cexpr = (CoalesceExpr *) item1;
Node *cfirst;
Node *csecond;
Node *cfirst;
Node *csecond;

if (list_length(cexpr->args) != 2)
return;
@@ -501,26 +502,26 @@ sub_generate_join_implications(PlannerInfo *root,
csecond = (Node *) lsecond(cexpr->args);

/*
* Examine each mergejoinable full-join clause, looking for a
* clause of the form "x = y" matching the COALESCE(x,y) expression
* Examine each mergejoinable full-join clause, looking for a clause
* of the form "x = y" matching the COALESCE(x,y) expression
*/
foreach(l, root->full_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
Node *leftop = get_leftop(rinfo->clause);
Node *rightop = get_rightop(rinfo->clause);
Node *leftop = get_leftop(rinfo->clause);
Node *rightop = get_rightop(rinfo->clause);

/*
* We can assume the COALESCE() inputs are in the same order
* as the join clause, since both were automatically generated
* in the cases we care about.
* We can assume the COALESCE() inputs are in the same order as
* the join clause, since both were automatically generated in the
* cases we care about.
*
* XXX currently this may fail to match in cross-type cases
* because the COALESCE will contain typecast operations while
* the join clause may not (if there is a cross-type mergejoin
* operator available for the two column types).
* Is it OK to strip implicit coercions from the COALESCE
* arguments? What of the sortops in such cases?
* XXX currently this may fail to match in cross-type cases because
* the COALESCE will contain typecast operations while the join
* clause may not (if there is a cross-type mergejoin operator
* available for the two column types). Is it OK to strip implicit
* coercions from the COALESCE arguments? What of the sortops in
* such cases?
*/
if (equal(leftop, cfirst) &&
equal(rightop, csecond) &&
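
A hedged sketch of the kind of query that produces such a COALESCE, with hypothetical tables a and b; the COALESCE arguments and the full-join clause come from the same automatic rewrite, so their order matches:

    SELECT COALESCE(a.v1, b.v1) AS v1
    FROM a FULL JOIN b ON a.v1 = b.v1;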
@@ -548,10 +549,11 @@ sub_generate_join_implications(PlannerInfo *root,
sortop1,
item1_relids,
true);

/*
* We can remove explicit tests of this outer-join qual, too,
* since we now have tests forcing each of its sides
* to the same value.
* since we now have tests forcing each of its sides to the
* same value.
*/
process_implied_equality(root,
leftop, rightop,
@@ -560,9 +562,10 @@ sub_generate_join_implications(PlannerInfo *root,
rinfo->left_relids,
rinfo->right_relids,
true);

/*
* And recurse to see if we can deduce anything from
* LEFTVAR = CONSTANT
* And recurse to see if we can deduce anything from LEFTVAR =
* CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
leftop,
@@ -700,19 +703,19 @@ canonicalize_pathkeys(PlannerInfo *root, List *pathkeys)
List *cpathkey;

/*
* It's sufficient to look at the first entry in the sublist; if
* there are more entries, they're already part of an equivalence
* set by definition.
* It's sufficient to look at the first entry in the sublist; if there
* are more entries, they're already part of an equivalence set by
* definition.
*/
Assert(pathkey != NIL);
item = (PathKeyItem *) linitial(pathkey);
cpathkey = make_canonical_pathkey(root, item);

/*
* Eliminate redundant ordering requests --- ORDER BY A,A is the
* same as ORDER BY A. We want to check this only after we have
* canonicalized the keys, so that equivalent-key knowledge is
* used when deciding if an item is redundant.
* Eliminate redundant ordering requests --- ORDER BY A,A is the same
* as ORDER BY A. We want to check this only after we have
* canonicalized the keys, so that equivalent-key knowledge is used
* when deciding if an item is redundant.
*/
new_pathkeys = list_append_unique_ptr(new_pathkeys, cpathkey);
}
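
A hedged illustration of the redundancy check above (hypothetical table t with columns a and b): after canonicalization both orderings reduce to a single pathkey.

    SELECT * FROM t ORDER BY a, a;             -- same as ORDER BY a
    SELECT * FROM t WHERE a = b ORDER BY a, b; -- b is in a's equivalence set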
@@ -769,8 +772,8 @@ compare_pathkeys(List *keys1, List *keys2)
List *subkey2 = (List *) lfirst(key2);

/*
* XXX would like to check that we've been given canonicalized
* input, but PlannerInfo not accessible here...
* XXX would like to check that we've been given canonicalized input,
* but PlannerInfo not accessible here...
*/
#ifdef NOT_USED
Assert(list_member_ptr(root->equi_key_list, subkey1));
@@ -778,10 +781,10 @@ compare_pathkeys(List *keys1, List *keys2)
#endif

/*
* We will never have two subkeys where one is a subset of the
* other, because of the canonicalization process. Either they
* are equal or they ain't. Furthermore, we only need pointer
* comparison to detect equality.
* We will never have two subkeys where one is a subset of the other,
* because of the canonicalization process. Either they are equal or
* they ain't. Furthermore, we only need pointer comparison to detect
* equality.
*/
if (subkey1 != subkey2)
return PATHKEYS_DIFFERENT; /* no need to keep looking */
@@ -789,9 +792,9 @@ compare_pathkeys(List *keys1, List *keys2)

/*
* If we reached the end of only one list, the other is longer and
* therefore not a subset. (We assume the additional sublist(s) of
* the other list are not NIL --- no pathkey list should ever have a
* NIL sublist.)
* therefore not a subset. (We assume the additional sublist(s) of the
* other list are not NIL --- no pathkey list should ever have a NIL
* sublist.)
*/
if (key1 == NULL && key2 == NULL)
return PATHKEYS_EQUAL;
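
A hedged sketch of what pathkey-list containment means in practice (hypothetical table and index): a path sorted by a longer pathkey list satisfies any prefix of it.

    CREATE TABLE t (a int, b int);
    CREATE INDEX t_a_b_idx ON t (a, b);
    -- An indexscan ordered by (a, b) also satisfies the shorter request:
    SELECT * FROM t ORDER BY a;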
@@ -840,8 +843,8 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
Path *path = (Path *) lfirst(l);

/*
* Since cost comparison is a lot cheaper than pathkey comparison,
* do that first. (XXX is that still true?)
* Since cost comparison is a lot cheaper than pathkey comparison, do
* that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
@@ -879,11 +882,11 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
Path *path = (Path *) lfirst(l);

/*
* Since cost comparison is a lot cheaper than pathkey comparison,
* do that first.
* Since cost comparison is a lot cheaper than pathkey comparison, do
* that first.
*/
if (matched_path != NULL &&
compare_fractional_path_costs(matched_path, path, fraction) <= 0)
compare_fractional_path_costs(matched_path, path, fraction) <= 0)
continue;

if (pathkeys_contained_in(pathkeys, path->pathkeys))
@@ -954,8 +957,8 @@ build_index_pathkeys(PlannerInfo *root,
cpathkey = make_canonical_pathkey(root, item);

/*
* Eliminate redundant ordering info; could happen if query is
* such that index keys are equijoined...
* Eliminate redundant ordering info; could happen if query is such
* that index keys are equijoined...
*/
retval = list_append_unique_ptr(retval, cpathkey);

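A hedged sketch of the equijoined-index-keys case mentioned above (hypothetical table and index): when a qual equates two index columns, the second column's canonical pathkey duplicates the first and is dropped.

    CREATE TABLE t (x int, y int);
    CREATE INDEX t_x_y_idx ON t (x, y);
    SELECT * FROM t WHERE x = y ORDER BY x, y; -- y adds no new ordering info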
@@ -1003,7 +1006,7 @@ find_indexkey_var(PlannerInfo *root, RelOptInfo *rel, AttrNumber varattno)
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
* result, in the terms of the outer query. This is essentially a
* result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
@@ -1033,19 +1036,18 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,

/*
* The sub_pathkey could contain multiple elements (representing
* knowledge that multiple items are effectively equal). Each
* element might match none, one, or more of the output columns
* that are visible to the outer query. This means we may have
* multiple possible representations of the sub_pathkey in the
* context of the outer query. Ideally we would generate them all
* and put them all into a pathkey list of the outer query,
* thereby propagating equality knowledge up to the outer query.
* Right now we cannot do so, because the outer query's canonical
* pathkey sets are already frozen when this is called. Instead
* we prefer the one that has the highest "score" (number of
* canonical pathkey peers, plus one if it matches the outer
* query_pathkeys). This is the most likely to be useful in the
* outer query.
* knowledge that multiple items are effectively equal). Each element
* might match none, one, or more of the output columns that are
* visible to the outer query. This means we may have multiple
* possible representations of the sub_pathkey in the context of the
* outer query. Ideally we would generate them all and put them all
* into a pathkey list of the outer query, thereby propagating
* equality knowledge up to the outer query. Right now we cannot do
* so, because the outer query's canonical pathkey sets are already
* frozen when this is called. Instead we prefer the one that has the
* highest "score" (number of canonical pathkey peers, plus one if it
* matches the outer query_pathkeys). This is the most likely to be
* useful in the outer query.
*/
foreach(j, sub_pathkey)
{
@@ -1144,13 +1146,13 @@ build_join_pathkeys(PlannerInfo *root,
return NIL;

/*
* This used to be quite a complex bit of code, but now that all
* pathkey sublists start out life canonicalized, we don't have to do
* a darn thing here! The inner-rel vars we used to need to add are
* *already* part of the outer pathkey!
* This used to be quite a complex bit of code, but now that all pathkey
* sublists start out life canonicalized, we don't have to do a darn thing
* here! The inner-rel vars we used to need to add are *already* part of
* the outer pathkey!
*
* We do, however, need to truncate the pathkeys list, since it may
* contain pathkeys that were useful for forming this joinrel but are
* We do, however, need to truncate the pathkeys list, since it may contain
* pathkeys that were useful for forming this joinrel but are
* uninteresting to higher levels.
*/
return truncate_useless_pathkeys(root, joinrel, outer_pathkeys);
@@ -1289,22 +1291,20 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,

/*
* We can match a pathkey against either left or right side of any
* mergejoin clause. (We examine both sides since we aren't told
* if the given pathkeys are for inner or outer input path; no
* confusion is possible.) Furthermore, if there are multiple
* matching clauses, take them all. In plain inner-join scenarios
* we expect only one match, because redundant-mergeclause
* elimination will have removed any redundant mergeclauses from
* the input list. However, in outer-join scenarios there might be
* multiple matches. An example is
* mergejoin clause. (We examine both sides since we aren't told if
* the given pathkeys are for inner or outer input path; no confusion
* is possible.) Furthermore, if there are multiple matching clauses,
* take them all. In plain inner-join scenarios we expect only one
* match, because redundant-mergeclause elimination will have removed
* any redundant mergeclauses from the input list. However, in
* outer-join scenarios there might be multiple matches. An example is
*
* select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and
* a.v1 = b.v2;
* select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and a.v1 =
* b.v2;
*
* Given the pathkeys ((a.v1), (a.v2)) it is okay to return all three
* clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and
* indeed we *must* do so or we will be unable to form a valid
* plan.
* clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and indeed
* we *must* do so or we will be unable to form a valid plan.
*/
foreach(j, restrictinfos)
{
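
The comment's example, spelled out as runnable SQL (the table definitions are assumed for illustration):

    CREATE TABLE a (v1 int, v2 int);
    CREATE TABLE b (v1 int, v2 int);
    SELECT * FROM a FULL JOIN b
      ON a.v1 = b.v1 AND a.v2 = b.v2 AND a.v1 = b.v2;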
@@ -1325,15 +1325,15 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,

/*
* If we didn't find a mergeclause, we're done --- any additional
* sort-key positions in the pathkeys are useless. (But we can
* still mergejoin if we found at least one mergeclause.)
* sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
break;

/*
* If we did find usable mergeclause(s) for this sort-key
* position, add them to result list.
* If we did find usable mergeclause(s) for this sort-key position,
* add them to result list.
*/
mergeclauses = list_concat(mergeclauses, matched_restrictinfos);
}
@@ -1390,14 +1390,13 @@ make_pathkeys_for_mergeclauses(PlannerInfo *root,
}

/*
* When we are given multiple merge clauses, it's possible that
* some clauses refer to the same vars as earlier clauses. There's
* no reason for us to specify sort keys like (A,B,A) when (A,B)
* will do --- and adding redundant sort keys makes add_path think
* that this sort order is different from ones that are really the
* same, so don't do it. Since we now have a canonicalized
* pathkey, a simple ptrMember test is sufficient to detect
* redundant keys.
* When we are given multiple merge clauses, it's possible that some
* clauses refer to the same vars as earlier clauses. There's no
* reason for us to specify sort keys like (A,B,A) when (A,B) will do
* --- and adding redundant sort keys makes add_path think that this
* sort order is different from ones that are really the same, so
* don't do it. Since we now have a canonicalized pathkey, a simple
* ptrMember test is sufficient to detect redundant keys.
*/
pathkeys = list_append_unique_ptr(pathkeys, pathkey);
}
@@ -1447,8 +1446,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
cache_mergeclause_pathkeys(root, restrictinfo);

/*
* We can compare canonical pathkey sublists by simple
* pointer equality; see compare_pathkeys.
* We can compare canonical pathkey sublists by simple pointer
* equality; see compare_pathkeys.
*/
if (pathkey == restrictinfo->left_pathkey ||
pathkey == restrictinfo->right_pathkey)
@@ -1460,8 +1459,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)

/*
* If we didn't find a mergeclause, we're done --- any additional
* sort-key positions in the pathkeys are useless. (But we can
* still mergejoin if we found at least one mergeclause.)
* sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched)
useful++;

@@ -11,7 +11,7 @@
* WHERE ctid IN (tid1, tid2, ...)
*
* There is currently no special support for joins involving CTID; in
* particular nothing corresponding to best_inner_indexscan(). Since it's
* particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
@@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.24 2005/08/23 20:49:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.25 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,7 +50,7 @@ static List *TidQualFromRestrictinfo(int varno, List *restrictinfo);
*
* If it is, return the pseudoconstant subnode; if not, return NULL.
*
* We check that the CTID Var belongs to relation "varno". That is probably
* We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/
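
A hedged sketch of the qual shapes tidpath.c recognizes, using a hypothetical table t; either form can yield a TID scan path:

    CREATE TABLE t (i int);
    SELECT * FROM t WHERE ctid = '(0,1)';
    SELECT * FROM t WHERE ctid IN ('(0,1)', '(0,2)');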
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.200 2005/10/13 00:06:46 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.201 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,10 +50,10 @@ static IndexScan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
List *tlist, List *scan_clauses,
List **nonlossy_clauses);
static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
BitmapHeapPath *best_path,
List *tlist, List *scan_clauses);
BitmapHeapPath *best_path,
List *tlist, List *scan_clauses);
static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
List **qual, List **indexqual);
List **qual, List **indexqual);
static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
List *tlist, List *scan_clauses);
static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path,
@@ -72,7 +72,7 @@ static void fix_indexqual_references(List *indexquals, IndexPath *index_path,
List **indexstrategy,
List **indexsubtype);
static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index,
Oid *opclass);
Oid *opclass);
static List *get_switched_clauses(List *clauses, Relids outerrelids);
static void copy_path_costsize(Plan *dest, Path *src);
static void copy_plan_costsize(Plan *dest, Plan *src);
@@ -82,15 +82,15 @@ static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
List *indexstrategy, List *indexsubtype,
ScanDirection indexscandir);
static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
List *indexqual,
List *indexqualorig,
List *indexstrategy,
List *indexsubtype);
List *indexqual,
List *indexqualorig,
List *indexstrategy,
List *indexsubtype);
static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
List *qpqual,
Plan *lefttree,
List *bitmapqualorig,
Index scanrelid);
List *qpqual,
Plan *lefttree,
List *bitmapqualorig,
Index scanrelid);
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
List *tideval);
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
@@ -164,7 +164,7 @@ create_plan(PlannerInfo *root, Path *best_path)
break;
case T_Material:
plan = (Plan *) create_material_plan(root,
(MaterialPath *) best_path);
(MaterialPath *) best_path);
break;
case T_Unique:
plan = (Plan *) create_unique_plan(root,
@@ -195,12 +195,12 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
Scan *plan;

/*
* For table scans, rather than using the relation targetlist (which
* is only those Vars actually needed by the query), we prefer to
* generate a tlist containing all Vars in order. This will allow the
* executor to optimize away projection of the table tuples, if
* possible. (Note that planner.c may replace the tlist we generate
* here, forcing projection to occur.)
* For table scans, rather than using the relation targetlist (which is
* only those Vars actually needed by the query), we prefer to generate a
* tlist containing all Vars in order. This will allow the executor to
* optimize away projection of the table tuples, if possible. (Note that
* planner.c may replace the tlist we generate here, forcing projection to
* occur.)
*/
if (use_physical_tlist(rel))
{
@@ -213,8 +213,8 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
tlist = build_relation_tlist(rel);

/*
* Extract the relevant restriction clauses from the parent relation;
* the executor must apply all these restrictions during the scan.
* Extract the relevant restriction clauses from the parent relation; the
* executor must apply all these restrictions during the scan.
*/
scan_clauses = rel->baserestrictinfo;

@@ -237,7 +237,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path)

case T_BitmapHeapScan:
plan = (Scan *) create_bitmap_scan_plan(root,
(BitmapHeapPath *) best_path,
(BitmapHeapPath *) best_path,
tlist,
scan_clauses);
break;
@@ -308,8 +308,8 @@ use_physical_tlist(RelOptInfo *rel)
int i;

/*
* OK for subquery and function scans; otherwise, can't do it for
* anything except real relations.
* OK for subquery and function scans; otherwise, can't do it for anything
* except real relations.
*/
if (rel->rtekind != RTE_RELATION)
{
@@ -328,9 +328,9 @@ use_physical_tlist(RelOptInfo *rel)
return false;

/*
* Can't do it if any system columns are requested, either. (This
* could possibly be fixed but would take some fragile assumptions in
* setrefs.c, I think.)
* Can't do it if any system columns are requested, either. (This could
* possibly be fixed but would take some fragile assumptions in setrefs.c,
* I think.)
*/
for (i = rel->min_attr; i <= 0; i++)
{
@@ -415,14 +415,14 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
#ifdef NOT_USED

/*
* * Expensive function pullups may have pulled local predicates *
* into this path node. Put them in the qpqual of the plan node. *
* JMH, 6/15/92
* * Expensive function pullups may have pulled local predicates * into
* this path node. Put them in the qpqual of the plan node. * JMH,
* 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
set_qpqual((Plan) plan,
list_concat(get_qpqual((Plan) plan),
get_actual_clauses(get_loc_restrictinfo(best_path))));
get_actual_clauses(get_loc_restrictinfo(best_path))));
#endif

return plan;
@@ -444,13 +444,13 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path)
ListCell *subpaths;

/*
* It is possible for the subplans list to contain only one entry,
* or even no entries. Handle these cases specially.
* It is possible for the subplans list to contain only one entry, or even
* no entries. Handle these cases specially.
*
* XXX ideally, if there's just one entry, we'd not bother to generate
* an Append node but just return the single child. At the moment this
* does not work because the varno of the child scan plan won't match
* the parent-rel Vars it'll be asked to emit.
* XXX ideally, if there's just one entry, we'd not bother to generate an
* Append node but just return the single child. At the moment this does
* not work because the varno of the child scan plan won't match the
* parent-rel Vars it'll be asked to emit.
*/
if (best_path->subpaths == NIL)
{
@@ -618,8 +618,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
if (newitems)
{
/*
* If the top plan node can't do projections, we need to add a
* Result node to help it along.
* If the top plan node can't do projections, we need to add a Result
* node to help it along.
*/
if (!is_projection_capable_plan(subplan))
subplan = (Plan *) make_result(newtlist, NULL, subplan);
@@ -628,8 +628,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
}

/*
* Build control information showing which subplan output columns are
* to be examined by the grouping step. Unfortunately we can't merge this
* Build control information showing which subplan output columns are to
* be examined by the grouping step. Unfortunately we can't merge this
* with the previous loop, since we didn't then know which version of the
* subplan tlist we'd end up using.
*/
@@ -656,9 +656,9 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
numGroups = (long) Min(best_path->rows, (double) LONG_MAX);

/*
* Since the Agg node is going to project anyway, we can give it
* the minimum output tlist, without any stuff we might have added
* to the subplan tlist.
* Since the Agg node is going to project anyway, we can give it the
* minimum output tlist, without any stuff we might have added to the
* subplan tlist.
*/
plan = (Plan *) make_agg(root,
build_relation_tlist(best_path->path.parent),
@@ -776,9 +776,9 @@ create_indexscan_plan(PlannerInfo *root,
stripped_indexquals = get_actual_clauses(indexquals);

/*
* The executor needs a copy with the indexkey on the left of each
* clause and with index attr numbers substituted for table ones. This
* pass also gets strategy info and looks for "lossy" operators.
* The executor needs a copy with the indexkey on the left of each clause
* and with index attr numbers substituted for table ones. This pass also
* gets strategy info and looks for "lossy" operators.
*/
fix_indexqual_references(indexquals, best_path,
&fixed_indexquals,
@@ -792,12 +792,11 @@ create_indexscan_plan(PlannerInfo *root,

/*
* If this is an innerjoin scan, the indexclauses will contain join
* clauses that are not present in scan_clauses (since the passed-in
* value is just the rel's baserestrictinfo list). We must add these
* clauses to scan_clauses to ensure they get checked. In most cases
* we will remove the join clauses again below, but if a join clause
* contains a special operator, we need to make sure it gets into the
* scan_clauses.
* clauses that are not present in scan_clauses (since the passed-in value
* is just the rel's baserestrictinfo list). We must add these clauses to
* scan_clauses to ensure they get checked. In most cases we will remove
* the join clauses again below, but if a join clause contains a special
* operator, we need to make sure it gets into the scan_clauses.
*
* Note: pointer comparison should be enough to determine RestrictInfo
* matches.
@@ -806,25 +805,25 @@ create_indexscan_plan(PlannerInfo *root,
scan_clauses = list_union_ptr(scan_clauses, best_path->indexclauses);

/*
* The qpqual list must contain all restrictions not automatically
* handled by the index. All the predicates in the indexquals will be
* checked (either by the index itself, or by nodeIndexscan.c), but if
* there are any "special" operators involved then they must be included
* in qpqual. Also, any lossy index operators must be rechecked in
* the qpqual. The upshot is that qpqual must contain scan_clauses
* minus whatever appears in nonlossy_indexquals.
* The qpqual list must contain all restrictions not automatically handled
* by the index. All the predicates in the indexquals will be checked
* (either by the index itself, or by nodeIndexscan.c), but if there are
* any "special" operators involved then they must be included in qpqual.
* Also, any lossy index operators must be rechecked in the qpqual. The
* upshot is that qpqual must contain scan_clauses minus whatever appears
* in nonlossy_indexquals.
*
* In normal cases simple pointer equality checks will be enough to
* spot duplicate RestrictInfos, so we try that first. In some situations
* (particularly with OR'd index conditions) we may have scan_clauses
* that are not equal to, but are logically implied by, the index quals;
* so we also try a predicate_implied_by() check to see if we can discard
* quals that way. (predicate_implied_by assumes its first input contains
* only immutable functions, so we have to check that.) We can also
* discard quals that are implied by a partial index's predicate.
* In normal cases simple pointer equality checks will be enough to spot
* duplicate RestrictInfos, so we try that first. In some situations
* (particularly with OR'd index conditions) we may have scan_clauses that
* are not equal to, but are logically implied by, the index quals; so we
* also try a predicate_implied_by() check to see if we can discard quals
* that way. (predicate_implied_by assumes its first input contains only
* immutable functions, so we have to check that.) We can also discard
* quals that are implied by a partial index's predicate.
*
* While at it, we strip off the RestrictInfos to produce a list of
* plain expressions.
* While at it, we strip off the RestrictInfos to produce a list of plain
* expressions.
*/
qpqual = NIL;
foreach(l, scan_clauses)
@@ -836,7 +835,7 @@ create_indexscan_plan(PlannerInfo *root,
continue;
if (!contain_mutable_functions((Node *) rinfo->clause))
{
List *clausel = list_make1(rinfo->clause);
List *clausel = list_make1(rinfo->clause);

if (predicate_implied_by(clausel, nonlossy_indexquals))
continue;
@@ -898,13 +897,12 @@ create_bitmap_scan_plan(PlannerInfo *root,
scan_clauses = get_actual_clauses(scan_clauses);

/*
* If this is a innerjoin scan, the indexclauses will contain join
* clauses that are not present in scan_clauses (since the passed-in
* value is just the rel's baserestrictinfo list). We must add these
* clauses to scan_clauses to ensure they get checked. In most cases
* we will remove the join clauses again below, but if a join clause
* contains a special operator, we need to make sure it gets into the
* scan_clauses.
* If this is a innerjoin scan, the indexclauses will contain join clauses
* that are not present in scan_clauses (since the passed-in value is just
* the rel's baserestrictinfo list). We must add these clauses to
* scan_clauses to ensure they get checked. In most cases we will remove
* the join clauses again below, but if a join clause contains a special
* operator, we need to make sure it gets into the scan_clauses.
*/
if (best_path->isjoininner)
{
@@ -912,12 +910,12 @@ create_bitmap_scan_plan(PlannerInfo *root,
}

/*
* The qpqual list must contain all restrictions not automatically
* handled by the index. All the predicates in the indexquals will be
* checked (either by the index itself, or by nodeBitmapHeapscan.c),
* but if there are any "special" or lossy operators involved then they
* must be added to qpqual. The upshot is that qpquals must contain
* scan_clauses minus whatever appears in indexquals.
* The qpqual list must contain all restrictions not automatically handled
* by the index. All the predicates in the indexquals will be checked
* (either by the index itself, or by nodeBitmapHeapscan.c), but if there
* are any "special" or lossy operators involved then they must be added
* to qpqual. The upshot is that qpquals must contain scan_clauses minus
* whatever appears in indexquals.
*
* In normal cases simple equal() checks will be enough to spot duplicate
* clauses, so we try that first. In some situations (particularly with
@@ -930,25 +928,25 @@ create_bitmap_scan_plan(PlannerInfo *root,
*
* XXX For the moment, we only consider partial index predicates in the
* simple single-index-scan case. Is it worth trying to be smart about
* more complex cases? Perhaps create_bitmap_subplan should be made to
* more complex cases? Perhaps create_bitmap_subplan should be made to
* include predicate info in what it constructs.
*/
qpqual = NIL;
foreach(l, scan_clauses)
{
Node *clause = (Node *) lfirst(l);
Node *clause = (Node *) lfirst(l);

if (list_member(indexquals, clause))
continue;
if (!contain_mutable_functions(clause))
{
List *clausel = list_make1(clause);
List *clausel = list_make1(clause);

if (predicate_implied_by(clausel, indexquals))
continue;
if (IsA(best_path->bitmapqual, IndexPath))
{
IndexPath *ipath = (IndexPath *) best_path->bitmapqual;
IndexPath *ipath = (IndexPath *) best_path->bitmapqual;

if (predicate_implied_by(clausel, ipath->indexinfo->indpred))
continue;
@@ -1010,15 +1008,15 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* There may well be redundant quals among the subplans, since a
* top-level WHERE qual might have gotten used to form several
* different index quals. We don't try exceedingly hard to
* eliminate redundancies, but we do eliminate obvious duplicates
* by using list_concat_unique.
* different index quals. We don't try exceedingly hard to eliminate
* redundancies, but we do eliminate obvious duplicates by using
* list_concat_unique.
*/
foreach(l, apath->bitmapquals)
{
Plan *subplan;
List *subqual;
List *subindexqual;
Plan *subplan;
List *subqual;
List *subindexqual;

subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
&subqual, &subindexqual);
@@ -1048,7 +1046,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
* to just "true". We do not try to eliminate redundant subclauses
* to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -1056,9 +1054,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
*/
foreach(l, opath->bitmapquals)
{
Plan *subplan;
List *subqual;
List *subindexqual;
Plan *subplan;
List *subqual;
List *subindexqual;

subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
&subqual, &subindexqual);
@@ -1080,6 +1078,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
plan->plan_rows =
clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
plan->plan_width = 0; /* meaningless */

/*
* If there were constant-TRUE subquals, the OR reduces to constant
* TRUE. Also, avoid generating one-element ORs, which could happen
@@ -1100,9 +1099,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
}
else if (IsA(bitmapqual, IndexPath))
{
IndexPath *ipath = (IndexPath *) bitmapqual;
IndexScan *iscan;
List *nonlossy_clauses;
IndexPath *ipath = (IndexPath *) bitmapqual;
IndexScan *iscan;
List *nonlossy_clauses;

/* Use the regular indexscan plan build machinery... */
iscan = create_indexscan_plan(root, ipath, NIL, NIL,
@@ -1245,18 +1244,18 @@ create_nestloop_plan(PlannerInfo *root,
if (IsA(best_path->innerjoinpath, IndexPath))
{
/*
* An index is being used to reduce the number of tuples scanned
* in the inner relation. If there are join clauses being used
* with the index, we may remove those join clauses from the list
* of clauses that have to be checked as qpquals at the join node.
* An index is being used to reduce the number of tuples scanned in
* the inner relation. If there are join clauses being used with the
* index, we may remove those join clauses from the list of clauses
* that have to be checked as qpquals at the join node.
*
* We can also remove any join clauses that are redundant with those
* being used in the index scan; prior redundancy checks will not
* have caught this case because the join clauses would never have
* been put in the same joininfo list.
* being used in the index scan; prior redundancy checks will not have
* caught this case because the join clauses would never have been put
* in the same joininfo list.
*
* We can skip this if the index path is an ordinary indexpath and
* not a special innerjoin path.
* We can skip this if the index path is an ordinary indexpath and not a
* special innerjoin path.
*/
IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;

@@ -1266,7 +1265,7 @@ create_nestloop_plan(PlannerInfo *root,
select_nonredundant_join_clauses(root,
joinrestrictclauses,
innerpath->indexclauses,
IS_OUTER_JOIN(best_path->jointype));
IS_OUTER_JOIN(best_path->jointype));
}
}
else if (IsA(best_path->innerjoinpath, BitmapHeapPath))
@@ -1275,11 +1274,11 @@ create_nestloop_plan(PlannerInfo *root,
* Same deal for bitmapped index scans.
*
* Note: both here and above, we ignore any implicit index restrictions
* associated with the use of partial indexes. This is OK because
* associated with the use of partial indexes. This is OK because
* we're only trying to prove we can dispense with some join quals;
* failing to prove that doesn't result in an incorrect plan. It is
* the right way to proceed because adding more quals to the stuff
* we got from the original query would just make it harder to detect
* the right way to proceed because adding more quals to the stuff we
* got from the original query would just make it harder to detect
* duplication.
*/
BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
@@ -1296,7 +1295,7 @@ create_nestloop_plan(PlannerInfo *root,
select_nonredundant_join_clauses(root,
joinrestrictclauses,
bitmapclauses,
IS_OUTER_JOIN(best_path->jointype));
IS_OUTER_JOIN(best_path->jointype));
}
}

@@ -1355,18 +1354,18 @@ create_mergejoin_plan(PlannerInfo *root,
}

/*
* Remove the mergeclauses from the list of join qual clauses, leaving
* the list of quals that must be checked as qpquals.
* Remove the mergeclauses from the list of join qual clauses, leaving the
* list of quals that must be checked as qpquals.
*/
mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
joinclauses = list_difference(joinclauses, mergeclauses);

/*
* Rearrange mergeclauses, if needed, so that the outer variable is
* always on the left.
* Rearrange mergeclauses, if needed, so that the outer variable is always
* on the left.
*/
mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
best_path->jpath.outerjoinpath->parent->relids);
best_path->jpath.outerjoinpath->parent->relids);

/* Sort clauses into best execution order */
/* NB: do NOT reorder the mergeclauses */
@@ -1375,8 +1374,8 @@ create_mergejoin_plan(PlannerInfo *root,

/*
* Create explicit sort nodes for the outer and inner join paths if
* necessary. The sort cost was already accounted for in the path.
* Make sure there are no excess columns in the inputs if sorting.
* necessary. The sort cost was already accounted for in the path. Make
* sure there are no excess columns in the inputs if sorting.
*/
if (best_path->outersortkeys)
{
@@ -1439,18 +1438,18 @@ create_hashjoin_plan(PlannerInfo *root,
}

/*
* Remove the hashclauses from the list of join qual clauses, leaving
* the list of quals that must be checked as qpquals.
* Remove the hashclauses from the list of join qual clauses, leaving the
* list of quals that must be checked as qpquals.
*/
hashclauses = get_actual_clauses(best_path->path_hashclauses);
joinclauses = list_difference(joinclauses, hashclauses);

/*
* Rearrange hashclauses, if needed, so that the outer variable is
* always on the left.
* Rearrange hashclauses, if needed, so that the outer variable is always
* on the left.
*/
hashclauses = get_switched_clauses(best_path->path_hashclauses,
best_path->jpath.outerjoinpath->parent->relids);
best_path->jpath.outerjoinpath->parent->relids);

/* Sort clauses into best execution order */
joinclauses = order_qual_clauses(root, joinclauses);
@@ -1551,23 +1550,22 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
/*
* Make a copy that will become the fixed clause.
*
* We used to try to do a shallow copy here, but that fails if there
* is a subplan in the arguments of the opclause. So just do a
* full copy.
* We used to try to do a shallow copy here, but that fails if there is a
* subplan in the arguments of the opclause. So just do a full copy.
*/
newclause = (OpExpr *) copyObject((Node *) clause);

/*
* Check to see if the indexkey is on the right; if so, commute
* the clause. The indexkey should be the side that refers to
* (only) the base relation.
* Check to see if the indexkey is on the right; if so, commute the
* clause. The indexkey should be the side that refers to (only) the
* base relation.
*/
if (!bms_equal(rinfo->left_relids, index->rel->relids))
CommuteClause(newclause);

/*
* Now, determine which index attribute this is, change the
* indexkey operand as needed, and get the index opclass.
* Now, determine which index attribute this is, change the indexkey
* operand as needed, and get the index opclass.
*/
linitial(newclause->args) =
fix_indexqual_operand(linitial(newclause->args),
@@ -1577,10 +1575,9 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
*fixed_indexquals = lappend(*fixed_indexquals, newclause);

/*
* Look up the (possibly commuted) operator in the operator class
* to get its strategy numbers and the recheck indicator. This
* also double-checks that we found an operator matching the
* index.
* Look up the (possibly commuted) operator in the operator class to
* get its strategy numbers and the recheck indicator. This also
* double-checks that we found an operator matching the index.
*/
get_op_opclass_properties(newclause->opno, opclass,
&stratno, &stratsubtype, &recheck);
@@ -1598,11 +1595,11 @@ static Node *
fix_indexqual_operand(Node *node, IndexOptInfo *index, Oid *opclass)
{
/*
* We represent index keys by Var nodes having the varno of the base
* table but varattno equal to the index's attribute number (index
* column position). This is a bit hokey ... would be cleaner to use
* a special-purpose node type that could not be mistaken for a
* regular Var. But it will do for now.
* We represent index keys by Var nodes having the varno of the base table
* but varattno equal to the index's attribute number (index column
* position). This is a bit hokey ... would be cleaner to use a
* special-purpose node type that could not be mistaken for a regular Var.
* But it will do for now.
*/
Var *result;
int pos;
@@ -1692,8 +1689,8 @@ get_switched_clauses(List *clauses, Relids outerrelids)
if (bms_is_subset(restrictinfo->right_relids, outerrelids))
{
/*
* Duplicate just enough of the structure to allow commuting
* the clause without changing the original list. Could use
* Duplicate just enough of the structure to allow commuting the
* clause without changing the original list. Could use
* copyObject, but a complete deep copy is overkill.
*/
OpExpr *temp = makeNode(OpExpr);
@@ -1934,9 +1931,9 @@ make_subqueryscan(List *qptlist,
Plan *plan = &node->scan.plan;

/*
* Cost is figured here for the convenience of prepunion.c. Note this
* is only correct for the case where qpqual is empty; otherwise
* caller should overwrite cost with a better estimate.
* Cost is figured here for the convenience of prepunion.c. Note this is
* only correct for the case where qpqual is empty; otherwise caller
* should overwrite cost with a better estimate.
*/
copy_plan_costsize(plan, subplan);
plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
@@ -1977,9 +1974,9 @@ make_append(List *appendplans, bool isTarget, List *tlist)
ListCell *subnode;

/*
* Compute cost as sum of subplan costs. We charge nothing extra for
* the Append itself, which perhaps is too optimistic, but since it
* doesn't do any selection or projection, it is a pretty cheap node.
* Compute cost as sum of subplan costs. We charge nothing extra for the
* Append itself, which perhaps is too optimistic, but since it doesn't do
* any selection or projection, it is a pretty cheap node.
*/
plan->startup_cost = 0;
plan->total_cost = 0;
@@ -2094,8 +2091,8 @@ make_hash(Plan *lefttree)
copy_plan_costsize(plan, lefttree);

/*
* For plausibility, make startup & total costs equal total cost of
* input plan; this only affects EXPLAIN display not decisions.
* For plausibility, make startup & total costs equal total cost of input
* plan; this only affects EXPLAIN display not decisions.
*/
plan->startup_cost = plan->total_cost;
plan->targetlist = copyObject(lefttree->targetlist);
@@ -2217,8 +2214,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
Oid *sortOperators;

/*
* We will need at most list_length(pathkeys) sort columns; possibly
* less
* We will need at most list_length(pathkeys) sort columns; possibly less
*/
numsortkeys = list_length(pathkeys);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
@@ -2236,14 +2232,14 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to an
* available Var in the tlist. If there isn't any, use the first
* one that is an expression in the input's vars.
* available Var in the tlist. If there isn't any, use the first one
* that is an expression in the input's vars.
*
* XXX if we have a choice, is there any way of figuring out which
* might be cheapest to execute? (For example, int4lt is likely
* much cheaper to execute than numericlt, but both might appear
* in the same pathkey sublist...) Not clear that we ever will
* have a choice in practice, so it may not matter.
* XXX if we have a choice, is there any way of figuring out which might
* be cheapest to execute? (For example, int4lt is likely much
* cheaper to execute than numericlt, but both might appear in the
* same pathkey sublist...) Not clear that we ever will have a choice
* in practice, so it may not matter.
*/
foreach(j, keysublist)
{
@@ -2296,13 +2292,13 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
}

/*
* The column might already be selected as a sort key, if the
* pathkeys contain duplicate entries. (This can happen in
* scenarios where multiple mergejoinable clauses mention the same
* var, for example.) So enter it only once in the sort arrays.
* The column might already be selected as a sort key, if the pathkeys
* contain duplicate entries. (This can happen in scenarios where
* multiple mergejoinable clauses mention the same var, for example.)
* So enter it only once in the sort arrays.
*/
numsortkeys = add_sort_column(tle->resno, pathkey->sortop,
numsortkeys, sortColIdx, sortOperators);
numsortkeys, sortColIdx, sortOperators);
}

Assert(numsortkeys > 0);
@@ -2328,8 +2324,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree)
Oid *sortOperators;

/*
* We will need at most list_length(sortcls) sort columns; possibly
* less
* We will need at most list_length(sortcls) sort columns; possibly less
*/
numsortkeys = list_length(sortcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
@@ -2348,7 +2343,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree)
* redundantly.
*/
numsortkeys = add_sort_column(tle->resno, sortcl->sortop,
numsortkeys, sortColIdx, sortOperators);
numsortkeys, sortColIdx, sortOperators);
}

Assert(numsortkeys > 0);
@@ -2384,8 +2379,7 @@ make_sort_from_groupcols(PlannerInfo *root,
Oid *sortOperators;

/*
* We will need at most list_length(groupcls) sort columns; possibly
* less
* We will need at most list_length(groupcls) sort columns; possibly less
*/
numsortkeys = list_length(groupcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
@@ -2404,7 +2398,7 @@ make_sort_from_groupcols(PlannerInfo *root,
* redundantly.
*/
numsortkeys = add_sort_column(tle->resno, grpcl->sortop,
numsortkeys, sortColIdx, sortOperators);
numsortkeys, sortColIdx, sortOperators);
grpno++;
}

@@ -2492,8 +2486,8 @@ make_agg(PlannerInfo *root, List *tlist, List *qual,
plan->total_cost = agg_path.total_cost;

/*
* We will produce a single output tuple if not grouping, and a tuple
* per group otherwise.
* We will produce a single output tuple if not grouping, and a tuple per
* group otherwise.
*/
if (aggstrategy == AGG_PLAIN)
plan->plan_rows = 1;
@@ -2501,13 +2495,13 @@ make_agg(PlannerInfo *root, List *tlist, List *qual,
plan->plan_rows = numGroups;

/*
* We also need to account for the cost of evaluation of the qual (ie,
* the HAVING clause) and the tlist. Note that cost_qual_eval doesn't
* charge anything for Aggref nodes; this is okay since they are
* really comparable to Vars.
* We also need to account for the cost of evaluation of the qual (ie, the
* HAVING clause) and the tlist. Note that cost_qual_eval doesn't charge
* anything for Aggref nodes; this is okay since they are really
* comparable to Vars.
*
* See notes in grouping_planner about why this routine and make_group
* are the only ones in this file that worry about tlist eval cost.
* See notes in grouping_planner about why this routine and make_group are
* the only ones in this file that worry about tlist eval cost.
*/
if (qual)
{
@@ -2559,16 +2553,15 @@ make_group(PlannerInfo *root,
plan->plan_rows = numGroups;

/*
* We also need to account for the cost of evaluation of the qual (ie,
* the HAVING clause) and the tlist.
* We also need to account for the cost of evaluation of the qual (ie, the
* HAVING clause) and the tlist.
*
* XXX this double-counts the cost of evaluation of any expressions used
* for grouping, since in reality those will have been evaluated at a
* lower plan level and will only be copied by the Group node. Worth
* fixing?
* XXX this double-counts the cost of evaluation of any expressions used for
* grouping, since in reality those will have been evaluated at a lower
* plan level and will only be copied by the Group node. Worth fixing?
*
* See notes in grouping_planner about why this routine and make_agg are
* the only ones in this file that worry about tlist eval cost.
* See notes in grouping_planner about why this routine and make_agg are the
* only ones in this file that worry about tlist eval cost.
*/
if (qual)
{
@@ -2607,16 +2600,16 @@ make_unique(Plan *lefttree, List *distinctList)
copy_plan_costsize(plan, lefttree);

/*
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples. (XXX
* probably this is an overestimate.)
* Charge one cpu_operator_cost per comparison per input tuple. We assume
* all columns get compared at most of the tuples. (XXX probably this is
* an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;

/*
* plan->plan_rows is left as a copy of the input subplan's plan_rows;
* ie, we assume the filter removes nothing. The caller must alter
* this if he has a better idea.
* plan->plan_rows is left as a copy of the input subplan's plan_rows; ie,
* we assume the filter removes nothing. The caller must alter this if he
* has a better idea.
*/

plan->targetlist = copyObject(lefttree->targetlist);
@@ -2625,8 +2618,7 @@ make_unique(Plan *lefttree, List *distinctList)
plan->righttree = NULL;

/*
* convert SortClause list into array of attr indexes, as wanted by
* exec
* convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
@@ -2664,8 +2656,8 @@ make_setop(SetOpCmd cmd, Plan *lefttree,
copy_plan_costsize(plan, lefttree);

/*
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples.
* Charge one cpu_operator_cost per comparison per input tuple. We assume
* all columns get compared at most of the tuples.
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;

@@ -2683,8 +2675,7 @@ make_setop(SetOpCmd cmd, Plan *lefttree,
plan->righttree = NULL;

/*
* convert SortClause list into array of attr indexes, as wanted by
* exec
* convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
@@ -2727,8 +2718,8 @@ make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
* building a subquery then it's important to report correct info to the
* outer planner.
*
* When the offset or count couldn't be estimated, use 10% of the
* estimated number of rows emitted from the subplan.
* When the offset or count couldn't be estimated, use 10% of the estimated
* number of rows emitted from the subplan.
*/
if (offset_est != 0)
{
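
A hedged sketch of the estimation rule described above (hypothetical table t): a constant count is used as-is, while a count that cannot be estimated at plan time falls back to 10% of the subplan's estimated rows.

    SELECT * FROM t LIMIT 10;                   -- plan_rows = 10
    PREPARE q(int) AS SELECT * FROM t LIMIT $1; -- count unknown when planned,
                                                -- so assume 10% of t's rows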
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.109 2005/09/28 21:17:02 tgl Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.110 2005/10/15 02:49:20 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -221,7 +221,7 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode,
|
||||
result = bms_add_members(result,
|
||||
distribute_quals_to_rels(root,
|
||||
lfirst(l),
|
||||
below_outer_join));
|
||||
below_outer_join));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -243,17 +243,17 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode,
|
||||
ListCell *qual;
|
||||
|
||||
/*
|
||||
* Order of operations here is subtle and critical. First we
|
||||
* recurse to handle sub-JOINs. Their join quals will be placed
|
||||
* without regard for whether this level is an outer join, which
|
||||
* is correct. Then we place our own join quals, which are
|
||||
* restricted by lower outer joins in any case, and are forced to
|
||||
* this level if this is an outer join and they mention the outer
|
||||
* side. Finally, if this is an outer join, we mark baserels
|
||||
* contained within the inner side(s) with our own rel set; this
|
||||
* will prevent quals above us in the join tree that use those
|
||||
* rels from being pushed down below this level. (It's okay for
|
||||
* upper quals to be pushed down to the outer side, however.)
|
||||
* Order of operations here is subtle and critical. First we recurse
|
||||
* to handle sub-JOINs. Their join quals will be placed without
|
||||
* regard for whether this level is an outer join, which is correct.
|
||||
* Then we place our own join quals, which are restricted by lower
|
||||
* outer joins in any case, and are forced to this level if this is an
|
||||
* outer join and they mention the outer side. Finally, if this is an
|
||||
* outer join, we mark baserels contained within the inner side(s)
|
||||
* with our own rel set; this will prevent quals above us in the join
|
||||
* tree that use those rels from being pushed down below this level.
|
||||
* (It's okay for upper quals to be pushed down to the outer side,
|
||||
* however.)
|
||||
*/
|
||||
switch (j->jointype)
|
||||
{
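
The three-phase ordering in that comment can be seen in a short control-flow sketch (toy types and no-op helpers, not the real planner structures):

    /* Toy sketch of the ordering constraint described above: recurse into
     * sub-JOINs first, then place this level's own quals, then fence off
     * the inner side of an outer join. All names are stand-ins. */
    typedef struct JoinNode
    {
        struct JoinNode *left;
        struct JoinNode *right;
        int         is_outer_join;
    } JoinNode;

    static void place_quals_for_level(JoinNode *j) { (void) j; /* stub */ }
    static void mark_inner_side_rels(JoinNode *j)  { (void) j; /* stub */ }

    static void
    distribute(JoinNode *j)
    {
        if (j == NULL)
            return;
        distribute(j->left);            /* 1. handle sub-JOINs first */
        distribute(j->right);
        place_quals_for_level(j);       /* 2. then this level's own quals */
        if (j->is_outer_join)
            mark_inner_side_rels(j);    /* 3. finally mark the inner side(s) */
    }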
@@ -302,19 +302,19 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode,
case JOIN_UNION:

/*
* This is where we fail if upper levels of planner
* haven't rewritten UNION JOIN as an Append ...
* This is where we fail if upper levels of planner haven't
* rewritten UNION JOIN as an Append ...
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("UNION JOIN is not implemented")));
nonnullable_rels = NULL; /* keep compiler quiet */
nonnullable_rels = NULL; /* keep compiler quiet */
nullable_rels = NULL;
break;
default:
elog(ERROR, "unrecognized join type: %d",
(int) j->jointype);
nonnullable_rels = NULL; /* keep compiler quiet */
nonnullable_rels = NULL; /* keep compiler quiet */
nullable_rels = NULL;
break;
}
@@ -349,19 +349,19 @@ mark_baserels_for_outer_join(PlannerInfo *root, Relids rels, Relids outerrels)
RelOptInfo *rel = find_base_rel(root, relno);

/*
* Since we do this bottom-up, any outer-rels previously marked
* should be within the new outer join set.
* Since we do this bottom-up, any outer-rels previously marked should
* be within the new outer join set.
*/
Assert(bms_is_subset(rel->outerjoinset, outerrels));

/*
* Presently the executor cannot support FOR UPDATE/SHARE marking of
* rels appearing on the nullable side of an outer join. (It's
* somewhat unclear what that would mean, anyway: what should we
* mark when a result row is generated from no element of the
* nullable relation?) So, complain if target rel is FOR UPDATE/SHARE.
* It's sufficient to make this check once per rel, so do it only
* if rel wasn't already known nullable.
* somewhat unclear what that would mean, anyway: what should we mark
* when a result row is generated from no element of the nullable
* relation?) So, complain if target rel is FOR UPDATE/SHARE. It's
* sufficient to make this check once per rel, so do it only if rel
* wasn't already known nullable.
*/
if (rel->outerjoinset == NULL)
{
@@ -430,9 +430,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
/*
* If the clause is variable-free, we force it to be evaluated at its
* original syntactic level. Note that this should not happen for
* top-level clauses, because query_planner() special-cases them. But
* it will happen for variable-free JOIN/ON clauses. We don't have to
* be real smart about such a case, we just have to be correct.
* top-level clauses, because query_planner() special-cases them. But it
* will happen for variable-free JOIN/ON clauses. We don't have to be
* real smart about such a case, we just have to be correct.
*/
if (bms_is_empty(relids))
relids = qualscope;
@@ -446,8 +446,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
/*
* If the qual came from implied-equality deduction, we always
* evaluate the qual at its natural semantic level. It is the
* responsibility of the deducer not to create any quals that
* should be delayed by outer-join rules.
* responsibility of the deducer not to create any quals that should
* be delayed by outer-join rules.
*/
Assert(bms_equal(relids, qualscope));
/* Needn't feed it back for more deductions */
@@ -457,28 +457,28 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else if (bms_overlap(relids, outerjoin_nonnullable))
{
/*
* The qual is attached to an outer join and mentions (some of
* the) rels on the nonnullable side. Force the qual to be
* evaluated exactly at the level of joining corresponding to the
* outer join. We cannot let it get pushed down into the
* nonnullable side, since then we'd produce no output rows,
* rather than the intended single null-extended row, for any
* nonnullable-side rows failing the qual.
* The qual is attached to an outer join and mentions (some of the)
* rels on the nonnullable side. Force the qual to be evaluated
* exactly at the level of joining corresponding to the outer join. We
* cannot let it get pushed down into the nonnullable side, since then
* we'd produce no output rows, rather than the intended single
* null-extended row, for any nonnullable-side rows failing the qual.
*
* Note: an outer-join qual that mentions only nullable-side rels can
* be pushed down into the nullable side without changing the join
* Note: an outer-join qual that mentions only nullable-side rels can be
* pushed down into the nullable side without changing the join
* result, so we treat it the same as an ordinary inner-join qual,
* except for not setting maybe_equijoin (see below).
*/
relids = qualscope;

/*
* We can't use such a clause to deduce equijoin (the left and
* right sides might be unequal above the join because one of
* them has gone to NULL) ... but we might be able to use it
* for more limited purposes. Note: for the current uses of
* deductions from an outer-join clause, it seems safe to make
* the deductions even when the clause is below a higher-level
* outer join; so we do not check below_outer_join here.
* We can't use such a clause to deduce equijoin (the left and right
* sides might be unequal above the join because one of them has gone
* to NULL) ... but we might be able to use it for more limited
* purposes. Note: for the current uses of deductions from an
* outer-join clause, it seems safe to make the deductions even when
* the clause is below a higher-level outer join; so we do not check
* below_outer_join here.
*/
maybe_equijoin = false;
maybe_outer_join = true;
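
As an illustration of the placement rule spelled out above, here is a toy C sketch using a plain bitmask in place of the planner's Relids bitmapsets (all names are stand-ins, not the real API):

    typedef unsigned int RelSet;    /* toy bitmapset: bit i => relation i */

    #define OVERLAPS(a, b)  (((a) & (b)) != 0)

    /* Decide the level at which a qual may be evaluated, per the rules
     * described above; flags report whether equijoin deduction may use it. */
    static RelSet
    place_qual(RelSet relids, RelSet qualscope, RelSet nonnullable,
               int is_deduced, int *maybe_equijoin, int *maybe_outer_join)
    {
        *maybe_equijoin = 0;
        *maybe_outer_join = 0;

        if (is_deduced)
            return qualscope;   /* deduced quals stay at their natural level */

        if (OVERLAPS(relids, nonnullable))
        {
            /* outer-join qual touching the nonnullable side: pin to the join */
            *maybe_outer_join = 1;
            return qualscope;
        }

        /* ordinary qual: evaluable once all mentioned rels are available,
         * modulo lower outer joins (the subset test in the real code) */
        *maybe_equijoin = 1;
        return relids;
    }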
@@ -486,15 +486,14 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
* For a non-outer-join qual, we can evaluate the qual as soon as
* (1) we have all the rels it mentions, and (2) we are at or
* above any outer joins that can null any of these rels and are
* below the syntactic location of the given qual. To enforce the
* latter, scan the base rels listed in relids, and merge their
* outer-join sets into the clause's own reference list. At the
* time we are called, the outerjoinset of each baserel will show
* exactly those outer joins that are below the qual in the join
* tree.
* For a non-outer-join qual, we can evaluate the qual as soon as (1)
* we have all the rels it mentions, and (2) we are at or above any
* outer joins that can null any of these rels and are below the
* syntactic location of the given qual. To enforce the latter, scan
* the base rels listed in relids, and merge their outer-join sets
* into the clause's own reference list. At the time we are called,
* the outerjoinset of each baserel will show exactly those outer
* joins that are below the qual in the join tree.
*/
Relids addrelids = NULL;
Relids tmprelids;
@@ -513,13 +512,13 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
if (bms_is_subset(addrelids, relids))
{
/*
* Qual is not delayed by any lower outer-join restriction.
* If it is not itself below or within an outer join, we
* can consider it "valid everywhere", so consider feeding
* it to the equijoin machinery. (If it is within an outer
* join, we can't consider it "valid everywhere": once the
* contained variables have gone to NULL, we'd be asserting
* things like NULL = NULL, which is not true.)
* Qual is not delayed by any lower outer-join restriction. If it
* is not itself below or within an outer join, we can consider it
* "valid everywhere", so consider feeding it to the equijoin
* machinery. (If it is within an outer join, we can't consider
* it "valid everywhere": once the contained variables have gone
* to NULL, we'd be asserting things like NULL = NULL, which is
* not true.)
*/
if (!below_outer_join && outerjoin_nonnullable == NULL)
maybe_equijoin = true;
@@ -533,8 +532,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
Assert(bms_is_subset(relids, qualscope));

/*
* Because application of the qual will be delayed by outer
* join, we mustn't assume its vars are equal everywhere.
* Because application of the qual will be delayed by outer join,
* we mustn't assume its vars are equal everywhere.
*/
maybe_equijoin = false;
}
@@ -543,11 +542,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
}

/*
* Mark the qual as "pushed down" if it can be applied at a level
* below its original syntactic level. This allows us to distinguish
* original JOIN/ON quals from higher-level quals pushed down to the
* same joinrel. A qual originating from WHERE is always considered
* "pushed down".
* Mark the qual as "pushed down" if it can be applied at a level below
* its original syntactic level. This allows us to distinguish original
* JOIN/ON quals from higher-level quals pushed down to the same joinrel.
* A qual originating from WHERE is always considered "pushed down".
*/
if (!is_pushed_down)
is_pushed_down = !bms_equal(relids, qualscope);
@@ -573,25 +571,24 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
rel = find_base_rel(root, bms_singleton_member(relids));

/*
* Check for a "mergejoinable" clause even though it's not a
* join clause. This is so that we can recognize that "a.x =
* a.y" makes x and y eligible to be considered equal, even
* when they belong to the same rel. Without this, we would
* not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q"
* allows us to consider z and q equal after their rels are
* joined.
* Check for a "mergejoinable" clause even though it's not a join
* clause. This is so that we can recognize that "a.x = a.y"
* makes x and y eligible to be considered equal, even when they
* belong to the same rel. Without this, we would not recognize
* that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to
* consider z and q equal after their rels are joined.
*/
check_mergejoinable(restrictinfo);
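
The transitivity point in that comment ("a.x = a.y AND a.x = b.z AND a.y = c.q" lets z and q be treated as equal) is just the closure of an equivalence relation. A minimal, runnable C demonstration using union-find (illustrative only, not planner code):

    #include <stdio.h>

    /* Union-find over a handful of expression ids: each equality merges
     * two sets; two exprs are deducibly equal iff they share a set. */
    #define NEXPR 4             /* 0: a.x, 1: a.y, 2: b.z, 3: c.q */

    static int  parent[NEXPR];

    static int  find(int x) { return parent[x] == x ? x : (parent[x] = find(parent[x])); }
    static void merge(int a, int b) { parent[find(a)] = find(b); }

    int
    main(void)
    {
        for (int i = 0; i < NEXPR; i++)
            parent[i] = i;

        merge(0, 1);            /* a.x = a.y */
        merge(0, 2);            /* a.x = b.z */
        merge(1, 3);            /* a.y = c.q */

        /* b.z and c.q are now provably equal by transitivity */
        printf("b.z == c.q ? %s\n", find(2) == find(3) ? "yes" : "no");
        return 0;
    }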

/*
* If the clause was deduced from implied equality, check to
* see whether it is redundant with restriction clauses we
* already have for this rel. Note we cannot apply this check
* to user-written clauses, since we haven't found the
* canonical pathkey sets yet while processing user clauses.
* (NB: no comparable check is done in the join-clause case;
* redundancy will be detected when the join clause is moved
* into a join rel's restriction list.)
* If the clause was deduced from implied equality, check to see
* whether it is redundant with restriction clauses we already
* have for this rel. Note we cannot apply this check to
* user-written clauses, since we haven't found the canonical
* pathkey sets yet while processing user clauses. (NB: no
* comparable check is done in the join-clause case; redundancy
* will be detected when the join clause is moved into a join
* rel's restriction list.)
*/
if (!is_deduced ||
!qual_is_redundant(root, restrictinfo,
@@ -605,17 +602,17 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
case BMS_MULTIPLE:

/*
* 'clause' is a join clause, since there is more than one rel
* in the relid set.
* 'clause' is a join clause, since there is more than one rel in
* the relid set.
*/

/*
* Check for hash or mergejoinable operators.
*
* We don't bother setting the hashjoin info if we're not going
* to need it. We do want to know about mergejoinable ops in
* all cases, however, because we use mergejoinable ops for
* other purposes such as detecting redundant clauses.
* We don't bother setting the hashjoin info if we're not going to
* need it. We do want to know about mergejoinable ops in all
* cases, however, because we use mergejoinable ops for other
* purposes such as detecting redundant clauses.
*/
check_mergejoinable(restrictinfo);
if (enable_hashjoin)
@@ -628,9 +625,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,

/*
* Add vars used in the join clause to targetlists of their
* relations, so that they will be emitted by the plan nodes
* that scan those relations (else they won't be available at
* the join node!).
* relations, so that they will be emitted by the plan nodes that
* scan those relations (else they won't be available at the join
* node!).
*/
vars = pull_var_clause(clause, false);
add_vars_to_targetlist(root, vars, relids);
@@ -639,17 +636,16 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
default:

/*
* 'clause' references no rels, and therefore we have no place
* to attach it. Shouldn't get here if callers are working
* properly.
* 'clause' references no rels, and therefore we have no place to
* attach it. Shouldn't get here if callers are working properly.
*/
elog(ERROR, "cannot cope with variable-free clause");
break;
}

/*
* If the clause has a mergejoinable operator, we may be able to
* deduce more things from it under the principle of transitivity.
* If the clause has a mergejoinable operator, we may be able to deduce
* more things from it under the principle of transitivity.
*
* If it is not an outer-join qualification nor bubbled up due to an outer
* join, then the two sides represent equivalent PathKeyItems for path
@@ -744,8 +740,8 @@ process_implied_equality(PlannerInfo *root,

/*
* If the exprs involve a single rel, we need to look at that rel's
* baserestrictinfo list. If multiple rels, we can scan the joininfo
* list of any of 'em.
* baserestrictinfo list. If multiple rels, we can scan the joininfo list
* of any of 'em.
*/
if (membership == BMS_SINGLETON)
{
@@ -767,8 +763,8 @@ process_implied_equality(PlannerInfo *root,
}

/*
* Scan to see if equality is already known. If so, we're done in the
* add case, and done after removing it in the delete case.
* Scan to see if equality is already known. If so, we're done in the add
* case, and done after removing it in the delete case.
*/
foreach(itm, restrictlist)
{
@@ -791,7 +787,7 @@ process_implied_equality(PlannerInfo *root,
{
/* delete it from local restrictinfo list */
rel1->baserestrictinfo = list_delete_ptr(rel1->baserestrictinfo,
restrictinfo);
restrictinfo);
}
else
{
@@ -808,8 +804,8 @@ process_implied_equality(PlannerInfo *root,
return;

/*
* This equality is new information, so construct a clause
* representing it to add to the query data structures.
* This equality is new information, so construct a clause representing it
* to add to the query data structures.
*/
ltype = exprType(item1);
rtype = exprType(item2);
@@ -818,14 +814,14 @@ process_implied_equality(PlannerInfo *root,
if (!HeapTupleIsValid(eq_operator))
{
/*
* Would it be safe to just not add the equality to the query if
* we have no suitable equality operator for the combination of
* Would it be safe to just not add the equality to the query if we
* have no suitable equality operator for the combination of
* datatypes? NO, because sortkey selection may screw up anyway.
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("could not identify an equality operator for types %s and %s",
format_type_be(ltype), format_type_be(rtype))));
errmsg("could not identify an equality operator for types %s and %s",
format_type_be(ltype), format_type_be(rtype))));
}
pgopform = (Form_pg_operator) GETSTRUCT(eq_operator);

@@ -856,8 +852,8 @@ process_implied_equality(PlannerInfo *root,
/*
* Push the new clause into all the appropriate restrictinfo lists.
*
* Note: we mark the qual "pushed down" to ensure that it can never be
* taken for an original JOIN/ON clause.
* Note: we mark the qual "pushed down" to ensure that it can never be taken
* for an original JOIN/ON clause.
*/
distribute_qual_to_rels(root, (Node *) clause,
true, true, false, NULL, relids);
@@ -911,9 +907,9 @@ qual_is_redundant(PlannerInfo *root,
return false;

/*
* Scan existing quals to find those referencing same pathkeys.
* Usually there will be few, if any, so build a list of just the
* interesting ones.
* Scan existing quals to find those referencing same pathkeys. Usually
* there will be few, if any, so build a list of just the interesting
* ones.
*/
oldquals = NIL;
foreach(olditem, restrictlist)
@@ -933,11 +929,10 @@ qual_is_redundant(PlannerInfo *root,

/*
* Now, we want to develop a list of exprs that are known equal to the
* left side of the new qual. We traverse the old-quals list
* repeatedly to transitively expand the exprs list. If at any point
* we find we can reach the right-side expr of the new qual, we are
* done. We give up when we can't expand the equalexprs list any
* more.
* left side of the new qual. We traverse the old-quals list repeatedly
* to transitively expand the exprs list. If at any point we find we can
* reach the right-side expr of the new qual, we are done. We give up
* when we can't expand the equalexprs list any more.
*/
equalexprs = list_make1(newleft);
do
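
The expansion loop described just above is a reachability search: grow the set of expressions known equal to the new qual's left side until the right side appears or no progress can be made. A hedged standalone sketch (expression ids and the pair array are toy stand-ins):

    #include <stdbool.h>

    /* Old quals are (left, right) pairs of expression ids; the new qual
     * (newleft, newright) is redundant if newright is reachable from
     * newleft through the old pairs. Assumes expr ids < 64. */
    typedef struct { int left, right; } EqPair;

    static bool
    is_redundant(int newleft, int newright, const EqPair *old, int nold)
    {
        bool        reachable[64] = { false };
        bool        progressed;

        reachable[newleft] = true;
        do
        {
            progressed = false;
            for (int i = 0; i < nold; i++)
            {
                int     a = old[i].left,
                        b = old[i].right;

                if (reachable[a] != reachable[b])
                {
                    reachable[a] = reachable[b] = true;
                    progressed = true;
                }
            }
            if (reachable[newright])
                return true;    /* provable: the new qual is redundant */
        } while (progressed);
        return false;
    }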

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.9 2005/09/21 19:15:27 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.10 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,12 +43,12 @@ typedef struct

static bool find_minmax_aggs_walker(Node *node, List **context);
static bool build_minmax_path(PlannerInfo *root, RelOptInfo *rel,
MinMaxAggInfo *info);
MinMaxAggInfo *info);
static ScanDirection match_agg_to_index_col(MinMaxAggInfo *info,
IndexOptInfo *index, int indexcol);
IndexOptInfo *index, int indexcol);
static void make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info,
List *constant_quals);
static Node *replace_aggs_with_params_mutator(Node *node, List **context);
List *constant_quals);
static Node *replace_aggs_with_params_mutator(Node *node, List **context);
static Oid fetch_agg_sort_op(Oid aggfnoid);


@@ -62,7 +62,7 @@ static Oid fetch_agg_sort_op(Oid aggfnoid);
* generic scan-all-the-rows plan.
*
* We are passed the preprocessed tlist, and the best path
* devised for computing the input of a standard Agg node. If we are able
* devised for computing the input of a standard Agg node. If we are able
* to optimize all the aggregates, and the result is estimated to be cheaper
* than the generic aggregate method, then generate and return a Plan that
* does it that way. Otherwise, return NULL.
@@ -87,24 +87,24 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
if (!parse->hasAggs)
return NULL;

Assert(!parse->setOperations); /* shouldn't get here if a setop */
Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */
Assert(!parse->setOperations); /* shouldn't get here if a setop */
Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */

/*
* Reject unoptimizable cases.
*
* We don't handle GROUP BY, because our current implementations of
* grouping require looking at all the rows anyway, and so there's not
* much point in optimizing MIN/MAX.
* We don't handle GROUP BY, because our current implementations of grouping
* require looking at all the rows anyway, and so there's not much point
* in optimizing MIN/MAX.
*/
if (parse->groupClause)
return NULL;

/*
* We also restrict the query to reference exactly one table, since
* join conditions can't be handled reasonably. (We could perhaps
* handle a query containing cartesian-product joins, but it hardly
* seems worth the trouble.)
* We also restrict the query to reference exactly one table, since join
* conditions can't be handled reasonably. (We could perhaps handle a
* query containing cartesian-product joins, but it hardly seems worth the
* trouble.)
*/
Assert(parse->jointree != NULL && IsA(parse->jointree, FromExpr));
if (list_length(parse->jointree->fromlist) != 1)
@@ -118,8 +118,8 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
rel = find_base_rel(root, rtr->rtindex);

/*
* Also reject cases with subplans or volatile functions in WHERE.
* This may be overly paranoid, but it's not entirely clear if the
* Also reject cases with subplans or volatile functions in WHERE. This
* may be overly paranoid, but it's not entirely clear if the
* transformation is safe then.
*/
if (contain_subplans(parse->jointree->quals) ||
@@ -127,17 +127,16 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
return NULL;

/*
* Since this optimization is not applicable all that often, we want
* to fall out before doing very much work if possible. Therefore
* we do the work in several passes. The first pass scans the tlist
* and HAVING qual to find all the aggregates and verify that
* each of them is a MIN/MAX aggregate. If that succeeds, the second
* pass looks at each aggregate to see if it is optimizable; if so
* we make an IndexPath describing how we would scan it. (We do not
* try to optimize if only some aggs are optimizable, since that means
* we'll have to scan all the rows anyway.) If that succeeds, we have
* enough info to compare costs against the generic implementation.
* Only if that test passes do we build a Plan.
* Since this optimization is not applicable all that often, we want to
* fall out before doing very much work if possible. Therefore we do the
* work in several passes. The first pass scans the tlist and HAVING qual
* to find all the aggregates and verify that each of them is a MIN/MAX
* aggregate. If that succeeds, the second pass looks at each aggregate
* to see if it is optimizable; if so we make an IndexPath describing how
* we would scan it. (We do not try to optimize if only some aggs are
* optimizable, since that means we'll have to scan all the rows anyway.)
* If that succeeds, we have enough info to compare costs against the
* generic implementation. Only if that test passes do we build a Plan.
*/

/* Pass 1: find all the aggregates */
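
The staged bail-out described in that comment is easy to see in skeleton form; a hedged outline with stand-in fields rather than the real planner state:

    /* Skeleton of the staged approach described above: cheap checks first,
     * then per-aggregate analysis, then a cost gate, and only then a plan.
     * All fields are illustrative stand-ins. */
    typedef struct
    {
        int         aggs_are_minmax;    /* pass 1 result */
        int         aggs_indexable;     /* pass 2 result */
        double      indexed_cost;
        double      generic_cost;
    } MinMaxCheck;

    static int
    should_build_minmax_plan(const MinMaxCheck *c)
    {
        if (!c->aggs_are_minmax)
            return 0;                   /* pass 1 failed: use generic plan */
        if (!c->aggs_indexable)
            return 0;                   /* pass 2 failed: must scan all rows */
        if (c->indexed_cost >= c->generic_cost)
            return 0;                   /* not cheaper: keep generic plan */
        return 1;                       /* only now build the optimized plan */
    }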
@@ -161,9 +160,9 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
/*
* Make the cost comparison.
*
* Note that we don't include evaluation cost of the tlist here;
* this is OK since it isn't included in best_path's cost either,
* and should be the same in either case.
* Note that we don't include evaluation cost of the tlist here; this is OK
* since it isn't included in best_path's cost either, and should be the
* same in either case.
*/
cost_agg(&agg_p, root, AGG_PLAIN, list_length(aggs_list),
0, 0,
@@ -174,13 +173,13 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
return NULL; /* too expensive */

/*
* OK, we are going to generate an optimized plan. The first thing we
* need to do is look for any non-variable WHERE clauses that query_planner
* might have removed from the basic plan. (Normal WHERE clauses will
* be properly incorporated into the sub-plans by create_plan.) If there
* are any, they will be in a gating Result node atop the best_path.
* They have to be incorporated into a gating Result in each sub-plan
* in order to produce the semantically correct result.
* OK, we are going to generate an optimized plan. The first thing we
* need to do is look for any non-variable WHERE clauses that
* query_planner might have removed from the basic plan. (Normal WHERE
* clauses will be properly incorporated into the sub-plans by
* create_plan.) If there are any, they will be in a gating Result node
* atop the best_path. They have to be incorporated into a gating Result
* in each sub-plan in order to produce the semantically correct result.
*/
if (IsA(best_path, ResultPath))
{
@@ -275,8 +274,8 @@ find_minmax_aggs_walker(Node *node, List **context)
*context = lappend(*context, info);

/*
* We need not recurse into the argument, since it can't contain
* any aggregates.
* We need not recurse into the argument, since it can't contain any
* aggregates.
*/
return false;
}
@@ -325,8 +324,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)

/*
* Look for a match to one of the index columns. (In a stupidly
* designed index, there could be multiple matches, but we only
* care about the first one.)
* designed index, there could be multiple matches, but we only care
* about the first one.)
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
{
@@ -340,12 +339,12 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
/*
* If the match is not at the first index column, we have to verify
* that there are "x = something" restrictions on all the earlier
* index columns. Since we'll need the restrictclauses list anyway
* to build the path, it's convenient to extract that first and then
* look through it for the equality restrictions.
* index columns. Since we'll need the restrictclauses list anyway to
* build the path, it's convenient to extract that first and then look
* through it for the equality restrictions.
*/
restrictclauses = group_clauses_by_indexkey(index,
index->rel->baserestrictinfo,
index->rel->baserestrictinfo,
NIL,
NULL,
&found_clause);
@@ -354,8 +353,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
continue; /* definitely haven't got enough */
for (prevcol = 0; prevcol < indexcol; prevcol++)
{
List *rinfos = (List *) list_nth(restrictclauses, prevcol);
ListCell *ll;
List *rinfos = (List *) list_nth(restrictclauses, prevcol);
ListCell *ll;

foreach(ll, rinfos)
{
@@ -453,9 +452,9 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
NullTest *ntest;

/*
* Generate a suitably modified query. Much of the work here is
* probably unnecessary in the normal case, but we want to make it look
* good if someone tries to EXPLAIN the result.
* Generate a suitably modified query. Much of the work here is probably
* unnecessary in the normal case, but we want to make it look good if
* someone tries to EXPLAIN the result.
*/
memcpy(&subroot, root, sizeof(PlannerInfo));
subroot.parse = subparse = (Query *) copyObject(root->parse);
@@ -489,18 +488,17 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
false, true);

/*
* Generate the plan for the subquery. We already have a Path for
* the basic indexscan, but we have to convert it to a Plan and
* attach a LIMIT node above it. We might need a gating Result, too,
* to handle any non-variable qual clauses.
* Generate the plan for the subquery. We already have a Path for the
* basic indexscan, but we have to convert it to a Plan and attach a LIMIT
* node above it. We might need a gating Result, too, to handle any
* non-variable qual clauses.
*
* Also we must add a "WHERE foo IS NOT NULL" restriction to the
* indexscan, to be sure we don't return a NULL, which'd be contrary
* to the standard behavior of MIN/MAX. XXX ideally this should be
* done earlier, so that the selectivity of the restriction could be
* included in our cost estimates. But that looks painful, and in
* most cases the fraction of NULLs isn't high enough to change the
* decision.
* Also we must add a "WHERE foo IS NOT NULL" restriction to the indexscan,
* to be sure we don't return a NULL, which'd be contrary to the standard
* behavior of MIN/MAX. XXX ideally this should be done earlier, so that
* the selectivity of the restriction could be included in our cost
* estimates. But that looks painful, and in most cases the fraction of
* NULLs isn't high enough to change the decision.
*/
plan = create_plan(&subroot, (Path *) info->path);
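
Conceptually, the subplan built here implements the familiar rewrite of MIN into an ordered index scan: in SQL terms (illustrative), SELECT min(x) FROM t behaves like SELECT x FROM t WHERE x IS NOT NULL ORDER BY x LIMIT 1. A toy C equivalent of that scan over values already in index order:

    /* Walk values in ascending (index) order, skip NULLs, stop at the
     * first hit -- the LIMIT 1. *found is 0 if every value was NULL,
     * mirroring MIN's NULL result. Illustrative only. */
    static double
    min_via_ordered_scan(const double *ordered_vals, const int *is_null,
                         int n, int *found)
    {
        *found = 0;
        for (int i = 0; i < n; i++)
        {
            if (is_null[i])
                continue;       /* the added "x IS NOT NULL" restriction */
            *found = 1;
            return ordered_vals[i];     /* first non-NULL value is the min */
        }
        return 0.0;
    }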

@@ -517,7 +515,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
copyObject(constant_quals),
plan);

plan = (Plan *) make_limit(plan,
plan = (Plan *) make_limit(plan,
subparse->limitOffset,
subparse->limitCount,
0, 1);
@@ -534,7 +532,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
* Replace original aggregate calls with subplan output Params
*/
static Node *
replace_aggs_with_params_mutator(Node *node, List **context)
replace_aggs_with_params_mutator(Node *node, List **context)
{
if (node == NULL)
return NULL;

@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.88 2005/09/28 21:17:02 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.89 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@
* does not use grouping
*
* Note: the PlannerInfo node also includes a query_pathkeys field, which is
* both an input and an output of query_planner(). The input value signals
* both an input and an output of query_planner(). The input value signals
* query_planner that the indicated sort order is wanted in the final output
* plan. But this value has not yet been "canonicalized", since the needed
* info does not get computed until we scan the qual clauses. We canonicalize
@@ -99,7 +99,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
if (parse->jointree->fromlist == NIL)
{
*cheapest_path = (Path *) create_result_path(NULL, NULL,
(List *) parse->jointree->quals);
(List *) parse->jointree->quals);
*sorted_path = NULL;
return;
}
@@ -107,21 +107,21 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
/*
* Pull out any non-variable WHERE clauses so these can be put in a
* toplevel "Result" node, where they will gate execution of the whole
* plan (the Result will not invoke its descendant plan unless the
* quals are true). Note that any *really* non-variable quals will
* have been optimized away by eval_const_expressions(). What we're
* mostly interested in here is quals that depend only on outer-level
* vars, although if the qual reduces to "WHERE FALSE" this path will
* also be taken.
* plan (the Result will not invoke its descendant plan unless the quals
* are true). Note that any *really* non-variable quals will have been
* optimized away by eval_const_expressions(). What we're mostly
* interested in here is quals that depend only on outer-level vars,
* although if the qual reduces to "WHERE FALSE" this path will also be
* taken.
*/
parse->jointree->quals = (Node *)
pull_constant_clauses((List *) parse->jointree->quals,
&constant_quals);

/*
* Init planner lists to empty. We create the base_rel_array with a
* size that will be sufficient if no pullups or inheritance additions
* happen ... otherwise it will be enlarged as needed.
* Init planner lists to empty. We create the base_rel_array with a size
* that will be sufficient if no pullups or inheritance additions happen
* ... otherwise it will be enlarged as needed.
*
* NOTE: in_info_list was set up by subquery_planner, do not touch here
*/
@@ -141,33 +141,32 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
add_base_rels_to_query(root, (Node *) parse->jointree);

/*
* Examine the targetlist and qualifications, adding entries to
* baserel targetlists for all referenced Vars. Restrict and join
* clauses are added to appropriate lists belonging to the mentioned
* relations. We also build lists of equijoined keys for pathkey
* construction.
* Examine the targetlist and qualifications, adding entries to baserel
* targetlists for all referenced Vars. Restrict and join clauses are
* added to appropriate lists belonging to the mentioned relations. We
* also build lists of equijoined keys for pathkey construction.
*
* Note: all subplan nodes will have "flat" (var-only) tlists. This
* implies that all expression evaluations are done at the root of the
* plan tree. Once upon a time there was code to try to push
* expensive function calls down to lower plan nodes, but that's dead
* code and has been for a long time...
* Note: all subplan nodes will have "flat" (var-only) tlists. This implies
* that all expression evaluations are done at the root of the plan tree.
* Once upon a time there was code to try to push expensive function calls
* down to lower plan nodes, but that's dead code and has been for a long
* time...
*/
build_base_rel_tlists(root, tlist);

(void) distribute_quals_to_rels(root, (Node *) parse->jointree, false);

/*
* Use the completed lists of equijoined keys to deduce any implied
* but unstated equalities (for example, A=B and B=C imply A=C).
* Use the completed lists of equijoined keys to deduce any implied but
* unstated equalities (for example, A=B and B=C imply A=C).
*/
generate_implied_equalities(root);

/*
* We should now have all the pathkey equivalence sets built, so it's
* now possible to convert the requested query_pathkeys to canonical
* form. Also canonicalize the groupClause and sortClause pathkeys
* for use later.
* We should now have all the pathkey equivalence sets built, so it's now
* possible to convert the requested query_pathkeys to canonical form.
* Also canonicalize the groupClause and sortClause pathkeys for use
* later.
*/
root->query_pathkeys = canonicalize_pathkeys(root, root->query_pathkeys);
root->group_pathkeys = canonicalize_pathkeys(root, root->group_pathkeys);
@@ -182,13 +181,13 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
elog(ERROR, "failed to construct the join relation");

/*
* If there's grouping going on, estimate the number of result groups.
* We couldn't do this any earlier because it depends on relation size
* If there's grouping going on, estimate the number of result groups. We
* couldn't do this any earlier because it depends on relation size
* estimates that were set up above.
*
* Then convert tuple_fraction to fractional form if it is absolute,
* and adjust it based on the knowledge that grouping_planner will be
* doing grouping or aggregation work with our result.
* Then convert tuple_fraction to fractional form if it is absolute, and
* adjust it based on the knowledge that grouping_planner will be doing
* grouping or aggregation work with our result.
*
* This introduces some undesirable coupling between this code and
* grouping_planner, but the alternatives seem even uglier; we couldn't
@@ -205,18 +204,18 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
final_rel->rows);

/*
* In GROUP BY mode, an absolute LIMIT is relative to the number
* of groups not the number of tuples. If the caller gave us
* a fraction, keep it as-is. (In both cases, we are effectively
* assuming that all the groups are about the same size.)
* In GROUP BY mode, an absolute LIMIT is relative to the number of
* groups not the number of tuples. If the caller gave us a fraction,
* keep it as-is. (In both cases, we are effectively assuming that
* all the groups are about the same size.)
*/
if (tuple_fraction >= 1.0)
tuple_fraction /= *num_groups;

/*
* If both GROUP BY and ORDER BY are specified, we will need two
* levels of sort --- and, therefore, certainly need to read all
* the tuples --- unless ORDER BY is a subset of GROUP BY.
* levels of sort --- and, therefore, certainly need to read all the
* tuples --- unless ORDER BY is a subset of GROUP BY.
*/
if (parse->groupClause && parse->sortClause &&
!pathkeys_contained_in(root->sort_pathkeys, root->group_pathkeys))
@@ -225,8 +224,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
else if (parse->hasAggs || root->hasHavingQual)
{
/*
* Ungrouped aggregate will certainly want to read all the tuples,
* and it will deliver a single result row (so leave *num_groups 1).
* Ungrouped aggregate will certainly want to read all the tuples, and
* it will deliver a single result row (so leave *num_groups 1).
*/
tuple_fraction = 0.0;
}
@@ -234,11 +233,11 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
{
/*
* Since there was no grouping or aggregation, it's reasonable to
* assume the UNIQUE filter has effects comparable to GROUP BY.
* Return the estimated number of output rows for use by caller.
* (If DISTINCT is used with grouping, we ignore its effects for
* rowcount estimation purposes; this amounts to assuming the grouped
* rows are distinct already.)
* assume the UNIQUE filter has effects comparable to GROUP BY. Return
* the estimated number of output rows for use by caller. (If DISTINCT
* is used with grouping, we ignore its effects for rowcount
* estimation purposes; this amounts to assuming the grouped rows are
* distinct already.)
*/
List *distinctExprs;

@@ -257,26 +256,26 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
else
{
/*
* Plain non-grouped, non-aggregated query: an absolute tuple
* fraction can be divided by the number of tuples.
* Plain non-grouped, non-aggregated query: an absolute tuple fraction
* can be divided by the number of tuples.
*/
if (tuple_fraction >= 1.0)
tuple_fraction /= final_rel->rows;
}
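
These branches keep tuple_fraction in one convention: values >= 1 are absolute row counts and get normalized to a fraction of the relevant denominator (groups in GROUP BY mode, otherwise tuples). A small illustrative helper, not the actual code:

    /* Normalize a tuple_fraction per the convention described above:
     * values >= 1.0 are absolute row counts, converted to a fraction of
     * the denominator (number of groups in GROUP BY mode, else number of
     * tuples); values in (0,1) are kept as-is. Illustrative only. */
    static double
    normalize_tuple_fraction(double tuple_fraction, double denominator)
    {
        if (tuple_fraction >= 1.0 && denominator > 0.0)
            tuple_fraction /= denominator;
        return tuple_fraction;
    }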

/*
* Pick out the cheapest-total path and the cheapest presorted path
* for the requested pathkeys (if there is one). We should take the
* tuple fraction into account when selecting the cheapest presorted
* path, but not when selecting the cheapest-total path, since if we
* have to sort then we'll have to fetch all the tuples. (But there's
* a special case: if query_pathkeys is NIL, meaning order doesn't
* matter, then the "cheapest presorted" path will be the cheapest
* overall for the tuple fraction.)
* Pick out the cheapest-total path and the cheapest presorted path for
* the requested pathkeys (if there is one). We should take the tuple
* fraction into account when selecting the cheapest presorted path, but
* not when selecting the cheapest-total path, since if we have to sort
* then we'll have to fetch all the tuples. (But there's a special case:
* if query_pathkeys is NIL, meaning order doesn't matter, then the
* "cheapest presorted" path will be the cheapest overall for the tuple
* fraction.)
*
* The cheapest-total path is also the one to use if grouping_planner
* decides to use hashed aggregation, so we return it separately even
* if this routine thinks the presorted path is the winner.
* The cheapest-total path is also the one to use if grouping_planner decides
* to use hashed aggregation, so we return it separately even if this
* routine thinks the presorted path is the winner.
*/
cheapestpath = final_rel->cheapest_total_path;

@@ -291,8 +290,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,

/*
* Forget about the presorted path if it would be cheaper to sort the
* cheapest-total path. Here we need consider only the behavior at
* the tuple fraction point.
* cheapest-total path. Here we need consider only the behavior at the
* tuple fraction point.
*/
if (sortedpath)
{
@@ -323,8 +322,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
}

/*
* If we have constant quals, add a toplevel Result step to process
* them.
* If we have constant quals, add a toplevel Result step to process them.
*/
if (constant_quals)
{

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.193 2005/09/24 22:54:37 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.194 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
static Plan *inheritance_planner(PlannerInfo *root, List *inheritlist);
static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction);
static double preprocess_limit(PlannerInfo *root,
double tuple_fraction,
int *offset_est, int *count_est);
double tuple_fraction,
int *offset_est, int *count_est);
static bool choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
Path *cheapest_path, Path *sorted_path,
double dNumGroups, AggClauseCounts *agg_counts);
@@ -95,14 +95,13 @@ planner(Query *parse, bool isCursor, int cursorOptions,
* these global state variables must be saved and restored.
*
* Query level and the param list cannot be moved into the per-query
* PlannerInfo structure since their whole purpose is communication
* across multiple sub-queries. Also, boundParams is explicitly info
* from outside the query, and so is likewise better handled as a global
* variable.
* PlannerInfo structure since their whole purpose is communication across
* multiple sub-queries. Also, boundParams is explicitly info from outside
* the query, and so is likewise better handled as a global variable.
*
* Note we do NOT save and restore PlannerPlanId: it exists to assign
* unique IDs to SubPlan nodes, and we want those IDs to be unique for
* the life of a backend. Also, PlannerInitPlan is saved/restored in
* Note we do NOT save and restore PlannerPlanId: it exists to assign unique
* IDs to SubPlan nodes, and we want those IDs to be unique for the life
* of a backend. Also, PlannerInitPlan is saved/restored in
* subquery_planner, not here.
*/
save_PlannerQueryLevel = PlannerQueryLevel;
@@ -118,10 +117,10 @@ planner(Query *parse, bool isCursor, int cursorOptions,
if (isCursor)
{
/*
* We have no real idea how many tuples the user will ultimately
* FETCH from a cursor, but it seems a good bet that he doesn't
* want 'em all. Optimize for 10% retrieval (you gotta better
* number? Should this be a SETtable parameter?)
* We have no real idea how many tuples the user will ultimately FETCH
* from a cursor, but it seems a good bet that he doesn't want 'em
* all. Optimize for 10% retrieval (you gotta better number? Should
* this be a SETtable parameter?)
*/
tuple_fraction = 0.10;
}
@@ -207,10 +206,10 @@ subquery_planner(Query *parse, double tuple_fraction,
root->parse = parse;

/*
* Look for IN clauses at the top level of WHERE, and transform them
* into joins. Note that this step only handles IN clauses originally
* at top level of WHERE; if we pull up any subqueries in the next
* step, their INs are processed just before pulling them up.
* Look for IN clauses at the top level of WHERE, and transform them into
* joins. Note that this step only handles IN clauses originally at top
* level of WHERE; if we pull up any subqueries in the next step, their
* INs are processed just before pulling them up.
*/
root->in_info_list = NIL;
if (parse->hasSubLinks)
@@ -225,14 +224,14 @@ subquery_planner(Query *parse, double tuple_fraction,
pull_up_subqueries(root, (Node *) parse->jointree, false);

/*
* Detect whether any rangetable entries are RTE_JOIN kind; if not, we
* can avoid the expense of doing flatten_join_alias_vars(). Also
* check for outer joins --- if none, we can skip reduce_outer_joins()
* and some other processing. This must be done after we have done
* Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
* avoid the expense of doing flatten_join_alias_vars(). Also check for
* outer joins --- if none, we can skip reduce_outer_joins() and some
* other processing. This must be done after we have done
* pull_up_subqueries, of course.
*
* Note: if reduce_outer_joins manages to eliminate all outer joins,
* root->hasOuterJoins is not reset currently. This is OK since its
* root->hasOuterJoins is not reset currently. This is OK since its
* purpose is merely to suppress unnecessary processing in simple cases.
*/
root->hasJoinRTEs = false;
@@ -255,8 +254,8 @@ subquery_planner(Query *parse, double tuple_fraction,

/*
* Set hasHavingQual to remember if HAVING clause is present. Needed
* because preprocess_expression will reduce a constant-true condition
* to an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
* because preprocess_expression will reduce a constant-true condition to
* an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
*/
root->hasHavingQual = (parse->havingQual != NULL);

@@ -292,29 +291,29 @@ subquery_planner(Query *parse, double tuple_fraction,
}

/*
* In some cases we may want to transfer a HAVING clause into WHERE.
* We cannot do so if the HAVING clause contains aggregates (obviously)
* or volatile functions (since a HAVING clause is supposed to be executed
* In some cases we may want to transfer a HAVING clause into WHERE. We
* cannot do so if the HAVING clause contains aggregates (obviously) or
* volatile functions (since a HAVING clause is supposed to be executed
* only once per group). Also, it may be that the clause is so expensive
* to execute that we're better off doing it only once per group, despite
* the loss of selectivity. This is hard to estimate short of doing the
* entire planning process twice, so we use a heuristic: clauses
* containing subplans are left in HAVING. Otherwise, we move or copy
* the HAVING clause into WHERE, in hopes of eliminating tuples before
* containing subplans are left in HAVING. Otherwise, we move or copy the
* HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
* If the query has explicit grouping then we can simply move such a
* clause into WHERE; any group that fails the clause will not be
* in the output because none of its tuples will reach the grouping
* or aggregation stage. Otherwise we must have a degenerate
* (variable-free) HAVING clause, which we put in WHERE so that
* query_planner() can use it in a gating Result node, but also keep
* in HAVING to ensure that we don't emit a bogus aggregated row.
* (This could be done better, but it seems not worth optimizing.)
* If the query has explicit grouping then we can simply move such a clause
* into WHERE; any group that fails the clause will not be in the output
* because none of its tuples will reach the grouping or aggregation
* stage. Otherwise we must have a degenerate (variable-free) HAVING
* clause, which we put in WHERE so that query_planner() can use it in a
* gating Result node, but also keep in HAVING to ensure that we don't
* emit a bogus aggregated row. (This could be done better, but it seems
* not worth optimizing.)
*
* Note that both havingQual and parse->jointree->quals are in
* implicitly-ANDed-list form at this point, even though they are
* declared as Node *.
* implicitly-ANDed-list form at this point, even though they are declared
* as Node *.
*/
newHaving = NIL;
foreach(l, (List *) parse->havingQual)
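
The per-clause decision made in the loop above can be summarized as below; a hedged sketch whose inputs are the results of the clause inspections (flags rather than the real inspector functions):

    /* Sketch of the HAVING-to-WHERE heuristic described above; the caller
     * supplies the outcome of the clause inspections as flags. */
    enum having_action { KEEP_IN_HAVING, MOVE_TO_WHERE, COPY_TO_BOTH };

    static enum having_action
    classify_having_clause(int has_aggs, int has_volatile, int has_subplan,
                           int has_group_by)
    {
        if (has_aggs || has_volatile || has_subplan)
            return KEEP_IN_HAVING;  /* unsafe, or judged cheaper once per group */
        if (has_group_by)
            return MOVE_TO_WHERE;   /* filter rows before grouping/aggregation */
        return COPY_TO_BOTH;        /* degenerate HAVING: WHERE gate + HAVING check */
    }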
|
||||
@@ -346,28 +345,27 @@ subquery_planner(Query *parse, double tuple_fraction,
parse->havingQual = (Node *) newHaving;

/*
 * If we have any outer joins, try to reduce them to plain inner
 * joins. This step is most easily done after we've done expression
 * If we have any outer joins, try to reduce them to plain inner joins.
 * This step is most easily done after we've done expression
 * preprocessing.
 */
if (root->hasOuterJoins)
reduce_outer_joins(root);

/*
 * See if we can simplify the jointree; opportunities for this may
 * come from having pulled up subqueries, or from flattening explicit
 * JOIN syntax. We must do this after flattening JOIN alias
 * variables, since eliminating explicit JOIN nodes from the jointree
 * will cause get_relids_for_join() to fail. But it should happen
 * after reduce_outer_joins, anyway.
 * See if we can simplify the jointree; opportunities for this may come
 * from having pulled up subqueries, or from flattening explicit JOIN
 * syntax. We must do this after flattening JOIN alias variables, since
 * eliminating explicit JOIN nodes from the jointree will cause
 * get_relids_for_join() to fail. But it should happen after
 * reduce_outer_joins, anyway.
 */
parse->jointree = (FromExpr *)
simplify_jointree(root, (Node *) parse->jointree);

/*
 * Do the main planning. If we have an inherited target relation,
 * that needs special processing, else go straight to
 * grouping_planner.
 * Do the main planning. If we have an inherited target relation, that
 * needs special processing, else go straight to grouping_planner.
 */
if (parse->resultRelation &&
(lst = expand_inherited_rtentry(root, parse->resultRelation)) != NIL)
@@ -377,8 +375,8 @@ subquery_planner(Query *parse, double tuple_fraction,

/*
 * If any subplans were generated, or if we're inside a subplan, build
 * initPlan list and extParam/allParam sets for plan nodes, and attach
 * the initPlans to the top plan node.
 * initPlan list and extParam/allParam sets for plan nodes, and attach the
 * initPlans to the top plan node.
 */
if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1)
SS_finalize_plan(plan, parse->rtable);
@@ -405,9 +403,9 @@ static Node *
preprocess_expression(PlannerInfo *root, Node *expr, int kind)
{
/*
 * Fall out quickly if expression is empty. This occurs often enough
 * to be worth checking. Note that null->null is the correct conversion
 * for implicit-AND result format, too.
 * Fall out quickly if expression is empty. This occurs often enough to
 * be worth checking. Note that null->null is the correct conversion for
 * implicit-AND result format, too.
 */
if (expr == NULL)
return NULL;
@@ -415,8 +413,7 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
/*
 * If the query has any join RTEs, replace join alias variables with
 * base-relation variables. We must do this before sublink processing,
 * else sublinks expanded out from join aliases wouldn't get
 * processed.
 * else sublinks expanded out from join aliases wouldn't get processed.
 */
if (root->hasJoinRTEs)
expr = flatten_join_alias_vars(root, expr);
@@ -429,13 +426,13 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
 * careful to maintain AND/OR flatness --- that is, do not generate a tree
 * with AND directly under AND, nor OR directly under OR.
 *
 * Because this is a relatively expensive process, we skip it when the
 * query is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()".
 * The expression will only be evaluated once anyway, so no point in
 * Because this is a relatively expensive process, we skip it when the query
 * is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The
 * expression will only be evaluated once anyway, so no point in
 * pre-simplifying; we can't execute it any faster than the executor can,
 * and we will waste cycles copying the tree. Notice however that we
 * still must do it for quals (to get AND/OR flatness); and if we are
 * in a subquery we should not assume it will be done only once.
 * still must do it for quals (to get AND/OR flatness); and if we are in a
 * subquery we should not assume it will be done only once.
 */
if (root->parse->jointree->fromlist != NIL ||
kind == EXPRKIND_QUAL ||
@@ -460,8 +457,8 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL));

/*
 * XXX do not insert anything here unless you have grokked the
 * comments in SS_replace_correlation_vars ...
 * XXX do not insert anything here unless you have grokked the comments in
 * SS_replace_correlation_vars ...
 */

/* Replace uplevel vars with Param nodes */
@@ -469,9 +466,9 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
expr = SS_replace_correlation_vars(expr);

/*
 * If it's a qual or havingQual, convert it to implicit-AND format.
 * (We don't want to do this before eval_const_expressions, since the
 * latter would be unable to simplify a top-level AND correctly. Also,
 * If it's a qual or havingQual, convert it to implicit-AND format. (We
 * don't want to do this before eval_const_expressions, since the latter
 * would be unable to simplify a top-level AND correctly. Also,
 * SS_process_sublinks expects explicit-AND format.)
 */
if (kind == EXPRKIND_QUAL)
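The hunk stops at the test; judging from the comment, the conversion itself is a one-liner using the standard make_ands_implicit() helper (a sketch of the line the hunk does not show):

	if (kind == EXPRKIND_QUAL)
		expr = (Node *) make_ands_implicit((Expr *) expr);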
@@ -557,9 +554,9 @@ inheritance_planner(PlannerInfo *root, List *inheritlist)
Plan *subplan;

/*
 * Generate modified query with this rel as target. We have to
 * be prepared to translate varnos in in_info_list as well as in
 * the Query proper.
 * Generate modified query with this rel as target. We have to be
 * prepared to translate varnos in in_info_list as well as in the
 * Query proper.
 */
memcpy(&subroot, root, sizeof(PlannerInfo));
subroot.parse = (Query *)
@@ -580,26 +577,26 @@ inheritance_planner(PlannerInfo *root, List *inheritlist)
 * XXX my goodness this next bit is ugly. Really need to think about
 * ways to rein in planner's habit of scribbling on its input.
 *
 * Planning of the subquery might have modified the rangetable,
 * either by addition of RTEs due to expansion of inherited source
 * tables, or by changes of the Query structures inside subquery
 * RTEs. We have to ensure that this gets propagated back to the
 * master copy. However, if we aren't done planning yet, we also
 * need to ensure that subsequent calls to grouping_planner have
 * virgin sub-Queries to work from. So, if we are at the last
 * list entry, just copy the subquery rangetable back to the master
 * copy; if we are not, then extend the master copy by adding
 * whatever the subquery added. (We assume these added entries
 * will go untouched by the future grouping_planner calls. We are
 * also effectively assuming that sub-Queries will get planned
 * identically each time, or at least that the impacts on their
 * rangetables will be the same each time. Did I say this is ugly?)
 * Planning of the subquery might have modified the rangetable, either by
 * addition of RTEs due to expansion of inherited source tables, or by
 * changes of the Query structures inside subquery RTEs. We have to
 * ensure that this gets propagated back to the master copy. However,
 * if we aren't done planning yet, we also need to ensure that
 * subsequent calls to grouping_planner have virgin sub-Queries to
 * work from. So, if we are at the last list entry, just copy the
 * subquery rangetable back to the master copy; if we are not, then
 * extend the master copy by adding whatever the subquery added. (We
 * assume these added entries will go untouched by the future
 * grouping_planner calls. We are also effectively assuming that
 * sub-Queries will get planned identically each time, or at least
 * that the impacts on their rangetables will be the same each time.
 * Did I say this is ugly?)
 */
if (lnext(l) == NULL)
parse->rtable = subroot.parse->rtable;
else
{
int subrtlength = list_length(subroot.parse->rtable);
int subrtlength = list_length(subroot.parse->rtable);

if (subrtlength > mainrtlength)
{
@@ -666,38 +663,37 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
List *set_sortclauses;

/*
 * If there's a top-level ORDER BY, assume we have to fetch all
 * the tuples. This might seem too simplistic given all the
 * hackery below to possibly avoid the sort ... but a nonzero
 * tuple_fraction is only of use to plan_set_operations() when
 * the setop is UNION ALL, and the result of UNION ALL is always
 * unsorted.
 * If there's a top-level ORDER BY, assume we have to fetch all the
 * tuples. This might seem too simplistic given all the hackery below
 * to possibly avoid the sort ... but a nonzero tuple_fraction is only
 * of use to plan_set_operations() when the setop is UNION ALL, and
 * the result of UNION ALL is always unsorted.
 */
if (parse->sortClause)
tuple_fraction = 0.0;

/*
 * Construct the plan for set operations. The result will not
 * need any work except perhaps a top-level sort and/or LIMIT.
 * Construct the plan for set operations. The result will not need
 * any work except perhaps a top-level sort and/or LIMIT.
 */
result_plan = plan_set_operations(root, tuple_fraction,
&set_sortclauses);

/*
 * Calculate pathkeys representing the sort order (if any) of the
 * set operation's result. We have to do this before overwriting
 * the sort key information...
 * Calculate pathkeys representing the sort order (if any) of the set
 * operation's result. We have to do this before overwriting the sort
 * key information...
 */
current_pathkeys = make_pathkeys_for_sortclauses(set_sortclauses,
result_plan->targetlist);
result_plan->targetlist);
current_pathkeys = canonicalize_pathkeys(root, current_pathkeys);

/*
 * We should not need to call preprocess_targetlist, since we must
 * be in a SELECT query node. Instead, use the targetlist
 * returned by plan_set_operations (since this tells whether it
 * returned any resjunk columns!), and transfer any sort key
 * information from the original tlist.
 * We should not need to call preprocess_targetlist, since we must be
 * in a SELECT query node. Instead, use the targetlist returned by
 * plan_set_operations (since this tells whether it returned any
 * resjunk columns!), and transfer any sort key information from the
 * original tlist.
 */
Assert(parse->commandType == CMD_SELECT);

@@ -741,11 +737,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
tlist = preprocess_targetlist(root, tlist);

/*
 * Generate appropriate target list for subplan; may be different
 * from tlist if grouping or aggregation is needed.
 * Generate appropriate target list for subplan; may be different from
 * tlist if grouping or aggregation is needed.
 */
sub_tlist = make_subplanTargetList(root, tlist,
&groupColIdx, &need_tlist_eval);
&groupColIdx, &need_tlist_eval);

/*
 * Calculate pathkeys that represent grouping/ordering requirements.
@@ -763,10 +759,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 * Note: we do not attempt to detect duplicate aggregates here; a
 * somewhat-overestimated count is okay for our present purposes.
 *
 * Note: think not that we can turn off hasAggs if we find no aggs.
 * It is possible for constant-expression simplification to remove
 * all explicit references to aggs, but we still have to follow
 * the aggregate semantics (eg, producing only one output row).
 * Note: think not that we can turn off hasAggs if we find no aggs. It is
 * possible for constant-expression simplification to remove all
 * explicit references to aggs, but we still have to follow the
 * aggregate semantics (eg, producing only one output row).
 */
if (parse->hasAggs)
{
@@ -777,13 +773,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
 * Figure out whether we need a sorted result from query_planner.
 *
 * If we have a GROUP BY clause, then we want a result sorted
 * properly for grouping. Otherwise, if there is an ORDER BY
 * clause, we want to sort by the ORDER BY clause. (Note: if we
 * have both, and ORDER BY is a superset of GROUP BY, it would be
 * tempting to request sort by ORDER BY --- but that might just
 * leave us failing to exploit an available sort order at all.
 * Needs more thought...)
 * If we have a GROUP BY clause, then we want a result sorted properly
 * for grouping. Otherwise, if there is an ORDER BY clause, we want
 * to sort by the ORDER BY clause. (Note: if we have both, and ORDER
 * BY is a superset of GROUP BY, it would be tempting to request sort
 * by ORDER BY --- but that might just leave us failing to exploit an
 * available sort order at all. Needs more thought...)
 */
if (parse->groupClause)
root->query_pathkeys = root->group_pathkeys;
@@ -793,10 +788,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
root->query_pathkeys = NIL;

/*
 * Generate the best unsorted and presorted paths for this Query
 * (but note there may not be any presorted path). query_planner
 * will also estimate the number of groups in the query, and
 * canonicalize all the pathkeys.
 * Generate the best unsorted and presorted paths for this Query (but
 * note there may not be any presorted path). query_planner will also
 * estimate the number of groups in the query, and canonicalize all
 * the pathkeys.
 */
query_planner(root, sub_tlist, tuple_fraction,
&cheapest_path, &sorted_path, &dNumGroups);
@@ -820,8 +815,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)

/*
 * Select the best path. If we are doing hashed grouping, we will
 * always read all the input tuples, so use the cheapest-total
 * path. Otherwise, trust query_planner's decision about which to use.
 * always read all the input tuples, so use the cheapest-total path.
 * Otherwise, trust query_planner's decision about which to use.
 */
if (use_hashed_grouping || !sorted_path)
best_path = cheapest_path;
@@ -829,10 +824,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
best_path = sorted_path;

/*
 * Check to see if it's possible to optimize MIN/MAX aggregates.
 * If so, we will forget all the work we did so far to choose a
 * "regular" path ... but we had to do it anyway to be able to
 * tell which way is cheaper.
 * Check to see if it's possible to optimize MIN/MAX aggregates. If
 * so, we will forget all the work we did so far to choose a "regular"
 * path ... but we had to do it anyway to be able to tell which way is
 * cheaper.
 */
result_plan = optimize_minmax_aggregates(root,
tlist,
@@ -840,8 +835,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
if (result_plan != NULL)
{
/*
 * optimize_minmax_aggregates generated the full plan, with
 * the right tlist, and it has no sort order.
 * optimize_minmax_aggregates generated the full plan, with the
 * right tlist, and it has no sort order.
 */
current_pathkeys = NIL;
}
@@ -985,8 +980,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 * GROUP BY without aggregation, so insert a group node (plus
 * the appropriate sort node, if necessary).
 *
 * Add an explicit sort if we couldn't make the path come
 * out the way the GROUP node needs it.
 * Add an explicit sort if we couldn't make the path come out the
 * way the GROUP node needs it.
 */
if (!pathkeys_contained_in(group_pathkeys, current_pathkeys))
{
@@ -1014,11 +1009,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 * This is a degenerate case in which we are supposed to emit
 * either 0 or 1 row depending on whether HAVING succeeds.
 * Furthermore, there cannot be any variables in either HAVING
 * or the targetlist, so we actually do not need the FROM table
 * at all! We can just throw away the plan-so-far and generate
 * a Result node. This is a sufficiently unusual corner case
 * that it's not worth contorting the structure of this routine
 * to avoid having to generate the plan in the first place.
 * or the targetlist, so we actually do not need the FROM
 * table at all! We can just throw away the plan-so-far and
 * generate a Result node. This is a sufficiently unusual
 * corner case that it's not worth contorting the structure of
 * this routine to avoid having to generate the plan in the
 * first place.
 */
result_plan = (Plan *) make_result(tlist,
parse->havingQual,
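The make_result() call is cut off at the hunk boundary; given its (tlist, resconstantqual, subplan) argument shape, it presumably completes with no subplan at all, since the FROM table is not needed here:

	result_plan = (Plan *) make_result(tlist,
									   parse->havingQual,
									   NULL);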
@@ -1028,8 +1024,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
} /* end of if (setOperations) */

/*
 * If we were not able to make the plan come out in the right order,
 * add an explicit sort step.
 * If we were not able to make the plan come out in the right order, add
 * an explicit sort step.
 */
if (parse->sortClause)
{
@@ -1051,9 +1047,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan = (Plan *) make_unique(result_plan, parse->distinctClause);

/*
 * If there was grouping or aggregation, leave plan_rows as-is
 * (ie, assume the result was already mostly unique). If not,
 * use the number of distinct-groups calculated by query_planner.
 * If there was grouping or aggregation, leave plan_rows as-is (ie,
 * assume the result was already mostly unique). If not, use the
 * number of distinct-groups calculated by query_planner.
 */
if (!parse->groupClause && !root->hasHavingQual && !parse->hasAggs)
result_plan->plan_rows = dNumGroups;
@@ -1072,8 +1068,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
}

/*
 * Return the actual output ordering in query_pathkeys for possible
 * use by an outer query level.
 * Return the actual output ordering in query_pathkeys for possible use by
 * an outer query level.
 */
root->query_pathkeys = current_pathkeys;

@@ -1084,7 +1080,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
 *
 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
 * results back in *count_est and *offset_est. These variables are set to
 * results back in *count_est and *offset_est. These variables are set to
 * 0 if the corresponding clause is not present, and -1 if it's present
 * but we couldn't estimate the value for it. (The "0" convention is OK
 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
@@ -1093,7 +1089,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 * be passed to make_limit, which see if you change this code.
 *
 * The return value is the suitably adjusted tuple_fraction to use for
 * planning the query. This adjustment is not overridable, since it reflects
 * planning the query. This adjustment is not overridable, since it reflects
 * plan actions that grouping_planner() will certainly take, not assumptions
 * about context.
 */
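A caller-side sketch of the conventions just described (the int64 out-parameter types are an assumption; the variable names come from the comment, not from the hunks):

	int64		count_est;
	int64		offset_est;

	tuple_fraction = preprocess_limit(root, tuple_fraction,
									  &count_est, &offset_est);
	/* 0 = clause not present; -1 = present but value not estimable */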
@@ -1120,7 +1116,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
if (((Const *) est)->constisnull)
{
/* NULL indicates LIMIT ALL, ie, no limit */
*count_est = 0; /* treat as not present */
*count_est = 0; /* treat as not present */
}
else
{
@@ -1143,7 +1139,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
if (((Const *) est)->constisnull)
{
/* Treat NULL as no offset; the executor will too */
*offset_est = 0; /* treat as not present */
*offset_est = 0; /* treat as not present */
}
else
{
@@ -1217,11 +1213,11 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
else if (*offset_est != 0 && tuple_fraction > 0.0)
{
/*
 * We have an OFFSET but no LIMIT. This acts entirely differently
 * from the LIMIT case: here, we need to increase rather than
 * decrease the caller's tuple_fraction, because the OFFSET acts
 * to cause more tuples to be fetched instead of fewer. This only
 * matters if we got a tuple_fraction > 0, however.
 * We have an OFFSET but no LIMIT. This acts entirely differently
 * from the LIMIT case: here, we need to increase rather than decrease
 * the caller's tuple_fraction, because the OFFSET acts to cause more
 * tuples to be fetched instead of fewer. This only matters if we got
 * a tuple_fraction > 0, however.
 *
 * As above, use 10% if OFFSET is present but unestimatable.
 */
@@ -1232,9 +1228,9 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,

/*
 * If we have absolute counts from both caller and OFFSET, add them
 * together; likewise if they are both fractional. If one is
 * fractional and the other absolute, we want to take the larger,
 * and we heuristically assume that's the fractional one.
 * together; likewise if they are both fractional. If one is
 * fractional and the other absolute, we want to take the larger, and
 * we heuristically assume that's the fractional one.
 */
if (tuple_fraction >= 1.0)
{
@@ -1260,7 +1256,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
/* both fractional, so add them together */
tuple_fraction += limit_fraction;
if (tuple_fraction >= 1.0)
tuple_fraction = 0.0; /* assume fetch all */
tuple_fraction = 0.0; /* assume fetch all */
}
}
}
@@ -1303,9 +1299,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
 * Don't do it if it doesn't look like the hashtable will fit into
 * work_mem.
 *
 * Beware here of the possibility that cheapest_path->parent is NULL.
 * This could happen if user does something silly like
 * SELECT 'foo' GROUP BY 1;
 * Beware here of the possibility that cheapest_path->parent is NULL. This
 * could happen if user does something silly like SELECT 'foo' GROUP BY 1;
 */
if (cheapest_path->parent)
{
@@ -1314,8 +1309,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
}
else
{
cheapest_path_rows = 1; /* assume non-set result */
cheapest_path_width = 100; /* arbitrary */
cheapest_path_rows = 1; /* assume non-set result */
cheapest_path_width = 100; /* arbitrary */
}

/* Estimate per-hash-entry space at tuple width... */
@@ -1329,23 +1324,19 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
return false;

/*
 * See if the estimated cost is no more than doing it the other way.
 * While avoiding the need for sorted input is usually a win, the fact
 * that the output won't be sorted may be a loss; so we need to do an
 * actual cost comparison.
 * See if the estimated cost is no more than doing it the other way. While
 * avoiding the need for sorted input is usually a win, the fact that the
 * output won't be sorted may be a loss; so we need to do an actual cost
 * comparison.
 *
 * We need to consider
 * cheapest_path + hashagg [+ final sort]
 * versus either
 * cheapest_path [+ sort] + group or agg [+ final sort]
 * or
 * presorted_path + group or agg [+ final sort]
 * where brackets indicate a step that may not be needed. We assume
 * query_planner() will have returned a presorted path only if it's a
 * winner compared to cheapest_path for this purpose.
 * We need to consider cheapest_path + hashagg [+ final sort] versus either
 * cheapest_path [+ sort] + group or agg [+ final sort] or presorted_path
 * + group or agg [+ final sort] where brackets indicate a step that may
 * not be needed. We assume query_planner() will have returned a presorted
 * path only if it's a winner compared to cheapest_path for this purpose.
 *
 * These path variables are dummies that just hold cost fields; we don't
 * make actual Paths for these steps.
 * These path variables are dummies that just hold cost fields; we don't make
 * actual Paths for these steps.
 */
cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
numGroupCols, dNumGroups,
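Reduced to its skeleton, the comparison described above fills two dummy Path variables with costs and picks the cheaper total (a sketch only; the real cost_agg()/cost_sort() argument lists are longer than shown, and the real test also weighs tuple_fraction):

	Path		hashed_p;	/* cheapest_path + hashagg [+ final sort] */
	Path		sorted_p;	/* cheapest_path [+ sort] + group/agg [+ final sort] */

	/* ... costs filled in by cost_agg(), cost_sort(), cost_group() ... */

	return (hashed_p.total_cost < sorted_p.total_cost);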
@@ -1502,8 +1493,8 @@ make_subplanTargetList(PlannerInfo *root,

/*
 * Otherwise, start with a "flattened" tlist (having just the vars
 * mentioned in the targetlist and HAVING qual --- but not upper-
 * level Vars; they will be replaced by Params later on).
 * mentioned in the targetlist and HAVING qual --- but not upper- level
 * Vars; they will be replaced by Params later on).
 */
sub_tlist = flatten_tlist(tlist);
extravars = pull_var_clause(parse->havingQual, false);
@@ -1513,9 +1504,8 @@ make_subplanTargetList(PlannerInfo *root,

/*
 * If grouping, create sub_tlist entries for all GROUP BY expressions
 * (GROUP BY items that are simple Vars should be in the list
 * already), and make an array showing where the group columns are in
 * the sub_tlist.
 * (GROUP BY items that are simple Vars should be in the list already),
 * and make an array showing where the group columns are in the sub_tlist.
 */
numCols = list_length(parse->groupClause);
if (numCols > 0)
@@ -1634,7 +1624,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Assert(orig_tlist_item != NULL);
orig_tle = (TargetEntry *) lfirst(orig_tlist_item);
orig_tlist_item = lnext(orig_tlist_item);
if (orig_tle->resjunk) /* should not happen */
if (orig_tle->resjunk) /* should not happen */
elog(ERROR, "resjunk output columns are not implemented");
Assert(new_tle->resno == orig_tle->resno);
new_tle->ressortgroupref = orig_tle->ressortgroupref;

@@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
 * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.114 2005/09/05 18:59:38 tgl Exp $
 * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.115 2005/10/15 02:49:20 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -38,7 +38,7 @@ typedef struct
int num_vars; /* number of plain Var tlist entries */
bool has_non_vars; /* are there non-plain-Var entries? */
/* array of num_vars entries: */
tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */
tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */
} indexed_tlist; /* VARIABLE LENGTH STRUCT */

typedef struct
@@ -64,28 +64,28 @@ static void fix_expr_references(Plan *plan, Node *node);
static bool fix_expr_references_walker(Node *node, void *context);
static void set_join_references(Join *join, List *rtable);
static void set_inner_join_references(Plan *inner_plan,
List *rtable,
indexed_tlist *outer_itlist);
List *rtable,
indexed_tlist *outer_itlist);
static void set_uppernode_references(Plan *plan, Index subvarno);
static indexed_tlist *build_tlist_index(List *tlist);
static Var *search_indexed_tlist_for_var(Var *var,
indexed_tlist *itlist,
Index newvarno);
indexed_tlist *itlist,
Index newvarno);
static Var *search_indexed_tlist_for_non_var(Node *node,
indexed_tlist *itlist,
Index newvarno);
indexed_tlist *itlist,
Index newvarno);
static List *join_references(List *clauses,
List *rtable,
indexed_tlist *outer_itlist,
indexed_tlist *inner_itlist,
Index acceptable_rel);
List *rtable,
indexed_tlist *outer_itlist,
indexed_tlist *inner_itlist,
Index acceptable_rel);
static Node *join_references_mutator(Node *node,
join_references_context *context);
static Node *replace_vars_with_subplan_refs(Node *node,
indexed_tlist *subplan_itlist,
Index subvarno);
indexed_tlist *subplan_itlist,
Index subvarno);
static Node *replace_vars_with_subplan_refs_mutator(Node *node,
replace_vars_with_subplan_refs_context *context);
replace_vars_with_subplan_refs_context *context);
static bool fix_opfuncids_walker(Node *node, void *context);
static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);

@@ -99,7 +99,7 @@ static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
/*
 * set_plan_references
 *
 * This is the final processing pass of the planner/optimizer. The plan
 * This is the final processing pass of the planner/optimizer. The plan
 * tree is complete; we just have to adjust some representational details
 * for the convenience of the executor. We update Vars in upper plan nodes
 * to refer to the outputs of their subplans, and we compute regproc OIDs
@@ -150,22 +150,22 @@ set_plan_references(Plan *plan, List *rtable)
fix_expr_references(plan,
(Node *) ((IndexScan *) plan)->indexqual);
fix_expr_references(plan,
(Node *) ((IndexScan *) plan)->indexqualorig);
(Node *) ((IndexScan *) plan)->indexqualorig);
break;
case T_BitmapIndexScan:
/* no need to fix targetlist and qual */
Assert(plan->targetlist == NIL);
Assert(plan->qual == NIL);
fix_expr_references(plan,
(Node *) ((BitmapIndexScan *) plan)->indexqual);
(Node *) ((BitmapIndexScan *) plan)->indexqual);
fix_expr_references(plan,
(Node *) ((BitmapIndexScan *) plan)->indexqualorig);
(Node *) ((BitmapIndexScan *) plan)->indexqualorig);
break;
case T_BitmapHeapScan:
fix_expr_references(plan, (Node *) plan->targetlist);
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan,
(Node *) ((BitmapHeapScan *) plan)->bitmapqualorig);
(Node *) ((BitmapHeapScan *) plan)->bitmapqualorig);
break;
case T_TidScan:
fix_expr_references(plan, (Node *) plan->targetlist);
@@ -200,7 +200,7 @@ set_plan_references(Plan *plan, List *rtable)
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
(Node *) ((MergeJoin *) plan)->mergeclauses);
(Node *) ((MergeJoin *) plan)->mergeclauses);
break;
case T_HashJoin:
set_join_references((Join *) plan, rtable);
@@ -208,7 +208,7 @@ set_plan_references(Plan *plan, List *rtable)
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
(Node *) ((HashJoin *) plan)->hashclauses);
(Node *) ((HashJoin *) plan)->hashclauses);
break;
case T_Hash:
case T_Material:
@@ -218,24 +218,24 @@ set_plan_references(Plan *plan, List *rtable)

/*
 * These plan types don't actually bother to evaluate their
 * targetlists (because they just return their unmodified
 * input tuples). The optimizer is lazy about creating really
 * valid targetlists for them --- it tends to just put in a
 * pointer to the child plan node's tlist. Hence, we leave
 * the tlist alone. In particular, we do not want to process
 * subplans in the tlist, since we will likely end up reprocessing
 * subplans that also appear in lower levels of the plan tree!
 * targetlists (because they just return their unmodified input
 * tuples). The optimizer is lazy about creating really valid
 * targetlists for them --- it tends to just put in a pointer to
 * the child plan node's tlist. Hence, we leave the tlist alone.
 * In particular, we do not want to process subplans in the tlist,
 * since we will likely end up reprocessing subplans that also
 * appear in lower levels of the plan tree!
 *
 * Since these plan types don't check quals either, we should
 * not find any qual expression attached to them.
 * Since these plan types don't check quals either, we should not
 * find any qual expression attached to them.
 */
Assert(plan->qual == NIL);
break;
case T_Limit:

/*
 * Like the plan types above, Limit doesn't evaluate its tlist
 * or quals. It does have live expressions for limit/offset,
 * Like the plan types above, Limit doesn't evaluate its tlist or
 * quals. It does have live expressions for limit/offset,
 * however.
 */
Assert(plan->qual == NIL);
@@ -251,8 +251,8 @@ set_plan_references(Plan *plan, List *rtable)
case T_Result:

/*
 * Result may or may not have a subplan; no need to fix up
 * subplan references if it hasn't got one...
 * Result may or may not have a subplan; no need to fix up subplan
 * references if it hasn't got one...
 *
 * XXX why does Result use a different subvarno from Agg/Group?
 */
@@ -300,9 +300,9 @@ set_plan_references(Plan *plan, List *rtable)
 * NOTE: it is essential that we recurse into child plans AFTER we set
 * subplan references in this plan's tlist and quals. If we did the
 * reference-adjustments bottom-up, then we would fail to match this
 * plan's var nodes against the already-modified nodes of the
 * children. Fortunately, that consideration doesn't apply to SubPlan
 * nodes; else we'd need two passes over the expression trees.
 * plan's var nodes against the already-modified nodes of the children.
 * Fortunately, that consideration doesn't apply to SubPlan nodes; else
 * we'd need two passes over the expression trees.
 */
plan->lefttree = set_plan_references(plan->lefttree, rtable);
plan->righttree = set_plan_references(plan->righttree, rtable);
@@ -339,8 +339,8 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable)
rte->subquery->rtable);

/*
 * We have to process any initplans too; set_plan_references can't do
 * it for us because of the possibility of double-processing.
 * We have to process any initplans too; set_plan_references can't do it
 * for us because of the possibility of double-processing.
 */
foreach(l, plan->scan.plan.initPlan)
{
@@ -353,12 +353,12 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable)
if (trivial_subqueryscan(plan))
{
/*
 * We can omit the SubqueryScan node and just pull up the subplan.
 * We have to merge its rtable into the outer rtable, which means
 * We can omit the SubqueryScan node and just pull up the subplan. We
 * have to merge its rtable into the outer rtable, which means
 * adjusting varnos throughout the subtree.
 */
int rtoffset = list_length(rtable);
List *sub_rtable;
int rtoffset = list_length(rtable);
List *sub_rtable;

sub_rtable = copyObject(rte->subquery->rtable);
range_table_walker(sub_rtable,
@@ -382,11 +382,11 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable)
else
{
/*
 * Keep the SubqueryScan node. We have to do the processing that
 * set_plan_references would otherwise have done on it. Notice
 * we do not do set_uppernode_references() here, because a
 * SubqueryScan will always have been created with correct
 * references to its subplan's outputs to begin with.
 * Keep the SubqueryScan node. We have to do the processing that
 * set_plan_references would otherwise have done on it. Notice we do
 * not do set_uppernode_references() here, because a SubqueryScan will
 * always have been created with correct references to its subplan's
 * outputs to begin with.
 */
result = (Plan *) plan;

@@ -532,9 +532,9 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
case T_SetOp:

/*
 * Even though the targetlist won't be used by the executor,
 * we fix it up for possible use by EXPLAIN (not to mention
 * ease of debugging --- wrong varnos are very confusing).
 * Even though the targetlist won't be used by the executor, we
 * fix it up for possible use by EXPLAIN (not to mention ease of
 * debugging --- wrong varnos are very confusing).
 */
adjust_expr_varnos((Node *) plan->targetlist, rtoffset);
Assert(plan->qual == NIL);
@@ -542,8 +542,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
case T_Limit:

/*
 * Like the plan types above, Limit doesn't evaluate its tlist
 * or quals. It does have live expressions for limit/offset,
 * Like the plan types above, Limit doesn't evaluate its tlist or
 * quals. It does have live expressions for limit/offset,
 * however.
 */
adjust_expr_varnos((Node *) plan->targetlist, rtoffset);
@@ -590,8 +590,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
/*
 * Now recurse into child plans.
 *
 * We don't need to (and in fact mustn't) recurse into subqueries,
 * so no need to examine initPlan list.
 * We don't need to (and in fact mustn't) recurse into subqueries, so no need
 * to examine initPlan list.
 */
adjust_plan_varnos(plan->lefttree, rtoffset);
adjust_plan_varnos(plan->righttree, rtoffset);
@@ -603,7 +603,7 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
 *
 * This is different from the rewriter's OffsetVarNodes in that it has to
 * work on an already-planned expression tree; in particular, we should not
 * disturb INNER and OUTER references. On the other hand, we don't have to
 * disturb INNER and OUTER references. On the other hand, we don't have to
 * recurse into subqueries nor deal with outer-level Vars, so it's pretty
 * simple.
 */
@@ -763,10 +763,10 @@ set_inner_join_references(Plan *inner_plan,
if (IsA(inner_plan, IndexScan))
{
/*
 * An index is being used to reduce the number of tuples
 * scanned in the inner relation. If there are join clauses
 * being used with the index, we must update their outer-rel
 * var nodes to refer to the outer side of the join.
 * An index is being used to reduce the number of tuples scanned in
 * the inner relation. If there are join clauses being used with the
 * index, we must update their outer-rel var nodes to refer to the
 * outer side of the join.
 */
IndexScan *innerscan = (IndexScan *) inner_plan;
List *indexqualorig = innerscan->indexqualorig;
@@ -789,9 +789,9 @@ set_inner_join_references(Plan *inner_plan,
innerrel);

/*
 * We must fix the inner qpqual too, if it has join
 * clauses (this could happen if special operators are
 * involved: some indexquals may get rechecked as qpquals).
 * We must fix the inner qpqual too, if it has join clauses (this
 * could happen if special operators are involved: some indexquals
 * may get rechecked as qpquals).
 */
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
@@ -832,11 +832,11 @@ set_inner_join_references(Plan *inner_plan,
else if (IsA(inner_plan, BitmapHeapScan))
{
/*
 * The inner side is a bitmap scan plan. Fix the top node,
 * and recurse to get the lower nodes.
 * The inner side is a bitmap scan plan. Fix the top node, and
 * recurse to get the lower nodes.
 *
 * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig
 * if they are duplicated in qpqual, so must test these independently.
 * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig if
 * they are duplicated in qpqual, so must test these independently.
 */
BitmapHeapScan *innerscan = (BitmapHeapScan *) inner_plan;
Index innerrel = innerscan->scan.scanrelid;
@@ -851,9 +851,9 @@ set_inner_join_references(Plan *inner_plan,
innerrel);

/*
 * We must fix the inner qpqual too, if it has join
 * clauses (this could happen if special operators are
 * involved: some indexquals may get rechecked as qpquals).
 * We must fix the inner qpqual too, if it has join clauses (this
 * could happen if special operators are involved: some indexquals may
 * get rechecked as qpquals).
 */
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
@@ -870,8 +870,8 @@ set_inner_join_references(Plan *inner_plan,
else if (IsA(inner_plan, BitmapAnd))
{
/* All we need do here is recurse */
BitmapAnd *innerscan = (BitmapAnd *) inner_plan;
ListCell *l;
BitmapAnd *innerscan = (BitmapAnd *) inner_plan;
ListCell *l;

foreach(l, innerscan->bitmapplans)
{
@@ -883,8 +883,8 @@ set_inner_join_references(Plan *inner_plan,
else if (IsA(inner_plan, BitmapOr))
{
/* All we need do here is recurse */
BitmapOr *innerscan = (BitmapOr *) inner_plan;
ListCell *l;
BitmapOr *innerscan = (BitmapOr *) inner_plan;
ListCell *l;

foreach(l, innerscan->bitmapplans)
{
@@ -963,7 +963,7 @@ set_uppernode_references(Plan *plan, Index subvarno)
 *
 * In most cases, subplan tlists will be "flat" tlists with only Vars,
 * so we try to optimize that case by extracting information about Vars
 * in advance. Matching a parent tlist to a child is still an O(N^2)
 * in advance. Matching a parent tlist to a child is still an O(N^2)
 * operation, but at least with a much smaller constant factor than plain
 * tlist_member() searches.
 *
@@ -994,7 +994,7 @@ build_tlist_index(List *tlist)

if (tle->expr && IsA(tle->expr, Var))
{
Var *var = (Var *) tle->expr;
Var *var = (Var *) tle->expr;

vinfo->varno = var->varno;
vinfo->varattno = var->varattno;
@@ -1068,7 +1068,7 @@ search_indexed_tlist_for_non_var(Node *node,
exprType((Node *) tle->expr),
exprTypmod((Node *) tle->expr),
0);
newvar->varnoold = 0; /* wasn't ever a plain Var */
newvar->varnoold = 0; /* wasn't ever a plain Var */
newvar->varoattno = 0;
return newvar;
}
@@ -1213,7 +1213,7 @@ replace_vars_with_subplan_refs(Node *node,

static Node *
replace_vars_with_subplan_refs_mutator(Node *node,
replace_vars_with_subplan_refs_context *context)
replace_vars_with_subplan_refs_context *context)
{
Var *newvar;

@@ -7,7 +7,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.99 2005/06/05 22:32:56 tgl Exp $
 * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.100 2005/10/15 02:49:20 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -110,19 +110,18 @@ replace_outer_var(Var *var)
abslevel = PlannerQueryLevel - var->varlevelsup;

/*
 * If there's already a PlannerParamList entry for this same Var, just
 * use it. NOTE: in sufficiently complex querytrees, it is possible
 * for the same varno/abslevel to refer to different RTEs in different
 * parts of the parsetree, so that different fields might end up
 * sharing the same Param number. As long as we check the vartype as
 * well, I believe that this sort of aliasing will cause no trouble.
 * The correct field should get stored into the Param slot at
 * execution in each part of the tree.
 * If there's already a PlannerParamList entry for this same Var, just use
 * it. NOTE: in sufficiently complex querytrees, it is possible for the
 * same varno/abslevel to refer to different RTEs in different parts of
 * the parsetree, so that different fields might end up sharing the same
 * Param number. As long as we check the vartype as well, I believe that
 * this sort of aliasing will cause no trouble. The correct field should
 * get stored into the Param slot at execution in each part of the tree.
 *
 * We also need to demand a match on vartypmod. This does not matter for
 * the Param itself, since those are not typmod-dependent, but it does
 * matter when make_subplan() instantiates a modified copy of the Var
 * for a subplan's args list.
 * We also need to demand a match on vartypmod. This does not matter for the
 * Param itself, since those are not typmod-dependent, but it does matter
 * when make_subplan() instantiates a modified copy of the Var for a
 * subplan's args list.
 */
i = 0;
foreach(ppl, PlannerParamList)
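The loop body (elided by the hunk) scans for an entry matching on abslevel, varno/varattno, vartype and, per the comment, vartypmod; roughly the following, with the PlannerParamItem field names being assumptions:

	foreach(ppl, PlannerParamList)
	{
		pitem = (PlannerParamItem *) lfirst(ppl);
		if (pitem->abslevel == abslevel && IsA(pitem->item, Var))
		{
			Var		   *pvar = (Var *) pitem->item;

			if (pvar->varno == var->varno &&
				pvar->varattno == var->varattno &&
				pvar->vartype == var->vartype &&
				pvar->vartypmod == var->vartypmod)
				break;			/* reuse Param number i */
		}
		i++;
	}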
@@ -179,8 +178,8 @@ replace_outer_agg(Aggref *agg)
abslevel = PlannerQueryLevel - agg->agglevelsup;

/*
 * It does not seem worthwhile to try to match duplicate outer aggs.
 * Just make a new slot every time.
 * It does not seem worthwhile to try to match duplicate outer aggs. Just
 * make a new slot every time.
 */
agg = (Aggref *) copyObject(agg);
IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0);
@@ -253,33 +252,32 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
Node *result;

/*
 * Copy the source Query node. This is a quick and dirty kluge to
 * resolve the fact that the parser can generate trees with multiple
 * links to the same sub-Query node, but the planner wants to scribble
 * on the Query. Try to clean this up when we do querytree redesign...
 * Copy the source Query node. This is a quick and dirty kluge to resolve
 * the fact that the parser can generate trees with multiple links to the
 * same sub-Query node, but the planner wants to scribble on the Query.
 * Try to clean this up when we do querytree redesign...
 */
subquery = (Query *) copyObject(subquery);

/*
 * For an EXISTS subplan, tell lower-level planner to expect that only
 * the first tuple will be retrieved. For ALL and ANY subplans, we
 * will be able to stop evaluating if the test condition fails, so
 * very often not all the tuples will be retrieved; for lack of a
 * better idea, specify 50% retrieval. For EXPR and MULTIEXPR
 * subplans, use default behavior (we're only expecting one row out,
 * anyway).
 * For an EXISTS subplan, tell lower-level planner to expect that only the
 * first tuple will be retrieved. For ALL and ANY subplans, we will be
 * able to stop evaluating if the test condition fails, so very often not
 * all the tuples will be retrieved; for lack of a better idea, specify
 * 50% retrieval. For EXPR and MULTIEXPR subplans, use default behavior
 * (we're only expecting one row out, anyway).
 *
 * NOTE: if you change these numbers, also change cost_qual_eval_walker()
 * in path/costsize.c.
 * NOTE: if you change these numbers, also change cost_qual_eval_walker() in
 * path/costsize.c.
 *
 * XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or
 * materialize its result below. In that case it would've been better
 * to specify full retrieval. At present, however, we can only detect
 * materialize its result below. In that case it would've been better to
 * specify full retrieval. At present, however, we can only detect
 * correlation or lack of it after we've made the subplan :-(. Perhaps
 * detection of correlation should be done as a separate step.
 * Meanwhile, we don't want to be too optimistic about the percentage
 * of tuples retrieved, for fear of selecting a plan that's bad for
 * the materialization case.
 * detection of correlation should be done as a separate step. Meanwhile,
 * we don't want to be too optimistic about the percentage of tuples
 * retrieved, for fear of selecting a plan that's bad for the
 * materialization case.
 */
if (slink->subLinkType == EXISTS_SUBLINK)
tuple_fraction = 1.0; /* just like a LIMIT 1 */
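Only the EXISTS branch survives the hunk boundary; given the percentages in the comment, the if-chain presumably continues along these lines (a sketch):

	if (slink->subLinkType == EXISTS_SUBLINK)
		tuple_fraction = 1.0;	/* just like a LIMIT 1 */
	else if (slink->subLinkType == ALL_SUBLINK ||
			 slink->subLinkType == ANY_SUBLINK)
		tuple_fraction = 0.5;	/* the 50% retrieval mentioned above */
	else
		tuple_fraction = 0.0;	/* default: expect all the tuples */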
@@ -294,8 +292,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
 */
node->plan = plan = subquery_planner(subquery, tuple_fraction, NULL);

node->plan_id = PlannerPlanId++; /* Assign unique ID to this
 * SubPlan */
node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */

node->rtable = subquery->rtable;

@@ -314,8 +311,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
node->args = NIL;

/*
 * Make parParam list of params that current query level will pass to
 * this child plan.
 * Make parParam list of params that current query level will pass to this
 * child plan.
 */
tmpset = bms_copy(plan->extParam);
while ((paramid = bms_first_member(tmpset)) >= 0)
@@ -328,13 +325,12 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
bms_free(tmpset);

/*
 * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY,
 * or MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or
 * ARRAY, we just produce a Param referring to the result of
 * evaluating the initPlan. For MULTIEXPR, we must build an AND or
 * OR-clause of the individual comparison operators, using the
 * appropriate lefthand side expressions and Params for the initPlan's
 * target items.
 * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, or
 * MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or ARRAY,
 * we just produce a Param referring to the result of evaluating the
 * initPlan. For MULTIEXPR, we must build an AND or OR-clause of the
 * individual comparison operators, using the appropriate lefthand side
 * expressions and Params for the initPlan's target items.
 */
if (node->parParam == NIL && slink->subLinkType == EXISTS_SUBLINK)
{
@@ -387,9 +383,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
PlannerInitPlan = lappend(PlannerInitPlan, node);

/*
 * The executable expressions are returned to become part of the
 * outer plan's expression tree; they are not kept in the initplan
 * node.
 * The executable expressions are returned to become part of the outer
 * plan's expression tree; they are not kept in the initplan node.
 */
if (list_length(exprs) > 1)
result = (Node *) (node->useOr ? make_orclause(exprs) :
@@ -403,22 +398,22 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
ListCell *l;

/*
 * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types
 * to initPlans, even when they are uncorrelated or undirect
 * correlated, because we need to scan the output of the subplan
 * for each outer tuple. But if it's an IN (= ANY) test, we might
 * be able to use a hashtable to avoid comparing all the tuples.
 * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types to
 * initPlans, even when they are uncorrelated or undirect correlated,
 * because we need to scan the output of the subplan for each outer
 * tuple. But if it's an IN (= ANY) test, we might be able to use a
 * hashtable to avoid comparing all the tuples.
 */
if (subplan_is_hashable(slink, node))
node->useHashTable = true;

/*
 * Otherwise, we have the option to tack a MATERIAL node onto the
 * top of the subplan, to reduce the cost of reading it
 * repeatedly. This is pointless for a direct-correlated subplan,
 * since we'd have to recompute its results each time anyway. For
 * uncorrelated/undirect correlated subplans, we add MATERIAL unless
 * the subplan's top plan node would materialize its output anyway.
 * Otherwise, we have the option to tack a MATERIAL node onto the top
 * of the subplan, to reduce the cost of reading it repeatedly. This
 * is pointless for a direct-correlated subplan, since we'd have to
 * recompute its results each time anyway. For uncorrelated/undirect
 * correlated subplans, we add MATERIAL unless the subplan's top plan
 * node would materialize its output anyway.
 */
else if (node->parParam == NIL)
{
@@ -455,9 +450,9 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
PlannerParamItem *pitem = list_nth(PlannerParamList, lfirst_int(l));

/*
 * The Var or Aggref has already been adjusted to have the
 * correct varlevelsup or agglevelsup. We probably don't even
 * need to copy it again, but be safe.
 * The Var or Aggref has already been adjusted to have the correct
 * varlevelsup or agglevelsup. We probably don't even need to
 * copy it again, but be safe.
 */
args = lappend(args, copyObject(pitem->item));
}
@@ -545,8 +540,8 @@ convert_sublink_opers(List *lefthand, List *operOids,
 *
 * Note: we use make_op_expr in case runtime type conversion function
 * calls must be inserted for this operator! (But we are not
 * expecting to have to resolve unknown Params, so it's okay to
 * pass a null pstate.)
 * expecting to have to resolve unknown Params, so it's okay to pass a
 * null pstate.)
 */
result = lappend(result,
make_op_expr(NULL,
@@ -580,8 +575,8 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
/*
 * The sublink type must be "= ANY" --- that is, an IN operator. (We
 * require the operator name to be unqualified, which may be overly
 * paranoid, or may not be.) XXX since we also check that the
 * operators are hashable, the test on operator name may be redundant?
 * paranoid, or may not be.) XXX since we also check that the operators
 * are hashable, the test on operator name may be redundant?
 */
if (slink->subLinkType != ANY_SUBLINK)
return false;
@@ -591,15 +586,15 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)

/*
 * The subplan must not have any direct correlation vars --- else we'd
 * have to recompute its output each time, so that the hashtable
 * wouldn't gain anything.
 * have to recompute its output each time, so that the hashtable wouldn't
 * gain anything.
 */
if (node->parParam != NIL)
return false;

/*
 * The estimated size of the subquery result must fit in work_mem.
 * (XXX what about hashtable overhead?)
 * The estimated size of the subquery result must fit in work_mem. (XXX
 * what about hashtable overhead?)
 */
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData)));
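Between this hunk and the next, the estimate is compared against work_mem, which is measured in kilobytes; the elided test is presumably of this shape:

	if (subquery_size > work_mem * 1024L)
		return false;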
@@ -607,18 +602,17 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
return false;

/*
* The combining operators must be hashable, strict, and
* self-commutative. The need for hashability is obvious, since we
* want to use hashing. Without strictness, behavior in the presence
* of nulls is too unpredictable. (We actually must assume even more
* than plain strictness, see nodeSubplan.c for details.) And
* commutativity ensures that the left and right datatypes are the
* same; this allows us to assume that the combining operators are
* equality for the righthand datatype, so that they can be used to
* compare righthand tuples as well as comparing lefthand to righthand
* tuples. (This last restriction could be relaxed by using two
* different sets of operators with the hash table, but there is no
* obvious usefulness to that at present.)
* The combining operators must be hashable, strict, and self-commutative.
* The need for hashability is obvious, since we want to use hashing.
* Without strictness, behavior in the presence of nulls is too
* unpredictable. (We actually must assume even more than plain
* strictness, see nodeSubplan.c for details.) And commutativity ensures
* that the left and right datatypes are the same; this allows us to
* assume that the combining operators are equality for the righthand
* datatype, so that they can be used to compare righthand tuples as well
* as comparing lefthand to righthand tuples. (This last restriction
* could be relaxed by using two different sets of operators with the hash
* table, but there is no obvious usefulness to that at present.)
*/
foreach(l, slink->operOids)
{
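
Strictness and commutativity are easy to see in miniature. A hedged sketch, with a nullable int standing in for a real datatype; this is not the executor's machinery (nodeSubplan.c has the genuine, stricter rules), just the shape of the property being demanded.

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool isnull; int value; } NullableInt;

/*
 * Strict equality: if either input is null the result is "unknown",
 * which a hashed IN lookup must treat as "no match here".  Because the
 * function is commutative, the very same code can compare lefthand to
 * righthand tuples and righthand tuples to each other.
 */
static bool
strict_int_eq(NullableInt a, NullableInt b, bool *unknown)
{
    if (a.isnull || b.isnull)
    {
        *unknown = true;
        return false;
    }
    *unknown = false;
    return a.value == b.value;
}

int
main(void)
{
    NullableInt x = {false, 42}, y = {false, 42}, n = {true, 0};
    bool        unknown;

    printf("42 = 42 -> %d\n", strict_int_eq(x, y, &unknown));   /* 1 */
    (void) strict_int_eq(x, n, &unknown);
    printf("42 = NULL is unknown -> %d\n", unknown);            /* 1 */
    return 0;
}
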
@@ -679,24 +673,24 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
return NULL;

/*
* The sub-select must not refer to any Vars of the parent query.
* (Vars of higher levels should be okay, though.)
* The sub-select must not refer to any Vars of the parent query. (Vars of
* higher levels should be okay, though.)
*/
if (contain_vars_of_level((Node *) subselect, 1))
return NULL;

/*
* The left-hand expressions must contain some Vars of the current
* query, else it's not gonna be a join.
* The left-hand expressions must contain some Vars of the current query,
* else it's not gonna be a join.
*/
left_varnos = pull_varnos((Node *) sublink->lefthand);
if (bms_is_empty(left_varnos))
return NULL;

/*
* The left-hand expressions mustn't be volatile. (Perhaps we should
* test the combining operators, too? We'd only need to point the
* function directly at the sublink ...)
* The left-hand expressions mustn't be volatile. (Perhaps we should test
* the combining operators, too? We'd only need to point the function
* directly at the sublink ...)
*/
if (contain_volatile_functions((Node *) sublink->lefthand))
return NULL;
@@ -704,10 +698,10 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
/*
* Okay, pull up the sub-select into top range table and jointree.
*
* We rely here on the assumption that the outer query has no references
* to the inner (necessarily true, other than the Vars that we build
* below). Therefore this is a lot easier than what
* pull_up_subqueries has to go through.
* We rely here on the assumption that the outer query has no references to
* the inner (necessarily true, other than the Vars that we build below).
* Therefore this is a lot easier than what pull_up_subqueries has to go
* through.
*/
rte = addRangeTableEntryForSubquery(NULL,
subselect,
@@ -729,8 +723,8 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)

/*
* Build the result qual expressions. As a side effect,
* ininfo->sub_targetlist is filled with a list of Vars representing
* the subselect outputs.
* ininfo->sub_targetlist is filled with a list of Vars representing the
* subselect outputs.
*/
exprs = convert_sublink_opers(sublink->lefthand,
sublink->operOids,
@@ -811,8 +805,7 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
List *lefthand;

/*
* First, recursively process the lefthand-side expressions, if
* any.
* First, recursively process the lefthand-side expressions, if any.
*/
locTopQual = false;
lefthand = (List *)
@@ -825,22 +818,22 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
}

/*
* We should never see a SubPlan expression in the input (since this
* is the very routine that creates 'em to begin with). We shouldn't
* find ourselves invoked directly on a Query, either.
* We should never see a SubPlan expression in the input (since this is
* the very routine that creates 'em to begin with). We shouldn't find
* ourselves invoked directly on a Query, either.
*/
Assert(!is_subplan(node));
Assert(!IsA(node, Query));

/*
* Because make_subplan() could return an AND or OR clause, we have to
* take steps to preserve AND/OR flatness of a qual. We assume the
* input has been AND/OR flattened and so we need no recursion here.
* take steps to preserve AND/OR flatness of a qual. We assume the input
* has been AND/OR flattened and so we need no recursion here.
*
* If we recurse down through anything other than an AND node, we are
* definitely not at top qual level anymore. (Due to the coding here,
* we will not get called on the List subnodes of an AND, so no check
* is needed for List.)
* definitely not at top qual level anymore. (Due to the coding here, we
* will not get called on the List subnodes of an AND, so no check is
* needed for List.)
*/
if (and_clause(node))
{
@@ -909,8 +902,8 @@ SS_finalize_plan(Plan *plan, List *rtable)

/*
* First, scan the param list to discover the sets of params that are
* available from outer query levels and my own query level. We do
* this once to save time in the per-plan recursion steps.
* available from outer query levels and my own query level. We do this
* once to save time in the per-plan recursion steps.
*/
paramid = 0;
foreach(l, PlannerParamList)
@@ -942,13 +935,12 @@ SS_finalize_plan(Plan *plan, List *rtable)
bms_free(valid_params);

/*
* Finally, attach any initPlans to the topmost plan node,
* and add their extParams to the topmost node's, too.
* Finally, attach any initPlans to the topmost plan node, and add their
* extParams to the topmost node's, too.
*
* We also add the total_cost of each initPlan to the startup cost of
* the top node. This is a conservative overestimate, since in
* fact each initPlan might be executed later than plan startup,
* or even not at all.
* We also add the total_cost of each initPlan to the startup cost of the top
* node. This is a conservative overestimate, since in fact each initPlan
* might be executed later than plan startup, or even not at all.
*/
plan->initPlan = PlannerInitPlan;
PlannerInitPlan = NIL; /* make sure they're not attached twice */
@@ -988,10 +980,10 @@ finalize_plan(Plan *plan, List *rtable,
context.outer_params = outer_params;

/*
* When we call finalize_primnode, context.paramids sets are
* automatically merged together. But when recursing to self, we have
* to do it the hard way. We want the paramids set to include params
* in subplans as well as at this level.
* When we call finalize_primnode, context.paramids sets are automatically
* merged together. But when recursing to self, we have to do it the hard
* way. We want the paramids set to include params in subplans as well as
* at this level.
*/

/* Find params in targetlist and qual */
@@ -1011,17 +1003,18 @@ finalize_plan(Plan *plan, List *rtable,
&context);

/*
* we need not look at indexqualorig, since it will have the
* same param references as indexqual.
* we need not look at indexqualorig, since it will have the same
* param references as indexqual.
*/
break;

case T_BitmapIndexScan:
finalize_primnode((Node *) ((BitmapIndexScan *) plan)->indexqual,
&context);

/*
* we need not look at indexqualorig, since it will have the
* same param references as indexqual.
* we need not look at indexqualorig, since it will have the same
* param references as indexqual.
*/
break;

@@ -1038,14 +1031,14 @@ finalize_plan(Plan *plan, List *rtable,
case T_SubqueryScan:

/*
* In a SubqueryScan, SS_finalize_plan has already been run on
* the subplan by the inner invocation of subquery_planner, so
* there's no need to do it again. Instead, just pull out the
* subplan's extParams list, which represents the params it
* needs from my level and higher levels.
* In a SubqueryScan, SS_finalize_plan has already been run on the
* subplan by the inner invocation of subquery_planner, so there's
* no need to do it again. Instead, just pull out the subplan's
* extParams list, which represents the params it needs from my
* level and higher levels.
*/
context.paramids = bms_add_members(context.paramids,
((SubqueryScan *) plan)->subplan->extParam);
((SubqueryScan *) plan)->subplan->extParam);
break;

case T_FunctionScan:
@@ -1170,8 +1163,8 @@ finalize_plan(Plan *plan, List *rtable,
plan->allParam = context.paramids;

/*
* For speed at execution time, make sure extParam/allParam are
* actually NULL if they are empty sets.
* For speed at execution time, make sure extParam/allParam are actually
* NULL if they are empty sets.
*/
if (bms_is_empty(plan->extParam))
{
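
The normalization being applied here amounts to canonicalizing the empty set as a NULL pointer, so that later emptiness tests cost a single pointer comparison. A toy sketch of the idea, with a heap-allocated 64-bit mask standing in for the real Bitmapset type:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for Bitmapset: one 64-bit mask on the heap. */
typedef uint64_t ToySet;

static int
toy_is_empty(const ToySet *s)
{
    return s == NULL || *s == 0;
}

/* Canonicalize: represent the empty set as NULL, as finalize_plan does. */
static ToySet *
toy_canonicalize(ToySet *s)
{
    if (toy_is_empty(s))
    {
        free(s);
        return NULL;
    }
    return s;
}

int
main(void)
{
    ToySet *ext = malloc(sizeof(ToySet));

    *ext = 0;                           /* no params were collected */
    ext = toy_canonicalize(ext);
    /* Executor-side code can now test emptiness with one comparison. */
    printf("extParam empty: %s\n", ext == NULL ? "yes" : "no");
    return 0;
}
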
@@ -1212,8 +1205,8 @@ finalize_primnode(Node *node, finalize_primnode_context *context)

/* Add outer-level params needed by the subplan to paramids */
context->paramids = bms_join(context->paramids,
bms_intersect(subplan->plan->extParam,
context->outer_params));
bms_intersect(subplan->plan->extParam,
context->outer_params));
/* fall through to recurse into subplan args */
}
return expression_tree_walker(node, finalize_primnode,
@@ -1241,7 +1234,7 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
int paramid;

/*
* Set up for a new level of subquery. This is just to keep
* Set up for a new level of subquery. This is just to keep
* SS_finalize_plan from becoming confused.
*/
PlannerQueryLevel++;
@@ -1262,16 +1255,15 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
node = makeNode(SubPlan);
node->subLinkType = EXPR_SUBLINK;
node->plan = plan;
node->plan_id = PlannerPlanId++; /* Assign unique ID to this
* SubPlan */
node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */

node->rtable = root->parse->rtable;

PlannerInitPlan = lappend(PlannerInitPlan, node);

/*
* Make parParam list of params that current query level will pass to
* this child plan. (In current usage there probably aren't any.)
* Make parParam list of params that current query level will pass to this
* child plan. (In current usage there probably aren't any.)
*/
tmpset = bms_copy(plan->extParam);
while ((paramid = bms_first_member(tmpset)) >= 0)

@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.30 2005/08/01 20:31:09 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.31 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -143,8 +143,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
Query *subquery = rte->subquery;

/*
* Is this a subquery RTE, and if so, is the subquery simple
* enough to pull up? (If not, do nothing at this node.)
* Is this a subquery RTE, and if so, is the subquery simple enough to
* pull up? (If not, do nothing at this node.)
*
* If we are inside an outer join, only pull up subqueries whose
* targetlists are nullable --- otherwise substituting their tlist
@@ -153,8 +153,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
*
* XXX This could be improved by generating pseudo-variables for such
* expressions; we'd have to figure out how to get the pseudo-
* variables evaluated at the right place in the modified plan
* tree. Fix it someday.
* variables evaluated at the right place in the modified plan tree.
* Fix it someday.
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_subquery(subquery) &&
@@ -166,53 +166,53 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
ListCell *rt;

/*
* Need a modifiable copy of the subquery to hack on. Even if
* we didn't sometimes choose not to pull up below, we must do
* this to avoid problems if the same subquery is referenced
* from multiple jointree items (which can't happen normally,
* but might after rule rewriting).
* Need a modifiable copy of the subquery to hack on. Even if we
* didn't sometimes choose not to pull up below, we must do this
* to avoid problems if the same subquery is referenced from
* multiple jointree items (which can't happen normally, but might
* after rule rewriting).
*/
subquery = copyObject(subquery);

/*
* Create a PlannerInfo data structure for this subquery.
*
* NOTE: the next few steps should match the first processing
* in subquery_planner(). Can we refactor to avoid code
* duplication, or would that just make things uglier?
* NOTE: the next few steps should match the first processing in
* subquery_planner(). Can we refactor to avoid code duplication,
* or would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
subroot->parse = subquery;

/*
* Pull up any IN clauses within the subquery's WHERE, so that
* we don't leave unoptimized INs behind.
* Pull up any IN clauses within the subquery's WHERE, so that we
* don't leave unoptimized INs behind.
*/
subroot->in_info_list = NIL;
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subroot,
subquery->jointree->quals);
subquery->jointree->quals);

/*
* Recursively pull up the subquery's subqueries, so that this
* routine's processing is complete for its jointree and
* rangetable.
*
* Note: 'false' is correct here even if we are within an outer
* join in the upper query; the lower query starts with a
* clean slate for outer-join semantics.
* Note: 'false' is correct here even if we are within an outer join
* in the upper query; the lower query starts with a clean slate
* for outer-join semantics.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subroot, (Node *) subquery->jointree,
false);

/*
* Now we must recheck whether the subquery is still simple
* enough to pull up. If not, abandon processing it.
* Now we must recheck whether the subquery is still simple enough
* to pull up. If not, abandon processing it.
*
* We don't really need to recheck all the conditions involved,
* but it's easier just to keep this "if" looking the same as
* the one above.
* We don't really need to recheck all the conditions involved, but
* it's easier just to keep this "if" looking the same as the one
* above.
*/
if (is_simple_subquery(subquery) &&
(!below_outer_join || has_nullable_targetlist(subquery)))
@@ -224,10 +224,10 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
/*
* Give up, return unmodified RangeTblRef.
*
* Note: The work we just did will be redone when the
* subquery gets planned on its own. Perhaps we could
* avoid that by storing the modified subquery back into
* the rangetable, but I'm not gonna risk it now.
* Note: The work we just did will be redone when the subquery
* gets planned on its own. Perhaps we could avoid that by
* storing the modified subquery back into the rangetable, but
* I'm not gonna risk it now.
*/
return jtnode;
}
@@ -242,8 +242,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
OffsetVarNodes((Node *) subroot->in_info_list, rtoffset, 0);

/*
* Upper-level vars in subquery are now one level closer to
* their parent than before.
* Upper-level vars in subquery are now one level closer to their
* parent than before.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
IncrementVarSublevelsUp((Node *) subroot->in_info_list, -1, 1);
@@ -251,9 +251,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
/*
* Replace all of the top query's references to the subquery's
* outputs with copies of the adjusted subtlist items, being
* careful not to replace any of the jointree structure.
* (This'd be a lot cleaner if we could use
* query_tree_mutator.)
* careful not to replace any of the jointree structure. (This'd
* be a lot cleaner if we could use query_tree_mutator.)
*/
subtlist = subquery->targetList;
parse->targetList = (List *)
@@ -284,9 +283,9 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
}

/*
* Now append the adjusted rtable entries to upper query. (We
* hold off until after fixing the upper rtable entries; no
* point in running that code on the subquery ones too.)
* Now append the adjusted rtable entries to upper query. (We hold
* off until after fixing the upper rtable entries; no point in
* running that code on the subquery ones too.)
*/
parse->rtable = list_concat(parse->rtable, subquery->rtable);

@@ -295,8 +294,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
* already adjusted the marker values, so just list_concat the
* list.)
*
* Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags,
* so complain if they are valid but different
* Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags, so
* complain if they are valid but different
*/
if (parse->rowMarks && subquery->rowMarks)
{
@@ -307,7 +306,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
if (parse->rowNoWait != subquery->rowNoWait)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use both wait and NOWAIT in one query")));
errmsg("cannot use both wait and NOWAIT in one query")));
}
parse->rowMarks = list_concat(parse->rowMarks, subquery->rowMarks);
if (subquery->rowMarks)
@@ -317,10 +316,9 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
}

/*
* We also have to fix the relid sets of any parent
* InClauseInfo nodes. (This could perhaps be done by
* ResolveNew, but it would clutter that routine's API
* unreasonably.)
* We also have to fix the relid sets of any parent InClauseInfo
* nodes. (This could perhaps be done by ResolveNew, but it would
* clutter that routine's API unreasonably.)
*/
if (root->in_info_list)
{
@@ -392,8 +390,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
case JOIN_UNION:

/*
* This is where we fail if upper levels of planner
* haven't rewritten UNION JOIN as an Append ...
* This is where we fail if upper levels of planner haven't
* rewritten UNION JOIN as an Append ...
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -436,8 +434,8 @@ is_simple_subquery(Query *subquery)
return false;

/*
* Can't pull up a subquery involving grouping, aggregation, sorting,
* or limiting.
* Can't pull up a subquery involving grouping, aggregation, sorting, or
* limiting.
*/
if (subquery->hasAggs ||
subquery->groupClause ||
@@ -449,21 +447,20 @@ is_simple_subquery(Query *subquery)
return false;

/*
* Don't pull up a subquery that has any set-returning functions in
* its targetlist. Otherwise we might well wind up inserting
* set-returning functions into places where they mustn't go, such as
* quals of higher queries.
* Don't pull up a subquery that has any set-returning functions in its
* targetlist. Otherwise we might well wind up inserting set-returning
* functions into places where they mustn't go, such as quals of higher
* queries.
*/
if (expression_returns_set((Node *) subquery->targetList))
return false;

/*
* Hack: don't try to pull up a subquery with an empty jointree.
* query_planner() will correctly generate a Result plan for a
* jointree that's totally empty, but I don't think the right things
* happen if an empty FromExpr appears lower down in a jointree. Not
* worth working hard on this, just to collapse SubqueryScan/Result
* into Result...
* query_planner() will correctly generate a Result plan for a jointree
* that's totally empty, but I don't think the right things happen if an
* empty FromExpr appears lower down in a jointree. Not worth working hard
* on this, just to collapse SubqueryScan/Result into Result...
*/
if (subquery->jointree->fromlist == NIL)
return false;
@@ -545,8 +542,8 @@ resolvenew_in_jointree(Node *jtnode, int varno,
subtlist, CMD_SELECT, 0);

/*
* We don't bother to update the colvars list, since it won't be
* used again ...
* We don't bother to update the colvars list, since it won't be used
* again ...
*/
}
else
@@ -583,14 +580,13 @@ reduce_outer_joins(PlannerInfo *root)
reduce_outer_joins_state *state;

/*
* To avoid doing strictness checks on more quals than necessary, we
* want to stop descending the jointree as soon as there are no outer
* joins below our current point. This consideration forces a
* two-pass process. The first pass gathers information about which
* base rels appear below each side of each join clause, and about
* whether there are outer join(s) below each side of each join
* clause. The second pass examines qual clauses and changes join
* types as it descends the tree.
* To avoid doing strictness checks on more quals than necessary, we want
* to stop descending the jointree as soon as there are no outer joins
* below our current point. This consideration forces a two-pass process.
* The first pass gathers information about which base rels appear below
* each side of each join clause, and about whether there are outer
* join(s) below each side of each join clause. The second pass examines
* qual clauses and changes join types as it descends the tree.
*/
state = reduce_outer_joins_pass1((Node *) root->parse->jointree);

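The two-pass shape described here is easy to demonstrate on a toy binary tree: pass 1 records bottom-up whether an outer join appears at or below each node, and pass 2 walks top-down but prunes wherever that flag is false. A minimal sketch; the node type and flags are invented for illustration and are not the planner's real jointree structures.

#include <stdbool.h>
#include <stdio.h>

/* Toy join-tree node; "outer" marks an outer join. */
typedef struct Node {
    bool outer;
    bool contains_outer;        /* filled in by pass 1 */
    struct Node *left, *right;
} Node;

/* Pass 1: bottom-up, record whether any outer join is at or below us. */
static bool
pass1(Node *n)
{
    bool l, r;

    if (n == NULL)
        return false;
    l = pass1(n->left);         /* evaluate both sides unconditionally */
    r = pass1(n->right);
    n->contains_outer = n->outer || l || r;
    return n->contains_outer;
}

/* Pass 2: top-down, but don't descend where there's nothing to fix. */
static void
pass2(Node *n, int depth)
{
    if (n == NULL || !n->contains_outer)
        return;                 /* prune: no outer joins below here */
    printf("visiting depth-%d node%s\n", depth, n->outer ? " (outer)" : "");
    pass2(n->left, depth + 1);
    pass2(n->right, depth + 1);
}

int
main(void)
{
    Node leaf1 = {false, false, NULL, NULL};
    Node leaf2 = {true, false, NULL, NULL};     /* an outer join */
    Node root = {false, false, &leaf1, &leaf2};

    pass1(&root);
    pass2(&root, 0);            /* visits root and leaf2, skips leaf1 */
    return 0;
}
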
@@ -768,12 +764,11 @@ reduce_outer_joins_pass2(Node *jtnode,

/*
* If this join is (now) inner, we can add any nonnullability
* constraints its quals provide to those we got from above.
* But if it is outer, we can only pass down the local
* constraints into the nullable side, because an outer join
* never eliminates any rows from its non-nullable side. If
* it's a FULL join then it doesn't eliminate anything from
* either side.
* constraints its quals provide to those we got from above. But
* if it is outer, we can only pass down the local constraints
* into the nullable side, because an outer join never eliminates
* any rows from its non-nullable side. If it's a FULL join then
* it doesn't eliminate anything from either side.
*/
if (jointype != JOIN_FULL)
{
@@ -782,8 +777,7 @@ reduce_outer_joins_pass2(Node *jtnode,
nonnullable_rels);
}
else
local_nonnullable = NULL; /* no use in calculating
* it */
local_nonnullable = NULL; /* no use in calculating it */

if (left_state->contains_outer)
{
@@ -886,8 +880,8 @@ find_nonnullable_rels(Node *node, bool top_level)
NullTest *expr = (NullTest *) node;

/*
* IS NOT NULL can be considered strict, but only at top level;
* else we might have something like NOT (x IS NOT NULL).
* IS NOT NULL can be considered strict, but only at top level; else
* we might have something like NOT (x IS NOT NULL).
*/
if (top_level && expr->nulltesttype == IS_NOT_NULL)
result = find_nonnullable_rels((Node *) expr->arg, false);
@@ -960,10 +954,10 @@ simplify_jointree(PlannerInfo *root, Node *jtnode)
if (child && IsA(child, FromExpr))
{
/*
* Yes, so do we want to merge it into parent? Always do
* so if child has just one element (since that doesn't
* make the parent's list any longer). Otherwise merge if
* the resulting join list would be no longer than
* Yes, so do we want to merge it into parent? Always do so
* if child has just one element (since that doesn't make the
* parent's list any longer). Otherwise merge if the
* resulting join list would be no longer than
* from_collapse_limit.
*/
FromExpr *subf = (FromExpr *) child;
@@ -976,9 +970,9 @@ simplify_jointree(PlannerInfo *root, Node *jtnode)
newlist = list_concat(newlist, subf->fromlist);

/*
* By now, the quals have been converted to
* implicit-AND lists, so we just need to join the
* lists. NOTE: we put the pulled-up quals first.
* By now, the quals have been converted to implicit-AND
* lists, so we just need to join the lists. NOTE: we put
* the pulled-up quals first.
*/
f->quals = (Node *) list_concat((List *) subf->quals,
(List *) f->quals);
@@ -1000,8 +994,8 @@ simplify_jointree(PlannerInfo *root, Node *jtnode)
j->rarg = simplify_jointree(root, j->rarg);

/*
* If it is an outer join, we must not flatten it. An inner join
* is semantically equivalent to a FromExpr; we convert it to one,
* If it is an outer join, we must not flatten it. An inner join is
* semantically equivalent to a FromExpr; we convert it to one,
* allowing it to be flattened into its parent, if the resulting
* FromExpr would have no more than join_collapse_limit members.
*/

@@ -25,7 +25,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.50 2005/07/29 21:40:02 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.51 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,10 +73,10 @@ canonicalize_qual(Expr *qual)
return NULL;

/*
* Push down NOTs. We do this only in the top-level boolean
* expression, without examining arguments of operators/functions. The
* main reason for doing this is to expose as much top-level AND/OR
* structure as we can, so there's no point in descending further.
* Push down NOTs. We do this only in the top-level boolean expression,
* without examining arguments of operators/functions. The main reason for
* doing this is to expose as much top-level AND/OR structure as we can,
* so there's no point in descending further.
*/
newqual = find_nots(qual);

@@ -110,12 +110,12 @@ pull_ands(List *andlist)
/*
* Note: we can destructively concat the subexpression's arglist
* because we know the recursive invocation of pull_ands will have
* built a new arglist not shared with any other expr. Otherwise
* we'd need a list_copy here.
* built a new arglist not shared with any other expr. Otherwise we'd
* need a list_copy here.
*/
if (and_clause(subexpr))
out_list = list_concat(out_list,
pull_ands(((BoolExpr *) subexpr)->args));
pull_ands(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
@@ -142,12 +142,12 @@ pull_ors(List *orlist)
/*
* Note: we can destructively concat the subexpression's arglist
* because we know the recursive invocation of pull_ors will have
* built a new arglist not shared with any other expr. Otherwise
* we'd need a list_copy here.
* built a new arglist not shared with any other expr. Otherwise we'd
* need a list_copy here.
*/
if (or_clause(subexpr))
out_list = list_concat(out_list,
pull_ors(((BoolExpr *) subexpr)->args));
pull_ors(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
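
pull_ands and pull_ors are the same one-level flattening specialized to each connective: a same-connective child's argument list is spliced into the parent's list, and the recursion guarantees each spliced sublist is already flat. A self-contained sketch of the idea on a toy expression type (arrays instead of Lists; not the real node structures):

#include <stdio.h>

/* Toy boolean node: a leaf name, or an AND over children. */
typedef struct Expr {
    const char *leaf;               /* non-NULL for a leaf */
    struct Expr *args[4];
    int nargs;
} Expr;

/*
 * Flatten nested ANDs into one argument list, as pull_ands does; the
 * recursive call returns already-flat sublists, so one pass suffices.
 */
static void
pull_ands(Expr *node, const char **out, int *n)
{
    for (int i = 0; i < node->nargs; i++)
    {
        Expr *sub = node->args[i];

        if (sub->leaf)
            out[(*n)++] = sub->leaf;
        else
            pull_ands(sub, out, n); /* splice the nested AND's args */
    }
}

int
main(void)
{
    Expr a = {"a"}, b = {"b"}, c = {"c"};
    Expr inner = {NULL, {&b, &c}, 2};   /* (b AND c) */
    Expr top = {NULL, {&a, &inner}, 2}; /* a AND (b AND c) */
    const char *flat[8];
    int n = 0;

    pull_ands(&top, flat, &n);
    for (int i = 0; i < n; i++)         /* prints a, b, c */
        printf("%s\n", flat[i]);
    return 0;
}
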
@@ -249,8 +249,8 @@ push_nots(Expr *qual)
{
/*
* Another NOT cancels this NOT, so eliminate the NOT and stop
* negating this branch. But search the subexpression for more
* NOTs to simplify.
* negating this branch. But search the subexpression for more NOTs
* to simplify.
*/
return find_nots(get_notclausearg(qual));
}
@@ -307,8 +307,8 @@ find_duplicate_ors(Expr *qual)
orlist = lappend(orlist, find_duplicate_ors(lfirst(temp)));

/*
* Don't need pull_ors() since this routine will never introduce
* an OR where there wasn't one before.
* Don't need pull_ors() since this routine will never introduce an OR
* where there wasn't one before.
*/
return process_duplicate_ors(orlist);
}
@@ -353,10 +353,10 @@ process_duplicate_ors(List *orlist)
return linitial(orlist);

/*
* Choose the shortest AND clause as the reference list --- obviously,
* any subclause not in this clause isn't in all the clauses. If we
* find a clause that's not an AND, we can treat it as a one-element
* AND clause, which necessarily wins as shortest.
* Choose the shortest AND clause as the reference list --- obviously, any
* subclause not in this clause isn't in all the clauses. If we find a
* clause that's not an AND, we can treat it as a one-element AND clause,
* which necessarily wins as shortest.
*/
foreach(temp, orlist)
{
@@ -386,8 +386,8 @@ process_duplicate_ors(List *orlist)
reference = list_union(NIL, reference);

/*
* Check each element of the reference list to see if it's in all the
* OR clauses. Build a new list of winning clauses.
* Check each element of the reference list to see if it's in all the OR
* clauses. Build a new list of winning clauses.
*/
winners = NIL;
foreach(temp, reference)
@@ -431,13 +431,12 @@ process_duplicate_ors(List *orlist)
/*
* Generate new OR list consisting of the remaining sub-clauses.
*
* If any clause degenerates to empty, then we have a situation like (A
* AND B) OR (A), which can be reduced to just A --- that is, the
* additional conditions in other arms of the OR are irrelevant.
* If any clause degenerates to empty, then we have a situation like (A AND
* B) OR (A), which can be reduced to just A --- that is, the additional
* conditions in other arms of the OR are irrelevant.
*
* Note that because we use list_difference, any multiple occurrences of
* a winning clause in an AND sub-clause will be removed
* automatically.
* Note that because we use list_difference, any multiple occurrences of a
* winning clause in an AND sub-clause will be removed automatically.
*/
neworlist = NIL;
foreach(temp, orlist)
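
The degenerate case is simple set algebra: subclauses appearing in every OR arm are factored out as "winners", and if removing them empties any arm, the whole OR collapses to just the winners. A toy demonstration of (A AND B) OR (A) reducing to A, with strings standing in for clause nodes and string equality for clause equality:

#include <stdio.h>
#include <string.h>

#define MAXC 4

/* One arm of an OR, as an implicit-AND list of subclause names. */
typedef struct { const char *c[MAXC]; int n; } Arm;

static int
contains(const Arm *a, const char *clause)
{
    for (int i = 0; i < a->n; i++)
        if (strcmp(a->c[i], clause) == 0)
            return 1;
    return 0;
}

int
main(void)
{
    /* (A AND B) OR (A): arms[1] is shortest, so it is the reference. */
    Arm arms[2] = {{{"A", "B"}, 2}, {{"A"}, 1}};
    const Arm *ref = &arms[1];
    const char *winners[MAXC];
    int nwin = 0;
    int collapses = 0;

    /* A reference subclause "wins" if it appears in every arm. */
    for (int i = 0; i < ref->n; i++)
        if (contains(&arms[0], ref->c[i]) && contains(&arms[1], ref->c[i]))
            winners[nwin++] = ref->c[i];

    /* If removing the winners empties any arm, the OR degenerates. */
    for (int j = 0; j < 2; j++)
    {
        int remaining = 0;

        for (int i = 0; i < arms[j].n; i++)
        {
            int iswin = 0;

            for (int w = 0; w < nwin; w++)
                iswin |= strcmp(arms[j].c[i], winners[w]) == 0;
            remaining += !iswin;
        }
        if (remaining == 0)
            collapses = 1;
    }

    printf("winner: %s, OR arm degenerated: %s\n",
           nwin ? winners[0] : "(none)", collapses ? "yes" : "no");
    /* So (A AND B) OR (A) reduces to just A. */
    return 0;
}
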
@@ -475,10 +474,10 @@ process_duplicate_ors(List *orlist)
}

/*
* Append reduced OR to the winners list, if it's not degenerate,
* handling the special case of one element correctly (can that really
* happen?). Also be careful to maintain AND/OR flatness in case we
* pulled up a sub-sub-OR-clause.
* Append reduced OR to the winners list, if it's not degenerate, handling
* the special case of one element correctly (can that really happen?).
* Also be careful to maintain AND/OR flatness in case we pulled up a
* sub-sub-OR-clause.
*/
if (neworlist != NIL)
{

@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.77 2005/06/05 22:32:56 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.78 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,10 +45,10 @@ static List *expand_targetlist(List *tlist, int command_type,
List *
preprocess_targetlist(PlannerInfo *root, List *tlist)
{
Query *parse = root->parse;
int result_relation = parse->resultRelation;
List *range_table = parse->rtable;
CmdType command_type = parse->commandType;
Query *parse = root->parse;
int result_relation = parse->resultRelation;
List *range_table = parse->rtable;
CmdType command_type = parse->commandType;

/*
* Sanity check: if there is a result relation, it'd better be a real
@@ -63,20 +63,20 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
}

/*
* for heap_formtuple to work, the targetlist must match the exact
* order of the attributes. We also need to fill in any missing
* attributes. -ay 10/94
* for heap_formtuple to work, the targetlist must match the exact order
* of the attributes. We also need to fill in any missing attributes.
* -ay 10/94
*/
if (command_type == CMD_INSERT || command_type == CMD_UPDATE)
tlist = expand_targetlist(tlist, command_type,
result_relation, range_table);

/*
* for "update" and "delete" queries, add ctid of the result relation
* into the target list so that the ctid will propagate through
* execution and ExecutePlan() will be able to identify the right
* tuple to replace or delete. This extra field is marked "junk" so
* that it is not stored back into the tuple.
* for "update" and "delete" queries, add ctid of the result relation into
* the target list so that the ctid will propagate through execution and
* ExecutePlan() will be able to identify the right tuple to replace or
* delete. This extra field is marked "junk" so that it is not stored
* back into the tuple.
*/
if (command_type == CMD_UPDATE || command_type == CMD_DELETE)
{
@@ -92,9 +92,9 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
true);

/*
* For an UPDATE, expand_targetlist already created a fresh tlist.
* For DELETE, better do a listCopy so that we don't destructively
* modify the original tlist (is this really necessary?).
* For an UPDATE, expand_targetlist already created a fresh tlist. For
* DELETE, better do a listCopy so that we don't destructively modify
* the original tlist (is this really necessary?).
*/
if (command_type == CMD_DELETE)
tlist = list_copy(tlist);
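
The "junk" marking is just a flag on the target-list entry: the column rides along through execution but is filtered out before results reach the client. A toy sketch of that bookkeeping, using an invented struct rather than the real TargetEntry node:

#include <stdbool.h>
#include <stdio.h>

/* Toy target-list entry: a column name plus a resjunk flag. */
typedef struct { const char *name; bool resjunk; } TLE;

int
main(void)
{
    TLE tlist[8] = {{"id", false}, {"val", false}};
    int ntlist = 2;

    /*
     * For UPDATE/DELETE, append a junk "ctid" column so the executor can
     * identify the tuple to replace or delete; junk columns are carried
     * through execution but never returned to the client.
     */
    tlist[ntlist++] = (TLE) {"ctid", true};

    for (int i = 0; i < ntlist; i++)
        printf("%s%s\n", tlist[i].name, tlist[i].resjunk ? " (junk)" : "");
    return 0;
}
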
@@ -103,31 +103,28 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
}

/*
* Add TID targets for rels selected FOR UPDATE/SHARE. The executor
* uses the TID to know which rows to lock, much as for UPDATE or
* DELETE.
* Add TID targets for rels selected FOR UPDATE/SHARE. The executor uses
* the TID to know which rows to lock, much as for UPDATE or DELETE.
*/
if (parse->rowMarks)
{
ListCell *l;

/*
* We've got trouble if the FOR UPDATE/SHARE appears inside
* grouping, since grouping renders a reference to individual
* tuple CTIDs invalid. This is also checked at parse time,
* but that's insufficient because of rule substitution, query
* pullup, etc.
* We've got trouble if the FOR UPDATE/SHARE appears inside grouping,
* since grouping renders a reference to individual tuple CTIDs
* invalid. This is also checked at parse time, but that's
* insufficient because of rule substitution, query pullup, etc.
*/
CheckSelectLocking(parse, parse->forUpdate);

/*
* Currently the executor only supports FOR UPDATE/SHARE at top
* level
* Currently the executor only supports FOR UPDATE/SHARE at top level
*/
if (PlannerQueryLevel > 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("SELECT FOR UPDATE/SHARE is not allowed in subqueries")));
errmsg("SELECT FOR UPDATE/SHARE is not allowed in subqueries")));

foreach(l, parse->rowMarks)
{
@@ -185,14 +182,13 @@ expand_targetlist(List *tlist, int command_type,
tlist_item = list_head(tlist);

/*
* The rewriter should have already ensured that the TLEs are in
* correct order; but we have to insert TLEs for any missing
* attributes.
* The rewriter should have already ensured that the TLEs are in correct
* order; but we have to insert TLEs for any missing attributes.
*
* Scan the tuple description in the relation's relcache entry to make
* sure we have all the user attributes in the right order. We assume
* that the rewriter already acquired at least AccessShareLock on the
* relation, so we need no lock here.
* Scan the tuple description in the relation's relcache entry to make sure
* we have all the user attributes in the right order. We assume that the
* rewriter already acquired at least AccessShareLock on the relation, so
* we need no lock here.
*/
rel = heap_open(getrelid(result_relation, range_table), NoLock);

@@ -220,23 +216,22 @@ expand_targetlist(List *tlist, int command_type,
* Didn't find a matching tlist entry, so make one.
*
* For INSERT, generate a NULL constant. (We assume the rewriter
* would have inserted any available default value.) Also, if
* the column isn't dropped, apply any domain constraints that
* might exist --- this is to catch domain NOT NULL.
* would have inserted any available default value.) Also, if the
* column isn't dropped, apply any domain constraints that might
* exist --- this is to catch domain NOT NULL.
*
* For UPDATE, generate a Var reference to the existing value of
* the attribute, so that it gets copied to the new tuple. But
* generate a NULL for dropped columns (we want to drop any
* old values).
* For UPDATE, generate a Var reference to the existing value of the
* attribute, so that it gets copied to the new tuple. But
* generate a NULL for dropped columns (we want to drop any old
* values).
*
* When generating a NULL constant for a dropped column, we label
* it INT4 (any other guaranteed-to-exist datatype would do as
* well). We can't label it with the dropped column's
* datatype since that might not exist anymore. It does not
* really matter what we claim the type is, since NULL is NULL
* --- its representation is datatype-independent. This could
* perhaps confuse code comparing the finished plan to the
* target relation, however.
* When generating a NULL constant for a dropped column, we label it
* INT4 (any other guaranteed-to-exist datatype would do as well).
* We can't label it with the dropped column's datatype since that
* might not exist anymore. It does not really matter what we
* claim the type is, since NULL is NULL --- its representation is
* datatype-independent. This could perhaps confuse code
* comparing the finished plan to the target relation, however.
*/
Oid atttype = att_tup->atttypid;
int32 atttypmod = att_tup->atttypmod;
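
The INT4 labeling the comment defends can be seen in miniature: a null constant still needs some type OID, and any guaranteed-to-exist type works because the null carries no datum. A hedged sketch with a made-up Const struct (23 is INT4's OID in the real catalogs; the struct itself is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

#define INT4OID 23              /* pg_type OID of int4 */

/* Toy stand-in for PostgreSQL's Const node. */
typedef struct {
    Oid  consttype;
    bool constisnull;
} ToyConst;

/* For a dropped column, emit a typed NULL; the type is only a label. */
static ToyConst
null_const_for_dropped_column(void)
{
    ToyConst c;

    c.consttype = INT4OID;      /* any always-present type would do */
    c.constisnull = true;
    return c;
}

int
main(void)
{
    ToyConst c = null_const_for_dropped_column();

    printf("type OID %u, isnull %d\n", c.consttype, c.constisnull);
    return 0;
}
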
@@ -305,12 +300,12 @@ expand_targetlist(List *tlist, int command_type,
}

/*
* The remaining tlist entries should be resjunk; append them all to
* the end of the new tlist, making sure they have resnos higher than
* the last real attribute. (Note: although the rewriter already did
* such renumbering, we have to do it again here in case we are doing
* an UPDATE in a table with dropped columns, or an inheritance child
* table with extra columns.)
* The remaining tlist entries should be resjunk; append them all to the
* end of the new tlist, making sure they have resnos higher than the last
* real attribute. (Note: although the rewriter already did such
* renumbering, we have to do it again here in case we are doing an UPDATE
* in a table with dropped columns, or an inheritance child table with
* extra columns.)
*/
while (tlist_item)
{

@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.126 2005/08/02 20:27:45 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.127 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,19 +51,19 @@ typedef struct
} adjust_inherited_attrs_context;

static Plan *recurse_set_operations(Node *setOp, PlannerInfo *root,
double tuple_fraction,
List *colTypes, bool junkOK,
int flag, List *refnames_tlist,
List **sortClauses);
double tuple_fraction,
List *colTypes, bool junkOK,
int flag, List *refnames_tlist,
List **sortClauses);
static Plan *generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
double tuple_fraction,
List *refnames_tlist, List **sortClauses);
double tuple_fraction,
List *refnames_tlist, List **sortClauses);
static Plan *generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
List *refnames_tlist, List **sortClauses);
static List *recurse_union_children(Node *setOp, PlannerInfo *root,
double tuple_fraction,
SetOperationStmt *top_union,
List *refnames_tlist);
double tuple_fraction,
SetOperationStmt *top_union,
List *refnames_tlist);
static List *generate_setop_tlist(List *colTypes, int flag,
Index varno,
bool hack_constants,
@@ -117,8 +117,8 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
Assert(parse->distinctClause == NIL);

/*
* Find the leftmost component Query. We need to use its column names
* for all generated tlists (else SELECT INTO won't work right).
* Find the leftmost component Query. We need to use its column names for
* all generated tlists (else SELECT INTO won't work right).
*/
node = topop->larg;
while (node && IsA(node, SetOperationStmt))
@@ -129,10 +129,10 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
Assert(leftmostQuery != NULL);

/*
* Recurse on setOperations tree to generate plans for set ops. The
* final output plan should have just the column types shown as the
* output from the top-level node, plus possibly resjunk working
* columns (we can rely on upper-level nodes to deal with that).
* Recurse on setOperations tree to generate plans for set ops. The final
* output plan should have just the column types shown as the output from
* the top-level node, plus possibly resjunk working columns (we can rely
* on upper-level nodes to deal with that).
*/
return recurse_set_operations((Node *) topop, root, tuple_fraction,
topop->colTypes, true, -1,
@@ -187,8 +187,8 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
subplan);

/*
* We don't bother to determine the subquery's output ordering
* since it won't be reflected in the set-op result anyhow.
* We don't bother to determine the subquery's output ordering since
* it won't be reflected in the set-op result anyhow.
*/
*sortClauses = NIL;

@@ -214,13 +214,13 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
* output columns.
*
* XXX you don't really want to know about this: setrefs.c will apply
* replace_vars_with_subplan_refs() to the Result node's tlist.
* This would fail if the Vars generated by generate_setop_tlist()
* were not exactly equal() to the corresponding tlist entries of
* the subplan. However, since the subplan was generated by
* generate_union_plan() or generate_nonunion_plan(), and hence
* its tlist was generated by generate_append_tlist(), this will
* work. We just tell generate_setop_tlist() to use varno 0.
* replace_vars_with_subplan_refs() to the Result node's tlist. This
* would fail if the Vars generated by generate_setop_tlist() were not
* exactly equal() to the corresponding tlist entries of the subplan.
* However, since the subplan was generated by generate_union_plan()
* or generate_nonunion_plan(), and hence its tlist was generated by
* generate_append_tlist(), this will work. We just tell
* generate_setop_tlist() to use varno 0.
*/
if (flag >= 0 ||
!tlist_same_datatypes(plan->targetlist, colTypes, junkOK))
@@ -260,22 +260,22 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
/*
* If plain UNION, tell children to fetch all tuples.
*
* Note: in UNION ALL, we pass the top-level tuple_fraction unmodified
* to each arm of the UNION ALL. One could make a case for reducing
* the tuple fraction for later arms (discounting by the expected size
* of the earlier arms' results) but it seems not worth the trouble.
* The normal case where tuple_fraction isn't already zero is a LIMIT
* at top level, and passing it down as-is is usually enough to get the
* desired result of preferring fast-start plans.
* Note: in UNION ALL, we pass the top-level tuple_fraction unmodified to
* each arm of the UNION ALL. One could make a case for reducing the
* tuple fraction for later arms (discounting by the expected size of the
* earlier arms' results) but it seems not worth the trouble. The normal
* case where tuple_fraction isn't already zero is a LIMIT at top level,
* and passing it down as-is is usually enough to get the desired result
* of preferring fast-start plans.
*/
if (!op->all)
tuple_fraction = 0.0;

/*
* If any of my children are identical UNION nodes (same op, all-flag,
* and colTypes) then they can be merged into this node so that we
* generate only one Append and Sort for the lot. Recurse to find
* such nodes and compute their children's plans.
* If any of my children are identical UNION nodes (same op, all-flag, and
* colTypes) then they can be merged into this node so that we generate
* only one Append and Sort for the lot. Recurse to find such nodes and
* compute their children's plans.
*/
planlist = list_concat(recurse_union_children(op->larg, root,
tuple_fraction,
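
Merging "identical" UNION nodes is a recursion that collects grandchildren whenever a child has the same flavor as the top node, so a single n-ary Append can serve the whole subtree; a child of a different flavor is planned as its own subplan. A toy sketch over a binary set-op tree, with invented types and string leaves:

#include <stdio.h>

/* Toy set-op node: leaves have name != NULL, internal nodes have op. */
typedef struct SetOp {
    const char *leaf;           /* non-NULL for a leaf */
    int op;                     /* 0 = UNION, 1 = UNION ALL */
    struct SetOp *larg, *rarg;
} SetOp;

/*
 * Collect the leaves of every child that is the same flavor of UNION
 * as the top node; anything of a different flavor stays a subplan.
 */
static void
collect(SetOp *node, int top_op, const char **out, int *n)
{
    if (node->leaf)
        out[(*n)++] = node->leaf;
    else if (node->op == top_op)
    {
        collect(node->larg, top_op, out, n);
        collect(node->rarg, top_op, out, n);
    }
    else
        out[(*n)++] = "(subplan)";  /* different set-op: plan on its own */
}

int
main(void)
{
    SetOp a = {"a", 0, NULL, NULL}, b = {"b", 0, NULL, NULL};
    SetOp c = {"c", 0, NULL, NULL};
    SetOp inner = {NULL, 1, &a, &b};    /* a UNION ALL b */
    SetOp top = {NULL, 1, &inner, &c};  /* ... UNION ALL c */
    const char *children[8];
    int n = 0;

    collect(&top, top.op, children, &n);
    for (int i = 0; i < n; i++)         /* prints a, b, c */
        printf("Append child %d: %s\n", i, children[i]);
    return 0;
}
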
@@ -288,8 +288,8 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
* Generate tlist for Append plan node.
*
* The tlist for an Append plan isn't important as far as the Append is
* concerned, but we must make it look real anyway for the benefit of
* the next plan level up.
* concerned, but we must make it look real anyway for the benefit of the
* next plan level up.
*/
tlist = generate_append_tlist(op->colTypes, false,
planlist, refnames_tlist);
@@ -300,8 +300,8 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
plan = (Plan *) make_append(planlist, false, tlist);

/*
* For UNION ALL, we just need the Append plan. For UNION, need to
* add Sort and Unique nodes to produce unique output.
* For UNION ALL, we just need the Append plan. For UNION, need to add
* Sort and Unique nodes to produce unique output.
*/
if (!op->all)
{
@@ -340,12 +340,12 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,

/* Recurse on children, ensuring their outputs are marked */
lplan = recurse_set_operations(op->larg, root,
0.0 /* all tuples needed */,
0.0 /* all tuples needed */ ,
op->colTypes, false, 0,
refnames_tlist,
&child_sortclauses);
rplan = recurse_set_operations(op->rarg, root,
0.0 /* all tuples needed */,
0.0 /* all tuples needed */ ,
op->colTypes, false, 1,
refnames_tlist,
&child_sortclauses);
@@ -355,10 +355,10 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
* Generate tlist for Append plan node.
*
* The tlist for an Append plan isn't important as far as the Append is
* concerned, but we must make it look real anyway for the benefit of
* the next plan level up. In fact, it has to be real enough that the
* flag column is shown as a variable not a constant, else setrefs.c
* will get confused.
* concerned, but we must make it look real anyway for the benefit of the
* next plan level up. In fact, it has to be real enough that the flag
* column is shown as a variable not a constant, else setrefs.c will get
* confused.
*/
tlist = generate_append_tlist(op->colTypes, true,
planlist, refnames_tlist);
@@ -439,12 +439,11 @@ recurse_union_children(Node *setOp, PlannerInfo *root,
/*
* Not same, so plan this child separately.
*
* Note we disallow any resjunk columns in child results. This is
* necessary since the Append node that implements the union won't do
* any projection, and upper levels will get confused if some of our
* output tuples have junk and some don't. This case only arises when
* we have an EXCEPT or INTERSECT as child, else there won't be
* resjunk anyway.
* Note we disallow any resjunk columns in child results. This is necessary
* since the Append node that implements the union won't do any
* projection, and upper levels will get confused if some of our output
* tuples have junk and some don't. This case only arises when we have an
* EXCEPT or INTERSECT as child, else there won't be resjunk anyway.
*/
return list_make1(recurse_set_operations(setOp, root,
tuple_fraction,
@@ -492,17 +491,17 @@ generate_setop_tlist(List *colTypes, int flag,
Assert(!reftle->resjunk);

/*
* Generate columns referencing input columns and having
* appropriate data types and column names. Insert datatype
* coercions where necessary.
* Generate columns referencing input columns and having appropriate
* data types and column names. Insert datatype coercions where
* necessary.
*
* HACK: constants in the input's targetlist are copied up as-is
* rather than being referenced as subquery outputs. This is
* mainly to ensure that when we try to coerce them to the output
* column's datatype, the right things happen for UNKNOWN
* constants. But do this only at the first level of
* subquery-scan plans; we don't want phony constants appearing in
* the output tlists of upper-level nodes!
* HACK: constants in the input's targetlist are copied up as-is rather
* than being referenced as subquery outputs. This is mainly to
* ensure that when we try to coerce them to the output column's
* datatype, the right things happen for UNKNOWN constants. But do
* this only at the first level of subquery-scan plans; we don't want
* phony constants appearing in the output tlists of upper-level
* nodes!
*/
if (hack_constants && inputtle->expr && IsA(inputtle->expr, Const))
expr = (Node *) inputtle->expr;
@@ -710,7 +709,7 @@ find_all_inheritors(Oid parentrel)
List *rels_list;
ListCell *l;

/*
/*
* We build a list starting with the given rel and adding all direct and
* indirect children. We can use a single list as both the record of
* already-found rels and the agenda of rels yet to be scanned for more
@@ -728,11 +727,11 @@ find_all_inheritors(Oid parentrel)
currentchildren = find_inheritance_children(currentrel);

/*
* Add to the queue only those children not already seen. This
* avoids making duplicate entries in case of multiple inheritance
* paths from the same parent. (It'll also keep us from getting
* into an infinite loop, though theoretically there can't be any
* cycles in the inheritance graph anyway.)
* Add to the queue only those children not already seen. This avoids
* making duplicate entries in case of multiple inheritance paths from
* the same parent. (It'll also keep us from getting into an infinite
* loop, though theoretically there can't be any cycles in the
* inheritance graph anyway.)
*/
rels_list = list_concat_unique_oid(rels_list, currentchildren);
}
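
The single-list trick, one list serving as both the visited set and the scan agenda, looks like this in isolation. A toy sketch with integer "OIDs" and a fixed child table standing in for the pg_inherits scan; the concat-unique step is what keeps multiple inheritance paths from producing duplicates or loops:

#include <stdio.h>

#define MAXRELS 16

/* Toy inheritance edges: children[i][*] are the direct children of rel i. */
static const int children[MAXRELS][MAXRELS] = {
    [1] = {2, 3},               /* rel 1 has children 2 and 3 */
    [2] = {4},
    [3] = {4},                  /* rel 4 inherits from both 2 and 3 */
};

static int
list_member(const int *list, int n, int oid)
{
    for (int i = 0; i < n; i++)
        if (list[i] == oid)
            return 1;
    return 0;
}

int
main(void)
{
    int rels[MAXRELS];
    int nrels = 0;

    rels[nrels++] = 1;          /* start with the given rel */

    /*
     * The cursor sweeps the list while appends extend it, so the same
     * list is both the record of already-found rels and the agenda of
     * rels still to scan.  Appending only unseen children is the
     * concat-unique behavior the comment above relies on.
     */
    for (int cursor = 0; cursor < nrels; cursor++)
    {
        int parent = rels[cursor];

        for (int j = 0; children[parent][j] != 0; j++)
            if (!list_member(rels, nrels, children[parent][j]))
                rels[nrels++] = children[parent][j];
    }

    for (int i = 0; i < nrels; i++)     /* prints 1, 2, 3, 4 exactly once */
        printf("inheritor: %d\n", rels[i]);
    return 0;
}
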
@@ -790,8 +789,8 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)

/*
* Check that there's at least one descendant, else treat as no-child
* case. This could happen despite above has_subclass() check, if
* table once had a child but no longer does.
* case. This could happen despite above has_subclass() check, if table
* once had a child but no longer does.
*/
if (list_length(inhOIDs) < 2)
{
@@ -809,19 +808,19 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)
Index childRTindex;

/*
* It is possible that the parent table has children that are
* temp tables of other backends. We cannot safely access such
* tables (because of buffering issues), and the best thing to do
* seems to be to silently ignore them.
* It is possible that the parent table has children that are temp
* tables of other backends. We cannot safely access such tables
* (because of buffering issues), and the best thing to do seems to be
* to silently ignore them.
*/
if (childOID != parentOID &&
isOtherTempNamespace(get_rel_namespace(childOID)))
continue;

/*
* Build an RTE for the child, and attach to query's rangetable
* list. We copy most fields of the parent's RTE, but replace
* relation OID, and set inh = false.
* Build an RTE for the child, and attach to query's rangetable list.
* We copy most fields of the parent's RTE, but replace relation OID,
* and set inh = false.
*/
childrte = copyObject(rte);
childrte->relid = childOID;
@@ -833,7 +832,8 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)

/*
* If all the children were temp tables, pretend it's a non-inheritance
* situation. The duplicate RTE we added for the parent table is harmless.
* situation. The duplicate RTE we added for the parent table is
* harmless.
*/
if (list_length(inhRTIs) < 2)
{
@@ -843,11 +843,11 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)
}

/*
* The executor will check the parent table's access permissions when
* it examines the parent's inheritlist entry. There's no need to
* check twice, so turn off access check bits in the original RTE.
* (If we are invoked more than once, extra copies of the child RTEs
* will also not cause duplicate permission checks.)
* The executor will check the parent table's access permissions when it
* examines the parent's inheritlist entry. There's no need to check
* twice, so turn off access check bits in the original RTE. (If we are
* invoked more than once, extra copies of the child RTEs will also not
* cause duplicate permission checks.)
*/
rte->requiredPerms = 0;

@@ -882,9 +882,8 @@ adjust_inherited_attrs(Node *node,
}

/*
* We assume that by now the planner has acquired at least
* AccessShareLock on both rels, and so we need no additional lock
* now.
* We assume that by now the planner has acquired at least AccessShareLock
* on both rels, and so we need no additional lock now.
*/
oldrelation = heap_open(old_relid, NoLock);
newrelation = heap_open(new_relid, NoLock);
@@ -1035,7 +1034,7 @@ adjust_inherited_attrs_mutator(Node *node,
JoinExpr *j;

j = (JoinExpr *) expression_tree_mutator(node,
adjust_inherited_attrs_mutator,
adjust_inherited_attrs_mutator,
(void *) context);
/* now fix JoinExpr's rtindex */
if (j->rtindex == context->old_rt_index)
@@ -1048,8 +1047,8 @@ adjust_inherited_attrs_mutator(Node *node,
InClauseInfo *ininfo;

ininfo = (InClauseInfo *) expression_tree_mutator(node,
adjust_inherited_attrs_mutator,
(void *) context);
adjust_inherited_attrs_mutator,
(void *) context);
/* now fix InClauseInfo's relid sets */
ininfo->lefthand = adjust_relid_set(ininfo->lefthand,
context->old_rt_index,
@@ -1119,10 +1118,10 @@ adjust_inherited_attrs_mutator(Node *node,
/*
* BUT: although we don't need to recurse into subplans, we do need to
* make sure that they are copied, not just referenced as
* expression_tree_mutator will do by default. Otherwise we'll have
* the same subplan node referenced from each arm of the inheritance
* APPEND plan, which will cause trouble in the executor. This is a
* kluge that should go away when we redesign querytrees.
* expression_tree_mutator will do by default. Otherwise we'll have the
* same subplan node referenced from each arm of the inheritance APPEND
* plan, which will cause trouble in the executor. This is a kluge that
* should go away when we redesign querytrees.
*/
if (is_subplan(node))
{
@@ -1205,8 +1204,8 @@ adjust_inherited_tlist(List *tlist,
/*
* If we changed anything, re-sort the tlist by resno, and make sure
* resjunk entries have resnos above the last real resno. The sort
* algorithm is a bit stupid, but for such a seldom-taken path, small
* is probably better than fast.
* algorithm is a bit stupid, but for such a seldom-taken path, small is
* probably better than fast.
*/
if (!changed_it)
return tlist;

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.200 2005/07/03 21:14:17 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.201 2005/10/15 02:49:21 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -91,7 +91,7 @@ static Expr *inline_function(Oid funcid, Oid result_type, List *args,
static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
int *usecounts);
static Node *substitute_actual_parameters_mutator(Node *node,
substitute_actual_parameters_context *context);
substitute_actual_parameters_context *context);
static void sql_inline_error_callback(void *arg);
static Expr *evaluate_expr(Expr *expr, Oid result_type);

@@ -308,10 +308,10 @@ List *
make_ands_implicit(Expr *clause)
{
/*
* NB: because the parser sets the qual field to NULL in a query that
* has no WHERE clause, we must consider a NULL input clause as TRUE,
* even though one might more reasonably think it FALSE. Grumble. If
* this causes trouble, consider changing the parser's behavior.
* NB: because the parser sets the qual field to NULL in a query that has
* no WHERE clause, we must consider a NULL input clause as TRUE, even
* though one might more reasonably think it FALSE. Grumble. If this
* causes trouble, consider changing the parser's behavior.
*/
if (clause == NULL)
return NIL; /* NULL -> NIL list == TRUE */
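The convention this comment defends (an empty conjunct list means TRUE) can be shown in isolation. A minimal sketch with a hypothetical node type --- it is not the PostgreSQL implementation, which works on Expr/BoolExpr nodes:

#include <stddef.h>

typedef enum { N_AND, N_LEAF } kind_t;

typedef struct node
{
    kind_t kind;
    struct node *args[8];       /* children when kind == N_AND */
    size_t nargs;
} node_t;

/*
 * Flatten a qual into an implicit-AND array.  A NULL qual yields zero
 * conjuncts, i.e. "no conditions" == TRUE, matching the comment above.
 * Like the real function, only the top-level AND is unwrapped.
 */
static size_t
ands_implicit(node_t *clause, node_t **out, size_t cap)
{
    if (clause == NULL)
        return 0;               /* NULL -> empty list == TRUE */
    if (clause->kind == N_AND)
    {
        size_t n = 0;

        for (size_t i = 0; i < clause->nargs && n < cap; i++)
            out[n++] = clause->args[i];
        return n;
    }
    if (cap > 0)
        out[0] = clause;        /* any other clause is a single conjunct */
    return 1;
}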
@@ -357,8 +357,7 @@ contain_agg_clause_walker(Node *node, void *context)
if (IsA(node, Aggref))
{
Assert(((Aggref *) node)->agglevelsup == 0);
return true; /* abort the tree traversal and return
* true */
return true; /* abort the tree traversal and return true */
}
Assert(!IsA(node, SubLink));
return expression_tree_walker(node, contain_agg_clause_walker, context);
@@ -438,9 +437,9 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)

/*
* If the transition type is pass-by-value then it doesn't add
* anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of
* the value itself, plus palloc overhead.
* anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of the
* value itself, plus palloc overhead.
*/
if (!get_typbyval(aggtranstype))
{
@@ -470,7 +469,7 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)
if (contain_agg_clause((Node *) aggref->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("aggregate function calls may not be nested")));
errmsg("aggregate function calls may not be nested")));

/*
* Having checked that, we need not recurse into the argument.
@@ -579,8 +578,7 @@ contain_subplans_walker(Node *node, void *context)
return false;
if (IsA(node, SubPlan) ||
IsA(node, SubLink))
return true; /* abort the tree traversal and return
* true */
return true; /* abort the tree traversal and return true */
return expression_tree_walker(node, contain_subplans_walker, context);
}

@@ -882,9 +880,9 @@ is_pseudo_constant_clause(Node *clause)
{
/*
* We could implement this check in one recursive scan. But since the
* check for volatile functions is both moderately expensive and
* unlikely to fail, it seems better to look for Vars first and only
* check for volatile functions if we find no Vars.
* check for volatile functions is both moderately expensive and unlikely
* to fail, it seems better to look for Vars first and only check for
* volatile functions if we find no Vars.
*/
if (!contain_var_clause(clause) &&
!contain_volatile_functions(clause))
@@ -958,13 +956,12 @@ has_distinct_on_clause(Query *query)

/*
* If the DISTINCT list contains all the nonjunk targetlist items, and
* nothing else (ie, no junk tlist items), then it's a simple
* DISTINCT, else it's DISTINCT ON. We do not require the lists to be
* in the same order (since the parser may have adjusted the DISTINCT
* clause ordering to agree with ORDER BY). Furthermore, a
* non-DISTINCT junk tlist item that is in the sortClause is also
* evidence of DISTINCT ON, since we don't allow ORDER BY on junk
* tlist items when plain DISTINCT is used.
* nothing else (ie, no junk tlist items), then it's a simple DISTINCT,
* else it's DISTINCT ON. We do not require the lists to be in the same
* order (since the parser may have adjusted the DISTINCT clause ordering
* to agree with ORDER BY). Furthermore, a non-DISTINCT junk tlist item
* that is in the sortClause is also evidence of DISTINCT ON, since we
* don't allow ORDER BY on junk tlist items when plain DISTINCT is used.
*
* This code assumes that the DISTINCT list is valid, ie, all its entries
* match some entry of the tlist.
@@ -1224,7 +1221,7 @@ eval_const_expressions(Node *node)
*
* Currently the extra steps that are taken in this mode are:
* 1. Substitute values for Params, where a bound Param value has been made
* available by the caller of planner().
* available by the caller of planner().
* 2. Fold stable, as well as immutable, functions to constants.
*--------------------
*/
@@ -1264,11 +1261,11 @@ eval_const_expressions_mutator(Node *node,
if (paramInfo)
{
/*
* Found it, so return a Const representing the param
* value. Note that we don't copy pass-by-ref datatypes,
* so the Const will only be valid as long as the bound
* parameter list exists. This is okay for intended uses
* of estimate_expression_value().
* Found it, so return a Const representing the param value.
* Note that we don't copy pass-by-ref datatypes, so the Const
* will only be valid as long as the bound parameter list
* exists. This is okay for intended uses of
* estimate_expression_value().
*/
int16 typLen;
bool typByVal;
@@ -1294,16 +1291,16 @@ eval_const_expressions_mutator(Node *node,

/*
* Reduce constants in the FuncExpr's arguments. We know args is
* either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
* either NIL or a List node, so we can call expression_tree_mutator
* directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
eval_const_expressions_mutator,
(void *) context);

/*
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
* Code for op/func reduction is pretty bulky, so split it out as a
* separate function.
*/
simple = simplify_function(expr->funcid, expr->funcresulttype, args,
true, context);
@@ -1312,8 +1309,8 @@ eval_const_expressions_mutator(Node *node,

/*
* The expression cannot be simplified any further, so build and
* return a replacement FuncExpr node using the
* possibly-simplified arguments.
* return a replacement FuncExpr node using the possibly-simplified
* arguments.
*/
newexpr = makeNode(FuncExpr);
newexpr->funcid = expr->funcid;
@@ -1331,23 +1328,23 @@ eval_const_expressions_mutator(Node *node,
OpExpr *newexpr;

/*
* Reduce constants in the OpExpr's arguments. We know args is
* either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
* Reduce constants in the OpExpr's arguments. We know args is either
* NIL or a List node, so we can call expression_tree_mutator directly
* rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
eval_const_expressions_mutator,
(void *) context);

/*
* Need to get OID of underlying function. Okay to scribble on
* input to this extent.
* Need to get OID of underlying function. Okay to scribble on input
* to this extent.
*/
set_opfuncid(expr);

/*
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
* Code for op/func reduction is pretty bulky, so split it out as a
* separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype, args,
true, context);
@@ -1355,8 +1352,8 @@ eval_const_expressions_mutator(Node *node,
return (Node *) simple;

/*
* If the operator is boolean equality, we know how to simplify
* cases involving one constant and one non-constant argument.
* If the operator is boolean equality, we know how to simplify cases
* involving one constant and one non-constant argument.
*/
if (expr->opno == BooleanEqualOperator)
{
@@ -1390,18 +1387,17 @@ eval_const_expressions_mutator(Node *node,
DistinctExpr *newexpr;

/*
* Reduce constants in the DistinctExpr's arguments. We know args
* is either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
* Reduce constants in the DistinctExpr's arguments. We know args is
* either NIL or a List node, so we can call expression_tree_mutator
* directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
eval_const_expressions_mutator,
(void *) context);

/*
* We must do our own check for NULLs because DistinctExpr has
* different results for NULL input than the underlying operator
* does.
* different results for NULL input than the underlying operator does.
*/
foreach(arg, args)
{
@@ -1429,15 +1425,14 @@ eval_const_expressions_mutator(Node *node,
/* (NOT okay to try to inline it, though!) */

/*
* Need to get OID of underlying function. Okay to scribble
* on input to this extent.
* Need to get OID of underlying function. Okay to scribble on
* input to this extent.
*/
set_opfuncid((OpExpr *) expr); /* rely on struct
* equivalence */
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */

/*
* Code for op/func reduction is pretty bulky, so split it out
* as a separate function.
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype,
args, false, context);
@@ -1482,7 +1477,7 @@ eval_const_expressions_mutator(Node *node,
bool forceTrue = false;

newargs = simplify_or_arguments(expr->args, context,
&haveNull, &forceTrue);
&haveNull, &forceTrue);
if (forceTrue)
return makeBoolConst(true, false);
if (haveNull)
@@ -1503,7 +1498,7 @@ eval_const_expressions_mutator(Node *node,
bool forceFalse = false;

newargs = simplify_and_arguments(expr->args, context,
&haveNull, &forceFalse);
&haveNull, &forceFalse);
if (forceFalse)
return makeBoolConst(false, false);
if (haveNull)
@@ -1554,17 +1549,17 @@ eval_const_expressions_mutator(Node *node,
/*
* Return a SubPlan unchanged --- too late to do anything with it.
*
* XXX should we ereport() here instead? Probably this routine
* should never be invoked after SubPlan creation.
* XXX should we ereport() here instead? Probably this routine should
* never be invoked after SubPlan creation.
*/
return node;
}
if (IsA(node, RelabelType))
{
/*
* If we can simplify the input to a constant, then we don't need
* the RelabelType node anymore: just change the type field of the
* Const node. Otherwise, must copy the RelabelType node.
* If we can simplify the input to a constant, then we don't need the
* RelabelType node anymore: just change the type field of the Const
* node. Otherwise, must copy the RelabelType node.
*/
RelabelType *relabel = (RelabelType *) node;
Node *arg;
@@ -1573,8 +1568,8 @@ eval_const_expressions_mutator(Node *node,
context);

/*
* If we find stacked RelabelTypes (eg, from foo :: int :: oid) we
* can discard all but the top one.
* If we find stacked RelabelTypes (eg, from foo :: int :: oid) we can
* discard all but the top one.
*/
while (arg && IsA(arg, RelabelType))
arg = (Node *) ((RelabelType *) arg)->arg;
@@ -1586,10 +1581,9 @@ eval_const_expressions_mutator(Node *node,
con->consttype = relabel->resulttype;

/*
* relabel's resulttypmod is discarded, which is OK for now;
* if the type actually needs a runtime length coercion then
* there should be a function call to do it just above this
* node.
* relabel's resulttypmod is discarded, which is OK for now; if
* the type actually needs a runtime length coercion then there
* should be a function call to do it just above this node.
*/
return (Node *) con;
}
@@ -1692,7 +1686,7 @@ eval_const_expressions_mutator(Node *node,

/*
* Found a TRUE condition, so none of the remaining alternatives
* can be reached. We treat the result as the default result.
* can be reached. We treat the result as the default result.
*/
defresult = caseresult;
break;
@@ -1720,9 +1714,9 @@ eval_const_expressions_mutator(Node *node,
if (IsA(node, CaseTestExpr))
{
/*
* If we know a constant test value for the current CASE
* construct, substitute it for the placeholder. Else just
* return the placeholder as-is.
* If we know a constant test value for the current CASE construct,
* substitute it for the placeholder. Else just return the
* placeholder as-is.
*/
if (context->case_val)
return copyObject(context->case_val);
@@ -1803,15 +1797,15 @@ eval_const_expressions_mutator(Node *node,
if (IsA(node, FieldSelect))
{
/*
* We can optimize field selection from a whole-row Var into a
* simple Var. (This case won't be generated directly by the
* parser, because ParseComplexProjection short-circuits it. But
* it can arise while simplifying functions.) Also, we can
* optimize field selection from a RowExpr construct.
* We can optimize field selection from a whole-row Var into a simple
* Var. (This case won't be generated directly by the parser, because
* ParseComplexProjection short-circuits it. But it can arise while
* simplifying functions.) Also, we can optimize field selection from
* a RowExpr construct.
*
* We must however check that the declared type of the field is still
* the same as when the FieldSelect was created --- this can
* change if someone did ALTER COLUMN TYPE on the rowtype.
* We must however check that the declared type of the field is still the
* same as when the FieldSelect was created --- this can change if
* someone did ALTER COLUMN TYPE on the rowtype.
*/
FieldSelect *fselect = (FieldSelect *) node;
FieldSelect *newfselect;
@@ -1840,7 +1834,7 @@ eval_const_expressions_mutator(Node *node,
fselect->fieldnum <= list_length(rowexpr->args))
{
Node *fld = (Node *) list_nth(rowexpr->args,
fselect->fieldnum - 1);
fselect->fieldnum - 1);

if (rowtype_field_matches(rowexpr->row_typeid,
fselect->fieldnum,
@@ -1861,10 +1855,10 @@ eval_const_expressions_mutator(Node *node,

/*
* For any node type not handled above, we recurse using
* expression_tree_mutator, which will copy the node unchanged but try
* to simplify its arguments (if any) using this routine. For example:
* we cannot eliminate an ArrayRef node, but we might be able to
* simplify constant expressions in its subscripts.
* expression_tree_mutator, which will copy the node unchanged but try to
* simplify its arguments (if any) using this routine. For example: we
* cannot eliminate an ArrayRef node, but we might be able to simplify
* constant expressions in its subscripts.
*/
return expression_tree_mutator(node, eval_const_expressions_mutator,
(void *) context);
@@ -1900,7 +1894,7 @@ simplify_or_arguments(List *args,
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
* argument lists of a single OR operator. To avoid blowing out the stack
* argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
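The to-do-list idiom this comment describes can be shown in isolation: instead of recursing into nested ORs, push their children back onto an explicit work list. A hedged sketch with a hypothetical binary-OR node type and a fixed-size stack (not the PostgreSQL List machinery):

#include <stddef.h>

typedef struct or_t
{
    struct or_t *left;          /* non-NULL only for interior OR nodes */
    struct or_t *right;
    int leaf_id;                /* valid when left and right are NULL */
} or_t;

/*
 * Collect the leaves of an arbitrarily deep OR tree without recursing:
 * pop one node, and push OR children back onto the to-do stack.
 * Assumes no more than 128 nodes are ever pending at once.
 */
static size_t
flatten_or(or_t *root, int *out, size_t cap)
{
    or_t *todo[128];
    size_t ntodo = 0, nout = 0;

    todo[ntodo++] = root;
    while (ntodo > 0)
    {
        or_t *n = todo[--ntodo];

        if (n->left != NULL && ntodo + 2 <= 128)
        {
            todo[ntodo++] = n->right;   /* defer both children */
            todo[ntodo++] = n->left;    /* left will pop first */
        }
        else if (n->left == NULL && nout < cap)
            out[nout++] = n->leaf_id;
    }
    return nout;
}

The stack depth here tracks only pending siblings, not the nesting depth of the original expression, which is the point of the "prepend to the to-do list" trick.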
@@ -1915,14 +1909,14 @@ simplify_or_arguments(List *args,
/* flatten nested ORs as per above comment */
if (or_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);

/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
List *oldhdr = unprocessed_args;
List *oldhdr = unprocessed_args;

unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
@@ -1934,23 +1928,22 @@ simplify_or_arguments(List *args,
arg = eval_const_expressions_mutator(arg, context);

/*
* It is unlikely but not impossible for simplification of a
* non-OR clause to produce an OR. Recheck, but don't be
* too tense about it since it's not a mainstream case.
* In particular we don't worry about const-simplifying
* the input twice.
* It is unlikely but not impossible for simplification of a non-OR
* clause to produce an OR. Recheck, but don't be too tense about it
* since it's not a mainstream case. In particular we don't worry
* about const-simplifying the input twice.
*/
if (or_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);

unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}

/*
* OK, we have a const-simplified non-OR argument. Process it
* per comments above.
* OK, we have a const-simplified non-OR argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
{
@@ -2018,14 +2011,14 @@ simplify_and_arguments(List *args,
/* flatten nested ANDs as per above comment */
if (and_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);

/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
List *oldhdr = unprocessed_args;
List *oldhdr = unprocessed_args;

unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
@@ -2037,23 +2030,22 @@ simplify_and_arguments(List *args,
arg = eval_const_expressions_mutator(arg, context);

/*
* It is unlikely but not impossible for simplification of a
* non-AND clause to produce an AND. Recheck, but don't be
* too tense about it since it's not a mainstream case.
* In particular we don't worry about const-simplifying
* the input twice.
* It is unlikely but not impossible for simplification of a non-AND
* clause to produce an AND. Recheck, but don't be too tense about it
* since it's not a mainstream case. In particular we don't worry
* about const-simplifying the input twice.
*/
if (and_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);

unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}

/*
* OK, we have a const-simplified non-AND argument. Process it
* per comments above.
* OK, we have a const-simplified non-AND argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
{
@@ -2111,7 +2103,7 @@ simplify_boolean_equality(List *args)
{
Assert(!((Const *) leftop)->constisnull);
if (DatumGetBool(((Const *) leftop)->constvalue))
return rightop; /* true = foo */
return rightop; /* true = foo */
else
return make_notclause(rightop); /* false = foo */
}
@@ -2119,7 +2111,7 @@ simplify_boolean_equality(List *args)
{
Assert(!((Const *) rightop)->constisnull);
if (DatumGetBool(((Const *) rightop)->constvalue))
return leftop; /* foo = true */
return leftop; /* foo = true */
else
return make_notclause(leftop); /* foo = false */
}
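The rewrite rules visible in these two hunks (true = foo becomes foo, false = foo becomes NOT foo, and symmetrically) amount to a tiny truth-preserving transform. An illustrative sketch with a hypothetical expression type, not the PostgreSQL node machinery:

#include <stdbool.h>
#include <stddef.h>

typedef enum { E_CONST, E_VAR, E_NOT } etag_t;

typedef struct expr
{
    etag_t tag;
    bool cval;                  /* for E_CONST */
    struct expr *child;         /* for E_NOT */
} expr_t;

static expr_t not_node;         /* one static node keeps the sketch
                                 * allocation-free (not reentrant) */

/* Simplify "lhs = rhs" when exactly one side is a boolean constant. */
static expr_t *
simplify_bool_eq(expr_t *lhs, expr_t *rhs)
{
    expr_t *konst = NULL, *other = NULL;

    if (lhs->tag == E_CONST) { konst = lhs; other = rhs; }
    else if (rhs->tag == E_CONST) { konst = rhs; other = lhs; }
    if (konst == NULL)
        return NULL;            /* neither side constant: no rewrite */

    if (konst->cval)
        return other;           /* true = foo  ->  foo */

    not_node.tag = E_NOT;       /* false = foo  ->  NOT foo */
    not_node.child = other;
    return &not_node;
}

Note the Asserts in the original: the rewrite is only valid because a NULL boolean constant was already handled elsewhere; NULL = foo does not reduce this way.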
@@ -2146,12 +2138,12 @@ simplify_function(Oid funcid, Oid result_type, List *args,
Expr *newexpr;

/*
* We have two strategies for simplification: either execute the
* function to deliver a constant result, or expand in-line the body
* of the function definition (which only works for simple
* SQL-language functions, but that is a common case). In either case
* we need access to the function's pg_proc tuple, so fetch it just
* once to use in both attempts.
* We have two strategies for simplification: either execute the function
* to deliver a constant result, or expand in-line the body of the
* function definition (which only works for simple SQL-language
* functions, but that is a common case). In either case we need access
* to the function's pg_proc tuple, so fetch it just once to use in both
* attempts.
*/
func_tuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcid),
@@ -2200,15 +2192,15 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
return NULL;

/*
* Can't simplify if it returns RECORD. The immediate problem is that
* it will be needing an expected tupdesc which we can't supply here.
* Can't simplify if it returns RECORD. The immediate problem is that it
* will be needing an expected tupdesc which we can't supply here.
*
* In the case where it has OUT parameters, it could get by without an
* expected tupdesc, but we still have issues: get_expr_result_type()
* doesn't know how to extract type info from a RECORD constant, and
* in the case of a NULL function result there doesn't seem to be any
* clean way to fix that. In view of the likelihood of there being
* still other gotchas, seems best to leave the function call unreduced.
* doesn't know how to extract type info from a RECORD constant, and in
* the case of a NULL function result there doesn't seem to be any clean
* way to fix that. In view of the likelihood of there being still other
* gotchas, seems best to leave the function call unreduced.
*/
if (funcform->prorettype == RECORDOID)
return NULL;
@@ -2225,10 +2217,10 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
}

/*
* If the function is strict and has a constant-NULL input, it will
* never be called at all, so we can replace the call by a NULL
* constant, even if there are other inputs that aren't constant, and
* even if the function is not otherwise immutable.
* If the function is strict and has a constant-NULL input, it will never
* be called at all, so we can replace the call by a NULL constant, even
* if there are other inputs that aren't constant, and even if the
* function is not otherwise immutable.
*/
if (funcform->proisstrict && has_null_input)
return (Expr *) makeNullConst(result_type);
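Strictness is what makes this fold safe: a strict function is defined to return NULL whenever any input is NULL, so the call can be replaced without evaluating anything and without caring about volatility. A hedged sketch of just the decision, over a hypothetical argument array rather than PostgreSQL's node types:

#include <stdbool.h>
#include <stddef.h>

typedef struct
{
    bool is_const;
    bool is_null;               /* meaningful only when is_const */
} arg_t;

/*
 * Return true if a strict call with these arguments can be folded to a
 * NULL constant: one constant-NULL input suffices, regardless of what
 * the remaining arguments look like.
 */
static bool
strict_call_is_null(const arg_t *args, size_t nargs, bool func_is_strict)
{
    if (!func_is_strict)
        return false;
    for (size_t i = 0; i < nargs; i++)
        if (args[i].is_const && args[i].is_null)
            return true;
    return false;
}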
@@ -2242,16 +2234,16 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
return NULL;

/*
* Ordinarily we are only allowed to simplify immutable functions.
* But for purposes of estimation, we consider it okay to simplify
* functions that are merely stable; the risk that the result might
* change from planning time to execution time is worth taking in
* preference to not being able to estimate the value at all.
* Ordinarily we are only allowed to simplify immutable functions. But for
* purposes of estimation, we consider it okay to simplify functions that
* are merely stable; the risk that the result might change from planning
* time to execution time is worth taking in preference to not being able
* to estimate the value at all.
*/
if (funcform->provolatile == PROVOLATILE_IMMUTABLE)
/* okay */ ;
/* okay */ ;
else if (context->estimate && funcform->provolatile == PROVOLATILE_STABLE)
/* okay */ ;
/* okay */ ;
else
return NULL;

@@ -2318,8 +2310,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
int i;

/*
* Forget it if the function is not SQL-language or has other
* showstopper properties. (The nargs check is just paranoia.)
* Forget it if the function is not SQL-language or has other showstopper
* properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -2336,8 +2328,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
return NULL;

/*
* Setup error traceback support for ereport(). This is so that we
* can finger the function that bad information came from.
* Setup error traceback support for ereport(). This is so that we can
* finger the function that bad information came from.
*/
sqlerrcontext.callback = sql_inline_error_callback;
sqlerrcontext.arg = func_tuple;
@@ -2345,8 +2337,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
error_context_stack = &sqlerrcontext;

/*
* Make a temporary memory context, so that we don't leak all the
* stuff that parsing might create.
* Make a temporary memory context, so that we don't leak all the stuff
* that parsing might create.
*/
mycxt = AllocSetContextCreate(CurrentMemoryContext,
"inline_function",
@@ -2383,10 +2375,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
src = DatumGetCString(DirectFunctionCall1(textout, tmp));

/*
* We just do parsing and parse analysis, not rewriting, because
* rewriting will not affect table-free-SELECT-only queries, which is
* all that we care about. Also, we can punt as soon as we detect
* more than one command in the function body.
* We just do parsing and parse analysis, not rewriting, because rewriting
* will not affect table-free-SELECT-only queries, which is all that we
* care about. Also, we can punt as soon as we detect more than one
* command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
if (list_length(raw_parsetree_list) != 1)
@@ -2425,24 +2417,24 @@ inline_function(Oid funcid, Oid result_type, List *args,
newexpr = (Node *) ((TargetEntry *) linitial(querytree->targetList))->expr;

/*
* If the function has any arguments declared as polymorphic types,
* then it wasn't type-checked at definition time; must do so now.
* (This will raise an error if wrong, but that's okay since the
* function would fail at runtime anyway. Note we do not try this
* until we have verified that no rewriting was needed; that's
* probably not important, but let's be careful.)
* If the function has any arguments declared as polymorphic types, then
* it wasn't type-checked at definition time; must do so now. (This will
* raise an error if wrong, but that's okay since the function would fail
* at runtime anyway. Note we do not try this until we have verified that
* no rewriting was needed; that's probably not important, but let's be
* careful.)
*/
if (polymorphic)
(void) check_sql_fn_retval(funcid, result_type, querytree_list, NULL);

/*
* Additional validity checks on the expression. It mustn't return a
* set, and it mustn't be more volatile than the surrounding function
* (this is to avoid breaking hacks that involve pretending a function
* is immutable when it really ain't). If the surrounding function is
* declared strict, then the expression must contain only strict
* constructs and must use all of the function parameters (this is
* overkill, but an exact analysis is hard).
* Additional validity checks on the expression. It mustn't return a set,
* and it mustn't be more volatile than the surrounding function (this is
* to avoid breaking hacks that involve pretending a function is immutable
* when it really ain't). If the surrounding function is declared strict,
* then the expression must contain only strict constructs and must use
* all of the function parameters (this is overkill, but an exact analysis
* is hard).
*/
if (expression_returns_set(newexpr))
goto fail;
@@ -2459,10 +2451,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
goto fail;

/*
* We may be able to do it; there are still checks on parameter usage
* to make, but those are most easily done in combination with the
* actual substitution of the inputs. So start building expression
* with inputs substituted.
* We may be able to do it; there are still checks on parameter usage to
* make, but those are most easily done in combination with the actual
* substitution of the inputs. So start building expression with inputs
* substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
newexpr = substitute_actual_parameters(newexpr, funcform->pronargs,
@@ -2486,8 +2478,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
QualCost eval_cost;

/*
* We define "expensive" as "contains any subplan or more than
* 10 operators". Note that the subplan search has to be done
* We define "expensive" as "contains any subplan or more than 10
* operators". Note that the subplan search has to be done
* explicitly, since cost_qual_eval() will barf on unplanned
* subselects.
*/
@@ -2509,8 +2501,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
}

/*
* Whew --- we can make the substitution. Copy the modified
* expression out of the temporary memory context, and clean up.
* Whew --- we can make the substitution. Copy the modified expression
* out of the temporary memory context, and clean up.
*/
MemoryContextSwitchTo(oldcxt);

@@ -2519,8 +2511,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
MemoryContextDelete(mycxt);

/*
* Recursively try to simplify the modified expression. Here we must
* add the current function to the context list of active functions.
* Recursively try to simplify the modified expression. Here we must add
* the current function to the context list of active functions.
*/
context->active_fns = lcons_oid(funcid, context->active_fns);
newexpr = eval_const_expressions_mutator(newexpr, context);
@@ -2557,7 +2549,7 @@ substitute_actual_parameters(Node *expr, int nargs, List *args,

static Node *
substitute_actual_parameters_mutator(Node *node,
substitute_actual_parameters_context *context)
substitute_actual_parameters_context *context)
{
if (node == NULL)
return NULL;
@@ -2646,10 +2638,10 @@ evaluate_expr(Expr *expr, Oid result_type)
/*
* And evaluate it.
*
* It is OK to use a default econtext because none of the ExecEvalExpr()
* code used in this situation will use econtext. That might seem
* fortuitous, but it's not so unreasonable --- a constant expression
* does not depend on context, by definition, n'est ce pas?
* It is OK to use a default econtext because none of the ExecEvalExpr() code
* used in this situation will use econtext. That might seem fortuitous,
* but it's not so unreasonable --- a constant expression does not depend
* on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExprSwitchContext(exprstate,
GetPerTupleExprContext(estate),
@@ -2779,12 +2771,12 @@ expression_tree_walker(Node *node,
ListCell *temp;

/*
* The walker has already visited the current node, and so we need
* only recurse into any sub-nodes it has.
* The walker has already visited the current node, and so we need only
* recurse into any sub-nodes it has.
*
* We assume that the walker is not interested in List nodes per se, so
* when we expect a List we just recurse directly to self without
* bothering to call the walker.
* We assume that the walker is not interested in List nodes per se, so when
* we expect a List we just recurse directly to self without bothering to
* call the walker.
*/
if (node == NULL)
return false;
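The walker protocol used throughout this file (visit a node, return true to abort the whole traversal, otherwise continue into sub-nodes) can be sketched generically. A hypothetical miniature with a fixed child array; simplified in one respect: here the driver recurses, whereas in clauses.c the callback itself re-invokes expression_tree_walker on sub-nodes:

#include <stdbool.h>
#include <stddef.h>

typedef struct tnode
{
    int tag;
    struct tnode *kids[4];      /* unused slots are NULL */
} tnode_t;

typedef bool (*walker_fn) (tnode_t *node, void *context);

/* Depth-first walk; true from the callback aborts the traversal. */
static bool
tree_walker(tnode_t *node, walker_fn walker, void *context)
{
    if (node == NULL)
        return false;
    if (walker(node, context))
        return true;            /* abort propagates upward */
    for (size_t i = 0; i < 4; i++)
        if (node->kids[i] && tree_walker(node->kids[i], walker, context))
            return true;
    return false;
}

/* Example callback in the style of contain_subplans_walker */
static bool
find_tag_walker(tnode_t *node, void *context)
{
    return node->tag == *(int *) context;   /* true == "found, stop" */
}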
@@ -2877,8 +2869,8 @@ expression_tree_walker(Node *node,
return true;

/*
* Also invoke the walker on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
* Also invoke the walker on the sublink's Query node, so it
* can recurse into the sub-query if it wants to.
*/
return walker(sublink->subselect, context);
}
@@ -3167,8 +3159,8 @@ expression_tree_mutator(Node *node,
void *context)
{
/*
* The mutator has already decided not to modify the current node, but
* we must call the mutator for any sub-nodes.
* The mutator has already decided not to modify the current node, but we
* must call the mutator for any sub-nodes.
*/

#define FLATCOPY(newnode, node, nodetype) \
@@ -3286,8 +3278,8 @@ expression_tree_mutator(Node *node,
MUTATE(newnode->lefthand, sublink->lefthand, List *);

/*
* Also invoke the mutator on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
* Also invoke the mutator on the sublink's Query node, so it
* can recurse into the sub-query if it wants to.
*/
MUTATE(newnode->subselect, sublink->subselect, Node *);
return (Node *) newnode;
@@ -3468,10 +3460,9 @@ expression_tree_mutator(Node *node,
case T_List:
{
/*
* We assume the mutator isn't interested in the list
* nodes per se, so just invoke it on each list element.
* NOTE: this would fail badly on a list with integer
* elements!
* We assume the mutator isn't interested in the list nodes
* per se, so just invoke it on each list element. NOTE: this
* would fail badly on a list with integer elements!
*/
List *resultlist;
ListCell *temp;

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.124 2005/07/22 19:12:01 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.125 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
return +1;

/*
* If paths have the same startup cost (not at all unlikely),
* order them by total cost.
* If paths have the same startup cost (not at all unlikely), order
* them by total cost.
*/
if (path1->total_cost < path2->total_cost)
return -1;
@@ -111,8 +111,8 @@ compare_fuzzy_path_costs(Path *path1, Path *path2, CostSelector criterion)
return -1;

/*
* If paths have the same startup cost (not at all unlikely),
* order them by total cost.
* If paths have the same startup cost (not at all unlikely), order
* them by total cost.
*/
if (path1->total_cost > path2->total_cost * 1.01)
return +1;
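The 1% slop visible in this hunk is the whole trick: two costs within a multiplicative factor of 1.01 are treated as equal, so nearly identical paths don't both survive. A standalone sketch with a hypothetical two-field cost struct (the real function also takes a criterion that decides which field is compared first; this sketch fixes one ordering for brevity):

typedef struct
{
    double startup_cost;
    double total_cost;
} cost_t;

#define FUZZ 1.01               /* costs within 1% count as the same */

/* -1: a cheaper, +1: b cheaper, 0: fuzzily equal on this field */
static int
fuzzy_cmp(double a, double b)
{
    if (a > b * FUZZ)
        return +1;
    if (b > a * FUZZ)
        return -1;
    return 0;
}

/* Compare on startup cost first, breaking fuzzy ties by total cost. */
static int
compare_fuzzy(const cost_t *a, const cost_t *b)
{
    int c = fuzzy_cmp(a->startup_cost, b->startup_cost);

    if (c != 0)
        return c;
    return fuzzy_cmp(a->total_cost, b->total_cost);
}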
@@ -253,22 +253,21 @@ set_cheapest(RelOptInfo *parent_rel)
void
add_path(RelOptInfo *parent_rel, Path *new_path)
{
bool accept_new = true; /* unless we find a superior old
* path */
bool accept_new = true; /* unless we find a superior old path */
ListCell *insert_after = NULL; /* where to insert new item */
ListCell *p1_prev = NULL;
ListCell *p1;

/*
* This is a convenient place to check for query cancel --- no part
* of the planner goes very long without calling add_path().
* This is a convenient place to check for query cancel --- no part of the
* planner goes very long without calling add_path().
*/
CHECK_FOR_INTERRUPTS();

/*
* Loop to check proposed new path against old paths. Note it is
* possible for more than one old path to be tossed out because
* new_path dominates it.
* Loop to check proposed new path against old paths. Note it is possible
* for more than one old path to be tossed out because new_path dominates
* it.
*/
p1 = list_head(parent_rel->pathlist); /* cannot use foreach here */
while (p1 != NULL)
@@ -278,20 +277,20 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
int costcmp;

/*
* As of Postgres 8.0, we use fuzzy cost comparison to avoid
* wasting cycles keeping paths that are really not significantly
* different in cost.
* As of Postgres 8.0, we use fuzzy cost comparison to avoid wasting
* cycles keeping paths that are really not significantly different in
* cost.
*/
costcmp = compare_fuzzy_path_costs(new_path, old_path, TOTAL_COST);

/*
* If the two paths compare differently for startup and total
* cost, then we want to keep both, and we can skip the (much
* slower) comparison of pathkeys. If they compare the same,
* proceed with the pathkeys comparison. Note: this test relies
* on the fact that compare_fuzzy_path_costs will only return 0 if
* both costs are effectively equal (and, therefore, there's no
* need to call it twice in that case).
* If the two paths compare differently for startup and total cost,
* then we want to keep both, and we can skip the (much slower)
* comparison of pathkeys. If they compare the same, proceed with the
* pathkeys comparison. Note: this test relies on the fact that
* compare_fuzzy_path_costs will only return 0 if both costs are
* effectively equal (and, therefore, there's no need to call it twice
* in that case).
*/
if (costcmp == 0 ||
costcmp == compare_fuzzy_path_costs(new_path, old_path,
@@ -307,16 +306,15 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
else
{
/*
* Same pathkeys, and fuzzily the same cost, so
* keep just one --- but we'll do an exact cost
* comparison to decide which.
* Same pathkeys, and fuzzily the same cost, so keep
* just one --- but we'll do an exact cost comparison
* to decide which.
*/
if (compare_path_costs(new_path, old_path,
TOTAL_COST) < 0)
remove_old = true; /* new dominates old */
else
accept_new = false; /* old equals or dominates
* new */
accept_new = false; /* old equals or dominates new */
}
break;
case PATHKEYS_BETTER1:
@@ -340,6 +338,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
{
parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
p1, p1_prev);

/*
* Delete the data pointed-to by the deleted cell, if possible
*/
@@ -442,10 +441,9 @@ create_index_path(PlannerInfo *root,
/*
* For a join inner scan, there's no point in marking the path with any
* pathkeys, since it will only ever be used as the inner path of a
* nestloop, and so its ordering does not matter. For the same reason
* we don't really care what order it's scanned in. (We could expect
* the caller to supply the correct values, but it's easier to force
* it here.)
* nestloop, and so its ordering does not matter. For the same reason we
* don't really care what order it's scanned in. (We could expect the
* caller to supply the correct values, but it's easier to force it here.)
*/
if (isjoininner)
{
@@ -476,15 +474,15 @@ create_index_path(PlannerInfo *root,
/*
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
* selectivity of the join clauses. Since clause_groups may
* contain both restriction and join clauses, we have to do a set
* union to get the full set of clauses that must be considered to
* compute the correct selectivity. (Without the union operation,
* we might have some restriction clauses appearing twice, which'd
* mislead clauselist_selectivity into double-counting their
* selectivity. However, since RestrictInfo nodes aren't copied when
* linking them into different lists, it should be sufficient to use
* pointer comparison to remove duplicates.)
* selectivity of the join clauses. Since clause_groups may contain
* both restriction and join clauses, we have to do a set union to get
* the full set of clauses that must be considered to compute the
* correct selectivity. (Without the union operation, we might have
* some restriction clauses appearing twice, which'd mislead
* clauselist_selectivity into double-counting their selectivity.
* However, since RestrictInfo nodes aren't copied when linking them
* into different lists, it should be sufficient to use pointer
* comparison to remove duplicates.)
*
* Always assume the join type is JOIN_INNER; even if some of the join
* clauses come from other contexts, that's not our problem.
@@ -493,7 +491,7 @@ create_index_path(PlannerInfo *root,
pathnode->rows = rel->tuples *
clauselist_selectivity(root,
allclauses,
rel->relid, /* do not use 0! */
rel->relid, /* do not use 0! */
JOIN_INNER);
/* Like costsize.c, force estimate to be at least one row */
pathnode->rows = clamp_row_est(pathnode->rows);
@@ -501,8 +499,8 @@ create_index_path(PlannerInfo *root,
else
{
/*
* The number of rows is the same as the parent rel's estimate,
* since this isn't a join inner indexscan.
* The number of rows is the same as the parent rel's estimate, since
* this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
@@ -528,7 +526,7 @@ create_bitmap_heap_path(PlannerInfo *root,

pathnode->path.pathtype = T_BitmapHeapScan;
pathnode->path.parent = rel;
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->path.pathkeys = NIL; /* always unordered */

pathnode->bitmapqual = bitmapqual;
pathnode->isjoininner = isjoininner;
@@ -539,9 +537,9 @@ create_bitmap_heap_path(PlannerInfo *root,
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
* selectivity of the join clauses. We make use of the selectivity
* estimated for the bitmap to do this; this isn't really quite
* right since there may be restriction conditions not included
* in the bitmap ...
* estimated for the bitmap to do this; this isn't really quite right
* since there may be restriction conditions not included in the
* bitmap ...
*/
Cost indexTotalCost;
Selectivity indexSelectivity;
@@ -556,8 +554,8 @@ create_bitmap_heap_path(PlannerInfo *root,
else
{
/*
* The number of rows is the same as the parent rel's estimate,
* since this isn't a join inner indexscan.
* The number of rows is the same as the parent rel's estimate, since
* this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
@@ -580,7 +578,7 @@ create_bitmap_and_path(PlannerInfo *root,

pathnode->path.pathtype = T_BitmapAnd;
pathnode->path.parent = rel;
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->path.pathkeys = NIL; /* always unordered */

pathnode->bitmapquals = bitmapquals;

@@ -603,7 +601,7 @@ create_bitmap_or_path(PlannerInfo *root,

pathnode->path.pathtype = T_BitmapOr;
pathnode->path.parent = rel;
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->path.pathkeys = NIL; /* always unordered */

pathnode->bitmapquals = bitmapquals;

@@ -759,8 +757,8 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
return (UniquePath *) rel->cheapest_unique_path;

/*
* We must ensure path struct is allocated in same context as parent
* rel; otherwise GEQO memory management causes trouble. (Compare
* We must ensure path struct is allocated in same context as parent rel;
* otherwise GEQO memory management causes trouble. (Compare
* best_inner_indexscan().)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
@@ -774,17 +772,17 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
pathnode->path.parent = rel;

/*
* Treat the output as always unsorted, since we don't necessarily
* have pathkeys to represent it.
* Treat the output as always unsorted, since we don't necessarily have
* pathkeys to represent it.
*/
pathnode->path.pathkeys = NIL;

pathnode->subpath = subpath;

/*
* Try to identify the targetlist that will actually be unique-ified.
* In current usage, this routine is only used for sub-selects of IN
* clauses, so we should be able to find the tlist in in_info_list.
* Try to identify the targetlist that will actually be unique-ified. In
* current usage, this routine is only used for sub-selects of IN clauses,
* so we should be able to find the tlist in in_info_list.
*/
sub_targetlist = NIL;
foreach(l, root->in_info_list)
@@ -799,19 +797,19 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
}

/*
* If the input is a subquery whose output must be unique already,
* then we don't need to do anything. The test for uniqueness has
* to consider exactly which columns we are extracting; for example
* "SELECT DISTINCT x,y" doesn't guarantee that x alone is distinct.
* So we cannot check for this optimization unless we found our own
* targetlist above, and it consists only of simple Vars referencing
* subquery outputs. (Possibly we could do something with expressions
* in the subquery outputs, too, but for now keep it simple.)
* If the input is a subquery whose output must be unique already, then we
* don't need to do anything. The test for uniqueness has to consider
* exactly which columns we are extracting; for example "SELECT DISTINCT
* x,y" doesn't guarantee that x alone is distinct. So we cannot check for
* this optimization unless we found our own targetlist above, and it
* consists only of simple Vars referencing subquery outputs. (Possibly
* we could do something with expressions in the subquery outputs, too,
* but for now keep it simple.)
*/
if (sub_targetlist && rel->rtekind == RTE_SUBQUERY)
{
RangeTblEntry *rte = rt_fetch(rel->relid, root->parse->rtable);
List *sub_tlist_colnos;
List *sub_tlist_colnos;

sub_tlist_colnos = translate_sub_tlist(sub_targetlist, rel->relid);

@@ -854,24 +852,23 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
rel->width);

/*
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples. (XXX
* probably this is an overestimate.) This should agree with
* make_unique.
* Charge one cpu_operator_cost per comparison per input tuple. We assume
* all columns get compared at most of the tuples. (XXX probably this is
* an overestimate.) This should agree with make_unique.
*/
sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;

/*
* Is it safe to use a hashed implementation? If so, estimate and
* compare costs. We only try this if we know the targetlist for sure
* (else we can't be sure about the datatypes involved).
* Is it safe to use a hashed implementation? If so, estimate and compare
* costs. We only try this if we know the targetlist for sure (else we
* can't be sure about the datatypes involved).
*/
pathnode->umethod = UNIQUE_PATH_SORT;
if (enable_hashagg && sub_targetlist && hash_safe_tlist(sub_targetlist))
{
/*
* Estimate the overhead per hashtable entry at 64 bytes (same as
* in planner.c).
* Estimate the overhead per hashtable entry at 64 bytes (same as in
* planner.c).
*/
int hashentrysize = rel->width + 64;

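For illustration, the kind of memory gate this per-entry estimate feeds: hashing is only worth considering when the whole hashtable fits in the sort memory budget. A hedged sketch --- hash_fits_in_memory, work_mem_kb, and the exact comparison are assumptions in the spirit of the surrounding code, not a quote of it:

#include <stdbool.h>

/*
 * Hypothetical feasibility check: row_width and rows mirror rel->width
 * and the path's row estimate; 64 bytes is the per-entry overhead used
 * above.
 */
static bool
hash_fits_in_memory(int row_width, double rows, int work_mem_kb)
{
    double hashentrysize = row_width + 64;

    return hashentrysize * rows <= work_mem_kb * 1024.0;
}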
@@ -923,7 +920,7 @@ translate_sub_tlist(List *tlist, int relid)

foreach(l, tlist)
{
Var *var = (Var *) lfirst(l);
Var *var = (Var *) lfirst(l);

if (!var || !IsA(var, Var) ||
var->varno != relid)
@@ -987,8 +984,8 @@ query_is_distinct_for(Query *query, List *colnos)
else
{
/*
* If we have no GROUP BY, but do have aggregates or HAVING, then
* the result is at most one row so it's surely unique.
* If we have no GROUP BY, but do have aggregates or HAVING, then the
* result is at most one row so it's surely unique.
*/
if (query->hasAggs || query->havingQual)
return true;
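A simplified, self-contained sketch of the two uniqueness rules being applied here (my own condensation, not the planner's actual code): with GROUP BY, the output is distinct in the examined columns only if they cover every grouping column; with aggregates or HAVING but no GROUP BY, there is at most one row, so uniqueness is trivial.

#include <stdbool.h>

/* hypothetical flags standing in for the Query fields tested above */
bool
output_is_distinct(bool has_group_by, bool colnos_cover_group_by,
				   bool has_aggs, bool has_having)
{
	if (has_group_by)
		return colnos_cover_group_by;	/* must keep every grouping column */
	if (has_aggs || has_having)
		return true;			/* at most one row: trivially unique */
	return false;				/* no guarantee otherwise */
}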
@@ -1167,8 +1164,8 @@ create_mergejoin_path(PlannerInfo *root,
MergePath *pathnode = makeNode(MergePath);

/*
* If the given paths are already well enough ordered, we can skip
* doing an explicit sort.
* If the given paths are already well enough ordered, we can skip doing
* an explicit sort.
*/
if (outersortkeys &&
pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
@@ -1178,15 +1175,15 @@ create_mergejoin_path(PlannerInfo *root,
innersortkeys = NIL;

/*
* If we are not sorting the inner path, we may need a materialize
* node to ensure it can be marked/restored. (Sort does support
* mark/restore, so no materialize is needed in that case.)
* If we are not sorting the inner path, we may need a materialize node to
* ensure it can be marked/restored. (Sort does support mark/restore, so
* no materialize is needed in that case.)
*
* Since the inner side must be ordered, and only Sorts and IndexScans
* can create order to begin with, you might think there's no problem
* --- but you'd be wrong. Nestloop and merge joins can *preserve*
* the order of their inputs, so they can be selected as the input of
* a mergejoin, and they don't support mark/restore at present.
* Since the inner side must be ordered, and only Sorts and IndexScans can
* create order to begin with, you might think there's no problem --- but
* you'd be wrong. Nestloop and merge joins can *preserve* the order of
* their inputs, so they can be selected as the input of a mergejoin, and
* they don't support mark/restore at present.
*/
if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))
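The hunk breaks off at the test itself. Judging from the comment, the consequent interposes a Materialize node over the inner path; a sketch of the assumed shape (create_material_path() is real pathnode.c machinery, but the exact statement is my reconstruction, not quoted from the patch):

if (innersortkeys == NIL &&
	!ExecSupportsMarkRestore(inner_path->pathtype))
	inner_path = (Path *)
		create_material_path(inner_path->parent, inner_path);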
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.113 2005/07/23 21:05:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.114 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@


static void estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples);
BlockNumber *pages, double *tuples);


/*
@@ -71,18 +71,18 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)

/*
* Normally, we can assume the rewriter already acquired at least
* AccessShareLock on each relation used in the query. However this
* will not be the case for relations added to the query because they
* are inheritance children of some relation mentioned explicitly.
* For them, this is the first access during the parse/rewrite/plan
* pipeline, and so we need to obtain and keep a suitable lock.
* AccessShareLock on each relation used in the query. However this will
* not be the case for relations added to the query because they are
* inheritance children of some relation mentioned explicitly. For them,
* this is the first access during the parse/rewrite/plan pipeline, and so
* we need to obtain and keep a suitable lock.
*
* XXX really, a suitable lock is RowShareLock if the relation is
* an UPDATE/DELETE target, and AccessShareLock otherwise. However
* we cannot easily tell here which to get, so for the moment just
* get AccessShareLock always. The executor will get the right lock
* when it runs, which means there is a very small chance of deadlock
* trying to upgrade our lock.
* XXX really, a suitable lock is RowShareLock if the relation is an
* UPDATE/DELETE target, and AccessShareLock otherwise. However we cannot
* easily tell here which to get, so for the moment just get
* AccessShareLock always. The executor will get the right lock when it
* runs, which means there is a very small chance of deadlock trying to
* upgrade our lock.
*/
if (rel->reloptkind == RELOPT_BASEREL)
relation = heap_open(relationObjectId, NoLock);
@@ -105,8 +105,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
&rel->pages, &rel->tuples);

/*
* Make list of indexes. Ignore indexes on system catalogs if told
* to.
* Make list of indexes. Ignore indexes on system catalogs if told to.
*/
if (IsIgnoringSystemIndexes() && IsSystemClass(relation->rd_rel))
hasindex = false;
@@ -133,10 +132,10 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Extract info from the relation descriptor for the index.
*
* Note that we take no lock on the index; we assume our lock on
* the parent table will protect the index's schema information.
* When and if the executor actually uses the index, it will take
* a lock as needed to protect the access to the index contents.
* Note that we take no lock on the index; we assume our lock on the
* parent table will protect the index's schema information. When
* and if the executor actually uses the index, it will take a
* lock as needed to protect the access to the index contents.
*/
indexRelation = index_open(indexoid);
index = indexRelation->rd_index;
@@ -148,8 +147,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->ncolumns = ncolumns = index->indnatts;

/*
* Need to make classlist and ordering arrays large enough to
* put a terminating 0 at the end of each one.
* Need to make classlist and ordering arrays large enough to put
* a terminating 0 at the end of each one.
*/
info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
info->classlist = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
@@ -166,8 +165,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->amoptionalkey = indexRelation->rd_am->amoptionalkey;

/*
* Fetch the ordering operators associated with the index, if
* any.
* Fetch the ordering operators associated with the index, if any.
*/
amorderstrategy = indexRelation->rd_am->amorderstrategy;
if (amorderstrategy != 0)
@@ -184,8 +182,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Fetch the index expressions and predicate, if any. We must
* modify the copies we obtain from the relcache to have the
* correct varno for the parent relation, so that they match
* up correctly against qual clauses.
* correct varno for the parent relation, so that they match up
* correctly against qual clauses.
*/
info->indexprs = RelationGetIndexExpressions(indexRelation);
info->indpred = RelationGetIndexPredicate(indexRelation);
@@ -197,11 +195,11 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->unique = index->indisunique;

/*
* Estimate the index size. If it's not a partial index, we
* lock the number-of-tuples estimate to equal the parent table;
* if it is partial then we have to use the same methods as we
* would for a table, except we can be sure that the index is
* not larger than the table.
* Estimate the index size. If it's not a partial index, we lock
* the number-of-tuples estimate to equal the parent table; if it
* is partial then we have to use the same methods as we would for
* a table, except we can be sure that the index is not larger
* than the table.
*/
if (info->indpred == NIL)
{
@@ -241,8 +239,8 @@ static void
estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples)
{
BlockNumber curpages;
BlockNumber relpages;
BlockNumber curpages;
BlockNumber relpages;
double reltuples;
double density;

@@ -256,22 +254,22 @@ estimate_rel_size(Relation rel, int32 *attr_widths,

/*
* HACK: if the relation has never yet been vacuumed, use a
* minimum estimate of 10 pages. This emulates a desirable
* aspect of pre-8.0 behavior, which is that we wouldn't assume
* a newly created relation is really small, which saves us from
* making really bad plans during initial data loading. (The
* plans are not wrong when they are made, but if they are cached
* and used again after the table has grown a lot, they are bad.)
* It would be better to force replanning if the table size has
* changed a lot since the plan was made ... but we don't
* currently have any infrastructure for redoing cached plans at
* all, so we have to kluge things here instead.
* minimum estimate of 10 pages. This emulates a desirable aspect
* of pre-8.0 behavior, which is that we wouldn't assume a newly
* created relation is really small, which saves us from making
* really bad plans during initial data loading. (The plans are
* not wrong when they are made, but if they are cached and used
* again after the table has grown a lot, they are bad.) It would
* be better to force replanning if the table size has changed a
* lot since the plan was made ... but we don't currently have any
* infrastructure for redoing cached plans at all, so we have to
* kluge things here instead.
*
* We approximate "never vacuumed" by "has relpages = 0", which
* means this will also fire on genuinely empty relations. Not
* great, but fortunately that's a seldom-seen case in the real
* world, and it shouldn't degrade the quality of the plan too
* much anyway to err in this direction.
* We approximate "never vacuumed" by "has relpages = 0", which means
* this will also fire on genuinely empty relations. Not great,
* but fortunately that's a seldom-seen case in the real world,
* and it shouldn't degrade the quality of the plan too much
* anyway to err in this direction.
*/
if (curpages < 10 && rel->rd_rel->relpages == 0)
curpages = 10;
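A toy illustration of that clamp (the numbers are invented): a just-created table might physically occupy one page, but the planner pretends it has at least ten until a VACUUM has recorded a relpages value.

#include <stdio.h>

typedef unsigned int BlockNumber;

int
main(void)
{
	BlockNumber curpages = 1;	/* assumed current physical size */
	BlockNumber relpages = 0;	/* pg_class value: never vacuumed */

	if (curpages < 10 && relpages == 0)
		curpages = 10;			/* don't trust "tiny" until vacuumed */

	printf("planner will cost for %u pages\n", curpages);
	return 0;
}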
@@ -287,6 +285,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;

/*
* If it's an index, discount the metapage. This is a kluge
* because it assumes more than it ought to about index contents;
@@ -307,19 +306,19 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* When we have no data because the relation was truncated,
* estimate tuple width from attribute datatypes. We assume
* here that the pages are completely full, which is OK for
* tables (since they've presumably not been VACUUMed yet)
* but is probably an overestimate for indexes. Fortunately
* tables (since they've presumably not been VACUUMed yet) but
* is probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the
* parent table's size.
*
* Note: this code intentionally disregards alignment
* considerations, because (a) that would be gilding the
* lily considering how crude the estimate is, and (b)
* it creates platform dependencies in the default plans
* which are kind of a headache for regression testing.
* considerations, because (a) that would be gilding the lily
* considering how crude the estimate is, and (b) it creates
* platform dependencies in the default plans which are kind
* of a headache for regression testing.
*/
int32 tuple_width = 0;
int i;
int32 tuple_width = 0;
int i;

for (i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
{
@@ -391,12 +390,12 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
constr = relation->rd_att->constr;
if (constr != NULL)
{
int num_check = constr->num_check;
int i;
int num_check = constr->num_check;
int i;

for (i = 0; i < num_check; i++)
{
Node *cexpr;
Node *cexpr;

cexpr = stringToNode(constr->check[i].ccbin);

@@ -425,8 +424,8 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
ChangeVarNodes(cexpr, 1, varno, 0);

/*
* Finally, convert to implicit-AND format (that is, a List)
* and append the resulting item(s) to our output list.
* Finally, convert to implicit-AND format (that is, a List) and
* append the resulting item(s) to our output list.
*/
result = list_concat(result,
make_ands_implicit((Expr *) cexpr));
@@ -532,11 +531,12 @@ build_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
break;

case RTE_FUNCTION:
expandRTE(rte, varno, 0, true /* include dropped */,
expandRTE(rte, varno, 0, true /* include dropped */ ,
NULL, &colvars);
foreach(l, colvars)
{
var = (Var *) lfirst(l);

/*
* A non-Var in expandRTE's output means a dropped column;
* must punt.
@@ -727,11 +727,11 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno)
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);

/*
* Note: ignore partial indexes, since they don't allow us to
* conclude that all attr values are distinct. We don't take any
* interest in expressional indexes either. Also, a multicolumn
* unique index doesn't allow us to conclude that just the
* specified attr is unique.
* Note: ignore partial indexes, since they don't allow us to conclude
* that all attr values are distinct. We don't take any interest in
* expressional indexes either. Also, a multicolumn unique index
* doesn't allow us to conclude that just the specified attr is
* unique.
*/
if (index->unique &&
index->ncolumns == 1 &&
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.3 2005/10/06 16:01:55 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.4 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@ static bool predicate_refuted_by_recurse(Node *clause, Node *predicate);
static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause);
static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause);
static bool btree_predicate_proof(Expr *predicate, Node *clause,
bool refute_it);
bool refute_it);


/*
@@ -66,9 +66,9 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* In all cases where the predicate is an AND-clause,
* predicate_implied_by_recurse() will prefer to iterate over the
* predicate's components. So we can just do that to start with here,
* and eliminate the need for predicate_implied_by_recurse() to handle
* a bare List on the predicate side.
* predicate's components. So we can just do that to start with here, and
* eliminate the need for predicate_implied_by_recurse() to handle a bare
* List on the predicate side.
*
* Logic is: restriction must imply each of the AND'ed predicate items.
*/
@@ -110,11 +110,11 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
return false; /* no restriction: refutation must fail */

/*
* Unlike the implication case, predicate_refuted_by_recurse needs to
* be able to see the top-level AND structure on both sides --- otherwise
* it will fail to handle the case where one restriction clause is an OR
* that can refute the predicate AND as a whole, but not each predicate
* clause separately.
* Unlike the implication case, predicate_refuted_by_recurse needs to be
* able to see the top-level AND structure on both sides --- otherwise it
* will fail to handle the case where one restriction clause is an OR that
* can refute the predicate AND as a whole, but not each predicate clause
* separately.
*/
return predicate_refuted_by_recurse((Node *) restrictinfo_list,
(Node *) predicate_list);
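A self-contained toy check of the situation that comment describes (example mine): the restriction (x = 2 OR y = 2) refutes the predicate (x = 1 AND y = 1) as a whole, even though it refutes neither conjunct on its own, since the y = 2 branch is perfectly compatible with x = 1.

#include <assert.h>

int
main(void)
{
	int		x,
			y;

	for (x = 0; x < 3; x++)
		for (y = 0; y < 3; y++)
			if (x == 2 || y == 2)				/* restriction holds... */
				assert(!(x == 1 && y == 1));	/* ...so predicate cannot */
	return 0;
}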
@@ -137,7 +137,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
* An "atom" is anything other than an AND or OR node. Notice that we don't
* An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
@@ -152,7 +152,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* under the assumption that both inputs have been AND/OR flattened.
*
* A bare List node on the restriction side is interpreted as an AND clause,
* in order to handle the top-level restriction List properly. However we
* in order to handle the top-level restriction List properly. However we
* need not consider a List on the predicate side since predicate_implied_by()
* already expanded it.
*
@@ -228,8 +228,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
if (or_clause(predicate))
{
/*
* OR-clause => OR-clause if each of A's items implies any of
* B's items. Messy but can't do it any more simply.
* OR-clause => OR-clause if each of A's items implies any of B's
* items. Messy but can't do it any more simply.
*/
foreach(item, ((BoolExpr *) clause)->args)
{
@@ -242,7 +242,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
break;
}
if (item2 == NULL)
return false; /* doesn't imply any of B's */
return false; /* doesn't imply any of B's */
}
return true;
}
@@ -520,7 +520,7 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause)
*
* When the predicate is of the form "foo IS NULL", we can conclude that
* the predicate is refuted if the clause is a strict operator or function
* that has "foo" as an input. See notes for implication case.
* that has "foo" as an input. See notes for implication case.
*
* Finally, we may be able to deduce something using knowledge about btree
* operator classes; this is encapsulated in btree_predicate_proof().
@@ -602,28 +602,28 @@ static const StrategyNumber BT_implic_table[6][6] = {
/*
* The target operator:
*
* LT LE EQ GE GT NE
* LT LE EQ GE GT NE
*/
{BTGE, BTGE, 0 , 0 , 0 , BTGE}, /* LT */
{BTGT, BTGE, 0 , 0 , 0 , BTGT}, /* LE */
{BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
{0 , 0 , 0 , BTLE, BTLT, BTLT}, /* GE */
{0 , 0 , 0 , BTLE, BTLE, BTLE}, /* GT */
{0 , 0 , 0 , 0 , 0 , BTEQ} /* NE */
{BTGE, BTGE, 0, 0, 0, BTGE}, /* LT */
{BTGT, BTGE, 0, 0, 0, BTGT}, /* LE */
{BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
{0, 0, 0, BTLE, BTLT, BTLT}, /* GE */
{0, 0, 0, BTLE, BTLE, BTLE}, /* GT */
{0, 0, 0, 0, 0, BTEQ} /* NE */
};

static const StrategyNumber BT_refute_table[6][6] = {
/*
* The target operator:
*
* LT LE EQ GE GT NE
* LT LE EQ GE GT NE
*/
{0 , 0 , BTGE, BTGE, BTGE, 0 }, /* LT */
{0 , 0 , BTGT, BTGT, BTGE, 0 }, /* LE */
{BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
{BTLE, BTLT, BTLT, 0 , 0 , 0 }, /* GE */
{BTLE, BTLE, BTLE, 0 , 0 , 0 }, /* GT */
{0 , 0 , BTEQ, 0 , 0 , 0 } /* NE */
{0, 0, BTGE, BTGE, BTGE, 0}, /* LT */
{0, 0, BTGT, BTGT, BTGE, 0}, /* LE */
{BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
{BTLE, BTLT, BTLT, 0, 0, 0}, /* GE */
{BTLE, BTLE, BTLE, 0, 0, 0}, /* GT */
{0, 0, BTEQ, 0, 0, 0} /* NE */
};

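How these tables get used, per my reading of btree_predicate_proof() (the worked numbers are my own, not from the patch): the row is the clause operator, the column the target (predicate) operator, and the entry names the operator for testing pred_const against clause_const. With clause x < 5 and predicate x < 10, both strategies are LT, the LT/LT entry is BTGE, and the proof reduces to checking 10 >= 5, which holds. A minimal sketch:

#include <assert.h>

/* btree strategy numbers, 1-based as in the real headers */
enum { LT = 1, LE, EQ, GE, GT, NE };

int
main(void)
{
	int		implic_LT_LT = GE;	/* my reading of BT_implic_table[LT-1][LT-1] */
	int		clause_const = 5;	/* clause:    x < 5  */
	int		pred_const = 10;	/* predicate: x < 10 */

	/* strategy GE says: proven iff pred_const >= clause_const */
	if (implic_LT_LT == GE)
		assert(pred_const >= clause_const);	/* x < 5 implies x < 10 */
	return 0;
}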
@@ -683,13 +683,13 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
MemoryContext oldcontext;

/*
* Both expressions must be binary opclauses with a
* Const on one side, and identical subexpressions on the other sides.
* Note we don't have to think about binary relabeling of the Const
* node, since that would have been folded right into the Const.
* Both expressions must be binary opclauses with a Const on one side, and
* identical subexpressions on the other sides. Note we don't have to
* think about binary relabeling of the Const node, since that would have
* been folded right into the Const.
*
* If either Const is null, we also fail right away; this assumes that
* the test operator will always be strict.
* If either Const is null, we also fail right away; this assumes that the
* test operator will always be strict.
*/
if (!is_opclause(predicate))
return false;
@@ -738,11 +738,11 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
return false;

/*
* Check for matching subexpressions on the non-Const sides. We used
* to only allow a simple Var, but it's about as easy to allow any
* expression. Remember we already know that the pred expression does
* not contain any non-immutable functions, so identical expressions
* should yield identical results.
* Check for matching subexpressions on the non-Const sides. We used to
* only allow a simple Var, but it's about as easy to allow any
* expression. Remember we already know that the pred expression does not
* contain any non-immutable functions, so identical expressions should
* yield identical results.
*/
if (!equal(pred_var, clause_var))
return false;
@@ -772,24 +772,24 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
*
* We must find a btree opclass that contains both operators, else the
* implication can't be determined. Also, the pred_op has to be of
* default subtype (implying left and right input datatypes are the
* same); otherwise it's unsafe to put the pred_const on the left side
* of the test. Also, the opclass must contain a suitable test
* operator matching the clause_const's type (which we take to mean
* that it has the same subtype as the original clause_operator).
* default subtype (implying left and right input datatypes are the same);
* otherwise it's unsafe to put the pred_const on the left side of the
* test. Also, the opclass must contain a suitable test operator matching
* the clause_const's type (which we take to mean that it has the same
* subtype as the original clause_operator).
*
* If there are multiple matching opclasses, assume we can use any one to
* determine the logical relationship of the two operators and the
* correct corresponding test operator. This should work for any
* logically consistent opclasses.
* determine the logical relationship of the two operators and the correct
* corresponding test operator. This should work for any logically
* consistent opclasses.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op),
0, 0, 0);

/*
* If we couldn't find any opclass containing the pred_op, perhaps it
* is a <> operator. See if it has a negator that is in an opclass.
* If we couldn't find any opclass containing the pred_op, perhaps it is a
* <> operator. See if it has a negator that is in an opclass.
*/
pred_op_negated = false;
if (catlist->n_members == 0)
@@ -800,7 +800,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
pred_op_negated = true;
ReleaseSysCacheList(catlist);
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op_negator),
ObjectIdGetDatum(pred_op_negator),
0, 0, 0);
}
}
@@ -837,8 +837,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
}

/*
* From the same opclass, find a strategy number for the
* clause_op, if possible
* From the same opclass, find a strategy number for the clause_op, if
* possible
*/
clause_tuple = SearchSysCache(AMOPOPID,
ObjectIdGetDatum(clause_op),
@@ -857,7 +857,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
else if (OidIsValid(clause_op_negator))
{
clause_tuple = SearchSysCache(AMOPOPID,
ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(opclass_id),
0, 0);
if (HeapTupleIsValid(clause_tuple))
@@ -896,8 +896,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
}

/*
* See if opclass has an operator for the test strategy and the
* clause datatype.
* See if opclass has an operator for the test strategy and the clause
* datatype.
*/
if (test_strategy == BTNE)
{
@@ -918,9 +918,9 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
*
* Note that we require only the test_op to be immutable, not the
* original clause_op. (pred_op is assumed to have been checked
* immutable by the caller.) Essentially we are assuming that
* the opclass is consistent even if it contains operators that
* are merely stable.
* immutable by the caller.) Essentially we are assuming that the
* opclass is consistent even if it contains operators that are
* merely stable.
*/
if (op_volatile(test_op) == PROVOLATILE_IMMUTABLE)
{
@@ -958,7 +958,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)

/* And execute it. */
test_result = ExecEvalExprSwitchContext(test_exprstate,
GetPerTupleExprContext(estate),
GetPerTupleExprContext(estate),
&isNull, NULL);

/* Get back to outer memory context */
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.71 2005/07/28 22:27:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.72 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,9 +31,9 @@ typedef struct JoinHashEntry
} JoinHashEntry;

static RelOptInfo *make_reloptinfo(PlannerInfo *root, int relid,
RelOptKind reloptkind);
RelOptKind reloptkind);
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *input_rel);
RelOptInfo *input_rel);
static List *build_joinrel_restrictlist(PlannerInfo *root,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
@@ -165,8 +165,8 @@ make_reloptinfo(PlannerInfo *root, int relid, RelOptKind reloptkind)
/* Add the finished struct to the base_rel_array */
if (relid >= root->base_rel_array_size)
{
int oldsize = root->base_rel_array_size;
int newsize;
int oldsize = root->base_rel_array_size;
int newsize;

newsize = Max(oldsize * 2, relid + 1);
root->base_rel_array = (RelOptInfo **)
@@ -225,7 +225,7 @@ build_join_rel_hash(PlannerInfo *root)
hashtab = hash_create("JoinRelHashTable",
256L,
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

/* Insert all the already-existing joinrels */
foreach(l, root->join_rel_list)
@@ -254,7 +254,7 @@ RelOptInfo *
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
* Switch to using hash lookup when list grows "too long". The threshold
* Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
@@ -263,10 +263,10 @@ find_join_rel(PlannerInfo *root, Relids relids)
/*
* Use either hashtable lookup or linear search, as appropriate.
*
* Note: the seemingly redundant hashkey variable is used to avoid
* taking the address of relids; unless the compiler is exceedingly
* smart, doing so would force relids out of a register and thus
* probably slow down the list-search case.
* Note: the seemingly redundant hashkey variable is used to avoid taking the
* address of relids; unless the compiler is exceedingly smart, doing so
* would force relids out of a register and thus probably slow down the
* list-search case.
*/
if (root->join_rel_hash)
{
@@ -331,8 +331,8 @@ build_join_rel(PlannerInfo *root,
if (joinrel)
{
/*
* Yes, so we only need to figure the restrictlist for this
* particular pair of component relations.
* Yes, so we only need to figure the restrictlist for this particular
* pair of component relations.
*/
if (restrictlist_ptr)
*restrictlist_ptr = build_joinrel_restrictlist(root,
@@ -375,21 +375,20 @@ build_join_rel(PlannerInfo *root,
joinrel->index_inner_paths = NIL;

/*
* Create a new tlist containing just the vars that need to be output
* from this join (ie, are needed for higher joinclauses or final
* output).
* Create a new tlist containing just the vars that need to be output from
* this join (ie, are needed for higher joinclauses or final output).
*
* NOTE: the tlist order for a join rel will depend on which pair of
* outer and inner rels we first try to build it from. But the
* contents should be the same regardless.
* NOTE: the tlist order for a join rel will depend on which pair of outer
* and inner rels we first try to build it from. But the contents should
* be the same regardless.
*/
build_joinrel_tlist(root, joinrel, outer_rel);
build_joinrel_tlist(root, joinrel, inner_rel);

/*
* Construct restrict and join clause lists for the new joinrel. (The
* caller might or might not need the restrictlist, but I need it
* anyway for set_joinrel_size_estimates().)
* caller might or might not need the restrictlist, but I need it anyway
* for set_joinrel_size_estimates().)
*/
restrictlist = build_joinrel_restrictlist(root,
joinrel,
@@ -407,9 +406,9 @@ build_join_rel(PlannerInfo *root,
jointype, restrictlist);

/*
* Add the joinrel to the query's joinrel list, and store it into
* the auxiliary hashtable if there is one. NB: GEQO requires us
* to append the new joinrel to the end of the list!
* Add the joinrel to the query's joinrel list, and store it into the
* auxiliary hashtable if there is one. NB: GEQO requires us to append
* the new joinrel to the end of the list!
*/
root->join_rel_list = lappend(root->join_rel_list, joinrel);

@@ -527,18 +526,18 @@ build_joinrel_restrictlist(PlannerInfo *root,
* Collect all the clauses that syntactically belong at this level.
*/
rlist = list_concat(subbuild_joinrel_restrictlist(joinrel,
outer_rel->joininfo),
outer_rel->joininfo),
subbuild_joinrel_restrictlist(joinrel,
inner_rel->joininfo));
inner_rel->joininfo));

/*
* Eliminate duplicate and redundant clauses.
*
* We must eliminate duplicates, since we will see many of the same
* clauses arriving from both input relations. Also, if a clause is a
* mergejoinable clause, it's possible that it is redundant with
* previous clauses (see optimizer/README for discussion). We detect
* that case and omit the redundant clause from the result list.
* We must eliminate duplicates, since we will see many of the same clauses
* arriving from both input relations. Also, if a clause is a
* mergejoinable clause, it's possible that it is redundant with previous
* clauses (see optimizer/README for discussion). We detect that case and
* omit the redundant clause from the result list.
*/
result = remove_redundant_join_clauses(root, rlist,
IS_OUTER_JOIN(jointype));
@@ -571,18 +570,17 @@ subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
* This clause becomes a restriction clause for the joinrel,
* since it refers to no outside rels. We don't bother to
* check for duplicates here --- build_joinrel_restrictlist
* will do that.
* This clause becomes a restriction clause for the joinrel, since
* it refers to no outside rels. We don't bother to check for
* duplicates here --- build_joinrel_restrictlist will do that.
*/
restrictlist = lappend(restrictlist, rinfo);
}
else
{
/*
* This clause is still a join clause at this level, so we
* ignore it in this routine.
* This clause is still a join clause at this level, so we ignore
* it in this routine.
*/
}
}
@@ -603,17 +601,17 @@ subbuild_joinrel_joinlist(RelOptInfo *joinrel,
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
* This clause becomes a restriction clause for the joinrel,
* since it refers to no outside rels. So we can ignore it
* in this routine.
* This clause becomes a restriction clause for the joinrel, since
* it refers to no outside rels. So we can ignore it in this
* routine.
*/
}
else
{
/*
* This clause is still a join clause at this level, so add
* it to the joininfo list for the joinrel, being careful to
* eliminate duplicates. (Since RestrictInfo nodes are normally
* This clause is still a join clause at this level, so add it to
* the joininfo list for the joinrel, being careful to eliminate
* duplicates. (Since RestrictInfo nodes are normally
* multiply-linked rather than copied, pointer equality should be
* a sufficient test. If two equal() nodes should happen to sneak
* in, no great harm is done --- they'll be detected by
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.40 2005/10/13 00:06:46 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.41 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,8 +51,8 @@ RestrictInfo *
make_restrictinfo(Expr *clause, bool is_pushed_down, Relids required_relids)
{
/*
* If it's an OR clause, build a modified copy with RestrictInfos
* inserted above each subclause of the top-level AND/OR structure.
* If it's an OR clause, build a modified copy with RestrictInfos inserted
* above each subclause of the top-level AND/OR structure.
*/
if (or_clause((Node *) clause))
return (RestrictInfo *) make_sub_restrictinfos(clause, is_pushed_down);
@@ -101,9 +101,9 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* There may well be redundant quals among the subplans, since a
* top-level WHERE qual might have gotten used to form several
* different index quals. We don't try exceedingly hard to
* eliminate redundancies, but we do eliminate obvious duplicates
* by using list_concat_unique.
* different index quals. We don't try exceedingly hard to eliminate
* redundancies, but we do eliminate obvious duplicates by using
* list_concat_unique.
*/
result = NIL;
foreach(l, apath->bitmapquals)
@@ -125,7 +125,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
* to just "true". We do not try to eliminate redundant subclauses
* to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -142,8 +142,8 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
{
/*
* If we find a qual-less subscan, it represents a constant
* TRUE, and hence the OR result is also constant TRUE, so
* we can stop here.
* TRUE, and hence the OR result is also constant TRUE, so we
* can stop here.
*/
return NIL;
}
@@ -157,8 +157,8 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
}

/*
* Avoid generating one-element ORs, which could happen
* due to redundancy elimination.
* Avoid generating one-element ORs, which could happen due to
* redundancy elimination.
*/
if (list_length(withris) <= 1)
result = withris;
@@ -174,20 +174,20 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
}
else if (IsA(bitmapqual, IndexPath))
{
IndexPath *ipath = (IndexPath *) bitmapqual;
IndexPath *ipath = (IndexPath *) bitmapqual;

result = list_copy(ipath->indexclauses);
if (include_predicates && ipath->indexinfo->indpred != NIL)
{
foreach(l, ipath->indexinfo->indpred)
{
Expr *pred = (Expr *) lfirst(l);
Expr *pred = (Expr *) lfirst(l);

/*
* We know that the index predicate must have been implied
* by the query condition as a whole, but it may or may not
* be implied by the conditions that got pushed into the
* bitmapqual. Avoid generating redundant conditions.
* We know that the index predicate must have been implied by
* the query condition as a whole, but it may or may not be
* implied by the conditions that got pushed into the
* bitmapqual. Avoid generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), result))
result = lappend(result,
@@ -223,8 +223,8 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->can_join = false; /* may get set below */

/*
* If it's a binary opclause, set up left/right relids info. In any
* case set up the total clause relids info.
* If it's a binary opclause, set up left/right relids info. In any case
* set up the total clause relids info.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
@@ -232,13 +232,13 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->right_relids = pull_varnos(get_rightop(clause));

restrictinfo->clause_relids = bms_union(restrictinfo->left_relids,
restrictinfo->right_relids);
restrictinfo->right_relids);

/*
* Does it look like a normal join clause, i.e., a binary operator
* relating expressions that come from distinct relations? If so
* we might be able to use it in a join algorithm. Note that this
* is a purely syntactic test that is made regardless of context.
* relating expressions that come from distinct relations? If so we
* might be able to use it in a join algorithm. Note that this is a
* purely syntactic test that is made regardless of context.
*/
if (!bms_is_empty(restrictinfo->left_relids) &&
!bms_is_empty(restrictinfo->right_relids) &&
@@ -262,11 +262,11 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->required_relids = restrictinfo->clause_relids;

/*
* Fill in all the cacheable fields with "not yet set" markers. None
* of these will be computed until/unless needed. Note in particular
* that we don't mark a binary opclause as mergejoinable or
* hashjoinable here; that happens only if it appears in the right
* context (top level of a joinclause list).
* Fill in all the cacheable fields with "not yet set" markers. None of
* these will be computed until/unless needed. Note in particular that we
* don't mark a binary opclause as mergejoinable or hashjoinable here;
* that happens only if it appears in the right context (top level of a
* joinclause list).
*/
restrictinfo->eval_cost.startup = -1;
restrictinfo->this_selec = -1;
@@ -420,17 +420,16 @@ remove_redundant_join_clauses(PlannerInfo *root, List *restrictinfo_list,
QualCost cost;

/*
* If there are any redundant clauses, we want to eliminate the ones
* that are more expensive in favor of the ones that are less so. Run
* If there are any redundant clauses, we want to eliminate the ones that
* are more expensive in favor of the ones that are less so. Run
* cost_qual_eval() to ensure the eval_cost fields are set up.
*/
cost_qual_eval(&cost, restrictinfo_list);

/*
* We don't have enough knowledge yet to be able to estimate the
* number of times a clause might be evaluated, so it's hard to weight
* the startup and per-tuple costs appropriately. For now just weight
* 'em the same.
* We don't have enough knowledge yet to be able to estimate the number of
* times a clause might be evaluated, so it's hard to weight the startup
* and per-tuple costs appropriately. For now just weight 'em the same.
*/
#define CLAUSECOST(r) ((r)->eval_cost.startup + (r)->eval_cost.per_tuple)

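A small self-contained sketch of that weighting (the struct and figures below are mine, standing in for the planner's QualCost): with startup and per-tuple costs weighted equally, the clause with the smaller sum would be the one kept when two clauses are redundant.

#include <stdio.h>

typedef struct QualCost
{
	double		startup;
	double		per_tuple;
} QualCost;

#define CLAUSECOST(c) ((c).startup + (c).per_tuple)

int
main(void)
{
	QualCost	a = {0.0, 0.0050};	/* cheap comparison */
	QualCost	b = {25.0, 0.0025};	/* costly startup, e.g. a subplan */

	printf("keep clause %s\n",
		   CLAUSECOST(a) <= CLAUSECOST(b) ? "a" : "b");
	return 0;
}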
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.69 2005/04/06 16:34:06 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.70 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ add_to_flat_tlist(List *tlist, List *vars)
{
TargetEntry *tle;

tle = makeTargetEntry(copyObject(var), /* copy needed?? */
tle = makeTargetEntry(copyObject(var), /* copy needed?? */
next_resno++,
NULL,
false);
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.65 2005/06/05 22:32:56 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.66 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,8 +88,8 @@ pull_varnos(Node *node)
context.sublevels_up = 0;

/*
* Must be prepared to start with a Query or a bare expression tree;
* if it's a Query, we don't want to increment sublevels_up.
* Must be prepared to start with a Query or a bare expression tree; if
* it's a Query, we don't want to increment sublevels_up.
*/
query_or_expression_tree_walker(node,
pull_varnos_walker,
@@ -149,8 +149,8 @@ contain_var_reference(Node *node, int varno, int varattno, int levelsup)
context.sublevels_up = levelsup;

/*
* Must be prepared to start with a Query or a bare expression tree;
* if it's a Query, we don't want to increment sublevels_up.
* Must be prepared to start with a Query or a bare expression tree; if
* it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
contain_var_reference_walker,
@@ -215,8 +215,7 @@ contain_var_clause_walker(Node *node, void *context)
if (IsA(node, Var))
{
if (((Var *) node)->varlevelsup == 0)
return true; /* abort the tree traversal and return
* true */
return true; /* abort the tree traversal and return true */
return false;
}
return expression_tree_walker(node, contain_var_clause_walker, context);
@@ -286,7 +285,7 @@ contain_vars_above_level(Node *node, int levelsup)
int sublevels_up = levelsup;

return query_or_expression_tree_walker(node,
contain_vars_above_level_walker,
contain_vars_above_level_walker,
(void *) &sublevels_up,
0);
}
@@ -370,8 +369,8 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel = varlevelsup;

/*
* As soon as we find a local variable, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
* As soon as we find a local variable, we can abort the tree
* traversal, since min_varlevel is then certainly 0.
*/
if (varlevelsup == 0)
return true;
@@ -380,10 +379,9 @@ find_minimum_var_level_walker(Node *node,
}

/*
* An Aggref must be treated like a Var of its level. Normally we'd
* get the same result from looking at the Vars in the aggregate's
* argument, but this fails in the case of a Var-less aggregate call
* (COUNT(*)).
* An Aggref must be treated like a Var of its level. Normally we'd get
* the same result from looking at the Vars in the aggregate's argument,
* but this fails in the case of a Var-less aggregate call (COUNT(*)).
*/
if (IsA(node, Aggref))
{
@@ -400,8 +398,8 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel = agglevelsup;

/*
* As soon as we find a local aggregate, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
* As soon as we find a local aggregate, we can abort the tree
* traversal, since min_varlevel is then certainly 0.
*/
if (agglevelsup == 0)
return true;
@@ -553,8 +551,8 @@ flatten_join_alias_vars_mutator(Node *node,
newvar = (Node *) list_nth(rte->joinaliasvars, var->varattno - 1);

/*
* If we are expanding an alias carried down from an upper query,
* must adjust its varlevelsup fields.
* If we are expanding an alias carried down from an upper query, must
* adjust its varlevelsup fields.
*/
if (context->sublevels_up != 0)
{
@@ -570,8 +568,8 @@ flatten_join_alias_vars_mutator(Node *node,
InClauseInfo *ininfo;

ininfo = (InClauseInfo *) expression_tree_mutator(node,
flatten_join_alias_vars_mutator,
(void *) context);
flatten_join_alias_vars_mutator,
(void *) context);
/* now fix InClauseInfo's relid sets */
if (context->sublevels_up == 0)
{