
pgindent run. Make it all clean.

Author: Bruce Momjian
Date: 2001-03-22 04:01:46 +00:00
parent 6cf8707b82
commit 9e1552607a
555 changed files with 32514 additions and 28110 deletions

src/backend/optimizer/path/_deadcode/predmig.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.8 2001/01/24 19:42:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.9 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -485,7 +485,7 @@ xfunc_form_groups(Query *queryInfo, Stream root, Stream bottom)
}
/* ------------------- UTILITY FUNCTIONS ------------------------- */
/* ------------------- UTILITY FUNCTIONS ------------------------- */
/*
** xfunc_free_stream

src/backend/optimizer/path/allpaths.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.71 2001/02/03 21:17:52 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.72 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,12 +33,12 @@ int geqo_rels = DEFAULT_GEQO_RELS;
static void set_base_rel_pathlists(Query *root);
static void set_plain_rel_pathlist(Query *root, RelOptInfo *rel,
RangeTblEntry *rte);
RangeTblEntry *rte);
static void set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
RangeTblEntry *rte,
List *inheritlist);
RangeTblEntry *rte,
List *inheritlist);
static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
List *initial_rels);
List *initial_rels);
#ifdef OPTIMIZER_DEBUG
static void debug_print_rel(Query *root, RelOptInfo *rel);
@@ -94,7 +94,7 @@ set_base_rel_pathlists(Query *root)
RangeTblEntry *rte;
List *inheritlist;
Assert(length(rel->relids) == 1); /* better be base rel */
Assert(length(rel->relids) == 1); /* better be base rel */
rti = lfirsti(rel->relids);
rte = rt_fetch(rti, root->rtable);
@@ -103,24 +103,25 @@ set_base_rel_pathlists(Query *root)
/* Subquery --- generate a separate plan for it */
/*
* If there are any restriction clauses that have been attached
* to the subquery relation, consider pushing them down to become
* HAVING quals of the subquery itself. (Not WHERE clauses, since
* they may refer to subquery outputs that are aggregate results.
* But planner.c will transfer them into the subquery's WHERE if
* they do not.) This transformation is useful because it may
* allow us to generate a better plan for the subquery than
* evaluating all the subquery output rows and then filtering
* them.
* If there are any restriction clauses that have been
* attached to the subquery relation, consider pushing them
* down to become HAVING quals of the subquery itself. (Not
* WHERE clauses, since they may refer to subquery outputs
* that are aggregate results. But planner.c will transfer
* them into the subquery's WHERE if they do not.) This
* transformation is useful because it may allow us to
* generate a better plan for the subquery than evaluating all
* the subquery output rows and then filtering them.
*
* Currently, we do not push down clauses that contain subselects,
* mainly because I'm not sure it will work correctly (the
* subplan hasn't yet transformed sublinks to subselects).
* Also, if the subquery contains set ops (UNION/INTERSECT/EXCEPT)
* we do not push down any qual clauses, since the planner doesn't
* support quals at the top level of a setop. (With suitable
* analysis we could try to push the quals down into the component
* queries of the setop, but getting it right is not trivial.)
* Currently, we do not push down clauses that contain
* subselects, mainly because I'm not sure it will work
* correctly (the subplan hasn't yet transformed sublinks to
* subselects). Also, if the subquery contains set ops
* (UNION/INTERSECT/EXCEPT) we do not push down any qual
* clauses, since the planner doesn't support quals at the top
* level of a setop. (With suitable analysis we could try to
* push the quals down into the component queries of the
* setop, but getting it right is not trivial.)
* Non-pushed-down clauses will get evaluated as qpquals of
* the SubqueryScan node.
*
@@ -136,8 +137,8 @@ set_base_rel_pathlists(Query *root)
foreach(lst, rel->baserestrictinfo)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lst);
Node *clause = (Node *) rinfo->clause;
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lst);
Node *clause = (Node *) rinfo->clause;
if (contain_subplans(clause))
{
@@ -146,13 +147,14 @@ set_base_rel_pathlists(Query *root)
}
else
{
/*
* We need to replace Vars in the clause (which must
* refer to outputs of the subquery) with copies of
* the subquery's targetlist expressions. Note that
* at this point, any uplevel Vars in the clause
* should have been replaced with Params, so they
* need no work.
* We need to replace Vars in the clause (which
* must refer to outputs of the subquery) with
* copies of the subquery's targetlist
* expressions. Note that at this point, any
* uplevel Vars in the clause should have been
* replaced with Params, so they need no work.
*/
clause = ResolveNew(clause, rti, 0,
rte->subquery->targetList,
@@ -160,11 +162,12 @@ set_base_rel_pathlists(Query *root)
rte->subquery->havingQual =
make_and_qual(rte->subquery->havingQual,
clause);
/*
* We need not change the subquery's hasAggs or
* hasSublinks flags, since we can't be pushing down
* any aggregates that weren't there before, and we
* don't push down subselects at all.
* hasSublinks flags, since we can't be pushing
* down any aggregates that weren't there before,
* and we don't push down subselects at all.
*/
}
}
@@ -215,9 +218,9 @@ set_plain_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte)
/*
* Generate paths and add them to the rel's pathlist.
*
* Note: add_path() will discard any paths that are dominated by
* another available path, keeping only those paths that are
* superior along at least one dimension of cost or sortedness.
* Note: add_path() will discard any paths that are dominated by another
* available path, keeping only those paths that are superior along at
* least one dimension of cost or sortedness.
*/
/* Consider sequential scan */
@@ -230,9 +233,9 @@ set_plain_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte)
create_index_paths(root, rel, indices);
/*
* Note: create_or_index_paths depends on create_index_paths to
* have marked OR restriction clauses with relevant indices; this
* is why it doesn't need to be given the list of indices.
* Note: create_or_index_paths depends on create_index_paths to have
* marked OR restriction clauses with relevant indices; this is why it
* doesn't need to be given the list of indices.
*/
create_or_index_paths(root, rel, rel->baserestrictinfo);
@@ -258,8 +261,8 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
List *il;
/*
* XXX for now, can't handle inherited expansion of FOR UPDATE;
* can we do better?
* XXX for now, can't handle inherited expansion of FOR UPDATE; can we
* do better?
*/
if (intMember(parentRTindex, root->rowMarks))
elog(ERROR, "SELECT FOR UPDATE is not supported for inherit queries");
@@ -271,14 +274,14 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
rel->width = 0;
/*
* Generate access paths for each table in the tree (parent AND children),
* and pick the cheapest path for each table.
* Generate access paths for each table in the tree (parent AND
* children), and pick the cheapest path for each table.
*/
foreach(il, inheritlist)
{
int childRTindex = lfirsti(il);
int childRTindex = lfirsti(il);
RangeTblEntry *childrte;
Oid childOID;
Oid childOID;
RelOptInfo *childrel;
childrte = rt_fetch(childRTindex, root->rtable);
@@ -289,16 +292,18 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
* attach the RelOptInfo to the query's base_rel_list, however.
*
* NOTE: when childRTindex == parentRTindex, we create a second
* RelOptInfo for the same relation. This RelOptInfo will represent
* the parent table alone, whereas the original RelOptInfo represents
* the union of the inheritance tree members.
* RelOptInfo for the same relation. This RelOptInfo will
* represent the parent table alone, whereas the original
* RelOptInfo represents the union of the inheritance tree
* members.
*/
childrel = make_base_rel(root, childRTindex);
/*
* Copy the parent's targetlist and restriction quals to the child,
* with attribute-number adjustment if needed. We don't bother
* to copy the join quals, since we can't do any joining here.
* Copy the parent's targetlist and restriction quals to the
* child, with attribute-number adjustment if needed. We don't
* bother to copy the join quals, since we can't do any joining
* here.
*/
childrel->targetlist = (List *)
adjust_inherited_attrs((Node *) rel->targetlist,
@@ -328,8 +333,8 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
}
/*
* Finally, build Append path and install it as the only access
* path for the parent rel.
* Finally, build Append path and install it as the only access path
* for the parent rel.
*/
add_path(rel, (Path *) create_append_path(rel, subpaths));
@@ -350,9 +355,9 @@ make_fromexpr_rel(Query *root, FromExpr *from)
List *jt;
/*
* Count the number of child jointree nodes. This is the depth
* of the dynamic-programming algorithm we must employ to consider
* all ways of joining the child nodes.
* Count the number of child jointree nodes. This is the depth of the
* dynamic-programming algorithm we must employ to consider all ways
* of joining the child nodes.
*/
levels_needed = length(from->fromlist);
@@ -374,6 +379,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
if (levels_needed == 1)
{
/*
* Single jointree node, so we're done.
*/
@@ -381,6 +387,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
}
else
{
/*
* Consider the different orders in which we could join the rels,
* using either GEQO or regular optimizer.
@@ -401,7 +408,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
* jointree item. These are the components to be joined together.
* jointree item. These are the components to be joined together.
*
* Returns the final level of join relations, i.e., the relation that is
* the result of joining all the original relations together.
@@ -423,8 +430,8 @@ make_one_rel_by_joins(Query *root, int levels_needed, List *initial_rels)
* joinitems[j] is a list of all the j-item rels. Initially we set
* joinitems[1] to represent all the single-jointree-item relations.
*/
joinitems = (List **) palloc((levels_needed+1) * sizeof(List *));
MemSet(joinitems, 0, (levels_needed+1) * sizeof(List *));
joinitems = (List **) palloc((levels_needed + 1) * sizeof(List *));
MemSet(joinitems, 0, (levels_needed + 1) * sizeof(List *));
joinitems[1] = initial_rels;

src/backend/optimizer/path/clausesel.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.41 2001/01/24 19:42:57 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.42 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,7 +128,8 @@ clauselist_selectivity(Query *root,
* behave in the simple way we are expecting.)
*
* NB: for consistency of results, this fragment of code had better
* match what clause_selectivity() would do in the cases it handles.
* match what clause_selectivity() would do in the cases it
* handles.
*/
if (varRelid != 0 || NumRelids(clause) == 1)
{
@@ -148,7 +149,7 @@ clauselist_selectivity(Query *root,
get_leftop((Expr *) clause);
if (is_pseudo_constant_clause((Node *) other))
{
Oid opno = ((Oper *) ((Expr *) clause)->oper)->opno;
Oid opno = ((Oper *) ((Expr *) clause)->oper)->opno;
RegProcedure oprrest = get_oprrest(opno);
if (!oprrest)
@@ -156,15 +157,16 @@ clauselist_selectivity(Query *root,
else
s2 = restriction_selectivity(oprrest, opno,
getrelid(relidx,
root->rtable),
root->rtable),
attno,
constval, flag);
/*
* If we reach here, we have computed the same result that
* clause_selectivity would, so we can just use s2 if it's
* the wrong oprrest. But if it's the right oprrest, add
* the clause to rqlist for later processing.
* If we reach here, we have computed the same result
* that clause_selectivity would, so we can just use
* s2 if it's the wrong oprrest. But if it's the
* right oprrest, add the clause to rqlist for later
* processing.
*/
switch (oprrest)
{
@@ -384,18 +386,20 @@ clause_selectivity(Query *root,
if (rte->subquery)
{
/*
* XXX not smart about subquery references...
* any way to do better?
* XXX not smart about subquery references... any way to
* do better?
*/
s1 = 0.5;
}
else
{
/*
* A Var at the top of a clause must be a bool Var.
* This is equivalent to the clause reln.attribute = 't',
* so we compute the selectivity as if that is what we have.
* A Var at the top of a clause must be a bool Var. This
* is equivalent to the clause reln.attribute = 't', so we
* compute the selectivity as if that is what we have.
*/
s1 = restriction_selectivity(F_EQSEL,
BooleanEqualOperator,

src/backend/optimizer/path/costsize.c

@@ -41,7 +41,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.68 2001/02/16 00:03:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.69 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,11 +67,11 @@
#define LOG6(x) (log(x) / 1.79175946922805)
double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
Cost disable_cost = 100000000.0;
@@ -117,14 +117,14 @@ cost_seqscan(Path *path, RelOptInfo *baserel)
/*
* disk costs
*
* The cost of reading a page sequentially is 1.0, by definition.
* Note that the Unix kernel will typically do some amount of
* read-ahead optimization, so that this cost is less than the
* true cost of reading a page from disk. We ignore that issue
* here, but must take it into account when estimating the cost of
* non-sequential accesses!
* The cost of reading a page sequentially is 1.0, by definition. Note
* that the Unix kernel will typically do some amount of read-ahead
* optimization, so that this cost is less than the true cost of
* reading a page from disk. We ignore that issue here, but must take
* it into account when estimating the cost of non-sequential
* accesses!
*/
run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
/* CPU costs */
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
@@ -600,12 +600,12 @@ cost_hashjoin(Path *path,
/*
* The number of tuple comparisons needed is the number of outer
* tuples times the typical hash bucket size. nodeHash.c tries for
* average bucket loading of NTUP_PER_BUCKET, but that goal will
* be reached only if data values are uniformly distributed among
* the buckets. To be conservative, we scale up the target bucket
* size by the number of inner rows times inner dispersion, giving
* an estimate of the typical number of duplicates of each value.
* We then charge one cpu_operator_cost per tuple comparison.
* average bucket loading of NTUP_PER_BUCKET, but that goal will be
* reached only if data values are uniformly distributed among the
* buckets. To be conservative, we scale up the target bucket size by
* the number of inner rows times inner dispersion, giving an estimate
* of the typical number of duplicates of each value. We then charge
* one cpu_operator_cost per tuple comparison.
*/
run_cost += cpu_operator_cost * outer_path->parent->rows *
NTUP_PER_BUCKET * ceil(inner_path->parent->rows * innerdispersion);
@@ -672,7 +672,7 @@ cost_qual_eval(List *quals)
foreach(l, quals)
{
Node *qual = (Node *) lfirst(l);
Node *qual = (Node *) lfirst(l);
/*
* RestrictInfo nodes contain an eval_cost field reserved for this

src/backend/optimizer/path/indxpath.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.102 2001/02/16 03:16:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.103 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -183,8 +183,8 @@ create_index_paths(Query *root,
restrictinfo_list);
/*
* 3. Compute pathkeys describing index's ordering, if any,
* then see how many of them are actually useful for this query.
* 3. Compute pathkeys describing index's ordering, if any, then
* see how many of them are actually useful for this query.
*/
index_pathkeys = build_index_pathkeys(root, rel, index,
ForwardScanDirection);
@@ -207,8 +207,9 @@ create_index_paths(Query *root,
NoMovementScanDirection));
/*
* 5. If the index is ordered, a backwards scan might be interesting.
* Currently this is only possible for a DESC query result ordering.
* 5. If the index is ordered, a backwards scan might be
* interesting. Currently this is only possible for a DESC query
* result ordering.
*/
if (index_is_ordered)
{
@@ -422,10 +423,11 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
if (and_clause((Node *) orsubclause))
{
/*
* Extract relevant sub-subclauses in indexkey order. This is just
* like group_clauses_by_indexkey() except that the input and output
* are lists of bare clauses, not of RestrictInfo nodes.
* Extract relevant sub-subclauses in indexkey order. This is
* just like group_clauses_by_indexkey() except that the input and
* output are lists of bare clauses, not of RestrictInfo nodes.
*/
int *indexkeys = index->indexkeys;
Oid *classes = index->classlist;
@@ -446,8 +448,8 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
}
/*
* If no clauses match this key, we're done; we don't want to look
* at keys to its right.
* If no clauses match this key, we're done; we don't want to
* look at keys to its right.
*/
if (clausegroup == NIL)
break;
@@ -748,8 +750,8 @@ match_clause_to_indexkey(RelOptInfo *rel,
/*
* Check for an indexqual that could be handled by a nestloop
* join. We need the index key to be compared against an
* expression that uses none of the indexed relation's vars
* and contains no non-cachable functions.
* expression that uses none of the indexed relation's vars and
* contains no non-cachable functions.
*/
if (match_index_to_operand(indexkey, leftop, rel, index))
{
@@ -793,7 +795,7 @@ match_clause_to_indexkey(RelOptInfo *rel,
* recognizing binary-compatible datatypes. For example, if we have
* an expression like "oid = 123", the operator will be oideqint4,
* which we need to replace with oideq in order to recognize it as
* matching an oid_ops index on the oid field. A variant case is where
* matching an oid_ops index on the oid field. A variant case is where
* the expression is like "oid::int4 = 123", where the given operator
* will be int4eq and again we need to intuit that we want to use oideq.
*
@@ -832,13 +834,13 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
/*
* Maybe the index uses a binary-compatible operator set.
*
* Get the nominal input types of the given operator and the actual
* type (before binary-compatible relabeling) of the index key.
* Get the nominal input types of the given operator and the actual type
* (before binary-compatible relabeling) of the index key.
*/
oldoptup = SearchSysCache(OPEROID,
ObjectIdGetDatum(expr_op),
0, 0, 0);
if (! HeapTupleIsValid(oldoptup))
if (!HeapTupleIsValid(oldoptup))
return InvalidOid; /* probably can't happen */
oldopform = (Form_pg_operator) GETSTRUCT(oldoptup);
opname = pstrdup(NameStr(oldopform->oprname));
@@ -848,7 +850,7 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
if (indexkey_on_left)
{
Node *leftop = (Node *) get_leftop(clause);
Node *leftop = (Node *) get_leftop(clause);
if (leftop && IsA(leftop, RelabelType))
leftop = ((RelabelType *) leftop)->arg;
@@ -856,7 +858,7 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
}
else
{
Node *rightop = (Node *) get_rightop(clause);
Node *rightop = (Node *) get_rightop(clause);
if (rightop && IsA(rightop, RelabelType))
rightop = ((RelabelType *) rightop)->arg;
@@ -874,9 +876,10 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
return InvalidOid;
/*
* OK, look for operator of the same name with the indexkey's data type.
* (In theory this might find a non-semantically-comparable operator,
* but in practice that seems pretty unlikely for binary-compatible types.)
* OK, look for operator of the same name with the indexkey's data
* type. (In theory this might find a non-semantically-comparable
* operator, but in practice that seems pretty unlikely for
* binary-compatible types.)
*/
new_op = compatible_oper_opid(opname, indexkeytype, indexkeytype, true);
@@ -886,8 +889,8 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
{
/*
* OK, we found a binary-compatible operator of the same
* name; now does it match the index?
* OK, we found a binary-compatible operator of the same name;
* now does it match the index?
*/
if (indexkey_on_left)
commuted_op = new_op;
@@ -1491,8 +1494,9 @@ match_index_to_operand(int indexkey,
RelOptInfo *rel,
IndexOptInfo *index)
{
/*
* Ignore any RelabelType node above the indexkey. This is needed to
* Ignore any RelabelType node above the indexkey. This is needed to
* be able to apply indexscanning in binary-compatible-operator cases.
* Note: we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
@@ -1670,7 +1674,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1687,7 +1691,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1704,7 +1708,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1721,7 +1725,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1983,8 +1987,8 @@ prefix_quals(Var *leftop, Oid expr_op,
result = makeList1(expr);
/*
* If we can create a string larger than the prefix, we can say
* "x < greaterstr".
* If we can create a string larger than the prefix, we can say "x <
* greaterstr".
*/
greaterstr = make_greater_string(prefix, datatype);
if (greaterstr)
@@ -2025,6 +2029,7 @@ find_operator(const char *opname, Oid datatype)
static Datum
string_to_datum(const char *str, Oid datatype)
{
/*
* We cheat a little by assuming that textin() will do for bpchar and
* varchar constants too...

src/backend/optimizer/path/joinpath.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.61 2001/01/24 19:42:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.62 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,32 +25,32 @@
#include "utils/lsyscache.h"
static void sort_inner_and_outer(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list,
JoinType jointype);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list,
JoinType jointype);
static void match_unsorted_outer(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list,
JoinType jointype);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list,
JoinType jointype);
#ifdef NOT_USED
static void match_unsorted_inner(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list,
JoinType jointype);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, List *mergeclause_list,
JoinType jointype);
#endif
static void hash_inner_and_outer(Query *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, JoinType jointype);
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, JoinType jointype);
static Path *best_innerjoin(List *join_paths, List *outer_relid,
JoinType jointype);
JoinType jointype);
static Selectivity estimate_dispersion(Query *root, Var *var);
static List *select_mergejoin_clauses(RelOptInfo *joinrel,
RelOptInfo *outerrel,
RelOptInfo *innerrel,
List *restrictlist,
JoinType jointype);
RelOptInfo *outerrel,
RelOptInfo *innerrel,
List *restrictlist,
JoinType jointype);
/*
@@ -160,26 +160,27 @@ sort_inner_and_outer(Query *root,
* generate a differently-sorted result path at essentially the same
* cost. We have no basis for choosing one over another at this level
* of joining, but some sort orders may be more useful than others for
* higher-level mergejoins, so it's worth considering multiple orderings.
* higher-level mergejoins, so it's worth considering multiple
* orderings.
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
* redundant. Therefore, what we do is convert the mergeclause list to
* a list of canonical pathkeys, and then consider different orderings
* of the pathkeys.
* redundant. Therefore, what we do is convert the mergeclause list
* to a list of canonical pathkeys, and then consider different
* orderings of the pathkeys.
*
* Generating a path for *every* permutation of the pathkeys doesn't
* seem like a winning strategy; the cost in planning time is too high.
* For now, we generate one path for each pathkey, listing that pathkey
* first and the rest in random order. This should allow at
* least a one-clause mergejoin without re-sorting against any other
* possible mergejoin partner path. But if we've not guessed the
* right ordering of secondary keys, we may end up evaluating
* clauses as qpquals when they could have been done as mergeclauses.
* We need to figure out a better way. (Two possible approaches: look
* at all the relevant index relations to suggest plausible sort
* orders, or make just one output path and somehow mark it as having
* a sort-order that can be rearranged freely.)
* Generating a path for *every* permutation of the pathkeys doesn't seem
* like a winning strategy; the cost in planning time is too high. For
* now, we generate one path for each pathkey, listing that pathkey
* first and the rest in random order. This should allow at least a
* one-clause mergejoin without re-sorting against any other possible
* mergejoin partner path. But if we've not guessed the right
* ordering of secondary keys, we may end up evaluating clauses as
* qpquals when they could have been done as mergeclauses. We need to
* figure out a better way. (Two possible approaches: look at all the
* relevant index relations to suggest plausible sort orders, or make
* just one output path and somehow mark it as having a sort-order
* that can be rearranged freely.)
*/
all_pathkeys = make_pathkeys_for_mergeclauses(root,
mergeclause_list,
@@ -200,16 +201,17 @@ sort_inner_and_outer(Query *root,
lremove(front_pathkey,
listCopy(all_pathkeys)));
else
cur_pathkeys = all_pathkeys; /* no work at first one... */
cur_pathkeys = all_pathkeys; /* no work at first one... */
/*
* Select mergeclause(s) that match this sort ordering. If we had
* redundant merge clauses then we will get a subset of the original
* clause list. There had better be some match, however...
* redundant merge clauses then we will get a subset of the
* original clause list. There had better be some match,
* however...
*/
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
cur_pathkeys,
mergeclause_list);
mergeclause_list);
Assert(cur_mergeclauses != NIL);
/*
@@ -334,10 +336,12 @@ match_unsorted_outer(Query *root,
if (nestjoinOK)
{
/*
* Always consider a nestloop join with this outer and cheapest-
* total-cost inner. Consider nestloops using the cheapest-
* startup-cost inner as well, and the best innerjoin indexpath.
* Always consider a nestloop join with this outer and
* cheapest- total-cost inner. Consider nestloops using the
* cheapest- startup-cost inner as well, and the best
* innerjoin indexpath.
*/
add_path(joinrel, (Path *)
create_nestloop_path(joinrel,
@@ -352,7 +356,7 @@ match_unsorted_outer(Query *root,
create_nestloop_path(joinrel,
jointype,
outerpath,
innerrel->cheapest_startup_path,
innerrel->cheapest_startup_path,
restrictlist,
merge_pathkeys));
if (bestinnerjoin != NULL)
@@ -382,8 +386,8 @@ match_unsorted_outer(Query *root,
/*
* Generate a mergejoin on the basis of sorting the cheapest
* inner. Since a sort will be needed, only cheapest total cost
* matters. (But create_mergejoin_path will do the right thing
* if innerrel->cheapest_total_path is already correctly sorted.)
* matters. (But create_mergejoin_path will do the right thing if
* innerrel->cheapest_total_path is already correctly sorted.)
*/
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
@@ -400,13 +404,14 @@ match_unsorted_outer(Query *root,
* Look for presorted inner paths that satisfy the innersortkey
* list or any truncation thereof. Here, we consider both cheap
* startup cost and cheap total cost. Ignore
* innerrel->cheapest_total_path, since we already made a path with it.
* innerrel->cheapest_total_path, since we already made a path
* with it.
*/
num_sortkeys = length(innersortkeys);
if (num_sortkeys > 1)
trialsortkeys = listCopy(innersortkeys); /* need modifiable copy */
trialsortkeys = listCopy(innersortkeys); /* need modifiable copy */
else
trialsortkeys = innersortkeys; /* won't really truncate */
trialsortkeys = innersortkeys; /* won't really truncate */
cheapest_startup_inner = NULL;
cheapest_total_inner = NULL;
@@ -417,8 +422,8 @@ match_unsorted_outer(Query *root,
/*
* Look for an inner path ordered well enough for the first
* 'sortkeycnt' innersortkeys. NB: trialsortkeys list
* is modified destructively, which is why we made a copy...
* 'sortkeycnt' innersortkeys. NB: trialsortkeys list is
* modified destructively, which is why we made a copy...
*/
trialsortkeys = ltruncate(sortkeycnt, trialsortkeys);
innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
@@ -478,8 +483,8 @@ match_unsorted_outer(Query *root,
{
newclauses =
find_mergeclauses_for_pathkeys(root,
trialsortkeys,
mergeclauses);
trialsortkeys,
mergeclauses);
Assert(newclauses != NIL);
}
else
@@ -601,7 +606,7 @@ match_unsorted_inner(Query *root,
if (startupouterpath != NULL && startupouterpath != totalouterpath)
{
merge_pathkeys = build_join_pathkeys(root, joinrel,
startupouterpath->pathkeys);
startupouterpath->pathkeys);
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
jointype,
@@ -696,8 +701,8 @@ hash_inner_and_outer(Query *root,
* estimate dispersion of inner var for costing purposes.
*
* Since we tend to visit the same clauses over and over when
* planning a large query, we cache the dispersion estimates in the
* RestrictInfo node to avoid repeated lookups of statistics.
* planning a large query, we cache the dispersion estimates in
* the RestrictInfo node to avoid repeated lookups of statistics.
*/
if (intMember(left->varno, outerrelids) &&
intMember(right->varno, innerrelids))
@@ -793,13 +798,13 @@ best_innerjoin(List *join_paths, Relids outer_relids, JoinType jointype)
foreach(join_path, join_paths)
{
IndexPath *path = (IndexPath *) lfirst(join_path);
IndexPath *path = (IndexPath *) lfirst(join_path);
Assert(IsA(path, IndexPath));
/*
* If processing an outer join, only use explicit join clauses in the
* inner indexscan. For inner joins we need not be so picky.
* If processing an outer join, only use explicit join clauses in
* the inner indexscan. For inner joins we need not be so picky.
*/
if (isouterjoin && !path->alljoinquals)
continue;
@@ -879,15 +884,15 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
*right;
/*
* If processing an outer join, only use its own join clauses in the
* merge. For inner joins we need not be so picky.
* If processing an outer join, only use its own join clauses in
* the merge. For inner joins we need not be so picky.
*
* Furthermore, if it is a right/full join then *all* the explicit
* join clauses must be mergejoinable, else the executor will fail.
* If we are asked for a right join then just return NIL to indicate
* no mergejoin is possible (we can handle it as a left join instead).
* If we are asked for a full join then emit an error, because there
* is no fallback.
* join clauses must be mergejoinable, else the executor will
* fail. If we are asked for a right join then just return NIL to
* indicate no mergejoin is possible (we can handle it as a left
* join instead). If we are asked for a full join then emit an
* error, because there is no fallback.
*/
if (isouterjoin)
{
@@ -897,7 +902,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
{
case JOIN_RIGHT:
if (restrictinfo->mergejoinoperator == InvalidOid)
return NIL; /* not mergejoinable */
return NIL; /* not mergejoinable */
break;
case JOIN_FULL:
if (restrictinfo->mergejoinoperator == InvalidOid)

src/backend/optimizer/path/joinrels.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.51 2001/02/16 00:03:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.52 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
static RelOptInfo *make_join_rel(Query *root, RelOptInfo *rel1,
RelOptInfo *rel2, JoinType jointype);
RelOptInfo *rel2, JoinType jointype);
/*
@@ -44,18 +44,19 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* First, consider left-sided and right-sided plans, in which rels of
* exactly level-1 member relations are joined against initial relations.
* We prefer to join using join clauses, but if we find a rel of level-1
* members that has no join clauses, we will generate Cartesian-product
* joins against all initial rels not already contained in it.
* exactly level-1 member relations are joined against initial
* relations. We prefer to join using join clauses, but if we find a
* rel of level-1 members that has no join clauses, we will generate
* Cartesian-product joins against all initial rels not already
* contained in it.
*
* In the first pass (level == 2), we try to join each initial rel to each
* initial rel that appears later in joinrels[1]. (The mirror-image
* joins are handled automatically by make_join_rel.) In later
* passes, we try to join rels of size level-1 from joinrels[level-1]
* to each initial rel in joinrels[1].
* In the first pass (level == 2), we try to join each initial rel to
* each initial rel that appears later in joinrels[1]. (The
* mirror-image joins are handled automatically by make_join_rel.) In
* later passes, we try to join rels of size level-1 from
* joinrels[level-1] to each initial rel in joinrels[1].
*/
foreach(r, joinrels[level-1])
foreach(r, joinrels[level - 1])
{
RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
List *other_rels;
@@ -73,9 +74,9 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
* Note that if all available join clauses for this rel
* require more than one other rel, we will fail to make any
* joins against it here. That's OK; it'll be considered by
* "bushy plan" join code in a higher-level pass where we
* have those other rels collected into a join rel. See also
* the last-ditch case below.
* "bushy plan" join code in a higher-level pass where we have
* those other rels collected into a join rel. See also the
* last-ditch case below.
*/
new_rels = make_rels_by_clause_joins(root,
old_rel,
@@ -94,16 +95,16 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
}
/*
* At levels above 2 we will generate the same joined relation
* in multiple ways --- for example (a join b) join c is the same
* At levels above 2 we will generate the same joined relation in
* multiple ways --- for example (a join b) join c is the same
* RelOptInfo as (b join c) join a, though the second case will
* add a different set of Paths to it. To avoid making extra work
* for subsequent passes, do not enter the same RelOptInfo into our
* output list multiple times.
* add a different set of Paths to it. To avoid making extra work
* for subsequent passes, do not enter the same RelOptInfo into
* our output list multiple times.
*/
foreach(nr, new_rels)
{
RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
@@ -111,20 +112,21 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
}
/*
* Now, consider "bushy plans" in which relations of k initial rels are
* joined to relations of level-k initial rels, for 2 <= k <= level-2.
* Now, consider "bushy plans" in which relations of k initial rels
* are joined to relations of level-k initial rels, for 2 <= k <=
* level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is a
* suitable join clause, in order to avoid unreasonable growth of
* planning time.
*/
for (k = 2; ; k++)
for (k = 2;; k++)
{
int other_level = level - k;
/*
* Since make_join_rel(x, y) handles both x,y and y,x cases,
* we only need to go as far as the halfway point.
* Since make_join_rel(x, y) handles both x,y and y,x cases, we
* only need to go as far as the halfway point.
*/
if (k > other_level)
break;
@@ -139,7 +141,7 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
continue; /* we ignore clauseless joins here */
if (k == other_level)
other_rels = lnext(r); /* only consider remaining rels */
other_rels = lnext(r); /* only consider remaining rels */
else
other_rels = joinrels[other_level];
@@ -153,8 +155,8 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* OK, we can build a rel of the right level from this
* pair of rels. Do so if there is at least one usable
* join clause.
* pair of rels. Do so if there is at least one
* usable join clause.
*/
foreach(i, old_rel->joininfo)
{
@@ -170,7 +172,8 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/* Avoid making duplicate entries ... */
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
break; /* need not consider more joininfos */
break; /* need not consider more
* joininfos */
}
}
}
@@ -180,31 +183,34 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* Last-ditch effort: if we failed to find any usable joins so far,
* force a set of cartesian-product joins to be generated. This
* force a set of cartesian-product joins to be generated. This
* handles the special case where all the available rels have join
* clauses but we cannot use any of the joins yet. An example is
* clauses but we cannot use any of the joins yet. An example is
*
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
* The join clause will be usable at level 3, but at level 2 we have
* no choice but to make cartesian joins. We consider only left-sided
* The join clause will be usable at level 3, but at level 2 we have no
* choice but to make cartesian joins. We consider only left-sided
* and right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
{
/* This loop is just like the first one, except we always call
/*
* This loop is just like the first one, except we always call
* make_rels_by_clauseless_joins().
*/
foreach(r, joinrels[level-1])
foreach(r, joinrels[level - 1])
{
RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
List *other_rels;
if (level == 2)
other_rels = lnext(r); /* only consider remaining initial
* rels */
other_rels = lnext(r); /* only consider remaining initial
* rels */
else
other_rels = joinrels[1]; /* consider all initial rels */
other_rels = joinrels[1]; /* consider all initial
* rels */
new_rels = make_rels_by_clauseless_joins(root,
old_rel,
@@ -212,7 +218,7 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
foreach(nr, new_rels)
{
RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
@@ -266,6 +272,7 @@ make_rels_by_clause_joins(Query *root,
RelOptInfo *jrel;
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
/*
* Avoid entering same joinrel into our output list more
* than once. (make_rels_by_joins doesn't really care,
@@ -310,9 +317,10 @@ make_rels_by_clauseless_joins(Query *root,
RelOptInfo *jrel;
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
/*
* As long as given other_rels are distinct, don't need
* to test to see if jrel is already part of output list.
* As long as given other_rels are distinct, don't need to
* test to see if jrel is already part of output list.
*/
result = lcons(jrel, result);
}
@@ -325,7 +333,7 @@ make_rels_by_clauseless_joins(Query *root,
/*
* make_jointree_rel
* Find or build a RelOptInfojoin rel representing a specific
* jointree item. For JoinExprs, we only consider the construction
* jointree item. For JoinExprs, we only consider the construction
* path that corresponds exactly to what the user wrote.
*/
RelOptInfo *

src/backend/optimizer/path/pathkeys.c

@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.30 2001/01/24 19:42:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.31 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
static PathKeyItem *makePathKeyItem(Node *key, Oid sortop);
static List *make_canonical_pathkey(Query *root, PathKeyItem *item);
static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
AttrNumber varattno);
AttrNumber varattno);
/*
@@ -89,10 +89,10 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
* into our new set. When done, we add the new set to the front of
* equi_key_list.
*
* It may well be that the two items we're given are already known to
* be equijoin-equivalent, in which case we don't need to change our
* data structure. If we find both of them in the same equivalence
* set to start with, we can quit immediately.
* It may well be that the two items we're given are already known to be
* equijoin-equivalent, in which case we don't need to change our data
* structure. If we find both of them in the same equivalence set to
* start with, we can quit immediately.
*
* This is a standard UNION-FIND problem, for which there exist better
* data structures than simple lists. If this code ever proves to be
@@ -109,7 +109,11 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
if (item1here || item2here)
{
/* If find both in same equivalence set, no need to do any more */
/*
* If find both in same equivalence set, no need to do any
* more
*/
if (item1here && item2here)
{
/* Better not have seen only one in an earlier set... */
@@ -126,7 +130,8 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
/*
* Remove old set from equi_key_list. NOTE this does not
* change lnext(cursetlink), so the foreach loop doesn't break.
* change lnext(cursetlink), so the foreach loop doesn't
* break.
*/
root->equi_key_list = lremove(curset, root->equi_key_list);
freeList(curset); /* might as well recycle old cons cells */
@@ -171,8 +176,8 @@ generate_implied_equalities(Query *root)
continue;
/*
* Match each item in the set with all that appear after it
* (it's sufficient to generate A=B, need not process B=A too).
* Match each item in the set with all that appear after it (it's
* sufficient to generate A=B, need not process B=A too).
*/
foreach(ptr1, curset)
{
@@ -246,11 +251,12 @@ canonicalize_pathkeys(Query *root, List *pathkeys)
Assert(pathkey != NIL);
item = (PathKeyItem *) lfirst(pathkey);
cpathkey = make_canonical_pathkey(root, item);
/*
* Eliminate redundant ordering requests --- ORDER BY A,A
* is the same as ORDER BY A. We want to check this only
* after we have canonicalized the keys, so that equivalent-key
* knowledge is used when deciding if an item is redundant.
* Eliminate redundant ordering requests --- ORDER BY A,A is the
* same as ORDER BY A. We want to check this only after we have
* canonicalized the keys, so that equivalent-key knowledge is
* used when deciding if an item is redundant.
*/
if (!ptrMember(cpathkey, new_pathkeys))
new_pathkeys = lappend(new_pathkeys, cpathkey);
@@ -285,8 +291,8 @@ compare_pathkeys(List *keys1, List *keys2)
List *subkey2 = lfirst(key2);
/*
* XXX would like to check that we've been given canonicalized input,
* but query root not accessible here...
* XXX would like to check that we've been given canonicalized
* input, but query root not accessible here...
*/
#ifdef NOT_USED
Assert(ptrMember(subkey1, root->equi_key_list));
@@ -295,7 +301,7 @@ compare_pathkeys(List *keys1, List *keys2)
/*
* We will never have two subkeys where one is a subset of the
* other, because of the canonicalization process. Either they
* other, because of the canonicalization process. Either they
* are equal or they ain't. Furthermore, we only need pointer
* comparison to detect equality.
*/
@@ -555,9 +561,10 @@ build_index_pathkeys(Query *root,
/* OK, make a sublist for this sort key */
item = makePathKeyItem((Node *) relvar, sortop);
cpathkey = make_canonical_pathkey(root, item);
/*
* Eliminate redundant ordering info; could happen if query
* is such that index keys are equijoined...
* Eliminate redundant ordering info; could happen if query is
* such that index keys are equijoined...
*/
if (!ptrMember(cpathkey, retval))
retval = lappend(retval, cpathkey);
@@ -693,7 +700,7 @@ make_pathkeys_for_sortclauses(List *sortclauses,
*
* RestrictInfo contains fields in which we may cache the result
* of looking up the canonical pathkeys for the left and right sides
* of the mergeclause. (Note that in normal cases they will be the
* of the mergeclause. (Note that in normal cases they will be the
* same, but not if the mergeclause appears above an OUTER JOIN.)
* This is a worthwhile savings because these routines will be invoked
* many times when dealing with a many-relation query.
@@ -756,8 +763,8 @@ find_mergeclauses_for_pathkeys(Query *root,
/*
* We can match a pathkey against either left or right side of any
* mergejoin clause we haven't used yet. For the moment we use a
* dumb "greedy" algorithm with no backtracking. Is it worth being
* any smarter to make a longer list of usable mergeclauses?
* dumb "greedy" algorithm with no backtracking. Is it worth
* being any smarter to make a longer list of usable mergeclauses?
* Probably not.
*/
foreach(j, restrictinfos)
@@ -765,9 +772,10 @@ find_mergeclauses_for_pathkeys(Query *root,
RestrictInfo *restrictinfo = lfirst(j);
cache_mergeclause_pathkeys(root, restrictinfo);
/*
* We can compare canonical pathkey sublists by simple
* pointer equality; see compare_pathkeys.
* We can compare canonical pathkey sublists by simple pointer
* equality; see compare_pathkeys.
*/
if ((pathkey == restrictinfo->left_pathkey ||
pathkey == restrictinfo->right_pathkey) &&
@@ -830,7 +838,7 @@ make_pathkeys_for_mergeclauses(Query *root,
cache_mergeclause_pathkeys(root, restrictinfo);
key = (Node *) get_leftop(restrictinfo->clause);
if (IsA(key, Var) && intMember(((Var *) key)->varno, rel->relids))
if (IsA(key, Var) &&intMember(((Var *) key)->varno, rel->relids))
{
/* Rel is left side of mergeclause */
pathkey = restrictinfo->left_pathkey;
@@ -838,7 +846,7 @@ make_pathkeys_for_mergeclauses(Query *root,
else
{
key = (Node *) get_rightop(restrictinfo->clause);
if (IsA(key, Var) && intMember(((Var *) key)->varno, rel->relids))
if (IsA(key, Var) &&intMember(((Var *) key)->varno, rel->relids))
{
/* Rel is right side of mergeclause */
pathkey = restrictinfo->right_pathkey;
@@ -851,13 +859,14 @@ make_pathkeys_for_mergeclauses(Query *root,
}
/*
* When we are given multiple merge clauses, it's possible that some
* clauses refer to the same vars as earlier clauses. There's no
* reason for us to specify sort keys like (A,B,A) when (A,B) will
* do --- and adding redundant sort keys makes add_path think that
* this sort order is different from ones that are really the same,
* so don't do it. Since we now have a canonicalized pathkey,
* a simple ptrMember test is sufficient to detect redundant keys.
* When we are given multiple merge clauses, it's possible that
* some clauses refer to the same vars as earlier clauses.
* There's no reason for us to specify sort keys like (A,B,A) when
* (A,B) will do --- and adding redundant sort keys makes add_path
* think that this sort order is different from ones that are
* really the same, so don't do it. Since we now have a
* canonicalized pathkey, a simple ptrMember test is sufficient to
* detect redundant keys.
*/
if (!ptrMember(pathkey, pathkeys))
pathkeys = lappend(pathkeys, pathkey);
@@ -911,6 +920,7 @@ pathkeys_useful_for_merging(Query *root, RelOptInfo *rel, List *pathkeys)
if (restrictinfo->mergejoinoperator == InvalidOid)
continue;
cache_mergeclause_pathkeys(root, restrictinfo);
/*
* We can compare canonical pathkey sublists by simple
* pointer equality; see compare_pathkeys.
@@ -984,7 +994,9 @@ truncate_useless_pathkeys(Query *root,
nuseful2 = pathkeys_useful_for_ordering(root, pathkeys);
if (nuseful2 > nuseful)
nuseful = nuseful2;
/* Note: not safe to modify input list destructively, but we can avoid
/*
* Note: not safe to modify input list destructively, but we can avoid
* copying the list if we're not actually going to change it
*/
if (nuseful == length(pathkeys))