
Standard pgindent run for 8.1.

Bruce Momjian
2005-10-15 02:49:52 +00:00
parent 790c01d280
commit 1dc3498251
770 changed files with 34334 additions and 32507 deletions
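pgindent's chief visible effect in the hunks below is refilling multi-line comments to a standard width and normalizing whitespace and trailing-comment placement. A schematic before/after, adapted from the first clauses.c hunk below:

/* Before: the comment is wrapped short of the standard width.
 *
 * NB: because the parser sets the qual field to NULL in a query that
 * has no WHERE clause, we must consider a NULL input clause as TRUE.
 */

/* After: the same text, refilled to pgindent's standard width.
 *
 * NB: because the parser sets the qual field to NULL in a query that has
 * no WHERE clause, we must consider a NULL input clause as TRUE.
 */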

src/backend/optimizer/util/clauses.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.200 2005/07/03 21:14:17 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.201 2005/10/15 02:49:21 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -91,7 +91,7 @@ static Expr *inline_function(Oid funcid, Oid result_type, List *args,
static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
int *usecounts);
static Node *substitute_actual_parameters_mutator(Node *node,
substitute_actual_parameters_context *context);
substitute_actual_parameters_context *context);
static void sql_inline_error_callback(void *arg);
static Expr *evaluate_expr(Expr *expr, Oid result_type);
@@ -308,10 +308,10 @@ List *
make_ands_implicit(Expr *clause)
{
/*
* NB: because the parser sets the qual field to NULL in a query that
* has no WHERE clause, we must consider a NULL input clause as TRUE,
* even though one might more reasonably think it FALSE. Grumble. If
* this causes trouble, consider changing the parser's behavior.
* NB: because the parser sets the qual field to NULL in a query that has
* no WHERE clause, we must consider a NULL input clause as TRUE, even
* though one might more reasonably think it FALSE. Grumble. If this
* causes trouble, consider changing the parser's behavior.
*/
if (clause == NULL)
return NIL; /* NULL -> NIL list == TRUE */
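A minimal sketch of the convention that comment describes, assuming the backend's List/BoolExpr helpers (the committed function also handles further cases, e.g. a constant TRUE clause):

#include "postgres.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"
#include "optimizer/clauses.h"

/* Sketch only: a NULL qual stands for TRUE, i.e. the empty implicit-AND
 * list NIL; a top-level AND is flattened; anything else becomes a
 * one-item list. */
static List *
make_ands_implicit_sketch(Expr *clause)
{
    if (clause == NULL)
        return NIL;                         /* NULL -> NIL list == TRUE */
    if (and_clause((Node *) clause))
        return ((BoolExpr *) clause)->args; /* flatten a top-level AND */
    return list_make1(clause);              /* single-item implicit AND */
}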
@@ -357,8 +357,7 @@ contain_agg_clause_walker(Node *node, void *context)
if (IsA(node, Aggref))
{
Assert(((Aggref *) node)->agglevelsup == 0);
return true; /* abort the tree traversal and return
* true */
return true; /* abort the tree traversal and return true */
}
Assert(!IsA(node, SubLink));
return expression_tree_walker(node, contain_agg_clause_walker, context);
@@ -438,9 +437,9 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)
/*
* If the transition type is pass-by-value then it doesn't add
* anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of
* the value itself, plus palloc overhead.
* anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of the
* value itself, plus palloc overhead.
*/
if (!get_typbyval(aggtranstype))
{
@@ -470,7 +469,7 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)
if (contain_agg_clause((Node *) aggref->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("aggregate function calls may not be nested")));
errmsg("aggregate function calls may not be nested")));
/*
* Having checked that, we need not recurse into the argument.
@@ -579,8 +578,7 @@ contain_subplans_walker(Node *node, void *context)
return false;
if (IsA(node, SubPlan) ||
IsA(node, SubLink))
return true; /* abort the tree traversal and return
* true */
return true; /* abort the tree traversal and return true */
return expression_tree_walker(node, contain_subplans_walker, context);
}
@@ -882,9 +880,9 @@ is_pseudo_constant_clause(Node *clause)
{
/*
* We could implement this check in one recursive scan. But since the
* check for volatile functions is both moderately expensive and
* unlikely to fail, it seems better to look for Vars first and only
* check for volatile functions if we find no Vars.
* check for volatile functions is both moderately expensive and unlikely
* to fail, it seems better to look for Vars first and only check for
* volatile functions if we find no Vars.
*/
if (!contain_var_clause(clause) &&
!contain_volatile_functions(clause))
@@ -958,13 +956,12 @@ has_distinct_on_clause(Query *query)
/*
* If the DISTINCT list contains all the nonjunk targetlist items, and
* nothing else (ie, no junk tlist items), then it's a simple
* DISTINCT, else it's DISTINCT ON. We do not require the lists to be
* in the same order (since the parser may have adjusted the DISTINCT
* clause ordering to agree with ORDER BY). Furthermore, a
* non-DISTINCT junk tlist item that is in the sortClause is also
* evidence of DISTINCT ON, since we don't allow ORDER BY on junk
* tlist items when plain DISTINCT is used.
* nothing else (ie, no junk tlist items), then it's a simple DISTINCT,
* else it's DISTINCT ON. We do not require the lists to be in the same
* order (since the parser may have adjusted the DISTINCT clause ordering
* to agree with ORDER BY). Furthermore, a non-DISTINCT junk tlist item
* that is in the sortClause is also evidence of DISTINCT ON, since we
* don't allow ORDER BY on junk tlist items when plain DISTINCT is used.
*
* This code assumes that the DISTINCT list is valid, ie, all its entries
* match some entry of the tlist.
@@ -1224,7 +1221,7 @@ eval_const_expressions(Node *node)
*
* Currently the extra steps that are taken in this mode are:
* 1. Substitute values for Params, where a bound Param value has been made
* available by the caller of planner().
* available by the caller of planner().
* 2. Fold stable, as well as immutable, functions to constants.
*--------------------
*/
@@ -1264,11 +1261,11 @@ eval_const_expressions_mutator(Node *node,
if (paramInfo)
{
/*
* Found it, so return a Const representing the param
* value. Note that we don't copy pass-by-ref datatypes,
* so the Const will only be valid as long as the bound
* parameter list exists. This is okay for intended uses
* of estimate_expression_value().
* Found it, so return a Const representing the param value.
* Note that we don't copy pass-by-ref datatypes, so the Const
* will only be valid as long as the bound parameter list
* exists. This is okay for intended uses of
* estimate_expression_value().
*/
int16 typLen;
bool typByVal;
@@ -1294,16 +1291,16 @@ eval_const_expressions_mutator(Node *node,
/*
* Reduce constants in the FuncExpr's arguments. We know args is
* either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
* either NIL or a List node, so we can call expression_tree_mutator
* directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
eval_const_expressions_mutator,
(void *) context);
/*
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
* Code for op/func reduction is pretty bulky, so split it out as a
* separate function.
*/
simple = simplify_function(expr->funcid, expr->funcresulttype, args,
true, context);
@@ -1312,8 +1309,8 @@ eval_const_expressions_mutator(Node *node,
/*
* The expression cannot be simplified any further, so build and
* return a replacement FuncExpr node using the
* possibly-simplified arguments.
* return a replacement FuncExpr node using the possibly-simplified
* arguments.
*/
newexpr = makeNode(FuncExpr);
newexpr->funcid = expr->funcid;
@@ -1331,23 +1328,23 @@ eval_const_expressions_mutator(Node *node,
OpExpr *newexpr;
/*
* Reduce constants in the OpExpr's arguments. We know args is
* either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
* Reduce constants in the OpExpr's arguments. We know args is either
* NIL or a List node, so we can call expression_tree_mutator directly
* rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
eval_const_expressions_mutator,
(void *) context);
/*
* Need to get OID of underlying function. Okay to scribble on
* input to this extent.
* Need to get OID of underlying function. Okay to scribble on input
* to this extent.
*/
set_opfuncid(expr);
/*
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
* Code for op/func reduction is pretty bulky, so split it out as a
* separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype, args,
true, context);
@@ -1355,8 +1352,8 @@ eval_const_expressions_mutator(Node *node,
return (Node *) simple;
/*
* If the operator is boolean equality, we know how to simplify
* cases involving one constant and one non-constant argument.
* If the operator is boolean equality, we know how to simplify cases
* involving one constant and one non-constant argument.
*/
if (expr->opno == BooleanEqualOperator)
{
@@ -1390,18 +1387,17 @@ eval_const_expressions_mutator(Node *node,
DistinctExpr *newexpr;
/*
* Reduce constants in the DistinctExpr's arguments. We know args
* is either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
* Reduce constants in the DistinctExpr's arguments. We know args is
* either NIL or a List node, so we can call expression_tree_mutator
* directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
eval_const_expressions_mutator,
(void *) context);
/*
* We must do our own check for NULLs because DistinctExpr has
* different results for NULL input than the underlying operator
* does.
* different results for NULL input than the underlying operator does.
*/
foreach(arg, args)
{
@@ -1429,15 +1425,14 @@ eval_const_expressions_mutator(Node *node,
/* (NOT okay to try to inline it, though!) */
/*
* Need to get OID of underlying function. Okay to scribble
* on input to this extent.
* Need to get OID of underlying function. Okay to scribble on
* input to this extent.
*/
set_opfuncid((OpExpr *) expr); /* rely on struct
* equivalence */
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
/*
* Code for op/func reduction is pretty bulky, so split it out
* as a separate function.
* Code for op/func reduction is pretty bulky, so split it out as
* a separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype,
args, false, context);
@@ -1482,7 +1477,7 @@ eval_const_expressions_mutator(Node *node,
bool forceTrue = false;
newargs = simplify_or_arguments(expr->args, context,
&haveNull, &forceTrue);
&haveNull, &forceTrue);
if (forceTrue)
return makeBoolConst(true, false);
if (haveNull)
@@ -1503,7 +1498,7 @@ eval_const_expressions_mutator(Node *node,
bool forceFalse = false;
newargs = simplify_and_arguments(expr->args, context,
&haveNull, &forceFalse);
&haveNull, &forceFalse);
if (forceFalse)
return makeBoolConst(false, false);
if (haveNull)
@@ -1554,17 +1549,17 @@ eval_const_expressions_mutator(Node *node,
/*
* Return a SubPlan unchanged --- too late to do anything with it.
*
* XXX should we ereport() here instead? Probably this routine
* should never be invoked after SubPlan creation.
* XXX should we ereport() here instead? Probably this routine should
* never be invoked after SubPlan creation.
*/
return node;
}
if (IsA(node, RelabelType))
{
/*
* If we can simplify the input to a constant, then we don't need
* the RelabelType node anymore: just change the type field of the
* Const node. Otherwise, must copy the RelabelType node.
* If we can simplify the input to a constant, then we don't need the
* RelabelType node anymore: just change the type field of the Const
* node. Otherwise, must copy the RelabelType node.
*/
RelabelType *relabel = (RelabelType *) node;
Node *arg;
@@ -1573,8 +1568,8 @@ eval_const_expressions_mutator(Node *node,
context);
/*
* If we find stacked RelabelTypes (eg, from foo :: int :: oid) we
* can discard all but the top one.
* If we find stacked RelabelTypes (eg, from foo :: int :: oid) we can
* discard all but the top one.
*/
while (arg && IsA(arg, RelabelType))
arg = (Node *) ((RelabelType *) arg)->arg;
@@ -1586,10 +1581,9 @@ eval_const_expressions_mutator(Node *node,
con->consttype = relabel->resulttype;
/*
* relabel's resulttypmod is discarded, which is OK for now;
* if the type actually needs a runtime length coercion then
* there should be a function call to do it just above this
* node.
* relabel's resulttypmod is discarded, which is OK for now; if
* the type actually needs a runtime length coercion then there
* should be a function call to do it just above this node.
*/
return (Node *) con;
}
@@ -1692,7 +1686,7 @@ eval_const_expressions_mutator(Node *node,
/*
* Found a TRUE condition, so none of the remaining alternatives
* can be reached. We treat the result as the default result.
* can be reached. We treat the result as the default result.
*/
defresult = caseresult;
break;
@@ -1720,9 +1714,9 @@ eval_const_expressions_mutator(Node *node,
if (IsA(node, CaseTestExpr))
{
/*
* If we know a constant test value for the current CASE
* construct, substitute it for the placeholder. Else just
* return the placeholder as-is.
* If we know a constant test value for the current CASE construct,
* substitute it for the placeholder. Else just return the
* placeholder as-is.
*/
if (context->case_val)
return copyObject(context->case_val);
@@ -1803,15 +1797,15 @@ eval_const_expressions_mutator(Node *node,
if (IsA(node, FieldSelect))
{
/*
* We can optimize field selection from a whole-row Var into a
* simple Var. (This case won't be generated directly by the
* parser, because ParseComplexProjection short-circuits it. But
* it can arise while simplifying functions.) Also, we can
* optimize field selection from a RowExpr construct.
* We can optimize field selection from a whole-row Var into a simple
* Var. (This case won't be generated directly by the parser, because
* ParseComplexProjection short-circuits it. But it can arise while
* simplifying functions.) Also, we can optimize field selection from
* a RowExpr construct.
*
* We must however check that the declared type of the field is still
* the same as when the FieldSelect was created --- this can
* change if someone did ALTER COLUMN TYPE on the rowtype.
* We must however check that the declared type of the field is still the
* same as when the FieldSelect was created --- this can change if
* someone did ALTER COLUMN TYPE on the rowtype.
*/
FieldSelect *fselect = (FieldSelect *) node;
FieldSelect *newfselect;
@@ -1840,7 +1834,7 @@ eval_const_expressions_mutator(Node *node,
fselect->fieldnum <= list_length(rowexpr->args))
{
Node *fld = (Node *) list_nth(rowexpr->args,
fselect->fieldnum - 1);
fselect->fieldnum - 1);
if (rowtype_field_matches(rowexpr->row_typeid,
fselect->fieldnum,
@@ -1861,10 +1855,10 @@ eval_const_expressions_mutator(Node *node,
/*
* For any node type not handled above, we recurse using
* expression_tree_mutator, which will copy the node unchanged but try
* to simplify its arguments (if any) using this routine. For example:
* we cannot eliminate an ArrayRef node, but we might be able to
* simplify constant expressions in its subscripts.
* expression_tree_mutator, which will copy the node unchanged but try to
* simplify its arguments (if any) using this routine. For example: we
* cannot eliminate an ArrayRef node, but we might be able to simplify
* constant expressions in its subscripts.
*/
return expression_tree_mutator(node, eval_const_expressions_mutator,
(void *) context);
@@ -1900,7 +1894,7 @@ simplify_or_arguments(List *args,
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
* argument lists of a single OR operator. To avoid blowing out the stack
* argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
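The worklist idea described in that comment can be modeled outside the planner. A standalone toy (plain C with simplified node types, not backend code) that flattens nested ORs by prepending a node's children to a to-do list instead of recursing:

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the technique above: a binary tree of OR nodes and leaves,
 * flattened iteratively with a to-do list so stack depth stays constant. */
typedef struct Expr
{
    int          is_or;         /* 1 = binary OR node, 0 = leaf */
    int          leaf_id;       /* valid when is_or == 0 */
    struct Expr *left;
    struct Expr *right;
} Expr;

typedef struct Cell
{
    Expr        *expr;
    struct Cell *next;
} Cell;

static Cell *
prepend(Cell *head, Expr *e)
{
    Cell *c = malloc(sizeof(Cell));   /* toy code: no OOM check */

    c->expr = e;
    c->next = head;
    return c;
}

static void
flatten_or(Expr *root)
{
    Cell *todo = prepend(NULL, root);

    while (todo != NULL)
    {
        Cell *cell = todo;
        Expr *e = cell->expr;

        todo = cell->next;
        free(cell);
        if (e->is_or)
        {
            /* prepend children to the to-do list instead of recursing */
            todo = prepend(prepend(todo, e->right), e->left);
        }
        else
            printf("arg %d\n", e->leaf_id);   /* emit one flattened arg */
    }
}

int
main(void)
{
    /* ((1 OR 2) OR 3): prints args 1, 2, 3 in order */
    Expr l1 = {0, 1, NULL, NULL};
    Expr l2 = {0, 2, NULL, NULL};
    Expr l3 = {0, 3, NULL, NULL};
    Expr inner = {1, 0, &l1, &l2};
    Expr root = {1, 0, &inner, &l3};

    flatten_or(&root);
    return 0;
}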
@@ -1915,14 +1909,14 @@ simplify_or_arguments(List *args,
/* flatten nested ORs as per above comment */
if (or_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);
/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
List *oldhdr = unprocessed_args;
List *oldhdr = unprocessed_args;
unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
@@ -1934,23 +1928,22 @@ simplify_or_arguments(List *args,
arg = eval_const_expressions_mutator(arg, context);
/*
* It is unlikely but not impossible for simplification of a
* non-OR clause to produce an OR. Recheck, but don't be
* too tense about it since it's not a mainstream case.
* In particular we don't worry about const-simplifying
* the input twice.
* It is unlikely but not impossible for simplification of a non-OR
* clause to produce an OR. Recheck, but don't be too tense about it
* since it's not a mainstream case. In particular we don't worry
* about const-simplifying the input twice.
*/
if (or_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);
unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}
/*
* OK, we have a const-simplified non-OR argument. Process it
* per comments above.
* OK, we have a const-simplified non-OR argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
{
@@ -2018,14 +2011,14 @@ simplify_and_arguments(List *args,
/* flatten nested ANDs as per above comment */
if (and_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);
/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
List *oldhdr = unprocessed_args;
List *oldhdr = unprocessed_args;
unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
@@ -2037,23 +2030,22 @@ simplify_and_arguments(List *args,
arg = eval_const_expressions_mutator(arg, context);
/*
* It is unlikely but not impossible for simplification of a
* non-AND clause to produce an AND. Recheck, but don't be
* too tense about it since it's not a mainstream case.
* In particular we don't worry about const-simplifying
* the input twice.
* It is unlikely but not impossible for simplification of a non-AND
* clause to produce an AND. Recheck, but don't be too tense about it
* since it's not a mainstream case. In particular we don't worry
* about const-simplifying the input twice.
*/
if (and_clause(arg))
{
List *subargs = list_copy(((BoolExpr *) arg)->args);
List *subargs = list_copy(((BoolExpr *) arg)->args);
unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}
/*
* OK, we have a const-simplified non-AND argument. Process it
* per comments above.
* OK, we have a const-simplified non-AND argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
{
@@ -2111,7 +2103,7 @@ simplify_boolean_equality(List *args)
{
Assert(!((Const *) leftop)->constisnull);
if (DatumGetBool(((Const *) leftop)->constvalue))
return rightop; /* true = foo */
return rightop; /* true = foo */
else
return make_notclause(rightop); /* false = foo */
}
@@ -2119,7 +2111,7 @@ simplify_boolean_equality(List *args)
{
Assert(!((Const *) rightop)->constisnull);
if (DatumGetBool(((Const *) rightop)->constvalue))
return leftop; /* foo = true */
return leftop; /* foo = true */
else
return make_notclause(leftop); /* foo = false */
}
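The two hunks above touch the four rewrites this function performs. A standalone sanity check (illustrative only, not backend code) that the underlying boolean identities hold for both input values:

#include <assert.h>
#include <stdbool.h>

/* Checks the identities behind simplify_boolean_equality():
 *   true = foo  -> foo        false = foo -> NOT foo
 *   foo = true  -> foo        foo = false -> NOT foo */
int
main(void)
{
    for (int i = 0; i <= 1; i++)
    {
        bool foo = (i != 0);

        assert((true == foo) == foo);
        assert((false == foo) == !foo);
        assert((foo == true) == foo);
        assert((foo == false) == !foo);
    }
    return 0;
}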
@@ -2146,12 +2138,12 @@ simplify_function(Oid funcid, Oid result_type, List *args,
Expr *newexpr;
/*
* We have two strategies for simplification: either execute the
* function to deliver a constant result, or expand in-line the body
* of the function definition (which only works for simple
* SQL-language functions, but that is a common case). In either case
* we need access to the function's pg_proc tuple, so fetch it just
* once to use in both attempts.
* We have two strategies for simplification: either execute the function
* to deliver a constant result, or expand in-line the body of the
* function definition (which only works for simple SQL-language
* functions, but that is a common case). In either case we need access
* to the function's pg_proc tuple, so fetch it just once to use in both
* attempts.
*/
func_tuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcid),
@@ -2200,15 +2192,15 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
return NULL;
/*
* Can't simplify if it returns RECORD. The immediate problem is that
* it will be needing an expected tupdesc which we can't supply here.
* Can't simplify if it returns RECORD. The immediate problem is that it
* will be needing an expected tupdesc which we can't supply here.
*
* In the case where it has OUT parameters, it could get by without an
* expected tupdesc, but we still have issues: get_expr_result_type()
* doesn't know how to extract type info from a RECORD constant, and
* in the case of a NULL function result there doesn't seem to be any
* clean way to fix that. In view of the likelihood of there being
* still other gotchas, seems best to leave the function call unreduced.
* doesn't know how to extract type info from a RECORD constant, and in
* the case of a NULL function result there doesn't seem to be any clean
* way to fix that. In view of the likelihood of there being still other
* gotchas, seems best to leave the function call unreduced.
*/
if (funcform->prorettype == RECORDOID)
return NULL;
@@ -2225,10 +2217,10 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
}
/*
* If the function is strict and has a constant-NULL input, it will
* never be called at all, so we can replace the call by a NULL
* constant, even if there are other inputs that aren't constant, and
* even if the function is not otherwise immutable.
* If the function is strict and has a constant-NULL input, it will never
* be called at all, so we can replace the call by a NULL constant, even
* if there are other inputs that aren't constant, and even if the
* function is not otherwise immutable.
*/
if (funcform->proisstrict && has_null_input)
return (Expr *) makeNullConst(result_type);
@@ -2242,16 +2234,16 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
return NULL;
/*
* Ordinarily we are only allowed to simplify immutable functions.
* But for purposes of estimation, we consider it okay to simplify
* functions that are merely stable; the risk that the result might
* change from planning time to execution time is worth taking in
* preference to not being able to estimate the value at all.
* Ordinarily we are only allowed to simplify immutable functions. But for
* purposes of estimation, we consider it okay to simplify functions that
* are merely stable; the risk that the result might change from planning
* time to execution time is worth taking in preference to not being able
* to estimate the value at all.
*/
if (funcform->provolatile == PROVOLATILE_IMMUTABLE)
/* okay */ ;
/* okay */ ;
else if (context->estimate && funcform->provolatile == PROVOLATILE_STABLE)
/* okay */ ;
/* okay */ ;
else
return NULL;
@@ -2318,8 +2310,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
int i;
/*
* Forget it if the function is not SQL-language or has other
* showstopper properties. (The nargs check is just paranoia.)
* Forget it if the function is not SQL-language or has other showstopper
* properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -2336,8 +2328,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
return NULL;
/*
* Setup error traceback support for ereport(). This is so that we
* can finger the function that bad information came from.
* Setup error traceback support for ereport(). This is so that we can
* finger the function that bad information came from.
*/
sqlerrcontext.callback = sql_inline_error_callback;
sqlerrcontext.arg = func_tuple;
@@ -2345,8 +2337,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
error_context_stack = &sqlerrcontext;
/*
* Make a temporary memory context, so that we don't leak all the
* stuff that parsing might create.
* Make a temporary memory context, so that we don't leak all the stuff
* that parsing might create.
*/
mycxt = AllocSetContextCreate(CurrentMemoryContext,
"inline_function",
@@ -2383,10 +2375,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
src = DatumGetCString(DirectFunctionCall1(textout, tmp));
/*
* We just do parsing and parse analysis, not rewriting, because
* rewriting will not affect table-free-SELECT-only queries, which is
* all that we care about. Also, we can punt as soon as we detect
* more than one command in the function body.
* We just do parsing and parse analysis, not rewriting, because rewriting
* will not affect table-free-SELECT-only queries, which is all that we
* care about. Also, we can punt as soon as we detect more than one
* command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
if (list_length(raw_parsetree_list) != 1)
@@ -2425,24 +2417,24 @@ inline_function(Oid funcid, Oid result_type, List *args,
newexpr = (Node *) ((TargetEntry *) linitial(querytree->targetList))->expr;
/*
* If the function has any arguments declared as polymorphic types,
* then it wasn't type-checked at definition time; must do so now.
* (This will raise an error if wrong, but that's okay since the
* function would fail at runtime anyway. Note we do not try this
* until we have verified that no rewriting was needed; that's
* probably not important, but let's be careful.)
* If the function has any arguments declared as polymorphic types, then
* it wasn't type-checked at definition time; must do so now. (This will
* raise an error if wrong, but that's okay since the function would fail
* at runtime anyway. Note we do not try this until we have verified that
* no rewriting was needed; that's probably not important, but let's be
* careful.)
*/
if (polymorphic)
(void) check_sql_fn_retval(funcid, result_type, querytree_list, NULL);
/*
* Additional validity checks on the expression. It mustn't return a
* set, and it mustn't be more volatile than the surrounding function
* (this is to avoid breaking hacks that involve pretending a function
* is immutable when it really ain't). If the surrounding function is
* declared strict, then the expression must contain only strict
* constructs and must use all of the function parameters (this is
* overkill, but an exact analysis is hard).
* Additional validity checks on the expression. It mustn't return a set,
* and it mustn't be more volatile than the surrounding function (this is
* to avoid breaking hacks that involve pretending a function is immutable
* when it really ain't). If the surrounding function is declared strict,
* then the expression must contain only strict constructs and must use
* all of the function parameters (this is overkill, but an exact analysis
* is hard).
*/
if (expression_returns_set(newexpr))
goto fail;
@@ -2459,10 +2451,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
goto fail;
/*
* We may be able to do it; there are still checks on parameter usage
* to make, but those are most easily done in combination with the
* actual substitution of the inputs. So start building expression
* with inputs substituted.
* We may be able to do it; there are still checks on parameter usage to
* make, but those are most easily done in combination with the actual
* substitution of the inputs. So start building expression with inputs
* substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
newexpr = substitute_actual_parameters(newexpr, funcform->pronargs,
@@ -2486,8 +2478,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
QualCost eval_cost;
/*
* We define "expensive" as "contains any subplan or more than
* 10 operators". Note that the subplan search has to be done
* We define "expensive" as "contains any subplan or more than 10
* operators". Note that the subplan search has to be done
* explicitly, since cost_qual_eval() will barf on unplanned
* subselects.
*/
@@ -2509,8 +2501,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
}
/*
* Whew --- we can make the substitution. Copy the modified
* expression out of the temporary memory context, and clean up.
* Whew --- we can make the substitution. Copy the modified expression
* out of the temporary memory context, and clean up.
*/
MemoryContextSwitchTo(oldcxt);
@@ -2519,8 +2511,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
MemoryContextDelete(mycxt);
/*
* Recursively try to simplify the modified expression. Here we must
* add the current function to the context list of active functions.
* Recursively try to simplify the modified expression. Here we must add
* the current function to the context list of active functions.
*/
context->active_fns = lcons_oid(funcid, context->active_fns);
newexpr = eval_const_expressions_mutator(newexpr, context);
@@ -2557,7 +2549,7 @@ substitute_actual_parameters(Node *expr, int nargs, List *args,
static Node *
substitute_actual_parameters_mutator(Node *node,
substitute_actual_parameters_context *context)
substitute_actual_parameters_context *context)
{
if (node == NULL)
return NULL;
@@ -2646,10 +2638,10 @@ evaluate_expr(Expr *expr, Oid result_type)
/*
* And evaluate it.
*
* It is OK to use a default econtext because none of the ExecEvalExpr()
* code used in this situation will use econtext. That might seem
* fortuitous, but it's not so unreasonable --- a constant expression
* does not depend on context, by definition, n'est ce pas?
* It is OK to use a default econtext because none of the ExecEvalExpr() code
* used in this situation will use econtext. That might seem fortuitous,
* but it's not so unreasonable --- a constant expression does not depend
* on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExprSwitchContext(exprstate,
GetPerTupleExprContext(estate),
@@ -2779,12 +2771,12 @@ expression_tree_walker(Node *node,
ListCell *temp;
/*
* The walker has already visited the current node, and so we need
* only recurse into any sub-nodes it has.
* The walker has already visited the current node, and so we need only
* recurse into any sub-nodes it has.
*
* We assume that the walker is not interested in List nodes per se, so
* when we expect a List we just recurse directly to self without
* bothering to call the walker.
* We assume that the walker is not interested in List nodes per se, so when
* we expect a List we just recurse directly to self without bothering to
* call the walker.
*/
if (node == NULL)
return false;
@@ -2877,8 +2869,8 @@ expression_tree_walker(Node *node,
return true;
/*
* Also invoke the walker on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
* Also invoke the walker on the sublink's Query node, so it
* can recurse into the sub-query if it wants to.
*/
return walker(sublink->subselect, context);
}
@@ -3167,8 +3159,8 @@ expression_tree_mutator(Node *node,
void *context)
{
/*
* The mutator has already decided not to modify the current node, but
* we must call the mutator for any sub-nodes.
* The mutator has already decided not to modify the current node, but we
* must call the mutator for any sub-nodes.
*/
#define FLATCOPY(newnode, node, nodetype) \
@@ -3286,8 +3278,8 @@ expression_tree_mutator(Node *node,
MUTATE(newnode->lefthand, sublink->lefthand, List *);
/*
* Also invoke the mutator on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
* Also invoke the mutator on the sublink's Query node, so it
* can recurse into the sub-query if it wants to.
*/
MUTATE(newnode->subselect, sublink->subselect, Node *);
return (Node *) newnode;
@@ -3468,10 +3460,9 @@ expression_tree_mutator(Node *node,
case T_List:
{
/*
* We assume the mutator isn't interested in the list
* nodes per se, so just invoke it on each list element.
* NOTE: this would fail badly on a list with integer
* elements!
* We assume the mutator isn't interested in the list nodes
* per se, so just invoke it on each list element. NOTE: this
* would fail badly on a list with integer elements!
*/
List *resultlist;
ListCell *temp;

src/backend/optimizer/util/pathnode.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.124 2005/07/22 19:12:01 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.125 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
return +1;
/*
* If paths have the same startup cost (not at all unlikely),
* order them by total cost.
* If paths have the same startup cost (not at all unlikely), order
* them by total cost.
*/
if (path1->total_cost < path2->total_cost)
return -1;
@@ -111,8 +111,8 @@ compare_fuzzy_path_costs(Path *path1, Path *path2, CostSelector criterion)
return -1;
/*
* If paths have the same startup cost (not at all unlikely),
* order them by total cost.
* If paths have the same startup cost (not at all unlikely), order
* them by total cost.
*/
if (path1->total_cost > path2->total_cost * 1.01)
return +1;
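The 1% slop visible in the hunk above is the essence of the fuzzy comparison. Distilled into a standalone sketch (hypothetical names; not the committed compare_fuzzy_path_costs, which also handles the startup/total criterion switch):

#include <stdio.h>

/* Costs within 1% of each other compare as equal, so nearly-identical
 * paths don't both survive in the pathlist. */
static int
fuzzy_cost_cmp(double cost1, double cost2)
{
    if (cost1 > cost2 * 1.01)
        return +1;              /* path1 fuzzily worse */
    if (cost2 > cost1 * 1.01)
        return -1;              /* path2 fuzzily worse */
    return 0;                   /* effectively equal */
}

int
main(void)
{
    printf("%d\n", fuzzy_cost_cmp(100.0, 100.5));   /* 0: within the fuzz */
    printf("%d\n", fuzzy_cost_cmp(105.0, 100.0));   /* 1: clearly larger */
    return 0;
}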
@@ -253,22 +253,21 @@ set_cheapest(RelOptInfo *parent_rel)
void
add_path(RelOptInfo *parent_rel, Path *new_path)
{
bool accept_new = true; /* unless we find a superior old
* path */
bool accept_new = true; /* unless we find a superior old path */
ListCell *insert_after = NULL; /* where to insert new item */
ListCell *p1_prev = NULL;
ListCell *p1;
/*
* This is a convenient place to check for query cancel --- no part
* of the planner goes very long without calling add_path().
* This is a convenient place to check for query cancel --- no part of the
* planner goes very long without calling add_path().
*/
CHECK_FOR_INTERRUPTS();
/*
* Loop to check proposed new path against old paths. Note it is
* possible for more than one old path to be tossed out because
* new_path dominates it.
* Loop to check proposed new path against old paths. Note it is possible
* for more than one old path to be tossed out because new_path dominates
* it.
*/
p1 = list_head(parent_rel->pathlist); /* cannot use foreach here */
while (p1 != NULL)
@@ -278,20 +277,20 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
int costcmp;
/*
* As of Postgres 8.0, we use fuzzy cost comparison to avoid
* wasting cycles keeping paths that are really not significantly
* different in cost.
* As of Postgres 8.0, we use fuzzy cost comparison to avoid wasting
* cycles keeping paths that are really not significantly different in
* cost.
*/
costcmp = compare_fuzzy_path_costs(new_path, old_path, TOTAL_COST);
/*
* If the two paths compare differently for startup and total
* cost, then we want to keep both, and we can skip the (much
* slower) comparison of pathkeys. If they compare the same,
* proceed with the pathkeys comparison. Note: this test relies
* on the fact that compare_fuzzy_path_costs will only return 0 if
* both costs are effectively equal (and, therefore, there's no
* need to call it twice in that case).
* If the two paths compare differently for startup and total cost,
* then we want to keep both, and we can skip the (much slower)
* comparison of pathkeys. If they compare the same, proceed with the
* pathkeys comparison. Note: this test relies on the fact that
* compare_fuzzy_path_costs will only return 0 if both costs are
* effectively equal (and, therefore, there's no need to call it twice
* in that case).
*/
if (costcmp == 0 ||
costcmp == compare_fuzzy_path_costs(new_path, old_path,
@@ -307,16 +306,15 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
else
{
/*
* Same pathkeys, and fuzzily the same cost, so
* keep just one --- but we'll do an exact cost
* comparison to decide which.
* Same pathkeys, and fuzzily the same cost, so keep
* just one --- but we'll do an exact cost comparison
* to decide which.
*/
if (compare_path_costs(new_path, old_path,
TOTAL_COST) < 0)
remove_old = true; /* new dominates old */
else
accept_new = false; /* old equals or dominates
* new */
accept_new = false; /* old equals or dominates new */
}
break;
case PATHKEYS_BETTER1:
@@ -340,6 +338,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
{
parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
p1, p1_prev);
/*
* Delete the data pointed-to by the deleted cell, if possible
*/
@@ -442,10 +441,9 @@ create_index_path(PlannerInfo *root,
/*
* For a join inner scan, there's no point in marking the path with any
* pathkeys, since it will only ever be used as the inner path of a
* nestloop, and so its ordering does not matter. For the same reason
* we don't really care what order it's scanned in. (We could expect
* the caller to supply the correct values, but it's easier to force
* it here.)
* nestloop, and so its ordering does not matter. For the same reason we
* don't really care what order it's scanned in. (We could expect the
* caller to supply the correct values, but it's easier to force it here.)
*/
if (isjoininner)
{
@@ -476,15 +474,15 @@ create_index_path(PlannerInfo *root,
/*
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
* selectivity of the join clauses. Since clause_groups may
* contain both restriction and join clauses, we have to do a set
* union to get the full set of clauses that must be considered to
* compute the correct selectivity. (Without the union operation,
* we might have some restriction clauses appearing twice, which'd
* mislead clauselist_selectivity into double-counting their
* selectivity. However, since RestrictInfo nodes aren't copied when
* linking them into different lists, it should be sufficient to use
* pointer comparison to remove duplicates.)
* selectivity of the join clauses. Since clause_groups may contain
* both restriction and join clauses, we have to do a set union to get
* the full set of clauses that must be considered to compute the
* correct selectivity. (Without the union operation, we might have
* some restriction clauses appearing twice, which'd mislead
* clauselist_selectivity into double-counting their selectivity.
* However, since RestrictInfo nodes aren't copied when linking them
* into different lists, it should be sufficient to use pointer
* comparison to remove duplicates.)
*
* Always assume the join type is JOIN_INNER; even if some of the join
* clauses come from other contexts, that's not our problem.
@@ -493,7 +491,7 @@ create_index_path(PlannerInfo *root,
pathnode->rows = rel->tuples *
clauselist_selectivity(root,
allclauses,
rel->relid, /* do not use 0! */
rel->relid, /* do not use 0! */
JOIN_INNER);
/* Like costsize.c, force estimate to be at least one row */
pathnode->rows = clamp_row_est(pathnode->rows);
@@ -501,8 +499,8 @@ create_index_path(PlannerInfo *root,
else
{
/*
* The number of rows is the same as the parent rel's estimate,
* since this isn't a join inner indexscan.
* The number of rows is the same as the parent rel's estimate, since
* this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
@@ -528,7 +526,7 @@ create_bitmap_heap_path(PlannerInfo *root,
pathnode->path.pathtype = T_BitmapHeapScan;
pathnode->path.parent = rel;
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapqual = bitmapqual;
pathnode->isjoininner = isjoininner;
@@ -539,9 +537,9 @@ create_bitmap_heap_path(PlannerInfo *root,
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
* selectivity of the join clauses. We make use of the selectivity
* estimated for the bitmap to do this; this isn't really quite
* right since there may be restriction conditions not included
* in the bitmap ...
* estimated for the bitmap to do this; this isn't really quite right
* since there may be restriction conditions not included in the
* bitmap ...
*/
Cost indexTotalCost;
Selectivity indexSelectivity;
@@ -556,8 +554,8 @@ create_bitmap_heap_path(PlannerInfo *root,
else
{
/*
* The number of rows is the same as the parent rel's estimate,
* since this isn't a join inner indexscan.
* The number of rows is the same as the parent rel's estimate, since
* this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
@@ -580,7 +578,7 @@ create_bitmap_and_path(PlannerInfo *root,
pathnode->path.pathtype = T_BitmapAnd;
pathnode->path.parent = rel;
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapquals = bitmapquals;
@@ -603,7 +601,7 @@ create_bitmap_or_path(PlannerInfo *root,
pathnode->path.pathtype = T_BitmapOr;
pathnode->path.parent = rel;
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapquals = bitmapquals;
@@ -759,8 +757,8 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
return (UniquePath *) rel->cheapest_unique_path;
/*
* We must ensure path struct is allocated in same context as parent
* rel; otherwise GEQO memory management causes trouble. (Compare
* We must ensure path struct is allocated in same context as parent rel;
* otherwise GEQO memory management causes trouble. (Compare
* best_inner_indexscan().)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
@@ -774,17 +772,17 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
pathnode->path.parent = rel;
/*
* Treat the output as always unsorted, since we don't necessarily
* have pathkeys to represent it.
* Treat the output as always unsorted, since we don't necessarily have
* pathkeys to represent it.
*/
pathnode->path.pathkeys = NIL;
pathnode->subpath = subpath;
/*
* Try to identify the targetlist that will actually be unique-ified.
* In current usage, this routine is only used for sub-selects of IN
* clauses, so we should be able to find the tlist in in_info_list.
* Try to identify the targetlist that will actually be unique-ified. In
* current usage, this routine is only used for sub-selects of IN clauses,
* so we should be able to find the tlist in in_info_list.
*/
sub_targetlist = NIL;
foreach(l, root->in_info_list)
@@ -799,19 +797,19 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
}
/*
* If the input is a subquery whose output must be unique already,
* then we don't need to do anything. The test for uniqueness has
* to consider exactly which columns we are extracting; for example
* "SELECT DISTINCT x,y" doesn't guarantee that x alone is distinct.
* So we cannot check for this optimization unless we found our own
* targetlist above, and it consists only of simple Vars referencing
* subquery outputs. (Possibly we could do something with expressions
* in the subquery outputs, too, but for now keep it simple.)
* If the input is a subquery whose output must be unique already, then we
* don't need to do anything. The test for uniqueness has to consider
* exactly which columns we are extracting; for example "SELECT DISTINCT
* x,y" doesn't guarantee that x alone is distinct. So we cannot check for
* this optimization unless we found our own targetlist above, and it
* consists only of simple Vars referencing subquery outputs. (Possibly
* we could do something with expressions in the subquery outputs, too,
* but for now keep it simple.)
*/
if (sub_targetlist && rel->rtekind == RTE_SUBQUERY)
{
RangeTblEntry *rte = rt_fetch(rel->relid, root->parse->rtable);
List *sub_tlist_colnos;
List *sub_tlist_colnos;
sub_tlist_colnos = translate_sub_tlist(sub_targetlist, rel->relid);
@@ -854,24 +852,23 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
rel->width);
/*
* Charge one cpu_operator_cost per comparison per input tuple. We
* assume all columns get compared at most of the tuples. (XXX
* probably this is an overestimate.) This should agree with
* make_unique.
* Charge one cpu_operator_cost per comparison per input tuple. We assume
* all columns get compared at most of the tuples. (XXX probably this is
* an overestimate.) This should agree with make_unique.
*/
sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
/*
* Is it safe to use a hashed implementation? If so, estimate and
* compare costs. We only try this if we know the targetlist for sure
* (else we can't be sure about the datatypes involved).
* Is it safe to use a hashed implementation? If so, estimate and compare
* costs. We only try this if we know the targetlist for sure (else we
* can't be sure about the datatypes involved).
*/
pathnode->umethod = UNIQUE_PATH_SORT;
if (enable_hashagg && sub_targetlist && hash_safe_tlist(sub_targetlist))
{
/*
* Estimate the overhead per hashtable entry at 64 bytes (same as
* in planner.c).
* Estimate the overhead per hashtable entry at 64 bytes (same as in
* planner.c).
*/
int hashentrysize = rel->width + 64;
@@ -923,7 +920,7 @@ translate_sub_tlist(List *tlist, int relid)
foreach(l, tlist)
{
Var *var = (Var *) lfirst(l);
Var *var = (Var *) lfirst(l);
if (!var || !IsA(var, Var) ||
var->varno != relid)
@@ -987,8 +984,8 @@ query_is_distinct_for(Query *query, List *colnos)
else
{
/*
* If we have no GROUP BY, but do have aggregates or HAVING, then
* the result is at most one row so it's surely unique.
* If we have no GROUP BY, but do have aggregates or HAVING, then the
* result is at most one row so it's surely unique.
*/
if (query->hasAggs || query->havingQual)
return true;
@@ -1167,8 +1164,8 @@ create_mergejoin_path(PlannerInfo *root,
MergePath *pathnode = makeNode(MergePath);
/*
* If the given paths are already well enough ordered, we can skip
* doing an explicit sort.
* If the given paths are already well enough ordered, we can skip doing
* an explicit sort.
*/
if (outersortkeys &&
pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
@@ -1178,15 +1175,15 @@ create_mergejoin_path(PlannerInfo *root,
innersortkeys = NIL;
/*
* If we are not sorting the inner path, we may need a materialize
* node to ensure it can be marked/restored. (Sort does support
* mark/restore, so no materialize is needed in that case.)
* If we are not sorting the inner path, we may need a materialize node to
* ensure it can be marked/restored. (Sort does support mark/restore, so
* no materialize is needed in that case.)
*
* Since the inner side must be ordered, and only Sorts and IndexScans
* can create order to begin with, you might think there's no problem
* --- but you'd be wrong. Nestloop and merge joins can *preserve*
* the order of their inputs, so they can be selected as the input of
* a mergejoin, and they don't support mark/restore at present.
* Since the inner side must be ordered, and only Sorts and IndexScans can
* create order to begin with, you might think there's no problem --- but
* you'd be wrong. Nestloop and merge joins can *preserve* the order of
* their inputs, so they can be selected as the input of a mergejoin, and
* they don't support mark/restore at present.
*/
if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))

src/backend/optimizer/util/plancat.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.113 2005/07/23 21:05:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.114 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@
static void estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples);
BlockNumber *pages, double *tuples);
/*
@@ -71,18 +71,18 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Normally, we can assume the rewriter already acquired at least
* AccessShareLock on each relation used in the query. However this
* will not be the case for relations added to the query because they
* are inheritance children of some relation mentioned explicitly.
* For them, this is the first access during the parse/rewrite/plan
* pipeline, and so we need to obtain and keep a suitable lock.
* AccessShareLock on each relation used in the query. However this will
* not be the case for relations added to the query because they are
* inheritance children of some relation mentioned explicitly. For them,
* this is the first access during the parse/rewrite/plan pipeline, and so
* we need to obtain and keep a suitable lock.
*
* XXX really, a suitable lock is RowShareLock if the relation is
* an UPDATE/DELETE target, and AccessShareLock otherwise. However
* we cannot easily tell here which to get, so for the moment just
* get AccessShareLock always. The executor will get the right lock
* when it runs, which means there is a very small chance of deadlock
* trying to upgrade our lock.
* XXX really, a suitable lock is RowShareLock if the relation is an
* UPDATE/DELETE target, and AccessShareLock otherwise. However we cannot
* easily tell here which to get, so for the moment just get
* AccessShareLock always. The executor will get the right lock when it
* runs, which means there is a very small chance of deadlock trying to
* upgrade our lock.
*/
if (rel->reloptkind == RELOPT_BASEREL)
relation = heap_open(relationObjectId, NoLock);
@@ -105,8 +105,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
&rel->pages, &rel->tuples);
/*
* Make list of indexes. Ignore indexes on system catalogs if told
* to.
* Make list of indexes. Ignore indexes on system catalogs if told to.
*/
if (IsIgnoringSystemIndexes() && IsSystemClass(relation->rd_rel))
hasindex = false;
@@ -133,10 +132,10 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Extract info from the relation descriptor for the index.
*
* Note that we take no lock on the index; we assume our lock on
* the parent table will protect the index's schema information.
* When and if the executor actually uses the index, it will take
* a lock as needed to protect the access to the index contents.
* Note that we take no lock on the index; we assume our lock on the
* parent table will protect the index's schema information. When
* and if the executor actually uses the index, it will take a
* lock as needed to protect the access to the index contents.
*/
indexRelation = index_open(indexoid);
index = indexRelation->rd_index;
@@ -148,8 +147,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->ncolumns = ncolumns = index->indnatts;
/*
* Need to make classlist and ordering arrays large enough to
* put a terminating 0 at the end of each one.
* Need to make classlist and ordering arrays large enough to put
* a terminating 0 at the end of each one.
*/
info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
info->classlist = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
@@ -166,8 +165,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->amoptionalkey = indexRelation->rd_am->amoptionalkey;
/*
* Fetch the ordering operators associated with the index, if
* any.
* Fetch the ordering operators associated with the index, if any.
*/
amorderstrategy = indexRelation->rd_am->amorderstrategy;
if (amorderstrategy != 0)
@@ -184,8 +182,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Fetch the index expressions and predicate, if any. We must
* modify the copies we obtain from the relcache to have the
* correct varno for the parent relation, so that they match
* up correctly against qual clauses.
* correct varno for the parent relation, so that they match up
* correctly against qual clauses.
*/
info->indexprs = RelationGetIndexExpressions(indexRelation);
info->indpred = RelationGetIndexPredicate(indexRelation);
@@ -197,11 +195,11 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->unique = index->indisunique;
/*
* Estimate the index size. If it's not a partial index, we
* lock the number-of-tuples estimate to equal the parent table;
* if it is partial then we have to use the same methods as we
* would for a table, except we can be sure that the index is
* not larger than the table.
* Estimate the index size. If it's not a partial index, we lock
* the number-of-tuples estimate to equal the parent table; if it
* is partial then we have to use the same methods as we would for
* a table, except we can be sure that the index is not larger
* than the table.
*/
if (info->indpred == NIL)
{
@@ -241,8 +239,8 @@ static void
estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples)
{
BlockNumber curpages;
BlockNumber relpages;
BlockNumber curpages;
BlockNumber relpages;
double reltuples;
double density;
@@ -256,22 +254,22 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
/*
* HACK: if the relation has never yet been vacuumed, use a
* minimum estimate of 10 pages. This emulates a desirable
* aspect of pre-8.0 behavior, which is that we wouldn't assume
* a newly created relation is really small, which saves us from
* making really bad plans during initial data loading. (The
* plans are not wrong when they are made, but if they are cached
* and used again after the table has grown a lot, they are bad.)
* It would be better to force replanning if the table size has
* changed a lot since the plan was made ... but we don't
* currently have any infrastructure for redoing cached plans at
* all, so we have to kluge things here instead.
* minimum estimate of 10 pages. This emulates a desirable aspect
* of pre-8.0 behavior, which is that we wouldn't assume a newly
* created relation is really small, which saves us from making
* really bad plans during initial data loading. (The plans are
* not wrong when they are made, but if they are cached and used
* again after the table has grown a lot, they are bad.) It would
* be better to force replanning if the table size has changed a
* lot since the plan was made ... but we don't currently have any
* infrastructure for redoing cached plans at all, so we have to
* kluge things here instead.
*
* We approximate "never vacuumed" by "has relpages = 0", which
* means this will also fire on genuinely empty relations. Not
* great, but fortunately that's a seldom-seen case in the real
* world, and it shouldn't degrade the quality of the plan too
* much anyway to err in this direction.
* We approximate "never vacuumed" by "has relpages = 0", which means
* this will also fire on genuinely empty relations. Not great,
* but fortunately that's a seldom-seen case in the real world,
* and it shouldn't degrade the quality of the plan too much
* anyway to err in this direction.
*/
if (curpages < 10 && rel->rd_rel->relpages == 0)
curpages = 10;
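The clamp itself is tiny; restated as a standalone helper (illustrative only, pg_class field names dropped):

/* If the relation looks tiny and pg_class says it has never been vacuumed
 * (relpages == 0), pretend it has at least 10 pages, so plans cached during
 * initial data loading aren't tuned for an empty table. */
static unsigned int
clamp_never_vacuumed_pages(unsigned int curpages, unsigned int relpages)
{
    if (curpages < 10 && relpages == 0)
        return 10;
    return curpages;
}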
@@ -287,6 +285,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;
/*
* If it's an index, discount the metapage. This is a kluge
* because it assumes more than it ought to about index contents;
@@ -307,19 +306,19 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* When we have no data because the relation was truncated,
* estimate tuple width from attribute datatypes. We assume
* here that the pages are completely full, which is OK for
* tables (since they've presumably not been VACUUMed yet)
* but is probably an overestimate for indexes. Fortunately
* tables (since they've presumably not been VACUUMed yet) but
* is probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the
* parent table's size.
*
* Note: this code intentionally disregards alignment
* considerations, because (a) that would be gilding the
* lily considering how crude the estimate is, and (b)
* it creates platform dependencies in the default plans
* which are kind of a headache for regression testing.
* considerations, because (a) that would be gilding the lily
* considering how crude the estimate is, and (b) it creates
* platform dependencies in the default plans which are kind
* of a headache for regression testing.
*/
int32 tuple_width = 0;
int i;
int32 tuple_width = 0;
int i;
for (i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
{
@@ -391,12 +390,12 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
constr = relation->rd_att->constr;
if (constr != NULL)
{
int num_check = constr->num_check;
int i;
int num_check = constr->num_check;
int i;
for (i = 0; i < num_check; i++)
{
Node *cexpr;
Node *cexpr;
cexpr = stringToNode(constr->check[i].ccbin);
@@ -425,8 +424,8 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
ChangeVarNodes(cexpr, 1, varno, 0);
/*
* Finally, convert to implicit-AND format (that is, a List)
* and append the resulting item(s) to our output list.
* Finally, convert to implicit-AND format (that is, a List) and
* append the resulting item(s) to our output list.
*/
result = list_concat(result,
make_ands_implicit((Expr *) cexpr));
@@ -532,11 +531,12 @@ build_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
break;
case RTE_FUNCTION:
expandRTE(rte, varno, 0, true /* include dropped */,
expandRTE(rte, varno, 0, true /* include dropped */ ,
NULL, &colvars);
foreach(l, colvars)
{
var = (Var *) lfirst(l);
/*
* A non-Var in expandRTE's output means a dropped column;
* must punt.
@@ -727,11 +727,11 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno)
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
/*
* Note: ignore partial indexes, since they don't allow us to
* conclude that all attr values are distinct. We don't take any
* interest in expressional indexes either. Also, a multicolumn
* unique index doesn't allow us to conclude that just the
* specified attr is unique.
* Note: ignore partial indexes, since they don't allow us to conclude
* that all attr values are distinct. We don't take any interest in
* expressional indexes either. Also, a multicolumn unique index
* doesn't allow us to conclude that just the specified attr is
* unique.
*/
if (index->unique &&
index->ncolumns == 1 &&


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.3 2005/10/06 16:01:55 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.4 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -31,7 +31,7 @@ static bool predicate_refuted_by_recurse(Node *clause, Node *predicate);
static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause);
static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause);
static bool btree_predicate_proof(Expr *predicate, Node *clause,
bool refute_it);
bool refute_it);
/*
@ -66,9 +66,9 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* In all cases where the predicate is an AND-clause,
* predicate_implied_by_recurse() will prefer to iterate over the
* predicate's components. So we can just do that to start with here,
* and eliminate the need for predicate_implied_by_recurse() to handle
* a bare List on the predicate side.
* predicate's components. So we can just do that to start with here, and
* eliminate the need for predicate_implied_by_recurse() to handle a bare
* List on the predicate side.
*
* Logic is: restriction must imply each of the AND'ed predicate items.
*/
@ -110,11 +110,11 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
return false; /* no restriction: refutation must fail */
/*
* Unlike the implication case, predicate_refuted_by_recurse needs to
* be able to see the top-level AND structure on both sides --- otherwise
* it will fail to handle the case where one restriction clause is an OR
* that can refute the predicate AND as a whole, but not each predicate
* clause separately.
* Unlike the implication case, predicate_refuted_by_recurse needs to be
* able to see the top-level AND structure on both sides --- otherwise it
* will fail to handle the case where one restriction clause is an OR that
* can refute the predicate AND as a whole, but not each predicate clause
* separately.
*/
return predicate_refuted_by_recurse((Node *) restrictinfo_list,
(Node *) predicate_list);
@ -137,7 +137,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
* An "atom" is anything other than an AND or OR node. Notice that we don't
* An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
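A self-contained toy of the OR-implies-OR rule above, with atoms reduced to one-variable range tests ("x < bound"); every name here is illustrative, not PostgreSQL code:

#include <stdbool.h>
#include <stdio.h>

/* Atom: the predicate "x < bound".  "x < a" implies "x < b" iff a <= b. */
typedef struct { int bound; } Atom;

static bool
atom_implies(Atom a, Atom b)
{
    return a.bound <= b.bound;
}

/* OR-expr A => OR-expr B iff each of A's items implies some item of B. */
static bool
or_implies_or(const Atom *a, int na, const Atom *b, int nb)
{
    for (int i = 0; i < na; i++)
    {
        bool implied = false;

        for (int j = 0; j < nb; j++)
            if (atom_implies(a[i], b[j]))
            {
                implied = true;
                break;
            }
        if (!implied)
            return false;       /* this A item implies none of B's */
    }
    return true;
}

int main(void)
{
    Atom a[] = {{3}, {5}};      /* x < 3 OR x < 5 */
    Atom b[] = {{5}, {10}};     /* x < 5 OR x < 10 */

    printf("%d\n", or_implies_or(a, 2, b, 2));  /* 1: implication holds */
    return 0;
}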
@ -152,7 +152,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* under the assumption that both inputs have been AND/OR flattened.
*
* A bare List node on the restriction side is interpreted as an AND clause,
* in order to handle the top-level restriction List properly. However we
* in order to handle the top-level restriction List properly. However we
* need not consider a List on the predicate side since predicate_implied_by()
* already expanded it.
*
@ -228,8 +228,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
if (or_clause(predicate))
{
/*
* OR-clause => OR-clause if each of A's items implies any of
* B's items. Messy but can't do it any more simply.
* OR-clause => OR-clause if each of A's items implies any of B's
* items. Messy but can't do it any more simply.
*/
foreach(item, ((BoolExpr *) clause)->args)
{
@ -242,7 +242,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
break;
}
if (item2 == NULL)
return false; /* doesn't imply any of B's */
return false; /* doesn't imply any of B's */
}
return true;
}
@ -520,7 +520,7 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause)
*
* When the predicate is of the form "foo IS NULL", we can conclude that
* the predicate is refuted if the clause is a strict operator or function
* that has "foo" as an input. See notes for implication case.
* that has "foo" as an input. See notes for implication case.
*
* Finally, we may be able to deduce something using knowledge about btree
* operator classes; this is encapsulated in btree_predicate_proof().
@ -602,28 +602,28 @@ static const StrategyNumber BT_implic_table[6][6] = {
/*
* The target operator:
*
* LT LE EQ GE GT NE
* LT LE EQ GE GT NE
*/
{BTGE, BTGE, 0 , 0 , 0 , BTGE}, /* LT */
{BTGT, BTGE, 0 , 0 , 0 , BTGT}, /* LE */
{BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
{0 , 0 , 0 , BTLE, BTLT, BTLT}, /* GE */
{0 , 0 , 0 , BTLE, BTLE, BTLE}, /* GT */
{0 , 0 , 0 , 0 , 0 , BTEQ} /* NE */
{BTGE, BTGE, 0, 0, 0, BTGE}, /* LT */
{BTGT, BTGE, 0, 0, 0, BTGT}, /* LE */
{BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
{0, 0, 0, BTLE, BTLT, BTLT}, /* GE */
{0, 0, 0, BTLE, BTLE, BTLE}, /* GT */
{0, 0, 0, 0, 0, BTEQ} /* NE */
};
static const StrategyNumber BT_refute_table[6][6] = {
/*
* The target operator:
*
* LT LE EQ GE GT NE
* LT LE EQ GE GT NE
*/
{0 , 0 , BTGE, BTGE, BTGE, 0 }, /* LT */
{0 , 0 , BTGT, BTGT, BTGE, 0 }, /* LE */
{BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
{BTLE, BTLT, BTLT, 0 , 0 , 0 }, /* GE */
{BTLE, BTLE, BTLE, 0 , 0 , 0 }, /* GT */
{0 , 0 , BTEQ, 0 , 0 , 0 } /* NE */
{0, 0, BTGE, BTGE, BTGE, 0}, /* LT */
{0, 0, BTGT, BTGT, BTGE, 0}, /* LE */
{BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
{BTLE, BTLT, BTLT, 0, 0, 0}, /* GE */
{BTLE, BTLE, BTLE, 0, 0, 0}, /* GT */
{0, 0, BTEQ, 0, 0, 0} /* NE */
};
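A runnable reading of how these tables are meant to be consulted. The indexing code falls outside this hunk, so treat the convention as an assumption of mine: rows are keyed by the restriction clause's operator, columns by the target predicate's operator, and the looked-up strategy is applied as pred_const <test> clause_const:

#include <stdbool.h>
#include <stdio.h>

/* Toy btree strategy numbers, 1-based like PostgreSQL's. */
enum { BTLT = 1, BTLE, BTEQ, BTGE, BTGT, BTNE };

static const int implic[6][6] = {
    {BTGE, BTGE, 0,    0,    0,    BTGE},   /* clause LT */
    {BTGT, BTGE, 0,    0,    0,    BTGT},   /* clause LE */
    {BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE},   /* clause EQ */
    {0,    0,    0,    BTLE, BTLT, BTLT},   /* clause GE */
    {0,    0,    0,    BTLE, BTLE, BTLE},   /* clause GT */
    {0,    0,    0,    0,    0,    BTEQ}    /* clause NE */
};

static bool
apply_test(int strategy, int pred_const, int clause_const)
{
    switch (strategy)
    {
        case BTLT: return pred_const <  clause_const;
        case BTLE: return pred_const <= clause_const;
        case BTEQ: return pred_const == clause_const;
        case BTGE: return pred_const >= clause_const;
        case BTGT: return pred_const >  clause_const;
        case BTNE: return pred_const != clause_const;
    }
    return false;               /* 0 entry: no proof possible */
}

int main(void)
{
    /* Does "x = 3" imply "x < 5"?  Row EQ, column LT gives BTGT: test 5 > 3. */
    int test = implic[BTEQ - 1][BTLT - 1];

    printf("%s\n", test && apply_test(test, 5, 3) ? "implied" : "not proven");
    return 0;
}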
@ -683,13 +683,13 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
MemoryContext oldcontext;
/*
* Both expressions must be binary opclauses with a
* Const on one side, and identical subexpressions on the other sides.
* Note we don't have to think about binary relabeling of the Const
* node, since that would have been folded right into the Const.
* Both expressions must be binary opclauses with a Const on one side, and
* identical subexpressions on the other sides. Note we don't have to
* think about binary relabeling of the Const node, since that would have
* been folded right into the Const.
*
* If either Const is null, we also fail right away; this assumes that
* the test operator will always be strict.
* If either Const is null, we also fail right away; this assumes that the
* test operator will always be strict.
*/
if (!is_opclause(predicate))
return false;
@ -738,11 +738,11 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
return false;
/*
* Check for matching subexpressions on the non-Const sides. We used
* to only allow a simple Var, but it's about as easy to allow any
* expression. Remember we already know that the pred expression does
* not contain any non-immutable functions, so identical expressions
* should yield identical results.
* Check for matching subexpressions on the non-Const sides. We used to
* only allow a simple Var, but it's about as easy to allow any
* expression. Remember we already know that the pred expression does not
* contain any non-immutable functions, so identical expressions should
* yield identical results.
*/
if (!equal(pred_var, clause_var))
return false;
@ -772,24 +772,24 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
*
* We must find a btree opclass that contains both operators, else the
* implication can't be determined. Also, the pred_op has to be of
* default subtype (implying left and right input datatypes are the
* same); otherwise it's unsafe to put the pred_const on the left side
* of the test. Also, the opclass must contain a suitable test
* operator matching the clause_const's type (which we take to mean
* that it has the same subtype as the original clause_operator).
* default subtype (implying left and right input datatypes are the same);
* otherwise it's unsafe to put the pred_const on the left side of the
* test. Also, the opclass must contain a suitable test operator matching
* the clause_const's type (which we take to mean that it has the same
* subtype as the original clause_operator).
*
* If there are multiple matching opclasses, assume we can use any one to
* determine the logical relationship of the two operators and the
* correct corresponding test operator. This should work for any
* logically consistent opclasses.
* determine the logical relationship of the two operators and the correct
* corresponding test operator. This should work for any logically
* consistent opclasses.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op),
0, 0, 0);
/*
* If we couldn't find any opclass containing the pred_op, perhaps it
* is a <> operator. See if it has a negator that is in an opclass.
* If we couldn't find any opclass containing the pred_op, perhaps it is a
* <> operator. See if it has a negator that is in an opclass.
*/
pred_op_negated = false;
if (catlist->n_members == 0)
@ -800,7 +800,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
pred_op_negated = true;
ReleaseSysCacheList(catlist);
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op_negator),
ObjectIdGetDatum(pred_op_negator),
0, 0, 0);
}
}
@ -837,8 +837,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
}
/*
* From the same opclass, find a strategy number for the
* clause_op, if possible
* From the same opclass, find a strategy number for the clause_op, if
* possible
*/
clause_tuple = SearchSysCache(AMOPOPID,
ObjectIdGetDatum(clause_op),
@ -857,7 +857,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
else if (OidIsValid(clause_op_negator))
{
clause_tuple = SearchSysCache(AMOPOPID,
ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(opclass_id),
0, 0);
if (HeapTupleIsValid(clause_tuple))
@ -896,8 +896,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
}
/*
* See if opclass has an operator for the test strategy and the
* clause datatype.
* See if opclass has an operator for the test strategy and the clause
* datatype.
*/
if (test_strategy == BTNE)
{
@ -918,9 +918,9 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
*
* Note that we require only the test_op to be immutable, not the
* original clause_op. (pred_op is assumed to have been checked
* immutable by the caller.) Essentially we are assuming that
* the opclass is consistent even if it contains operators that
* are merely stable.
* immutable by the caller.) Essentially we are assuming that the
* opclass is consistent even if it contains operators that are
* merely stable.
*/
if (op_volatile(test_op) == PROVOLATILE_IMMUTABLE)
{
@ -958,7 +958,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
/* And execute it. */
test_result = ExecEvalExprSwitchContext(test_exprstate,
GetPerTupleExprContext(estate),
GetPerTupleExprContext(estate),
&isNull, NULL);
/* Get back to outer memory context */


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.71 2005/07/28 22:27:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.72 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -31,9 +31,9 @@ typedef struct JoinHashEntry
} JoinHashEntry;
static RelOptInfo *make_reloptinfo(PlannerInfo *root, int relid,
RelOptKind reloptkind);
RelOptKind reloptkind);
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *input_rel);
RelOptInfo *input_rel);
static List *build_joinrel_restrictlist(PlannerInfo *root,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
@ -165,8 +165,8 @@ make_reloptinfo(PlannerInfo *root, int relid, RelOptKind reloptkind)
/* Add the finished struct to the base_rel_array */
if (relid >= root->base_rel_array_size)
{
int oldsize = root->base_rel_array_size;
int newsize;
int oldsize = root->base_rel_array_size;
int newsize;
newsize = Max(oldsize * 2, relid + 1);
root->base_rel_array = (RelOptInfo **)
@ -225,7 +225,7 @@ build_join_rel_hash(PlannerInfo *root)
hashtab = hash_create("JoinRelHashTable",
256L,
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
/* Insert all the already-existing joinrels */
foreach(l, root->join_rel_list)
@ -254,7 +254,7 @@ RelOptInfo *
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
* Switch to using hash lookup when list grows "too long". The threshold
* Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
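A toy of the lazy list-to-hash upgrade this function describes; the chained hash, threshold constant, and all type names are mine, with the threshold as arbitrary here as it is above:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 64
#define UPGRADE_THRESHOLD 32

typedef struct Entry { int key; struct Entry *next; } Entry;

typedef struct {
    int     keys[1024];         /* the always-maintained "list" */
    int     nkeys;
    Entry  *buckets[NBUCKETS];  /* built lazily, empty until upgraded */
    bool    hashed;
} LookupState;

static void
upgrade_to_hash(LookupState *st)
{
    for (int i = 0; i < st->nkeys; i++)
    {
        Entry *e = malloc(sizeof(Entry));

        e->key = st->keys[i];
        e->next = st->buckets[(unsigned) e->key % NBUCKETS];
        st->buckets[(unsigned) e->key % NBUCKETS] = e;
    }
    st->hashed = true;
}

static bool
contains(LookupState *st, int key)
{
    /* Switch to hashed lookup once the list grows "too long". */
    if (!st->hashed && st->nkeys > UPGRADE_THRESHOLD)
        upgrade_to_hash(st);

    if (st->hashed)
    {
        for (Entry *e = st->buckets[(unsigned) key % NBUCKETS]; e; e = e->next)
            if (e->key == key)
                return true;
        return false;
    }
    for (int i = 0; i < st->nkeys; i++) /* short lists: linear search wins */
        if (st->keys[i] == key)
            return true;
    return false;
}

int main(void)
{
    LookupState st = {0};

    for (int i = 0; i < 100; i++)
        st.keys[st.nkeys++] = i * 3;
    printf("%d %d\n", contains(&st, 99), contains(&st, 100));   /* 1 0 */
    return 0;
}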
@ -263,10 +263,10 @@ find_join_rel(PlannerInfo *root, Relids relids)
/*
* Use either hashtable lookup or linear search, as appropriate.
*
* Note: the seemingly redundant hashkey variable is used to avoid
* taking the address of relids; unless the compiler is exceedingly
* smart, doing so would force relids out of a register and thus
* probably slow down the list-search case.
* Note: the seemingly redundant hashkey variable is used to avoid taking the
* address of relids; unless the compiler is exceedingly smart, doing so
* would force relids out of a register and thus probably slow down the
* list-search case.
*/
if (root->join_rel_hash)
{
@ -331,8 +331,8 @@ build_join_rel(PlannerInfo *root,
if (joinrel)
{
/*
* Yes, so we only need to figure the restrictlist for this
* particular pair of component relations.
* Yes, so we only need to figure the restrictlist for this particular
* pair of component relations.
*/
if (restrictlist_ptr)
*restrictlist_ptr = build_joinrel_restrictlist(root,
@ -375,21 +375,20 @@ build_join_rel(PlannerInfo *root,
joinrel->index_inner_paths = NIL;
/*
* Create a new tlist containing just the vars that need to be output
* from this join (ie, are needed for higher joinclauses or final
* output).
* Create a new tlist containing just the vars that need to be output from
* this join (ie, are needed for higher joinclauses or final output).
*
* NOTE: the tlist order for a join rel will depend on which pair of
* outer and inner rels we first try to build it from. But the
* contents should be the same regardless.
* NOTE: the tlist order for a join rel will depend on which pair of outer
* and inner rels we first try to build it from. But the contents should
* be the same regardless.
*/
build_joinrel_tlist(root, joinrel, outer_rel);
build_joinrel_tlist(root, joinrel, inner_rel);
/*
* Construct restrict and join clause lists for the new joinrel. (The
* caller might or might not need the restrictlist, but I need it
* anyway for set_joinrel_size_estimates().)
* caller might or might not need the restrictlist, but I need it anyway
* for set_joinrel_size_estimates().)
*/
restrictlist = build_joinrel_restrictlist(root,
joinrel,
@ -407,9 +406,9 @@ build_join_rel(PlannerInfo *root,
jointype, restrictlist);
/*
* Add the joinrel to the query's joinrel list, and store it into
* the auxiliary hashtable if there is one. NB: GEQO requires us
* to append the new joinrel to the end of the list!
* Add the joinrel to the query's joinrel list, and store it into the
* auxiliary hashtable if there is one. NB: GEQO requires us to append
* the new joinrel to the end of the list!
*/
root->join_rel_list = lappend(root->join_rel_list, joinrel);
@ -527,18 +526,18 @@ build_joinrel_restrictlist(PlannerInfo *root,
* Collect all the clauses that syntactically belong at this level.
*/
rlist = list_concat(subbuild_joinrel_restrictlist(joinrel,
outer_rel->joininfo),
outer_rel->joininfo),
subbuild_joinrel_restrictlist(joinrel,
inner_rel->joininfo));
inner_rel->joininfo));
/*
* Eliminate duplicate and redundant clauses.
*
* We must eliminate duplicates, since we will see many of the same
* clauses arriving from both input relations. Also, if a clause is a
* mergejoinable clause, it's possible that it is redundant with
* previous clauses (see optimizer/README for discussion). We detect
* that case and omit the redundant clause from the result list.
* We must eliminate duplicates, since we will see many of the same clauses
* arriving from both input relations. Also, if a clause is a
* mergejoinable clause, it's possible that it is redundant with previous
* clauses (see optimizer/README for discussion). We detect that case and
* omit the redundant clause from the result list.
*/
result = remove_redundant_join_clauses(root, rlist,
IS_OUTER_JOIN(jointype));
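A minimal sketch of the duplicate elimination described above, keyed on pointer equality since the same clause node arrives multiply-linked (not copied) from both inputs; it does not attempt the redundant-mergeclause test the real pass also performs:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Compact the array in place, keeping the first appearance of each
 * pointer; returns the new length.
 */
static size_t
unique_by_pointer(const void **items, size_t n)
{
    size_t  out = 0;

    for (size_t i = 0; i < n; i++)
    {
        bool seen = false;

        for (size_t j = 0; j < out; j++)
            if (items[j] == items[i])
            {
                seen = true;
                break;
            }
        if (!seen)
            items[out++] = items[i];
    }
    return out;
}

int main(void)
{
    int a, b;
    const void *items[] = {&a, &b, &a};     /* &a arrives from both inputs */

    printf("%zu\n", unique_by_pointer(items, 3));   /* 2 */
    return 0;
}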
@ -571,18 +570,17 @@ subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
* This clause becomes a restriction clause for the joinrel,
* since it refers to no outside rels. We don't bother to
* check for duplicates here --- build_joinrel_restrictlist
* will do that.
* This clause becomes a restriction clause for the joinrel, since
* it refers to no outside rels. We don't bother to check for
* duplicates here --- build_joinrel_restrictlist will do that.
*/
restrictlist = lappend(restrictlist, rinfo);
}
else
{
/*
* This clause is still a join clause at this level, so we
* ignore it in this routine.
* This clause is still a join clause at this level, so we ignore
* it in this routine.
*/
}
}
@ -603,17 +601,17 @@ subbuild_joinrel_joinlist(RelOptInfo *joinrel,
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
* This clause becomes a restriction clause for the joinrel,
* since it refers to no outside rels. So we can ignore it
* in this routine.
* This clause becomes a restriction clause for the joinrel, since
* it refers to no outside rels. So we can ignore it in this
* routine.
*/
}
else
{
/*
* This clause is still a join clause at this level, so add
* it to the joininfo list for the joinrel, being careful to
* eliminate duplicates. (Since RestrictInfo nodes are normally
* This clause is still a join clause at this level, so add it to
* the joininfo list for the joinrel, being careful to eliminate
* duplicates. (Since RestrictInfo nodes are normally
* multiply-linked rather than copied, pointer equality should be
* a sufficient test. If two equal() nodes should happen to sneak
* in, no great harm is done --- they'll be detected by


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.40 2005/10/13 00:06:46 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.41 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -51,8 +51,8 @@ RestrictInfo *
make_restrictinfo(Expr *clause, bool is_pushed_down, Relids required_relids)
{
/*
* If it's an OR clause, build a modified copy with RestrictInfos
* inserted above each subclause of the top-level AND/OR structure.
* If it's an OR clause, build a modified copy with RestrictInfos inserted
* above each subclause of the top-level AND/OR structure.
*/
if (or_clause((Node *) clause))
return (RestrictInfo *) make_sub_restrictinfos(clause, is_pushed_down);
@ -101,9 +101,9 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* There may well be redundant quals among the subplans, since a
* top-level WHERE qual might have gotten used to form several
* different index quals. We don't try exceedingly hard to
* eliminate redundancies, but we do eliminate obvious duplicates
* by using list_concat_unique.
* different index quals. We don't try exceedingly hard to eliminate
* redundancies, but we do eliminate obvious duplicates by using
* list_concat_unique.
*/
result = NIL;
foreach(l, apath->bitmapquals)
@ -125,7 +125,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
* to just "true". We do not try to eliminate redundant subclauses
* to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@ -142,8 +142,8 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
{
/*
* If we find a qual-less subscan, it represents a constant
* TRUE, and hence the OR result is also constant TRUE, so
* we can stop here.
* TRUE, and hence the OR result is also constant TRUE, so we
* can stop here.
*/
return NIL;
}
@ -157,8 +157,8 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
}
/*
* Avoid generating one-element ORs, which could happen
* due to redundancy elimination.
* Avoid generating one-element ORs, which could happen due to
* redundancy elimination.
*/
if (list_length(withris) <= 1)
result = withris;
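A runnable restatement of the two OR simplifications in this function: a qual-free subplan (NULL below) makes the whole OR constant TRUE, and one-element ORs are collapsed to the bare element. Strings stand in for clause trees; the names are mine:

#include <stddef.h>
#include <stdio.h>

/*
 * Compact the OR's arms in place.  Returns the number of arms kept:
 * 0 means the whole OR reduced to constant TRUE (the NIL return above),
 * 1 means use arms[0] bare rather than building a one-element OR node.
 */
static size_t
simplify_or_arms(const char **arms, size_t n)
{
    size_t  kept = 0;

    for (size_t i = 0; i < n; i++)
    {
        if (arms[i] == NULL)
            return 0;           /* one TRUE arm makes the whole OR true */
        arms[kept++] = arms[i];
    }
    return kept;
}

int main(void)
{
    const char *a[] = {"x = 1", NULL, "x = 2"};     /* middle subscan is qual-free */
    const char *b[] = {"y > 0"};

    printf("%zu %zu\n", simplify_or_arms(a, 3), simplify_or_arms(b, 1)); /* 0 1 */
    return 0;
}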
@ -174,20 +174,20 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
}
else if (IsA(bitmapqual, IndexPath))
{
IndexPath *ipath = (IndexPath *) bitmapqual;
IndexPath *ipath = (IndexPath *) bitmapqual;
result = list_copy(ipath->indexclauses);
if (include_predicates && ipath->indexinfo->indpred != NIL)
{
foreach(l, ipath->indexinfo->indpred)
{
Expr *pred = (Expr *) lfirst(l);
Expr *pred = (Expr *) lfirst(l);
/*
* We know that the index predicate must have been implied
* by the query condition as a whole, but it may or may not
* be implied by the conditions that got pushed into the
* bitmapqual. Avoid generating redundant conditions.
* We know that the index predicate must have been implied by
* the query condition as a whole, but it may or may not be
* implied by the conditions that got pushed into the
* bitmapqual. Avoid generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), result))
result = lappend(result,
@ -223,8 +223,8 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->can_join = false; /* may get set below */
/*
* If it's a binary opclause, set up left/right relids info. In any
* case set up the total clause relids info.
* If it's a binary opclause, set up left/right relids info. In any case
* set up the total clause relids info.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
@ -232,13 +232,13 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->right_relids = pull_varnos(get_rightop(clause));
restrictinfo->clause_relids = bms_union(restrictinfo->left_relids,
restrictinfo->right_relids);
restrictinfo->right_relids);
/*
* Does it look like a normal join clause, i.e., a binary operator
* relating expressions that come from distinct relations? If so
* we might be able to use it in a join algorithm. Note that this
* is a purely syntactic test that is made regardless of context.
* relating expressions that come from distinct relations? If so we
* might be able to use it in a join algorithm. Note that this is a
* purely syntactic test that is made regardless of context.
*/
if (!bms_is_empty(restrictinfo->left_relids) &&
!bms_is_empty(restrictinfo->right_relids) &&
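The third arm of this test falls outside the hunk, so the no-overlap check below is my assumption about the elided condition. A toy version with relid sets as plain bitmasks (PostgreSQL uses Bitmapsets):

#include <stdbool.h>
#include <stdio.h>

/*
 * A binary opclause looks like a join clause only if each side
 * references some rel and the two sides reference distinct rels --
 * a purely syntactic test, as the comment says.
 */
static bool
looks_like_join_clause(unsigned left_relids, unsigned right_relids)
{
    return left_relids != 0 &&
           right_relids != 0 &&
           (left_relids & right_relids) == 0;
}

int main(void)
{
    printf("%d\n", looks_like_join_clause(1u << 1, 1u << 2));   /* 1: a.x = b.y */
    printf("%d\n", looks_like_join_clause(1u << 1, 1u << 1));   /* 0: a.x = a.y */
    return 0;
}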
@ -262,11 +262,11 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->required_relids = restrictinfo->clause_relids;
/*
* Fill in all the cacheable fields with "not yet set" markers. None
* of these will be computed until/unless needed. Note in particular
* that we don't mark a binary opclause as mergejoinable or
* hashjoinable here; that happens only if it appears in the right
* context (top level of a joinclause list).
* Fill in all the cacheable fields with "not yet set" markers. None of
* these will be computed until/unless needed. Note in particular that we
* don't mark a binary opclause as mergejoinable or hashjoinable here;
* that happens only if it appears in the right context (top level of a
* joinclause list).
*/
restrictinfo->eval_cost.startup = -1;
restrictinfo->this_selec = -1;
@ -420,17 +420,16 @@ remove_redundant_join_clauses(PlannerInfo *root, List *restrictinfo_list,
QualCost cost;
/*
* If there are any redundant clauses, we want to eliminate the ones
* that are more expensive in favor of the ones that are less so. Run
* If there are any redundant clauses, we want to eliminate the ones that
* are more expensive in favor of the ones that are less so. Run
* cost_qual_eval() to ensure the eval_cost fields are set up.
*/
cost_qual_eval(&cost, restrictinfo_list);
/*
* We don't have enough knowledge yet to be able to estimate the
* number of times a clause might be evaluated, so it's hard to weight
* the startup and per-tuple costs appropriately. For now just weight
* 'em the same.
* We don't have enough knowledge yet to be able to estimate the number of
* times a clause might be evaluated, so it's hard to weight the startup
* and per-tuple costs appropriately. For now just weight 'em the same.
*/
#define CLAUSECOST(r) ((r)->eval_cost.startup + (r)->eval_cost.per_tuple)


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.69 2005/04/06 16:34:06 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.70 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -93,7 +93,7 @@ add_to_flat_tlist(List *tlist, List *vars)
{
TargetEntry *tle;
tle = makeTargetEntry(copyObject(var), /* copy needed?? */
tle = makeTargetEntry(copyObject(var), /* copy needed?? */
next_resno++,
NULL,
false);


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.65 2005/06/05 22:32:56 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.66 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -88,8 +88,8 @@ pull_varnos(Node *node)
context.sublevels_up = 0;
/*
* Must be prepared to start with a Query or a bare expression tree;
* if it's a Query, we don't want to increment sublevels_up.
* Must be prepared to start with a Query or a bare expression tree; if
* it's a Query, we don't want to increment sublevels_up.
*/
query_or_expression_tree_walker(node,
pull_varnos_walker,
@ -149,8 +149,8 @@ contain_var_reference(Node *node, int varno, int varattno, int levelsup)
context.sublevels_up = levelsup;
/*
* Must be prepared to start with a Query or a bare expression tree;
* if it's a Query, we don't want to increment sublevels_up.
* Must be prepared to start with a Query or a bare expression tree; if
* it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
contain_var_reference_walker,
@ -215,8 +215,7 @@ contain_var_clause_walker(Node *node, void *context)
if (IsA(node, Var))
{
if (((Var *) node)->varlevelsup == 0)
return true; /* abort the tree traversal and return
* true */
return true; /* abort the tree traversal and return true */
return false;
}
return expression_tree_walker(node, contain_var_clause_walker, context);
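A toy of the walker convention noted in the comment above, where returning true from any node aborts the whole traversal and propagates true to the caller; Node and walker here are stand-ins of mine, not PostgreSQL's expression_tree_walker:

#include <stdbool.h>
#include <stdio.h>

typedef struct Node { int kind; struct Node *l, *r; } Node;

static bool
walker(Node *node, int target_kind)
{
    if (node == NULL)
        return false;
    if (node->kind == target_kind)
        return true;            /* abort the tree traversal and return true */
    /* short-circuit || stops recursing as soon as any subtree matches */
    return walker(node->l, target_kind) || walker(node->r, target_kind);
}

int main(void)
{
    Node leaf = {42, NULL, NULL};
    Node root = {0, &leaf, NULL};

    printf("%d\n", walker(&root, 42));  /* 1: found, traversal stopped early */
    return 0;
}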
@ -286,7 +285,7 @@ contain_vars_above_level(Node *node, int levelsup)
int sublevels_up = levelsup;
return query_or_expression_tree_walker(node,
contain_vars_above_level_walker,
contain_vars_above_level_walker,
(void *) &sublevels_up,
0);
}
@ -370,8 +369,8 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel = varlevelsup;
/*
* As soon as we find a local variable, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
* As soon as we find a local variable, we can abort the tree
* traversal, since min_varlevel is then certainly 0.
*/
if (varlevelsup == 0)
return true;
@ -380,10 +379,9 @@ find_minimum_var_level_walker(Node *node,
}
/*
* An Aggref must be treated like a Var of its level. Normally we'd
* get the same result from looking at the Vars in the aggregate's
* argument, but this fails in the case of a Var-less aggregate call
* (COUNT(*)).
* An Aggref must be treated like a Var of its level. Normally we'd get
* the same result from looking at the Vars in the aggregate's argument,
* but this fails in the case of a Var-less aggregate call (COUNT(*)).
*/
if (IsA(node, Aggref))
{
@ -400,8 +398,8 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel = agglevelsup;
/*
* As soon as we find a local aggregate, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
* As soon as we find a local aggregate, we can abort the tree
* traversal, since min_varlevel is then certainly 0.
*/
if (agglevelsup == 0)
return true;
@ -553,8 +551,8 @@ flatten_join_alias_vars_mutator(Node *node,
newvar = (Node *) list_nth(rte->joinaliasvars, var->varattno - 1);
/*
* If we are expanding an alias carried down from an upper query,
* must adjust its varlevelsup fields.
* If we are expanding an alias carried down from an upper query, must
* adjust its varlevelsup fields.
*/
if (context->sublevels_up != 0)
{
@ -570,8 +568,8 @@ flatten_join_alias_vars_mutator(Node *node,
InClauseInfo *ininfo;
ininfo = (InClauseInfo *) expression_tree_mutator(node,
flatten_join_alias_vars_mutator,
(void *) context);
flatten_join_alias_vars_mutator,
(void *) context);
/* now fix InClauseInfo's relid sets */
if (context->sublevels_up == 0)
{