
pgindent run for 9.0

Bruce Momjian
2010-02-26 02:01:40 +00:00
parent 16040575a0
commit 65e806cba1
403 changed files with 6786 additions and 6530 deletions

src/backend/optimizer/geqo/geqo_eval.c

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.92 2010/01/02 16:57:46 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.93 2010/02/26 02:00:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -40,7 +40,7 @@ typedef struct
} Clump;
static List *merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump,
bool force);
bool force);
static bool desirable_join(PlannerInfo *root,
RelOptInfo *outer_rel, RelOptInfo *inner_rel);
@ -156,14 +156,14 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
/*
* Sometimes, a relation can't yet be joined to others due to heuristics
* or actual semantic restrictions. We maintain a list of "clumps" of
* successfully joined relations, with larger clumps at the front.
* Each new relation from the tour is added to the first clump it can
* be joined to; if there is none then it becomes a new clump of its own.
* When we enlarge an existing clump we check to see if it can now be
* merged with any other clumps. After the tour is all scanned, we
* forget about the heuristics and try to forcibly join any remaining
* clumps. Some forced joins might still fail due to semantics, but
* we should always be able to find some join order that works.
* successfully joined relations, with larger clumps at the front. Each
* new relation from the tour is added to the first clump it can be joined
* to; if there is none then it becomes a new clump of its own. When we
* enlarge an existing clump we check to see if it can now be merged with
* any other clumps. After the tour is all scanned, we forget about the
* heuristics and try to forcibly join any remaining clumps. Some forced
* joins might still fail due to semantics, but we should always be able
* to find some join order that works.
*/
clumps = NIL;
@ -214,7 +214,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
* Merge a "clump" into the list of existing clumps for gimme_tree.
*
* We try to merge the clump into some existing clump, and repeat if
* successful. When no more merging is possible, insert the clump
* successful. When no more merging is possible, insert the clump
* into the list, preserving the list ordering rule (namely, that
* clumps of larger size appear earlier).
*
@ -265,7 +265,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* Recursively try to merge the enlarged old_clump with
* others. When no further merge is possible, we'll reinsert
* others. When no further merge is possible, we'll reinsert
* it into the list.
*/
return merge_clump(root, clumps, old_clump, force);
@ -276,7 +276,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* No merging is possible, so add new_clump as an independent clump, in
* proper order according to size. We can be fast for the common case
* proper order according to size. We can be fast for the common case
* where it has size 1 --- it should always go at the end.
*/
if (clumps == NIL || new_clump->size == 1)
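
The reflowed comments above describe GEQO's clump heuristic in gimme_tree() and merge_clump(). A rough standalone sketch of that idea only, with toy types and a stubbed joinability test rather than the real planner structures:

#include <stdio.h>
#include <stdlib.h>

/* Toy model: a clump is a set of relation ids plus a size; whether two clumps
 * can be joined is stubbed out.  Larger clumps are kept at the front. */
typedef struct Clump
{
    unsigned int relids;            /* bitmask of member relations */
    int         size;
    struct Clump *next;
} Clump;

static int
can_join(const Clump *a, const Clump *b)
{
    return 1;                       /* stand-in for desirable_join()/make_join_rel() */
}

/* Merge new_clump into the first clump it can join; repeat on success, else
 * insert it into the list, keeping larger clumps in front. */
static Clump *
merge_clump(Clump *clumps, Clump *new_clump)
{
    for (Clump **lnk = &clumps; *lnk != NULL; lnk = &(*lnk)->next)
    {
        Clump *old = *lnk;

        if (can_join(old, new_clump))
        {
            *lnk = old->next;       /* detach the enlarged clump ... */
            old->relids |= new_clump->relids;
            old->size += new_clump->size;
            free(new_clump);
            return merge_clump(clumps, old);    /* ... and try further merges */
        }
    }

    Clump **lnk = &clumps;
    while (*lnk != NULL && (*lnk)->size >= new_clump->size)
        lnk = &(*lnk)->next;
    new_clump->next = *lnk;
    *lnk = new_clump;
    return clumps;
}

int
main(void)
{
    int     tour[] = {3, 1, 4, 2};  /* a GEQO "tour" of relation ids */
    Clump  *clumps = NULL;
    int     n = 0;

    for (int i = 0; i < 4; i++)
    {
        Clump  *c = calloc(1, sizeof(Clump));

        c->relids = 1u << tour[i];
        c->size = 1;
        clumps = merge_clump(clumps, c);    /* add to first joinable clump */
    }
    for (Clump *c = clumps; c != NULL; c = c->next)
        n++;
    printf("%d clump(s) remain; front clump covers %d rel(s)\n",
           n, clumps ? clumps->size : 0);
    return 0;
}

In the real code the joinability test applies the heuristics first, and the remaining clumps are retried with force = true after the whole tour has been scanned, as the comment above notes.
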

src/backend/optimizer/geqo/geqo_random.c

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_random.c,v 1.2 2010/01/02 16:57:46 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_random.c,v 1.3 2010/02/26 02:00:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -22,8 +22,8 @@ geqo_set_seed(PlannerInfo *root, double seed)
GeqoPrivateData *private = (GeqoPrivateData *) root->join_search_private;
/*
* XXX. This seeding algorithm could certainly be improved - but
* it is not critical to do so.
* XXX. This seeding algorithm could certainly be improved - but it is not
* critical to do so.
*/
memset(private->random_state, 0, sizeof(private->random_state));
memcpy(private->random_state,

src/backend/optimizer/path/allpaths.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.192 2010/01/02 16:57:46 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.193 2010/02/26 02:00:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -347,11 +347,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
* can disregard this child.
*
* As of 8.4, the child rel's targetlist might contain non-Var
* expressions, which means that substitution into the quals
* could produce opportunities for const-simplification, and perhaps
* even pseudoconstant quals. To deal with this, we strip the
* RestrictInfo nodes, do the substitution, do const-simplification,
* and then reconstitute the RestrictInfo layer.
* expressions, which means that substitution into the quals could
* produce opportunities for const-simplification, and perhaps even
* pseudoconstant quals. To deal with this, we strip the RestrictInfo
* nodes, do the substitution, do const-simplification, and then
* reconstitute the RestrictInfo layer.
*/
childquals = get_all_actual_clauses(rel->baserestrictinfo);
childquals = (List *) adjust_appendrel_attrs((Node *) childquals,

src/backend/optimizer/path/costsize.c

@ -59,7 +59,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.215 2010/02/19 21:49:10 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.216 2010/02/26 02:00:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -635,11 +635,11 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
pages_fetched = ceil(pages_fetched);
/*
* For small numbers of pages we should charge spc_random_page_cost apiece,
* while if nearly all the table's pages are being read, it's more
* appropriate to charge spc_seq_page_cost apiece. The effect is nonlinear,
* too. For lack of a better idea, interpolate like this to determine the
* cost per page.
* For small numbers of pages we should charge spc_random_page_cost
* apiece, while if nearly all the table's pages are being read, it's more
* appropriate to charge spc_seq_page_cost apiece. The effect is
* nonlinear, too. For lack of a better idea, interpolate like this to
* determine the cost per page.
*/
if (pages_fetched >= 2.0)
cost_per_page = spc_random_page_cost -
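
The hunk is cut off mid-statement, but the interpolation the comment describes can be sketched concretely. The sqrt-shaped form below is how that nonlinear interpolation is commonly written for this calculation; treat it as an illustrative sketch rather than the verbatim source:

#include <math.h>
#include <stdio.h>

/* Interpolate the per-page fetch cost between random_cost and seq_cost as the
 * fraction of the table being read grows; the sqrt keeps the effect nonlinear,
 * so the cost only approaches seq_cost when nearly all pages are fetched. */
static double
bitmap_cost_per_page(double pages_fetched, double total_pages,
                     double random_cost, double seq_cost)
{
    if (pages_fetched >= 2.0)
        return random_cost -
            (random_cost - seq_cost) * sqrt(pages_fetched / total_pages);
    return random_cost;             /* a lone page costs a full random fetch */
}

int
main(void)
{
    /* with the default random_page_cost = 4.0 and seq_page_cost = 1.0 */
    printf("10 of 10000 pages:    %.2f per page\n",
           bitmap_cost_per_page(10, 10000, 4.0, 1.0));
    printf("10000 of 10000 pages: %.2f per page\n",
           bitmap_cost_per_page(10000, 10000, 4.0, 1.0));
    return 0;
}
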
@ -936,13 +936,13 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
*
* Currently, nodeFunctionscan.c always executes the function to
* completion before returning any rows, and caches the results in a
* tuplestore. So the function eval cost is all startup cost, and
* per-row costs are minimal.
* tuplestore. So the function eval cost is all startup cost, and per-row
* costs are minimal.
*
* XXX in principle we ought to charge tuplestore spill costs if the
* number of rows is large. However, given how phony our rowcount
* estimates for functions tend to be, there's not a lot of point
* in that refinement right now.
* estimates for functions tend to be, there's not a lot of point in that
* refinement right now.
*/
cost_qual_eval_node(&exprcost, rte->funcexpr, root);
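
The point of the comment above (and of the matching cost_rescan() case later in this file) reduces to a small split, sketched here with invented names: the whole function evaluation is charged as startup cost, and a rescan of the cached tuplestore repays only the per-row cost.

typedef struct
{
    double  startup_cost;
    double  run_cost;
} ScanCost;

/* Illustrative only: the first scan pays the function evaluation up front
 * because the result is materialized in a tuplestore ... */
static ScanCost
functionscan_first_scan(double fn_eval_cost, double per_row_cost, double rows)
{
    ScanCost    c = {fn_eval_cost, per_row_cost * rows};

    return c;
}

/* ... while a rescan of the cached result pays no startup cost again. */
static ScanCost
functionscan_rescan(double per_row_cost, double rows)
{
    ScanCost    c = {0.0, per_row_cost * rows};

    return c;
}
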
@ -1230,7 +1230,7 @@ cost_material(Path *path,
* if it is exactly the same then there will be a cost tie between
* nestloop with A outer, materialized B inner and nestloop with B outer,
* materialized A inner. The extra cost ensures we'll prefer
* materializing the smaller rel.) Note that this is normally a good deal
* materializing the smaller rel.) Note that this is normally a good deal
* less than cpu_tuple_cost; which is OK because a Material plan node
* doesn't do qual-checking or projection, so it's got less overhead than
* most plan nodes.
@ -1526,9 +1526,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
{
run_cost += (outer_path_rows - outer_matched_rows) *
inner_rescan_run_cost / inner_path_rows;
/*
* We won't be evaluating any quals at all for these rows,
* so don't add them to ntuples.
* We won't be evaluating any quals at all for these rows, so
* don't add them to ntuples.
*/
}
else
@ -1568,10 +1569,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* Unlike other costsize functions, this routine makes one actual decision:
* whether we should materialize the inner path. We do that either because
* the inner path can't support mark/restore, or because it's cheaper to
* use an interposed Material node to handle mark/restore. When the decision
* use an interposed Material node to handle mark/restore. When the decision
* is cost-based it would be logically cleaner to build and cost two separate
* paths with and without that flag set; but that would require repeating most
* of the calculations here, which are not all that cheap. Since the choice
* of the calculations here, which are not all that cheap. Since the choice
* will not affect output pathkeys or startup cost, only total cost, there is
* no possibility of wanting to keep both paths. So it seems best to make
* the decision here and record it in the path's materialize_inner field.
@ -1826,14 +1827,15 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* Decide whether we want to materialize the inner input to shield it from
* mark/restore and performing re-fetches. Our cost model for regular
* mark/restore and performing re-fetches. Our cost model for regular
* re-fetches is that a re-fetch costs the same as an original fetch,
* which is probably an overestimate; but on the other hand we ignore the
* bookkeeping costs of mark/restore. Not clear if it's worth developing
* a more refined model. So we just need to inflate the inner run cost
* by rescanratio.
* a more refined model. So we just need to inflate the inner run cost by
* rescanratio.
*/
bare_inner_cost = inner_run_cost * rescanratio;
/*
* When we interpose a Material node the re-fetch cost is assumed to be
* just cpu_operator_cost per tuple, independently of the underlying
@ -1842,7 +1844,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* never spill to disk, since it only has to remember tuples back to the
* last mark. (If there are a huge number of duplicates, our other cost
* factors will make the path so expensive that it probably won't get
* chosen anyway.) So we don't use cost_rescan here.
* chosen anyway.) So we don't use cost_rescan here.
*
* Note: keep this estimate in sync with create_mergejoin_plan's labeling
* of the generated Material node.
@ -1853,6 +1855,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/* Prefer materializing if it looks cheaper */
if (mat_inner_cost < bare_inner_cost)
path->materialize_inner = true;
/*
* Even if materializing doesn't look cheaper, we *must* do it if the
* inner path is to be used directly (without sorting) and it doesn't
@ -1868,6 +1871,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
else if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))
path->materialize_inner = true;
/*
* Also, force materializing if the inner path is to be sorted and the
* sort is expected to spill to disk. This is because the final merge
@ -2323,10 +2327,10 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
/*
* cost_rescan
* Given a finished Path, estimate the costs of rescanning it after
* having done so the first time. For some Path types a rescan is
* having done so the first time. For some Path types a rescan is
* cheaper than an original scan (if no parameters change), and this
* function embodies knowledge about that. The default is to return
* the same costs stored in the Path. (Note that the cost estimates
* the same costs stored in the Path. (Note that the cost estimates
* actually stored in Paths are always for first scans.)
*
* This function is not currently intended to model effects such as rescans
@ -2336,23 +2340,25 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
*/
static void
cost_rescan(PlannerInfo *root, Path *path,
Cost *rescan_startup_cost, /* output parameters */
Cost *rescan_startup_cost, /* output parameters */
Cost *rescan_total_cost)
{
switch (path->pathtype)
{
case T_FunctionScan:
/*
* Currently, nodeFunctionscan.c always executes the function
* to completion before returning any rows, and caches the
* results in a tuplestore. So the function eval cost is
* all startup cost and isn't paid over again on rescans.
* However, all run costs will be paid over again.
* Currently, nodeFunctionscan.c always executes the function to
* completion before returning any rows, and caches the results in
* a tuplestore. So the function eval cost is all startup cost
* and isn't paid over again on rescans. However, all run costs
* will be paid over again.
*/
*rescan_startup_cost = 0;
*rescan_total_cost = path->total_cost - path->startup_cost;
break;
case T_HashJoin:
/*
* Assume that all of the startup cost represents hash table
* building, which we won't have to do over.
@ -2365,14 +2371,14 @@ cost_rescan(PlannerInfo *root, Path *path,
{
/*
* These plan types materialize their final result in a
* tuplestore or tuplesort object. So the rescan cost is only
* tuplestore or tuplesort object. So the rescan cost is only
* cpu_tuple_cost per tuple, unless the result is large enough
* to spill to disk.
*/
Cost run_cost = cpu_tuple_cost * path->parent->rows;
double nbytes = relation_byte_size(path->parent->rows,
path->parent->width);
long work_mem_bytes = work_mem * 1024L;
Cost run_cost = cpu_tuple_cost * path->parent->rows;
double nbytes = relation_byte_size(path->parent->rows,
path->parent->width);
long work_mem_bytes = work_mem * 1024L;
if (nbytes > work_mem_bytes)
{
@ -2389,17 +2395,17 @@ cost_rescan(PlannerInfo *root, Path *path,
case T_Sort:
{
/*
* These plan types not only materialize their results, but
* do not implement qual filtering or projection. So they
* are even cheaper to rescan than the ones above. We charge
* only cpu_operator_cost per tuple. (Note: keep that in
* sync with the run_cost charge in cost_sort, and also see
* comments in cost_material before you change it.)
* These plan types not only materialize their results, but do
* not implement qual filtering or projection. So they are
* even cheaper to rescan than the ones above. We charge only
* cpu_operator_cost per tuple. (Note: keep that in sync with
* the run_cost charge in cost_sort, and also see comments in
* cost_material before you change it.)
*/
Cost run_cost = cpu_operator_cost * path->parent->rows;
double nbytes = relation_byte_size(path->parent->rows,
path->parent->width);
long work_mem_bytes = work_mem * 1024L;
Cost run_cost = cpu_operator_cost * path->parent->rows;
double nbytes = relation_byte_size(path->parent->rows,
path->parent->width);
long work_mem_bytes = work_mem * 1024L;
if (nbytes > work_mem_bytes)
{
@ -3212,8 +3218,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
/*
* We could be looking at an expression pulled up from a subquery,
* or a ROW() representing a whole-row child Var, etc. Do what
* we can using the expression type information.
* or a ROW() representing a whole-row child Var, etc. Do what we
* can using the expression type information.
*/
int32 item_width;
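
Returning to the cost_rescan() hunks above: for the materializing node types the rescan cost is a small per-tuple charge plus sequential I/O if the stored result overflows work_mem. A sketch under those assumptions, with invented parameter names standing in for cpu_tuple_cost or cpu_operator_cost, seq_page_cost, and BLCKSZ:

#include <math.h>

/* Illustrative rescan run cost for a node that materializes its output in a
 * tuplestore or tuplesort object. */
static double
materialized_rescan_cost(double rows, double width_bytes,
                         double per_tuple_cost, double seq_page_cost,
                         long work_mem_kb, double block_size)
{
    double  run_cost = per_tuple_cost * rows;
    double  nbytes = rows * width_bytes;
    double  work_mem_bytes = (double) work_mem_kb * 1024.0;

    if (nbytes > work_mem_bytes)
    {
        /* the result spills to disk, so charge sequential reads to rescan it */
        double  npages = ceil(nbytes / block_size);

        run_cost += seq_page_cost * npages;
    }
    return run_cost;
}
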

src/backend/optimizer/path/equivclass.c

@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.22 2010/01/02 16:57:46 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.23 2010/02/26 02:00:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -115,14 +115,13 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
item2_relids = restrictinfo->right_relids;
/*
* Reject clauses of the form X=X. These are not as redundant as they
* Reject clauses of the form X=X. These are not as redundant as they
* might seem at first glance: assuming the operator is strict, this is
* really an expensive way to write X IS NOT NULL. So we must not risk
* just losing the clause, which would be possible if there is already
* a single-element EquivalenceClass containing X. The case is not
* common enough to be worth contorting the EC machinery for, so just
* reject the clause and let it be processed as a normal restriction
* clause.
* really an expensive way to write X IS NOT NULL. So we must not risk
* just losing the clause, which would be possible if there is already a
* single-element EquivalenceClass containing X. The case is not common
* enough to be worth contorting the EC machinery for, so just reject the
* clause and let it be processed as a normal restriction clause.
*/
if (equal(item1, item2))
return false; /* X=X is not a useful equivalence */
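
The reasoning in the comment above rests on three-valued logic: for a strict operator, NULL = NULL yields NULL rather than TRUE, so a qual X = X filters exactly the rows where X is NULL, just like X IS NOT NULL. A tiny self-contained illustration (not PostgreSQL code):

#include <stdio.h>

typedef enum { TVL_FALSE, TVL_TRUE, TVL_UNKNOWN } tvl;

typedef struct { int isnull; int value; } datum;

/* A strict "=" returns UNKNOWN whenever an input is NULL. */
static tvl
strict_equals(datum a, datum b)
{
    if (a.isnull || b.isnull)
        return TVL_UNKNOWN;
    return (a.value == b.value) ? TVL_TRUE : TVL_FALSE;
}

int
main(void)
{
    datum   x_null = {1, 0};
    datum   x_five = {0, 5};

    /* a WHERE clause keeps a row only if the qual evaluates to TRUE */
    printf("x = x with x NULL: %s\n",
           strict_equals(x_null, x_null) == TVL_TRUE ? "kept" : "filtered");
    printf("x = x with x = 5:  %s\n",
           strict_equals(x_five, x_five) == TVL_TRUE ? "kept" : "filtered");
    return 0;
}
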
@ -367,7 +366,7 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
* EquivalenceClass for it.
*
* sortref is the SortGroupRef of the originating SortGroupClause, if any,
* or zero if not. (It should never be zero if the expression is volatile!)
* or zero if not. (It should never be zero if the expression is volatile!)
*
* This can be used safely both before and after EquivalenceClass merging;
* since it never causes merging it does not invalidate any existing ECs
@ -448,7 +447,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
newec->ec_sortref = sortref;
newec->ec_merged = NULL;
if (newec->ec_has_volatile && sortref == 0) /* should not happen */
if (newec->ec_has_volatile && sortref == 0) /* should not happen */
elog(ERROR, "volatile EquivalenceClass has no sortref");
newem = add_eq_member(newec, expr, pull_varnos((Node *) expr),

src/backend/optimizer/path/indxpath.c

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.245 2010/01/02 16:57:46 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.246 2010/02/26 02:00:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1944,8 +1944,8 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
/* Examine each index of the relation ... */
foreach(ic, rel->indexlist)
{
IndexOptInfo *ind = (IndexOptInfo *) lfirst(ic);
int c;
IndexOptInfo *ind = (IndexOptInfo *) lfirst(ic);
int c;
/*
* If the index is not unique or if it's a partial index that doesn't
@ -1964,13 +1964,13 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
foreach(lc, restrictlist)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
Node *rexpr;
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
Node *rexpr;
/*
* The condition's equality operator must be a member of the
* index opfamily, else it is not asserting the right kind
* of equality behavior for this index. We check this first
* index opfamily, else it is not asserting the right kind of
* equality behavior for this index. We check this first
* since it's probably cheaper than match_index_to_operand().
*/
if (!list_member_oid(rinfo->mergeopfamilies, ind->opfamily[c]))

src/backend/optimizer/path/joinpath.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.129 2010/01/05 23:25:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.130 2010/02/26 02:00:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -161,7 +161,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* We already know that the clause is a binary opclause referencing only the
* rels in the current join. The point here is to check whether it has the
* form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
* rather than mixing outer and inner vars on either side. If it matches,
* rather than mixing outer and inner vars on either side. If it matches,
* we set the transient flag outer_is_left to identify which side is which.
*/
static inline bool
@ -212,7 +212,7 @@ join_is_removable(PlannerInfo *root,
/*
* Currently, we only know how to remove left joins to a baserel with
* unique indexes. We can check most of these criteria pretty trivially
* unique indexes. We can check most of these criteria pretty trivially
* to avoid doing useless extra work. But checking whether any of the
* indexes are unique would require iterating over the indexlist, so for
* now we just make sure there are indexes of some sort or other. If none
@ -225,13 +225,12 @@ join_is_removable(PlannerInfo *root,
return false;
/*
* We can't remove the join if any inner-rel attributes are used above
* the join.
* We can't remove the join if any inner-rel attributes are used above the
* join.
*
* Note that this test only detects use of inner-rel attributes in
* higher join conditions and the target list. There might be such
* attributes in pushed-down conditions at this join, too. We check
* that case below.
* Note that this test only detects use of inner-rel attributes in higher
* join conditions and the target list. There might be such attributes in
* pushed-down conditions at this join, too. We check that case below.
*
* As a micro-optimization, it seems better to start with max_attr and
* count down rather than starting with min_attr and counting up, on the
@ -249,9 +248,9 @@ join_is_removable(PlannerInfo *root,
/*
* Search for mergejoinable clauses that constrain the inner rel against
* either the outer rel or a pseudoconstant. If an operator is
* mergejoinable then it behaves like equality for some btree opclass,
* so it's what we want. The mergejoinability test also eliminates
* clauses containing volatile functions, which we couldn't depend on.
* mergejoinable then it behaves like equality for some btree opclass, so
* it's what we want. The mergejoinability test also eliminates clauses
* containing volatile functions, which we couldn't depend on.
*/
foreach(l, restrictlist)
{
@ -259,10 +258,10 @@ join_is_removable(PlannerInfo *root,
/*
* If we find a pushed-down clause, it must have come from above the
* outer join and it must contain references to the inner rel. (If
* it had only outer-rel variables, it'd have been pushed down into
* the outer rel.) Therefore, we can conclude that join removal
* is unsafe without any examination of the clause contents.
* outer join and it must contain references to the inner rel. (If it
* had only outer-rel variables, it'd have been pushed down into the
* outer rel.) Therefore, we can conclude that join removal is unsafe
* without any examination of the clause contents.
*/
if (restrictinfo->is_pushed_down)
return false;
@ -289,15 +288,15 @@ join_is_removable(PlannerInfo *root,
/*
* Note: can_join won't be set for a restriction clause, but
* mergeopfamilies will be if it has a mergejoinable operator
* and doesn't contain volatile functions.
* mergeopfamilies will be if it has a mergejoinable operator and
* doesn't contain volatile functions.
*/
if (restrictinfo->mergeopfamilies == NIL)
continue; /* not mergejoinable */
/*
* The clause certainly doesn't refer to anything but the given
* rel. If either side is pseudoconstant then we can use it.
* The clause certainly doesn't refer to anything but the given rel.
* If either side is pseudoconstant then we can use it.
*/
if (bms_is_empty(restrictinfo->left_relids))
{
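
Condensed to its essentials, the removability test spelled out in the comments above amounts to the checks below (invented struct and field names; the real code inspects per-attribute usage data, the relation's index list, and the restrictlist):

/* Illustrative summary of when a LEFT JOIN to an inner rel can be dropped. */
typedef struct
{
    int     inner_attrs_used_above_join;    /* any inner Vars needed later? */
    int     has_pushed_down_clause;         /* qual from above references inner rel */
    int     unique_index_matched_by_quals;  /* unique index fully equated to
                                             * outer exprs / pseudoconstants */
} JoinRemovalFacts;

static int
left_join_is_removable(const JoinRemovalFacts *f)
{
    if (f->inner_attrs_used_above_join)
        return 0;               /* join output is actually used */
    if (f->has_pushed_down_clause)
        return 0;               /* can't prove removal is safe */
    return f->unique_index_matched_by_quals;    /* each outer row joins to at
                                                 * most one inner row, so
                                                 * dropping the join cannot
                                                 * change the result */
}
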
@ -340,13 +339,13 @@ generate_outer_only(PlannerInfo *root, RelOptInfo *joinrel,
/*
* For the moment, replicate all of the outerrel's paths as join paths.
* Some of them might not really be interesting above the join, if they
* have sort orderings that have no real use except to do a mergejoin
* for the join we've just found we don't need. But distinguishing that
* case probably isn't worth the extra code it would take.
* have sort orderings that have no real use except to do a mergejoin for
* the join we've just found we don't need. But distinguishing that case
* probably isn't worth the extra code it would take.
*/
foreach(lc, outerrel->pathlist)
{
Path *outerpath = (Path *) lfirst(lc);
Path *outerpath = (Path *) lfirst(lc);
add_path(joinrel, (Path *)
create_noop_path(root, joinrel, outerpath));
@ -1189,8 +1188,8 @@ select_mergejoin_clauses(PlannerInfo *root,
restrictinfo->mergeopfamilies == NIL)
{
/*
* The executor can handle extra joinquals that are constants,
* but not anything else, when doing right/full merge join. (The
* The executor can handle extra joinquals that are constants, but
* not anything else, when doing right/full merge join. (The
* reason to support constants is so we can do FULL JOIN ON
* FALSE.)
*/

src/backend/optimizer/path/joinrels.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.104 2010/01/02 16:57:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.105 2010/02/26 02:00:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -373,10 +373,10 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
continue;
/*
* If it's a semijoin and we already joined the RHS to any other
* rels within either input, then we must have unique-ified the RHS
* at that point (see below). Therefore the semijoin is no longer
* relevant in this join path.
* If it's a semijoin and we already joined the RHS to any other rels
* within either input, then we must have unique-ified the RHS at that
* point (see below). Therefore the semijoin is no longer relevant in
* this join path.
*/
if (sjinfo->jointype == JOIN_SEMI)
{
@ -495,9 +495,9 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
/*
* Fail if violated some SJ's RHS and didn't match to another SJ.
* However, "matching" to a semijoin we are implementing by
* unique-ification doesn't count (think: it's really an inner join).
* Fail if violated some SJ's RHS and didn't match to another SJ. However,
* "matching" to a semijoin we are implementing by unique-ification
* doesn't count (think: it's really an inner join).
*/
if (!is_valid_inner &&
(match_sjinfo == NULL || unique_ified))

src/backend/optimizer/path/pathkeys.c

@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.100 2010/01/02 16:57:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.101 2010/02/26 02:00:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -637,12 +637,12 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
0);
/*
* Note: it might look funny to be setting sortref = 0 for
* a reference to a volatile sub_eclass. However, the
* expression is *not* volatile in the outer query: it's
* just a Var referencing whatever the subquery emitted.
* (IOW, the outer query isn't going to re-execute the
* volatile expression itself.) So this is okay.
* Note: it might look funny to be setting sortref = 0 for a
* reference to a volatile sub_eclass. However, the
* expression is *not* volatile in the outer query: it's just
* a Var referencing whatever the subquery emitted. (IOW, the
* outer query isn't going to re-execute the volatile
* expression itself.) So this is okay.
*/
outer_ec =
get_eclass_for_sort_expr(root,
@ -1000,7 +1000,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* It's possible that multiple matching clauses might have different
* ECs on the other side, in which case the order we put them into our
* result makes a difference in the pathkeys required for the other
* input path. However this routine hasn't got any info about which
* input path. However this routine hasn't got any info about which
* order would be best, so we don't worry about that.
*
* It's also possible that the selected mergejoin clauses produce

src/backend/optimizer/plan/createplan.c

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.272 2010/02/19 21:49:10 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.273 2010/02/26 02:00:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1694,8 +1694,8 @@ create_mergejoin_plan(PlannerInfo *root,
innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
/*
* If specified, add a materialize node to shield the inner plan from
* the need to handle mark/restore.
* If specified, add a materialize node to shield the inner plan from the
* need to handle mark/restore.
*/
if (best_path->materialize_inner)
{
@ -1754,9 +1754,9 @@ create_mergejoin_plan(PlannerInfo *root,
Assert(ieclass != NULL);
/*
* For debugging purposes, we check that the eclasses match the
* paths' pathkeys. In typical cases the merge clauses are one-to-one
* with the pathkeys, but when dealing with partially redundant query
* For debugging purposes, we check that the eclasses match the paths'
* pathkeys. In typical cases the merge clauses are one-to-one with
* the pathkeys, but when dealing with partially redundant query
* conditions, we might have clauses that re-reference earlier path
* keys. The case that we need to reject is where a pathkey is
* entirely skipped over.
@ -1861,9 +1861,9 @@ create_mergejoin_plan(PlannerInfo *root,
}
/*
* Note: it is not an error if we have additional pathkey elements
* (i.e., lop or lip isn't NULL here). The input paths might be
* better-sorted than we need for the current mergejoin.
* Note: it is not an error if we have additional pathkey elements (i.e.,
* lop or lip isn't NULL here). The input paths might be better-sorted
* than we need for the current mergejoin.
*/
/*
@ -3751,7 +3751,7 @@ make_result(PlannerInfo *root,
* Build a ModifyTable plan node
*
* Currently, we don't charge anything extra for the actual table modification
* work, nor for the RETURNING expressions if any. It would only be window
* work, nor for the RETURNING expressions if any. It would only be window
* dressing, since these are always top-level nodes and there is no way for
* the costs to change any higher-level planning choices. But we might want
* to make it look better sometime.
@ -3781,7 +3781,7 @@ make_modifytable(CmdType operation, List *resultRelations,
{
Plan *subplan = (Plan *) lfirst(subnode);
if (subnode == list_head(subplans)) /* first node? */
if (subnode == list_head(subplans)) /* first node? */
plan->startup_cost = subplan->startup_cost;
plan->total_cost += subplan->total_cost;
plan->plan_rows += subplan->plan_rows;
@ -3798,8 +3798,8 @@ make_modifytable(CmdType operation, List *resultRelations,
/*
* Set up the visible plan targetlist as being the same as the first
* RETURNING list. This is for the use of EXPLAIN; the executor won't
* pay any attention to the targetlist.
* RETURNING list. This is for the use of EXPLAIN; the executor won't pay
* any attention to the targetlist.
*/
if (returningLists)
node->plan.targetlist = copyObject(linitial(returningLists));

src/backend/optimizer/plan/initsplan.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.157 2010/01/02 16:57:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.158 2010/02/26 02:00:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -560,8 +560,8 @@ make_outerjoininfo(PlannerInfo *root,
* FOR UPDATE applied to a view. Only after rewriting and flattening do
* we know whether the view contains an outer join.
*
* We use the original RowMarkClause list here; the PlanRowMark list
* would list everything.
* We use the original RowMarkClause list here; the PlanRowMark list would
* list everything.
*/
foreach(l, root->parse->rowMarks)
{

src/backend/optimizer/plan/planner.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.265 2010/02/12 17:33:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.266 2010/02/26 02:00:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -370,7 +370,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
}
/*
* Preprocess RowMark information. We need to do this after subquery
* Preprocess RowMark information. We need to do this after subquery
* pullup (so that all non-inherited RTEs are present) and before
* inheritance expansion (so that the info is available for
* expand_inherited_tables to examine and modify).
@ -525,8 +525,8 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
/* If it's not SELECT, we need a ModifyTable node */
if (parse->commandType != CMD_SELECT)
{
List *returningLists;
List *rowMarks;
List *returningLists;
List *rowMarks;
/*
* Deal with the RETURNING clause if any. It's convenient to pass
@ -542,7 +542,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
rlist = set_returning_clause_references(root->glob,
parse->returningList,
plan,
parse->resultRelation);
parse->resultRelation);
returningLists = list_make1(rlist);
}
else
@ -559,7 +559,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
rowMarks = root->rowMarks;
plan = (Plan *) make_modifytable(parse->commandType,
copyObject(root->resultRelations),
copyObject(root->resultRelations),
list_make1(plan),
returningLists,
rowMarks,
@ -614,11 +614,11 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
* Simplify constant expressions.
*
* Note: an essential effect of this is to convert named-argument function
* calls to positional notation and insert the current actual values
* of any default arguments for functions. To ensure that happens, we
* *must* process all expressions here. Previous PG versions sometimes
* skipped const-simplification if it didn't seem worth the trouble, but
* we can't do that anymore.
* calls to positional notation and insert the current actual values of
* any default arguments for functions. To ensure that happens, we *must*
* process all expressions here. Previous PG versions sometimes skipped
* const-simplification if it didn't seem worth the trouble, but we can't
* do that anymore.
*
* Note: this also flattens nested AND and OR expressions into N-argument
* form. All processing of a qual expression after this point must be
@ -783,7 +783,7 @@ inheritance_planner(PlannerInfo *root)
List *rlist;
rlist = set_returning_clause_references(root->glob,
subroot.parse->returningList,
subroot.parse->returningList,
subplan,
appinfo->child_relid);
returningLists = lappend(returningLists, rlist);
@ -796,8 +796,8 @@ inheritance_planner(PlannerInfo *root)
root->query_pathkeys = NIL;
/*
* If we managed to exclude every child rel, return a dummy plan;
* it doesn't even need a ModifyTable node.
* If we managed to exclude every child rel, return a dummy plan; it
* doesn't even need a ModifyTable node.
*/
if (subplans == NIL)
{
@ -825,9 +825,9 @@ inheritance_planner(PlannerInfo *root)
parse->rtable = rtable;
/*
* If there was a FOR UPDATE/SHARE clause, the LockRows node will
* have dealt with fetching non-locked marked rows, else we need
* to have ModifyTable do that.
* If there was a FOR UPDATE/SHARE clause, the LockRows node will have
* dealt with fetching non-locked marked rows, else we need to have
* ModifyTable do that.
*/
if (parse->rowMarks)
rowMarks = NIL;
@ -837,7 +837,7 @@ inheritance_planner(PlannerInfo *root)
/* And last, tack on a ModifyTable node to do the UPDATE/DELETE work */
return (Plan *) make_modifytable(parse->commandType,
copyObject(root->resultRelations),
subplans,
subplans,
returningLists,
rowMarks,
SS_assign_special_param(root));
@ -1121,8 +1121,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
}
else
{
path_rows = 1; /* assume non-set result */
path_width = 100; /* arbitrary */
path_rows = 1; /* assume non-set result */
path_width = 100; /* arbitrary */
}
if (parse->groupClause)
@ -1424,8 +1424,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* WindowFuncs. It's probably not worth trying to optimize that
* though.) We also need any volatile sort expressions, because
* make_sort_from_pathkeys won't add those on its own, and anyway
* we want them evaluated only once at the bottom of the stack.
* As we climb up the stack, we add outputs for the WindowFuncs
* we want them evaluated only once at the bottom of the stack. As
* we climb up the stack, we add outputs for the WindowFuncs
* computed at each level. Also, each input tlist has to present
* all the columns needed to sort the data for the next WindowAgg
* step. That's handled internally by make_sort_from_pathkeys,
@ -1659,16 +1659,17 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
}
/*
* If there is a FOR UPDATE/SHARE clause, add the LockRows node.
* (Note: we intentionally test parse->rowMarks not root->rowMarks here.
* If there are only non-locking rowmarks, they should be handled by
* the ModifyTable node instead.)
* If there is a FOR UPDATE/SHARE clause, add the LockRows node. (Note: we
* intentionally test parse->rowMarks not root->rowMarks here. If there
* are only non-locking rowmarks, they should be handled by the
* ModifyTable node instead.)
*/
if (parse->rowMarks)
{
result_plan = (Plan *) make_lockrows(result_plan,
root->rowMarks,
SS_assign_special_param(root));
/*
* The result can no longer be assumed sorted, since locking might
* cause the sort key columns to be replaced with new values.
@ -1811,9 +1812,9 @@ preprocess_rowmarks(PlannerInfo *root)
}
/*
* We need to have rowmarks for all base relations except the target.
* We make a bitmapset of all base rels and then remove the items we
* don't need or have FOR UPDATE/SHARE marks for.
* We need to have rowmarks for all base relations except the target. We
* make a bitmapset of all base rels and then remove the items we don't
* need or have FOR UPDATE/SHARE marks for.
*/
rels = get_base_rel_indexes((Node *) parse->jointree);
if (parse->resultRelation)
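
A toy version of the "start from all base rels, then knock out the target and the explicitly marked rels" bookkeeping described above, using a plain bitmask in place of a Bitmapset (all values invented):

#include <stdio.h>

int
main(void)
{
    unsigned int rels = 0;
    int     result_relation = 2;    /* the UPDATE/DELETE target */
    int     marked_rel = 3;         /* already has FOR UPDATE/SHARE */

    for (int rti = 1; rti <= 4; rti++)  /* pretend the query has base rels 1..4 */
        rels |= 1u << rti;

    rels &= ~(1u << result_relation);
    rels &= ~(1u << marked_rel);

    for (int rti = 1; rti <= 4; rti++)
        if (rels & (1u << rti))
            printf("rel %d still needs a non-locking rowmark\n", rti);
    return 0;
}
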
@ -1831,16 +1832,16 @@ preprocess_rowmarks(PlannerInfo *root)
/*
* Currently, it is syntactically impossible to have FOR UPDATE
* applied to an update/delete target rel. If that ever becomes
* applied to an update/delete target rel. If that ever becomes
* possible, we should drop the target from the PlanRowMark list.
*/
Assert(rc->rti != parse->resultRelation);
/*
* Ignore RowMarkClauses for subqueries; they aren't real tables
* and can't support true locking. Subqueries that got flattened
* into the main query should be ignored completely. Any that didn't
* will get ROW_MARK_COPY items in the next loop.
* Ignore RowMarkClauses for subqueries; they aren't real tables and
* can't support true locking. Subqueries that got flattened into the
* main query should be ignored completely. Any that didn't will get
* ROW_MARK_COPY items in the next loop.
*/
if (rte->rtekind != RTE_RELATION)
continue;
@ -1883,7 +1884,7 @@ preprocess_rowmarks(PlannerInfo *root)
newrc->markType = ROW_MARK_REFERENCE;
else
newrc->markType = ROW_MARK_COPY;
newrc->noWait = false; /* doesn't matter */
newrc->noWait = false; /* doesn't matter */
newrc->isParent = false;
/* attnos will be assigned in preprocess_targetlist */
newrc->ctidAttNo = InvalidAttrNumber;
@ -2196,7 +2197,7 @@ choose_hashed_grouping(PlannerInfo *root,
/*
* Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
* aggregates. (Doing so would imply storing *all* the input values in
* aggregates. (Doing so would imply storing *all* the input values in
* the hash table, and/or running many sorts in parallel, either of which
* seems like a certain loser.)
*/
@ -2364,8 +2365,8 @@ choose_hashed_distinct(PlannerInfo *root,
Path sorted_p;
/*
* If we have a sortable DISTINCT ON clause, we always use sorting.
* This enforces the expected behavior of DISTINCT ON.
* If we have a sortable DISTINCT ON clause, we always use sorting. This
* enforces the expected behavior of DISTINCT ON.
*/
can_sort = grouping_is_sortable(parse->distinctClause);
if (can_sort && parse->hasDistinctOn)

src/backend/optimizer/plan/setrefs.c

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.159 2010/02/14 18:42:15 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.160 2010/02/26 02:00:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -431,8 +431,8 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
/*
* Like the plan types above, LockRows doesn't evaluate its
* tlist or quals. But we have to fix up the RT indexes
* in its rowmarks.
* tlist or quals. But we have to fix up the RT indexes in
* its rowmarks.
*/
set_dummy_tlist_references(plan, rtoffset);
Assert(splan->plan.qual == NIL);
@ -471,7 +471,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_WindowAgg:
{
WindowAgg *wplan = (WindowAgg *) plan;
WindowAgg *wplan = (WindowAgg *) plan;
set_upper_references(glob, plan, rtoffset);
@ -1514,7 +1514,7 @@ search_indexed_tlist_for_sortgroupref(Node *node,
exprType((Node *) tle->expr),
exprTypmod((Node *) tle->expr),
0);
newvar->varnoold = 0; /* wasn't ever a plain Var */
newvar->varnoold = 0; /* wasn't ever a plain Var */
newvar->varoattno = 0;
return newvar;
}

src/backend/optimizer/plan/subselect.c

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.160 2010/02/14 18:42:15 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.161 2010/02/26 02:00:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1766,8 +1766,8 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
* output parameters of any initPlans. (We do not include output
* parameters of regular subplans. Those should only appear within the
* testexpr of SubPlan nodes, and are taken care of locally within
* finalize_primnode. Likewise, special parameters that are generated
* by nodes such as ModifyTable are handled within finalize_plan.)
* finalize_primnode. Likewise, special parameters that are generated by
* nodes such as ModifyTable are handled within finalize_plan.)
*
* Note: this is a bit overly generous since some parameters of upper
* query levels might belong to query subtrees that don't include this
@ -1944,14 +1944,14 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
* You might think we should add the node's cteParam to
* paramids, but we shouldn't because that param is just a
* linkage mechanism for multiple CteScan nodes for the same
* CTE; it is never used for changed-param signaling. What
* we have to do instead is to find the referenced CTE plan
* and incorporate its external paramids, so that the correct
* CTE; it is never used for changed-param signaling. What we
* have to do instead is to find the referenced CTE plan and
* incorporate its external paramids, so that the correct
* things will happen if the CTE references outer-level
* variables. See test cases for bug #4902.
*/
int plan_id = ((CteScan *) plan)->ctePlanId;
Plan *cteplan;
int plan_id = ((CteScan *) plan)->ctePlanId;
Plan *cteplan;
/* so, do this ... */
if (plan_id < 1 || plan_id > list_length(root->glob->subplans))

src/backend/optimizer/prep/prepjointree.c

@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.70 2010/01/02 16:57:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.71 2010/02/26 02:00:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,13 +39,13 @@
typedef struct pullup_replace_vars_context
{
PlannerInfo *root;
List *targetlist; /* tlist of subquery being pulled up */
RangeTblEntry *target_rte; /* RTE of subquery */
bool *outer_hasSubLinks; /* -> outer query's hasSubLinks */
int varno; /* varno of subquery */
bool need_phvs; /* do we need PlaceHolderVars? */
bool wrap_non_vars; /* do we need 'em on *all* non-Vars? */
Node **rv_cache; /* cache for results with PHVs */
List *targetlist; /* tlist of subquery being pulled up */
RangeTblEntry *target_rte; /* RTE of subquery */
bool *outer_hasSubLinks; /* -> outer query's hasSubLinks */
int varno; /* varno of subquery */
bool need_phvs; /* do we need PlaceHolderVars? */
bool wrap_non_vars; /* do we need 'em on *all* non-Vars? */
Node **rv_cache; /* cache for results with PHVs */
} pullup_replace_vars_context;
typedef struct reduce_outer_joins_state
@ -79,7 +79,7 @@ static void replace_vars_in_jointree(Node *jtnode,
pullup_replace_vars_context *context,
JoinExpr *lowest_outer_join);
static Node *pullup_replace_vars(Node *expr,
pullup_replace_vars_context *context);
pullup_replace_vars_context *context);
static Node *pullup_replace_vars_callback(Var *var,
replace_rte_variables_context *context);
static reduce_outer_joins_state *reduce_outer_joins_pass1(Node *jtnode);
@ -708,7 +708,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* insert into the top query, but if we are under an outer join then
* non-nullable items may have to be turned into PlaceHolderVars. If we
* are dealing with an appendrel member then anything that's not a simple
* Var has to be turned into a PlaceHolderVar. Set up appropriate context
* Var has to be turned into a PlaceHolderVar. Set up appropriate context
* data for pullup_replace_vars.
*/
rvcontext.root = root;
@ -729,7 +729,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* replace any of the jointree structure. (This'd be a lot cleaner if we
* could use query_tree_mutator.) We have to use PHVs in the targetList,
* returningList, and havingQual, since those are certainly above any
* outer join. replace_vars_in_jointree tracks its location in the
* outer join. replace_vars_in_jointree tracks its location in the
* jointree and uses PHVs or not appropriately.
*/
parse->targetList = (List *)
@ -751,7 +751,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
foreach(lc, root->append_rel_list)
{
AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);
bool save_need_phvs = rvcontext.need_phvs;
bool save_need_phvs = rvcontext.need_phvs;
if (appinfo == containing_appendrel)
rvcontext.need_phvs = false;
@ -796,9 +796,8 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* We also have to fix the relid sets of any PlaceHolderVar nodes in the
* parent query. (This could perhaps be done by pullup_replace_vars(),
* but it seems cleaner to use two passes.) Note in particular that any
* PlaceHolderVar nodes just created by pullup_replace_vars()
* will be adjusted, so having created them with the subquery's varno is
* correct.
* PlaceHolderVar nodes just created by pullup_replace_vars() will be
* adjusted, so having created them with the subquery's varno is correct.
*
* Likewise, relids appearing in AppendRelInfo nodes have to be fixed. We
* already checked that this won't require introducing multiple subrelids
@ -1033,8 +1032,8 @@ is_simple_subquery(Query *subquery)
*
* We also don't pull up a subquery that has explicit FOR UPDATE/SHARE
* clauses, because pullup would cause the locking to occur semantically
* higher than it should. Implicit FOR UPDATE/SHARE is okay because
* in that case the locking was originally declared in the upper query
* higher than it should. Implicit FOR UPDATE/SHARE is okay because in
* that case the locking was originally declared in the upper query
* anyway.
*/
if (subquery->hasAggs ||
@ -1227,7 +1226,7 @@ replace_vars_in_jointree(Node *jtnode,
else if (IsA(jtnode, JoinExpr))
{
JoinExpr *j = (JoinExpr *) jtnode;
bool save_need_phvs = context->need_phvs;
bool save_need_phvs = context->need_phvs;
if (j == lowest_outer_join)
{
@ -1310,7 +1309,7 @@ pullup_replace_vars_callback(Var *var,
* expansion with varlevelsup = 0, and then adjust if needed.
*/
expandRTE(rcon->target_rte,
var->varno, 0 /* not varlevelsup */, var->location,
var->varno, 0 /* not varlevelsup */ , var->location,
(var->vartype != RECORDOID),
&colnames, &fields);
/* Adjust the generated per-field Vars, but don't insert PHVs */
@ -1327,11 +1326,11 @@ pullup_replace_vars_callback(Var *var,
newnode = (Node *) rowexpr;
/*
* Insert PlaceHolderVar if needed. Notice that we are wrapping
* one PlaceHolderVar around the whole RowExpr, rather than putting
* one around each element of the row. This is because we need
* the expression to yield NULL, not ROW(NULL,NULL,...) when it
* is forced to null by an outer join.
* Insert PlaceHolderVar if needed. Notice that we are wrapping one
* PlaceHolderVar around the whole RowExpr, rather than putting one
* around each element of the row. This is because we need the
* expression to yield NULL, not ROW(NULL,NULL,...) when it is forced
* to null by an outer join.
*/
if (rcon->need_phvs)
{
@ -1359,7 +1358,7 @@ pullup_replace_vars_callback(Var *var,
/* Insert PlaceHolderVar if needed */
if (rcon->need_phvs)
{
bool wrap;
bool wrap;
if (newnode && IsA(newnode, Var) &&
((Var *) newnode)->varlevelsup == 0)
@ -1402,8 +1401,8 @@ pullup_replace_vars_callback(Var *var,
/*
* Cache it if possible (ie, if the attno is in range, which it
* probably always should be). We can cache the value even if
* we decided we didn't need a PHV, since this result will be
* probably always should be). We can cache the value even if we
* decided we didn't need a PHV, since this result will be
* suitable for any request that has need_phvs.
*/
if (varattno > InvalidAttrNumber &&
@ -1837,7 +1836,7 @@ reduce_outer_joins_pass2(Node *jtnode,
* top query could (yet) contain such a reference.
*
* NOTE: although this has the form of a walker, we cheat and modify the
* nodes in-place. This should be OK since the tree was copied by
* nodes in-place. This should be OK since the tree was copied by
* pullup_replace_vars earlier. Avoid scribbling on the original values of
* the bitmapsets, though, because expression_tree_mutator doesn't copy those.
*/

src/backend/optimizer/prep/preptlist.c

@ -17,7 +17,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.99 2010/01/02 16:57:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.100 2010/02/26 02:00:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -109,11 +109,10 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
}
/*
* Add necessary junk columns for rowmarked rels. These values are
* needed for locking of rels selected FOR UPDATE/SHARE, and to do
* EvalPlanQual rechecking. While we are at it, store these junk attnos
* in the PlanRowMark list so that we don't have to redetermine them
* at runtime.
* Add necessary junk columns for rowmarked rels. These values are needed
* for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
* rechecking. While we are at it, store these junk attnos in the
* PlanRowMark list so that we don't have to redetermine them at runtime.
*/
foreach(lc, root->rowMarks)
{

src/backend/optimizer/prep/prepunion.c

@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.180 2010/02/01 19:28:56 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.181 2010/02/26 02:00:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1196,8 +1196,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
/*
* If parent relation is selected FOR UPDATE/SHARE, we need to mark its
* PlanRowMark as isParent = true, and generate a new PlanRowMark for
* each child.
* PlanRowMark as isParent = true, and generate a new PlanRowMark for each
* child.
*/
if (oldrc)
oldrc->isParent = true;
@ -1244,7 +1244,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
childrte = copyObject(rte);
childrte->relid = childOID;
childrte->inh = false;
childrte->requiredPerms = 0; /* do not require permissions on child tables */
childrte->requiredPerms = 0; /* do not require permissions on child
* tables */
parse->rtable = lappend(parse->rtable, childrte);
childRTindex = list_length(parse->rtable);

src/backend/optimizer/util/clauses.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.285 2010/02/14 18:42:15 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.286 2010/02/26 02:00:46 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@ -106,7 +106,7 @@ static List *add_function_defaults(List *args, Oid result_type,
eval_const_expressions_context *context);
static List *fetch_function_defaults(HeapTuple func_tuple);
static void recheck_cast_function_args(List *args, Oid result_type,
HeapTuple func_tuple);
HeapTuple func_tuple);
static Expr *evaluate_function(Oid funcid,
Oid result_type, int32 result_typmod, List *args,
HeapTuple func_tuple,
@ -2127,14 +2127,14 @@ eval_const_expressions_mutator(Node *node,
ListCell *lc;
/*
* Reduce constants in the FuncExpr's arguments, and check to see
* if there are any named args.
* Reduce constants in the FuncExpr's arguments, and check to see if
* there are any named args.
*/
args = NIL;
has_named_args = false;
foreach(lc, expr->args)
{
Node *arg = (Node *) lfirst(lc);
Node *arg = (Node *) lfirst(lc);
arg = eval_const_expressions_mutator(arg, context);
if (IsA(arg, NamedArgExpr))
@ -2158,8 +2158,8 @@ eval_const_expressions_mutator(Node *node,
/*
* The expression cannot be simplified any further, so build and
* return a replacement FuncExpr node using the possibly-simplified
* arguments. Note that we have also converted the argument list
* to positional notation.
* arguments. Note that we have also converted the argument list to
* positional notation.
*/
newexpr = makeNode(FuncExpr);
newexpr->funcid = expr->funcid;
@ -3219,16 +3219,16 @@ simplify_boolean_equality(Oid opno, List *args)
if (opno == BooleanEqualOperator)
{
if (DatumGetBool(((Const *) leftop)->constvalue))
return rightop; /* true = foo */
return rightop; /* true = foo */
else
return make_notclause(rightop); /* false = foo */
return make_notclause(rightop); /* false = foo */
}
else
{
if (DatumGetBool(((Const *) leftop)->constvalue))
return make_notclause(rightop); /* true <> foo */
return make_notclause(rightop); /* true <> foo */
else
return rightop; /* false <> foo */
return rightop; /* false <> foo */
}
}
if (rightop && IsA(rightop, Const))
@ -3237,16 +3237,16 @@ simplify_boolean_equality(Oid opno, List *args)
if (opno == BooleanEqualOperator)
{
if (DatumGetBool(((Const *) rightop)->constvalue))
return leftop; /* foo = true */
return leftop; /* foo = true */
else
return make_notclause(leftop); /* foo = false */
return make_notclause(leftop); /* foo = false */
}
else
{
if (DatumGetBool(((Const *) rightop)->constvalue))
return make_notclause(leftop); /* foo <> true */
return make_notclause(leftop); /* foo <> true */
else
return leftop; /* foo <> false */
return leftop; /* foo <> false */
}
}
return NULL;
@ -3340,7 +3340,7 @@ reorder_function_arguments(List *args, Oid result_type, HeapTuple func_tuple,
i = 0;
foreach(lc, args)
{
Node *arg = (Node *) lfirst(lc);
Node *arg = (Node *) lfirst(lc);
if (!IsA(arg, NamedArgExpr))
{
@ -3358,13 +3358,13 @@ reorder_function_arguments(List *args, Oid result_type, HeapTuple func_tuple,
}
/*
* Fetch default expressions, if needed, and insert into array at
* proper locations (they aren't necessarily consecutive or all used)
* Fetch default expressions, if needed, and insert into array at proper
* locations (they aren't necessarily consecutive or all used)
*/
defargnumbers = NULL;
if (nargsprovided < pronargs)
{
List *defaults = fetch_function_defaults(func_tuple);
List *defaults = fetch_function_defaults(func_tuple);
i = pronargs - funcform->pronargdefaults;
foreach(lc, defaults)
@ -3390,10 +3390,10 @@ reorder_function_arguments(List *args, Oid result_type, HeapTuple func_tuple,
recheck_cast_function_args(args, result_type, func_tuple);
/*
* Lastly, we have to recursively simplify the defaults we just added
* (but don't recurse on the args passed in, as we already did those).
* This isn't merely an optimization, it's *necessary* since there could
* be functions with named or defaulted arguments down in there.
* Lastly, we have to recursively simplify the defaults we just added (but
* don't recurse on the args passed in, as we already did those). This
* isn't merely an optimization, it's *necessary* since there could be
* functions with named or defaulted arguments down in there.
*
* Note that we do this last in hopes of simplifying any typecasts that
* were added by recheck_cast_function_args --- there shouldn't be any new
@ -3448,10 +3448,10 @@ add_function_defaults(List *args, Oid result_type, HeapTuple func_tuple,
recheck_cast_function_args(args, result_type, func_tuple);
/*
* Lastly, we have to recursively simplify the defaults we just added
* (but don't recurse on the args passed in, as we already did those).
* This isn't merely an optimization, it's *necessary* since there could
* be functions with named or defaulted arguments down in there.
* Lastly, we have to recursively simplify the defaults we just added (but
* don't recurse on the args passed in, as we already did those). This
* isn't merely an optimization, it's *necessary* since there could be
* functions with named or defaulted arguments down in there.
*
* Note that we do this last in hopes of simplifying any typecasts that
* were added by recheck_cast_function_args --- there shouldn't be any new
@ -4191,11 +4191,11 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
oldcxt = MemoryContextSwitchTo(mycxt);
/*
* Run eval_const_expressions on the function call. This is necessary
* to ensure that named-argument notation is converted to positional
* notation and any default arguments are inserted. It's a bit of
* overkill for the arguments, since they'll get processed again later,
* but no harm will be done.
* Run eval_const_expressions on the function call. This is necessary to
* ensure that named-argument notation is converted to positional notation
* and any default arguments are inserted. It's a bit of overkill for the
* arguments, since they'll get processed again later, but no harm will be
* done.
*/
fexpr = (FuncExpr *) eval_const_expressions(root, (Node *) fexpr);
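
The reorder_function_arguments() hunks above, and the comment here about converting named-argument notation to positional form with defaults inserted, describe the same bookkeeping. A toy illustration of it (none of this is PostgreSQL code):

#include <stdio.h>
#include <string.h>

#define NARGS 4

/* Put named arguments into their positional slots, then fill still-empty
 * slots from the defaults, which are not necessarily consecutive or all used. */
int
main(void)
{
    const char *param_names[NARGS] = {"a", "b", "c", "d"};
    const char *defaults[NARGS]    = {NULL, "b_default", "c_default", NULL};
    const char *slot[NARGS]        = {NULL, NULL, NULL, NULL};

    /* a call written as  f(10, d => 40)  */
    slot[0] = "10";                     /* positional argument */
    for (int i = 0; i < NARGS; i++)     /* named argument d => 40 */
        if (strcmp(param_names[i], "d") == 0)
            slot[i] = "40";

    /* insert defaults into the remaining slots */
    for (int i = 0; i < NARGS; i++)
        if (slot[i] == NULL)
            slot[i] = defaults[i];

    for (int i = 0; i < NARGS; i++)
        printf("%s = %s\n", param_names[i], slot[i]);
    return 0;
}
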

src/backend/optimizer/util/pathnode.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.156 2010/01/02 16:57:48 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.157 2010/02/26 02:00:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1224,7 +1224,7 @@ create_noop_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
{
NoOpPath *pathnode = makeNode(NoOpPath);
pathnode->path.pathtype = T_Join; /* by convention */
pathnode->path.pathtype = T_Join; /* by convention */
pathnode->path.parent = rel;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost;

src/backend/optimizer/util/predtest.c

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.32 2010/02/25 20:59:53 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.33 2010/02/26 02:00:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -657,10 +657,10 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
/*
* If A is a strong NOT-clause, A R=> B if B equals A's arg
*
* We cannot make the stronger conclusion that B is refuted if
* B implies A's arg; that would only prove that B is not-TRUE,
* not that it's not NULL either. Hence use equal() rather than
* predicate_implied_by_recurse(). We could do the latter if we
* We cannot make the stronger conclusion that B is refuted if B
* implies A's arg; that would only prove that B is not-TRUE, not
* that it's not NULL either. Hence use equal() rather than
* predicate_implied_by_recurse(). We could do the latter if we
* ever had a need for the weak form of refutation.
*/
not_arg = extract_strong_not_arg(clause);
@ -1678,7 +1678,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
else if (OidIsValid(clause_op_negator))
{
clause_tuple = SearchSysCache2(AMOPOPID,
ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(opfamily_id));
if (HeapTupleIsValid(clause_tuple))
{

src/backend/optimizer/util/relnode.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.97 2010/01/02 16:57:48 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.98 2010/02/26 02:00:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -404,9 +404,9 @@ build_join_rel(PlannerInfo *root,
/*
* Also, if dynamic-programming join search is active, add the new joinrel
* to the appropriate sublist. Note: you might think the Assert on
* number of members should be for equality, but some of the level 1
* rels might have been joinrels already, so we can only assert <=.
* to the appropriate sublist. Note: you might think the Assert on number
* of members should be for equality, but some of the level 1 rels might
* have been joinrels already, so we can only assert <=.
*/
if (root->join_rel_level)
{

src/backend/optimizer/util/restrictinfo.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.62 2010/01/02 16:57:48 momjian Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.63 2010/02/26 02:00:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -292,15 +292,15 @@ make_restrictinfos_from_actual_clauses(PlannerInfo *root,
foreach(l, clause_list)
{
Expr *clause = (Expr *) lfirst(l);
bool pseudoconstant;
Expr *clause = (Expr *) lfirst(l);
bool pseudoconstant;
RestrictInfo *rinfo;
/*
* It's pseudoconstant if it contains no Vars and no volatile
* functions. We probably can't see any sublinks here, so
* contain_var_clause() would likely be enough, but for safety
* use contain_vars_of_level() instead.
* contain_var_clause() would likely be enough, but for safety use
* contain_vars_of_level() instead.
*/
pseudoconstant =
!contain_vars_of_level((Node *) clause, 0) &&