
Standard pgindent run for 8.1.

Bruce Momjian
2005-10-15 02:49:52 +00:00
parent 790c01d280
commit 1dc3498251
770 changed files with 34334 additions and 32507 deletions

src/backend/optimizer/path/indxpath.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.190 2005/09/24 22:54:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,9 +48,9 @@
static List *find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
List *clauses, List *outer_clauses,
bool istoplevel, bool isjoininner,
Relids outer_relids);
List *clauses, List *outer_clauses,
bool istoplevel, bool isjoininner,
Relids outer_relids);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths);
static int bitmap_path_comparator(const void *a, const void *b);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths);
@@ -62,25 +62,25 @@ static Oid indexable_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Relids indexable_outerrelids(RelOptInfo *rel);
static bool matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel,
Relids outer_relids);
Relids outer_relids);
static List *find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
Relids outer_relids, bool isouterjoin);
Relids outer_relids, bool isouterjoin);
static ScanDirection match_variant_ordering(PlannerInfo *root,
IndexOptInfo *index,
List *restrictclauses);
IndexOptInfo *index,
List *restrictclauses);
static List *identify_ignorable_ordering_cols(PlannerInfo *root,
IndexOptInfo *index,
List *restrictclauses);
IndexOptInfo *index,
List *restrictclauses);
static bool match_index_to_query_keys(PlannerInfo *root,
IndexOptInfo *index,
ScanDirection indexscandir,
List *ignorables);
IndexOptInfo *index,
ScanDirection indexscandir,
List *ignorables);
static bool match_boolean_index_clause(Node *clause, int indexcol,
IndexOptInfo *index);
IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Expr *expand_boolean_index_clause(Node *clause, int indexcol,
IndexOptInfo *index);
IndexOptInfo *index);
static List *expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass);
static List *prefix_quals(Node *leftop, Oid opclass,
Const *prefix, Pattern_Prefix_Status pstatus);
@@ -153,8 +153,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
true, false, NULL);
/*
* We can submit them all to add_path. (This generates access paths for
* plain IndexScan plans.) However, for the next step we will only want
* We can submit them all to add_path. (This generates access paths for
* plain IndexScan plans.) However, for the next step we will only want
* the ones that have some selectivity; we must discard anything that was
* generated solely for ordering purposes.
*/
@@ -180,8 +180,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
bitindexpaths = list_concat(bitindexpaths, indexpaths);
/*
* If we found anything usable, generate a BitmapHeapPath for the
* most promising combination of bitmap index paths.
* If we found anything usable, generate a BitmapHeapPath for the most
* promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
@@ -254,19 +254,19 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
bool index_is_ordered;
/*
* Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK; otherwise, if we
* are at top level we know it's not OK (since predOK is exactly
* whether its predicate could be proven from the toplevel clauses).
* Otherwise, we have to test whether the added clauses are
* sufficient to imply the predicate. If so, we could use
* the index in the current context.
* Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK; otherwise, if we are
* at top level we know it's not OK (since predOK is exactly whether
* its predicate could be proven from the toplevel clauses).
* Otherwise, we have to test whether the added clauses are sufficient
* to imply the predicate. If so, we could use the index in the
* current context.
*
* We set useful_predicate to true iff the predicate was proven
* using the current set of clauses. This is needed to prevent
* matching a predOK index to an arm of an OR, which would be
* a legal but pointlessly inefficient plan. (A better plan will
* be generated by just scanning the predOK index alone, no OR.)
* We set useful_predicate to true iff the predicate was proven using the
* current set of clauses. This is needed to prevent matching a
* predOK index to an arm of an OR, which would be a legal but
* pointlessly inefficient plan. (A better plan will be generated by
* just scanning the predOK index alone, no OR.)
*/
useful_predicate = false;
if (index->indpred != NIL)
@@ -282,7 +282,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
else
{
if (istoplevel)
continue; /* no point in trying to prove it */
continue; /* no point in trying to prove it */
/* Form all_clauses if not done already */
if (all_clauses == NIL)
@@ -290,7 +290,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
outer_clauses);
if (!predicate_implied_by(index->indpred, all_clauses))
continue; /* can't use it at all */
continue; /* can't use it at all */
if (!predicate_implied_by(index->indpred, outer_clauses))
useful_predicate = true;
@@ -309,17 +309,17 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
&found_clause);
/*
* Not all index AMs support scans with no restriction clauses.
* We can't generate a scan over an index with amoptionalkey = false
* Not all index AMs support scans with no restriction clauses. We
* can't generate a scan over an index with amoptionalkey = false
* unless there's at least one restriction clause.
*/
if (restrictclauses == NIL && !index->amoptionalkey)
continue;
/*
* 2. Compute pathkeys describing index's ordering, if any, then
* see how many of them are actually useful for this query. This
* is not relevant unless we are at top level.
* 2. Compute pathkeys describing index's ordering, if any, then see
* how many of them are actually useful for this query. This is not
* relevant unless we are at top level.
*/
index_is_ordered = OidIsValid(index->ordering[0]);
if (istoplevel && index_is_ordered && !isjoininner)
@@ -335,9 +335,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
/*
* 3. Generate an indexscan path if there are relevant restriction
* clauses in the current clauses, OR the index ordering is
* potentially useful for later merging or final output ordering,
* OR the index has a predicate that was proven by the current
* clauses.
* potentially useful for later merging or final output ordering, OR
* the index has a predicate that was proven by the current clauses.
*/
if (found_clause || useful_pathkeys != NIL || useful_predicate)
{
@@ -352,16 +351,15 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
}
/*
* 4. If the index is ordered, and there is a requested query
* ordering that we failed to match, consider variant ways of
* achieving the ordering. Again, this is only interesting
* at top level.
* 4. If the index is ordered, and there is a requested query ordering
* that we failed to match, consider variant ways of achieving the
* ordering. Again, this is only interesting at top level.
*/
if (istoplevel && index_is_ordered && !isjoininner &&
root->query_pathkeys != NIL &&
pathkeys_useful_for_ordering(root, useful_pathkeys) == 0)
{
ScanDirection scandir;
ScanDirection scandir;
scandir = match_variant_ordering(root, index, restrictclauses);
if (!ScanDirectionIsNoMovement(scandir))
@@ -409,9 +407,9 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
foreach(l, clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
List *pathlist;
Path *bitmapqual;
ListCell *j;
List *pathlist;
Path *bitmapqual;
ListCell *j;
Assert(IsA(rinfo, RestrictInfo));
/* Ignore RestrictInfos that aren't ORs */
@@ -419,19 +417,19 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
continue;
/*
* We must be able to match at least one index to each of the arms
* of the OR, else we can't use it.
* We must be able to match at least one index to each of the arms of
* the OR, else we can't use it.
*/
pathlist = NIL;
foreach(j, ((BoolExpr *) rinfo->orclause)->args)
{
Node *orarg = (Node *) lfirst(j);
List *indlist;
Node *orarg = (Node *) lfirst(j);
List *indlist;
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
{
List *andargs = ((BoolExpr *) orarg)->args;
List *andargs = ((BoolExpr *) orarg)->args;
indlist = find_usable_indexes(root, rel,
andargs,
@@ -458,25 +456,28 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
isjoininner,
outer_relids);
}
/*
* If nothing matched this arm, we can't do anything
* with this OR clause.
* If nothing matched this arm, we can't do anything with this OR
* clause.
*/
if (indlist == NIL)
{
pathlist = NIL;
break;
}
/*
* OK, pick the most promising AND combination,
* and add it to pathlist.
* OK, pick the most promising AND combination, and add it to
* pathlist.
*/
bitmapqual = choose_bitmap_and(root, rel, indlist);
pathlist = lappend(pathlist, bitmapqual);
}
/*
* If we have a match for every arm, then turn them
* into a BitmapOrPath, and add to result list.
* If we have a match for every arm, then turn them into a
* BitmapOrPath, and add to result list.
*/
if (pathlist != NIL)
{
@@ -494,7 +495,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
* given path set. We want to choose a good tradeoff between selectivity
* given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
@@ -511,7 +512,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
int i;
ListCell *l;
Assert(npaths > 0); /* else caller error */
Assert(npaths > 0); /* else caller error */
if (npaths == 1)
return (Path *) linitial(paths); /* easy case */
@@ -519,24 +520,23 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
* OR clauses. As a compromise, we sort the paths by selectivity.
* We always take the first, and sequentially add on paths that result
* in a lower estimated cost.
* OR clauses. As a compromise, we sort the paths by selectivity. We
* always take the first, and sequentially add on paths that result in a
* lower estimated cost.
*
* We also make some effort to detect directly redundant input paths,
* as can happen if there are multiple possibly usable indexes. For
* this we look only at plain IndexPath inputs, not at sub-OR clauses.
* And we consider an index redundant if all its index conditions were
* already used by earlier indexes. (We could use predicate_implied_by
* to have a more intelligent, but much more expensive, check --- but in
* most cases simple pointer equality should suffice, since after all the
* index conditions are all coming from the same RestrictInfo lists.)
* We also make some effort to detect directly redundant input paths, as can
* happen if there are multiple possibly usable indexes. For this we look
* only at plain IndexPath inputs, not at sub-OR clauses. And we consider
* an index redundant if all its index conditions were already used by
* earlier indexes. (We could use predicate_implied_by to have a more
* intelligent, but much more expensive, check --- but in most cases
* simple pointer equality should suffice, since after all the index
* conditions are all coming from the same RestrictInfo lists.)
*
* XXX is there any risk of throwing away a useful partial index here
* because we don't explicitly look at indpred? At least in simple
* cases, the partial index will sort before competing non-partial
* indexes and so it makes the right choice, but perhaps we need to
* work harder.
* XXX is there any risk of throwing away a useful partial index here because
* we don't explicitly look at indpred? At least in simple cases, the
* partial index will sort before competing non-partial indexes and so it
* makes the right choice, but perhaps we need to work harder.
*
* Note: outputting the selected sub-paths in selectivity order is a good
* thing even if we weren't using that as part of the selection method,
@@ -559,13 +559,13 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
qualsofar = list_copy(((IndexPath *) patharray[0])->indexclauses);
else
qualsofar = NIL;
lastcell = list_head(paths); /* for quick deletions */
lastcell = list_head(paths); /* for quick deletions */
for (i = 1; i < npaths; i++)
{
Path *newpath = patharray[i];
List *newqual = NIL;
Cost newcost;
Path *newpath = patharray[i];
List *newqual = NIL;
Cost newcost;
if (IsA(newpath, IndexPath))
{
@@ -599,12 +599,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
static int
bitmap_path_comparator(const void *a, const void *b)
{
Path *pa = *(Path * const *) a;
Path *pb = *(Path * const *) b;
Path *pa = *(Path *const *) a;
Path *pb = *(Path *const *) b;
Cost acost;
Cost bcost;
Selectivity aselec;
Selectivity bselec;
Selectivity aselec;
Selectivity bselec;
cost_bitmap_tree_node(pa, &acost, &aselec);
cost_bitmap_tree_node(pb, &bcost, &bselec);
@@ -660,7 +660,7 @@ bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths)
*
* We can use clauses from either the current clauses or outer_clauses lists,
* but *found_clause is set TRUE only if we used at least one clause from
* the "current clauses" list. See find_usable_indexes() for motivation.
* the "current clauses" list. See find_usable_indexes() for motivation.
*
* outer_relids determines what Vars will be allowed on the other side
* of a possible index qual; see match_clause_to_indexcol().
@@ -770,7 +770,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* to the caller-specified outer_relids relations (which had better not
* include the relation whose index is being tested). outer_relids should
* be NULL when checking simple restriction clauses, and the outer side
* of the join when building a join inner scan. Other than that, the
* of the join when building a join inner scan. Other than that, the
* only thing we don't like is volatile functions.
*
* Note: in most cases we already know that the clause as a whole uses
@@ -836,8 +836,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;
/*
* If we didn't find a member of the index's opclass, see whether
* it is a "special" indexable operator.
* If we didn't find a member of the index's opclass, see whether it
* is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, true))
return true;
@@ -852,8 +852,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;
/*
* If we didn't find a member of the index's opclass, see whether
* it is a "special" indexable operator.
* If we didn't find a member of the index's opclass, see whether it
* is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, false))
return true;
@@ -914,14 +914,14 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* Note: if Postgres tried to optimize queries by forming equivalence
* classes over equi-joined attributes (i.e., if it recognized that a
* qualification such as "where a.b=c.d and a.b=5" could make use of
* an index on c.d), then we could use that equivalence class info
* here with joininfo lists to do more complete tests for the usability
* of a partial index. For now, the test only uses restriction
* clauses (those in baserestrictinfo). --Nels, Dec '92
* qualification such as "where a.b=c.d and a.b=5" could make use of an
* index on c.d), then we could use that equivalence class info here with
* joininfo lists to do more complete tests for the usability of a partial
* index. For now, the test only uses restriction clauses (those in
* baserestrictinfo). --Nels, Dec '92
*
* XXX as of 7.1, equivalence class info *is* available. Consider
* improving this code as foreseen by Nels.
* XXX as of 7.1, equivalence class info *is* available. Consider improving
* this code as foreseen by Nels.
*/
foreach(ilist, rel->indexlist)
@@ -943,7 +943,7 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
* for the specified table. Returns a set of relids.
* for the specified table. Returns a set of relids.
*/
static Relids
indexable_outerrelids(RelOptInfo *rel)
@@ -958,7 +958,7 @@ indexable_outerrelids(RelOptInfo *rel)
foreach(l, rel->joininfo)
{
RestrictInfo *joininfo = (RestrictInfo *) lfirst(l);
Relids other_rels;
Relids other_rels;
other_rels = bms_difference(joininfo->required_relids, rel->relids);
if (matches_any_index(joininfo, rel, other_rels))
@@ -986,7 +986,7 @@ matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel, Relids outer_relids)
{
foreach(l, ((BoolExpr *) rinfo->orclause)->args)
{
Node *orarg = (Node *) lfirst(l);
Node *orarg = (Node *) lfirst(l);
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
@@ -1092,17 +1092,17 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
return NULL;
/*
* Otherwise, we have to do path selection in the memory context of
* the given rel, so that any created path can be safely attached to
* the rel's cache of best inner paths. (This is not currently an
* issue for normal planning, but it is an issue for GEQO planning.)
* Otherwise, we have to do path selection in the memory context of the
* given rel, so that any created path can be safely attached to the rel's
* cache of best inner paths. (This is not currently an issue for normal
* planning, but it is an issue for GEQO planning.)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
/*
* Intersect the given outer_relids with index_outer_relids to find
* the set of outer relids actually relevant for this rel. If there
* are none, again we can fail immediately.
* Intersect the given outer_relids with index_outer_relids to find the
* set of outer relids actually relevant for this rel. If there are none,
* again we can fail immediately.
*/
outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
if (bms_is_empty(outer_relids))
@@ -1113,11 +1113,10 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
}
/*
* Look to see if we already computed the result for this set of
* relevant outerrels. (We include the isouterjoin status in the
* cache lookup key for safety. In practice I suspect this is not
* necessary because it should always be the same for a given
* innerrel.)
* Look to see if we already computed the result for this set of relevant
* outerrels. (We include the isouterjoin status in the cache lookup key
* for safety. In practice I suspect this is not necessary because it
* should always be the same for a given innerrel.)
*/
foreach(l, rel->index_inner_paths)
{
@@ -1160,8 +1159,8 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
bitindexpaths = list_concat(bitindexpaths, list_copy(indexpaths));
/*
* If we found anything usable, generate a BitmapHeapPath for the
* most promising combination of bitmap index paths.
* If we found anything usable, generate a BitmapHeapPath for the most
* promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
@@ -1218,12 +1217,11 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
ListCell *l;
/*
* We can always use plain restriction clauses for the rel. We
* scan these first because we want them first in the clause
* list for the convenience of remove_redundant_join_clauses,
* which can never remove non-join clauses and hence won't be able
* to get rid of a non-join clause if it appears after a join
* clause it is redundant with.
* We can always use plain restriction clauses for the rel. We scan these
* first because we want them first in the clause list for the convenience
* of remove_redundant_join_clauses, which can never remove non-join
* clauses and hence won't be able to get rid of a non-join clause if it
* appears after a join clause it is redundant with.
*/
foreach(l, rel->baserestrictinfo)
{
@@ -1305,7 +1303,7 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
*
* If able to match the requested query pathkeys, returns either
* ForwardScanDirection or BackwardScanDirection to indicate the proper index
* scan direction. If no match, returns NoMovementScanDirection.
* scan direction. If no match, returns NoMovementScanDirection.
*/
static ScanDirection
match_variant_ordering(PlannerInfo *root,
@@ -1318,8 +1316,8 @@ match_variant_ordering(PlannerInfo *root,
* Forget the whole thing if not a btree index; our check for ignorable
* columns assumes we are dealing with btree opclasses. (It'd be possible
* to factor out just the try for backwards indexscan, but considering
* that we presently have no orderable indexes except btrees anyway,
* it's hardly worth contorting this code for that case.)
* that we presently have no orderable indexes except btrees anyway, it's
* hardly worth contorting this code for that case.)
*
* Note: if you remove this, you probably need to put in a check on
* amoptionalkey to prevent possible clauseless scan on an index that
@@ -1327,17 +1325,19 @@ match_variant_ordering(PlannerInfo *root,
*/
if (index->relam != BTREE_AM_OID)
return NoMovementScanDirection;
/*
* Figure out which index columns can be optionally ignored because
* they have an equality constraint. This is the same set for either
* forward or backward scan, so we do it just once.
* Figure out which index columns can be optionally ignored because they
* have an equality constraint. This is the same set for either forward
* or backward scan, so we do it just once.
*/
ignorables = identify_ignorable_ordering_cols(root, index,
restrictclauses);
/*
* Try to match to forward scan, then backward scan. However, we can
* skip the forward-scan case if there are no ignorable columns,
* because find_usable_indexes() would have found the match already.
* Try to match to forward scan, then backward scan. However, we can skip
* the forward-scan case if there are no ignorable columns, because
* find_usable_indexes() would have found the match already.
*/
if (ignorables &&
match_index_to_query_keys(root, index, ForwardScanDirection,
@@ -1365,24 +1365,24 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
List *restrictclauses)
{
List *result = NIL;
int indexcol = 0; /* note this is 0-based */
int indexcol = 0; /* note this is 0-based */
ListCell *l;
/* restrictclauses is either NIL or has a sublist per column */
foreach(l, restrictclauses)
{
List *sublist = (List *) lfirst(l);
Oid opclass = index->classlist[indexcol];
ListCell *l2;
List *sublist = (List *) lfirst(l);
Oid opclass = index->classlist[indexcol];
ListCell *l2;
foreach(l2, sublist)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l2);
OpExpr *clause = (OpExpr *) rinfo->clause;
Oid clause_op;
int op_strategy;
bool varonleft;
bool ispc;
Oid clause_op;
int op_strategy;
bool varonleft;
bool ispc;
/* We know this clause passed match_clause_to_indexcol */
@@ -1393,11 +1393,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
index))
{
/*
* The clause means either col = TRUE or col = FALSE;
* we do not care which, it's an equality constraint
* either way.
* The clause means either col = TRUE or col = FALSE; we
* do not care which, it's an equality constraint either
* way.
*/
result = lappend_int(result, indexcol+1);
result = lappend_int(result, indexcol + 1);
break;
}
}
@@ -1426,12 +1426,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
op_strategy = get_op_opclass_strategy(clause_op, opclass);
/*
* You might expect to see Assert(op_strategy != 0) here,
* but you won't: the clause might contain a special indexable
* operator rather than an ordinary opclass member. Currently
* none of the special operators are very likely to expand to
* an equality operator; we do not bother to check, but just
* assume no match.
* You might expect to see Assert(op_strategy != 0) here, but you
* won't: the clause might contain a special indexable operator
* rather than an ordinary opclass member. Currently none of the
* special operators are very likely to expand to an equality
* operator; we do not bother to check, but just assume no match.
*/
if (op_strategy != BTEqualStrategyNumber)
continue;
@@ -1445,7 +1444,7 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
rinfo->left_relids);
if (ispc)
{
result = lappend_int(result, indexcol+1);
result = lappend_int(result, indexcol + 1);
break;
}
}
@@ -1480,8 +1479,8 @@ match_index_to_query_keys(PlannerInfo *root,
index_pathkeys = build_index_pathkeys(root, index, indexscandir);
/*
* Can we match to the query's requested pathkeys? The inner loop
* skips over ignorable index columns while trying to match.
* Can we match to the query's requested pathkeys? The inner loop skips
* over ignorable index columns while trying to match.
*/
index_cell = list_head(index_pathkeys);
index_col = 0;
@@ -1492,13 +1491,14 @@ match_index_to_query_keys(PlannerInfo *root,
for (;;)
{
List *isubkey;
List *isubkey;
if (index_cell == NULL)
return false;
isubkey = (List *) lfirst(index_cell);
index_cell = lnext(index_cell);
index_col++; /* index_col is now 1-based */
/*
* Since we are dealing with canonicalized pathkeys, pointer
* comparison is sufficient to determine a match.
@@ -1561,9 +1561,9 @@ match_index_to_operand(Node *operand,
int indkey;
/*
* Ignore any RelabelType node above the operand. This is needed to
* be able to apply indexscanning in binary-compatible-operator cases.
* Note: we can assume there is at most one RelabelType node;
* Ignore any RelabelType node above the operand. This is needed to be
* able to apply indexscanning in binary-compatible-operator cases. Note:
* we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
*/
if (operand && IsA(operand, RelabelType))
@@ -1583,9 +1583,9 @@ match_index_to_operand(Node *operand,
else
{
/*
* Index expression; find the correct expression. (This search
* could be avoided, at the cost of complicating all the callers
* of this routine; doesn't seem worth it.)
* Index expression; find the correct expression. (This search could
* be avoided, at the cost of complicating all the callers of this
* routine; doesn't seem worth it.)
*/
ListCell *indexpr_item;
int i;
@@ -1645,7 +1645,7 @@ match_index_to_operand(Node *operand,
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
* that support boolean equality). We can transform a plain reference
* that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
@@ -1696,14 +1696,15 @@ match_boolean_index_clause(Node *clause,
indexcol, index))
return true;
}
/*
* Since we only consider clauses at top level of WHERE, we can convert
* indexkey IS TRUE and indexkey IS FALSE to index searches as well.
* The different meaning for NULL isn't important.
* indexkey IS TRUE and indexkey IS FALSE to index searches as well. The
* different meaning for NULL isn't important.
*/
else if (clause && IsA(clause, BooleanTest))
{
BooleanTest *btest = (BooleanTest *) clause;
BooleanTest *btest = (BooleanTest *) clause;
if (btest->booltesttype == IS_TRUE ||
btest->booltesttype == IS_FALSE)
@@ -1737,8 +1738,8 @@ match_special_index_operator(Expr *clause, Oid opclass,
/*
* Currently, all known special operators require the indexkey on the
* left, but this test could be pushed into the switch statement if
* some are added that do not...
* left, but this test could be pushed into the switch statement if some
* are added that do not...
*/
if (!indexkey_on_left)
return false;
@@ -1760,12 +1761,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_LIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICLIKE_OP:
@@ -1773,7 +1774,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICLIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_REGEXEQ_OP:
@@ -1781,7 +1782,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_REGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICREGEXEQ_OP:
@@ -1789,7 +1790,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICREGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
&prefix, &rest) != Pattern_Prefix_None;
&prefix, &rest) != Pattern_Prefix_None;
break;
case OID_INET_SUB_OP:
@@ -1815,9 +1816,9 @@ match_special_index_operator(Expr *clause, Oid opclass,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
* We insist on the opclass being the specific one we expect, else we'd
* do the wrong thing if someone were to make a reverse-sort opclass
* with the same operators.
* We insist on the opclass being the specific one we expect, else we'd do
* the wrong thing if someone were to make a reverse-sort opclass with the
* same operators.
*/
switch (expr_op)
{
@@ -1906,7 +1907,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
/* First check for boolean cases */
if (IsBooleanOpclass(curClass))
{
Expr *boolqual;
Expr *boolqual;
boolqual = expand_boolean_index_clause((Node *) rinfo->clause,
indexcol,
@@ -1960,7 +1961,7 @@ expand_boolean_index_clause(Node *clause,
/* NOT clause? */
if (not_clause(clause))
{
Node *arg = (Node *) get_notclausearg((Expr *) clause);
Node *arg = (Node *) get_notclausearg((Expr *) clause);
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@@ -1971,8 +1972,8 @@ expand_boolean_index_clause(Node *clause,
}
if (clause && IsA(clause, BooleanTest))
{
BooleanTest *btest = (BooleanTest *) clause;
Node *arg = (Node *) btest->arg;
BooleanTest *btest = (BooleanTest *) clause;
Node *arg = (Node *) btest->arg;
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@@ -2007,6 +2008,7 @@ static List *
expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;
/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
@@ -2020,10 +2022,9 @@ expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
switch (expr_op)
{
/*
* LIKE and regex operators are not members of any index
* opclass, so if we find one in an indexqual list we can
* assume that it was accepted by
* match_special_index_operator().
* LIKE and regex operators are not members of any index opclass,
* so if we find one in an indexqual list we can assume that it
* was accepted by match_special_index_operator().
*/
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
@@ -2128,8 +2129,8 @@ prefix_quals(Node *leftop, Oid opclass,
}
/*
* If necessary, coerce the prefix constant to the right type. The
* given prefix constant is either text or bytea type.
* If necessary, coerce the prefix constant to the right type. The given
* prefix constant is either text or bytea type.
*/
if (prefix_const->consttype != datatype)
{
@@ -2139,11 +2140,11 @@ prefix_quals(Node *leftop, Oid opclass,
{
case TEXTOID:
prefix = DatumGetCString(DirectFunctionCall1(textout,
prefix_const->constvalue));
prefix_const->constvalue));
break;
case BYTEAOID:
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
prefix_const->constvalue));
prefix_const->constvalue));
break;
default:
elog(ERROR, "unexpected const type: %u",