
Standard pgindent run for 8.1.

Author: Bruce Momjian
Date:   2005-10-15 02:49:52 +00:00
Parent: 790c01d280
Commit: 1dc3498251

770 changed files with 34334 additions and 32507 deletions

src/backend/optimizer/util/plancat.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.113 2005/07/23 21:05:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.114 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@
static void estimate_rel_size(Relation rel, int32 *attr_widths,
- BlockNumber *pages, double *tuples);
+ BlockNumber *pages, double *tuples);
/*
@@ -71,18 +71,18 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Normally, we can assume the rewriter already acquired at least
- * AccessShareLock on each relation used in the query. However this
- * will not be the case for relations added to the query because they
- * are inheritance children of some relation mentioned explicitly.
- * For them, this is the first access during the parse/rewrite/plan
- * pipeline, and so we need to obtain and keep a suitable lock.
+ * AccessShareLock on each relation used in the query. However this will
+ * not be the case for relations added to the query because they are
+ * inheritance children of some relation mentioned explicitly. For them,
+ * this is the first access during the parse/rewrite/plan pipeline, and so
+ * we need to obtain and keep a suitable lock.
*
- * XXX really, a suitable lock is RowShareLock if the relation is
- * an UPDATE/DELETE target, and AccessShareLock otherwise. However
- * we cannot easily tell here which to get, so for the moment just
- * get AccessShareLock always. The executor will get the right lock
- * when it runs, which means there is a very small chance of deadlock
- * trying to upgrade our lock.
+ * XXX really, a suitable lock is RowShareLock if the relation is an
+ * UPDATE/DELETE target, and AccessShareLock otherwise. However we cannot
+ * easily tell here which to get, so for the moment just get
+ * AccessShareLock always. The executor will get the right lock when it
+ * runs, which means there is a very small chance of deadlock trying to
+ * upgrade our lock.
*/
if (rel->reloptkind == RELOPT_BASEREL)
relation = heap_open(relationObjectId, NoLock);
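
For illustration only, not part of this commit: a minimal, self-contained C sketch of the locking policy that comment describes. The enum, helper name, and boolean flags are invented stand-ins; only the lock-mode names come from the comment.

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the lock modes named in the comment. */
    typedef enum { NO_LOCK, ACCESS_SHARE_LOCK, ROW_SHARE_LOCK } LockMode;

    /*
     * Relations the rewriter already locked are reopened without taking a
     * new lock; inheritance children seen here for the first time always
     * get AccessShareLock, even though an UPDATE/DELETE target would
     * ideally get RowShareLock.
     */
    static LockMode planner_lock_mode(bool already_locked_by_rewriter,
                                      bool is_update_or_delete_target)
    {
        if (already_locked_by_rewriter)
            return NO_LOCK;
        (void) is_update_or_delete_target;  /* cannot cheaply tell here */
        return ACCESS_SHARE_LOCK;
    }

    int main(void)
    {
        printf("%d %d\n",
               planner_lock_mode(true, false),    /* 0: no new lock */
               planner_lock_mode(false, true));   /* 1: AccessShareLock */
        return 0;
    }
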
@@ -105,8 +105,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
&rel->pages, &rel->tuples);
/*
- * Make list of indexes. Ignore indexes on system catalogs if told
- * to.
+ * Make list of indexes. Ignore indexes on system catalogs if told to.
*/
if (IsIgnoringSystemIndexes() && IsSystemClass(relation->rd_rel))
hasindex = false;
@@ -133,10 +132,10 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Extract info from the relation descriptor for the index.
*
- * Note that we take no lock on the index; we assume our lock on
- * the parent table will protect the index's schema information.
- * When and if the executor actually uses the index, it will take
- * a lock as needed to protect the access to the index contents.
+ * Note that we take no lock on the index; we assume our lock on the
+ * parent table will protect the index's schema information. When
+ * and if the executor actually uses the index, it will take a
+ * lock as needed to protect the access to the index contents.
*/
indexRelation = index_open(indexoid);
index = indexRelation->rd_index;
@@ -148,8 +147,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->ncolumns = ncolumns = index->indnatts;
/*
- * Need to make classlist and ordering arrays large enough to
- * put a terminating 0 at the end of each one.
+ * Need to make classlist and ordering arrays large enough to put
+ * a terminating 0 at the end of each one.
*/
info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
info->classlist = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
@@ -166,8 +165,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->amoptionalkey = indexRelation->rd_am->amoptionalkey;
/*
- * Fetch the ordering operators associated with the index, if
- * any.
+ * Fetch the ordering operators associated with the index, if any.
*/
amorderstrategy = indexRelation->rd_am->amorderstrategy;
if (amorderstrategy != 0)
@@ -184,8 +182,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Fetch the index expressions and predicate, if any. We must
* modify the copies we obtain from the relcache to have the
- * correct varno for the parent relation, so that they match
- * up correctly against qual clauses.
+ * correct varno for the parent relation, so that they match up
+ * correctly against qual clauses.
*/
info->indexprs = RelationGetIndexExpressions(indexRelation);
info->indpred = RelationGetIndexPredicate(indexRelation);
@@ -197,11 +195,11 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->unique = index->indisunique;
/*
- * Estimate the index size. If it's not a partial index, we
- * lock the number-of-tuples estimate to equal the parent table;
- * if it is partial then we have to use the same methods as we
- * would for a table, except we can be sure that the index is
- * not larger than the table.
+ * Estimate the index size. If it's not a partial index, we lock
+ * the number-of-tuples estimate to equal the parent table; if it
+ * is partial then we have to use the same methods as we would for
+ * a table, except we can be sure that the index is not larger
+ * than the table.
*/
if (info->indpred == NIL)
{
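
For illustration only, not part of this commit: a self-contained C sketch of the rule in that comment. The function and its arguments are hypothetical, and the "no larger than the table" clamp is simplified to tuple counts.

    #include <stdio.h>

    /*
     * Non-partial index: pin the tuple estimate to the parent table's.
     * Partial index: keep an independently derived estimate, but never
     * let it exceed the parent table, since the index cannot be larger.
     */
    static double index_tuples_estimate(int is_partial,
                                        double own_estimate,
                                        double parent_tuples)
    {
        if (!is_partial)
            return parent_tuples;
        return (own_estimate > parent_tuples) ? parent_tuples : own_estimate;
    }

    int main(void)
    {
        printf("%.0f\n", index_tuples_estimate(0, 500.0, 1000.0));   /* 1000 */
        printf("%.0f\n", index_tuples_estimate(1, 1500.0, 1000.0));  /* 1000, clamped */
        printf("%.0f\n", index_tuples_estimate(1, 300.0, 1000.0));   /* 300 */
        return 0;
    }
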
@@ -241,8 +239,8 @@ static void
estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples)
{
- BlockNumber curpages;
- BlockNumber relpages;
+ BlockNumber curpages;
+ BlockNumber relpages;
double reltuples;
double density;
@@ -256,22 +254,22 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
/*
* HACK: if the relation has never yet been vacuumed, use a
- * minimum estimate of 10 pages. This emulates a desirable
- * aspect of pre-8.0 behavior, which is that we wouldn't assume
- * a newly created relation is really small, which saves us from
- * making really bad plans during initial data loading. (The
- * plans are not wrong when they are made, but if they are cached
- * and used again after the table has grown a lot, they are bad.)
- * It would be better to force replanning if the table size has
- * changed a lot since the plan was made ... but we don't
- * currently have any infrastructure for redoing cached plans at
- * all, so we have to kluge things here instead.
+ * minimum estimate of 10 pages. This emulates a desirable aspect
+ * of pre-8.0 behavior, which is that we wouldn't assume a newly
+ * created relation is really small, which saves us from making
+ * really bad plans during initial data loading. (The plans are
+ * not wrong when they are made, but if they are cached and used
+ * again after the table has grown a lot, they are bad.) It would
+ * be better to force replanning if the table size has changed a
+ * lot since the plan was made ... but we don't currently have any
+ * infrastructure for redoing cached plans at all, so we have to
+ * kluge things here instead.
*
- * We approximate "never vacuumed" by "has relpages = 0", which
- * means this will also fire on genuinely empty relations. Not
- * great, but fortunately that's a seldom-seen case in the real
- * world, and it shouldn't degrade the quality of the plan too
- * much anyway to err in this direction.
+ * We approximate "never vacuumed" by "has relpages = 0", which means
+ * this will also fire on genuinely empty relations. Not great,
+ * but fortunately that's a seldom-seen case in the real world,
+ * and it shouldn't degrade the quality of the plan too much
+ * anyway to err in this direction.
*/
if (curpages < 10 && rel->rd_rel->relpages == 0)
curpages = 10;
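
For illustration only, not part of this commit: the heuristic in that comment as a small standalone C sketch; the helper name and the driver values are invented.

    #include <stdio.h>

    /*
     * "Never vacuumed" is approximated by relpages == 0 in pg_class; in
     * that case the current physical size is raised to a 10-page minimum
     * so that plans made during initial data loading do not assume a
     * tiny table.
     */
    static unsigned clamp_curpages(unsigned curpages, unsigned relpages)
    {
        if (curpages < 10 && relpages == 0)
            return 10;
        return curpages;
    }

    int main(void)
    {
        printf("%u\n", clamp_curpages(3, 0));    /* 10: never vacuumed, clamp up */
        printf("%u\n", clamp_curpages(3, 120));  /* 3: previously vacuumed, trust it */
        return 0;
    }
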
@@ -287,6 +285,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;
+
/*
* If it's an index, discount the metapage. This is a kluge
* because it assumes more than it ought to about index contents;
@@ -307,19 +306,19 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* When we have no data because the relation was truncated,
* estimate tuple width from attribute datatypes. We assume
* here that the pages are completely full, which is OK for
- * tables (since they've presumably not been VACUUMed yet)
- * but is probably an overestimate for indexes. Fortunately
+ * tables (since they've presumably not been VACUUMed yet) but
+ * is probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the
* parent table's size.
*
* Note: this code intentionally disregards alignment
- * considerations, because (a) that would be gilding the
- * lily considering how crude the estimate is, and (b)
- * it creates platform dependencies in the default plans
- * which are kind of a headache for regression testing.
+ * considerations, because (a) that would be gilding the lily
+ * considering how crude the estimate is, and (b) it creates
+ * platform dependencies in the default plans which are kind
+ * of a headache for regression testing.
*/
- int32 tuple_width = 0;
- int i;
+ int32 tuple_width = 0;
+ int i;
for (i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
{
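
For illustration only, not part of this commit: a self-contained C sketch of the estimate that loop computes. BLCKSZ and the column widths are illustrative values, and alignment padding is ignored, as the comment says.

    #include <stdio.h>

    #define BLCKSZ 8192   /* assumed default PostgreSQL block size */

    /*
     * Sum per-column width estimates to get a tuple width, then assume
     * every page is completely full to turn a page count into a tuple
     * count.
     */
    static double tuples_from_width(const int attr_widths[], int natts,
                                    unsigned curpages)
    {
        int tuple_width = 0;

        for (int i = 0; i < natts; i++)
            tuple_width += attr_widths[i];
        if (tuple_width <= 0)
            return 0.0;
        return ((double) BLCKSZ / tuple_width) * curpages;
    }

    int main(void)
    {
        int widths[] = {4, 8, 32};   /* e.g. an int4, an int8, a short text */

        printf("%.0f\n", tuples_from_width(widths, 3, 100));  /* ~18618 */
        return 0;
    }
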
@@ -391,12 +390,12 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
constr = relation->rd_att->constr;
if (constr != NULL)
{
- int num_check = constr->num_check;
- int i;
+ int num_check = constr->num_check;
+ int i;
for (i = 0; i < num_check; i++)
{
- Node *cexpr;
+ Node *cexpr;
cexpr = stringToNode(constr->check[i].ccbin);
@@ -425,8 +424,8 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
ChangeVarNodes(cexpr, 1, varno, 0);
/*
- * Finally, convert to implicit-AND format (that is, a List)
- * and append the resulting item(s) to our output list.
+ * Finally, convert to implicit-AND format (that is, a List) and
+ * append the resulting item(s) to our output list.
*/
result = list_concat(result,
make_ands_implicit((Expr *) cexpr));
@@ -532,11 +531,12 @@ build_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
break;
case RTE_FUNCTION:
- expandRTE(rte, varno, 0, true /* include dropped */,
+ expandRTE(rte, varno, 0, true /* include dropped */ ,
NULL, &colvars);
foreach(l, colvars)
{
var = (Var *) lfirst(l);
+
/*
* A non-Var in expandRTE's output means a dropped column;
* must punt.
@@ -727,11 +727,11 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno)
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
/*
- * Note: ignore partial indexes, since they don't allow us to
- * conclude that all attr values are distinct. We don't take any
- * interest in expressional indexes either. Also, a multicolumn
- * unique index doesn't allow us to conclude that just the
- * specified attr is unique.
+ * Note: ignore partial indexes, since they don't allow us to conclude
+ * that all attr values are distinct. We don't take any interest in
+ * expressional indexes either. Also, a multicolumn unique index
+ * doesn't allow us to conclude that just the specified attr is
+ * unique.
*/
if (index->unique &&
index->ncolumns == 1 &&