Mirror of https://github.com/postgres/postgres.git (synced 2025-07-08 11:42:09 +03:00)
8.4 pgindent run, with new combined Linux/FreeBSD/MinGW typedef list
provided by Andrew.
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.138 2009/06/06 22:13:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.139 2009/06/11 14:48:55 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -177,8 +177,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
  {
  if (onerel->rd_rel->relisshared)
  ereport(WARNING,
- (errmsg("skipping \"%s\" --- only superuser can analyze it",
- RelationGetRelationName(onerel))));
+ (errmsg("skipping \"%s\" --- only superuser can analyze it",
+ RelationGetRelationName(onerel))));
  else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE)
  ereport(WARNING,
  (errmsg("skipping \"%s\" --- only superuser or database owner can analyze it",
@@ -234,8 +234,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
  RelationGetRelationName(onerel))));

  /*
- * Switch to the table owner's userid, so that any index functions are
- * run as that user.
+ * Switch to the table owner's userid, so that any index functions are run
+ * as that user.
  */
  GetUserIdAndContext(&save_userid, &save_secdefcxt);
  SetUserIdAndContext(onerel->rd_rel->relowner, true);
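The comment in this hunk describes a save/switch/restore pattern around the session user identity. Purely as an illustration of that pattern (a fragment, not the full analyze_rel() body: the declarations and the final restore call are assumed from context and rely on the 8.4 backend headers, so this does not compile standalone):

/* Illustrative fragment only; GetUserIdAndContext/SetUserIdAndContext are the
 * calls visible in the hunk above, the surrounding code is assumed. */
Oid		save_userid;
bool	save_secdefcxt;

/* Remember who we are, then impersonate the table owner so that any index
 * expression functions run with the owner's privileges. */
GetUserIdAndContext(&save_userid, &save_secdefcxt);
SetUserIdAndContext(onerel->rd_rel->relowner, true);

/* ... sample the table and compute statistics ... */

/* Restore the caller's identity before returning (assumed to happen near the
 * end of analyze_rel(), outside the lines shown in this hunk). */
SetUserIdAndContext(save_userid, save_secdefcxt);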
@@ -467,7 +467,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,

  /*
  * Same for indexes. Vacuum always scans all indexes, so if we're part of
- * VACUUM ANALYZE, don't overwrite the accurate count already inserted by
+ * VACUUM ANALYZE, don't overwrite the accurate count already inserted by
  * VACUUM.
  */
  if (!vacstmt->vacuum)
@@ -719,8 +719,8 @@ examine_attribute(Relation onerel, int attnum)
  return NULL;

  /*
- * Create the VacAttrStats struct. Note that we only have a copy of
- * the fixed fields of the pg_attribute tuple.
+ * Create the VacAttrStats struct. Note that we only have a copy of the
+ * fixed fields of the pg_attribute tuple.
  */
  stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
  stats->attr = (Form_pg_attribute) palloc(ATTRIBUTE_FIXED_PART_SIZE);
@@ -737,10 +737,9 @@ examine_attribute(Relation onerel, int attnum)
  stats->tupattnum = attnum;

  /*
- * The fields describing the stats->stavalues[n] element types default
- * to the type of the field being analyzed, but the type-specific
- * typanalyze function can change them if it wants to store something
- * else.
+ * The fields describing the stats->stavalues[n] element types default to
+ * the type of the field being analyzed, but the type-specific typanalyze
+ * function can change them if it wants to store something else.
  */
  for (i = 0; i < STATISTIC_NUM_SLOTS; i++)
  {
@@ -894,7 +893,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
  double *totalrows, double *totaldeadrows)
  {
  int numrows = 0; /* # rows now in reservoir */
- double samplerows = 0; /* total # rows collected */
+ double samplerows = 0; /* total # rows collected */
  double liverows = 0; /* # live rows seen */
  double deadrows = 0; /* # dead rows seen */
  double rowstoskip = -1; /* -1 means not set yet */
@@ -931,9 +930,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
  * the maxoffset value stays good (else concurrent VACUUM might delete
  * tuples out from under us). Hence, pin the page until we are done
  * looking at it. We also choose to hold sharelock on the buffer
- * throughout --- we could release and re-acquire sharelock for
- * each tuple, but since we aren't doing much work per tuple, the
- * extra lock traffic is probably better avoided.
+ * throughout --- we could release and re-acquire sharelock for each
+ * tuple, but since we aren't doing much work per tuple, the extra
+ * lock traffic is probably better avoided.
  */
  targbuffer = ReadBufferExtended(onerel, MAIN_FORKNUM, targblock,
  RBM_NORMAL, vac_strategy);
@@ -952,9 +951,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,

  /*
  * We ignore unused and redirect line pointers. DEAD line
- * pointers should be counted as dead, because we need vacuum
- * to run to get rid of them. Note that this rule agrees with
- * the way that heap_page_prune() counts things.
+ * pointers should be counted as dead, because we need vacuum to
+ * run to get rid of them. Note that this rule agrees with the
+ * way that heap_page_prune() counts things.
  */
  if (!ItemIdIsNormal(itemid))
  {
@@ -984,6 +983,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
  break;

  case HEAPTUPLE_INSERT_IN_PROGRESS:
+
  /*
  * Insert-in-progress rows are not counted. We assume
  * that when the inserting transaction commits or aborts,
@@ -991,17 +991,17 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
  * count. This works right only if that transaction ends
  * after we finish analyzing the table; if things happen
  * in the other order, its stats update will be
- * overwritten by ours. However, the error will be
- * large only if the other transaction runs long enough
- * to insert many tuples, so assuming it will finish
- * after us is the safer option.
+ * overwritten by ours. However, the error will be large
+ * only if the other transaction runs long enough to
+ * insert many tuples, so assuming it will finish after us
+ * is the safer option.
  *
  * A special case is that the inserting transaction might
- * be our own. In this case we should count and sample
+ * be our own. In this case we should count and sample
  * the row, to accommodate users who load a table and
  * analyze it in one transaction. (pgstat_report_analyze
- * has to adjust the numbers we send to the stats collector
- * to make this come out right.)
+ * has to adjust the numbers we send to the stats
+ * collector to make this come out right.)
  */
  if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple.t_data)))
  {
@@ -1011,6 +1011,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
  break;

  case HEAPTUPLE_DELETE_IN_PROGRESS:
+
  /*
  * We count delete-in-progress rows as still live, using
  * the same reasoning given above; but we don't bother to
@@ -1019,8 +1020,8 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
  * If the delete was done by our own transaction, however,
  * we must count the row as dead to make
  * pgstat_report_analyze's stats adjustments come out
- * right. (Note: this works out properly when the row
- * was both inserted and deleted in our xact.)
+ * right. (Note: this works out properly when the row was
+ * both inserted and deleted in our xact.)
  */
  if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(targtuple.t_data)))
  deadrows += 1;
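Taken together, the HEAPTUPLE_INSERT_IN_PROGRESS and HEAPTUPLE_DELETE_IN_PROGRESS hunks above spell out how the sampler classifies tuples returned by HeapTupleSatisfiesVacuum(). A self-contained schematic of those counting rules, using a stand-in enum and a hypothetical helper rather than PostgreSQL's actual types, is sketched below:

#include <stdbool.h>

/* Stand-in for PostgreSQL's HTSV_Result; the members mirror the cases
 * discussed in the diff, the names are illustrative only. */
typedef enum
{
	TUPLE_LIVE,
	TUPLE_DEAD,
	TUPLE_RECENTLY_DEAD,
	TUPLE_INSERT_IN_PROGRESS,
	TUPLE_DELETE_IN_PROGRESS
} TupleVisibility;

/* Apply the counting policy the comments describe: in-progress inserts are
 * ignored unless they are our own; in-progress deletes count as live unless
 * the deleter is our own transaction, in which case the row counts as dead. */
static void
count_sample_tuple(TupleVisibility vis, bool xmin_is_ours, bool xmax_is_ours,
				   double *liverows, double *deadrows, bool *sample_it)
{
	*sample_it = false;
	switch (vis)
	{
		case TUPLE_LIVE:
			*liverows += 1;
			*sample_it = true;		/* eligible for the reservoir */
			break;
		case TUPLE_DEAD:
		case TUPLE_RECENTLY_DEAD:
			*deadrows += 1;			/* needs vacuum to clean up */
			break;
		case TUPLE_INSERT_IN_PROGRESS:
			if (xmin_is_ours)		/* our own insert: count and sample it */
			{
				*liverows += 1;
				*sample_it = true;
			}
			break;
		case TUPLE_DELETE_IN_PROGRESS:
			if (xmax_is_ours)		/* our own delete: count it as dead */
				*deadrows += 1;
			else
				*liverows += 1;		/* still live, but not sampled */
			break;
	}
}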
@@ -1054,8 +1055,8 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
  /*
  * t in Vitter's paper is the number of records already
  * processed. If we need to compute a new S value, we
- * must use the not-yet-incremented value of samplerows
- * as t.
+ * must use the not-yet-incremented value of samplerows as
+ * t.
  */
  if (rowstoskip < 0)
  rowstoskip = get_next_S(samplerows, targrows, &rstate);
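This hunk touches the reservoir-sampling bookkeeping: samplerows is the count of rows processed so far (Vitter's t), and get_next_S() draws how many rows to skip before the next replacement. As a self-contained illustration of the invariant the comment insists on (decide about the next row using the not-yet-incremented count t), here is the simpler Algorithm R form of reservoir sampling; the actual code uses Vitter's Algorithm Z via get_next_S(), which computes skip counts instead of testing every row, so this sketch is an analogy, not the analyze.c implementation:

#include <stdio.h>
#include <stdlib.h>

/* Reservoir-sample up to targrows values from a stream (Algorithm R).
 * "t" plays the role the analyze.c comment describes: the number of records
 * already processed before the current one is considered. */
static void
reservoir_sample(const int *stream, int nstream, int *reservoir, int targrows)
{
	int		nkept = 0;		/* rows currently in the reservoir */
	double	t = 0;			/* rows processed so far (Vitter's t) */
	int		i;

	for (i = 0; i < nstream; i++)
	{
		if (nkept < targrows)
			reservoir[nkept++] = stream[i];		/* reservoir not full yet */
		else
		{
			/* Replace a random slot with probability targrows / (t + 1);
			 * note that the decision uses the not-yet-incremented t. */
			long	k = random() % (long) (t + 1);

			if (k < targrows)
				reservoir[k] = stream[i];
		}
		t += 1;				/* only now does this row count as processed */
	}
}

int
main(void)
{
	int		stream[100], reservoir[10];
	int		i;

	for (i = 0; i < 100; i++)
		stream[i] = i;
	reservoir_sample(stream, 100, reservoir, 10);
	for (i = 0; i < 10; i++)
		printf("%d ", reservoir[i]);
	printf("\n");
	return 0;
}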
@@ -1385,10 +1386,10 @@ update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats)
  {
  /* Yes, replace it */
  stup = heap_modify_tuple(oldtup,
- RelationGetDescr(sd),
- values,
- nulls,
- replaces);
+ RelationGetDescr(sd),
+ values,
+ nulls,
+ replaces);
  ReleaseSysCache(oldtup);
  simple_heap_update(sd, &stup->t_self, stup);
  }
@@ -1883,9 +1884,10 @@ compute_minimal_stats(VacAttrStatsP stats,
  stats->numnumbers[0] = num_mcv;
  stats->stavalues[0] = mcv_values;
  stats->numvalues[0] = num_mcv;
+
  /*
- * Accept the defaults for stats->statypid and others.
- * They have been set before we were called (see vacuum.h)
+ * Accept the defaults for stats->statypid and others. They have
+ * been set before we were called (see vacuum.h)
  */
  }
  }
@@ -2232,9 +2234,10 @@ compute_scalar_stats(VacAttrStatsP stats,
  stats->numnumbers[slot_idx] = num_mcv;
  stats->stavalues[slot_idx] = mcv_values;
  stats->numvalues[slot_idx] = num_mcv;
+
  /*
- * Accept the defaults for stats->statypid and others.
- * They have been set before we were called (see vacuum.h)
+ * Accept the defaults for stats->statypid and others. They have
+ * been set before we were called (see vacuum.h)
  */
  slot_idx++;
  }
@@ -2312,7 +2315,7 @@ compute_scalar_stats(VacAttrStatsP stats,

  /*
  * The object of this loop is to copy the first and last values[]
- * entries along with evenly-spaced values in between. So the
+ * entries along with evenly-spaced values in between. So the
  * i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But
  * computing that subscript directly risks integer overflow when
  * the stats target is more than a couple thousand. Instead we
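The comment in the hunk above is cut off by the hunk boundary ("Instead we..."); the idea it is introducing is to compute the subscript (i * (nvals - 1)) / (num_hist - 1) incrementally so that the product i * (nvals - 1) is never formed and cannot overflow. A self-contained sketch of that technique, keeping an integer position plus a scaled fractional remainder (the variable names are illustrative, not necessarily those used in analyze.c):

#include <stdio.h>

/* Pick num_hist entries from values[0..nvals-1]: the first, the last, and
 * evenly spaced entries in between, i.e. values[(i * (nvals-1)) / (num_hist-1)],
 * accumulated step by step instead of multiplying. Assumes num_hist >= 2. */
static void
pick_evenly_spaced(const double *values, int nvals, double *hist, int num_hist)
{
	int		delta = (nvals - 1) / (num_hist - 1);		/* whole step per bucket */
	int		deltafrac = (nvals - 1) % (num_hist - 1);	/* fractional step */
	int		pos = 0;		/* current integer subscript */
	int		posfrac = 0;	/* accumulated fraction, scaled by (num_hist - 1) */
	int		i;

	for (i = 0; i < num_hist; i++)
	{
		hist[i] = values[pos];
		pos += delta;
		posfrac += deltafrac;
		if (posfrac >= num_hist - 1)
		{
			/* the fractional part reached one whole unit: carry into pos */
			pos++;
			posfrac -= num_hist - 1;
		}
	}
}

int
main(void)
{
	double	values[1000], hist[11];
	int		i;

	for (i = 0; i < 1000; i++)
		values[i] = (double) i;
	pick_evenly_spaced(values, 1000, hist, 11);
	for (i = 0; i < 11; i++)
		printf("%.0f ", hist[i]);	/* expect 0 ... 999 in ~100-wide steps */
	printf("\n");
	return 0;
}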
@@ -2344,9 +2347,10 @@ compute_scalar_stats(VacAttrStatsP stats,
  stats->staop[slot_idx] = mystats->ltopr;
  stats->stavalues[slot_idx] = hist_values;
  stats->numvalues[slot_idx] = num_hist;
+
  /*
- * Accept the defaults for stats->statypid and others.
- * They have been set before we were called (see vacuum.h)
+ * Accept the defaults for stats->statypid and others. They have
+ * been set before we were called (see vacuum.h)
  */
  slot_idx++;
  }