
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
Bruce Momjian
2012-06-10 15:20:04 -04:00
parent 60801944fa
commit 927d61eeff
494 changed files with 7343 additions and 7046 deletions

src/backend/commands/vacuumlazy.c

@@ -155,9 +155,9 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
     BlockNumber possibly_freeable;
     PGRUsage    ru0;
     TimestampTz starttime = 0;
-    long        secs;
-    int         usecs;
-    double      read_rate,
+    long        secs;
+    int         usecs;
+    double      read_rate,
                 write_rate;
     bool        scan_all;
     TransactionId freezeTableLimit;
@@ -222,17 +222,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
      *
      * A corner case here is that if we scanned no pages at all because every
      * page is all-visible, we should not update relpages/reltuples, because
-     * we have no new information to contribute. In particular this keeps
-     * us from replacing relpages=reltuples=0 (which means "unknown tuple
+     * we have no new information to contribute. In particular this keeps us
+     * from replacing relpages=reltuples=0 (which means "unknown tuple
      * density") with nonzero relpages and reltuples=0 (which means "zero
      * tuple density") unless there's some actual evidence for the latter.
      *
-     * We do update relallvisible even in the corner case, since if the
-     * table is all-visible we'd definitely like to know that. But clamp
-     * the value to be not more than what we're setting relpages to.
+     * We do update relallvisible even in the corner case, since if the table
+     * is all-visible we'd definitely like to know that. But clamp the value
+     * to be not more than what we're setting relpages to.
      *
-     * Also, don't change relfrozenxid if we skipped any pages, since then
-     * we don't know for certain that all tuples have a newer xmin.
+     * Also, don't change relfrozenxid if we skipped any pages, since then we
+     * don't know for certain that all tuples have a newer xmin.
      */
     new_rel_pages = vacrelstats->rel_pages;
     new_rel_tuples = vacrelstats->new_rel_tuples;
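
The statistics update these comments guard is only a few lines of
lazy_vacuum_rel. A condensed sketch of that logic (assuming the 9.2-era
visibilitymap_count() API and the scanned_pages counter in LVRelStats; not
the verbatim function body):

    /*
     * If we scanned nothing, keep the existing relpages/reltuples rather
     * than inventing new values; always refresh relallvisible, but clamp
     * it to relpages so the stats stay mutually consistent.
     */
    new_rel_pages = vacrelstats->rel_pages;
    new_rel_tuples = vacrelstats->new_rel_tuples;
    if (vacrelstats->scanned_pages == 0 && new_rel_pages > 0)
    {
        new_rel_pages = RelationGetNumberOfBlocks(onerel);
        new_rel_tuples = onerel->rd_rel->reltuples;
    }

    new_rel_allvisible = visibilitymap_count(onerel);
    if (new_rel_allvisible > new_rel_pages)
        new_rel_allvisible = new_rel_pages;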
@@ -265,7 +265,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
     /* and log the action if appropriate */
     if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
     {
-        TimestampTz endtime = GetCurrentTimestamp();
+        TimestampTz endtime = GetCurrentTimestamp();

         if (Log_autovacuum_min_duration == 0 ||
             TimestampDifferenceExceeds(starttime, endtime,
@@ -277,17 +277,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
             write_rate = 0;
             if ((secs > 0) || (usecs > 0))
             {
-                read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
-                    (secs + usecs / 1000000.0);
-                write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
-                    (secs + usecs / 1000000.0);
+                read_rate = (double) BLCKSZ *VacuumPageMiss / (1024 * 1024) /
+                    (secs + usecs / 1000000.0);
+                write_rate = (double) BLCKSZ *VacuumPageDirty / (1024 * 1024) /
+                    (secs + usecs / 1000000.0);
             }
             ereport(LOG,
                     (errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"
                             "pages: %d removed, %d remain\n"
                             "tuples: %.0f removed, %.0f remain\n"
                             "buffer usage: %d hits, %d misses, %d dirtied\n"
-                            "avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n"
+                            "avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n"
                             "system usage: %s",
                             get_database_name(MyDatabaseId),
                             get_namespace_name(RelationGetNamespace(onerel)),
@@ -300,7 +300,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
                             VacuumPageHit,
                             VacuumPageMiss,
                             VacuumPageDirty,
-                            read_rate,write_rate,
+                            read_rate, write_rate,
                             pg_rusage_show(&ru0))));
         }
     }
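
The one change above that is more than reflowed whitespace is pgindent
rewriting BLCKSZ * VacuumPageMiss as BLCKSZ *VacuumPageMiss: the tool
apparently takes the identifier following the (double) cast for a type name
and formats the * as a pointer declarator. The token stream, and hence the
arithmetic, is unchanged. A small standalone program (counter values are
made up for illustration; only BLCKSZ = 8192, the default block size, is
assumed) demonstrates the intended MiB/s computation:

    #include <stdio.h>

    #define BLCKSZ 8192         /* assume the default PostgreSQL block size */

    int
    main(void)
    {
        /* hypothetical counters and elapsed time, for illustration only */
        int     vacuum_page_miss = 2048;    /* pages read from disk */
        int     vacuum_page_dirty = 512;    /* pages dirtied */
        long    secs = 3;
        int     usecs = 250000;

        double  elapsed = secs + usecs / 1000000.0;
        double  read_rate = 0;
        double  write_rate = 0;

        if (elapsed > 0)
        {
            /* bytes -> MiB, divided by elapsed seconds */
            read_rate = (double) BLCKSZ * vacuum_page_miss /
                (1024 * 1024) / elapsed;
            write_rate = (double) BLCKSZ * vacuum_page_dirty /
                (1024 * 1024) / elapsed;
        }

        printf("avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n",
               read_rate, write_rate);
        return 0;
    }

Note that the cast applies to BLCKSZ, so the multiplication is carried out
in double from the start, avoiding integer overflow and truncation for
large page counts.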
@@ -501,10 +501,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             vacrelstats->num_dead_tuples > 0)
         {
             /*
-             * Before beginning index vacuuming, we release any pin we may hold
-             * on the visibility map page. This isn't necessary for correctness,
-             * but we do it anyway to avoid holding the pin across a lengthy,
-             * unrelated operation.
+             * Before beginning index vacuuming, we release any pin we may
+             * hold on the visibility map page. This isn't necessary for
+             * correctness, but we do it anyway to avoid holding the pin
+             * across a lengthy, unrelated operation.
              */
             if (BufferIsValid(vmbuffer))
             {
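
The body of that if block, cut off by the hunk, is presumably the standard
bufmgr pin-release idiom; a sketch consistent with the surrounding code:

            if (BufferIsValid(vmbuffer))
            {
                /* drop our visibility map page pin before index vacuuming */
                ReleaseBuffer(vmbuffer);
                vmbuffer = InvalidBuffer;
            }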
@@ -535,10 +535,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
         /*
          * Pin the visibility map page in case we need to mark the page
          * all-visible. In most cases this will be very cheap, because we'll
-         * already have the correct page pinned anyway. However, it's possible
-         * that (a) next_not_all_visible_block is covered by a different VM page
-         * than the current block or (b) we released our pin and did a cycle of
-         * index vacuuming.
+         * already have the correct page pinned anyway. However, it's
+         * possible that (a) next_not_all_visible_block is covered by a
+         * different VM page than the current block or (b) we released our pin
+         * and did a cycle of index vacuuming.
          */
         visibilitymap_pin(onerel, blkno, &vmbuffer);

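visibilitymap_pin() is what makes both cases cheap to handle. A sketch of
the contract as this comment describes it (the call pattern below is
illustrative; the signature matches the 9.2-era visibilitymap.h):

    Buffer      vmbuffer = InvalidBuffer;

    /*
     * Pin the VM page covering heap block blkno. If vmbuffer already pins
     * that same VM page, the call is nearly free; if it pins a different
     * one (case (a)) or is invalid because we released it (case (b)), any
     * old pin is dropped and a new one taken.
     */
    visibilitymap_pin(onerel, blkno, &vmbuffer);

    /* ... work with the heap page and its VM bit ... */

    if (BufferIsValid(vmbuffer))
        ReleaseBuffer(vmbuffer);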
@@ -873,10 +873,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
         else if (!all_visible_according_to_vm)
         {
             /*
-             * It should never be the case that the visibility map page
-             * is set while the page-level bit is clear, but the reverse
-             * is allowed. Set the visibility map bit as well so that
-             * we get back in sync.
+             * It should never be the case that the visibility map page is
+             * set while the page-level bit is clear, but the reverse is
+             * allowed. Set the visibility map bit as well so that we get
+             * back in sync.
              */
             visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer,
                               visibility_cutoff_xid);
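
The invariant the comment relies on, written as an illustrative assertion
(assuming the 9.2-era visibilitymap_test() API and the PageIsAllVisible()
page-header macro; not code from the patch):

    /*
     * The VM bit may lag behind the page-level PD_ALL_VISIBLE flag, but
     * must never be set while the page-level flag is clear.
     */
    Assert(!visibilitymap_test(onerel, blkno, &vmbuffer) ||
           PageIsAllVisible(page));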
@@ -1152,7 +1152,7 @@ lazy_check_needs_freeze(Buffer buf)

         if (heap_tuple_needs_freeze(tupleheader, FreezeLimit, buf))
             return true;
-    }                           /* scan along page */
+    }                           /* scan along page */

     return false;
 }
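
For context, the /* scan along page */ comment closes the offset-number
loop of lazy_check_needs_freeze. A reconstruction of the whole function
from this fragment, using the standard page-access macros (a sketch, not
the verbatim 9.2 source; FreezeLimit is the file-level cutoff variable in
vacuumlazy.c):

    static bool
    lazy_check_needs_freeze(Buffer buf)
    {
        Page        page = BufferGetPage(buf);
        OffsetNumber offnum,
                    maxoff;
        HeapTupleHeader tupleheader;

        if (PageIsNew(page) || PageIsEmpty(page))
            return false;       /* nothing on the page can need freezing */

        maxoff = PageGetMaxOffsetNumber(page);
        for (offnum = FirstOffsetNumber;
             offnum <= maxoff;
             offnum = OffsetNumberNext(offnum))
        {
            ItemId      itemid = PageGetItemId(page, offnum);

            if (!ItemIdIsNormal(itemid))
                continue;       /* skip unused, dead and redirect pointers */

            tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);

            if (heap_tuple_needs_freeze(tupleheader, FreezeLimit, buf))
                return true;
        }                       /* scan along page */

        return false;
    }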