Mirror of https://github.com/postgres/postgres.git, synced 2025-07-07 00:36:50 +03:00
Initial pgindent run for v12.
This is still using the 2.0 version of pg_bsd_indent. I thought it would be
good to commit this separately, so as to document the differences between
2.0 and 2.1 behavior.

Discussion: https://postgr.es/m/16296.1558103386@sss.pgh.pa.us
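Since this is a reindentation run, most hunks below change only whitespace or comment wrapping; where a removed and an added line read the same, the difference is purely in tabs and padding. As a rough sketch of what such a pass does (hypothetical function and variable names, not code from this commit), pg_bsd_indent pads local declarations so the variable names line up at a common tab stop and reflows block comments to the project's preferred width:

/*
 * Hypothetical before/after sketch of a pgindent pass; the function and
 * variable names are illustrative and do not appear in this commit.
 */
static void
before_pgindent(void)
{
    int options = 0;            /* one space between type and name */

    /*
     * An example block comment
     * that was wrapped early.
     */
    options++;
}

static void
after_pgindent(void)
{
    int         options = 0;    /* name padded to the common tab stop */

    /*
     * An example block comment that was wrapped early.
     */
    options++;
}

int
main(void)
{
    before_pgindent();
    after_pgindent();
    return 0;
}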
src/backend/access/heap/heapam.c

@@ -1684,8 +1684,8 @@ void
 heap_get_latest_tid(TableScanDesc sscan,
                     ItemPointer tid)
 {
-    Relation relation = sscan->rs_rd;
-    Snapshot snapshot = sscan->rs_snapshot;
+    Relation    relation = sscan->rs_rd;
+    Snapshot    snapshot = sscan->rs_snapshot;
     ItemPointerData ctid;
     TransactionId priorXmax;
 
src/backend/access/heap/heapam_handler.c

@@ -474,6 +474,7 @@ tuple_lock_retry:
                     HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
                 {
                     tmfd->xmax = priorXmax;
+
                     /*
                      * Cmin is the problematic value, so store that. See
                      * above.
@@ -1172,7 +1173,7 @@ heapam_index_build_range_scan(Relation heapRelation,
     Snapshot    snapshot;
     bool        need_unregister_snapshot = false;
     TransactionId OldestXmin;
-    BlockNumber previous_blkno = InvalidBlockNumber;
+    BlockNumber previous_blkno = InvalidBlockNumber;
     BlockNumber root_blkno = InvalidBlockNumber;
     OffsetNumber root_offsets[MaxHeapTuplesPerPage];
 
@@ -1263,7 +1264,7 @@ heapam_index_build_range_scan(Relation heapRelation,
     /* Publish number of blocks to scan */
     if (progress)
     {
-        BlockNumber nblocks;
+        BlockNumber nblocks;
 
         if (hscan->rs_base.rs_parallel != NULL)
         {
@@ -1314,7 +1315,7 @@ heapam_index_build_range_scan(Relation heapRelation,
     /* Report scan progress, if asked to. */
     if (progress)
     {
-        BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
+        BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
 
         if (blocks_done != previous_blkno)
         {
@@ -1668,7 +1669,7 @@ heapam_index_build_range_scan(Relation heapRelation,
     /* Report scan progress one last time. */
     if (progress)
     {
-        BlockNumber blks_done;
+        BlockNumber blks_done;
 
         if (hscan->rs_base.rs_parallel != NULL)
         {
@@ -1720,7 +1721,7 @@ heapam_index_validate_scan(Relation heapRelation,
     BlockNumber root_blkno = InvalidBlockNumber;
     OffsetNumber root_offsets[MaxHeapTuplesPerPage];
     bool        in_index[MaxHeapTuplesPerPage];
-    BlockNumber previous_blkno = InvalidBlockNumber;
+    BlockNumber previous_blkno = InvalidBlockNumber;
 
     /* state variables for the merge */
     ItemPointer indexcursor = NULL;
@@ -1955,8 +1956,8 @@ static BlockNumber
 heapam_scan_get_blocks_done(HeapScanDesc hscan)
 {
     ParallelBlockTableScanDesc bpscan = NULL;
-    BlockNumber startblock;
-    BlockNumber blocks_done;
+    BlockNumber startblock;
+    BlockNumber blocks_done;
 
     if (hscan->rs_base.rs_parallel != NULL)
     {
@@ -1974,7 +1975,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan)
         blocks_done = hscan->rs_cblock - startblock;
     else
     {
-        BlockNumber nblocks;
+        BlockNumber nblocks;
 
         nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
         blocks_done = nblocks - startblock +
src/backend/access/heap/rewriteheap.c

@@ -652,7 +652,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
     }
     else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
     {
-        int options = HEAP_INSERT_SKIP_FSM;
+        int         options = HEAP_INSERT_SKIP_FSM;
 
         if (!state->rs_use_wal)
             options |= HEAP_INSERT_SKIP_WAL;
src/backend/access/heap/tuptoaster.c

@@ -2295,16 +2295,16 @@ static struct varlena *
 toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
 {
     struct varlena *result;
-    int32 rawsize;
+    int32       rawsize;
 
     Assert(VARATT_IS_COMPRESSED(attr));
 
     result = (struct varlena *) palloc(slicelength + VARHDRSZ);
 
     rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
-                              VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
-                              VARDATA(result),
-                              slicelength, false);
+                              VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
+                              VARDATA(result),
+                              slicelength, false);
     if (rawsize < 0)
         elog(ERROR, "compressed data is corrupted");
 
src/backend/access/heap/vacuumlazy.c

@@ -164,7 +164,7 @@ static void lazy_cleanup_index(Relation indrel,
 static int  lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                              int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
-                                      LVRelStats *vacrelstats);
+                                      LVRelStats *vacrelstats);
 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
 static BlockNumber count_nondeletable_pages(Relation onerel,
                                             LVRelStats *vacrelstats);
@@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                      * cheaper to get rid of it in the next pruning pass than
                      * to treat it like an indexed tuple. Finally, if index
                      * cleanup is disabled, the second heap pass will not
-                     * execute, and the tuple will not get removed, so we
-                     * must treat it like any other dead tuple that we choose
-                     * to keep.
+                     * execute, and the tuple will not get removed, so we must
+                     * treat it like any other dead tuple that we choose to
+                     * keep.
                      *
                      * If this were to happen for a tuple that actually needed
                      * to be deleted, we'd be in trouble, because it'd
@@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                     all_visible = false;
                     break;
                 case HEAPTUPLE_LIVE:
+
                     /*
                      * Count it as live. Not only is this natural, but it's
                      * also what acquire_sample_rows() does.
@@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
         else
         {
             /*
-             * Here, we have indexes but index cleanup is disabled. Instead of
-             * vacuuming the dead tuples on the heap, we just forget them.
+             * Here, we have indexes but index cleanup is disabled.
+             * Instead of vacuuming the dead tuples on the heap, we just
+             * forget them.
              *
              * Note that vacrelstats->dead_tuples could have tuples which
              * became dead after HOT-pruning but are not marked dead yet.
-             * We do not process them because it's a very rare condition, and
-             * the next vacuum will process them anyway.
+             * We do not process them because it's a very rare condition,
+             * and the next vacuum will process them anyway.
              */
             Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
         }