pgindent run for 9.6
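A note for readers outside the tree: pgindent is PostgreSQL's source-formatting tool, a wrapper around a patched BSD indent driven by the tree's typedef list. The hunks below are typical of its output: comment paragraphs reflowed to the standard line width, declaration names aligned in a column, and a space inserted after casts. As a rough illustration, here is a small self-contained C stub written in that style; the typedefs and the PageGetContents() stand-in are simplifications for this sketch, not PostgreSQL's real definitions.

#include <stdio.h>

typedef unsigned char uint8;
typedef char *Page;

static char vm_page[16] = "x";

/*
 * Stand-in for PostgreSQL's PageGetContents(); the real one returns the
 * data area past the page header, not the page start.
 */
static char *
PageGetContents(Page page)
{
    return page;
}

int
main(void)
{
    Page        page = vm_page;
    uint8      *map;

    /* a pre-pgindent spelling would be: map = (uint8 *)PageGetContents(page); */
    map = (uint8 *) PageGetContents(page);
    printf("%c\n", map[0]);
    return 0;
}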
src/backend/access/heap/heapam.c
@@ -1687,7 +1687,7 @@ heap_parallelscan_nextpage(HeapScanDesc scan)
 {
     BlockNumber page = InvalidBlockNumber;
     BlockNumber sync_startpage = InvalidBlockNumber;
-    BlockNumber report_page = InvalidBlockNumber;
+    BlockNumber report_page = InvalidBlockNumber;
     ParallelHeapScanDesc parallel_scan;

     Assert(scan->rs_parallel);
src/backend/access/heap/hio.c
@@ -178,7 +178,7 @@ static void
 RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
 {
     Page        page;
-    BlockNumber blockNum = InvalidBlockNumber,
+    BlockNumber blockNum = InvalidBlockNumber,
                 firstBlock = InvalidBlockNumber;
     int         extraBlocks = 0;
     int         lockWaiters = 0;
@@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
         return;

     /*
-     * It might seem like multiplying the number of lock waiters by as much
-     * as 20 is too aggressive, but benchmarking revealed that smaller numbers
-     * were insufficient. 512 is just an arbitrary cap to prevent pathological
-     * results.
+     * It might seem like multiplying the number of lock waiters by as much as
+     * 20 is too aggressive, but benchmarking revealed that smaller numbers
+     * were insufficient. 512 is just an arbitrary cap to prevent
+     * pathological results.
      */
     extraBlocks = Min(512, lockWaiters * 20);

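The heuristic in the hunk above is easy to check standalone. A minimal sketch, with the Min() macro restated here (in PostgreSQL it comes from c.h):

#include <stdio.h>

#define Min(x, y)   ((x) < (y) ? (x) : (y))

/* Batch relation extension: 20 new blocks per lock waiter, capped at 512. */
static int
extra_blocks_for(int lockWaiters)
{
    return Min(512, lockWaiters * 20);
}

int
main(void)
{
    /* 5 waiters -> 100 blocks; 40 waiters -> the 512 cap kicks in. */
    printf("%d %d\n", extra_blocks_for(5), extra_blocks_for(40));
    return 0;
}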
@@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
     }

     /*
-     * Updating the upper levels of the free space map is too expensive
-     * to do for every block, but it's worth doing once at the end to make
-     * sure that subsequent insertion activity sees all of those nifty free
-     * pages we just inserted.
+     * Updating the upper levels of the free space map is too expensive to do
+     * for every block, but it's worth doing once at the end to make sure that
+     * subsequent insertion activity sees all of those nifty free pages we
+     * just inserted.
      *
      * Note that we're using the freespace value that was reported for the
      * last block we added as if it were the freespace value for every block
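A generic sketch of the pattern that comment describes: per-block writes stay cheap at the leaf level, and the aggregate the upper level keeps is rebuilt once after the batch. This is an analogy with hypothetical names, not the FSM's actual data structure:

#include <stdio.h>

#define NLEAVES 16

static unsigned char leaf[NLEAVES];     /* per-block free-space category */
static unsigned char root;              /* upper level: max over the leaves */

/* Cheap per-block write: touch the leaf only and let the root go stale. */
static void
set_leaf(int i, unsigned char avail)
{
    leaf[i] = avail;
}

/* One pass at the end brings the upper level back in sync. */
static void
update_root(void)
{
    unsigned char max = 0;

    for (int i = 0; i < NLEAVES; i++)
        if (leaf[i] > max)
            max = leaf[i];
    root = max;
}

int
main(void)
{
    for (int i = 0; i < NLEAVES; i++)
        set_leaf(i, 200);       /* bulk extension: every new page equally empty */
    update_root();
    printf("root=%d\n", root);
    return 0;
}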
@@ -547,8 +547,8 @@ loop:
     }

     /*
-     * In addition to whatever extension we performed above, we always add
-     * at least one block to satisfy our own request.
+     * In addition to whatever extension we performed above, we always add at
+     * least one block to satisfy our own request.
      *
      * XXX This does an lseek - rather expensive - but at the moment it is the
      * only way to accurately determine how many blocks are in a relation. Is
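The lseek that XXX comment refers to is how the storage manager counts a relation's blocks: the file's size divided by the block size. A hedged sketch of that arithmetic (the path is a made-up example, and PostgreSQL's real md.c code additionally handles 1GB segment files):

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLCKSZ 8192             /* PostgreSQL's default block size */

/* Count blocks by seeking to the end of the relation's file. */
static long
nblocks_from_file(const char *path)
{
    int     fd = open(path, O_RDONLY);
    off_t   len;

    if (fd < 0)
        return -1;
    len = lseek(fd, 0, SEEK_END);   /* the "rather expensive" system call */
    close(fd);
    return (len < 0) ? -1 : (long) (len / BLCKSZ);
}

int
main(void)
{
    /* "base/16384/16385" is an illustrative relfilenode path, not a real one. */
    printf("%ld blocks\n", nblocks_from_file("base/16384/16385"));
    return 0;
}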
src/backend/access/heap/pruneheap.c
@@ -105,8 +105,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
         OldestXmin = RecentGlobalXmin;
     else
         OldestXmin =
-            TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
-                                                relation);
+            TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
+                                                relation);

     Assert(TransactionIdIsValid(OldestXmin));

src/backend/access/heap/visibilitymap.c
@@ -272,7 +272,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
     uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
     uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
     Page        page;
-    uint8      *map;
+    uint8      *map;

 #ifdef TRACE_VISIBILITYMAP
     elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
@@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
         elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

     page = BufferGetPage(vmBuf);
-    map = (uint8 *)PageGetContents(page);
+    map = (uint8 *) PageGetContents(page);
     LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

     if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
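For context on the mapByte/mapOffset arithmetic in the two hunks above: as of 9.6 the visibility map keeps two bits per heap block (all-visible and all-frozen), so VISIBILITYMAP_VALID_BITS covers 0x03 and four heap blocks share each map byte. A simplified sketch of that addressing within a single map page; the constants are restated here rather than taken from visibilitymap.c:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_HEAPBLOCK  2       /* all-visible bit + all-frozen bit */
#define HEAPBLOCKS_PER_BYTE (8 / BITS_PER_HEAPBLOCK)
#define VALID_BITS          0x03    /* simplified VISIBILITYMAP_VALID_BITS */

static uint8_t map[8192];           /* stand-in for one VM page's contents */

/* Set flag bits for one heap block, mirroring the mapByte/mapOffset math. */
static void
vm_set_bits(uint32_t heapBlk, uint8_t flags)
{
    uint32_t    mapByte = heapBlk / HEAPBLOCKS_PER_BYTE;
    uint8_t     mapOffset = (uint8_t) ((heapBlk % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK);

    map[mapByte] |= (uint8_t) (flags << mapOffset);
}

static uint8_t
vm_get_bits(uint32_t heapBlk)
{
    uint32_t    mapByte = heapBlk / HEAPBLOCKS_PER_BYTE;
    uint8_t     mapOffset = (uint8_t) ((heapBlk % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK);

    return (uint8_t) ((map[mapByte] >> mapOffset) & VALID_BITS);
}

int
main(void)
{
    vm_set_bits(5, 0x01);                   /* mark heap block 5 all-visible */
    printf("flags=%d\n", vm_get_bits(5));   /* prints flags=1 */
    return 0;
}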