Pre-beta mechanical code beautification.
Run pgindent, pgperltidy, and reformat-dat-files. I manually fixed a couple of comments that pgindent uglified.
@@ -673,9 +673,8 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
 		{
 			/*
 			 * It's now safe to pin the buffer. We can't pin first and ask
-			 * questions later, because it might confuse code paths
-			 * like InvalidateBuffer() if we pinned a random non-matching
-			 * buffer.
+			 * questions later, because it might confuse code paths like
+			 * InvalidateBuffer() if we pinned a random non-matching buffer.
 			 */
 			if (have_private_ref)
 				PinBuffer(bufHdr, NULL);	/* bump pin count */
@@ -2945,10 +2944,10 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
 	if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
 	{
 		/*
-		 * Not every table AM uses BLCKSZ wide fixed size blocks.
-		 * Therefore tableam returns the size in bytes - but for the
-		 * purpose of this routine, we want the number of blocks.
-		 * Therefore divide, rounding up.
+		 * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
+		 * tableam returns the size in bytes - but for the purpose of this
+		 * routine, we want the number of blocks. Therefore divide, rounding
+		 * up.
 		 */
 		uint64		szbytes;
 
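Editor's note: the comment in the hunk above describes converting the byte count returned by the table AM into a block count by dividing and rounding up. A minimal standalone sketch of that arithmetic follows; BLCKSZ's value, the helper name, and the sample input are illustrative assumptions, not taken from the commit.

#include <stdint.h>
#include <stdio.h>

#define BLCKSZ 8192				/* assumed default PostgreSQL block size */

/* Convert a byte count into whole blocks, rounding up as the comment says. */
static uint64_t
bytes_to_blocks(uint64_t szbytes)
{
	return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
}

int
main(void)
{
	/* 8193 bytes spills into a second block, so this prints 2. */
	printf("%llu\n", (unsigned long long) bytes_to_blocks(8193));
	return 0;
}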
@@ -2958,7 +2957,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
 	}
 	else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
 	{
-		return smgrnblocks(RelationGetSmgr(relation), forkNum);
+		return smgrnblocks(RelationGetSmgr(relation), forkNum);
 	}
 	else
 		Assert(false);
@@ -3707,9 +3706,9 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
 	BufferAccessStrategy bstrategy_dst;
 
 	/*
-	 * In general, we want to write WAL whenever wal_level > 'minimal', but
-	 * we can skip it when copying any fork of an unlogged relation other
-	 * than the init fork.
+	 * In general, we want to write WAL whenever wal_level > 'minimal', but we
+	 * can skip it when copying any fork of an unlogged relation other than
+	 * the init fork.
 	 */
 	use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
 
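Editor's note: the reflowed comment above states the rule that the use_wal assignment implements: log the copy whenever WAL is required at all, except for non-init forks of unlogged relations. A hedged restatement as a standalone helper follows; the function name and boolean parameters are illustrative, not PostgreSQL APIs.

#include <stdbool.h>

/*
 * Illustrative restatement of the use_wal rule: WAL the copy when WAL is
 * needed at all and either the relation is permanent or the fork being
 * copied is an unlogged relation's init fork.
 */
static bool
copy_needs_wal(bool wal_needed, bool permanent, bool is_init_fork)
{
	return wal_needed && (permanent || is_init_fork);
}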
@@ -3779,9 +3778,9 @@ void
 CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
 						  bool permanent)
 {
-	Relation	src_rel;
-	Relation	dst_rel;
-	char		relpersistence;
+	Relation	src_rel;
+	Relation	dst_rel;
+	char		relpersistence;
 
 	/* Set the relpersistence. */
 	relpersistence = permanent ?
@@ -3789,9 +3788,9 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
 
 	/*
 	 * We can't use a real relcache entry for a relation in some other
-	 * database, but since we're only going to access the fields related
-	 * to physical storage, a fake one is good enough. If we didn't do this
-	 * and used the smgr layer directly, we would have to worry about
+	 * database, but since we're only going to access the fields related to
+	 * physical storage, a fake one is good enough. If we didn't do this and
+	 * used the smgr layer directly, we would have to worry about
 	 * invalidations.
 	 */
 	src_rel = CreateFakeRelcacheEntry(src_rnode);
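Editor's note: the comment above explains why a fake relcache entry suffices when only the physical-storage fields are touched. A rough sketch of how that pattern is typically used follows, assuming the backend environment and the fake relcache API from access/xlogutils.h; fake_rel_nblocks() is a hypothetical helper, not a function in the tree.

/* Sketch only; requires PostgreSQL backend headers and a running backend. */
#include "postgres.h"

#include "access/xlogutils.h"
#include "storage/smgr.h"
#include "utils/rel.h"

static BlockNumber
fake_rel_nblocks(RelFileNode rnode, ForkNumber forkNum)
{
	/* Fake entry carries just enough state for smgr-level access. */
	Relation	fakerel = CreateFakeRelcacheEntry(rnode);
	BlockNumber result = smgrnblocks(RelationGetSmgr(fakerel), forkNum);

	FreeFakeRelcacheEntry(fakerel);
	return result;
}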
@@ -1172,8 +1172,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 		 *
 		 * We have to sort them logically, because in KnownAssignedXidsAdd we
 		 * call TransactionIdFollowsOrEquals and so on. But we know these XIDs
-		 * come from RUNNING_XACTS, which means there are only normal XIDs from
-		 * the same epoch, so this is safe.
+		 * come from RUNNING_XACTS, which means there are only normal XIDs
+		 * from the same epoch, so this is safe.
 		 */
 		qsort(xids, nxids, sizeof(TransactionId), xidLogicalComparator);
 
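Editor's note: the comment above relies on the fact that for normal XIDs from a single epoch, plain unsigned comparison yields the logical order that TransactionIdFollowsOrEquals expects. A minimal standalone comparator in that spirit follows; the type and names are simplified stand-ins, not the backend's xidLogicalComparator itself.

#include <stdint.h>
#include <stdlib.h>

typedef uint32_t SimpleXid;		/* stand-in for TransactionId */

/*
 * qsort comparator giving logical order for normal, same-epoch XIDs:
 * plain unsigned comparison is sufficient in that case.
 */
static int
xid_cmp(const void *a, const void *b)
{
	SimpleXid	x = *(const SimpleXid *) a;
	SimpleXid	y = *(const SimpleXid *) b;

	if (x < y)
		return -1;
	if (x > y)
		return 1;
	return 0;
}

/* Usage: qsort(xids, nxids, sizeof(SimpleXid), xid_cmp); */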
@@ -534,9 +534,9 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait,
 	}
 
 	/*
-	 * If the caller has requested force flush or we have written more than 1/4
-	 * of the ring size, mark it as written in shared memory and notify the
-	 * receiver.
+	 * If the caller has requested force flush or we have written more than
+	 * 1/4 of the ring size, mark it as written in shared memory and notify
+	 * the receiver.
 	 */
 	if (force_flush || mqh->mqh_send_pending > (mq->mq_ring_size >> 2))
 	{
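Editor's note: the comment above describes the flush heuristic, i.e. notify the receiver once the pending bytes exceed a quarter of the ring. A small sketch of that check follows; the function and parameter names are illustrative, not the shm_mq fields themselves.

#include <stdbool.h>
#include <stdint.h>

/*
 * Flush when forced, or once the bytes written but not yet signalled exceed
 * one quarter of the ring; ring_size >> 2 is the same threshold as above.
 */
static bool
should_flush(bool force_flush, uint64_t pending_bytes, uint64_t ring_size)
{
	return force_flush || pending_bytes > (ring_size >> 2);
}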
@@ -208,10 +208,11 @@ SInvalShmemSize(void)
 
 	/*
 	 * In Hot Standby mode, the startup process requests a procState array
-	 * slot using InitRecoveryTransactionEnvironment(). Even though MaxBackends
-	 * doesn't account for the startup process, it is guaranteed to get a
-	 * free slot. This is because the autovacuum launcher and worker processes,
-	 * which are included in MaxBackends, are not started in Hot Standby mode.
+	 * slot using InitRecoveryTransactionEnvironment(). Even though
+	 * MaxBackends doesn't account for the startup process, it is guaranteed
+	 * to get a free slot. This is because the autovacuum launcher and worker
+	 * processes, which are included in MaxBackends, are not started in Hot
+	 * Standby mode.
 	 */
 	size = add_size(size, mul_size(sizeof(ProcState), MaxBackends));
 
@@ -795,7 +795,7 @@ PageRepairFragmentation(Page page)
 	if (finalusedlp != nline)
 	{
 		/* The last line pointer is not the last used line pointer */
-		int			nunusedend = nline - finalusedlp;
+		int			nunusedend = nline - finalusedlp;
 
 		Assert(nunused >= nunusedend && nunusedend > 0);
 