
Initial pgindent and pgperltidy run for v13.

Includes some manual cleanup of places that pgindent messed up,
most of which weren't per project style anyway.

Notably, it seems some people didn't absorb the style rules of
commit c9d297751, because there were a bunch of new occurrences
of function calls with a newline just after the left paren, all
with faulty expectations about how the rest of the call would get
indented.
commit 5cbfce562f
parent 1255466f83
Author: Tom Lane
Date:   2020-05-14 13:06:38 -04:00
198 changed files with 2019 additions and 1786 deletions
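As a hedged illustration of the call style the commit message complains about (this snippet is not part of the commit; do_something, its arguments, and rc are hypothetical names, and pgindent's exact output depends on its configuration), the failure mode and the project-style form look roughly like this:

    /*
     * As written: a newline directly after the left paren, expecting the
     * arguments to keep a fixed continuation indent.
     */
    rc = do_something(
        arg_one,
        arg_two);

    /*
     * Roughly what pgindent produces: continuation lines get re-aligned
     * relative to the open paren, not to the tab stop the author expected.
     */
    rc = do_something(
                      arg_one,
                      arg_two);

    /*
     * Project style per commit c9d297751: keep the first argument on the
     * same line as the open paren and align wrapped arguments under it.
     */
    rc = do_something(arg_one,
                      arg_two);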

src/backend/storage/buffer/bufmgr.c

@@ -2994,7 +2994,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber *forkNum,
                 bufHdr->tag.forkNum == forkNum[j] &&
                 bufHdr->tag.blockNum >= firstDelBlock[j])
             {
-                InvalidateBuffer(bufHdr); /* releases spinlock */
+                InvalidateBuffer(bufHdr);   /* releases spinlock */
                 break;
             }
         }

src/backend/storage/freespace/freespace.c

@@ -287,7 +287,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks)
     {
         buf = fsm_readbuf(rel, first_removed_address, false);
         if (!BufferIsValid(buf))
-            return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
+            return InvalidBlockNumber;  /* nothing to do; the FSM was already
+                                         * smaller */
         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
         /* NO EREPORT(ERROR) from here till changes are logged */
@@ -317,7 +318,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks)
     {
         new_nfsmblocks = fsm_logical_to_physical(first_removed_address);
         if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks)
-            return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
+            return InvalidBlockNumber;  /* nothing to do; the FSM was already
+                                         * smaller */
     }
 
     return new_nfsmblocks;

src/backend/storage/ipc/latch.c

@@ -1099,9 +1099,9 @@ WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
         !PostmasterIsAlive())
     {
         /*
-         * The extra PostmasterIsAliveInternal() check prevents false alarms on
-         * systems that give a different value for getppid() while being traced
-         * by a debugger.
+         * The extra PostmasterIsAliveInternal() check prevents false alarms
+         * on systems that give a different value for getppid() while being
+         * traced by a debugger.
          */
         set->report_postmaster_not_running = true;
     }

src/backend/storage/ipc/procarray.c

@@ -434,7 +434,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
     pgxact->xmin = InvalidTransactionId;
     /* must be cleared with xid/xmin: */
     pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-    proc->delayChkpt = false; /* be sure this is cleared in abort */
+    proc->delayChkpt = false;   /* be sure this is cleared in abort */
     proc->recoveryConflictPending = false;
 
     Assert(pgxact->nxids == 0);
@@ -456,7 +456,7 @@ ProcArrayEndTransactionInternal(PGPROC *proc, PGXACT *pgxact,
     pgxact->xmin = InvalidTransactionId;
     /* must be cleared with xid/xmin: */
     pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-    proc->delayChkpt = false; /* be sure this is cleared in abort */
+    proc->delayChkpt = false;   /* be sure this is cleared in abort */
     proc->recoveryConflictPending = false;
 
     /* Clear the subtransaction-XID cache too while holding the lock */

src/backend/storage/ipc/procsignal.c

@@ -60,8 +60,8 @@ typedef struct
 {
     pid_t       pss_pid;
     sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
-    pg_atomic_uint64 pss_barrierGeneration;
-    pg_atomic_uint32 pss_barrierCheckMask;
+    pg_atomic_uint64 pss_barrierGeneration;
+    pg_atomic_uint32 pss_barrierCheckMask;
 } ProcSignalSlot;
 
 /*
@@ -72,8 +72,8 @@ typedef struct
  */
 typedef struct
 {
-    pg_atomic_uint64 psh_barrierGeneration;
-    ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
+    pg_atomic_uint64 psh_barrierGeneration;
+    ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
 } ProcSignalHeader;
 
 /*
@@ -101,7 +101,7 @@ static void ProcessBarrierPlaceholder(void);
 Size
 ProcSignalShmemSize(void)
 {
-    Size size;
+    Size        size;
 
     size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
     size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
@@ -124,7 +124,7 @@ ProcSignalShmemInit(void)
     /* If we're first, initialize. */
     if (!found)
     {
-        int i;
+        int         i;
 
         pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
@@ -168,13 +168,13 @@ ProcSignalInit(int pss_idx)
     /*
      * Initialize barrier state. Since we're a brand-new process, there
      * shouldn't be any leftover backend-private state that needs to be
-     * updated. Therefore, we can broadcast the latest barrier generation
-     * and disregard any previously-set check bits.
+     * updated. Therefore, we can broadcast the latest barrier generation and
+     * disregard any previously-set check bits.
      *
      * NB: This only works if this initialization happens early enough in the
      * startup sequence that we haven't yet cached any state that might need
-     * to be invalidated. That's also why we have a memory barrier here, to
-     * be sure that any later reads of memory happen strictly after this.
+     * to be invalidated. That's also why we have a memory barrier here, to be
+     * sure that any later reads of memory happen strictly after this.
      */
     pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
     barrier_generation =
@@ -320,16 +320,16 @@ SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
 uint64
 EmitProcSignalBarrier(ProcSignalBarrierType type)
 {
-    uint64 flagbit = UINT64CONST(1) << (uint64) type;
-    uint64 generation;
+    uint64      flagbit = UINT64CONST(1) << (uint64) type;
+    uint64      generation;
 
     /*
      * Set all the flags.
      *
-     * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this
-     * is totally ordered with respect to anything the caller did before, and
-     * anything that we do afterwards. (This is also true of the later call
-     * to pg_atomic_add_fetch_u64.)
+     * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
+     * totally ordered with respect to anything the caller did before, and
+     * anything that we do afterwards. (This is also true of the later call to
+     * pg_atomic_add_fetch_u64.)
      */
     for (int i = 0; i < NumProcSignalSlots; i++)
     {
@@ -349,18 +349,18 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
      * generation.
      *
      * Concurrency is not a problem here. Backends that have exited don't
-     * matter, and new backends that have joined since we entered this function
-     * must already have current state, since the caller is responsible for
-     * making sure that the relevant state is entirely visible before calling
-     * this function in the first place. We still have to wake them up -
-     * because we can't distinguish between such backends and older backends
-     * that need to update state - but they won't actually need to change
-     * any state.
+     * matter, and new backends that have joined since we entered this
+     * function must already have current state, since the caller is
+     * responsible for making sure that the relevant state is entirely visible
+     * before calling this function in the first place. We still have to wake
+     * them up - because we can't distinguish between such backends and older
+     * backends that need to update state - but they won't actually need to
+     * change any state.
      */
     for (int i = NumProcSignalSlots - 1; i >= 0; i--)
     {
         volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
-        pid_t pid = slot->pss_pid;
+        pid_t       pid = slot->pss_pid;
 
         if (pid != 0)
             kill(pid, SIGUSR1);
@@ -381,17 +381,17 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
 void
 WaitForProcSignalBarrier(uint64 generation)
 {
-    long timeout = 125L;
+    long        timeout = 125L;
 
     for (int i = NumProcSignalSlots - 1; i >= 0; i--)
     {
         volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
-        uint64 oldval;
+        uint64      oldval;
 
         oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
         while (oldval < generation)
         {
-            int events;
+            int         events;
 
             CHECK_FOR_INTERRUPTS();
@@ -408,11 +408,11 @@ WaitForProcSignalBarrier(uint64 generation)
     }
 
     /*
-     * The caller is probably calling this function because it wants to
-     * read the shared state or perform further writes to shared state once
-     * all backends are known to have absorbed the barrier. However, the
-     * read of pss_barrierGeneration was performed unlocked; insert a memory
-     * barrier to separate it from whatever follows.
+     * The caller is probably calling this function because it wants to read
+     * the shared state or perform further writes to shared state once all
+     * backends are known to have absorbed the barrier. However, the read of
+     * pss_barrierGeneration was performed unlocked; insert a memory barrier
+     * to separate it from whatever follows.
      */
     pg_memory_barrier();
 }
@@ -428,8 +428,8 @@ WaitForProcSignalBarrier(uint64 generation)
 void
 ProcessProcSignalBarrier(void)
 {
-    uint64 generation;
-    uint32 flags;
+    uint64      generation;
+    uint32      flags;
 
     /* Exit quickly if there's no work to do. */
     if (!ProcSignalBarrierPending)
@@ -437,8 +437,8 @@ ProcessProcSignalBarrier(void)
     ProcSignalBarrierPending = false;
 
     /*
-     * Read the current barrier generation, and then get the flags that
-     * are set for this backend. Note that pg_atomic_exchange_u32 is a full
+     * Read the current barrier generation, and then get the flags that are
+     * set for this backend. Note that pg_atomic_exchange_u32 is a full
      * barrier, so we're guaranteed that the read of the barrier generation
      * happens before we atomically extract the flags, and that any subsequent
      * state changes happen afterward.
@@ -477,8 +477,8 @@ ProcessBarrierPlaceholder(void)
      * machinery gets committed. Rename PROCSIGNAL_BARRIER_PLACEHOLDER to
      * PROCSIGNAL_BARRIER_SOMETHING_ELSE where SOMETHING_ELSE is something
      * appropriately descriptive. Get rid of this function and instead have
-     * ProcessBarrierSomethingElse. Most likely, that function should live
-     * in the file pertaining to that subsystem, rather than here.
+     * ProcessBarrierSomethingElse. Most likely, that function should live in
+     * the file pertaining to that subsystem, rather than here.
      */
 }
@@ -515,8 +515,8 @@ CheckProcSignalBarrier(void)
     if (slot != NULL)
     {
-        uint64 mygen;
-        uint64 curgen;
+        uint64      mygen;
+        uint64      curgen;
 
         mygen = pg_atomic_read_u64(&slot->pss_barrierGeneration);
         curgen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);

src/backend/storage/ipc/shmem.c

@@ -461,7 +461,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
     }
     else
     {
-        Size allocated_size;
+        Size        allocated_size;
 
         /* It isn't in the table yet. allocate and initialize it */
         structPtr = ShmemAllocRaw(size, &allocated_size);
@@ -539,7 +539,7 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
     MemoryContext oldcontext;
     HASH_SEQ_STATUS hstat;
     ShmemIndexEnt *ent;
-    Size named_allocated = 0;
+    Size        named_allocated = 0;
     Datum       values[PG_GET_SHMEM_SIZES_COLS];
     bool        nulls[PG_GET_SHMEM_SIZES_COLS];

src/backend/storage/lmgr/lock.c

@@ -1035,7 +1035,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
             found_conflict = true;
         else
             found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
-                                               lock, proclock);
+                                                lock, proclock);
 
         if (!found_conflict)
         {

src/backend/storage/smgr/smgr.c

@@ -553,7 +553,7 @@ smgrnblocks(SMgrRelation reln, ForkNumber forknum)
 void
 smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks)
 {
-    int i;
+    int         i;
 
     /*
      * Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
@@ -580,11 +580,11 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
         /*
          * We might as well update the local smgr_fsm_nblocks and
-         * smgr_vm_nblocks settings. The smgr cache inval message that
-         * this function sent will cause other backends to invalidate
-         * their copies of smgr_fsm_nblocks and smgr_vm_nblocks,
-         * and these ones too at the next command boundary.
-         * But these ensure they aren't outright wrong until then.
+         * smgr_vm_nblocks settings. The smgr cache inval message that this
+         * function sent will cause other backends to invalidate their copies
+         * of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the
+         * next command boundary. But these ensure they aren't outright wrong
+         * until then.
          */
         if (forknum[i] == FSM_FORKNUM)
             reln->smgr_fsm_nblocks = nblocks[i];