Mirror of https://github.com/postgres/postgres.git, synced 2025-09-08 00:47:37 +03:00
pgindent run for 9.5
@@ -69,9 +69,9 @@ InitBufferPool(void)
 
 	/* Align descriptors to a cacheline boundary. */
 	BufferDescriptors = (BufferDescPadded *) CACHELINEALIGN(
-		ShmemInitStruct("Buffer Descriptors",
-						NBuffers * sizeof(BufferDescPadded) + PG_CACHE_LINE_SIZE,
-						&foundDescs));
+						ShmemInitStruct("Buffer Descriptors",
+										NBuffers * sizeof(BufferDescPadded) + PG_CACHE_LINE_SIZE,
+										&foundDescs));
 
 	BufferBlocks = (char *)
 		ShmemInitStruct("Buffer Blocks",
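The hunk above only re-indents the ShmemInitStruct() call, but the underlying trick is worth spelling out: the descriptor array is allocated one cache line larger than necessary and the start address is rounded up with CACHELINEALIGN(), so each padded descriptor begins on a cache-line boundary. The following standalone sketch shows the same idea; it is illustrative only, with malloc() standing in for ShmemInitStruct() and a 64-byte line size assumed.

/*
 * Illustrative sketch only, not PostgreSQL source: over-allocate by one
 * cache line and round the start address up, the way CACHELINEALIGN()
 * is applied to the BufferDescPadded array above.  malloc() stands in
 * for ShmemInitStruct(), and 64 bytes is an assumed line size.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE_SIZE 64

/* Round a pointer up to the next cache-line boundary. */
#define ALIGN_TO_CACHELINE(p) \
    ((void *) (((uintptr_t) (p) + CACHE_LINE_SIZE - 1) & ~(uintptr_t) (CACHE_LINE_SIZE - 1)))

typedef struct
{
    int         buf_id;
    char        pad[CACHE_LINE_SIZE - sizeof(int)];    /* pad to one full line */
} PaddedDescriptor;

int
main(void)
{
    int         ndescs = 16;

    /* Ask for one extra cache line so the aligned array still fits. */
    void       *raw = malloc(ndescs * sizeof(PaddedDescriptor) + CACHE_LINE_SIZE);
    PaddedDescriptor *descs = ALIGN_TO_CACHELINE(raw);

    printf("raw=%p aligned=%p\n", raw, (void *) descs);
    free(raw);
    return 0;
}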
@@ -68,8 +68,8 @@
 
 typedef struct PrivateRefCountEntry
 {
-	Buffer buffer;
-	int32 refcount;
+	Buffer		buffer;
+	int32		refcount;
 } PrivateRefCountEntry;
 
 /* 64 bytes, about the size of a cache line on common systems */
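The comment above refers to keeping the backend-private refcount array small enough to sit in roughly one cache line. A minimal compile-time check of that sizing, assuming an entry count of 8 and a 64-byte line (neither value appears in this diff), could look like the sketch below.

/*
 * Minimal sketch, not PostgreSQL source: each entry is two 4-byte fields,
 * so a small fixed array of them stays within roughly one 64-byte cache
 * line.  The entry count of 8 and the 64-byte line size are assumptions
 * made for this illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t Buffer;

typedef struct PrivateRefCountEntry
{
    Buffer      buffer;
    int32_t     refcount;
} PrivateRefCountEntry;

#define REFCOUNT_ARRAY_ENTRIES 8    /* assumed value for the sketch */

static PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];

/* Fails to compile if the array outgrows a typical 64-byte cache line. */
static_assert(sizeof(PrivateRefCountArray) <= 64,
              "private refcount array no longer fits in one cache line");

int
main(void)
{
    printf("array size: %zu bytes\n", sizeof(PrivateRefCountArray));
    return 0;
}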
@@ -132,8 +132,8 @@ static uint32 PrivateRefCountClock = 0;
 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
 
 static void ReservePrivateRefCountEntry(void);
-static PrivateRefCountEntry* NewPrivateRefCountEntry(Buffer buffer);
-static PrivateRefCountEntry* GetPrivateRefCountEntry(Buffer buffer, bool do_move);
+static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
+static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
 static inline int32 GetPrivateRefCount(Buffer buffer);
 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
 
@@ -154,7 +154,7 @@ ReservePrivateRefCountEntry(void)
 	 * majority of cases.
 	 */
 	{
-		int i;
+		int			i;
 
 		for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
 		{
@@ -180,10 +180,10 @@ ReservePrivateRefCountEntry(void)
 		 * hashtable. Use that slot.
 		 */
 		PrivateRefCountEntry *hashent;
-		bool found;
+		bool		found;
 
 		/* select victim slot */
-		ReservedRefCountEntry =
+		ReservedRefCountEntry =
 			&PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
 
 		/* Better be used, otherwise we shouldn't get here. */
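ReservePrivateRefCountEntry() first looks for a genuinely free slot in the small array and only then evicts a victim chosen round-robin by PrivateRefCountClock, spilling it into the overflow hash table. A standalone sketch of that reservation pattern follows; it is not the real implementation, the hash insertion is replaced by a stub, and the array size of 8 is assumed.

/*
 * Sketch of the reservation pattern above, not the real implementation:
 * scan the small array for a free slot first; if none is free, pick a
 * victim round-robin with a clock counter and push it out to an overflow
 * structure.  spill_to_hash() is a stand-in for the dynahash insertion
 * the real code performs, and REFCOUNT_ARRAY_ENTRIES is assumed to be 8.
 */
#include <stddef.h>
#include <stdint.h>

#define InvalidBuffer 0
#define REFCOUNT_ARRAY_ENTRIES 8

typedef struct
{
    int32_t     buffer;
    int32_t     refcount;
} RefCountEntry;

static RefCountEntry entries[REFCOUNT_ARRAY_ENTRIES];
static uint32_t clock_hand = 0;
static RefCountEntry *reserved = NULL;

/* Stand-in for moving the victim's entry into the overflow hash table. */
static void
spill_to_hash(const RefCountEntry *victim)
{
    (void) victim;
}

static void
reserve_entry(void)
{
    if (reserved != NULL)
        return;                 /* a free slot is already reserved */

    /* Prefer a genuinely unused slot. */
    for (int i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
    {
        if (entries[i].buffer == InvalidBuffer)
        {
            reserved = &entries[i];
            return;
        }
    }

    /* Otherwise evict a victim selected by the clock counter. */
    reserved = &entries[clock_hand++ % REFCOUNT_ARRAY_ENTRIES];
    spill_to_hash(reserved);
    reserved->buffer = InvalidBuffer;
    reserved->refcount = 0;
}

int
main(void)
{
    reserve_entry();            /* the first call finds a free slot immediately */
    return reserved == NULL;
}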
@@ -208,7 +208,7 @@ ReservePrivateRefCountEntry(void)
 /*
  * Fill a previously reserved refcount entry.
  */
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
 NewPrivateRefCountEntry(Buffer buffer)
 {
 	PrivateRefCountEntry *res;
@@ -234,7 +234,7 @@ NewPrivateRefCountEntry(Buffer buffer)
  * do_move is true, and the entry resides in the hashtable the entry is
  * optimized for frequent access by moving it to the array.
  */
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 {
 	PrivateRefCountEntry *res;
@@ -280,7 +280,7 @@ GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 	else
 	{
 		/* move buffer from hashtable into the free array slot */
-		bool found;
+		bool		found;
 		PrivateRefCountEntry *free;
 
 		/* Ensure there's a free array slot */
@@ -346,6 +346,7 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
 		ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
 	{
 		ref->buffer = InvalidBuffer;
+
 		/*
 		 * Mark the just used entry as reserved - in many scenarios that
 		 * allows us to avoid ever having to search the array/hash for free
@@ -355,8 +356,9 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
 	}
 	else
 	{
-		bool found;
-		Buffer buffer = ref->buffer;
+		bool		found;
+		Buffer		buffer = ref->buffer;
+
 		hash_search(PrivateRefCountHash,
 					(void *) &buffer,
 					HASH_REMOVE,
@@ -669,8 +671,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 											  found);
 
 			/*
-			 * In RBM_ZERO_AND_LOCK mode the caller expects the page to
-			 * be locked on return.
+			 * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
+			 * locked on return.
 			 */
 			if (!isLocalBuf)
 			{
@@ -809,9 +811,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	 * page before the caller has had a chance to initialize it.
 	 *
 	 * Since no-one else can be looking at the page contents yet, there is no
-	 * difference between an exclusive lock and a cleanup-strength lock.
-	 * (Note that we cannot use LockBuffer() of LockBufferForCleanup() here,
-	 * because they assert that the buffer is already valid.)
+	 * difference between an exclusive lock and a cleanup-strength lock. (Note
+	 * that we cannot use LockBuffer() of LockBufferForCleanup() here, because
+	 * they assert that the buffer is already valid.)
 	 */
 	if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
 		!isLocalBuf)
@@ -939,8 +941,8 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	for (;;)
 	{
 		/*
-		 * Ensure, while the spinlock's not yet held, that there's a free refcount
-		 * entry.
+		 * Ensure, while the spinlock's not yet held, that there's a free
+		 * refcount entry.
 		 */
 		ReservePrivateRefCountEntry();
 
@@ -2169,6 +2171,7 @@ CheckForBufferLeaks(void)
 	if (PrivateRefCountOverflowed)
 	{
 		HASH_SEQ_STATUS hstat;
+
 		hash_seq_init(&hstat, PrivateRefCountHash);
 		while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
 		{
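This hunk sits in the leak check that runs once a backend is done with its buffers: every private refcount should have dropped back to zero, and anything still pinned gets reported. The real code checks both the fixed array and, as above, the overflow hash via hash_seq_init()/hash_seq_search(). The standalone sketch below shows only the array half of that check, reusing the assumed entry layout from the earlier sketches.

/*
 * Standalone sketch of the array half of a buffer-leak check, not
 * PostgreSQL source: at the end of a transaction every private refcount
 * should be zero, so any non-zero entry is reported.  The real code also
 * walks the overflow hash with hash_seq_init()/hash_seq_search() exactly
 * as in the hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define REFCOUNT_ARRAY_ENTRIES 8    /* assumed size, as in the earlier sketches */
#define InvalidBuffer 0

typedef struct
{
    int32_t     buffer;
    int32_t     refcount;
} RefCountEntry;

static RefCountEntry entries[REFCOUNT_ARRAY_ENTRIES];

static int
check_for_leaks(void)
{
    int         leaks = 0;

    for (int i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
    {
        if (entries[i].buffer != InvalidBuffer && entries[i].refcount != 0)
        {
            printf("buffer %d leaked with refcount %d\n",
                   entries[i].buffer, entries[i].refcount);
            leaks++;
        }
    }
    return leaks;
}

int
main(void)
{
    entries[3].buffer = 42;     /* simulate a pin that was never released */
    entries[3].refcount = 1;
    return check_for_leaks() > 0;
}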
@@ -2974,6 +2977,7 @@ IncrBufferRefCount(Buffer buffer)
 	else
 	{
 		PrivateRefCountEntry *ref;
+
 		ref = GetPrivateRefCountEntry(buffer, true);
 		Assert(ref != NULL);
 		ref->refcount++;
@@ -50,7 +50,7 @@ typedef struct
 	 * Statistics. These counters should be wide enough that they can't
 	 * overflow during a single bgwriter cycle.
 	 */
-	uint32 completePasses;		/* Complete cycles of the clock sweep */
+	uint32		completePasses; /* Complete cycles of the clock sweep */
 	pg_atomic_uint32 numBufferAllocs;	/* Buffers allocated since last reset */
 
 	/*
@@ -111,7 +111,7 @@ static void AddBufferToRing(BufferAccessStrategy strategy,
 static inline uint32
 ClockSweepTick(void)
 {
-	uint32 victim;
+	uint32		victim;
 
 	/*
 	 * Atomically move hand ahead one buffer - if there's several processes
@@ -123,7 +123,7 @@ ClockSweepTick(void)
 
 	if (victim >= NBuffers)
 	{
-		uint32 originalVictim = victim;
+		uint32		originalVictim = victim;
 
 		/* always wrap what we look up in BufferDescriptors */
 		victim = victim % NBuffers;
@@ -136,9 +136,9 @@ ClockSweepTick(void)
 		 */
 		if (victim == 0)
 		{
-			uint32 expected;
-			uint32 wrapped;
-			bool success = false;
+			uint32		expected;
+			uint32		wrapped;
+			bool		success = false;
 
 			expected = originalVictim + 1;
 
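The ClockSweepTick() hunks above show the shape of the lock-free clock hand: a shared counter is bumped with an atomic fetch-add, the result is wrapped modulo NBuffers, and the backend that observes the wrap tries to pull the shared counter back into range with a compare-exchange so it cannot overflow. The sketch below reproduces that shape with C11 atomics rather than PostgreSQL's pg_atomic_* wrappers; nbuffers and next_victim are stand-ins for NBuffers and the shared next-victim counter kept in StrategyControl.

/*
 * Sketch of a lock-free clock-sweep tick using C11 atomics, not the
 * PostgreSQL implementation (which uses the pg_atomic_* wrappers and also
 * bumps completePasses under a spinlock on wraparound).  nbuffers and
 * next_victim are stand-ins for NBuffers and the shared next-victim
 * counter.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t next_victim = 0;
static const uint32_t nbuffers = 16;    /* assumed pool size for the sketch */

static uint32_t
clock_sweep_tick(void)
{
    uint32_t    victim = atomic_fetch_add(&next_victim, 1);

    if (victim >= nbuffers)
    {
        uint32_t    original = victim;

        /* always wrap what we hand back to the caller */
        victim = victim % nbuffers;

        /*
         * Whoever observed the wrap to slot 0 tries once to move the shared
         * counter back into range so it cannot eventually overflow.  Losing
         * the race is fine: another backend has already adjusted it.
         */
        if (victim == 0)
        {
            uint32_t    expected = original + 1;
            uint32_t    wrapped = expected % nbuffers;

            atomic_compare_exchange_strong(&next_victim, &expected, wrapped);
        }
    }
    return victim;
}

int
main(void)
{
    for (int i = 0; i < 40; i++)
        printf("%u ", clock_sweep_tick());
    printf("\n");
    return 0;
}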
@@ -381,6 +381,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
 	if (complete_passes)
 	{
 		*complete_passes = StrategyControl->completePasses;
+
 		/*
 		 * Additionally add the number of wraparounds that happened before
 		 * completePasses could be incremented. C.f. ClockSweepTick().