
pgindent run for 9.5

Bruce Momjian
2015-05-23 21:35:49 -04:00
parent 225892552b
commit 807b9e0dff
414 changed files with 5810 additions and 5308 deletions
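
The hunks shown below are purely mechanical layout changes from the pgindent run. As a rough, hypothetical sketch of the rules pgindent applies (the identifiers here are invented and do not appear in this commit): struct members and local declarations are aligned into a column, the '*' of a pointer return type is separated from the type name, comments are re-wrapped to the target width, and a blank line is required between a declaration block and the first statement.

/*
 * Hypothetical illustration of pgindent's layout conventions; the names
 * below are made up and are not taken from the patched file.
 */
typedef struct DemoEntry
{
	int			id;				/* member names aligned into a column */
	char	   *name;			/* the '*' hangs just left of that column */
} DemoEntry;

static DemoEntry *				/* "static DemoEntry*" would be rewritten to this */
demo_lookup(int id)
{
	DemoEntry  *result = NULL;	/* locals aligned like struct members */

	(void) id;					/* blank line forced after the declarations */
	return result;
}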

src/backend/storage/buffer/bufmgr.c

@@ -68,8 +68,8 @@
 typedef struct PrivateRefCountEntry
 {
-	Buffer buffer;
-	int32 refcount;
+	Buffer		buffer;
+	int32		refcount;
 } PrivateRefCountEntry;
 
 /* 64 bytes, about the size of a cache line on common systems */
@@ -132,8 +132,8 @@ static uint32 PrivateRefCountClock = 0;
 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
 
 static void ReservePrivateRefCountEntry(void);
-static PrivateRefCountEntry* NewPrivateRefCountEntry(Buffer buffer);
-static PrivateRefCountEntry* GetPrivateRefCountEntry(Buffer buffer, bool do_move);
+static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
+static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
 static inline int32 GetPrivateRefCount(Buffer buffer);
 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
@@ -154,7 +154,7 @@ ReservePrivateRefCountEntry(void)
 	 * majority of cases.
 	 */
 	{
-		int i;
+		int			i;
 
 		for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
 		{
@@ -180,10 +180,10 @@ ReservePrivateRefCountEntry(void)
 		 * hashtable. Use that slot.
 		 */
 		PrivateRefCountEntry *hashent;
-		bool found;
+		bool		found;
 
 		/* select victim slot */
-		ReservedRefCountEntry  =
+		ReservedRefCountEntry =
 			&PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
 
 		/* Better be used, otherwise we shouldn't get here. */
@@ -208,7 +208,7 @@ ReservePrivateRefCountEntry(void)
 /*
  * Fill a previously reserved refcount entry.
  */
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
 NewPrivateRefCountEntry(Buffer buffer)
 {
 	PrivateRefCountEntry *res;
@@ -234,7 +234,7 @@ NewPrivateRefCountEntry(Buffer buffer)
  * do_move is true, and the entry resides in the hashtable the entry is
  * optimized for frequent access by moving it to the array.
  */
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 {
 	PrivateRefCountEntry *res;
@@ -280,7 +280,7 @@ GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 	else
 	{
 		/* move buffer from hashtable into the free array slot */
-		bool found;
+		bool		found;
 		PrivateRefCountEntry *free;
 
 		/* Ensure there's a free array slot */
@@ -346,6 +346,7 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
 		ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
 	{
 		ref->buffer = InvalidBuffer;
+
 		/*
 		 * Mark the just used entry as reserved - in many scenarios that
 		 * allows us to avoid ever having to search the array/hash for free
@@ -355,8 +356,9 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
 	}
 	else
 	{
-		bool found;
-		Buffer buffer = ref->buffer;
+		bool		found;
+		Buffer		buffer = ref->buffer;
+
 		hash_search(PrivateRefCountHash,
 					(void *) &buffer,
 					HASH_REMOVE,
@@ -669,8 +671,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 						  found);
 
 		/*
-		 * In RBM_ZERO_AND_LOCK mode the caller expects the page to
-		 * be locked on return.
+		 * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
+		 * locked on return.
 		 */
 		if (!isLocalBuf)
 		{
@@ -809,9 +811,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	 * page before the caller has had a chance to initialize it.
 	 *
 	 * Since no-one else can be looking at the page contents yet, there is no
-	 * difference between an exclusive lock and a cleanup-strength lock.
-	 * (Note that we cannot use LockBuffer() of LockBufferForCleanup() here,
-	 * because they assert that the buffer is already valid.)
+	 * difference between an exclusive lock and a cleanup-strength lock. (Note
+	 * that we cannot use LockBuffer() of LockBufferForCleanup() here, because
+	 * they assert that the buffer is already valid.)
 	 */
 	if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
 		!isLocalBuf)
@@ -939,8 +941,8 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	for (;;)
 	{
 		/*
-		 * Ensure, while the spinlock's not yet held, that there's a free refcount
-		 * entry.
+		 * Ensure, while the spinlock's not yet held, that there's a free
+		 * refcount entry.
 		 */
 		ReservePrivateRefCountEntry();
@@ -2169,6 +2171,7 @@ CheckForBufferLeaks(void)
 	if (PrivateRefCountOverflowed)
 	{
 		HASH_SEQ_STATUS hstat;
+
 		hash_seq_init(&hstat, PrivateRefCountHash);
 		while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
 		{
@@ -2974,6 +2977,7 @@ IncrBufferRefCount(Buffer buffer)
 	else
 	{
 		PrivateRefCountEntry *ref;
+
 		ref = GetPrivateRefCountEntry(buffer, true);
 		Assert(ref != NULL);
 		ref->refcount++;