mirror of
https://github.com/postgres/postgres.git
synced 2025-04-25 21:42:33 +03:00
Convert macros to static inline functions (buf_internals.h)
Dilip Kumar, reviewed by Vignesh C, Ashutosh Sharma, and me. Discussion: http://postgr.es/m/CAFiTN-tYbM7D+2UGiNc2kAFMSQTa5FTeYvmg-Vj2HvPdVw2Gvg@mail.gmail.com
This commit is contained in:
parent
03361a368e
commit
3ac88fddd9
@ -116,7 +116,7 @@ InitBufferPool(void)
|
||||
{
|
||||
BufferDesc *buf = GetBufferDescriptor(i);
|
||||
|
||||
CLEAR_BUFFERTAG(buf->tag);
|
||||
ClearBufferTag(&buf->tag);
|
||||
|
||||
pg_atomic_init_u32(&buf->state, 0);
|
||||
buf->wait_backend_pgprocno = INVALID_PGPROCNO;
|
||||
|
@ -515,7 +515,7 @@ PrefetchSharedBuffer(SMgrRelation smgr_reln,
|
||||
Assert(BlockNumberIsValid(blockNum));
|
||||
|
||||
/* create a tag so we can lookup the buffer */
|
||||
INIT_BUFFERTAG(newTag, smgr_reln->smgr_rlocator.locator,
|
||||
InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
|
||||
forkNum, blockNum);
|
||||
|
||||
/* determine its hash code and partition lock ID */
|
||||
@ -632,7 +632,7 @@ ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockN
|
||||
|
||||
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
|
||||
ReservePrivateRefCountEntry();
|
||||
INIT_BUFFERTAG(tag, rlocator, forkNum, blockNum);
|
||||
InitBufferTag(&tag, &rlocator, forkNum, blockNum);
|
||||
|
||||
if (BufferIsLocal(recent_buffer))
|
||||
{
|
||||
@ -642,7 +642,7 @@ ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockN
|
||||
buf_state = pg_atomic_read_u32(&bufHdr->state);
|
||||
|
||||
/* Is it still valid and holding the right tag? */
|
||||
if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
|
||||
if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
|
||||
{
|
||||
/*
|
||||
* Bump buffer's ref and usage counts. This is equivalent of
|
||||
@ -679,7 +679,7 @@ ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockN
|
||||
else
|
||||
buf_state = LockBufHdr(bufHdr);
|
||||
|
||||
if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
|
||||
if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
|
||||
{
|
||||
/*
|
||||
* It's now safe to pin the buffer. We can't pin first and ask
|
||||
@ -1134,7 +1134,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
|
||||
uint32 buf_state;
|
||||
|
||||
/* create a tag so we can lookup the buffer */
|
||||
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
|
||||
InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
|
||||
|
||||
/* determine its hash code and partition lock ID */
|
||||
newHash = BufTableHashCode(&newTag);
|
||||
@ -1517,7 +1517,7 @@ retry:
|
||||
buf_state = LockBufHdr(buf);
|
||||
|
||||
/* If it's changed while we were waiting for lock, do nothing */
|
||||
if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
|
||||
if (!BufferTagsEqual(&buf->tag, &oldTag))
|
||||
{
|
||||
UnlockBufHdr(buf, buf_state);
|
||||
LWLockRelease(oldPartitionLock);
|
||||
@ -1549,7 +1549,7 @@ retry:
|
||||
* linear scans of the buffer array don't think the buffer is valid.
|
||||
*/
|
||||
oldFlags = buf_state & BUF_FLAG_MASK;
|
||||
CLEAR_BUFFERTAG(buf->tag);
|
||||
ClearBufferTag(&buf->tag);
|
||||
buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
|
||||
UnlockBufHdr(buf, buf_state);
|
||||
|
||||
@ -3365,7 +3365,7 @@ FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
|
||||
uint32 buf_state;
|
||||
|
||||
/* create a tag so we can lookup the buffer */
|
||||
INIT_BUFFERTAG(bufTag, rlocator, forkNum, curBlock);
|
||||
InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
|
||||
|
||||
/* determine its hash code and partition lock ID */
|
||||
bufHash = BufTableHashCode(&bufTag);
|
||||
|
@ -68,7 +68,7 @@ PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum,
|
||||
BufferTag newTag; /* identity of requested block */
|
||||
LocalBufferLookupEnt *hresult;
|
||||
|
||||
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
|
||||
InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
|
||||
|
||||
/* Initialize local buffers if first request in this session */
|
||||
if (LocalBufHash == NULL)
|
||||
@ -117,7 +117,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
|
||||
bool found;
|
||||
uint32 buf_state;
|
||||
|
||||
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
|
||||
InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
|
||||
|
||||
/* Initialize local buffers if first request in this session */
|
||||
if (LocalBufHash == NULL)
|
||||
@ -131,7 +131,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
|
||||
{
|
||||
b = hresult->id;
|
||||
bufHdr = GetLocalBufferDescriptor(b);
|
||||
Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
|
||||
Assert(BufferTagsEqual(&bufHdr->tag, &newTag));
|
||||
#ifdef LBDEBUG
|
||||
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
|
||||
smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum, -b - 1);
|
||||
@ -253,7 +253,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
|
||||
if (!hresult) /* shouldn't happen */
|
||||
elog(ERROR, "local buffer hash table corrupted");
|
||||
/* mark buffer invalid just in case hash insert fails */
|
||||
CLEAR_BUFFERTAG(bufHdr->tag);
|
||||
ClearBufferTag(&bufHdr->tag);
|
||||
buf_state &= ~(BM_VALID | BM_TAG_VALID);
|
||||
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
|
||||
}
|
||||
@ -354,7 +354,7 @@ DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
|
||||
if (!hresult) /* shouldn't happen */
|
||||
elog(ERROR, "local buffer hash table corrupted");
|
||||
/* Mark buffer invalid */
|
||||
CLEAR_BUFFERTAG(bufHdr->tag);
|
||||
ClearBufferTag(&bufHdr->tag);
|
||||
buf_state &= ~BUF_FLAG_MASK;
|
||||
buf_state &= ~BUF_USAGECOUNT_MASK;
|
||||
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
|
||||
@ -398,7 +398,7 @@ DropRelationAllLocalBuffers(RelFileLocator rlocator)
|
||||
if (!hresult) /* shouldn't happen */
|
||||
elog(ERROR, "local buffer hash table corrupted");
|
||||
/* Mark buffer invalid */
|
||||
CLEAR_BUFFERTAG(bufHdr->tag);
|
||||
ClearBufferTag(&bufHdr->tag);
|
||||
buf_state &= ~BUF_FLAG_MASK;
|
||||
buf_state &= ~BUF_USAGECOUNT_MASK;
|
||||
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
|
||||
|
@ -85,7 +85,7 @@
|
||||
* relation is visible yet (its xact may have started before the xact that
|
||||
* created the rel). The storage manager must be able to cope anyway.
|
||||
*
|
||||
* Note: if there's any pad bytes in the struct, INIT_BUFFERTAG will have
|
||||
* Note: if there's any pad bytes in the struct, InitBufferTag will have
|
||||
* to be fixed to zero them, since this struct is used as a hash key.
|
||||
*/
|
||||
typedef struct buftag
|
||||
@ -95,28 +95,32 @@ typedef struct buftag
|
||||
BlockNumber blockNum; /* blknum relative to begin of reln */
|
||||
} BufferTag;
|
||||
|
||||
#define CLEAR_BUFFERTAG(a) \
|
||||
( \
|
||||
(a).rlocator.spcOid = InvalidOid, \
|
||||
(a).rlocator.dbOid = InvalidOid, \
|
||||
(a).rlocator.relNumber = InvalidRelFileNumber, \
|
||||
(a).forkNum = InvalidForkNumber, \
|
||||
(a).blockNum = InvalidBlockNumber \
|
||||
)
|
||||
/*
 * ClearBufferTag
 *		Reset a buffer tag to the "unused" state by setting every field to
 *		its invalid value.
 *
 * NOTE: per the comment above struct buftag, if pad bytes are ever added to
 * the struct, this will have to be fixed to zero them too, since the struct
 * is used as a hash key.
 */
static inline void
ClearBufferTag(BufferTag *tag)
{
	tag->rlocator.spcOid = InvalidOid;
	tag->rlocator.dbOid = InvalidOid;
	tag->rlocator.relNumber = InvalidRelFileNumber;
	tag->forkNum = InvalidForkNumber;
	tag->blockNum = InvalidBlockNumber;
}
|
||||
|
||||
#define INIT_BUFFERTAG(a,xx_rlocator,xx_forkNum,xx_blockNum) \
|
||||
( \
|
||||
(a).rlocator = (xx_rlocator), \
|
||||
(a).forkNum = (xx_forkNum), \
|
||||
(a).blockNum = (xx_blockNum) \
|
||||
)
|
||||
/*
 * InitBufferTag
 *		Fill in a buffer tag identifying block 'blockNum' of fork 'forkNum'
 *		of the relation identified by '*rlocator'.
 *
 * The locator is copied by value; the caller retains ownership of *rlocator.
 */
static inline void
InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator,
			  ForkNumber forkNum, BlockNumber blockNum)
{
	tag->rlocator = *rlocator;
	tag->forkNum = forkNum;
	tag->blockNum = blockNum;
}
|
||||
|
||||
#define BUFFERTAGS_EQUAL(a,b) \
|
||||
( \
|
||||
RelFileLocatorEquals((a).rlocator, (b).rlocator) && \
|
||||
(a).blockNum == (b).blockNum && \
|
||||
(a).forkNum == (b).forkNum \
|
||||
)
|
||||
static inline bool
|
||||
BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
|
||||
{
|
||||
return RelFileLocatorEquals(tag1->rlocator, tag2->rlocator) &&
|
||||
(tag1->blockNum == tag2->blockNum) &&
|
||||
(tag1->forkNum == tag2->forkNum);
|
||||
}
|
||||
|
||||
/*
|
||||
* The shared buffer mapping table is partitioned to reduce contention.
|
||||
@ -124,13 +128,24 @@ typedef struct buftag
|
||||
* hash code with BufTableHashCode(), then apply BufMappingPartitionLock().
|
||||
* NB: NUM_BUFFER_PARTITIONS must be a power of 2!
|
||||
*/
|
||||
#define BufTableHashPartition(hashcode) \
|
||||
((hashcode) % NUM_BUFFER_PARTITIONS)
|
||||
#define BufMappingPartitionLock(hashcode) \
|
||||
(&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + \
|
||||
BufTableHashPartition(hashcode)].lock)
|
||||
#define BufMappingPartitionLockByIndex(i) \
|
||||
(&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + (i)].lock)
|
||||
/*
 * BufTableHashPartition
 *		Map a buffer-tag hash code to a partition number of the shared
 *		buffer mapping table.
 *
 * NB: NUM_BUFFER_PARTITIONS must be a power of 2 (see comment above), so
 * this modulo reduces to a cheap bit mask.
 */
static inline uint32
BufTableHashPartition(uint32 hashcode)
{
	return hashcode % NUM_BUFFER_PARTITIONS;
}
|
||||
|
||||
static inline LWLock *
|
||||
BufMappingPartitionLock(uint32 hashcode)
|
||||
{
|
||||
return &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET +
|
||||
BufTableHashPartition(hashcode)].lock;
|
||||
}
|
||||
|
||||
/*
 * BufMappingPartitionLockByIndex
 *		Return the LWLock for a buffer-mapping partition, addressed directly
 *		by partition index rather than by hash code.
 */
static inline LWLock *
BufMappingPartitionLockByIndex(uint32 index)
{
	return &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + index].lock;
}
|
||||
|
||||
/*
|
||||
* BufferDesc -- shared descriptor/state data for a single shared buffer.
|
||||
@ -220,37 +235,6 @@ typedef union BufferDescPadded
|
||||
char pad[BUFFERDESC_PAD_TO_SIZE];
|
||||
} BufferDescPadded;
|
||||
|
||||
#define GetBufferDescriptor(id) (&BufferDescriptors[(id)].bufferdesc)
|
||||
#define GetLocalBufferDescriptor(id) (&LocalBufferDescriptors[(id)])
|
||||
|
||||
#define BufferDescriptorGetBuffer(bdesc) ((bdesc)->buf_id + 1)
|
||||
|
||||
#define BufferDescriptorGetIOCV(bdesc) \
|
||||
(&(BufferIOCVArray[(bdesc)->buf_id]).cv)
|
||||
#define BufferDescriptorGetContentLock(bdesc) \
|
||||
((LWLock*) (&(bdesc)->content_lock))
|
||||
|
||||
extern PGDLLIMPORT ConditionVariableMinimallyPadded *BufferIOCVArray;
|
||||
|
||||
/*
|
||||
* The freeNext field is either the index of the next freelist entry,
|
||||
* or one of these special values:
|
||||
*/
|
||||
#define FREENEXT_END_OF_LIST (-1)
|
||||
#define FREENEXT_NOT_IN_LIST (-2)
|
||||
|
||||
/*
|
||||
* Functions for acquiring/releasing a shared buffer header's spinlock. Do
|
||||
* not apply these to local buffers!
|
||||
*/
|
||||
extern uint32 LockBufHdr(BufferDesc *desc);
|
||||
#define UnlockBufHdr(desc, s) \
|
||||
do { \
|
||||
pg_write_barrier(); \
|
||||
pg_atomic_write_u32(&(desc)->state, (s) & (~BM_LOCKED)); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/*
|
||||
* The PendingWriteback & WritebackContext structure are used to keep
|
||||
* information about pending flush requests to be issued to the OS.
|
||||
@ -276,11 +260,63 @@ typedef struct WritebackContext
|
||||
|
||||
/* in buf_init.c */
|
||||
extern PGDLLIMPORT BufferDescPadded *BufferDescriptors;
|
||||
extern PGDLLIMPORT ConditionVariableMinimallyPadded *BufferIOCVArray;
|
||||
extern PGDLLIMPORT WritebackContext BackendWritebackContext;
|
||||
|
||||
/* in localbuf.c */
|
||||
extern PGDLLIMPORT BufferDesc *LocalBufferDescriptors;
|
||||
|
||||
|
||||
/*
 * GetBufferDescriptor
 *		Return the shared buffer descriptor for zero-based buffer index 'id'.
 *
 * Unwraps the padded array element to yield the actual BufferDesc.
 */
static inline BufferDesc *
GetBufferDescriptor(uint32 id)
{
	return &(BufferDescriptors[id]).bufferdesc;
}
|
||||
|
||||
/*
 * GetLocalBufferDescriptor
 *		Return the backend-local buffer descriptor for zero-based index 'id'.
 */
static inline BufferDesc *
GetLocalBufferDescriptor(uint32 id)
{
	return &LocalBufferDescriptors[id];
}
|
||||
|
||||
/*
 * BufferDescriptorGetBuffer
 *		Convert a shared buffer descriptor to its Buffer number.
 *
 * Buffer numbers are one-based (buf_id + 1), leaving 0 available as
 * an invalid-buffer sentinel.
 */
static inline Buffer
BufferDescriptorGetBuffer(const BufferDesc *bdesc)
{
	return (Buffer) (bdesc->buf_id + 1);
}
|
||||
|
||||
/*
 * BufferDescriptorGetIOCV
 *		Return the buffer's entry in BufferIOCVArray (its I/O condition
 *		variable), unwrapped from the minimally-padded array element.
 */
static inline ConditionVariable *
BufferDescriptorGetIOCV(const BufferDesc *bdesc)
{
	return &(BufferIOCVArray[bdesc->buf_id]).cv;
}
|
||||
|
||||
/*
 * BufferDescriptorGetContentLock
 *		Return the descriptor's embedded content lock as an LWLock pointer.
 *
 * NOTE: the cast also discards the const qualifier inherited from 'bdesc',
 * since lock acquisition needs a non-const LWLock *.
 */
static inline LWLock *
BufferDescriptorGetContentLock(const BufferDesc *bdesc)
{
	return (LWLock *) (&bdesc->content_lock);
}
|
||||
|
||||
/*
|
||||
* The freeNext field is either the index of the next freelist entry,
|
||||
* or one of these special values:
|
||||
*/
|
||||
#define FREENEXT_END_OF_LIST (-1)
|
||||
#define FREENEXT_NOT_IN_LIST (-2)
|
||||
|
||||
/*
|
||||
* Functions for acquiring/releasing a shared buffer header's spinlock. Do
|
||||
* not apply these to local buffers!
|
||||
*/
|
||||
extern uint32 LockBufHdr(BufferDesc *desc);
|
||||
|
||||
/*
 * UnlockBufHdr
 *		Release a shared buffer header's spinlock, installing 'buf_state'
 *		(with BM_LOCKED cleared) as the new state word.
 *
 * Do not apply this to local buffers!  (See the comment above LockBufHdr.)
 */
static inline void
UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
{
	/*
	 * The write barrier orders all preceding stores before the store that
	 * clears BM_LOCKED, so other backends cannot observe the lock released
	 * before the protected updates are visible.
	 */
	pg_write_barrier();
	pg_atomic_write_u32(&desc->state, buf_state & (~BM_LOCKED));
}
|
||||
|
||||
/* in bufmgr.c */
|
||||
|
||||
/*
|
||||
|
Loading…
x
Reference in New Issue
Block a user