bufmgr: Change BufferDesc.state to be a 64-bit atomic
This is motivated by wanting to merge buffer content locks into
BufferDesc.state in a future commit, rather than having a separate lwlock (see
commit c75ebc657f for more details). As this change is rather mechanical, it
seems to make sense to split it out into a separate commit, for easier review.
Reviewed-by: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://postgr.es/m/fvfmkr5kk4nyex56ejgxj3uzi63isfxovp2biecb4bspbjrze7@az2pljabhnff
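
The conversion itself is mechanical: every pg_atomic_*_u32() access to BufferDesc.state becomes the corresponding pg_atomic_*_u64() call, local buf_state variables widen from uint32 to uint64, and the layout of the word (18 refcount bits, the usage count above them, flags above that) stays the same, leaving the new upper bits free for the planned lock state. For orientation, here is a minimal standalone sketch of the pin CAS loop the diff converts -- stand-in names and C11 atomics instead of PostgreSQL's pg_atomic wrappers, with illustrative flag positions, not the actual bufmgr code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Same layout idea as buf_internals.h, just widened: refcount in the
     * low 18 bits, usage count above it, flags above that.  The upper
     * 32 bits of the word are spare, which is what the follow-up lock
     * work needs. */
    #define REFCOUNT_BITS   18
    #define USAGECOUNT_BITS 4
    #define REFCOUNT_ONE    UINT64_C(1)
    #define FLAG_SHIFT      (REFCOUNT_BITS + USAGECOUNT_BITS)
    #define FLAG_LOCKED     (UINT64_C(1) << FLAG_SHIFT)        /* like BM_LOCKED */
    #define FLAG_VALID      (UINT64_C(1) << (FLAG_SHIFT + 5))  /* like BM_VALID */

    /* Pin a buffer the way PinBuffer() does: retry the CAS until the
     * header is not spinlocked and our refcount increment sticks. */
    static bool
    pin_buffer(_Atomic uint64_t *state)
    {
        uint64_t old = atomic_load(state);

        for (;;)
        {
            uint64_t next;

            if (old & FLAG_LOCKED)
            {
                /* the real code waits via WaitBufHdrUnlocked() */
                old = atomic_load(state);
                continue;
            }
            next = old + REFCOUNT_ONE;
            if (atomic_compare_exchange_weak(state, &old, next))
                return (next & FLAG_VALID) != 0;  /* pinned; report validity */
        }
    }

    int
    main(void)
    {
        _Atomic uint64_t state = FLAG_VALID;    /* valid, unpinned, unlocked */

        return pin_buffer(&state) ? 0 : 1;
    }

The only thing the commit changes in loops like this is the width of the state word and of the atomic operations on it; the control flow is untouched.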
@@ -199,7 +199,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
 	for (i = 0; i < NBuffers; i++)
 	{
 		BufferDesc *bufHdr;
-		uint32		buf_state;
+		uint64		buf_state;
 
 		CHECK_FOR_INTERRUPTS();
 
@@ -615,7 +615,7 @@ pg_buffercache_summary(PG_FUNCTION_ARGS)
 	for (int i = 0; i < NBuffers; i++)
 	{
 		BufferDesc *bufHdr;
-		uint32		buf_state;
+		uint64		buf_state;
 
 		CHECK_FOR_INTERRUPTS();
 
@@ -626,7 +626,7 @@ pg_buffercache_summary(PG_FUNCTION_ARGS)
 		 * noticeably increase the cost of the function.
 		 */
 		bufHdr = GetBufferDescriptor(i);
-		buf_state = pg_atomic_read_u32(&bufHdr->state);
+		buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 		if (buf_state & BM_VALID)
 		{
@@ -676,7 +676,7 @@ pg_buffercache_usage_counts(PG_FUNCTION_ARGS)
 	for (int i = 0; i < NBuffers; i++)
 	{
 		BufferDesc *bufHdr = GetBufferDescriptor(i);
-		uint32		buf_state = pg_atomic_read_u32(&bufHdr->state);
+		uint64		buf_state = pg_atomic_read_u64(&bufHdr->state);
 		int			usage_count;
 
 		CHECK_FOR_INTERRUPTS();
@@ -703,7 +703,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
 
 	for (num_blocks = 0, i = 0; i < NBuffers; i++)
 	{
-		uint32		buf_state;
+		uint64		buf_state;
 
 		CHECK_FOR_INTERRUPTS();
 
@@ -121,7 +121,7 @@ BufferManagerShmemInit(void)
 
 			ClearBufferTag(&buf->tag);
 
-			pg_atomic_init_u32(&buf->state, 0);
+			pg_atomic_init_u64(&buf->state, 0);
 			buf->wait_backend_pgprocno = INVALID_PROC_NUMBER;
 
 			buf->buf_id = i;
@@ -780,7 +780,7 @@ ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockN
 {
 	BufferDesc *bufHdr;
 	BufferTag	tag;
-	uint32		buf_state;
+	uint64		buf_state;
 
 	Assert(BufferIsValid(recent_buffer));
 
@@ -793,7 +793,7 @@ ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockN
 		int			b = -recent_buffer - 1;
 
 		bufHdr = GetLocalBufferDescriptor(b);
-		buf_state = pg_atomic_read_u32(&bufHdr->state);
+		buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 		/* Is it still valid and holding the right tag? */
 		if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
@@ -1386,8 +1386,8 @@ StartReadBuffersImpl(ReadBuffersOperation *operation,
 				bufHdr = GetLocalBufferDescriptor(-buffers[i] - 1);
 			else
 				bufHdr = GetBufferDescriptor(buffers[i] - 1);
-			Assert(pg_atomic_read_u32(&bufHdr->state) & BM_TAG_VALID);
-			found = pg_atomic_read_u32(&bufHdr->state) & BM_VALID;
+			Assert(pg_atomic_read_u64(&bufHdr->state) & BM_TAG_VALID);
+			found = pg_atomic_read_u64(&bufHdr->state) & BM_VALID;
 		}
 		else
 		{
@@ -1613,10 +1613,10 @@ CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete)
 			GetBufferDescriptor(buffer - 1);
 
 		Assert(BufferGetBlockNumber(buffer) == operation->blocknum + i);
-		Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_TAG_VALID);
+		Assert(pg_atomic_read_u64(&buf_hdr->state) & BM_TAG_VALID);
 
 		if (i < operation->nblocks_done)
-			Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_VALID);
+			Assert(pg_atomic_read_u64(&buf_hdr->state) & BM_VALID);
 	}
 #endif
 }
@@ -2083,8 +2083,8 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	int			existing_buf_id;
 	Buffer		victim_buffer;
 	BufferDesc *victim_buf_hdr;
-	uint32		victim_buf_state;
-	uint32		set_bits = 0;
+	uint64		victim_buf_state;
+	uint64		set_bits = 0;
 
 	/* Make sure we will have room to remember the buffer pin */
 	ResourceOwnerEnlarge(CurrentResourceOwner);
@@ -2251,7 +2251,7 @@ InvalidateBuffer(BufferDesc *buf)
 	uint32		oldHash;		/* hash value for oldTag */
 	LWLock	   *oldPartitionLock;	/* buffer partition lock for it */
 	uint32		oldFlags;
-	uint32		buf_state;
+	uint64		buf_state;
 
 	/* Save the original buffer tag before dropping the spinlock */
 	oldTag = buf->tag;
@@ -2342,7 +2342,7 @@ retry:
 static bool
 InvalidateVictimBuffer(BufferDesc *buf_hdr)
 {
-	uint32		buf_state;
+	uint64		buf_state;
 	uint32		hash;
 	LWLock	   *partition_lock;
 	BufferTag	tag;
@@ -2402,10 +2402,10 @@ InvalidateVictimBuffer(BufferDesc *buf_hdr)
 
 	LWLockRelease(partition_lock);
 
-	buf_state = pg_atomic_read_u32(&buf_hdr->state);
+	buf_state = pg_atomic_read_u64(&buf_hdr->state);
 	Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
 	Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
-	Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u32(&buf_hdr->state)) > 0);
+	Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u64(&buf_hdr->state)) > 0);
 
 	return true;
 }
@@ -2415,7 +2415,7 @@ GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
 {
 	BufferDesc *buf_hdr;
 	Buffer		buf;
-	uint32		buf_state;
+	uint64		buf_state;
 	bool		from_ring;
 
 	/*
@@ -2548,7 +2548,7 @@ again:
 
 	/* a final set of sanity checks */
#ifdef USE_ASSERT_CHECKING
-	buf_state = pg_atomic_read_u32(&buf_hdr->state);
+	buf_state = pg_atomic_read_u64(&buf_hdr->state);
 
 	Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
 	Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
@@ -2839,13 +2839,13 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
 		 */
 		do
 		{
-			pg_atomic_fetch_and_u32(&existing_hdr->state, ~BM_VALID);
+			pg_atomic_fetch_and_u64(&existing_hdr->state, ~BM_VALID);
 		} while (!StartBufferIO(existing_hdr, true, false));
 	}
 	else
 	{
-		uint32		buf_state;
-		uint32		set_bits = 0;
+		uint64		buf_state;
+		uint64		set_bits = 0;
 
 		buf_state = LockBufHdr(victim_buf_hdr);
 
@@ -3021,7 +3021,7 @@ BufferIsDirty(Buffer buffer)
 		Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
 	}
 
-	return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
+	return pg_atomic_read_u64(&bufHdr->state) & BM_DIRTY;
 }
 
 /*
@@ -3037,8 +3037,8 @@ void
 MarkBufferDirty(Buffer buffer)
 {
 	BufferDesc *bufHdr;
-	uint32		buf_state;
-	uint32		old_buf_state;
+	uint64		buf_state;
+	uint64		old_buf_state;
 
 	if (!BufferIsValid(buffer))
 		elog(ERROR, "bad buffer ID: %d", buffer);
@@ -3058,7 +3058,7 @@ MarkBufferDirty(Buffer buffer)
 	 * NB: We have to wait for the buffer header spinlock to be not held, as
 	 * TerminateBufferIO() relies on the spinlock.
 	 */
-	old_buf_state = pg_atomic_read_u32(&bufHdr->state);
+	old_buf_state = pg_atomic_read_u64(&bufHdr->state);
 	for (;;)
 	{
 		if (old_buf_state & BM_LOCKED)
@@ -3069,7 +3069,7 @@ MarkBufferDirty(Buffer buffer)
 		Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
 		buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
 
-		if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
+		if (pg_atomic_compare_exchange_u64(&bufHdr->state, &old_buf_state,
 										   buf_state))
 			break;
 	}
@@ -3173,10 +3173,10 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
 
 	if (ref == NULL)
 	{
-		uint32		buf_state;
-		uint32		old_buf_state;
+		uint64		buf_state;
+		uint64		old_buf_state;
 
-		old_buf_state = pg_atomic_read_u32(&buf->state);
+		old_buf_state = pg_atomic_read_u64(&buf->state);
 		for (;;)
 		{
 			if (unlikely(skip_if_not_valid && !(old_buf_state & BM_VALID)))
@@ -3210,7 +3210,7 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
 				buf_state += BUF_USAGECOUNT_ONE;
 			}
 
-			if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+			if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
 											   buf_state))
 			{
 				result = (buf_state & BM_VALID) != 0;
@@ -3237,7 +3237,7 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
 		 * that the buffer page is legitimately non-accessible here. We
 		 * cannot meddle with that.
 		 */
-		result = (pg_atomic_read_u32(&buf->state) & BM_VALID) != 0;
+		result = (pg_atomic_read_u64(&buf->state) & BM_VALID) != 0;
 
 		Assert(ref->data.refcount > 0);
 		ref->data.refcount++;
@@ -3272,7 +3272,7 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
 static void
 PinBuffer_Locked(BufferDesc *buf)
 {
-	uint32		old_buf_state;
+	uint64		old_buf_state;
 
 	/*
 	 * As explained, We don't expect any preexisting pins. That allows us to
@@ -3284,7 +3284,7 @@ PinBuffer_Locked(BufferDesc *buf)
 	 * Since we hold the buffer spinlock, we can update the buffer state and
 	 * release the lock in one operation.
 	 */
-	old_buf_state = pg_atomic_read_u32(&buf->state);
+	old_buf_state = pg_atomic_read_u64(&buf->state);
 
 	UnlockBufHdrExt(buf, old_buf_state,
 					0, 0, 1);
@@ -3314,7 +3314,7 @@ WakePinCountWaiter(BufferDesc *buf)
 	 * BM_PIN_COUNT_WAITER if it stops waiting for a reason other than this
 	 * backend waking it up.
 	 */
-	uint32		buf_state = LockBufHdr(buf);
+	uint64		buf_state = LockBufHdr(buf);
 
 	if ((buf_state & BM_PIN_COUNT_WAITER) &&
 		BUF_STATE_GET_REFCOUNT(buf_state) == 1)
@@ -3361,7 +3361,7 @@ UnpinBufferNoOwner(BufferDesc *buf)
 	ref->data.refcount--;
 	if (ref->data.refcount == 0)
 	{
-		uint32		old_buf_state;
+		uint64		old_buf_state;
 
 		/*
 		 * Mark buffer non-accessible to Valgrind.
@@ -3379,7 +3379,7 @@ UnpinBufferNoOwner(BufferDesc *buf)
 		Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
 
 		/* decrement the shared reference count */
-		old_buf_state = pg_atomic_fetch_sub_u32(&buf->state, BUF_REFCOUNT_ONE);
+		old_buf_state = pg_atomic_fetch_sub_u64(&buf->state, BUF_REFCOUNT_ONE);
 
 		/* Support LockBufferForCleanup() */
 		if (old_buf_state & BM_PIN_COUNT_WAITER)
@@ -3436,7 +3436,7 @@ TrackNewBufferPin(Buffer buf)
 static void
 BufferSync(int flags)
 {
-	uint32		buf_state;
+	uint64		buf_state;
 	int			buf_id;
 	int			num_to_scan;
 	int			num_spaces;
@@ -3446,7 +3446,7 @@ BufferSync(int flags)
 	Oid			last_tsid;
 	binaryheap *ts_heap;
 	int			i;
-	uint32		mask = BM_DIRTY;
+	uint64		mask = BM_DIRTY;
 	WritebackContext wb_context;
 
 	/*
@@ -3478,7 +3478,7 @@ BufferSync(int flags)
 	for (buf_id = 0; buf_id < NBuffers; buf_id++)
 	{
 		BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
-		uint32		set_bits = 0;
+		uint64		set_bits = 0;
 
 		/*
 		 * Header spinlock is enough to examine BM_DIRTY, see comment in
@@ -3645,7 +3645,7 @@ BufferSync(int flags)
 		 * write the buffer though we didn't need to. It doesn't seem worth
 		 * guarding against this, though.
 		 */
-		if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
+		if (pg_atomic_read_u64(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
 		{
 			if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
 			{
@@ -4015,7 +4015,7 @@ SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
 {
 	BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
 	int			result = 0;
-	uint32		buf_state;
+	uint64		buf_state;
 	BufferTag	tag;
 
 	/* Make sure we can handle the pin */
@@ -4264,7 +4264,7 @@ DebugPrintBufferRefcount(Buffer buffer)
 	int32		loccount;
 	char	   *result;
 	ProcNumber	backend;
-	uint32		buf_state;
+	uint64		buf_state;
 
 	Assert(BufferIsValid(buffer));
 	if (BufferIsLocal(buffer))
@@ -4281,9 +4281,9 @@ DebugPrintBufferRefcount(Buffer buffer)
 	}
 
 	/* theoretically we should lock the bufHdr here */
-	buf_state = pg_atomic_read_u32(&buf->state);
+	buf_state = pg_atomic_read_u64(&buf->state);
 
-	result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
+	result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%" PRIx64 ", refcount=%u %d)",
 					  buffer,
 					  relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
 									 BufTagGetForkNum(&buf->tag)).str,
@@ -4383,7 +4383,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object,
 	instr_time	io_start;
 	Block		bufBlock;
 	char	   *bufToWrite;
-	uint32		buf_state;
+	uint64		buf_state;
 
 	/*
 	 * Try to start an I/O operation. If StartBufferIO returns false, then
@@ -4581,7 +4581,7 @@ BufferIsPermanent(Buffer buffer)
 	 * not random garbage.
 	 */
 	bufHdr = GetBufferDescriptor(buffer - 1);
-	return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
+	return (pg_atomic_read_u64(&bufHdr->state) & BM_PERMANENT) != 0;
 }
 
 /*
@@ -5044,11 +5044,11 @@ FlushRelationBuffers(Relation rel)
 	{
 		for (i = 0; i < NLocBuffer; i++)
 		{
-			uint32		buf_state;
+			uint64		buf_state;
 
 			bufHdr = GetLocalBufferDescriptor(i);
 			if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
-				((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
+				((buf_state = pg_atomic_read_u64(&bufHdr->state)) &
 				 (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 			{
 				ErrorContextCallback errcallback;
@@ -5084,7 +5084,7 @@ FlushRelationBuffers(Relation rel)
 
 	for (i = 0; i < NBuffers; i++)
 	{
-		uint32		buf_state;
+		uint64		buf_state;
 
 		bufHdr = GetBufferDescriptor(i);
 
@@ -5156,7 +5156,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
 	{
 		SMgrSortArray *srelent = NULL;
 		BufferDesc *bufHdr = GetBufferDescriptor(i);
-		uint32		buf_state;
+		uint64		buf_state;
 
 		/*
 		 * As in DropRelationBuffers, an unlocked precheck should be safe and
@@ -5405,7 +5405,7 @@ FlushDatabaseBuffers(Oid dbid)
 
 	for (i = 0; i < NBuffers; i++)
 	{
-		uint32		buf_state;
+		uint64		buf_state;
 
 		bufHdr = GetBufferDescriptor(i);
 
@@ -5553,13 +5553,13 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
 	 * is only intended to be used in cases where failing to write out the
 	 * data would be harmless anyway, it doesn't really matter.
 	 */
-	if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
+	if ((pg_atomic_read_u64(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
 		(BM_DIRTY | BM_JUST_DIRTIED))
 	{
 		XLogRecPtr	lsn = InvalidXLogRecPtr;
 		bool		dirtied = false;
 		bool		delayChkptFlags = false;
-		uint32		buf_state;
+		uint64		buf_state;
 
 		/*
 		 * If we need to protect hint bit updates from torn writes, WAL-log a
@@ -5571,7 +5571,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
 		 * when we call XLogInsert() since the value changes dynamically.
 		 */
 		if (XLogHintBitIsNeeded() &&
-			(pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
+			(pg_atomic_read_u64(&bufHdr->state) & BM_PERMANENT))
 		{
 			/*
 			 * If we must not write WAL, due to a relfilelocator-specific
@@ -5671,8 +5671,8 @@ UnlockBuffers(void)
 
 	if (buf)
 	{
-		uint32		buf_state;
-		uint32		unset_bits = 0;
+		uint64		buf_state;
+		uint64		unset_bits = 0;
 
 		buf_state = LockBufHdr(buf);
 
@@ -5803,8 +5803,8 @@ LockBufferForCleanup(Buffer buffer)
 
 	for (;;)
 	{
-		uint32		buf_state;
-		uint32		unset_bits = 0;
+		uint64		buf_state;
+		uint64		unset_bits = 0;
 
 		/* Try to acquire lock */
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
@@ -5952,7 +5952,7 @@ bool
 ConditionalLockBufferForCleanup(Buffer buffer)
 {
 	BufferDesc *bufHdr;
-	uint32		buf_state,
+	uint64		buf_state,
 				refcount;
 
 	Assert(BufferIsValid(buffer));
@@ -6010,7 +6010,7 @@ bool
 IsBufferCleanupOK(Buffer buffer)
 {
 	BufferDesc *bufHdr;
-	uint32		buf_state;
+	uint64		buf_state;
 
 	Assert(BufferIsValid(buffer));
 
@@ -6066,7 +6066,7 @@ WaitIO(BufferDesc *buf)
 	ConditionVariablePrepareToSleep(cv);
 	for (;;)
 	{
-		uint32		buf_state;
+		uint64		buf_state;
 		PgAioWaitRef iow;
 
 		/*
@@ -6140,7 +6140,7 @@ WaitIO(BufferDesc *buf)
 bool
 StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
 {
-	uint32		buf_state;
+	uint64		buf_state;
 
 	ResourceOwnerEnlarge(CurrentResourceOwner);
 
@@ -6196,11 +6196,11 @@ StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
 * is being released)
 */
 void
-TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
+TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint64 set_flag_bits,
 				  bool forget_owner, bool release_aio)
 {
-	uint32		buf_state;
-	uint32		unset_flag_bits = 0;
+	uint64		buf_state;
+	uint64		unset_flag_bits = 0;
 	int			refcount_change = 0;
 
 	buf_state = LockBufHdr(buf);
@@ -6261,7 +6261,7 @@ static void
 AbortBufferIO(Buffer buffer)
 {
 	BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
-	uint32		buf_state;
+	uint64		buf_state;
 
 	buf_state = LockBufHdr(buf_hdr);
 	Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
@@ -6355,10 +6355,10 @@ rlocator_comparator(const void *p1, const void *p2)
 /*
 * Lock buffer header - set BM_LOCKED in buffer state.
 */
-uint32
+uint64
 LockBufHdr(BufferDesc *desc)
 {
-	uint32		old_buf_state;
+	uint64		old_buf_state;
 
 	Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
 
@@ -6369,7 +6369,7 @@ LockBufHdr(BufferDesc *desc)
 	 * the spin-delay infrastructure. The work necessary for that shows up
 	 * in profiles and is rarely necessary.
 	 */
-	old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
+	old_buf_state = pg_atomic_fetch_or_u64(&desc->state, BM_LOCKED);
 	if (likely(!(old_buf_state & BM_LOCKED)))
 		break;					/* got lock */
 
@@ -6382,7 +6382,7 @@ LockBufHdr(BufferDesc *desc)
 		while (old_buf_state & BM_LOCKED)
 		{
 			perform_spin_delay(&delayStatus);
-			old_buf_state = pg_atomic_read_u32(&desc->state);
+			old_buf_state = pg_atomic_read_u64(&desc->state);
 		}
 		finish_spin_delay(&delayStatus);
 	}
@@ -6403,20 +6403,20 @@ LockBufHdr(BufferDesc *desc)
 * Obviously the buffer could be locked by the time the value is returned, so
 * this is primarily useful in CAS style loops.
 */
-pg_noinline uint32
+pg_noinline uint64
 WaitBufHdrUnlocked(BufferDesc *buf)
 {
 	SpinDelayStatus delayStatus;
-	uint32		buf_state;
+	uint64		buf_state;
 
 	init_local_spin_delay(&delayStatus);
 
-	buf_state = pg_atomic_read_u32(&buf->state);
+	buf_state = pg_atomic_read_u64(&buf->state);
 
 	while (buf_state & BM_LOCKED)
 	{
 		perform_spin_delay(&delayStatus);
-		buf_state = pg_atomic_read_u32(&buf->state);
+		buf_state = pg_atomic_read_u64(&buf->state);
 	}
 
 	finish_spin_delay(&delayStatus);
@@ -6704,12 +6704,12 @@ ResOwnerPrintBufferPin(Datum res)
 static bool
 EvictUnpinnedBufferInternal(BufferDesc *desc, bool *buffer_flushed)
 {
-	uint32		buf_state;
+	uint64		buf_state;
 	bool		result;
 
 	*buffer_flushed = false;
 
-	buf_state = pg_atomic_read_u32(&(desc->state));
+	buf_state = pg_atomic_read_u64(&(desc->state));
 	Assert(buf_state & BM_LOCKED);
 
 	if ((buf_state & BM_VALID) == 0)
@@ -6803,12 +6803,12 @@ EvictAllUnpinnedBuffers(int32 *buffers_evicted, int32 *buffers_flushed,
 	for (int buf = 1; buf <= NBuffers; buf++)
 	{
 		BufferDesc *desc = GetBufferDescriptor(buf - 1);
-		uint32		buf_state;
+		uint64		buf_state;
 		bool		buffer_flushed;
 
 		CHECK_FOR_INTERRUPTS();
 
-		buf_state = pg_atomic_read_u32(&desc->state);
+		buf_state = pg_atomic_read_u64(&desc->state);
 		if (!(buf_state & BM_VALID))
 			continue;
 
@@ -6855,7 +6855,7 @@ EvictRelUnpinnedBuffers(Relation rel, int32 *buffers_evicted,
 	for (int buf = 1; buf <= NBuffers; buf++)
 	{
 		BufferDesc *desc = GetBufferDescriptor(buf - 1);
-		uint32		buf_state = pg_atomic_read_u32(&(desc->state));
+		uint64		buf_state = pg_atomic_read_u64(&(desc->state));
 		bool		buffer_flushed;
 
 		CHECK_FOR_INTERRUPTS();
@@ -6897,12 +6897,12 @@ static bool
 MarkDirtyUnpinnedBufferInternal(Buffer buf, BufferDesc *desc,
								bool *buffer_already_dirty)
 {
-	uint32		buf_state;
+	uint64		buf_state;
 	bool		result = false;
 
 	*buffer_already_dirty = false;
 
-	buf_state = pg_atomic_read_u32(&(desc->state));
+	buf_state = pg_atomic_read_u64(&(desc->state));
 	Assert(buf_state & BM_LOCKED);
 
 	if ((buf_state & BM_VALID) == 0)
@@ -7000,7 +7000,7 @@ MarkDirtyRelUnpinnedBuffers(Relation rel,
 	for (int buf = 1; buf <= NBuffers; buf++)
 	{
 		BufferDesc *desc = GetBufferDescriptor(buf - 1);
-		uint32		buf_state = pg_atomic_read_u32(&(desc->state));
+		uint64		buf_state = pg_atomic_read_u64(&(desc->state));
 		bool		buffer_already_dirty;
 
 		CHECK_FOR_INTERRUPTS();
@@ -7054,12 +7054,12 @@ MarkDirtyAllUnpinnedBuffers(int32 *buffers_dirtied,
 	for (int buf = 1; buf <= NBuffers; buf++)
 	{
 		BufferDesc *desc = GetBufferDescriptor(buf - 1);
-		uint32		buf_state;
+		uint64		buf_state;
 		bool		buffer_already_dirty;
 
 		CHECK_FOR_INTERRUPTS();
 
-		buf_state = pg_atomic_read_u32(&desc->state);
+		buf_state = pg_atomic_read_u64(&desc->state);
 		if (!(buf_state & BM_VALID))
 			continue;
 
@@ -7110,7 +7110,7 @@ buffer_stage_common(PgAioHandle *ioh, bool is_write, bool is_temp)
 		BufferDesc *buf_hdr = is_temp ?
 			GetLocalBufferDescriptor(-buffer - 1)
 			: GetBufferDescriptor(buffer - 1);
-		uint32		buf_state;
+		uint64		buf_state;
 
 		/*
 		 * Check that all the buffers are actually ones that could conceivably
@@ -7128,7 +7128,7 @@ buffer_stage_common(PgAioHandle *ioh, bool is_write, bool is_temp)
 		}
 
 		if (is_temp)
-			buf_state = pg_atomic_read_u32(&buf_hdr->state);
+			buf_state = pg_atomic_read_u64(&buf_hdr->state);
 		else
 			buf_state = LockBufHdr(buf_hdr);
 
@@ -7166,7 +7166,7 @@ buffer_stage_common(PgAioHandle *ioh, bool is_write, bool is_temp)
 		if (is_temp)
 		{
 			buf_state += BUF_REFCOUNT_ONE;
-			pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+			pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
 		}
 		else
 			UnlockBufHdrExt(buf_hdr, buf_state, 0, 0, 1);
@@ -7352,13 +7352,13 @@ buffer_readv_complete_one(PgAioTargetData *td, uint8 buf_off, Buffer buffer,
 		: GetBufferDescriptor(buffer - 1);
 	BufferTag	tag = buf_hdr->tag;
 	char	   *bufdata = BufferGetBlock(buffer);
-	uint32		set_flag_bits;
+	uint64		set_flag_bits;
 	int			piv_flags;
 
 	/* check that the buffer is in the expected state for a read */
#ifdef USE_ASSERT_CHECKING
 	{
-		uint32		buf_state = pg_atomic_read_u32(&buf_hdr->state);
+		uint64		buf_state = pg_atomic_read_u64(&buf_hdr->state);
 
 		Assert(buf_state & BM_TAG_VALID);
 		Assert(!(buf_state & BM_VALID));
@@ -86,7 +86,7 @@ typedef struct BufferAccessStrategyData
 
 /* Prototypes for internal functions */
 static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy,
-									 uint32 *buf_state);
+									 uint64 *buf_state);
 static void AddBufferToRing(BufferAccessStrategy strategy,
 							BufferDesc *buf);
 
@@ -171,7 +171,7 @@ ClockSweepTick(void)
 * before returning.
 */
 BufferDesc *
-StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_ring)
+StrategyGetBuffer(BufferAccessStrategy strategy, uint64 *buf_state, bool *from_ring)
 {
 	BufferDesc *buf;
 	int			bgwprocno;
@@ -230,8 +230,8 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r
 	trycounter = NBuffers;
 	for (;;)
 	{
-		uint32		old_buf_state;
-		uint32		local_buf_state;
+		uint64		old_buf_state;
+		uint64		local_buf_state;
 
 		buf = GetBufferDescriptor(ClockSweepTick());
 
@@ -239,7 +239,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r
 		 * Check whether the buffer can be used and pin it if so. Do this
 		 * using a CAS loop, to avoid having to lock the buffer header.
 		 */
-		old_buf_state = pg_atomic_read_u32(&buf->state);
+		old_buf_state = pg_atomic_read_u64(&buf->state);
 		for (;;)
 		{
 			local_buf_state = old_buf_state;
@@ -277,7 +277,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r
 			{
 				local_buf_state -= BUF_USAGECOUNT_ONE;
 
-				if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+				if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
 												   local_buf_state))
 				{
 					trycounter = NBuffers;
@@ -289,7 +289,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r
 				/* pin the buffer if the CAS succeeds */
 				local_buf_state += BUF_REFCOUNT_ONE;
 
-				if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+				if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
 												   local_buf_state))
 				{
 					/* Found a usable buffer */
@@ -655,12 +655,12 @@ FreeAccessStrategy(BufferAccessStrategy strategy)
 * returning.
 */
 static BufferDesc *
-GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
+GetBufferFromRing(BufferAccessStrategy strategy, uint64 *buf_state)
 {
 	BufferDesc *buf;
 	Buffer		bufnum;
-	uint32		old_buf_state;
-	uint32		local_buf_state;	/* to avoid repeated (de-)referencing */
+	uint64		old_buf_state;
+	uint64		local_buf_state;	/* to avoid repeated (de-)referencing */
 
 
 	/* Advance to next ring slot */
@@ -682,7 +682,7 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
 	 * Check whether the buffer can be used and pin it if so. Do this using a
 	 * CAS loop, to avoid having to lock the buffer header.
 	 */
-	old_buf_state = pg_atomic_read_u32(&buf->state);
+	old_buf_state = pg_atomic_read_u64(&buf->state);
 	for (;;)
 	{
 		local_buf_state = old_buf_state;
@@ -710,7 +710,7 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
 		/* pin the buffer if the CAS succeeds */
 		local_buf_state += BUF_REFCOUNT_ONE;
 
-		if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
+		if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
 										   local_buf_state))
 		{
 			*buf_state = local_buf_state;
 
@@ -148,7 +148,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
 	}
 	else
 	{
-		uint32		buf_state;
+		uint64		buf_state;
 
 		victim_buffer = GetLocalVictimBuffer();
 		bufid = -victim_buffer - 1;
@@ -165,10 +165,10 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
 		 */
 		bufHdr->tag = newTag;
 
-		buf_state = pg_atomic_read_u32(&bufHdr->state);
+		buf_state = pg_atomic_read_u64(&bufHdr->state);
 		buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
 		buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
-		pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+		pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
 
 		*foundPtr = false;
 	}
@@ -245,12 +245,12 @@ GetLocalVictimBuffer(void)
 
 		if (LocalRefCount[victim_bufid] == 0)
 		{
-			uint32		buf_state = pg_atomic_read_u32(&bufHdr->state);
+			uint64		buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 			if (BUF_STATE_GET_USAGECOUNT(buf_state) > 0)
 			{
 				buf_state -= BUF_USAGECOUNT_ONE;
-				pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+				pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
 				trycounter = NLocBuffer;
 			}
 			else if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
@@ -286,13 +286,13 @@ GetLocalVictimBuffer(void)
 	 * this buffer is not referenced but it might still be dirty. if that's
 	 * the case, write it out before reusing it!
 	 */
-	if (pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY)
+	if (pg_atomic_read_u64(&bufHdr->state) & BM_DIRTY)
 		FlushLocalBuffer(bufHdr, NULL);
 
 	/*
 	 * Remove the victim buffer from the hashtable and mark as invalid.
 	 */
-	if (pg_atomic_read_u32(&bufHdr->state) & BM_TAG_VALID)
+	if (pg_atomic_read_u64(&bufHdr->state) & BM_TAG_VALID)
 	{
 		InvalidateLocalBuffer(bufHdr, false);
 
@@ -417,7 +417,7 @@ ExtendBufferedRelLocal(BufferManagerRelation bmr,
 		if (found)
 		{
 			BufferDesc *existing_hdr;
-			uint32		buf_state;
+			uint64		buf_state;
 
 			UnpinLocalBuffer(BufferDescriptorGetBuffer(victim_buf_hdr));
 
@@ -428,18 +428,18 @@ ExtendBufferedRelLocal(BufferManagerRelation bmr,
 			/*
 			 * Clear the BM_VALID bit, do StartLocalBufferIO() and proceed.
 			 */
-			buf_state = pg_atomic_read_u32(&existing_hdr->state);
+			buf_state = pg_atomic_read_u64(&existing_hdr->state);
 			Assert(buf_state & BM_TAG_VALID);
 			Assert(!(buf_state & BM_DIRTY));
 			buf_state &= ~BM_VALID;
-			pg_atomic_unlocked_write_u32(&existing_hdr->state, buf_state);
+			pg_atomic_unlocked_write_u64(&existing_hdr->state, buf_state);
 
 			/* no need to loop for local buffers */
 			StartLocalBufferIO(existing_hdr, true, false);
 		}
 		else
 		{
-			uint32		buf_state = pg_atomic_read_u32(&victim_buf_hdr->state);
+			uint64		buf_state = pg_atomic_read_u64(&victim_buf_hdr->state);
 
 			Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
 
@@ -447,7 +447,7 @@ ExtendBufferedRelLocal(BufferManagerRelation bmr,
 
 			buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
 
-			pg_atomic_unlocked_write_u32(&victim_buf_hdr->state, buf_state);
+			pg_atomic_unlocked_write_u64(&victim_buf_hdr->state, buf_state);
 
 			hresult->id = victim_buf_id;
 
@@ -467,13 +467,13 @@ ExtendBufferedRelLocal(BufferManagerRelation bmr,
 	{
 		Buffer		buf = buffers[i];
 		BufferDesc *buf_hdr;
-		uint32		buf_state;
+		uint64		buf_state;
 
 		buf_hdr = GetLocalBufferDescriptor(-buf - 1);
 
-		buf_state = pg_atomic_read_u32(&buf_hdr->state);
+		buf_state = pg_atomic_read_u64(&buf_hdr->state);
 		buf_state |= BM_VALID;
-		pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+		pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
 	}
 
 	*extended_by = extend_by;
@@ -492,7 +492,7 @@ MarkLocalBufferDirty(Buffer buffer)
 {
 	int			bufid;
 	BufferDesc *bufHdr;
-	uint32		buf_state;
+	uint64		buf_state;
 
 	Assert(BufferIsLocal(buffer));
 
@@ -506,14 +506,14 @@ MarkLocalBufferDirty(Buffer buffer)
 
 	bufHdr = GetLocalBufferDescriptor(bufid);
 
-	buf_state = pg_atomic_read_u32(&bufHdr->state);
+	buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 	if (!(buf_state & BM_DIRTY))
 		pgBufferUsage.local_blks_dirtied++;
 
 	buf_state |= BM_DIRTY;
 
-	pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+	pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
 }
 
 /*
@@ -522,7 +522,7 @@ MarkLocalBufferDirty(Buffer buffer)
 bool
 StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait)
 {
-	uint32		buf_state;
+	uint64		buf_state;
 
 	/*
 	 * With AIO the buffer could have IO in progress, e.g. when there are two
@@ -542,7 +542,7 @@ StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait)
 	/* Once we get here, there is definitely no I/O active on this buffer */
 
 	/* Check if someone else already did the I/O */
-	buf_state = pg_atomic_read_u32(&bufHdr->state);
+	buf_state = pg_atomic_read_u64(&bufHdr->state);
 	if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
 	{
 		return false;
@@ -559,11 +559,11 @@ StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait)
 * Like TerminateBufferIO, but for local buffers
 */
 void
-TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty, uint32 set_flag_bits,
+TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty, uint64 set_flag_bits,
 					   bool release_aio)
 {
 	/* Only need to adjust flags */
-	uint32		buf_state = pg_atomic_read_u32(&bufHdr->state);
+	uint64		buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 	/* BM_IO_IN_PROGRESS isn't currently used for local buffers */
 
@@ -582,7 +582,7 @@ TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty, uint32 set_flag_bit
 	}
 
 	buf_state |= set_flag_bits;
-	pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+	pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
 
 	/* local buffers don't track IO using resowners */
 
@@ -606,7 +606,7 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
 {
 	Buffer		buffer = BufferDescriptorGetBuffer(bufHdr);
 	int			bufid = -buffer - 1;
-	uint32		buf_state;
+	uint64		buf_state;
 	LocalBufferLookupEnt *hresult;
 
 	/*
@@ -622,7 +622,7 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
 		Assert(!pgaio_wref_valid(&bufHdr->io_wref));
 	}
 
-	buf_state = pg_atomic_read_u32(&bufHdr->state);
+	buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 	/*
 	 * We need to test not just LocalRefCount[bufid] but also the BufferDesc
@@ -647,7 +647,7 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
 	ClearBufferTag(&bufHdr->tag);
 	buf_state &= ~BUF_FLAG_MASK;
 	buf_state &= ~BUF_USAGECOUNT_MASK;
-	pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+	pg_atomic_unlocked_write_u64(&bufHdr->state, buf_state);
 }
 
 /*
@@ -671,9 +671,9 @@ DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber *forkNum,
 	for (i = 0; i < NLocBuffer; i++)
 	{
 		BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
-		uint32		buf_state;
+		uint64		buf_state;
 
-		buf_state = pg_atomic_read_u32(&bufHdr->state);
+		buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 		if (!(buf_state & BM_TAG_VALID) ||
 			!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
@@ -706,9 +706,9 @@ DropRelationAllLocalBuffers(RelFileLocator rlocator)
 	for (i = 0; i < NLocBuffer; i++)
 	{
 		BufferDesc *bufHdr = GetLocalBufferDescriptor(i);
-		uint32		buf_state;
+		uint64		buf_state;
 
-		buf_state = pg_atomic_read_u32(&bufHdr->state);
+		buf_state = pg_atomic_read_u64(&bufHdr->state);
 
 		if ((buf_state & BM_TAG_VALID) &&
 			BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
@@ -804,11 +804,11 @@ InitLocalBuffers(void)
 bool
 PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
 {
-	uint32		buf_state;
+	uint64		buf_state;
 	Buffer		buffer = BufferDescriptorGetBuffer(buf_hdr);
 	int			bufid = -buffer - 1;
 
-	buf_state = pg_atomic_read_u32(&buf_hdr->state);
+	buf_state = pg_atomic_read_u64(&buf_hdr->state);
 
 	if (LocalRefCount[bufid] == 0)
 	{
@@ -819,7 +819,7 @@ PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
 		{
 			buf_state += BUF_USAGECOUNT_ONE;
 		}
-		pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+		pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
 
 		/*
 		 * See comment in PinBuffer().
@@ -856,14 +856,14 @@ UnpinLocalBufferNoOwner(Buffer buffer)
 	if (--LocalRefCount[buffid] == 0)
 	{
 		BufferDesc *buf_hdr = GetLocalBufferDescriptor(buffid);
-		uint32		buf_state;
+		uint64		buf_state;
 
 		NLocalPinnedBuffers--;
 
-		buf_state = pg_atomic_read_u32(&buf_hdr->state);
+		buf_state = pg_atomic_read_u64(&buf_hdr->state);
 		Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
 		buf_state -= BUF_REFCOUNT_ONE;
-		pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+		pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
 
 		/* see comment in UnpinBufferNoOwner */
 		VALGRIND_MAKE_MEM_NOACCESS(LocalBufHdrGetBlock(buf_hdr), BLCKSZ);
@@ -30,7 +30,7 @@
 #include "utils/resowner.h"
 
 /*
- * Buffer state is a single 32-bit variable where following data is combined.
+ * Buffer state is a single 64-bit variable where following data is combined.
 *
 * State of the buffer itself (in order):
 * - 18 bits refcount
@@ -40,6 +40,9 @@
 * Combining these values allows to perform some operations without locking
 * the buffer header, by modifying them together with a CAS loop.
 *
+ * NB: A future commit will use a significant portion of the remaining bits to
+ * implement buffer locking as part of the state variable.
+ *
 * The definition of buffer state components is below.
 */
 #define BUF_REFCOUNT_BITS 18
@@ -52,27 +55,27 @@ StaticAssertDecl(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + BUF_FLAG_BITS == 32,
 /* refcount related definitions */
 #define BUF_REFCOUNT_ONE 1
 #define BUF_REFCOUNT_MASK \
-	((1U << BUF_REFCOUNT_BITS) - 1)
+	((UINT64CONST(1) << BUF_REFCOUNT_BITS) - 1)
 
 /* usage count related definitions */
 #define BUF_USAGECOUNT_SHIFT \
 	BUF_REFCOUNT_BITS
 #define BUF_USAGECOUNT_MASK \
-	(((1U << BUF_USAGECOUNT_BITS) - 1) << (BUF_USAGECOUNT_SHIFT))
+	(((UINT64CONST(1) << BUF_USAGECOUNT_BITS) - 1) << (BUF_USAGECOUNT_SHIFT))
 #define BUF_USAGECOUNT_ONE \
-	(1U << BUF_REFCOUNT_BITS)
+	(UINT64CONST(1) << BUF_REFCOUNT_BITS)
 
 /* flags related definitions */
 #define BUF_FLAG_SHIFT \
 	(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS)
 #define BUF_FLAG_MASK \
-	(((1U << BUF_FLAG_BITS) - 1) << BUF_FLAG_SHIFT)
+	(((UINT64CONST(1) << BUF_FLAG_BITS) - 1) << BUF_FLAG_SHIFT)
 
 /* Get refcount and usagecount from buffer state */
 #define BUF_STATE_GET_REFCOUNT(state) \
-	((state) & BUF_REFCOUNT_MASK)
+	((uint32)((state) & BUF_REFCOUNT_MASK))
 #define BUF_STATE_GET_USAGECOUNT(state) \
-	(((state) & BUF_USAGECOUNT_MASK) >> BUF_USAGECOUNT_SHIFT)
+	((uint32)(((state) & BUF_USAGECOUNT_MASK) >> BUF_USAGECOUNT_SHIFT))
 
 /*
 * Flags for buffer descriptors
@@ -82,7 +85,7 @@ StaticAssertDecl(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + BUF_FLAG_BITS == 32,
 */
 
 #define BUF_DEFINE_FLAG(flagno) \
-	(1U << (BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + (flagno)))
+	(UINT64CONST(1) << (BUF_FLAG_SHIFT + (flagno)))
 
 /* buffer header is locked */
 #define BM_LOCKED BUF_DEFINE_FLAG( 0)
@@ -115,7 +118,7 @@ StaticAssertDecl(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + BUF_FLAG_BITS == 32,
 */
 #define BM_MAX_USAGE_COUNT	5
 
-StaticAssertDecl(BM_MAX_USAGE_COUNT < (1 << BUF_USAGECOUNT_BITS),
+StaticAssertDecl(BM_MAX_USAGE_COUNT < (UINT64CONST(1) << BUF_USAGECOUNT_BITS),
 				 "BM_MAX_USAGE_COUNT doesn't fit in BUF_USAGECOUNT_BITS bits");
 StaticAssertDecl(MAX_BACKENDS_BITS <= BUF_REFCOUNT_BITS,
 				 "MAX_BACKENDS_BITS needs to be <= BUF_REFCOUNT_BITS");
@@ -280,8 +283,8 @@ BufMappingPartitionLockByIndex(uint32 index)
 * We use this same struct for local buffer headers, but the locks are not
 * used and not all of the flag bits are useful either. To avoid unnecessary
 * overhead, manipulations of the state field should be done without actual
- * atomic operations (i.e. only pg_atomic_read_u32() and
- * pg_atomic_unlocked_write_u32()).
+ * atomic operations (i.e. only pg_atomic_read_u64() and
+ * pg_atomic_unlocked_write_u64()).
 *
 * Be careful to avoid increasing the size of the struct when adding or
 * reordering members. Keeping it below 64 bytes (the most common CPU
@@ -309,7 +312,7 @@ typedef struct BufferDesc
 	 * State of the buffer, containing flags, refcount and usagecount. See
 	 * BUF_* and BM_* defines at the top of this file.
 	 */
-	pg_atomic_uint32 state;
+	pg_atomic_uint64 state;
 
 	/*
 	 * Backend of pin-count waiter. The buffer header spinlock needs to be
@@ -415,7 +418,7 @@ BufferDescriptorGetContentLock(const BufferDesc *bdesc)
 * Functions for acquiring/releasing a shared buffer header's spinlock. Do
 * not apply these to local buffers!
 */
-extern uint32 LockBufHdr(BufferDesc *desc);
+extern uint64 LockBufHdr(BufferDesc *desc);
 
 /*
 * Unlock the buffer header.
@@ -426,9 +429,9 @@ extern uint32 LockBufHdr(BufferDesc *desc);
 static inline void
 UnlockBufHdr(BufferDesc *desc)
 {
-	Assert(pg_atomic_read_u32(&desc->state) & BM_LOCKED);
+	Assert(pg_atomic_read_u64(&desc->state) & BM_LOCKED);
 
-	pg_atomic_fetch_sub_u32(&desc->state, BM_LOCKED);
+	pg_atomic_fetch_sub_u64(&desc->state, BM_LOCKED);
 }
 
 /*
@@ -439,14 +442,14 @@ UnlockBufHdr(BufferDesc *desc)
 * Note that this approach would not work for usagecount, since we need to cap
 * the usagecount at BM_MAX_USAGE_COUNT.
 */
-static inline uint32
-UnlockBufHdrExt(BufferDesc *desc, uint32 old_buf_state,
-				uint32 set_bits, uint32 unset_bits,
+static inline uint64
+UnlockBufHdrExt(BufferDesc *desc, uint64 old_buf_state,
+				uint64 set_bits, uint64 unset_bits,
 				int refcount_change)
 {
 	for (;;)
 	{
-		uint32		buf_state = old_buf_state;
+		uint64		buf_state = old_buf_state;
 
 		Assert(buf_state & BM_LOCKED);
 
@@ -457,7 +460,7 @@ UnlockBufHdrExt(BufferDesc *desc, uint32 old_buf_state,
 		if (refcount_change != 0)
 			buf_state += BUF_REFCOUNT_ONE * refcount_change;
 
-		if (pg_atomic_compare_exchange_u32(&desc->state, &old_buf_state,
+		if (pg_atomic_compare_exchange_u64(&desc->state, &old_buf_state,
 										   buf_state))
 		{
 			return old_buf_state;
@@ -465,7 +468,7 @@ UnlockBufHdrExt(BufferDesc *desc, uint32 old_buf_state,
 	}
 }
 
-extern uint32 WaitBufHdrUnlocked(BufferDesc *buf);
+extern uint64 WaitBufHdrUnlocked(BufferDesc *buf);
 
 /* in bufmgr.c */
 
@@ -525,14 +528,14 @@ extern void TrackNewBufferPin(Buffer buf);
 
 /* solely to make it easier to write tests */
 extern bool StartBufferIO(BufferDesc *buf, bool forInput, bool nowait);
-extern void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
+extern void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint64 set_flag_bits,
 							  bool forget_owner, bool release_aio);
 
 
 /* freelist.c */
 extern IOContext IOContextForStrategy(BufferAccessStrategy strategy);
 extern BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy,
-									 uint32 *buf_state, bool *from_ring);
+									 uint64 *buf_state, bool *from_ring);
 extern bool StrategyRejectBuffer(BufferAccessStrategy strategy,
 								 BufferDesc *buf, bool from_ring);
 
@@ -568,7 +571,7 @@ extern BlockNumber ExtendBufferedRelLocal(BufferManagerRelation bmr,
 										  uint32 *extended_by);
 extern void MarkLocalBufferDirty(Buffer buffer);
 extern void TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty,
-								   uint32 set_flag_bits, bool release_aio);
+								   uint64 set_flag_bits, bool release_aio);
 extern bool StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait);
 extern void FlushLocalBuffer(BufferDesc *bufHdr, SMgrRelation reln);
 extern void InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced);
@@ -27,13 +27,13 @@ typedef int ProcNumber;
 
 /*
 * Note: MAX_BACKENDS_BITS is 18 as that is the space available for buffer
- * refcounts in buf_internals.h. This limitation could be lifted by using a
- * 64bit state; but it's unlikely to be worthwhile as 2^18-1 backends exceed
- * currently realistic configurations. Even if that limitation were removed,
- * we still could not a) exceed 2^23-1 because inval.c stores the ProcNumber
- * as a 3-byte signed integer, b) INT_MAX/4 because some places compute
- * 4*MaxBackends without any overflow check. We check that the configured
- * number of backends does not exceed MAX_BACKENDS in InitializeMaxBackends().
+ * refcounts in buf_internals.h. This limitation could be lifted, but it's
+ * unlikely to be worthwhile as 2^18-1 backends exceed currently realistic
+ * configurations. Even if that limitation were removed, we still could not a)
+ * exceed 2^23-1 because inval.c stores the ProcNumber as a 3-byte signed
+ * integer, b) INT_MAX/4 because some places compute 4*MaxBackends without any
+ * overflow check. We check that the configured number of backends does not
+ * exceed MAX_BACKENDS in InitializeMaxBackends().
 */
 #define MAX_BACKENDS_BITS		18
 #define MAX_BACKENDS ((1U << MAX_BACKENDS_BITS)-1)
@@ -308,9 +308,9 @@ create_toy_buffer(Relation rel, BlockNumber blkno)
 {
 	Buffer		buf;
 	BufferDesc *buf_hdr;
-	uint32		buf_state;
+	uint64		buf_state;
 	bool		was_pinned = false;
-	uint32		unset_bits = 0;
+	uint64		unset_bits = 0;
 
 	/* place buffer in shared buffers without erroring out */
 	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK, NULL);
@@ -319,7 +319,7 @@ create_toy_buffer(Relation rel, BlockNumber blkno)
 	if (RelationUsesLocalBuffers(rel))
 	{
 		buf_hdr = GetLocalBufferDescriptor(-buf - 1);
-		buf_state = pg_atomic_read_u32(&buf_hdr->state);
+		buf_state = pg_atomic_read_u64(&buf_hdr->state);
 	}
 	else
 	{
@@ -340,7 +340,7 @@ create_toy_buffer(Relation rel, BlockNumber blkno)
 	if (RelationUsesLocalBuffers(rel))
 	{
 		buf_state &= ~unset_bits;
-		pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
+		pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
 	}
 	else
 	{
@@ -489,7 +489,7 @@ invalidate_rel_block(PG_FUNCTION_ARGS)
 
 	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
-	if (pg_atomic_read_u32(&buf_hdr->state) & BM_DIRTY)
+	if (pg_atomic_read_u64(&buf_hdr->state) & BM_DIRTY)
 	{
 		if (BufferIsLocal(buf))
 			FlushLocalBuffer(buf_hdr, NULL);
@@ -572,7 +572,7 @@ buffer_call_terminate_io(PG_FUNCTION_ARGS)
 	bool		io_error = PG_GETARG_BOOL(3);
 	bool		release_aio = PG_GETARG_BOOL(4);
 	bool		clear_dirty = false;
-	uint32		set_flag_bits = 0;
+	uint64		set_flag_bits = 0;
 
 	if (io_error)
 		set_flag_bits |= BM_IO_ERROR;
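
One non-mechanical detail sits in the buf_internals.h hunks above: the bit-layout constants switch from `1U` to `UINT64CONST(1)`, since a 32-bit constant cannot be shifted past bit 31 once the masks live in a 64-bit word, and BUF_STATE_GET_REFCOUNT()/BUF_STATE_GET_USAGECOUNT() gain uint32 casts so existing callers keep receiving 32-bit values. The same arithmetic in a self-contained form (illustrative names, not the PostgreSQL macros themselves):

    #include <stdint.h>
    #include <stdio.h>

    #define REFCOUNT_BITS    18
    #define USAGECOUNT_BITS  4
    #define USAGECOUNT_SHIFT REFCOUNT_BITS

    /* 64-bit constants, mirroring the patched masks */
    #define REFCOUNT_MASK   ((UINT64_C(1) << REFCOUNT_BITS) - 1)
    #define USAGECOUNT_MASK (((UINT64_C(1) << USAGECOUNT_BITS) - 1) << USAGECOUNT_SHIFT)

    /* extractors narrow back to 32 bits, like the added uint32 casts */
    static uint32_t
    get_refcount(uint64_t state)
    {
        return (uint32_t) (state & REFCOUNT_MASK);
    }

    static uint32_t
    get_usagecount(uint64_t state)
    {
        return (uint32_t) ((state & USAGECOUNT_MASK) >> USAGECOUNT_SHIFT);
    }

    int
    main(void)
    {
        uint64_t state = 0;

        state += 3;                                /* three pins */
        state += UINT64_C(2) << USAGECOUNT_SHIFT;  /* usage count of 2 */
        printf("refcount=%u usagecount=%u\n",
               (unsigned) get_refcount(state), (unsigned) get_usagecount(state));
        return 0;
    }

Running this prints refcount=3 usagecount=2, confirming that the widened word still decodes exactly as the 32-bit layout did.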