
bufmgr: fewer calls to BufferDescriptorGetContentLock

We're planning to merge buffer content locks into BufferDesc.state. To reduce
the size of that patch, centralize calls to BufferDescriptorGetContentLock().

The biggest part of the change is in assertions, by introducing
BufferIsLockedByMe[InMode]() (and removing BufferIsExclusiveLocked()). This
seems like an improvement even without the aforementioned plans.

Additionally replace some direct calls to LWLockAcquire() with calls to
LockBuffer().

Reviewed-by: Matthias van de Meent <boekewurm+postgres@gmail.com>
Discussion: https://postgr.es/m/fvfmkr5kk4nyex56ejgxj3uzi63isfxovp2biecb4bspbjrze7@az2pljabhnff
Andres Freund
2025-10-08 16:06:19 -04:00
parent 2a2e1b470b
commit 3baae90013
4 changed files with 61 additions and 18 deletions
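
For orientation before the diffs: a minimal, hypothetical call site sketching how the helpers introduced here are meant to be used, assuming the standard bufmgr API. The function name, relation, and block number below are illustrative only and are not part of this commit.

#include "postgres.h"

#include "storage/bufmgr.h"

/* Hypothetical caller, for illustration only; not part of this commit. */
static void
example_touch_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);

	/* Take the content lock via LockBuffer() rather than LWLockAcquire(). */
	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

	/* New-style assertions: no BufferDescriptorGetContentLock() at the call site. */
	Assert(BufferIsLockedByMeInMode(buf, BUFFER_LOCK_EXCLUSIVE));
	Assert(BufferIsLockedByMe(buf));	/* any lock mode would satisfy this one */

	/* ... page modifications would go here ... */

	UnlockReleaseBuffer(buf);
}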

src/backend/access/heap/visibilitymap.c

@@ -270,7 +270,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
 	if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
 		elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
-	Assert(!BufferIsValid(heapBuf) || BufferIsExclusiveLocked(heapBuf));
+	Assert(!BufferIsValid(heapBuf) ||
+		   BufferIsLockedByMeInMode(heapBuf, BUFFER_LOCK_EXCLUSIVE));
 
 	/* Check that we have the right VM page pinned */
 	if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)

src/backend/access/transam/xloginsert.c

@@ -258,7 +258,8 @@ XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
 	 */
 #ifdef USE_ASSERT_CHECKING
 	if (!(flags & REGBUF_NO_CHANGE))
-		Assert(BufferIsExclusiveLocked(buffer) && BufferIsDirty(buffer));
+		Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE) &&
+			   BufferIsDirty(buffer));
 #endif
 
 	if (block_id >= max_registered_block_id)

src/backend/storage/buffer/bufmgr.c

@@ -1068,7 +1068,7 @@ ZeroAndLockBuffer(Buffer buffer, ReadBufferMode mode, bool already_valid)
 	 * already valid.)
 	 */
 	if (!isLocalBuf)
-		LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
+		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
 	/* Set BM_VALID, terminate IO, and wake up any waiters */
 	if (isLocalBuf)
@@ -2825,7 +2825,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
 		}
 
 		if (lock)
-			LWLockAcquire(BufferDescriptorGetContentLock(buf_hdr), LW_EXCLUSIVE);
+			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
 		TerminateBufferIO(buf_hdr, false, BM_VALID, true, false);
 	}
@@ -2838,14 +2838,14 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
 }
 
 /*
- * BufferIsExclusiveLocked
+ * BufferIsLockedByMe
  *
- *		Checks if buffer is exclusive-locked.
+ *		Checks if this backend has the buffer locked in any mode.
  *
  * Buffer must be pinned.
  */
 bool
-BufferIsExclusiveLocked(Buffer buffer)
+BufferIsLockedByMe(Buffer buffer)
 {
 	BufferDesc *bufHdr;
@@ -2858,9 +2858,49 @@ BufferIsExclusiveLocked(Buffer buffer)
 	}
 	else
 	{
+		bufHdr = GetBufferDescriptor(buffer - 1);
+		return LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr));
+	}
+}
+
+/*
+ * BufferIsLockedByMeInMode
+ *
+ *		Checks if this backend has the buffer locked in the specified mode.
+ *
+ * Buffer must be pinned.
+ */
+bool
+BufferIsLockedByMeInMode(Buffer buffer, int mode)
+{
+	BufferDesc *bufHdr;
+
+	Assert(BufferIsPinned(buffer));
+
+	if (BufferIsLocal(buffer))
+	{
+		/* Content locks are not maintained for local buffers. */
+		return true;
+	}
+	else
+	{
+		LWLockMode	lw_mode;
+
+		switch (mode)
+		{
+			case BUFFER_LOCK_EXCLUSIVE:
+				lw_mode = LW_EXCLUSIVE;
+				break;
+			case BUFFER_LOCK_SHARE:
+				lw_mode = LW_SHARED;
+				break;
+			default:
+				pg_unreachable();
+		}
+
 		bufHdr = GetBufferDescriptor(buffer - 1);
 		return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
-									LW_EXCLUSIVE);
+									lw_mode);
 	}
 }
@@ -2889,8 +2929,7 @@ BufferIsDirty(Buffer buffer)
 	else
 	{
 		bufHdr = GetBufferDescriptor(buffer - 1);
-		Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
-									LW_EXCLUSIVE));
+		Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
 	}
 
 	return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
@@ -2924,8 +2963,7 @@ MarkBufferDirty(Buffer buffer)
 	bufHdr = GetBufferDescriptor(buffer - 1);
 
 	Assert(BufferIsPinned(buffer));
-	Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
-								LW_EXCLUSIVE));
+	Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
 
 	old_buf_state = pg_atomic_read_u32(&bufHdr->state);
 	for (;;)
@@ -3259,7 +3297,10 @@ UnpinBufferNoOwner(BufferDesc *buf)
 		 */
 		VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
 
-		/* I'd better not still hold the buffer content lock */
+		/*
+		 * I'd better not still hold the buffer content lock. Can't use
+		 * BufferIsLockedByMe(), as that asserts the buffer is pinned.
+		 */
 		Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
 
 		/*
@@ -5324,7 +5365,7 @@ FlushOneBuffer(Buffer buffer)
 	bufHdr = GetBufferDescriptor(buffer - 1);
 
-	Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
+	Assert(BufferIsLockedByMe(buffer));
 
 	FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
 }
@@ -5415,7 +5456,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
 		Assert(GetPrivateRefCount(buffer) > 0);
 		/* here, either share or exclusive lock is OK */
-		Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
+		Assert(BufferIsLockedByMe(buffer));
 
 		/*
 		 * This routine might get called many times on the same page, if we are
@@ -5898,8 +5939,7 @@ IsBufferCleanupOK(Buffer buffer)
 	bufHdr = GetBufferDescriptor(buffer - 1);
 
 	/* caller must hold exclusive lock on buffer */
-	Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
-								LW_EXCLUSIVE));
+	Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
 
 	buf_state = LockBufHdr(bufHdr);

src/include/storage/bufmgr.h

@@ -230,7 +230,8 @@ extern void WaitReadBuffers(ReadBuffersOperation *operation);
 extern void ReleaseBuffer(Buffer buffer);
 extern void UnlockReleaseBuffer(Buffer buffer);
-extern bool BufferIsExclusiveLocked(Buffer buffer);
+extern bool BufferIsLockedByMe(Buffer buffer);
+extern bool BufferIsLockedByMeInMode(Buffer buffer, int mode);
 extern bool BufferIsDirty(Buffer buffer);
 extern void MarkBufferDirty(Buffer buffer);
 extern void IncrBufferRefCount(Buffer buffer);