1
0
mirror of https://github.com/postgres/postgres.git synced 2025-06-07 11:02:12 +03:00

Use ExtendBufferedRelTo() in {vm,fsm}_extend()

This uses ExtendBufferedRelTo(), introduced in 31966b151e6, to extend the
visibilitymap and freespacemap to the size needed.

It also happens to fix a warning introduced in 3d6a98457d8, reported by Tom
Lane.

Discussion: https://postgr.es/m/20221029025420.eplyow6k7tgu6he3@awork3.anarazel.de
Discussion: https://postgr.es/m/2194723.1680736788@sss.pgh.pa.us
This commit is contained in:
Andres Freund 2023-04-05 17:29:57 -07:00
parent bccd6908ca
commit fcdda1e4b5
2 changed files with 48 additions and 115 deletions

View File

@ -126,7 +126,7 @@
/* prototypes for internal routines */ /* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend); static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static void vm_extend(Relation rel, BlockNumber vm_nblocks); static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);
/* /*
@ -574,22 +574,29 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0; reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
} }
/* Handle requests beyond EOF */ /*
* For reading we use ZERO_ON_ERROR mode, and initialize the page if
* necessary. It's always safe to clear bits, so it's better to clear
* corrupt pages than error out.
*
* We use the same path below to initialize pages when extending the
* relation, as a concurrent extension can end up with vm_extend()
* returning an already-initialized page.
*/
if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM]) if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
{ {
if (extend) if (extend)
vm_extend(rel, blkno + 1); buf = vm_extend(rel, blkno + 1);
else else
return InvalidBuffer; return InvalidBuffer;
} }
else
buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
RBM_ZERO_ON_ERROR, NULL);
/* /*
* Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's * Initializing the page when needed is trickier than it looks, because of
* always safe to clear bits, so it's better to clear corrupt pages than * the possibility of multiple backends doing this concurrently, and our
* error out.
*
* The initialize-the-page part is trickier than it looks, because of the
* possibility of multiple backends doing this concurrently, and our
* desire to not uselessly take the buffer lock in the normal path where * desire to not uselessly take the buffer lock in the normal path where
* the page is OK. We must take the lock to initialize the page, so * the page is OK. We must take the lock to initialize the page, so
* recheck page newness after we have the lock, in case someone else * recheck page newness after we have the lock, in case someone else
@ -602,8 +609,6 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
* long as it doesn't depend on the page header having correct contents. * long as it doesn't depend on the page header having correct contents.
* Current usage is safe because PageGetContents() does not require that. * Current usage is safe because PageGetContents() does not require that.
*/ */
buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
RBM_ZERO_ON_ERROR, NULL);
if (PageIsNew(BufferGetPage(buf))) if (PageIsNew(BufferGetPage(buf)))
{ {
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@ -618,51 +623,16 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
* Ensure that the visibility map fork is at least vm_nblocks long, extending * Ensure that the visibility map fork is at least vm_nblocks long, extending
* it if necessary with zeroed pages. * it if necessary with zeroed pages.
*/ */
static void static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks) vm_extend(Relation rel, BlockNumber vm_nblocks)
{ {
BlockNumber vm_nblocks_now; Buffer buf;
PGAlignedBlock pg = {0};
SMgrRelation reln;
/* buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
* We use the relation extension lock to lock out other backends trying to EB_CREATE_FORK_IF_NEEDED |
* extend the visibility map at the same time. It also locks out extension EB_CLEAR_SIZE_CACHE,
* of the main fork, unnecessarily, but extending the visibility map vm_nblocks,
* happens seldom enough that it doesn't seem worthwhile to have a RBM_ZERO_ON_ERROR);
* separate lock tag type for it.
*
* Note that another backend might have extended or created the relation
* by the time we get the lock.
*/
LockRelationForExtension(rel, ExclusiveLock);
/*
* Caution: re-using this smgr pointer could fail if the relcache entry
* gets closed. It's safe as long as we only do smgr-level operations
* between here and the last use of the pointer.
*/
reln = RelationGetSmgr(rel);
/*
* Create the file first if it doesn't exist. If smgr_vm_nblocks is
* positive then it must exist, no need for an smgrexists call.
*/
if ((reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == 0 ||
reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber) &&
!smgrexists(reln, VISIBILITYMAP_FORKNUM))
smgrcreate(reln, VISIBILITYMAP_FORKNUM, false);
/* Invalidate cache so that smgrnblocks() asks the kernel. */
reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = InvalidBlockNumber;
vm_nblocks_now = smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
/* Now extend the file */
while (vm_nblocks_now < vm_nblocks)
{
smgrextend(reln, VISIBILITYMAP_FORKNUM, vm_nblocks_now, pg.data, false);
vm_nblocks_now++;
}
/* /*
* Send a shared-inval message to force other backends to close any smgr * Send a shared-inval message to force other backends to close any smgr
@ -671,7 +641,7 @@ vm_extend(Relation rel, BlockNumber vm_nblocks)
* to keep checking for creation or extension of the file, which happens * to keep checking for creation or extension of the file, which happens
* infrequently. * infrequently.
*/ */
CacheInvalidateSmgr(reln->smgr_rlocator); CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);
UnlockRelationForExtension(rel, ExclusiveLock); return buf;
} }

View File

@ -98,7 +98,7 @@ static BlockNumber fsm_get_heap_blk(FSMAddress addr, uint16 slot);
static BlockNumber fsm_logical_to_physical(FSMAddress addr); static BlockNumber fsm_logical_to_physical(FSMAddress addr);
static Buffer fsm_readbuf(Relation rel, FSMAddress addr, bool extend); static Buffer fsm_readbuf(Relation rel, FSMAddress addr, bool extend);
static void fsm_extend(Relation rel, BlockNumber fsm_nblocks); static Buffer fsm_extend(Relation rel, BlockNumber fsm_nblocks);
/* functions to convert amount of free space to a FSM category */ /* functions to convert amount of free space to a FSM category */
static uint8 fsm_space_avail_to_cat(Size avail); static uint8 fsm_space_avail_to_cat(Size avail);
@ -558,24 +558,30 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
reln->smgr_cached_nblocks[FSM_FORKNUM] = 0; reln->smgr_cached_nblocks[FSM_FORKNUM] = 0;
} }
/* Handle requests beyond EOF */ /*
* For reading we use ZERO_ON_ERROR mode, and initialize the page if
* necessary. The FSM information is not accurate anyway, so it's better
* to clear corrupt pages than error out. Since the FSM changes are not
* WAL-logged, the so-called torn page problem on crash can lead to pages
* with corrupt headers, for example.
*
* We use the same path below to initialize pages when extending the
* relation, as a concurrent extension can end up with fsm_extend()
* returning an already-initialized page.
*/
if (blkno >= reln->smgr_cached_nblocks[FSM_FORKNUM]) if (blkno >= reln->smgr_cached_nblocks[FSM_FORKNUM])
{ {
if (extend) if (extend)
fsm_extend(rel, blkno + 1); buf = fsm_extend(rel, blkno + 1);
else else
return InvalidBuffer; return InvalidBuffer;
} }
else
buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
/* /*
* Use ZERO_ON_ERROR mode, and initialize the page if necessary. The FSM * Initializing the page when needed is trickier than it looks, because of
* information is not accurate anyway, so it's better to clear corrupt * the possibility of multiple backends doing this concurrently, and our
* pages than error out. Since the FSM changes are not WAL-logged, the
* so-called torn page problem on crash can lead to pages with corrupt
* headers, for example.
*
* The initialize-the-page part is trickier than it looks, because of the
* possibility of multiple backends doing this concurrently, and our
* desire to not uselessly take the buffer lock in the normal path where * desire to not uselessly take the buffer lock in the normal path where
* the page is OK. We must take the lock to initialize the page, so * the page is OK. We must take the lock to initialize the page, so
* recheck page newness after we have the lock, in case someone else * recheck page newness after we have the lock, in case someone else
@ -588,7 +594,6 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
* long as it doesn't depend on the page header having correct contents. * long as it doesn't depend on the page header having correct contents.
* Current usage is safe because PageGetContents() does not require that. * Current usage is safe because PageGetContents() does not require that.
*/ */
buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
if (PageIsNew(BufferGetPage(buf))) if (PageIsNew(BufferGetPage(buf)))
{ {
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@ -604,56 +609,14 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
* it if necessary with empty pages. And by empty, I mean pages filled * it if necessary with empty pages. And by empty, I mean pages filled
* with zeros, meaning there's no free space. * with zeros, meaning there's no free space.
*/ */
static void static Buffer
fsm_extend(Relation rel, BlockNumber fsm_nblocks) fsm_extend(Relation rel, BlockNumber fsm_nblocks)
{ {
BlockNumber fsm_nblocks_now; return ExtendBufferedRelTo(EB_REL(rel), FSM_FORKNUM, NULL,
PGAlignedBlock pg = {0}; EB_CREATE_FORK_IF_NEEDED |
SMgrRelation reln; EB_CLEAR_SIZE_CACHE,
fsm_nblocks,
RBM_ZERO_ON_ERROR);
/*
* We use the relation extension lock to lock out other backends trying to
* extend the FSM at the same time. It also locks out extension of the
* main fork, unnecessarily, but extending the FSM happens seldom enough
* that it doesn't seem worthwhile to have a separate lock tag type for
* it.
*
* Note that another backend might have extended or created the relation
* by the time we get the lock.
*/
LockRelationForExtension(rel, ExclusiveLock);
/*
* Caution: re-using this smgr pointer could fail if the relcache entry
* gets closed. It's safe as long as we only do smgr-level operations
* between here and the last use of the pointer.
*/
reln = RelationGetSmgr(rel);
/*
* Create the FSM file first if it doesn't exist. If
* smgr_cached_nblocks[FSM_FORKNUM] is positive then it must exist, no
* need for an smgrexists call.
*/
if ((reln->smgr_cached_nblocks[FSM_FORKNUM] == 0 ||
reln->smgr_cached_nblocks[FSM_FORKNUM] == InvalidBlockNumber) &&
!smgrexists(reln, FSM_FORKNUM))
smgrcreate(reln, FSM_FORKNUM, false);
/* Invalidate cache so that smgrnblocks() asks the kernel. */
reln->smgr_cached_nblocks[FSM_FORKNUM] = InvalidBlockNumber;
fsm_nblocks_now = smgrnblocks(reln, FSM_FORKNUM);
/* Extend as needed. */
while (fsm_nblocks_now < fsm_nblocks)
{
smgrextend(reln, FSM_FORKNUM, fsm_nblocks_now,
pg.data, false);
fsm_nblocks_now++;
}
UnlockRelationForExtension(rel, ExclusiveLock);
} }
/* /*