1
0
mirror of https://github.com/postgres/postgres.git synced 2025-05-06 19:59:18 +03:00

Revert "Move page initialization from RelationAddExtraBlocks() to use."

This reverts commit fc02e6724f3ce069b33284bce092052ab55bd751 and
e6799d5a53011985d916fdb48fe014a4ae70422e.

Parts of the buildfarm error out with
ERROR: page %u of relation "%s" should be empty but is not
errors, and so far I/we do not know why. fc02e672 didn't fix the
issue.  As I cannot reproduce the issue locally, it seems best to get
the buildfarm green again, and reproduce the issue without time
pressure.
This commit is contained in:
Andres Freund 2019-01-28 17:16:56 -08:00
parent fc02e6724f
commit 684200543b
2 changed files with 43 additions and 79 deletions

View File

@ -204,8 +204,7 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
/* /*
* Extend by one page. This should generally match the main-line * Extend by one page. This should generally match the main-line
* extension code in RelationGetBufferForTuple, except that we hold * extension code in RelationGetBufferForTuple, except that we hold
* the relation extension lock throughout, and we don't immediately * the relation extension lock throughout.
* initialize the page (see below).
*/ */
buffer = ReadBufferBI(relation, P_NEW, bistate); buffer = ReadBufferBI(relation, P_NEW, bistate);
@ -217,16 +216,18 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
BufferGetBlockNumber(buffer), BufferGetBlockNumber(buffer),
RelationGetRelationName(relation)); RelationGetRelationName(relation));
PageInit(page, BufferGetPageSize(buffer), 0);
/* /*
* Add the page to the FSM without initializing. If we were to * We mark all the new buffers dirty, but do nothing to write them
* initialize here the page would potentially get flushed out to disk * out; they'll probably get used soon, and even if they are not, a
* before we add any useful content. There's no guarantee that that'd * crash will leave an okay all-zeroes page on disk.
* happen before a potential crash, so we need to deal with
* uninitialized pages anyway, thus avoid the potential for
* unnecessary writes.
*/ */
MarkBufferDirty(buffer);
/* we'll need this info below */
blockNum = BufferGetBlockNumber(buffer); blockNum = BufferGetBlockNumber(buffer);
freespace = BufferGetPageSize(buffer) - SizeOfPageHeaderData; freespace = PageGetHeapFreeSpace(page);
UnlockReleaseBuffer(buffer); UnlockReleaseBuffer(buffer);
@ -478,18 +479,6 @@ loop:
* we're done. * we're done.
*/ */
page = BufferGetPage(buffer); page = BufferGetPage(buffer);
/*
* Initialize page, it'll be used soon. We could avoid dirtying the
* buffer here, and rely on the caller to do so whenever it puts a
* tuple onto the page, but there seems not much benefit in doing so.
*/
if (PageIsNew(page))
{
PageInit(page, BufferGetPageSize(buffer), 0);
MarkBufferDirty(buffer);
}
pageFreeSpace = PageGetHeapFreeSpace(page); pageFreeSpace = PageGetHeapFreeSpace(page);
if (len + saveFreeSpace <= pageFreeSpace) if (len + saveFreeSpace <= pageFreeSpace)
{ {

View File

@ -860,65 +860,43 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
if (PageIsNew(page)) if (PageIsNew(page))
{ {
bool still_new;
/* /*
* All-zeroes pages can be left over if either a backend extends * An all-zeroes page could be left over if a backend extends the
* the relation by a single page, but crashes before the newly * relation but crashes before initializing the page. Reclaim such
* initialized page has been written out, or when bulk-extending * pages for use.
* the relation (which creates a number of empty pages at the tail
* end of the relation, but enters them into the FSM).
*
* Make sure these pages are in the FSM, to ensure they can be
* reused. Do that by testing if there's any space recorded for
* the page. If not, enter it.
*
* Note we do not enter the page into the visibilitymap. That has
* the downside that we repeatedly visit this page in subsequent
* vacuums, but otherwise we'll never discover the space on a * promoted standby
* promoted standby. The harm of repeated checking ought to
* normally not be too bad - the space usually should be used at
* some point, otherwise there wouldn't be any regular vacuums.
* *
* We have to be careful here because we could be looking at a * We have to be careful here because we could be looking at a
* page that someone has just added to the relation and the * page that someone has just added to the relation and not yet
* extending backend might not yet have been able to lock the page * been able to initialize (see RelationGetBufferForTuple). To
* (see RelationGetBufferForTuple), which is problematic because * protect against that, release the buffer lock, grab the
* of cross-checks that new pages are actually new. If we add this * relation extension lock momentarily, and re-lock the buffer. If
* page to the FSM, this page could be reused, and such * the page is still uninitialized by then, it must be left over
* crosschecks could fail. To protect against that, release the * from a crashed backend, and we can initialize it.
* buffer lock, grab the relation extension lock momentarily, and *
* re-lock the buffer. If the page is still empty and not in the * We don't really need the relation lock when this is a new or
* FSM by then, it must be left over from a crashed * temp relation, but it's probably not worth the code space to
* backend, and we can record the free space. * check that, since this surely isn't a critical path.
* *
* Note: the comparable code in vacuum.c need not worry because * Note: the comparable code in vacuum.c need not worry because
* it's got an exclusive lock on the whole relation. * it's got exclusive lock on the whole relation.
*/ */
LockBuffer(buf, BUFFER_LOCK_UNLOCK); LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockRelationForExtension(onerel, ExclusiveLock); LockRelationForExtension(onerel, ExclusiveLock);
UnlockRelationForExtension(onerel, ExclusiveLock); UnlockRelationForExtension(onerel, ExclusiveLock);
LockBufferForCleanup(buf); LockBufferForCleanup(buf);
if (PageIsNew(page))
/* {
* Perform checking of FSM after releasing lock, the fsm is ereport(WARNING,
* approximate, after all. (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
*/ relname, blkno)));
still_new = PageIsNew(page); PageInit(page, BufferGetPageSize(buf), 0);
empty_pages++;
}
freespace = PageGetHeapFreeSpace(page);
MarkBufferDirty(buf);
UnlockReleaseBuffer(buf); UnlockReleaseBuffer(buf);
if (still_new) RecordPageWithFreeSpace(onerel, blkno, freespace);
{
empty_pages++;
if (GetRecordedFreeSpace(onerel, blkno) == 0)
{
Size freespace;
freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData;
RecordPageWithFreeSpace(onerel, blkno, freespace);
}
}
continue; continue;
} }
@ -927,10 +905,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
empty_pages++; empty_pages++;
freespace = PageGetHeapFreeSpace(page); freespace = PageGetHeapFreeSpace(page);
/* /* empty pages are always all-visible and all-frozen */
* Empty pages are always all-visible and all-frozen (note that
* the same is currently not true for new pages, see above).
*/
if (!PageIsAllVisible(page)) if (!PageIsAllVisible(page))
{ {
START_CRIT_SECTION(); START_CRIT_SECTION();
@ -1664,13 +1639,12 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup)
*hastup = false; *hastup = false;
/* /* If we hit an uninitialized page, we want to force vacuuming it. */
* New and empty pages, obviously, don't contain tuples. We could make if (PageIsNew(page))
* sure that the page is registered in the FSM, but it doesn't seem worth return true;
* waiting for a cleanup lock just for that, especially because it's
* likely that the pin holder will do so. /* Quick out for ordinary empty page. */
*/ if (PageIsEmpty(page))
if (PageIsNew(page) || PageIsEmpty(page))
return false; return false;
maxoff = PageGetMaxOffsetNumber(page); maxoff = PageGetMaxOffsetNumber(page);
@ -2055,6 +2029,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
if (PageIsNew(page) || PageIsEmpty(page)) if (PageIsNew(page) || PageIsEmpty(page))
{ {
/* PageIsNew probably shouldn't happen... */
UnlockReleaseBuffer(buf); UnlockReleaseBuffer(buf);
continue; continue;
} }