mirror of https://github.com/postgres/postgres.git
Combine FSM updates for prune and no-prune cases.
lazy_scan_prune() and lazy_scan_noprune() update the freespace map
with identical conditions; combine them. This consolidation is easier
now that cb970240f1 moved visibility map updates into
lazy_scan_prune().
While combining the FSM updates, simplify the logic for calling
lazy_scan_new_or_empty() and lazy_scan_noprune().
Also update a few comments in this part of the code to make them,
hopefully, clearer.
Melanie Plageman and Robert Haas
Discussion: https://postgr.es/m/CA%2BTgmoaLTvipm%3Dxx4rJLr07m908PCu%3DQH3uCjD1UOn8YaEuO2g%40mail.gmail.com
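
The condition being consolidated is small enough to state directly. As a
minimal sketch (the struct and function below are illustrative stand-ins,
not PostgreSQL symbols; the real fields live in LVRelState in
vacuumlazy.c), the rule both call sites shared is:

    #include <stdbool.h>

    /* Hypothetical stand-in for the LVRelState fields the condition reads. */
    struct vac_state
    {
        int     nindexes;           /* number of indexes on the relation */
        bool    do_index_vacuuming; /* is index vacuuming enabled? */
    };

    /*
     * Record free space during the first heap pass only when no second heap
     * pass will revisit the page: the relation has no indexes, index
     * vacuuming is disabled, or the page has no LP_DEAD items.
     */
    static bool
    record_freespace_now(const struct vac_state *vac, bool has_lpdead_items)
    {
        return vac->nindexes == 0 ||
            !vac->do_index_vacuuming ||
            !has_lpdead_items;
    }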
@@ -838,6 +838,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		Page		page;
 		bool		all_visible_according_to_vm;
 		bool		has_lpdead_items;
+		bool		got_cleanup_lock = false;
 
 		if (blkno == next_unskippable_block)
 		{
@@ -931,63 +932,40 @@ lazy_scan_heap(LVRelState *vacrel)
 		 */
 		visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
 
+		buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+								 vacrel->bstrategy);
+		page = BufferGetPage(buf);
+
 		/*
 		 * We need a buffer cleanup lock to prune HOT chains and defragment
 		 * the page in lazy_scan_prune. But when it's not possible to acquire
 		 * a cleanup lock right away, we may be able to settle for reduced
 		 * processing using lazy_scan_noprune.
 		 */
-		buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
-								 vacrel->bstrategy);
-		page = BufferGetPage(buf);
-		if (!ConditionalLockBufferForCleanup(buf))
-		{
-			LockBuffer(buf, BUFFER_LOCK_SHARE);
+		got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
 
-			/* Check for new or empty pages before lazy_scan_noprune call */
-			if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
-									   vmbuffer))
-			{
-				/* Processed as new/empty page (lock and pin released) */
-				continue;
-			}
+		if (!got_cleanup_lock)
+			LockBuffer(buf, BUFFER_LOCK_SHARE);
 
-			/*
-			 * Collect LP_DEAD items in dead_items array, count tuples,
-			 * determine if rel truncation is safe
-			 */
-			if (lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
-			{
-				Size		freespace = 0;
-				bool		recordfreespace;
-
-				/*
-				 * We processed the page successfully (without a cleanup
-				 * lock).
-				 *
-				 * Update the FSM, just as we would in the case where
-				 * lazy_scan_prune() is called. Our goal is to update the
-				 * freespace map the last time we touch the page. If the
-				 * relation has no indexes, or if index vacuuming is disabled,
-				 * there will be no second heap pass; if this particular page
-				 * has no dead items, the second heap pass will not touch this
-				 * page. So, in those cases, update the FSM now.
-				 *
-				 * After a call to lazy_scan_prune(), we would also try to
-				 * adjust the page-level all-visible bit and the visibility
-				 * map, but we skip that step in this path.
-				 */
-				recordfreespace = vacrel->nindexes == 0
-					|| !vacrel->do_index_vacuuming
-					|| !has_lpdead_items;
-				if (recordfreespace)
-					freespace = PageGetHeapFreeSpace(page);
-				UnlockReleaseBuffer(buf);
-				if (recordfreespace)
-					RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
-				continue;
-			}
+		/* Check for new or empty pages before lazy_scan_[no]prune call */
+		if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
+								   vmbuffer))
+		{
+			/* Processed as new/empty page (lock and pin released) */
+			continue;
+		}
 
+		/*
+		 * If we didn't get the cleanup lock, we can still collect LP_DEAD
+		 * items in the dead_items array for later vacuuming, count live and
+		 * recently dead tuples for vacuum logging, and determine if this
+		 * block could later be truncated. If we encounter any xid/mxids that
+		 * require advancing the relfrozenxid/relminxid, we'll have to wait
+		 * for a cleanup lock and call lazy_scan_prune().
+		 */
+		if (!got_cleanup_lock &&
+			!lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
+		{
 			/*
 			 * lazy_scan_noprune could not do all required processing. Wait
 			 * for a cleanup lock, and call lazy_scan_prune in the usual way.
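
Condensed, the per-block flow this hunk establishes in lazy_scan_heap()
(PostgreSQL's src/backend/access/heap/vacuumlazy.c) reads as follows. This
is a simplified sketch, not the committed code: the stubs stand in for the
buffer-manager and vacuum calls named in the diff, and the surrounding loop
is omitted.

    #include <stdbool.h>

    /* Stubs standing in for the calls named in the diff above. */
    static bool try_cleanup_lock(void) { return false; }   /* ConditionalLockBufferForCleanup() */
    static void take_shared_lock(void) { }                 /* LockBuffer(..., BUFFER_LOCK_SHARE) */
    static bool page_is_new_or_empty(bool share_locked)    /* lazy_scan_new_or_empty() */
    {
        (void) share_locked;
        return false;
    }
    static bool noprune_sufficed(bool *has_lpdead)         /* lazy_scan_noprune() */
    {
        *has_lpdead = false;
        return true;
    }
    static void wait_for_cleanup_lock(void) { }            /* LockBuffer(UNLOCK) + LockBufferForCleanup() */

    /*
     * Returns true when full processing (lazy_scan_prune()) is still
     * required; false when the block has already been fully handled.
     */
    static bool
    process_block(bool *has_lpdead_items)
    {
        bool    got_cleanup_lock = try_cleanup_lock();

        if (!got_cleanup_lock)
            take_shared_lock();

        /* One new/empty-page check now serves both lock states. */
        if (page_is_new_or_empty(!got_cleanup_lock))
            return false;

        /* Without a cleanup lock, try the reduced-processing path first. */
        if (!got_cleanup_lock && !noprune_sufficed(has_lpdead_items))
        {
            wait_for_cleanup_lock();
            got_cleanup_lock = true;
        }
        return got_cleanup_lock;
    }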
@@ -995,45 +973,45 @@ lazy_scan_heap(LVRelState *vacrel)
 			Assert(vacrel->aggressive);
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 			LockBufferForCleanup(buf);
+			got_cleanup_lock = true;
 		}
 
-		/* Check for new or empty pages before lazy_scan_prune call */
-		if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
-		{
-			/* Processed as new/empty page (lock and pin released) */
-			continue;
-		}
-
 		/*
-		 * Prune, freeze, and count tuples.
+		 * If we have a cleanup lock, we must now prune, freeze, and count
+		 * tuples. We may have acquired the cleanup lock originally, or we may
+		 * have gone back and acquired it after lazy_scan_noprune() returned
+		 * false. Either way, the page hasn't been processed yet.
 		 *
-		 * Accumulates details of remaining LP_DEAD line pointers on page in
-		 * dead_items array. This includes LP_DEAD line pointers that we
-		 * pruned ourselves, as well as existing LP_DEAD line pointers that
-		 * were pruned some time earlier. Also considers freezing XIDs in the
-		 * tuple headers of remaining items with storage. It also determines
-		 * if truncating this block is safe.
+		 * Like lazy_scan_noprune(), lazy_scan_prune() will count
+		 * recently_dead_tuples and live tuples for vacuum logging, determine
+		 * if the block can later be truncated, and accumulate the details of
+		 * remaining LP_DEAD line pointers on the page in the dead_items
+		 * array. These dead items include those pruned by lazy_scan_prune()
+		 * as well as line pointers previously marked LP_DEAD.
 		 */
-		lazy_scan_prune(vacrel, buf, blkno, page,
-						vmbuffer, all_visible_according_to_vm,
-						&has_lpdead_items);
+		if (got_cleanup_lock)
+			lazy_scan_prune(vacrel, buf, blkno, page,
+							vmbuffer, all_visible_according_to_vm,
+							&has_lpdead_items);
 
 		/*
-		 * Final steps for block: drop cleanup lock, record free space in the
-		 * FSM.
+		 * Now drop the buffer lock and, potentially, update the FSM.
 		 *
-		 * If we will likely do index vacuuming, wait until
-		 * lazy_vacuum_heap_rel() to save free space. This doesn't just save
-		 * us some cycles; it also allows us to record any additional free
-		 * space that lazy_vacuum_heap_page() will make available in cases
-		 * where it's possible to truncate the page's line pointer array.
+		 * Our goal is to update the freespace map the last time we touch the
+		 * page. If we'll process a block in the second pass, we may free up
+		 * additional space on the page, so it is better to update the FSM
+		 * after the second pass. If the relation has no indexes, or if index
+		 * vacuuming is disabled, there will be no second heap pass; if this
+		 * particular page has no dead items, the second heap pass will not
+		 * touch this page. So, in those cases, update the FSM now.
 		 *
-		 * Note: It's not in fact 100% certain that we really will call
-		 * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip index
-		 * vacuuming (and so must skip heap vacuuming). This is deemed okay
-		 * because it only happens in emergencies, or when there is very
-		 * little free space anyway. (Besides, we start recording free space
-		 * in the FSM once index vacuuming has been abandoned.)
+		 * Note: In corner cases, it's possible to miss updating the FSM
+		 * entirely. If index vacuuming is currently enabled, we'll skip the
+		 * FSM update now. But if failsafe mode is later activated, or there
+		 * are so few dead tuples that index vacuuming is bypassed, there will
+		 * also be no opportunity to update the FSM later, because we'll never
+		 * revisit this page. Since updating the FSM is desirable but not
+		 * absolutely required, that's OK.
 		 */
 		if (vacrel->nindexes == 0
 			|| !vacrel->do_index_vacuuming
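
The FSM-timing comment rewritten in this hunk describes a three-way outcome
for each page. A self-contained sketch of that decision (illustrative names,
not PostgreSQL code):

    #include <stdbool.h>

    enum fsm_update_time
    {
        FSM_UPDATE_FIRST_PASS,  /* record free space now, in the first pass */
        FSM_UPDATE_SECOND_PASS, /* record it after lazy_vacuum_heap_page() */
        FSM_UPDATE_MISSED       /* corner case: never updated (acceptable) */
    };

    static enum fsm_update_time
    classify_fsm_update(int nindexes, bool do_index_vacuuming,
                        bool has_lpdead_items, bool second_pass_happens)
    {
        /* No second pass will touch this page: update the FSM now. */
        if (nindexes == 0 || !do_index_vacuuming || !has_lpdead_items)
            return FSM_UPDATE_FIRST_PASS;

        /*
         * Otherwise defer to the second pass, which may free additional
         * space (e.g. by truncating the line pointer array). If the failsafe
         * later triggers, or index vacuuming is bypassed for having too few
         * dead tuples, the page is never revisited and the update is missed.
         */
        return second_pass_happens ? FSM_UPDATE_SECOND_PASS
            : FSM_UPDATE_MISSED;
    }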
@@ -1047,9 +1025,10 @@ lazy_scan_heap(LVRelState *vacrel)
 		/*
 		 * Periodically perform FSM vacuuming to make newly-freed space
 		 * visible on upper FSM pages. This is done after vacuuming if the
-		 * table has indexes.
+		 * table has indexes. There will only be newly-freed space if we
+		 * held the cleanup lock and lazy_scan_prune() was called.
 		 */
-		if (vacrel->nindexes == 0 && has_lpdead_items &&
+		if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
 			blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
 		{
 			FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
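
The capture cuts off inside the FreeSpaceMapVacuumRange() call. For
orientation only, a hedged sketch of how a periodic FSM range-vacuum of this
shape typically completes; the continuation below is an assumption, not part
of the captured diff:

    /*
     * Assumed continuation for illustration; the capture stops at the first
     * line of the FreeSpaceMapVacuumRange() call.
     */
    if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
        blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
    {
        /* Make freed space visible on upper FSM pages for this block range. */
        FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
                                blkno);
        next_fsm_block_to_vacuum = blkno;   /* assumed bookkeeping step */
    }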