
Revert "Avoid creation of the free space map for small heap relations."

This reverts commit ac88d2962a.
commit a23676503b
parent ac88d2962a
Author: Amit Kapila
Date: 2019-01-28 11:31:44 +05:30
16 changed files with 102 additions and 576 deletions
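
The mechanical core of the revert runs through every hunk below: the free-space-map entry points lose the extra arguments that ac88d2962a had given them, and the FSMClearLocalMap() calls disappear. A rough sketch of the call shapes as they appear at the call sites in this diff (the header declarations are not part of this commit, and the argument names here are just the locals used by the callers):

    /* call shapes removed by this revert (added in ac88d2962a) */
    targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace, false);
    RecordPageWithFreeSpace(relation, blockNum, freespace, firstBlock + extraBlocks);
    FSMClearLocalMap();

    /* call shapes restored by this revert */
    targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
    RecordPageWithFreeSpace(relation, blockNum, freespace);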


@@ -239,14 +239,8 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
          * Immediately update the bottom level of the FSM. This has a good
          * chance of making this page visible to other concurrently inserting
          * backends, and we want that to happen without delay.
-         *
-         * Since we know the table will end up with extraBlocks additional
-         * pages, we pass the final number to avoid possible unnecessary
-         * system calls and to make sure the FSM is created when we add the
-         * first new page.
          */
-        RecordPageWithFreeSpace(relation, blockNum, freespace,
-                                firstBlock + extraBlocks);
+        RecordPageWithFreeSpace(relation, blockNum, freespace);
     }
     while (--extraBlocks > 0);
@@ -383,9 +377,20 @@ RelationGetBufferForTuple(Relation relation, Size len,
          * We have no cached target page, so ask the FSM for an initial
          * target.
          */
-        targetBlock = GetPageWithFreeSpace(relation,
-                                           len + saveFreeSpace,
-                                           false);
+        targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
+
+        /*
+         * If the FSM knows nothing of the rel, try the last page before we
+         * give up and extend. This avoids one-tuple-per-page syndrome during
+         * bootstrapping or in a recently-started system.
+         */
+        if (targetBlock == InvalidBlockNumber)
+        {
+            BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
+
+            if (nblocks > 0)
+                targetBlock = nblocks - 1;
+        }
     }
 
 loop:
@@ -479,14 +484,6 @@ loop:
     {
         /* use this page as future insert target, too */
         RelationSetTargetBlock(relation, targetBlock);
-
-        /*
-         * In case we used an in-memory map of available blocks, reset it
-         * for next use.
-         */
-        if (targetBlock < HEAP_FSM_CREATION_THRESHOLD)
-            FSMClearLocalMap();
-
         return buffer;
     }
@@ -546,12 +543,9 @@ loop:
         /*
          * Check if some other backend has extended a block for us while
-         * we were waiting on the lock. We only check the FSM -- if there
-         * isn't one we don't recheck the number of blocks.
+         * we were waiting on the lock.
          */
-        targetBlock = GetPageWithFreeSpace(relation,
-                                           len + saveFreeSpace,
-                                           true);
+        targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
 
         /*
          * If some other waiter has already extended the relation, we
@@ -631,12 +625,5 @@ loop:
      */
     RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));
 
-    /*
-     * In case we used an in-memory map of available blocks, reset it for next
-     * use. We do this unconditionally since after relation extension we
-     * can't skip this based on the targetBlock.
-     */
-    FSMClearLocalMap();
-
     return buffer;
 }
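
With the in-memory block map gone, the case where the FSM knows nothing about the relation is again handled in RelationGetBufferForTuple() by probing the last existing block, as the restored hunk above shows. A minimal standalone sketch of that lookup order, assuming the two-argument GetPageWithFreeSpace() restored here; choose_target_block() is a hypothetical helper for illustration, not a function in the tree:

    /* hypothetical helper mirroring the restored page-selection order */
    static BlockNumber
    choose_target_block(Relation relation, Size needed)
    {
        /* first ask the FSM for a page with enough free space */
        BlockNumber targetBlock = GetPageWithFreeSpace(relation, needed);

        if (targetBlock == InvalidBlockNumber)
        {
            /* FSM has no information: fall back to the last existing page */
            BlockNumber nblocks = RelationGetNumberOfBlocks(relation);

            if (nblocks > 0)
                targetBlock = nblocks - 1;
            /* otherwise the caller must extend the relation */
        }

        return targetBlock;
    }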


@@ -153,7 +153,7 @@ static BufferAccessStrategy vac_strategy;
 static void lazy_scan_heap(Relation onerel, int options,
                LVRelStats *vacrelstats, Relation *Irel, int nindexes,
                bool aggressive);
-static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks);
+static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
 static void lazy_vacuum_index(Relation indrel,
                IndexBulkDeleteResult **stats,
@@ -758,7 +758,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
             pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
 
             /* Remove tuples from heap */
-            lazy_vacuum_heap(onerel, vacrelstats, nblocks);
+            lazy_vacuum_heap(onerel, vacrelstats);
 
             /*
              * Forget the now-vacuumed tuples, and press on, but be careful
@@ -896,7 +896,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
             MarkBufferDirty(buf);
             UnlockReleaseBuffer(buf);
 
-            RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
+            RecordPageWithFreeSpace(onerel, blkno, freespace);
             continue;
         }
@@ -935,7 +935,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
         }
 
         UnlockReleaseBuffer(buf);
-        RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
+        RecordPageWithFreeSpace(onerel, blkno, freespace);
         continue;
     }
@@ -1332,7 +1332,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
          * taken if there are no indexes.)
          */
         if (vacrelstats->num_dead_tuples == prev_dead_count)
-            RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
+            RecordPageWithFreeSpace(onerel, blkno, freespace);
     }
 
     /* report that everything is scanned and vacuumed */
@@ -1394,7 +1394,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
         /* Remove tuples from heap */
         pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
                                      PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
-        lazy_vacuum_heap(onerel, vacrelstats, nblocks);
+        lazy_vacuum_heap(onerel, vacrelstats);
         vacrelstats->num_index_scans++;
     }
@@ -1465,10 +1465,9 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
  * Note: the reason for doing this as a second pass is we cannot remove
  * the tuples until we've removed their index entries, and we want to
  * process index entry removal in batches as large as possible.
- * Note: nblocks is passed as an optimization for RecordPageWithFreeSpace().
  */
 static void
-lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks)
+lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 {
     int         tupindex;
     int         npages;
@@ -1505,7 +1504,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks)
         freespace = PageGetHeapFreeSpace(page);
 
         UnlockReleaseBuffer(buf);
-        RecordPageWithFreeSpace(onerel, tblk, freespace, nblocks);
+        RecordPageWithFreeSpace(onerel, tblk, freespace);
         npages++;
     }
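
The vacuum-side hunks are purely mechanical: lazy_vacuum_heap() and the RecordPageWithFreeSpace() call sites drop the nblocks argument again. They sit inside the two-pass scheme described in the comment above lazy_vacuum_heap(): index entries are removed first, in batches as large as possible, and only then are the heap tuples reclaimed. A simplified sketch of that ordering, with the index-statistics array and loop variable invented for illustration (the real control flow in lazy_scan_heap() is more involved):

    /* simplified ordering sketch, not the actual lazy_scan_heap() code */
    for (int i = 0; i < nindexes; i++)
        lazy_vacuum_index(Irel[i], &indstats[i], vacrelstats);   /* indexes first */
    lazy_vacuum_heap(onerel, vacrelstats);                       /* heap second */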