Mirror of https://github.com/postgres/postgres.git, synced 2025-07-28 23:42:10 +03:00
Revert "Avoid the creation of the free space map for small heap relations".
This feature used a process-local map to track the first few blocks in the
relation. The map was reset each time we found a block with enough free
space. It was discussed that it would be better to track this map on a
per-relation basis in relcache, and then invalidate it whenever vacuum frees
up some space in a page or when the FSM is created. The new design would be
better in terms of both API design and performance.

List of commits reverted, in reverse chronological order:

06c8a5090e  Improve code comments in b0eaa4c51b.
13e8643bfc  During pg_upgrade, conditionally skip transfer of FSMs.
6f918159a9  Add more tests for FSM.
9c32e4c350  Clear the local map when not used.
29d108cdec  Update the documentation for FSM behavior.
08ecdfe7e5  Make FSM test portable.
b0eaa4c51b  Avoid creation of the free space map for small heap relations.

Discussion: https://postgr.es/m/20190416180452.3pm6uegx54iitbt5@alap3.anarazel.de
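Editor's context note: for readers coming to this revert cold, the sketch below illustrates, in simplified form, how the reverted process-local map worked. For a heap relation of only a few blocks, no on-disk FSM was created; instead each backend remembered which of those blocks it had already probed for free space, and cleared that memory once a usable block was found. This is an illustration under assumed names: LocalFreeSpaceMap, local_map_next_block, local_map_clear, and the threshold value 4 are hypothetical, not PostgreSQL identifiers; only FSMClearLocalMap, visible in the diff below, comes from the reverted code.

    #include <stdbool.h>
    #include <string.h>

    #define LOCAL_MAP_THRESHOLD 4   /* assumed cutoff: rels at or below this
                                     * many blocks get no on-disk FSM */

    typedef struct LocalFreeSpaceMap
    {
        unsigned    nblocks;    /* blocks covered; 0 means map not in use */
        bool        tried[LOCAL_MAP_THRESHOLD];  /* block already probed? */
    } LocalFreeSpaceMap;

    static LocalFreeSpaceMap fsm_local_map;     /* one instance per backend */

    /*
     * Return the next block to probe for enough free space, or -1 once every
     * candidate block has been tried (the caller would then extend the rel).
     */
    static int
    local_map_next_block(unsigned rel_nblocks)
    {
        if (fsm_local_map.nblocks == 0)
        {
            /* First use: cover the relation's existing blocks, capped. */
            fsm_local_map.nblocks = rel_nblocks < LOCAL_MAP_THRESHOLD
                ? rel_nblocks : LOCAL_MAP_THRESHOLD;
            memset(fsm_local_map.tried, 0, sizeof(fsm_local_map.tried));
        }

        for (unsigned blk = 0; blk < fsm_local_map.nblocks; blk++)
        {
            if (!fsm_local_map.tried[blk])
            {
                fsm_local_map.tried[blk] = true;
                return (int) blk;
            }
        }
        return -1;
    }

    /*
     * Reset the map after a block with enough free space has been used,
     * mirroring what FSMClearLocalMap() does in the diff below.
     */
    static void
    local_map_clear(void)
    {
        memset(&fsm_local_map, 0, sizeof(fsm_local_map));
    }

The per-backend lifetime of this map is what the commit message objects to: tracking it per relation in relcache, invalidated when vacuum frees space or when the FSM is created, was judged better for both API design and performance.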
src/backend/access/heap/hio.c

@@ -246,14 +246,8 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
 		 * Immediately update the bottom level of the FSM.  This has a good
 		 * chance of making this page visible to other concurrently inserting
 		 * backends, and we want that to happen without delay.
-		 *
-		 * Since we know the table will end up with extraBlocks additional
-		 * pages, we pass the final number to avoid possible unnecessary
-		 * system calls and to make sure the FSM is created when we add the
-		 * first new page.
 		 */
-		RecordPageWithFreeSpace(relation, blockNum, freespace,
-								firstBlock + extraBlocks);
+		RecordPageWithFreeSpace(relation, blockNum, freespace);
 	}
 	while (--extraBlocks > 0);
 
@@ -390,9 +384,20 @@ RelationGetBufferForTuple(Relation relation, Size len,
 		 * We have no cached target page, so ask the FSM for an initial
 		 * target.
 		 */
-		targetBlock = GetPageWithFreeSpace(relation,
-										   len + saveFreeSpace,
-										   false);
+		targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
+
+		/*
+		 * If the FSM knows nothing of the rel, try the last page before we
+		 * give up and extend.  This avoids one-tuple-per-page syndrome during
+		 * bootstrapping or in a recently-started system.
+		 */
+		if (targetBlock == InvalidBlockNumber)
+		{
+			BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
+
+			if (nblocks > 0)
+				targetBlock = nblocks - 1;
+		}
 	}
 
 loop:
@@ -499,13 +504,6 @@ loop:
 	{
 		/* use this page as future insert target, too */
 		RelationSetTargetBlock(relation, targetBlock);
-
-		/*
-		 * In case we used an in-memory map of available blocks, reset it
-		 * for next use.
-		 */
-		FSMClearLocalMap();
-
 		return buffer;
 	}
 
@@ -565,12 +563,9 @@ loop:
 
 		/*
 		 * Check if some other backend has extended a block for us while
-		 * we were waiting on the lock.  We only check the FSM -- if there
-		 * isn't one we don't recheck the number of blocks.
+		 * we were waiting on the lock.
 		 */
-		targetBlock = GetPageWithFreeSpace(relation,
-										   len + saveFreeSpace,
-										   true);
+		targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
 
 		/*
 		 * If some other waiter has already extended the relation, we
@@ -675,8 +670,5 @@ loop:
 	 */
 	RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));
 
-	/* This should already be cleared by now, but make sure it is. */
-	FSMClearLocalMap();
-
 	return buffer;
 }
src/backend/access/heap/vacuumlazy.c

@@ -153,7 +153,7 @@ static BufferAccessStrategy vac_strategy;
 static void lazy_scan_heap(Relation onerel, VacuumParams *params,
 			   LVRelStats *vacrelstats, Relation *Irel, int nindexes,
 			   bool aggressive);
-static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks);
+static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
 static void lazy_vacuum_index(Relation indrel,
 			   IndexBulkDeleteResult **stats,
@@ -780,7 +780,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
 
 			/* Remove tuples from heap */
-			lazy_vacuum_heap(onerel, vacrelstats, nblocks);
+			lazy_vacuum_heap(onerel, vacrelstats);
 
 			/*
 			 * Forget the now-vacuumed tuples, and press on, but be careful
@@ -919,7 +919,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 					Size		freespace;
 
 					freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData;
-					RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
+					RecordPageWithFreeSpace(onerel, blkno, freespace);
 				}
 			}
 			continue;
@@ -963,7 +963,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			}
 
 			UnlockReleaseBuffer(buf);
-			RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
+			RecordPageWithFreeSpace(onerel, blkno, freespace);
 			continue;
 		}
 
@@ -1381,7 +1381,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		 * taken if there are no indexes.)
 		 */
 		if (vacrelstats->num_dead_tuples == prev_dead_count)
-			RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
+			RecordPageWithFreeSpace(onerel, blkno, freespace);
 	}
 
 	/* report that everything is scanned and vacuumed */
@@ -1443,7 +1443,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		/* Remove tuples from heap */
 		pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 									 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
-		lazy_vacuum_heap(onerel, vacrelstats, nblocks);
+		lazy_vacuum_heap(onerel, vacrelstats);
 		vacrelstats->num_index_scans++;
 	}
 
@@ -1517,10 +1517,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
  * Note: the reason for doing this as a second pass is we cannot remove
  * the tuples until we've removed their index entries, and we want to
  * process index entry removal in batches as large as possible.
- * Note: nblocks is passed as an optimization for RecordPageWithFreeSpace().
  */
 static void
-lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks)
+lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 {
 	int			tupindex;
 	int			npages;
@@ -1557,7 +1556,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks)
 		freespace = PageGetHeapFreeSpace(page);
 
 		UnlockReleaseBuffer(buf);
-		RecordPageWithFreeSpace(onerel, tblk, freespace, nblocks);
+		RecordPageWithFreeSpace(onerel, tblk, freespace);
 		npages++;
 	}
 