mirror of
https://github.com/postgres/postgres.git
synced 2025-07-28 23:42:10 +03:00
Set PD_ALL_VISIBLE and visibility map bits in COPY FREEZE
Make sure COPY FREEZE marks the pages as PD_ALL_VISIBLE and updates the visibility map. Until now we only marked individual tuples as frozen, but page-level flags were not updated, so the first VACUUM after the COPY FREEZE had to rewrite the whole table. This is a fairly old patch, and multiple people worked on it. The first version was written by Jeff Janes, and then reworked by Pavan Deolasee and Anastasia Lubennikova. Author: Anastasia Lubennikova, Pavan Deolasee, Jeff Janes Reviewed-by: Kuntal Ghosh, Jeff Janes, Tomas Vondra, Masahiko Sawada, Andres Freund, Ibrar Ahmed, Robert Haas, Tatsuo Ishii, Darafei Praliaskouski Discussion: https://postgr.es/m/CABOikdN-ptGv0mZntrK2Q8OtfUuAjqaYMGmkdU1dCKFtUxVLrg@mail.gmail.com Discussion: https://postgr.es/m/CAMkU%3D1w3osJJ2FneELhhNRLxfZitDgp9FPHee08NT2FQFmz_pQ%40mail.gmail.com
This commit is contained in:
@ -2121,6 +2121,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
int ndone;
|
||||
PGAlignedBlock scratch;
|
||||
Page page;
|
||||
Buffer vmbuffer = InvalidBuffer;
|
||||
bool needwal;
|
||||
Size saveFreeSpace;
|
||||
bool need_tuple_data = RelationIsLogicallyLogged(relation);
|
||||
@ -2175,8 +2176,9 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
while (ndone < ntuples)
|
||||
{
|
||||
Buffer buffer;
|
||||
Buffer vmbuffer = InvalidBuffer;
|
||||
bool starting_with_empty_page;
|
||||
bool all_visible_cleared = false;
|
||||
bool all_frozen_set = false;
|
||||
int nthispage;
|
||||
|
||||
CHECK_FOR_INTERRUPTS();
|
||||
@ -2184,12 +2186,20 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
/*
|
||||
* Find buffer where at least the next tuple will fit. If the page is
|
||||
* all-visible, this will also pin the requisite visibility map page.
|
||||
*
|
||||
* Also pin visibility map page if COPY FREEZE inserts tuples into an
|
||||
* empty page. See all_frozen_set below.
|
||||
*/
|
||||
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
|
||||
InvalidBuffer, options, bistate,
|
||||
&vmbuffer, NULL);
|
||||
page = BufferGetPage(buffer);
|
||||
|
||||
starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
|
||||
|
||||
if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
|
||||
all_frozen_set = true;
|
||||
|
||||
/* NO EREPORT(ERROR) from here till changes are logged */
|
||||
START_CRIT_SECTION();
|
||||
|
||||
@ -2223,7 +2233,14 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
log_heap_new_cid(relation, heaptup);
|
||||
}
|
||||
|
||||
if (PageIsAllVisible(page))
|
||||
/*
|
||||
* If the page is all visible, need to clear that, unless we're only
|
||||
* going to add further frozen rows to it.
|
||||
*
|
||||
* If we're only adding already frozen rows to a previously empty
|
||||
* page, mark it as all-visible.
|
||||
*/
|
||||
if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
|
||||
{
|
||||
all_visible_cleared = true;
|
||||
PageClearAllVisible(page);
|
||||
@ -2231,6 +2248,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
BufferGetBlockNumber(buffer),
|
||||
vmbuffer, VISIBILITYMAP_VALID_BITS);
|
||||
}
|
||||
else if (all_frozen_set)
|
||||
PageSetAllVisible(page);
|
||||
|
||||
/*
|
||||
* XXX Should we set PageSetPrunable on this page ? See heap_insert()
|
||||
@ -2254,8 +2273,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
* If the page was previously empty, we can reinit the page
|
||||
* instead of restoring the whole thing.
|
||||
*/
|
||||
init = (ItemPointerGetOffsetNumber(&(heaptuples[ndone]->t_self)) == FirstOffsetNumber &&
|
||||
PageGetMaxOffsetNumber(page) == FirstOffsetNumber + nthispage - 1);
|
||||
init = starting_with_empty_page;
|
||||
|
||||
/* allocate xl_heap_multi_insert struct from the scratch area */
|
||||
xlrec = (xl_heap_multi_insert *) scratchptr;
|
||||
@ -2273,7 +2291,15 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
/* the rest of the scratch space is used for tuple data */
|
||||
tupledata = scratchptr;
|
||||
|
||||
xlrec->flags = all_visible_cleared ? XLH_INSERT_ALL_VISIBLE_CLEARED : 0;
|
||||
/* check that the mutually exclusive flags are not both set */
|
||||
Assert (!(all_visible_cleared && all_frozen_set));
|
||||
|
||||
xlrec->flags = 0;
|
||||
if (all_visible_cleared)
|
||||
xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
|
||||
if (all_frozen_set)
|
||||
xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
|
||||
|
||||
xlrec->ntuples = nthispage;
|
||||
|
||||
/*
|
||||
@ -2347,13 +2373,40 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
|
||||
|
||||
END_CRIT_SECTION();
|
||||
|
||||
UnlockReleaseBuffer(buffer);
|
||||
if (vmbuffer != InvalidBuffer)
|
||||
ReleaseBuffer(vmbuffer);
|
||||
/*
|
||||
* If we've frozen everything on the page, update the visibilitymap.
|
||||
* We're already holding pin on the vmbuffer.
|
||||
*/
|
||||
if (all_frozen_set)
|
||||
{
|
||||
Assert(PageIsAllVisible(page));
|
||||
Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
|
||||
|
||||
/*
|
||||
* It's fine to use InvalidTransactionId here - this is only used
|
||||
* when HEAP_INSERT_FROZEN is specified, which intentionally
|
||||
* violates visibility rules.
|
||||
*/
|
||||
visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
|
||||
InvalidXLogRecPtr, vmbuffer,
|
||||
InvalidTransactionId,
|
||||
VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
|
||||
}
|
||||
|
||||
UnlockReleaseBuffer(buffer);
|
||||
ndone += nthispage;
|
||||
|
||||
/*
|
||||
* NB: Only release vmbuffer after inserting all tuples - it's fairly
|
||||
* likely that we'll insert into subsequent heap pages that are likely
|
||||
* to use the same vm page.
|
||||
*/
|
||||
}
|
||||
|
||||
/* We're done with inserting all tuples, so release the last vmbuffer. */
|
||||
if (vmbuffer != InvalidBuffer)
|
||||
ReleaseBuffer(vmbuffer);
|
||||
|
||||
/*
|
||||
* We're done with the actual inserts. Check for conflicts again, to
|
||||
* ensure that all rw-conflicts in to these inserts are detected. Without
|
||||
@ -8725,6 +8778,10 @@ heap_xlog_insert(XLogReaderState *record)
|
||||
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
|
||||
PageClearAllVisible(page);
|
||||
|
||||
/* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
|
||||
if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
|
||||
PageSetAllVisible(page);
|
||||
|
||||
MarkBufferDirty(buffer);
|
||||
}
|
||||
if (BufferIsValid(buffer))
|
||||
@ -8775,6 +8832,10 @@ heap_xlog_multi_insert(XLogReaderState *record)
|
||||
|
||||
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
|
||||
|
||||
/* check that the mutually exclusive flags are not both set */
|
||||
Assert (!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
|
||||
(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
|
||||
|
||||
/*
|
||||
* The visibility map may need to be fixed even if the heap page is
|
||||
* already up-to-date.
|
||||
|
Reference in New Issue
Block a user