
Microvacuum for GIST

Mark an index tuple as dead if it is pointed to by kill_prior_tuple during an
ordinary (search) scan, and remove it during the insert process if there is
not enough space for the new tuple. This improves select performance because
the index will not return tuples marked as dead, and improves insert
performance because it reduces the number of page splits.

Anastasia Lubennikova <a.lubennikova@postgrespro.ru> with minor editorialization by me
This commit is contained in:
Teodor Sigaev
2015-09-09 18:43:37 +03:00
parent 96f6a0cb41
commit 013ebc0a7b
5 changed files with 241 additions and 3 deletions
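
The file view below shows only the scan side of the change, in gistget.c: remembering which returned tuples the executor reported as dead (kill_prior_tuple) and setting their LP_DEAD bits in gistkillitems(). The insert-side cleanup described in the commit message lives in gist.c and is not shown here. For orientation only, here is a minimal hedged sketch of that idea written against the generic PostgreSQL page APIs; the function name, signature, and call site are illustrative assumptions, not the commit's actual gist.c code.

#include "postgres.h"

#include "access/gist_private.h"
#include "access/itup.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"

/*
 * Illustrative sketch only (not the commit's code): before splitting a full
 * GiST leaf page, physically remove any tuples whose line pointers were
 * previously marked LP_DEAD, hoping to free enough space for the new tuple.
 * The caller is assumed to hold an exclusive lock on the buffer.
 */
static void
example_microvacuum_page(Buffer buffer)
{
	Page		page = BufferGetPage(buffer);
	OffsetNumber deletable[MaxIndexTuplesPerPage];
	int			ndeletable = 0;
	OffsetNumber offnum,
				maxoff;

	Assert(GistPageIsLeaf(page));

	/* collect the offsets of all items already marked dead by a scan */
	maxoff = PageGetMaxOffsetNumber(page);
	for (offnum = FirstOffsetNumber;
		 offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		itemid = PageGetItemId(page, offnum);

		if (ItemIdIsDead(itemid))
			deletable[ndeletable++] = offnum;
	}

	if (ndeletable > 0)
	{
		/* compact the page, removing the dead tuples */
		PageIndexMultiDelete(page, deletable, ndeletable);
		GistClearPageHasGarbage(page);
		/* a real caller would also mark the buffer dirty and WAL-log this */
	}
}

The diff that follows adds the pieces that feed such cleanup: gistScanPage() records each page's LSN and skips LP_DEAD items, and gistgettuple() collects kill_prior_tuple offsets and hands them to the new gistkillitems().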


@@ -24,6 +24,77 @@
#include "utils/memutils.h"
#include "utils/rel.h"
/*
* gistkillitems() -- set LP_DEAD state for items an indexscan caller has
* told us were killed.
*
* We re-read page here, so it's important to check page LSN. If the page
* has been modified since the last read (as determined by LSN), we cannot
* flag any entries because it is possible that the old entry was vacuumed
* away and the TID was re-used by a completely different heap tuple.
*/
static void
gistkillitems(IndexScanDesc scan)
{
	GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		iid;
	int			i;
	bool		killedsomething = false;

	Assert(so->curBlkno != InvalidBlockNumber);
	Assert(!XLogRecPtrIsInvalid(so->curPageLSN));
	Assert(so->killedItems != NULL);

	buffer = ReadBuffer(scan->indexRelation, so->curBlkno);
	if (!BufferIsValid(buffer))
		return;

	LockBuffer(buffer, GIST_SHARE);
	gistcheckpage(scan->indexRelation, buffer);
	page = BufferGetPage(buffer);

	/*
	 * If the page LSN differs, the page was modified since the last read.
	 * killedItems may no longer be valid, so applying LP_DEAD hints is not
	 * safe.
	 */
	if (PageGetLSN(page) != so->curPageLSN)
	{
		UnlockReleaseBuffer(buffer);
		so->numKilled = 0;		/* reset counter */
		return;
	}

	Assert(GistPageIsLeaf(page));

	/*
	 * Mark all killedItems as dead. We need no additional recheck, because,
	 * if the page was modified, the page LSN must have changed.
	 */
	for (i = 0; i < so->numKilled; i++)
	{
		offnum = so->killedItems[i];
		iid = PageGetItemId(page, offnum);
		ItemIdMarkDead(iid);
		killedsomething = true;
	}

	if (killedsomething)
	{
		GistMarkPageHasGarbage(page);
		MarkBufferDirtyHint(buffer, true);
	}

	UnlockReleaseBuffer(buffer);

	/*
	 * Always reset the scan state, so we don't look for the same items on
	 * other pages.
	 */
	so->numKilled = 0;
}

/*
* gistindex_keytest() -- does this index tuple satisfy the scan key(s)?
@@ -305,17 +376,33 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
if (so->pageDataCxt)
MemoryContextReset(so->pageDataCxt);
/*
* We save the LSN of the page as we read it, so that we know whether it is
* safe to apply LP_DEAD hints to the page later. This allows us to drop
* the pin for MVCC scans, which allows vacuum to avoid blocking.
*/
so->curPageLSN = PageGetLSN(page);
/*
* check all tuples on page
*/
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
IndexTuple it = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
ItemId iid = PageGetItemId(page, i);
IndexTuple it;
bool match;
bool recheck;
bool recheck_distances;
/*
* If the scan specifies not to return killed tuples, then we treat a
* killed tuple as not passing the qual.
*/
if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
continue;
it = (IndexTuple) PageGetItem(page, iid);
/*
* Must call gistindex_keytest in tempCxt, and clean up any leftover
* junk afterward.
@@ -348,6 +435,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
*/
so->pageData[so->nPageData].heapPtr = it->t_tid;
so->pageData[so->nPageData].recheck = recheck;
so->pageData[so->nPageData].offnum = i;
/*
* In an index-only scan, also fetch the data from the tuple.
@@ -572,7 +660,24 @@ gistgettuple(PG_FUNCTION_ARGS)
{
if (so->curPageData < so->nPageData)
{
if (scan->kill_prior_tuple && so->curPageData > 0)
{
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
* sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
if (so->numKilled < MaxIndexTuplesPerPage)
so->killedItems[so->numKilled++] =
so->pageData[so->curPageData - 1].offnum;
}
/* continuing to return tuples from a leaf page */
scan->xs_ctup.t_self = so->pageData[so->curPageData].heapPtr;
scan->xs_recheck = so->pageData[so->curPageData].recheck;
@@ -586,9 +691,36 @@ gistgettuple(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
}
/*
* Check the last returned tuple and add it to killedItems if
* necessary
*/
if (scan->kill_prior_tuple
&& so->curPageData > 0
&& so->curPageData == so->nPageData)
{
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
* sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
if (so->numKilled < MaxIndexTuplesPerPage)
so->killedItems[so->numKilled++] =
so->pageData[so->curPageData - 1].offnum;
}
/* find and process the next index page */
do
{
if ((so->curBlkno != InvalidBlockNumber) && (so->numKilled > 0))
gistkillitems(scan);
GISTSearchItem *item = getNextGISTSearchItem(so);
if (!item)
@@ -596,6 +728,9 @@
CHECK_FOR_INTERRUPTS();
/* save current item BlockNumber for next gistkillitems() call */
so->curBlkno = item->blkno;
/*
* While scanning a leaf page, ItemPointers of matching heap
* tuples are stored in so->pageData. If there are any on