mirror of https://github.com/postgres/postgres.git (synced 2025-07-05 07:21:24 +03:00)
Run pgindent on 9.2 source tree in preparation for first 9.3 commit-fest.
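pgindent reflows comment blocks and normalizes whitespace and declaration alignment across the whole tree, which is why this commit touches many files while changing no behavior. As a rough sketch, assuming the in-tree tool location and its usual prerequisites (a current typedefs list and the patched BSD indent), the whole-tree run is simply

    src/tools/pgindent/pgindent

executed from the top of the source tree; exact invocation details vary by branch.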
@@ -24,7 +24,7 @@
 /*
 * SPPageDesc tracks all info about a page we are inserting into. In some
 * situations it actually identifies a tuple, or even a specific node within
-* an inner tuple. But any of the fields can be invalid. If the buffer
+* an inner tuple. But any of the fields can be invalid. If the buffer
 * field is valid, it implies we hold pin and exclusive lock on that buffer.
 * page pointer should be valid exactly when buffer is.
 */
@@ -129,8 +129,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,
 int firststate, int reststate,
 BlockNumber blkno, OffsetNumber offnum)
 {
-OffsetNumber firstItem;
-OffsetNumber *sortednos;
+OffsetNumber firstItem;
+OffsetNumber *sortednos;
 SpGistDeadTuple tuple = NULL;
 int i;

@@ -155,8 +155,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,

 for (i = 0; i < nitems; i++)
 {
-OffsetNumber itemno = sortednos[i];
-int tupstate;
+OffsetNumber itemno = sortednos[i];
+int tupstate;

 tupstate = (itemno == firstItem) ? firststate : reststate;
 if (tuple == NULL || tuple->tupstate != tupstate)
@@ -200,7 +200,7 @@ saveNodeLink(Relation index, SPPageDesc *parent,
 */
 static void
 addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
-SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
+SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
 {
 XLogRecData rdata[4];
 spgxlogAddLeaf xlrec;
@@ -230,7 +230,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
 /* Tuple is not part of a chain */
 leafTuple->nextOffset = InvalidOffsetNumber;
 current->offnum = SpGistPageAddNewItem(state, current->page,
-(Item) leafTuple, leafTuple->size,
+(Item) leafTuple, leafTuple->size,
 NULL, false);

 xlrec.offnumLeaf = current->offnum;
@@ -250,9 +250,9 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
 else
 {
 /*
-* Tuple must be inserted into existing chain. We mustn't change
-* the chain's head address, but we don't need to chase the entire
-* chain to put the tuple at the end; we can insert it second.
+* Tuple must be inserted into existing chain. We mustn't change the
+* chain's head address, but we don't need to chase the entire chain
+* to put the tuple at the end; we can insert it second.
 *
 * Also, it's possible that the "chain" consists only of a DEAD tuple,
 * in which case we should replace the DEAD tuple in-place.
@@ -261,7 +261,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
 OffsetNumber offnum;

 head = (SpGistLeafTuple) PageGetItem(current->page,
-PageGetItemId(current->page, current->offnum));
+PageGetItemId(current->page, current->offnum));
 if (head->tupstate == SPGIST_LIVE)
 {
 leafTuple->nextOffset = head->nextOffset;
@@ -274,7 +274,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
 * and set new second element
 */
 head = (SpGistLeafTuple) PageGetItem(current->page,
-PageGetItemId(current->page, current->offnum));
+PageGetItemId(current->page, current->offnum));
 head->nextOffset = offnum;

 xlrec.offnumLeaf = offnum;
@@ -483,7 +483,7 @@ moveLeafs(Relation index, SpGistState *state,
 for (i = 0; i < nDelete; i++)
 {
 it = (SpGistLeafTuple) PageGetItem(current->page,
-PageGetItemId(current->page, toDelete[i]));
+PageGetItemId(current->page, toDelete[i]));
 Assert(it->tupstate == SPGIST_LIVE);

 /*
@@ -516,12 +516,12 @@ moveLeafs(Relation index, SpGistState *state,
 leafptr += newLeafTuple->size;

 /*
-* Now delete the old tuples, leaving a redirection pointer behind for
-* the first one, unless we're doing an index build; in which case there
-* can't be any concurrent scan so we need not provide a redirect.
+* Now delete the old tuples, leaving a redirection pointer behind for the
+* first one, unless we're doing an index build; in which case there can't
+* be any concurrent scan so we need not provide a redirect.
 */
 spgPageIndexMultiDelete(state, current->page, toDelete, nDelete,
-state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
+state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
 SPGIST_PLACEHOLDER,
 nblkno, r);

@@ -575,7 +575,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position,
 SpGistDeadTuple dt;

 dt = (SpGistDeadTuple) PageGetItem(current->page,
-PageGetItemId(current->page, position));
+PageGetItemId(current->page, position));
 Assert(dt->tupstate == SPGIST_REDIRECT);
 Assert(ItemPointerGetBlockNumber(&dt->pointer) == SPGIST_METAPAGE_BLKNO);
 ItemPointerSet(&dt->pointer, blkno, offnum);
@@ -640,7 +640,7 @@ checkAllTheSame(spgPickSplitIn *in, spgPickSplitOut *out, bool tooBig,
 /* The opclass may not use node labels, but if it does, duplicate 'em */
 if (out->nodeLabels)
 {
-Datum theLabel = out->nodeLabels[theNode];
+Datum theLabel = out->nodeLabels[theNode];

 out->nodeLabels = (Datum *) palloc(sizeof(Datum) * out->nNodes);
 for (i = 0; i < out->nNodes; i++)
@@ -754,8 +754,8 @@ doPickSplit(Relation index, SpGistState *state,
 {
 /*
 * We are splitting the root (which up to now is also a leaf page).
-* Its tuples are not linked, so scan sequentially to get them all.
-* We ignore the original value of current->offnum.
+* Its tuples are not linked, so scan sequentially to get them all. We
+* ignore the original value of current->offnum.
 */
 for (i = FirstOffsetNumber; i <= max; i++)
 {
@@ -773,7 +773,7 @@ doPickSplit(Relation index, SpGistState *state,
 /* we will delete the tuple altogether, so count full space */
 spaceToDelete += it->size + sizeof(ItemIdData);
 }
-else /* tuples on root should be live */
+else /* tuples on root should be live */
 elog(ERROR, "unexpected SPGiST tuple state: %d", it->tupstate);
 }
 }
@@ -820,7 +820,7 @@ doPickSplit(Relation index, SpGistState *state,
 * We may not actually insert new tuple because another picksplit may be
 * necessary due to too large value, but we will try to allocate enough
 * space to include it; and in any case it has to be included in the input
-* for the picksplit function. So don't increment nToInsert yet.
+* for the picksplit function. So don't increment nToInsert yet.
 */
 in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
 heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@@ -878,7 +878,7 @@ doPickSplit(Relation index, SpGistState *state,
 /*
 * Check to see if the picksplit function failed to separate the values,
 * ie, it put them all into the same child node. If so, select allTheSame
-* mode and create a random split instead. See comments for
+* mode and create a random split instead. See comments for
 * checkAllTheSame as to why we need to know if the new leaf tuples could
 * fit on one page.
 */
@@ -924,8 +924,8 @@ doPickSplit(Relation index, SpGistState *state,
 innerTuple->allTheSame = allTheSame;

 /*
-* Update nodes[] array to point into the newly formed innerTuple, so
-* that we can adjust their downlinks below.
+* Update nodes[] array to point into the newly formed innerTuple, so that
+* we can adjust their downlinks below.
 */
 SGITITERATE(innerTuple, i, node)
 {
@@ -944,13 +944,13 @@ doPickSplit(Relation index, SpGistState *state,
 }

 /*
-* To perform the split, we must insert a new inner tuple, which can't
-* go on a leaf page; and unless we are splitting the root page, we
-* must then update the parent tuple's downlink to point to the inner
-* tuple. If there is room, we'll put the new inner tuple on the same
-* page as the parent tuple, otherwise we need another non-leaf buffer.
-* But if the parent page is the root, we can't add the new inner tuple
-* there, because the root page must have only one inner tuple.
+* To perform the split, we must insert a new inner tuple, which can't go
+* on a leaf page; and unless we are splitting the root page, we must then
+* update the parent tuple's downlink to point to the inner tuple. If
+* there is room, we'll put the new inner tuple on the same page as the
+* parent tuple, otherwise we need another non-leaf buffer. But if the
+* parent page is the root, we can't add the new inner tuple there,
+* because the root page must have only one inner tuple.
 */
 xlrec.initInner = false;
 if (parent->buffer != InvalidBuffer &&
@@ -965,9 +965,9 @@ doPickSplit(Relation index, SpGistState *state,
 {
 /* Send tuple to page with next triple parity (see README) */
 newInnerBuffer = SpGistGetBuffer(index,
-GBUF_INNER_PARITY(parent->blkno + 1) |
+GBUF_INNER_PARITY(parent->blkno + 1) |
 (isNulls ? GBUF_NULLS : 0),
-innerTuple->size + sizeof(ItemIdData),
+innerTuple->size + sizeof(ItemIdData),
 &xlrec.initInner);
 }
 else
@@ -977,22 +977,22 @@ doPickSplit(Relation index, SpGistState *state,
 }

 /*
-* Because a WAL record can't involve more than four buffers, we can
-* only afford to deal with two leaf pages in each picksplit action,
-* ie the current page and at most one other.
+* Because a WAL record can't involve more than four buffers, we can only
+* afford to deal with two leaf pages in each picksplit action, ie the
+* current page and at most one other.
 *
-* The new leaf tuples converted from the existing ones should require
-* the same or less space, and therefore should all fit onto one page
+* The new leaf tuples converted from the existing ones should require the
+* same or less space, and therefore should all fit onto one page
 * (although that's not necessarily the current page, since we can't
 * delete the old tuples but only replace them with placeholders).
-* However, the incoming new tuple might not also fit, in which case
-* we might need another picksplit cycle to reduce it some more.
+* However, the incoming new tuple might not also fit, in which case we
+* might need another picksplit cycle to reduce it some more.
 *
-* If there's not room to put everything back onto the current page,
-* then we decide on a per-node basis which tuples go to the new page.
-* (We do it like that because leaf tuple chains can't cross pages,
-* so we must place all leaf tuples belonging to the same parent node
-* on the same page.)
+* If there's not room to put everything back onto the current page, then
+* we decide on a per-node basis which tuples go to the new page. (We do
+* it like that because leaf tuple chains can't cross pages, so we must
+* place all leaf tuples belonging to the same parent node on the same
+* page.)
 *
 * If we are splitting the root page (turning it from a leaf page into an
 * inner page), then no leaf tuples can go back to the current page; they
@@ -1037,12 +1037,13 @@ doPickSplit(Relation index, SpGistState *state,
 int newspace;

 newLeafBuffer = SpGistGetBuffer(index,
-GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
+GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
 Min(totalLeafSizes,
 SPGIST_PAGE_CAPACITY),
 &xlrec.initDest);

 /*
-* Attempt to assign node groups to the two pages. We might fail to
+* Attempt to assign node groups to the two pages. We might fail to
 * do so, even if totalLeafSizes is less than the available space,
 * because we can't split a group across pages.
 */
@@ -1054,12 +1055,12 @@ doPickSplit(Relation index, SpGistState *state,
 {
 if (leafSizes[i] <= curspace)
 {
-nodePageSelect[i] = 0; /* signifies current page */
+nodePageSelect[i] = 0; /* signifies current page */
 curspace -= leafSizes[i];
 }
 else
 {
-nodePageSelect[i] = 1; /* signifies new leaf page */
+nodePageSelect[i] = 1; /* signifies new leaf page */
 newspace -= leafSizes[i];
 }
 }
@@ -1075,7 +1076,7 @@ doPickSplit(Relation index, SpGistState *state,
 else if (includeNew)
 {
 /* We must exclude the new leaf tuple from the split */
-int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];
+int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];

 leafSizes[nodeOfNewTuple] -=
 newLeafs[in.nTuples - 1]->size + sizeof(ItemIdData);
@@ -1087,12 +1088,12 @@ doPickSplit(Relation index, SpGistState *state,
 {
 if (leafSizes[i] <= curspace)
 {
-nodePageSelect[i] = 0; /* signifies current page */
+nodePageSelect[i] = 0; /* signifies current page */
 curspace -= leafSizes[i];
 }
 else
 {
-nodePageSelect[i] = 1; /* signifies new leaf page */
+nodePageSelect[i] = 1; /* signifies new leaf page */
 newspace -= leafSizes[i];
 }
 }
@@ -1204,7 +1205,7 @@ doPickSplit(Relation index, SpGistState *state,
 for (i = 0; i < nToInsert; i++)
 {
 SpGistLeafTuple it = newLeafs[i];
-Buffer leafBuffer;
+Buffer leafBuffer;
 BlockNumber leafBlock;
 OffsetNumber newoffset;

@@ -1584,12 +1585,12 @@ spgAddNodeAction(Relation index, SpGistState *state,
 xlrec.nodeI = parent->node;

 /*
-* obtain new buffer with the same parity as current, since it will
-* be a child of same parent tuple
+* obtain new buffer with the same parity as current, since it will be
+* a child of same parent tuple
 */
 current->buffer = SpGistGetBuffer(index,
 GBUF_INNER_PARITY(current->blkno),
-newInnerTuple->size + sizeof(ItemIdData),
+newInnerTuple->size + sizeof(ItemIdData),
 &xlrec.newPage);
 current->blkno = BufferGetBlockNumber(current->buffer);
 current->page = BufferGetPage(current->buffer);
@@ -1597,15 +1598,15 @@ spgAddNodeAction(Relation index, SpGistState *state,
 xlrec.blknoNew = current->blkno;

 /*
-* Let's just make real sure new current isn't same as old. Right
-* now that's impossible, but if SpGistGetBuffer ever got smart enough
-* to delete placeholder tuples before checking space, maybe it
-* wouldn't be impossible. The case would appear to work except that
-* WAL replay would be subtly wrong, so I think a mere assert isn't
-* enough here.
+* Let's just make real sure new current isn't same as old. Right now
+* that's impossible, but if SpGistGetBuffer ever got smart enough to
+* delete placeholder tuples before checking space, maybe it wouldn't
+* be impossible. The case would appear to work except that WAL
+* replay would be subtly wrong, so I think a mere assert isn't enough
+* here.
 */
-if (xlrec.blknoNew == xlrec.blkno)
-elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
+if (xlrec.blknoNew == xlrec.blkno)
+elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");

 /*
 * New current and parent buffer will both be modified; but note that
@@ -1707,9 +1708,9 @@ spgSplitNodeAction(Relation index, SpGistState *state,
 Assert(!SpGistPageStoresNulls(current->page));

 /*
-* Construct new prefix tuple, containing a single node with the
-* specified label. (We'll update the node's downlink to point to the
-* new postfix tuple, below.)
+* Construct new prefix tuple, containing a single node with the specified
+* label. (We'll update the node's downlink to point to the new postfix
+* tuple, below.)
 */
 node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false);

@@ -1888,9 +1889,9 @@ spgdoinsert(Relation index, SpGistState *state,
 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
 errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
 (unsigned long) (leafSize - sizeof(ItemIdData)),
-(unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
+(unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
 RelationGetRelationName(index)),
-errhint("Values larger than a buffer page cannot be indexed.")));
+errhint("Values larger than a buffer page cannot be indexed.")));

 /* Initialize "current" to the appropriate root page */
 current.blkno = isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO;
@@ -1920,7 +1921,7 @@ spgdoinsert(Relation index, SpGistState *state,
 if (current.blkno == InvalidBlockNumber)
 {
 /*
-* Create a leaf page. If leafSize is too large to fit on a page,
+* Create a leaf page. If leafSize is too large to fit on a page,
 * we won't actually use the page yet, but it simplifies the API
 * for doPickSplit to always have a leaf page at hand; so just
 * quietly limit our request to a page size.
@@ -1968,7 +1969,7 @@ spgdoinsert(Relation index, SpGistState *state,
 }
 else if ((sizeToSplit =
 checkSplitConditions(index, state, &current,
-&nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
+&nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
 nToSplit < 64 &&
 leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= SPGIST_PAGE_CAPACITY)
 {
@@ -2077,8 +2078,8 @@ spgdoinsert(Relation index, SpGistState *state,
 }

 /*
-* Loop around and attempt to insert the new leafDatum
-* at "current" (which might reference an existing child
+* Loop around and attempt to insert the new leafDatum at
+* "current" (which might reference an existing child
 * tuple, or might be invalid to force us to find a new
 * page for the tuple).
 *
@@ -2102,8 +2103,8 @@ spgdoinsert(Relation index, SpGistState *state,
 out.result.addNode.nodeLabel);

 /*
-* Retry insertion into the enlarged node. We assume
-* that we'll get a MatchNode result this time.
+* Retry insertion into the enlarged node. We assume that
+* we'll get a MatchNode result this time.
 */
 goto process_inner_tuple;
 break;
@@ -123,7 +123,7 @@ spgbuild(PG_FUNCTION_ARGS)
 buildstate.spgstate.isBuild = true;

 buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
-"SP-GiST build temporary context",
+"SP-GiST build temporary context",
 ALLOCSET_DEFAULT_MINSIZE,
 ALLOCSET_DEFAULT_INITSIZE,
 ALLOCSET_DEFAULT_MAXSIZE);
@@ -135,12 +135,12 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)

 /*
 * Note: points that have coordinates exactly equal to coord may get
-* classified into either node, depending on where they happen to fall
-* in the sorted list. This is okay as long as the inner_consistent
-* function descends into both sides for such cases. This is better
-* than the alternative of trying to have an exact boundary, because
-* it keeps the tree balanced even when we have many instances of the
-* same point value. So we should never trigger the allTheSame logic.
+* classified into either node, depending on where they happen to fall in
+* the sorted list. This is okay as long as the inner_consistent function
+* descends into both sides for such cases. This is better than the
+* alternative of trying to have an exact boundary, because it keeps the
+* tree balanced even when we have many instances of the same point value.
+* So we should never trigger the allTheSame logic.
 */
 for (i = 0; i < in->nTuples; i++)
 {
@@ -253,8 +253,8 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS)
 boxQuery = DatumGetBoxP(in->scankeys[i].sk_argument);

 if (DatumGetBool(DirectFunctionCall2(box_contain_pt,
-PointerGetDatum(boxQuery),
-PointerGetDatum(centroid))))
+PointerGetDatum(boxQuery),
+PointerGetDatum(centroid))))
 {
 /* centroid is in box, so all quadrants are OK */
 }
@@ -24,7 +24,7 @@


 typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr,
-Datum leafValue, bool isnull, bool recheck);
+Datum leafValue, bool isnull, bool recheck);

 typedef struct ScanStackEntry
 {
@@ -88,7 +88,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
 if (so->want_itup)
 {
 /* Must pfree IndexTuples to avoid memory leak */
-int i;
+int i;

 for (i = 0; i < so->nPtrs; i++)
 pfree(so->indexTups[i]);
@@ -102,7 +102,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
 * Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
 *
 * The point here is to eliminate null-related considerations from what the
-* opclass consistent functions need to deal with. We assume all SPGiST-
+* opclass consistent functions need to deal with. We assume all SPGiST-
 * indexable operators are strict, so any null RHS value makes the scan
 * condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL
 * conditions; their effect is reflected into searchNulls/searchNonNulls.
@@ -177,6 +177,7 @@ spgbeginscan(PG_FUNCTION_ARGS)
 {
 Relation rel = (Relation) PG_GETARG_POINTER(0);
 int keysz = PG_GETARG_INT32(1);
+
 /* ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); */
 IndexScanDesc scan;
 SpGistScanOpaque so;
@@ -457,7 +458,7 @@ redirect:
 MemoryContext oldCtx;

 innerTuple = (SpGistInnerTuple) PageGetItem(page,
-PageGetItemId(page, offset));
+PageGetItemId(page, offset));

 if (innerTuple->tupstate != SPGIST_LIVE)
 {
@@ -522,7 +523,7 @@ redirect:

 for (i = 0; i < out.nNodes; i++)
 {
-int nodeN = out.nodeNumbers[i];
+int nodeN = out.nodeNumbers[i];

 Assert(nodeN >= 0 && nodeN < in.nNodes);
 if (ItemPointerIsValid(&nodes[nodeN]->t_tid))
@@ -598,7 +599,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
 if (so->want_itup)
 {
 /*
-* Reconstruct desired IndexTuple. We have to copy the datum out of
+* Reconstruct desired IndexTuple. We have to copy the datum out of
 * the temp context anyway, so we may as well create the tuple here.
 */
 so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,
@@ -636,7 +637,7 @@ spggettuple(PG_FUNCTION_ARGS)
 if (so->want_itup)
 {
 /* Must pfree IndexTuples to avoid memory leak */
-int i;
+int i;

 for (i = 0; i < so->nPtrs; i++)
 pfree(so->indexTups[i]);
@@ -26,7 +26,7 @@
 * In the worst case, a inner tuple in a text suffix tree could have as many
 * as 256 nodes (one for each possible byte value). Each node can take 16
 * bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page
-* of size BLCKSZ. Rather than assuming we know the exact amount of overhead
+* of size BLCKSZ. Rather than assuming we know the exact amount of overhead
 * imposed by page headers, tuple headers, etc, we leave 100 bytes for that
 * (the actual overhead should be no more than 56 bytes at this writing, so
 * there is slop in this number). The upshot is that the maximum safe prefix
@@ -209,9 +209,9 @@ spg_text_choose(PG_FUNCTION_ARGS)
 {
 /*
 * Descend to existing node. (If in->allTheSame, the core code will
-* ignore our nodeN specification here, but that's OK. We still
-* have to provide the correct levelAdd and restDatum values, and
-* those are the same regardless of which node gets chosen by core.)
+* ignore our nodeN specification here, but that's OK. We still have
+* to provide the correct levelAdd and restDatum values, and those are
+* the same regardless of which node gets chosen by core.)
 */
 out->resultType = spgMatchNode;
 out->result.matchNode.nodeN = i;
@@ -227,10 +227,10 @@ spg_text_choose(PG_FUNCTION_ARGS)
 else if (in->allTheSame)
 {
 /*
-* Can't use AddNode action, so split the tuple. The upper tuple
-* has the same prefix as before and uses an empty node label for
-* the lower tuple. The lower tuple has no prefix and the same
-* node labels as the original tuple.
+* Can't use AddNode action, so split the tuple. The upper tuple has
+* the same prefix as before and uses an empty node label for the
+* lower tuple. The lower tuple has no prefix and the same node
+* labels as the original tuple.
 */
 out->resultType = spgSplitTuple;
 out->result.splitTuple.prefixHasPrefix = in->hasPrefix;
@@ -315,13 +315,13 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
 if (commonLen < VARSIZE_ANY_EXHDR(texti))
 nodes[i].c = *(uint8 *) (VARDATA_ANY(texti) + commonLen);
 else
-nodes[i].c = '\0'; /* use \0 if string is all common */
+nodes[i].c = '\0'; /* use \0 if string is all common */
 nodes[i].i = i;
 nodes[i].d = in->datums[i];
 }

 /*
-* Sort by label bytes so that we can group the values into nodes. This
+* Sort by label bytes so that we can group the values into nodes. This
 * also ensures that the nodes are ordered by label value, allowing the
 * use of binary search in searchChar.
 */
@@ -371,7 +371,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)

 /*
 * Reconstruct values represented at this tuple, including parent data,
-* prefix of this tuple if any, and the node label if any. in->level
+* prefix of this tuple if any, and the node label if any. in->level
 * should be the length of the previously reconstructed value, and the
 * number of bytes added here is prefixSize or prefixSize + 1.
 *
@@ -381,7 +381,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
 * long-format reconstructed values.
 */
 Assert(in->level == 0 ? DatumGetPointer(in->reconstructedValue) == NULL :
-VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level);
+VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level);

 maxReconstrLen = in->level + 1;
 if (in->hasPrefix)
@@ -530,7 +530,7 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
 }
 else
 {
-text *fullText = palloc(VARHDRSZ + fullLen);
+text *fullText = palloc(VARHDRSZ + fullLen);

 SET_VARSIZE(fullText, VARHDRSZ + fullLen);
 fullValue = VARDATA(fullText);
@@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
 *
 * When requesting an inner page, if we get one with the wrong parity,
 * we just release the buffer and try again. We will get a different page
-* because GetFreeIndexPage will have marked the page used in FSM. The page
+* because GetFreeIndexPage will have marked the page used in FSM. The page
 * is entered in our local lastUsedPages cache, so there's some hope of
 * making use of it later in this session, but otherwise we rely on VACUUM
 * to eventually re-enter the page in FSM, making it available for recycling.
@@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
 *
 * When we return a buffer to the caller, the page is *not* entered into
 * the lastUsedPages cache; we expect the caller will do so after it's taken
-* whatever space it will use. This is because after the caller has used up
+* whatever space it will use. This is because after the caller has used up
 * some space, the page might have less space than whatever was cached already
 * so we'd rather not trash the old cache entry.
 */
@@ -275,7 +275,7 @@ allocNewBuffer(Relation index, int flags)
 else
 {
 BlockNumber blkno = BufferGetBlockNumber(buffer);
-int blkFlags = GBUF_INNER_PARITY(blkno);
+int blkFlags = GBUF_INNER_PARITY(blkno);

 if ((flags & GBUF_PARITY_MASK) == blkFlags)
 {
@@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)

 /*
 * If possible, increase the space request to include relation's
-* fillfactor. This ensures that when we add unrelated tuples to a page,
+* fillfactor. This ensures that when we add unrelated tuples to a page,
 * we try to keep 100-fillfactor% available for adding tuples that are
 * related to the ones already on it. But fillfactor mustn't cause an
 * error for requests that would otherwise be legal.
@@ -664,7 +664,7 @@ spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix,
 errmsg("SPGiST inner tuple size %lu exceeds maximum %lu",
 (unsigned long) size,
 (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData))),
-errhint("Values larger than a buffer page cannot be indexed.")));
+errhint("Values larger than a buffer page cannot be indexed.")));

 /*
 * Check for overflow of header fields --- probably can't fail if the
@@ -801,7 +801,7 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size,
 for (; i <= maxoff; i++)
 {
 SpGistDeadTuple it = (SpGistDeadTuple) PageGetItem(page,
-PageGetItemId(page, i));
+PageGetItemId(page, i));

 if (it->tupstate == SPGIST_PLACEHOLDER)
 {
@@ -31,8 +31,8 @@
 /* Entry in pending-list of TIDs we need to revisit */
 typedef struct spgVacPendingItem
 {
-ItemPointerData tid; /* redirection target to visit */
-bool done; /* have we dealt with this? */
+ItemPointerData tid; /* redirection target to visit */
+bool done; /* have we dealt with this? */
 struct spgVacPendingItem *next; /* list link */
 } spgVacPendingItem;

@@ -46,10 +46,10 @@ typedef struct spgBulkDeleteState
 void *callback_state;

 /* Additional working state */
-SpGistState spgstate; /* for SPGiST operations that need one */
-spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
-TransactionId myXmin; /* for detecting newly-added redirects */
-TransactionId OldestXmin; /* for deciding a redirect is obsolete */
+SpGistState spgstate; /* for SPGiST operations that need one */
+spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
+TransactionId myXmin; /* for detecting newly-added redirects */
+TransactionId OldestXmin; /* for deciding a redirect is obsolete */
 BlockNumber lastFilledBlock; /* last non-deletable block */
 } spgBulkDeleteState;

@@ -213,7 +213,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
 * Figure out exactly what we have to do. We do this separately from
 * actually modifying the page, mainly so that we have a representation
 * that can be dumped into WAL and then the replay code can do exactly
-* the same thing. The output of this step consists of six arrays
+* the same thing. The output of this step consists of six arrays
 * describing four kinds of operations, to be performed in this order:
 *
 * toDead[]: tuple numbers to be replaced with DEAD tuples
@@ -276,8 +276,8 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
 else if (prevLive == InvalidOffsetNumber)
 {
 /*
-* This is the first live tuple in the chain. It has
-* to move to the head position.
+* This is the first live tuple in the chain. It has to move
+* to the head position.
 */
 moveSrc[xlrec.nMove] = j;
 moveDest[xlrec.nMove] = i;
@@ -289,7 +289,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
 else
 {
 /*
-* Second or later live tuple. Arrange to re-chain it to the
+* Second or later live tuple. Arrange to re-chain it to the
 * previous live one, if there was a gap.
 */
 if (interveningDeletable)
@@ -353,11 +353,11 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
 InvalidBlockNumber, InvalidOffsetNumber);

 /*
-* We implement the move step by swapping the item pointers of the
-* source and target tuples, then replacing the newly-source tuples
-* with placeholders. This is perhaps unduly friendly with the page
-* data representation, but it's fast and doesn't risk page overflow
-* when a tuple to be relocated is large.
+* We implement the move step by swapping the item pointers of the source
+* and target tuples, then replacing the newly-source tuples with
+* placeholders. This is perhaps unduly friendly with the page data
+* representation, but it's fast and doesn't risk page overflow when a
+* tuple to be relocated is large.
 */
 for (i = 0; i < xlrec.nMove; i++)
 {
@@ -518,7 +518,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer,
 */
 for (i = max;
 i >= FirstOffsetNumber &&
-(opaque->nRedirection > 0 || !hasNonPlaceholder);
+(opaque->nRedirection > 0 || !hasNonPlaceholder);
 i--)
 {
 SpGistDeadTuple dt;
@@ -651,9 +651,9 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno)

 /*
 * The root pages must never be deleted, nor marked as available in FSM,
-* because we don't want them ever returned by a search for a place to
-* put a new tuple. Otherwise, check for empty/deletable page, and
-* make sure FSM knows about it.
+* because we don't want them ever returned by a search for a place to put
+* a new tuple. Otherwise, check for empty/deletable page, and make sure
+* FSM knows about it.
 */
 if (!SpGistBlockIsRoot(blkno))
 {
@@ -688,7 +688,7 @@ spgprocesspending(spgBulkDeleteState *bds)
 Relation index = bds->info->index;
 spgVacPendingItem *pitem;
 spgVacPendingItem *nitem;
-BlockNumber blkno;
+BlockNumber blkno;
 Buffer buffer;
 Page page;

@@ -741,11 +741,11 @@ spgprocesspending(spgBulkDeleteState *bds)
 else
 {
 /*
-* On an inner page, visit the referenced inner tuple and add
-* all its downlinks to the pending list. We might have pending
-* items for more than one inner tuple on the same page (in fact
-* this is pretty likely given the way space allocation works),
-* so get them all while we are here.
+* On an inner page, visit the referenced inner tuple and add all
+* its downlinks to the pending list. We might have pending items
+* for more than one inner tuple on the same page (in fact this is
+* pretty likely given the way space allocation works), so get
+* them all while we are here.
 */
 for (nitem = pitem; nitem != NULL; nitem = nitem->next)
 {
@@ -774,7 +774,7 @@ spgprocesspending(spgBulkDeleteState *bds)
 {
 /* transfer attention to redirect point */
 spgAddPendingTID(bds,
-&((SpGistDeadTuple) innerTuple)->pointer);
+&((SpGistDeadTuple) innerTuple)->pointer);
 }
 else
 elog(ERROR, "unexpected SPGiST tuple state: %d",
@@ -825,8 +825,8 @@ spgvacuumscan(spgBulkDeleteState *bds)
 * physical order (we hope the kernel will cooperate in providing
 * read-ahead for speed). It is critical that we visit all leaf pages,
 * including ones added after we start the scan, else we might fail to
-* delete some deletable tuples. See more extensive comments about
-* this in btvacuumscan().
+* delete some deletable tuples. See more extensive comments about this
+* in btvacuumscan().
 */
 blkno = SPGIST_METAPAGE_BLKNO + 1;
 for (;;)
@@ -40,7 +40,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
 }

 /*
-* Add a leaf tuple, or replace an existing placeholder tuple. This is used
+* Add a leaf tuple, or replace an existing placeholder tuple. This is used
 * to replay SpGistPageAddNewItem() operations. If the offset points at an
 * existing tuple, it had better be a placeholder tuple.
 */
@@ -50,7 +50,7 @@ addOrReplaceTuple(Page page, Item tuple, int size, OffsetNumber offset)
 if (offset <= PageGetMaxOffsetNumber(page))
 {
 SpGistDeadTuple dt = (SpGistDeadTuple) PageGetItem(page,
-PageGetItemId(page, offset));
+PageGetItemId(page, offset));

 if (dt->tupstate != SPGIST_PLACEHOLDER)
 elog(ERROR, "SPGiST tuple to be replaced is not a placeholder");
@@ -126,7 +126,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)

 if (xldata->newPage)
 SpGistInitBuffer(buffer,
-SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));

 if (!XLByteLE(lsn, PageGetLSN(page)))
 {
@@ -143,7 +143,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
 SpGistLeafTuple head;

 head = (SpGistLeafTuple) PageGetItem(page,
-PageGetItemId(page, xldata->offnumHeadLeaf));
+PageGetItemId(page, xldata->offnumHeadLeaf));
 Assert(head->nextOffset == leafTuple->nextOffset);
 head->nextOffset = xldata->offnumLeaf;
 }
@@ -154,7 +154,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
 PageIndexTupleDelete(page, xldata->offnumLeaf);
 if (PageAddItem(page,
 (Item) leafTuple, leafTuple->size,
-xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
+xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
 elog(ERROR, "failed to add item of size %u to SPGiST index page",
 leafTuple->size);
 }
@@ -180,7 +180,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
 SpGistInnerTuple tuple;

 tuple = (SpGistInnerTuple) PageGetItem(page,
-PageGetItemId(page, xldata->offnumParent));
+PageGetItemId(page, xldata->offnumParent));

 spgUpdateNodeLink(tuple, xldata->nodeI,
 xldata->blknoLeaf, xldata->offnumLeaf);
@@ -229,7 +229,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)

 if (xldata->newPage)
 SpGistInitBuffer(buffer,
-SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));

 if (!XLByteLE(lsn, PageGetLSN(page)))
 {
@@ -261,7 +261,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
 if (!XLByteLE(lsn, PageGetLSN(page)))
 {
 spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
-state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
+state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
 SPGIST_PLACEHOLDER,
 xldata->blknoDst,
 toInsert[nInsert - 1]);
@@ -286,7 +286,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
 SpGistInnerTuple tuple;

 tuple = (SpGistInnerTuple) PageGetItem(page,
-PageGetItemId(page, xldata->offnumParent));
+PageGetItemId(page, xldata->offnumParent));

 spgUpdateNodeLink(tuple, xldata->nodeI,
 xldata->blknoDst, toInsert[nInsert - 1]);
@@ -413,7 +413,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
 }

 /*
-* Update parent downlink. Since parent could be in either of the
+* Update parent downlink. Since parent could be in either of the
 * previous two buffers, it's a bit tricky to determine which BKP bit
 * applies.
 */
@@ -435,7 +435,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
 SpGistInnerTuple innerTuple;

 innerTuple = (SpGistInnerTuple) PageGetItem(page,
-PageGetItemId(page, xldata->offnumParent));
+PageGetItemId(page, xldata->offnumParent));

 spgUpdateNodeLink(innerTuple, xldata->nodeI,
 xldata->blknoNew, xldata->offnumNew);
@@ -504,7 +504,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
 {
 PageIndexTupleDelete(page, xldata->offnumPrefix);
 if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size,
-xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
+xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
 elog(ERROR, "failed to add item of size %u to SPGiST index page",
 prefixTuple->size);

@@ -571,7 +571,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 page = (Page) BufferGetPage(srcBuffer);

 SpGistInitBuffer(srcBuffer,
-SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
 /* don't update LSN etc till we're done with it */
 }
 else
@@ -587,8 +587,8 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 {
 /*
 * We have it a bit easier here than in doPickSplit(),
-* because we know the inner tuple's location already,
-* so we can inject the correct redirection tuple now.
+* because we know the inner tuple's location already, so
+* we can inject the correct redirection tuple now.
 */
 if (!state.isBuild)
 spgPageIndexMultiDelete(&state, page,
@@ -627,7 +627,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 page = (Page) BufferGetPage(destBuffer);

 SpGistInitBuffer(destBuffer,
-SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
 /* don't update LSN etc till we're done with it */
 }
 else
@@ -707,9 +707,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 SpGistInnerTuple parent;

 parent = (SpGistInnerTuple) PageGetItem(page,
-PageGetItemId(page, xldata->offnumParent));
+PageGetItemId(page, xldata->offnumParent));
 spgUpdateNodeLink(parent, xldata->nodeI,
-xldata->blknoInner, xldata->offnumInner);
+xldata->blknoInner, xldata->offnumInner);
 }

 PageSetLSN(page, lsn);
@@ -742,9 +742,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 SpGistInnerTuple parent;

 parent = (SpGistInnerTuple) PageGetItem(page,
-PageGetItemId(page, xldata->offnumParent));
+PageGetItemId(page, xldata->offnumParent));
 spgUpdateNodeLink(parent, xldata->nodeI,
-xldata->blknoInner, xldata->offnumInner);
+xldata->blknoInner, xldata->offnumInner);

 PageSetLSN(page, lsn);
 PageSetTLI(page, ThisTimeLineID);
@@ -803,7 +803,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)

 spgPageIndexMultiDelete(&state, page,
 toPlaceholder, xldata->nPlaceholder,
-SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
 InvalidBlockNumber,
 InvalidOffsetNumber);

@@ -821,7 +821,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)

 spgPageIndexMultiDelete(&state, page,
 moveSrc, xldata->nMove,
-SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
 InvalidBlockNumber,
 InvalidOffsetNumber);

@@ -906,7 +906,7 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
 SpGistDeadTuple dt;

 dt = (SpGistDeadTuple) PageGetItem(page,
-PageGetItemId(page, itemToPlaceholder[i]));
+PageGetItemId(page, itemToPlaceholder[i]));
 Assert(dt->tupstate == SPGIST_REDIRECT);
 dt->tupstate = SPGIST_PLACEHOLDER;
 ItemPointerSetInvalid(&dt->pointer);
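Nearly all of the churn above is mechanical comment reflowing: pgindent refills each multi-line comment block to its standard width, so when one sentence boundary moves, every following line of the block shifts as well. A minimal before/after sketch of the pattern, lifted from the addLeafTuple hunk above (original indentation and exact inter-word spacing were lost in page extraction, so they are approximated here):

    /* before pgindent: hand-wrapped a few columns short of the limit */
    /*
     * Tuple must be inserted into existing chain. We mustn't change
     * the chain's head address, but we don't need to chase the entire
     * chain to put the tuple at the end; we can insert it second.
     */

    /* after pgindent: the same text refilled to the standard width */
    /*
     * Tuple must be inserted into existing chain. We mustn't change the
     * chain's head address, but we don't need to chase the entire chain
     * to put the tuple at the end; we can insert it second.
     */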