pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
@@ -1382,7 +1382,7 @@ initGISTstate(Relation index)
    /*
     * If the index column has a specified collation, we should honor that
     * while doing comparisons. However, we may have a collatable storage
     * type for a noncollatable indexed data type. If there's no index
     * collation then specify default collation in case the support
     * functions need collation. This is harmless if the support
     * functions don't care about collation, so we just do it
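For context, the decision this comment describes boils down to a two-way branch in initGISTstate. A minimal sketch, assuming the 9.4-era field names (rd_indcollation, supportCollation):

    /* honor the index column's collation if set, else fall back to default */
    if (OidIsValid(index->rd_indcollation[i]))
        giststate->supportCollation[i] = index->rd_indcollation[i];
    else
        giststate->supportCollation[i] = DEFAULT_COLLATION_OID;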
@@ -31,7 +31,7 @@
 *
 * On success return for a heap tuple, *recheck_p is set to indicate
 * whether recheck is needed. We recheck if any of the consistent() functions
 * request it. recheck is not interesting when examining a non-leaf entry,
 * since we must visit the lower index page if there's any doubt.
 *
 * If we are doing an ordered scan, so->distances[] is filled with distance
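To make the *recheck_p contract concrete, here is a sketch of the per-key consistent() call in gistindex_keytest; the surrounding variables (key, de, test, recheck) are assumed from that function, and the argument list follows the GiST consistent signature (entry, query, strategy, subtype, &recheck):

    /* safest assumption, in case the consistent function forgets to set it */
    recheck = true;

    test = FunctionCall5Coll(&key->sk_func,
                             key->sk_collation,
                             PointerGetDatum(&de),
                             key->sk_argument,
                             Int32GetDatum(key->sk_strategy),
                             ObjectIdGetDatum(key->sk_subtype),
                             PointerGetDatum(&recheck));

    if (!DatumGetBool(test))
        return false;           /* tuple fails this qual */
    *recheck_p |= recheck;      /* any one consistent() can force a recheck */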
@@ -62,7 +62,7 @@ gistindex_keytest(IndexScanDesc scan,

    /*
     * If it's a leftover invalid tuple from pre-9.1, treat it as a match with
     * minimum possible distances. This means we'll always follow it to the
     * referenced page.
     */
    if (GistTupleIsInvalid(tuple))
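The code guarded by this test is short; a sketch assuming the 9.4-era names (so, scan, get_float8_infinity):

    if (GistTupleIsInvalid(tuple))
    {
        int         i;

        /* report a match, with the smallest possible ORDER BY distances */
        for (i = 0; i < scan->numberOfOrderBys; i++)
            so->distances[i] = -get_float8_infinity();
        return true;            /* always descend into the referenced page */
    }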
@@ -224,7 +224,7 @@ gistindex_keytest(IndexScanDesc scan,
 * ntids: if not NULL, gistgetbitmap's output tuple counter
 *
 * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
 * tuples should be reported directly into the bitmap. If they are NULL,
 * we're doing a plain or ordered indexscan. For a plain indexscan, heap
 * tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
 * heap tuple TIDs are pushed into individual search queue items.
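A sketch of the reporting branches this paragraph describes, as they appear in gistScanPage (the locals it, recheck, and so are assumed from that function; tbm_add_tuples is the real TIDBitmap entry point):

    if (tbm && GistPageIsLeaf(page))
    {
        /* amgetbitmap scan: push the heap TID straight into the bitmap */
        tbm_add_tuples(tbm, &it->t_tid, 1, recheck);
        (*ntids)++;
    }
    else if (scan->numberOfOrderBys == 0 && GistPageIsLeaf(page))
    {
        /* plain indexscan: queue the heap TID in so->pageData[] */
        so->pageData[so->nPageData].heapPtr = it->t_tid;
        so->pageData[so->nPageData].recheck = recheck;
        so->nPageData++;
    }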
@@ -56,7 +56,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
    /*
     * If new item is heap tuple, it goes to front of chain; otherwise insert
     * it before the first index-page item, so that index pages are visited in
     * LIFO order, ensuring depth-first search of index pages. See comments
     * in gist_private.h.
     */
    if (GISTSearchItemIsHeap(*newitem))
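The chain manipulation the comment describes, sketched with the GISTSearchTreeItem fields assumed from the 9.4-era gist_private.h (head, lastHeap, next):

    if (GISTSearchItemIsHeap(*newitem))
    {
        /* heap tuples go to the front of the chain */
        newitem->next = scurrent->head;
        scurrent->head = newitem;
        if (scurrent->lastHeap == NULL)
            scurrent->lastHeap = newitem;
    }
    else if (scurrent->lastHeap == NULL)
    {
        /* no heap items yet: new index-page item becomes the head (LIFO) */
        newitem->next = scurrent->head;
        scurrent->head = newitem;
    }
    else
    {
        /* insert just after the last heap item, before older index pages */
        newitem->next = scurrent->lastHeap->next;
        scurrent->lastHeap->next = newitem;
    }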
@@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
 * Recompute unions of left- and right-side subkeys after a page split,
 * ignoring any tuples that are marked in spl->spl_dontcare[].
 *
 * Note: we always recompute union keys for all index columns. In some cases
 * this might represent duplicate work for the leftmost column(s), but it's
 * not safe to assume that "zero penalty to move a tuple" means "the union
 * key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,

/*
 * Remove tuples that are marked don't-cares from the tuple index array a[]
 * of length *len. This is applied separately to the spl_left and spl_right
 * arrays.
 */
static void
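Since removeDontCares is only a few lines, here is the whole function as a sketch reconstructed from the 9.4-era gistsplit.c; treat it as illustrative rather than the exact committed text:

static void
removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
{
    int         origlen,
                newlen,
                i;
    OffsetNumber *curwpos;

    origlen = newlen = *len;
    curwpos = a;
    for (i = 0; i < origlen; i++)
    {
        OffsetNumber ai = a[i];

        if (dontcare[ai] == FALSE)
        {
            /* keep this offset: re-emit it into a[] */
            *curwpos = ai;
            curwpos++;
        }
        else
            newlen--;
    }

    *len = newlen;
}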
@@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
 * Place a single don't-care tuple into either the left or right side of the
 * split, according to which has least penalty for merging the tuple into
 * the previously-computed union keys. We need consider only columns starting
 * at attno.
 */
static void
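A sketch of the per-column penalty comparison behind this comment, using the real helpers gistpenalty and gistentryinit; the locals (identry, isnull, toLeft) and the exact loop shape are assumptions based on the 9.4-era placeOne:

    bool        toLeft = true;

    for (; attno < giststate->tupdesc->natts; attno++)
    {
        float       lpenalty,
                    rpenalty;
        GISTENTRY   entry;

        gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE);
        lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno],
                               identry + attno, isnull[attno]);

        gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE);
        rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno],
                               identry + attno, isnull[attno]);

        if (lpenalty != rpenalty)
        {
            if (lpenalty > rpenalty)
                toLeft = false;     /* the right side is the cheaper merge */
            break;
        }
        /* equal penalties: defer to the next column */
    }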
@@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,

        /*
         * There is only one previously defined union, so we just choose swap
         * or not by lowest penalty for that side. We can only get here if a
         * secondary split happened to have all NULLs in its column in the
         * tuples that the outer recursion level had assigned to one side.
         * (Note that the null checks in gistSplitByKey don't prevent the
@@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
    sv->spl_rdatum = v->spl_rattr[attno];

    /*
     * Let the opclass-specific PickSplit method do its thing. Note that at
     * this point we know there are no null keys in the entryvec.
     */
    FunctionCall2Coll(&giststate->picksplitFn[attno],
@@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
     * some inserts to go to other equally-good subtrees.
     *
     * keep_current_best is -1 if we haven't yet had to make a random choice
     * whether to keep the current best tuple. If we have done so, and
     * decided to keep it, keep_current_best is 1; if we've decided to
     * replace, keep_current_best is 0. (This state will be reset to -1 as
     * soon as we've made the replacement, but sometimes we make the choice in
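The "random choice" mentioned here happens further down in gistchoose; a sketch using the backend's random() and MAX_RANDOM_VALUE:

    if (keep_current_best == -1)
    {
        /* decide once, 50/50, whether to stick with the current best */
        keep_current_best = (random() <= (MAX_RANDOM_VALUE / 2)) ? 1 : 0;
    }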
@@ -456,7 +456,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
        {
            /*
             * New best penalty for column. Tentatively select this tuple
             * as the target, and record the best penalty. Then reset the
             * next column's penalty to "unknown" (and indirectly, the
             * same for all the ones to its right). This will force us to
             * adopt this tuple's penalty values as the best for all the
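The reset the comment promises is a one-liner; a sketch assuming the 9.4-era locals (result, i, j, usize, best_penalty[], with -1 as the "unknown" marker):

            result = i;                 /* tentatively adopt this tuple */
            best_penalty[j] = usize;    /* record its penalty for column j */
            if (j < r->rd_att->natts - 1)
                best_penalty[j + 1] = -1;   /* next column's best is unknown */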
@@ -475,7 +475,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
        {
            /*
             * The current tuple is exactly as good for this column as the
             * best tuple seen so far. The next iteration of this loop
             * will compare the next column.
             */
        }
@@ -681,7 +681,7 @@ gistcheckpage(Relation rel, Buffer buf)
    /*
     * ReadBuffer verifies that every newly-read page passes
     * PageHeaderIsValid, which means it either contains a reasonably sane
     * page header or is all-zero. We have to defend against the all-zero
     * case, however.
     */
    if (PageIsNew(page))
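The defense against the all-zero case is the ereport just below; sketched from the 9.4-era gistutil.c:

    if (PageIsNew(page))
        ereport(ERROR,
                (errcode(ERRCODE_INDEX_CORRUPTED),
                 errmsg("index \"%s\" contains unexpected zero page at block %u",
                        RelationGetRelationName(rel),
                        BufferGetBlockNumber(buf)),
                 errhint("Please REINDEX it.")));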
@@ -49,7 +49,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
        stats->estimated_count = info->estimated_count;

        /*
         * XXX the above is wrong if index is partial. Would it be OK to just
         * return NULL, or is there work we must do below?
         */
    }
@@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */
 * follow-right flag, because that change is not included in the full-page
 * image. To be sure that the intermediate state with the wrong flag value is
 * not visible to concurrent Hot Standby queries, this function handles
 * restoring the full-page image as well as updating the flag. (Note that
 * we never need to do anything else to the child page in the current WAL
 * action.)
 */
@@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)

    /*
     * We need to acquire and hold lock on target page while updating the left
     * child page. If we have a full-page image of target page, getting the
     * lock is a side-effect of restoring that image. Note that even if the
     * target page no longer exists, we'll still attempt to replay the change
     * on the child page.
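In the pre-9.5 WAL format, "getting the lock as a side-effect of restoring the image" means going through RestoreBackupBlock; a sketch, with the record layout (xldata->node, xldata->blkno) assumed from gistxlog.c:

    if (record->xl_info & XLR_BKP_BLOCK(0))
        buffer = RestoreBackupBlock(lsn, record, 0, false, true);
    else
        buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);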
@@ -387,6 +387,7 @@ gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf,

    for (ptr = dist; ptr; ptr = ptr->next)
        npage++;

    /*
     * the caller should've checked this already, but doesn't hurt to check
     * again.