Mirror of https://github.com/postgres/postgres.git
Remove tabs after spaces in C comments
This was not changed in HEAD, but will be done later as part of a pgindent run. Future pgindent runs will also do this.

Report by Tom Lane

Backpatch through all supported branches, but not HEAD
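Only whitespace differs between the removed and added lines in the hunks below, so the change itself is not visible in the rendered text. As a rough illustration of the pattern being cleaned up (a space immediately followed by a tab), here is a minimal, hypothetical checker; it is not the tool used for this commit, and unlike pgindent it is not comment-aware, so it flags the pattern anywhere in a file.

/*
 * Sketch only: report each place where a space is immediately followed
 * by a tab, the whitespace pattern this commit removes from C comments.
 * Not comment-aware, unlike pgindent.
 */
#include <stdio.h>

int
main(int argc, char **argv)
{
	FILE   *fp;
	int		prev = '\0';
	int		c;
	int		line = 1;

	if (argc != 2 || (fp = fopen(argv[1], "r")) == NULL)
	{
		fprintf(stderr, "usage: %s file.c\n", argv[0]);
		return 1;
	}

	while ((c = fgetc(fp)) != EOF)
	{
		if (prev == ' ' && c == '\t')
			printf("%s:%d: tab after space\n", argv[1], line);
		if (c == '\n')
			line++;
		prev = c;
	}

	fclose(fp);
	return 0;
}

A quick equivalent with GNU grep would be "grep -rnP ' \t' src/", which likewise ignores whether the match is inside a comment.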
@@ -1372,7 +1372,7 @@ initGISTstate(Relation index)
 /*
  * If the index column has a specified collation, we should honor that
  * while doing comparisons. However, we may have a collatable storage
- * type for a noncollatable indexed data type. If there's no index
+ * type for a noncollatable indexed data type. If there's no index
  * collation then specify default collation in case the support
  * functions need collation. This is harmless if the support
  * functions don't care about collation, so we just do it
@@ -31,7 +31,7 @@
  *
  * On success return for a heap tuple, *recheck_p is set to indicate
  * whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
  * since we must visit the lower index page if there's any doubt.
  *
  * If we are doing an ordered scan, so->distances[] is filled with distance
@@ -62,7 +62,7 @@ gistindex_keytest(IndexScanDesc scan,

 /*
  * If it's a leftover invalid tuple from pre-9.1, treat it as a match with
- * minimum possible distances. This means we'll always follow it to the
+ * minimum possible distances. This means we'll always follow it to the
  * referenced page.
  */
 if (GistTupleIsInvalid(tuple))
@@ -224,7 +224,7 @@ gistindex_keytest(IndexScanDesc scan,
  * ntids: if not NULL, gistgetbitmap's output tuple counter
  *
  * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
  * we're doing a plain or ordered indexscan. For a plain indexscan, heap
  * tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
  * heap tuple TIDs are pushed into individual search queue items.
@@ -56,7 +56,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
 /*
  * If new item is heap tuple, it goes to front of chain; otherwise insert
  * it before the first index-page item, so that index pages are visited in
- * LIFO order, ensuring depth-first search of index pages. See comments
+ * LIFO order, ensuring depth-first search of index pages. See comments
  * in gist_private.h.
  */
 if (GISTSearchItemIsHeap(*newitem))
@@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
  * Recompute unions of left- and right-side subkeys after a page split,
  * ignoring any tuples that are marked in spl->spl_dontcare[].
  *
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns. In some cases
  * this might represent duplicate work for the leftmost column(s), but it's
  * not safe to assume that "zero penalty to move a tuple" means "the union
  * key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,

 /*
  * Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len. This is applied separately to the spl_left and spl_right
  * arrays.
  */
 static void
@@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
 /*
  * Place a single don't-care tuple into either the left or right side of the
  * split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need consider only columns starting
+ * the previously-computed union keys. We need consider only columns starting
  * at attno.
  */
 static void
@@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,

 /*
  * There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side. We can only get here if a
  * secondary split happened to have all NULLs in its column in the
  * tuples that the outer recursion level had assigned to one side.
  * (Note that the null checks in gistSplitByKey don't prevent the
@@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
 sv->spl_rdatum = v->spl_rattr[attno];

 /*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing. Note that at
  * this point we know there are no null keys in the entryvec.
  */
 FunctionCall2Coll(&giststate->picksplitFn[attno],
@@ -430,7 +430,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
 {
 /*
  * New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty. Then reset the
  * next column's penalty to "unknown" (and indirectly, the
  * same for all the ones to its right). This will force us to
  * adopt this tuple's penalty values as the best for all the
@@ -446,7 +446,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
 {
 /*
  * The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far. The next iteration of this loop
  * will compare the next column.
  */
 }
@@ -623,7 +623,7 @@ gistcheckpage(Relation rel, Buffer buf)
 /*
  * ReadBuffer verifies that every newly-read page passes
  * PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
  * case, however.
  */
 if (PageIsNew(page))
@@ -49,7 +49,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
 stats->estimated_count = info->estimated_count;

 /*
- * XXX the above is wrong if index is partial. Would it be OK to just
+ * XXX the above is wrong if index is partial. Would it be OK to just
  * return NULL, or is there work we must do below?
  */
 }