Standard pgindent run for 8.1.
--- src/backend/access/rtree/rtget.c
+++ src/backend/access/rtree/rtget.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/rtree/rtget.c,v 1.36 2005/10/06 02:29:14 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/rtree/rtget.c,v 1.37 2005/10/15 02:49:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -32,12 +32,12 @@ rtgettuple(PG_FUNCTION_ARGS)
     IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
     ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
     RTreeScanOpaque so = (RTreeScanOpaque) s->opaque;
-    Page    page;
+    Page        page;
     OffsetNumber offnum;

     /*
-     * If we've already produced a tuple and the executor has informed
-     * us that it should be marked "killed", do so now.
+     * If we've already produced a tuple and the executor has informed us that
+     * it should be marked "killed", do so now.
      */
     if (s->kill_prior_tuple && ItemPointerIsValid(&(s->currentItemData)))
     {
@@ -48,14 +48,13 @@ rtgettuple(PG_FUNCTION_ARGS)
     }

     /*
-     * Get the next tuple that matches the search key; if asked to
-     * skip killed tuples, find the first non-killed tuple that
-     * matches. Return as soon as we've run out of matches or we've
-     * found an acceptable match.
+     * Get the next tuple that matches the search key; if asked to skip killed
+     * tuples, find the first non-killed tuple that matches. Return as soon as
+     * we've run out of matches or we've found an acceptable match.
      */
     for (;;)
     {
-        bool    res = rtnext(s, dir);
+        bool        res = rtnext(s, dir);

         if (res && s->ignore_killed_tuples)
         {
@@ -73,7 +72,7 @@ Datum
 rtgetmulti(PG_FUNCTION_ARGS)
 {
     IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
-    ItemPointer  tids = (ItemPointer) PG_GETARG_POINTER(1);
+    ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
     int32        max_tids = PG_GETARG_INT32(2);
     int32       *returned_tids = (int32 *) PG_GETARG_POINTER(3);
     RTreeScanOpaque so = (RTreeScanOpaque) s->opaque;
@@ -86,7 +85,7 @@ rtgetmulti(PG_FUNCTION_ARGS)
         res = rtnext(s, ForwardScanDirection);
         if (res && s->ignore_killed_tuples)
         {
-            Page    page;
+            Page        page;
             OffsetNumber offnum;

             offnum = ItemPointerGetOffsetNumber(&(s->currentItemData));
@@ -201,12 +200,11 @@ rtnext(IndexScanDesc s, ScanDirection dir)
             blk = ItemPointerGetBlockNumber(&(it->t_tid));

             /*
-             * Note that we release the pin on the page as we descend
-             * down the tree, even though there's a good chance we'll
-             * eventually need to re-read the buffer later in this
-             * scan. This may or may not be optimal, but it doesn't
-             * seem likely to make a huge performance difference
-             * either way.
+             * Note that we release the pin on the page as we descend down the
+             * tree, even though there's a good chance we'll eventually need
+             * to re-read the buffer later in this scan. This may or may not
+             * be optimal, but it doesn't seem likely to make a huge
+             * performance difference either way.
              */
             so->curbuf = ReleaseAndReadBuffer(so->curbuf, s->indexRelation, blk);
             p = BufferGetPage(so->curbuf);
@@ -233,7 +231,7 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir)
     IndexTuple it;
     RTreePageOpaque po;
     RTreeScanOpaque so;
-    Page    p;
+    Page        p;

     so = (RTreeScanOpaque) s->opaque;
     p = BufferGetPage(so->curbuf);
@@ -242,8 +240,8 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir)
     po = (RTreePageOpaque) PageGetSpecialPointer(p);

     /*
-     * If we modified the index during the scan, we may have a pointer to
-     * a ghost tuple, before the scan. If this is the case, back up one.
+     * If we modified the index during the scan, we may have a pointer to a
+     * ghost tuple, before the scan. If this is the case, back up one.
      */

     if (so->s_flags & RTS_CURBEFORE)
@@ -277,7 +275,7 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir)
     }

     if (n >= FirstOffsetNumber && n <= maxoff)
-        return n;            /* found a match on this page */
+        return n;                /* found a match on this page */
     else
         return InvalidOffsetNumber;        /* no match, go to next page */
 }

--- src/backend/access/rtree/rtproc.c
+++ src/backend/access/rtree/rtproc.c
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/rtree/rtproc.c,v 1.42 2004/12/31 21:59:26 pgsql Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/rtree/rtproc.c,v 1.43 2005/10/15 02:49:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -146,8 +146,8 @@ rt_poly_size(PG_FUNCTION_ARGS)
                 ydim;

     /*
-     * Can't just use GETARG because of possibility that input is NULL;
-     * since POLYGON is toastable, GETARG will try to inspect its value
+     * Can't just use GETARG because of possibility that input is NULL; since
+     * POLYGON is toastable, GETARG will try to inspect its value
      */
     if (aptr == NULL)
     {
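The comment above is about fmgr argument handling: a typed GETARG macro would detoast its argument, which is unsafe when the datum pointer can be NULL, so the function inspects the raw pointer first. A minimal sketch of the pattern, assuming the 8.1-era fmgr macros (the variable names and the *size output parameter follow the function's signature, but the body here is illustrative, not the exact source):

Datum
rt_poly_size(PG_FUNCTION_ARGS)
{
    Pointer     aptr = PG_GETARG_POINTER(0);    /* raw pointer, may be NULL */
    float      *size = (float *) PG_GETARG_POINTER(1);
    POLYGON    *a;

    if (aptr == NULL)
        *size = 0.0;            /* NULL input: nothing to measure */
    else
    {
        /* Safe to detoast now that we know the pointer is valid. */
        a = DatumGetPolygonP(PointerGetDatum(aptr));
        /* ... compute the bounding-box area into *size ... */
    }
    PG_RETURN_VOID();
}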

--- src/backend/access/rtree/rtree.c
+++ src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.91 2005/08/10 21:36:46 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.92 2005/10/15 02:49:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -121,8 +121,8 @@ rtbuild(PG_FUNCTION_ARGS)
     initRtstate(&buildstate.rtState, index);

     /*
-     * We expect to be called exactly once for any index relation. If
-     * that's not the case, big trouble's what we have.
+     * We expect to be called exactly once for any index relation. If that's
+     * not the case, big trouble's what we have.
      */
     if (RelationGetNumberOfBlocks(index) != 0)
         elog(ERROR, "index \"%s\" already contains data",
@@ -175,10 +175,10 @@ rtbuildCallback(Relation index,

     /*
      * Since we already have the index relation locked, we call rtdoinsert
-     * directly. Normal access method calls dispatch through rtinsert,
-     * which locks the relation for write. This is the right thing to do
-     * if you're inserting single tups, but not when you're initializing
-     * the whole index at once.
+     * directly. Normal access method calls dispatch through rtinsert, which
+     * locks the relation for write. This is the right thing to do if you're
+     * inserting single tups, but not when you're initializing the whole index
+     * at once.
      */
     rtdoinsert(index, itup, &buildstate->rtState);

@@ -226,9 +226,8 @@ rtinsert(PG_FUNCTION_ARGS)
     initRtstate(&rtState, r);

     /*
-     * Since rtree is not marked "amconcurrent" in pg_am, caller should
-     * have acquired exclusive lock on index relation. We need no locking
-     * here.
+     * Since rtree is not marked "amconcurrent" in pg_am, caller should have
+     * acquired exclusive lock on index relation. We need no locking here.
      */
     rtdoinsert(r, itup, &rtState);

@@ -331,7 +330,7 @@ rttighten(Relation r,
     p = BufferGetPage(b);

     oldud = IndexTupleGetDatum(PageGetItem(p,
-                                PageGetItemId(p, stk->rts_child)));
+                                           PageGetItemId(p, stk->rts_child)));

     FunctionCall2(&rtstate->sizeFn, oldud,
                   PointerGetDatum(&old_size));
@@ -342,8 +341,8 @@ rttighten(Relation r,
                   PointerGetDatum(&newd_size));

     /*
-     * If newd_size == 0 we have degenerate rectangles, so we don't know
-     * if there was any change, so we have to assume there was.
+     * If newd_size == 0 we have degenerate rectangles, so we don't know if
+     * there was any change, so we have to assume there was.
      */
     if ((newd_size == 0) || (newd_size != old_size))
     {
@@ -370,8 +369,8 @@ rttighten(Relation r,
         /*
          * The user may be defining an index on variable-sized data (like
          * polygons). If so, we need to get a constant-sized datum for
-         * insertion on the internal page. We do this by calling the
-         * union proc, which is required to return a rectangle.
+         * insertion on the internal page. We do this by calling the union
+         * proc, which is required to return a rectangle.
          */
         tdatum = FunctionCall2(&rtstate->unionFn, datum, datum);

@@ -428,8 +427,8 @@ rtdosplit(Relation r,

     /*
      * The root of the tree is the first block in the relation. If we're
-     * about to split the root, we need to do some hocus-pocus to enforce
-     * this guarantee.
+     * about to split the root, we need to do some hocus-pocus to enforce this
+     * guarantee.
      */

     if (BufferGetBlockNumber(buffer) == P_ROOT)
@@ -459,10 +458,9 @@ rtdosplit(Relation r,
     newitemoff = OffsetNumberNext(maxoff);

     /*
-     * spl_left contains a list of the offset numbers of the tuples that
-     * will go to the left page. For each offset number, get the tuple
-     * item, then add the item to the left page. Similarly for the right
-     * side.
+     * spl_left contains a list of the offset numbers of the tuples that will
+     * go to the left page. For each offset number, get the tuple item, then
+     * add the item to the left page. Similarly for the right side.
      */

     /* fill left node */
@@ -525,13 +523,13 @@ rtdosplit(Relation r,
      * introduced in its structure by splitting this page.
      *
      * 2) "Tighten" the bounding box of the pointer to the left page in the
-     * parent node in the tree, if any. Since we moved a bunch of stuff
-     * off the left page, we expect it to get smaller. This happens in
-     * the internal insertion routine.
+     * parent node in the tree, if any. Since we moved a bunch of stuff off
+     * the left page, we expect it to get smaller. This happens in the
+     * internal insertion routine.
      *
-     * 3) Insert a pointer to the right page in the parent. This may cause
-     * the parent to split. If it does, we need to repeat steps one and
-     * two for each split node in the tree.
+     * 3) Insert a pointer to the right page in the parent. This may cause the
+     * parent to split. If it does, we need to repeat steps one and two for
+     * each split node in the tree.
      */

     /* adjust active scans */
@@ -583,10 +581,10 @@ rtintinsert(Relation r,
     old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));

     /*
-     * This is a hack. Right now, we force rtree internal keys to be
-     * constant size. To fix this, need delete the old key and add both
-     * left and right for the two new pages. The insertion of left may
-     * force a split if the new left key is bigger than the old key.
+     * This is a hack. Right now, we force rtree internal keys to be constant
+     * size. To fix this, need delete the old key and add both left and right
+     * for the two new pages. The insertion of left may force a split if the
+     * new left key is bigger than the old key.
      */

     if (IndexTupleSize(old) != IndexTupleSize(ltup))
@@ -603,8 +601,7 @@ rtintinsert(Relation r,
         rttighten(r, stk->rts_parent, newdatum,
                   IndexTupleAttSize(ltup), rtstate);
         rtdosplit(r, b, stk->rts_parent, rtup, rtstate);
-        WriteBuffer(b);            /* don't forget to release buffer! -
-                                 * 01/31/94 */
+        WriteBuffer(b);            /* don't forget to release buffer! - 01/31/94 */
     }
     else
     {
@@ -716,16 +713,15 @@ rtpicksplit(Relation r,
     int            total_num_tuples,
                 num_tuples_without_seeds,
                 max_after_split;    /* in Guttman's lingo, (M - m) */
-    float        diff;            /* diff between cost of putting tuple left
-                                 * or right */
+    float        diff;            /* diff between cost of putting tuple left or
+                                 * right */
     SPLITCOST  *cost_vector;
     int            n;

     /*
-     * First, make sure the new item is not so large that we can't
-     * possibly fit it on a page, even by itself. (It's sufficient to
-     * make this test here, since any oversize tuple must lead to a page
-     * split attempt.)
+     * First, make sure the new item is not so large that we can't possibly
+     * fit it on a page, even by itself. (It's sufficient to make this test
+     * here, since any oversize tuple must lead to a page split attempt.)
      */
     newitemsz = IndexTupleTotalSize(itup);
     if (newitemsz > RTPageAvailSpace)
@@ -734,11 +730,10 @@ rtpicksplit(Relation r,
                  errmsg("index row size %lu exceeds rtree maximum, %lu",
                         (unsigned long) newitemsz,
                         (unsigned long) RTPageAvailSpace),
-            errhint("Values larger than a buffer page cannot be indexed.")));
+                 errhint("Values larger than a buffer page cannot be indexed.")));

     maxoff = PageGetMaxOffsetNumber(page);
-    newitemoff = OffsetNumberNext(maxoff);        /* phony index for new
-                                                 * item */
+    newitemoff = OffsetNumberNext(maxoff);        /* phony index for new item */
     total_num_tuples = newitemoff;
     num_tuples_without_seeds = total_num_tuples - 2;
     max_after_split = total_num_tuples / 2;        /* works for m = M/2 */
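To make the arithmetic in this hunk concrete: if the page already holds 9 tuples, maxoff is 9, so newitemoff = 10 serves as the phony index of the incoming tuple and total_num_tuples = 10. num_tuples_without_seeds = 8 is what remains once the two seeds are placed, and max_after_split = 5 caps how many tuples either side may take before the m = M/2 condition forces the rest to the other side.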
@@ -793,8 +788,7 @@ rtpicksplit(Relation r,
             pfree(DatumGetPointer(inter_d));

             /*
-             * are these a more promising split that what we've already
-             * seen?
+             * are these a more promising split that what we've already seen?
              */
             if (size_waste > waste || firsttime)
             {
@@ -809,10 +803,10 @@ rtpicksplit(Relation r,
     if (firsttime)
     {
         /*
-         * There is no possible split except to put the new item on its
-         * own page. Since we still have to compute the union rectangles,
-         * we play dumb and run through the split algorithm anyway,
-         * setting seed_1 = first item on page and seed_2 = new item.
+         * There is no possible split except to put the new item on its own
+         * page. Since we still have to compute the union rectangles, we play
+         * dumb and run through the split algorithm anyway, setting seed_1 =
+         * first item on page and seed_2 = new item.
          */
         seed_1 = FirstOffsetNumber;
         seed_2 = newitemoff;
@@ -840,25 +834,23 @@ rtpicksplit(Relation r,
     /*
      * Now split up the regions between the two seeds.
      *
-     * The cost_vector array will contain hints for determining where each
-     * tuple should go. Each record in the array will contain a boolean,
-     * choose_left, that indicates which node the tuple prefers to be on,
-     * and the absolute difference in cost between putting the tuple in
-     * its favored node and in the other node.
+     * The cost_vector array will contain hints for determining where each tuple
+     * should go. Each record in the array will contain a boolean,
+     * choose_left, that indicates which node the tuple prefers to be on, and
+     * the absolute difference in cost between putting the tuple in its
+     * favored node and in the other node.
      *
      * Later, we will sort the cost_vector in descending order by cost
-     * difference, and consider the tuples in that order for placement.
-     * That way, the tuples that *really* want to be in one node or the
-     * other get to choose first, and the tuples that don't really care
-     * choose last.
+     * difference, and consider the tuples in that order for placement. That
+     * way, the tuples that *really* want to be in one node or the other get
+     * to choose first, and the tuples that don't really care choose last.
      *
      * First, build the cost_vector array. The new index tuple will also be
-     * handled in this loop, and represented in the array, with
-     * i==newitemoff.
+     * handled in this loop, and represented in the array, with i==newitemoff.
      *
-     * In the case of variable size tuples it is possible that we only have
-     * the two seeds and no other tuples, in which case we don't do any of
-     * this cost_vector stuff.
+     * In the case of variable size tuples it is possible that we only have the
+     * two seeds and no other tuples, in which case we don't do any of this
+     * cost_vector stuff.
      */

     /* to keep compiler quiet */
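The record layout and ordering this comment promises are, in essence, the following. The SPLITCOST struct is the one named in this file; the comparator body is a sketch of the descending sort using the usual qsort idiom, not a quotation of the source:

typedef struct
{
    OffsetNumber offset_number;        /* which tuple this entry describes */
    float        cost_differential;    /* |cost of favored side - cost of other| */
    bool         choose_left;          /* the side this tuple prefers */
} SPLITCOST;

/*
 * qsort comparator: bigger cost differences sort first, so the tuples
 * that care most about their placement get to choose first.
 */
static int
compare_cost_vector_elements(const void *a, const void *b)
{
    const SPLITCOST *sa = (const SPLITCOST *) a;
    const SPLITCOST *sb = (const SPLITCOST *) b;

    if (sa->cost_differential > sb->cost_differential)
        return -1;
    if (sa->cost_differential < sb->cost_differential)
        return 1;
    return 0;
}

/* usage: qsort(cost_vector, num_tuples_without_seeds, sizeof(SPLITCOST),
 *              compare_cost_vector_elements); */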
@@ -908,13 +900,13 @@ rtpicksplit(Relation r,
     }

     /*
-     * Now make the final decisions about where each tuple will go, and
-     * build the vectors to return in the SPLITVEC record.
+     * Now make the final decisions about where each tuple will go, and build
+     * the vectors to return in the SPLITVEC record.
      *
-     * The cost_vector array contains (descriptions of) all the tuples, in
-     * the order that we want to consider them, so we we just iterate
-     * through it and place each tuple in left or right nodes, according
-     * to the criteria described below.
+     * The cost_vector array contains (descriptions of) all the tuples, in the
+     * order that we want to consider them, so we we just iterate through it
+     * and place each tuple in left or right nodes, according to the criteria
+     * described below.
      */

     left = v->spl_left;
@@ -923,8 +915,8 @@ rtpicksplit(Relation r,
     v->spl_nright = 0;

     /*
-     * Place the seeds first. left avail space, left union, right avail
-     * space, and right union have already been adjusted for the seeds.
+     * Place the seeds first. left avail space, left union, right avail space,
+     * and right union have already been adjusted for the seeds.
      */

     *left++ = seed_1;
@@ -966,32 +958,30 @@ rtpicksplit(Relation r,
                           PointerGetDatum(&size_beta));

             /*
-             * We prefer the page that shows smaller enlargement of its union
-             * area (Guttman's algorithm), but we must take care that at least
-             * one page will still have room for the new item after this one
-             * is added.
+             * We prefer the page that shows smaller enlargement of its union area
+             * (Guttman's algorithm), but we must take care that at least one page
+             * will still have room for the new item after this one is added.
              *
-             * (We know that all the old items together can fit on one page, so
-             * we need not worry about any other problem than failing to fit
-             * the new item.)
+             * (We know that all the old items together can fit on one page, so we
+             * need not worry about any other problem than failing to fit the new
+             * item.)
              *
-             * Guttman's algorithm actually has two factors to consider (in
-             * order): 1. if one node has so many tuples already assigned to
-             * it that the other needs all the rest in order to satisfy the
-             * condition that neither node has fewer than m tuples, then that
-             * is decisive; 2. otherwise, choose the page that shows the
-             * smaller enlargement of its union area.
+             * Guttman's algorithm actually has two factors to consider (in order):
+             * 1. if one node has so many tuples already assigned to it that the
+             * other needs all the rest in order to satisfy the condition that
+             * neither node has fewer than m tuples, then that is decisive; 2.
+             * otherwise, choose the page that shows the smaller enlargement of
+             * its union area.
              *
-             * I have chosen m = M/2, where M is the maximum number of tuples on
-             * a page. (Actually, this is only strictly true for fixed size
-             * tuples. For variable size tuples, there still might have to be
-             * only one tuple on a page, if it is really big. But even with
-             * variable size tuples we still try to get m as close as possible
-             * to M/2.)
+             * I have chosen m = M/2, where M is the maximum number of tuples on a
+             * page. (Actually, this is only strictly true for fixed size tuples.
+             * For variable size tuples, there still might have to be only one
+             * tuple on a page, if it is really big. But even with variable size
+             * tuples we still try to get m as close as possible to M/2.)
              *
-             * The question of which page shows the smaller enlargement of its
-             * union area has already been answered, and the answer stored in
-             * the choose_left field of the SPLITCOST record.
+             * The question of which page shows the smaller enlargement of its union
+             * area has already been answered, and the answer stored in the
+             * choose_left field of the SPLITCOST record.
              */
             left_feasible = (left_avail_space >= item_1_sz &&
                              ((left_avail_space - item_1_sz) >= newitemsz ||
@@ -1003,9 +993,8 @@ rtpicksplit(Relation r,
             {
                 /*
                  * Both feasible, use Guttman's algorithm. First check the m
-                 * condition described above, and if that doesn't apply,
-                 * choose the page with the smaller enlargement of its union
-                 * area.
+                 * condition described above, and if that doesn't apply, choose
+                 * the page with the smaller enlargement of its union area.
                  */
                 if (v->spl_nleft > max_after_split)
                     choose_left = false;
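Condensed, the feasibility-plus-Guttman rule described in the two comments above comes down to the following shape. Variable names follow the surrounding diff; the cost_vector indexing and the error wording are assumptions of this sketch, not the file's exact code:

/* Sketch of the per-tuple placement decision described above. */
if (left_feasible && right_feasible)
{
    /*
     * Guttman factor 1, the m condition: if one side already holds so many
     * tuples that the other needs all the rest to reach m = M/2 tuples,
     * that is decisive.
     */
    if (v->spl_nleft > max_after_split)
        choose_left = false;
    else if (v->spl_nright > max_after_split)
        choose_left = true;
    else
        /* Guttman factor 2: take the side whose union area grows less. */
        choose_left = cost_vector[n].choose_left;
}
else if (left_feasible)
    choose_left = true;
else if (right_feasible)
    choose_left = false;
else
    elog(ERROR, "failed to find a workable rtree page split");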
@@ -1153,9 +1142,8 @@ rtbulkdelete(PG_FUNCTION_ARGS)
     num_index_tuples = 0;

     /*
-     * Since rtree is not marked "amconcurrent" in pg_am, caller should
-     * have acquired exclusive lock on index relation. We need no locking
-     * here.
+     * Since rtree is not marked "amconcurrent" in pg_am, caller should have
+     * acquired exclusive lock on index relation. We need no locking here.
      */

     /*

--- src/backend/access/rtree/rtscan.c
+++ src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.59 2005/06/24 00:18:52 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.60 2005/10/15 02:49:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -123,11 +123,11 @@ rtrescan(PG_FUNCTION_ARGS)

         /*
          * Scans on internal pages use different operators than they do on
-         * leaf pages. For example, if the user wants all boxes that
-         * exactly match (x1,y1,x2,y2), then on internal pages we need to
-         * find all boxes that contain (x1,y1,x2,y2). rtstrat.c knows
-         * how to pick the opclass member to use for internal pages.
-         * In some cases we need to negate the result of the opclass member.
+         * leaf pages. For example, if the user wants all boxes that exactly
+         * match (x1,y1,x2,y2), then on internal pages we need to find all
+         * boxes that contain (x1,y1,x2,y2). rtstrat.c knows how to pick the
+         * opclass member to use for internal pages. In some cases we need to
+         * negate the result of the opclass member.
          */
         for (i = 0; i < s->numberOfKeys; i++)
         {
@@ -333,9 +333,9 @@ ReleaseResources_rtree(void)
     RTScanList    next;

     /*
-     * Note: this should be a no-op during normal query shutdown. However,
-     * in an abort situation ExecutorEnd is not called and so there may be
-     * open index scans to clean up.
+     * Note: this should be a no-op during normal query shutdown. However, in
+     * an abort situation ExecutorEnd is not called and so there may be open
+     * index scans to clean up.
      */
     prev = NULL;

@@ -440,8 +440,7 @@ adjustiptr(IndexScanDesc s,
             else
             {
                 /*
-                 * remember that we're before the current
-                 * tuple
+                 * remember that we're before the current tuple
                  */
                 ItemPointerSet(iptr, blkno, FirstOffsetNumber);
                 if (iptr == &(s->currentItemData))