
pgindent run for 8.2.

Author: Bruce Momjian
Date: 2006-10-04 00:30:14 +00:00
parent 451e419e98
commit f99a569a2e

522 changed files with 21297 additions and 17170 deletions
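Every hunk below is mechanical reformatting: pgindent refills comment blocks toward the 79-column limit, re-aligns local variable declarations to a fixed column, and normalizes brace placement, for example splitting "do {" onto two lines as in the nbtutils.c hunk near the end of this page. As a hedged illustration only (the function below is invented for this note, not taken from the commit), code formatted the way pgindent leaves it looks like this:

	#include <stdio.h>

	/*
	 * Hypothetical helper, formatted in pgindent style: declarations are
	 * aligned to one column and comments are refilled, matching the
	 * rewrapped comment hunks below.
	 */
	static int
	next_nonzero(int *counter)
	{
		int			result;

		/* skip zero, mirroring the do-while loop reformatted in nbtutils.c */
		do
		{
			result = ++(*counter);
		} while (result == 0);

		return result;
	}

	int
	main(void)
	{
		int			ctr = -2;

		printf("%d\n", next_nonzero(&ctr));	/* prints -1 */
		return 0;
	}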

src/backend/access/nbtree/nbtinsert.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.143 2006/08/25 04:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.144 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -252,7 +252,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
*/
htup.t_self = itup->t_tid;
if (heap_fetch(heapRel, SnapshotSelf, &htup, &hbuffer,
- false, NULL))
+ false, NULL))
{
/* Normal case --- it's still live */
ReleaseBuffer(hbuffer);
@@ -355,7 +355,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* If 'afteritem' is >0 then the new tuple must be inserted after the
@@ -608,7 +608,7 @@ _bt_insertonpg(Relation rel,
if (!rel->rd_istemp)
{
xl_btree_insert xlrec;
- BlockNumber xldownlink;
+ BlockNumber xldownlink;
xl_btree_metadata xlmeta;
uint8 xlinfo;
XLogRecPtr recptr;
@@ -888,16 +888,17 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
if (sopaque->btpo_prev != ropaque->btpo_prev)
elog(PANIC, "right sibling's left-link doesn't match");
/*
* Check to see if we can set the SPLIT_END flag in the right-hand
* split page; this can save some I/O for vacuum since it need not
* proceed to the right sibling. We can set the flag if the right
- * sibling has a different cycleid: that means it could not be part
- * of a group of pages that were all split off from the same ancestor
+ * sibling has a different cycleid: that means it could not be part of
+ * a group of pages that were all split off from the same ancestor
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@@ -911,8 +912,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* NO EREPORT(ERROR) till right sibling is updated. We can get away with
* not starting the critical section till here because we haven't been
- * scribbling on the original page yet, and we don't care about the
- * new sibling until it's linked into the btree.
+ * scribbling on the original page yet, and we don't care about the new
+ * sibling until it's linked into the btree.
*/
START_CRIT_SECTION();
@@ -947,8 +948,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
* themselves, knowing that the item pointers are in the same order
- * and can be reconstructed by scanning the tuples. See comments
- * for _bt_restore_page().
+ * and can be reconstructed by scanning the tuples. See comments for
+ * _bt_restore_page().
*/
xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
((PageHeader) leftpage)->pd_upper;
@@ -1708,17 +1709,17 @@ _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum,
static void
_bt_vacuum_one_page(Relation rel, Buffer buffer)
{
- OffsetNumber deletable[MaxOffsetNumber];
- int ndeletable = 0;
- OffsetNumber offnum,
- minoff,
- maxoff;
- Page page = BufferGetPage(buffer);
- BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ OffsetNumber deletable[MaxOffsetNumber];
+ int ndeletable = 0;
+ OffsetNumber offnum,
+ minoff,
+ maxoff;
+ Page page = BufferGetPage(buffer);
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Scan over all items to see which ones need deleted
- * according to LP_DELETE flags.
+ * Scan over all items to see which ones need deleted according to
+ * LP_DELETE flags.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
@@ -1726,7 +1727,7 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemId = PageGetItemId(page, offnum);
+ ItemId itemId = PageGetItemId(page, offnum);
if (ItemIdDeleted(itemId))
deletable[ndeletable++] = offnum;
@@ -1734,10 +1735,11 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
if (ndeletable > 0)
_bt_delitems(rel, buffer, deletable, ndeletable);
/*
* Note: if we didn't find any LP_DELETE items, then the page's
- * BTP_HAS_GARBAGE hint bit is falsely set. We do not bother
- * expending a separate write to clear it, however. We will clear
- * it when we split the page.
+ * BTP_HAS_GARBAGE hint bit is falsely set. We do not bother expending a
+ * separate write to clear it, however. We will clear it when we split
+ * the page.
*/
}

src/backend/access/nbtree/nbtpage.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.99 2006/07/25 19:13:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.100 2006/10/04 00:29:49 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -124,10 +124,10 @@ _bt_getroot(Relation rel, int access)
/*
* Since the cache might be stale, we check the page more carefully
- * here than normal. We *must* check that it's not deleted.
- * If it's not alone on its level, then we reject too --- this
- * may be overly paranoid but better safe than sorry. Note we
- * don't check P_ISROOT, because that's not set in a "fast root".
+ * here than normal. We *must* check that it's not deleted. If it's
+ * not alone on its level, then we reject too --- this may be overly
+ * paranoid but better safe than sorry. Note we don't check P_ISROOT,
+ * because that's not set in a "fast root".
*/
if (!P_IGNORE(rootopaque) &&
rootopaque->btpo.level == rootlevel &&
@@ -662,18 +662,18 @@ _bt_delitems(Relation rel, Buffer buf,
PageIndexMultiDelete(page, itemnos, nitems);
/*
- * We can clear the vacuum cycle ID since this page has certainly
- * been processed by the current vacuum scan.
+ * We can clear the vacuum cycle ID since this page has certainly been
+ * processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DELETE items. This is not
- * certainly true (there might be some that have recently been marked,
- * but weren't included in our target-item list), but it will almost
- * always be true and it doesn't seem worth an additional page scan
- * to check it. Remember that BTP_HAS_GARBAGE is only a hint anyway.
+ * certainly true (there might be some that have recently been marked, but
+ * weren't included in our target-item list), but it will almost always be
+ * true and it doesn't seem worth an additional page scan to check it.
+ * Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

src/backend/access/nbtree/nbtree.c

@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.151 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.152 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ typedef struct
BlockNumber *freePages;
int nFreePages; /* number of entries in freePages[] */
int maxFreePages; /* allocated size of freePages[] */
- BlockNumber totFreePages; /* true total # of free pages */
+ BlockNumber totFreePages; /* true total # of free pages */
MemoryContext pagedelcontext;
} BTVacState;
@@ -70,7 +70,7 @@ static void btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state,
BTCycleId cycleid);
static void btvacuumpage(BTVacState *vstate, BlockNumber blkno,
- BlockNumber orig_blkno);
+ BlockNumber orig_blkno);
/*
@@ -109,8 +109,8 @@ btbuild(PG_FUNCTION_ARGS)
buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique, false);
/*
- * If building a unique index, put dead tuples in a second spool to
- * keep them out of the uniqueness check.
+ * If building a unique index, put dead tuples in a second spool to keep
+ * them out of the uniqueness check.
*/
if (indexInfo->ii_Unique)
buildstate.spool2 = _bt_spoolinit(index, false, true);
@@ -146,11 +146,11 @@ btbuild(PG_FUNCTION_ARGS)
#endif /* BTREE_BUILD_STATS */
/*
- * If we are reindexing a pre-existing index, it is critical to send out
- * a relcache invalidation SI message to ensure all backends re-read the
- * index metapage. We expect that the caller will ensure that happens
- * (typically as a side effect of updating index stats, but it must
- * happen even if the stats don't change!)
+ * If we are reindexing a pre-existing index, it is critical to send out a
+ * relcache invalidation SI message to ensure all backends re-read the
+ * index metapage. We expect that the caller will ensure that happens
+ * (typically as a side effect of updating index stats, but it must happen
+ * even if the stats don't change!)
*/
/*
@@ -252,11 +252,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
- * Yes, remember it for later. (We'll deal with all such tuples
+ * Yes, remember it for later. (We'll deal with all such tuples
* at once right before leaving the index page.) The test for
* numKilled overrun is not just paranoia: if the caller reverses
* direction in the indexscan then the same item might get entered
- * multiple times. It's not worth trying to optimize that, so we
+ * multiple times. It's not worth trying to optimize that, so we
* don't detect it, but instead just forget any excess entries.
*/
if (so->killedItems == NULL)
@@ -316,8 +316,8 @@ btgetmulti(PG_FUNCTION_ARGS)
while (ntids < max_tids)
{
/*
- * Advance to next tuple within page. This is the same as the
- * easy case in _bt_next().
+ * Advance to next tuple within page. This is the same as the easy
+ * case in _bt_next().
*/
if (++so->currPos.itemIndex > so->currPos.lastItem)
{
@@ -373,7 +373,7 @@ btrescan(PG_FUNCTION_ARGS)
so->keyData = (ScanKey) palloc(scan->numberOfKeys * sizeof(ScanKeyData));
else
so->keyData = NULL;
- so->killedItems = NULL; /* until needed */
+ so->killedItems = NULL; /* until needed */
so->numKilled = 0;
scan->opaque = so;
}
@@ -461,9 +461,9 @@ btmarkpos(PG_FUNCTION_ARGS)
/*
* Just record the current itemIndex. If we later step to next page
- * before releasing the marked position, _bt_steppage makes a full copy
- * of the currPos struct in markPos. If (as often happens) the mark is
- * moved before we leave the page, we don't have to do that work.
+ * before releasing the marked position, _bt_steppage makes a full copy of
+ * the currPos struct in markPos. If (as often happens) the mark is moved
+ * before we leave the page, we don't have to do that work.
*/
if (BTScanPosIsValid(so->currPos))
so->markItemIndex = so->currPos.itemIndex;
@@ -485,11 +485,11 @@ btrestrpos(PG_FUNCTION_ARGS)
if (so->markItemIndex >= 0)
{
/*
- * The mark position is on the same page we are currently on.
- * Just restore the itemIndex.
+ * The mark position is on the same page we are currently on. Just
+ * restore the itemIndex.
*/
so->currPos.itemIndex = so->markItemIndex;
}
else
{
/* we aren't holding any read locks, but gotta drop the pin */
@@ -527,7 +527,7 @@ Datum
btbulkdelete(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
- IndexBulkDeleteResult * volatile stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ IndexBulkDeleteResult *volatile stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
void *callback_state = (void *) PG_GETARG_POINTER(3);
Relation rel = info->index;
@@ -569,10 +569,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
/*
- * If btbulkdelete was called, we need not do anything, just return
- * the stats from the latest btbulkdelete call. If it wasn't called,
- * we must still do a pass over the index, to recycle any newly-recyclable
- * pages and to obtain index statistics.
+ * If btbulkdelete was called, we need not do anything, just return the
+ * stats from the latest btbulkdelete call. If it wasn't called, we must
+ * still do a pass over the index, to recycle any newly-recyclable pages
+ * and to obtain index statistics.
*
* Since we aren't going to actually delete any leaf items, there's no
* need to go through all the vacuum-cycle-ID pushups.
@@ -586,8 +586,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* During a non-FULL vacuum it's quite possible for us to be fooled by
* concurrent page splits into double-counting some index tuples, so
- * disbelieve any total that exceeds the underlying heap's count.
- * (We can't check this during btbulkdelete.)
+ * disbelieve any total that exceeds the underlying heap's count. (We
+ * can't check this during btbulkdelete.)
*/
if (!info->vacuum_full)
{
@@ -622,8 +622,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
bool needLock;
/*
- * Reset counts that will be incremented during the scan; needed in
- * case of multiple scans during a single VACUUM command
+ * Reset counts that will be incremented during the scan; needed in case
+ * of multiple scans during a single VACUUM command
*/
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
@@ -647,24 +647,24 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * The outer loop iterates over all index pages except the metapage,
- * in physical order (we hope the kernel will cooperate in providing
+ * The outer loop iterates over all index pages except the metapage, in
+ * physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
* delete some deletable tuples. Hence, we must repeatedly check the
* relation length. We must acquire the relation-extension lock while
* doing so to avoid a race condition: if someone else is extending the
* relation, there is a window where bufmgr/smgr have created a new
- * all-zero page but it hasn't yet been write-locked by _bt_getbuf().
- * If we manage to scan such a page here, we'll improperly assume it can
- * be recycled. Taking the lock synchronizes things enough to prevent a
+ * all-zero page but it hasn't yet been write-locked by _bt_getbuf(). If
+ * we manage to scan such a page here, we'll improperly assume it can be
+ * recycled. Taking the lock synchronizes things enough to prevent a
* problem: either num_pages won't include the new page, or _bt_getbuf
* already has write lock on the buffer and it will be fully initialized
* before we can examine it. (See also vacuumlazy.c, which has the same
- * issue.) Also, we need not worry if a page is added immediately after
+ * issue.) Also, we need not worry if a page is added immediately after
* we look; the page splitting code already has write-lock on the left
- * page before it adds a right page, so we must already have processed
- * any tuples due to be moved into such a page.
+ * page before it adds a right page, so we must already have processed any
+ * tuples due to be moved into such a page.
*
* We can skip locking for new or temp relations, however, since no one
* else could be accessing them.
@@ -771,7 +771,7 @@ btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno)
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool delete_now;
- BlockNumber recurse_to;
+ BlockNumber recurse_to;
Buffer buf;
Page page;
BTPageOpaque opaque;
@@ -796,10 +796,10 @@ restart:
_bt_checkpage(rel, buf);
/*
- * If we are recursing, the only case we want to do anything with is
- * a live leaf page having the current vacuum cycle ID. Any other state
- * implies we already saw the page (eg, deleted it as being empty).
- * In particular, we don't want to risk adding it to freePages twice.
+ * If we are recursing, the only case we want to do anything with is a
+ * live leaf page having the current vacuum cycle ID. Any other state
+ * implies we already saw the page (eg, deleted it as being empty). In
+ * particular, we don't want to risk adding it to freePages twice.
*/
if (blkno != orig_blkno)
{
@@ -838,25 +838,24 @@ restart:
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable;
OffsetNumber offnum,
- minoff,
- maxoff;
+ minoff,
+ maxoff;
/*
- * Trade in the initial read lock for a super-exclusive write
- * lock on this page. We must get such a lock on every leaf page
- * over the course of the vacuum scan, whether or not it actually
- * contains any deletable tuples --- see nbtree/README.
+ * Trade in the initial read lock for a super-exclusive write lock on
+ * this page. We must get such a lock on every leaf page over the
+ * course of the vacuum scan, whether or not it actually contains any
+ * deletable tuples --- see nbtree/README.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
/*
- * Check whether we need to recurse back to earlier pages. What
- * we are concerned about is a page split that happened since we
- * started the vacuum scan. If the split moved some tuples to a
- * lower page then we might have missed 'em. If so, set up for
- * tail recursion. (Must do this before possibly clearing
- * btpo_cycleid below!)
+ * Check whether we need to recurse back to earlier pages. What we
+ * are concerned about is a page split that happened since we started
+ * the vacuum scan. If the split moved some tuples to a lower page
+ * then we might have missed 'em. If so, set up for tail recursion.
+ * (Must do this before possibly clearing btpo_cycleid below!)
*/
if (vstate->cycleid != 0 &&
opaque->btpo_cycleid == vstate->cycleid &&
@@ -866,8 +865,8 @@ restart:
recurse_to = opaque->btpo_next;
/*
- * Scan over all items to see which ones need deleted
- * according to the callback function.
+ * Scan over all items to see which ones need deleted according to the
+ * callback function.
*/
ndeletable = 0;
minoff = P_FIRSTDATAKEY(opaque);
@@ -890,8 +889,8 @@ restart:
}
/*
- * Apply any needed deletes. We issue just one _bt_delitems()
- * call per page, so as to minimize WAL traffic.
+ * Apply any needed deletes. We issue just one _bt_delitems() call
+ * per page, so as to minimize WAL traffic.
*/
if (ndeletable > 0)
{
@@ -908,8 +907,8 @@ restart:
* have any deletions to do. (If we do, _bt_delitems takes care
* of this.) This ensures we won't process the page again.
*
- * We treat this like a hint-bit update because there's no need
- * to WAL-log it.
+ * We treat this like a hint-bit update because there's no need to
+ * WAL-log it.
*/
if (vstate->cycleid != 0 &&
opaque->btpo_cycleid == vstate->cycleid)
@@ -920,10 +919,10 @@ restart:
}
/*
- * If it's now empty, try to delete; else count the live tuples.
- * We don't delete when recursing, though, to avoid putting entries
- * into freePages out-of-order (doesn't seem worth any extra code to
- * handle the case).
+ * If it's now empty, try to delete; else count the live tuples. We
+ * don't delete when recursing, though, to avoid putting entries into
+ * freePages out-of-order (doesn't seem worth any extra code to handle
+ * the case).
*/
if (minoff > maxoff)
delete_now = (blkno == orig_blkno);
@@ -947,13 +946,12 @@ restart:
stats->pages_deleted++;
/*
- * During VACUUM FULL it's okay to recycle deleted pages
- * immediately, since there can be no other transactions scanning
- * the index. Note that we will only recycle the current page and
- * not any parent pages that _bt_pagedel might have recursed to;
- * this seems reasonable in the name of simplicity. (Trying to do
- * otherwise would mean we'd have to sort the list of recyclable
- * pages we're building.)
+ * During VACUUM FULL it's okay to recycle deleted pages immediately,
+ * since there can be no other transactions scanning the index. Note
+ * that we will only recycle the current page and not any parent pages
+ * that _bt_pagedel might have recursed to; this seems reasonable in
+ * the name of simplicity. (Trying to do otherwise would mean we'd
+ * have to sort the list of recyclable pages we're building.)
*/
if (ndel && info->vacuum_full)
{
@@ -969,11 +967,11 @@ restart:
_bt_relbuf(rel, buf);
/*
- * This is really tail recursion, but if the compiler is too stupid
- * to optimize it as such, we'd eat an uncomfortably large amount of
- * stack space per recursion level (due to the deletable[] array).
- * A failure is improbable since the number of levels isn't likely to be
- * large ... but just in case, let's hand-optimize into a loop.
+ * This is really tail recursion, but if the compiler is too stupid to
+ * optimize it as such, we'd eat an uncomfortably large amount of stack
+ * space per recursion level (due to the deletable[] array). A failure is
+ * improbable since the number of levels isn't likely to be large ... but
+ * just in case, let's hand-optimize into a loop.
*/
if (recurse_to != P_NONE)
{

src/backend/access/nbtree/nbtsearch.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.106 2006/08/24 01:18:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.107 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,7 @@
static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
- OffsetNumber offnum);
+ OffsetNumber offnum);
static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir);
static Buffer _bt_walk_left(Relation rel, Buffer buf);
static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
@@ -417,7 +417,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@@ -604,7 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
{
ScanKey cur = startKeys[i];
- Assert(cur->sk_attno == i+1);
+ Assert(cur->sk_attno == i + 1);
if (cur->sk_flags & SK_ROW_HEADER)
{
@@ -612,16 +612,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* Row comparison header: look to the first row member instead.
*
* The member scankeys are already in insertion format (ie, they
- * have sk_func = 3-way-comparison function), but we have to
- * watch out for nulls, which _bt_preprocess_keys didn't check.
- * A null in the first row member makes the condition unmatchable,
- * just like qual_ok = false.
+ * have sk_func = 3-way-comparison function), but we have to watch
+ * out for nulls, which _bt_preprocess_keys didn't check. A null
+ * in the first row member makes the condition unmatchable, just
+ * like qual_ok = false.
*/
cur = (ScanKey) DatumGetPointer(cur->sk_argument);
Assert(cur->sk_flags & SK_ROW_MEMBER);
if (cur->sk_flags & SK_ISNULL)
return false;
memcpy(scankeys + i, cur, sizeof(ScanKeyData));
/*
* If the row comparison is the last positioning key we accepted,
* try to add additional keys from the lower-order row members.
@@ -833,10 +834,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed.
- * (If this results in an offset before the first item or after the last
- * one, _bt_readpage will report no items found, and then we'll step to
- * the next page as needed.)
+ * the last item on this page. Adjust the starting offset if needed. (If
+ * this results in an offset before the first item or after the last one,
+ * _bt_readpage will report no items found, and then we'll step to the
+ * next page as needed.)
*/
if (goback)
offnum = OffsetNumberPrev(offnum);
@@ -882,8 +883,8 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
BTScanOpaque so = (BTScanOpaque) scan->opaque;
/*
- * Advance to next tuple on current page; or if there's no more,
- * try to step to the next page with data.
+ * Advance to next tuple on current page; or if there's no more, try to
+ * step to the next page with data.
*/
if (ScanDirectionIsForward(dir))
{
@@ -954,8 +955,8 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
/*
* we must save the page's right-link while scanning it; this tells us
- * where to step right to after we're done with these items. There is
- * no corresponding need for the left-link, since splits always go right.
+ * where to step right to after we're done with these items. There is no
+ * corresponding need for the left-link, since splits always go right.
*/
so->currPos.nextPage = opaque->btpo_next;
@@ -1055,8 +1056,8 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
_bt_killitems(scan, true);
/*
- * Before we modify currPos, make a copy of the page data if there
- * was a mark position that needs it.
+ * Before we modify currPos, make a copy of the page data if there was a
+ * mark position that needs it.
*/
if (so->markItemIndex >= 0)
{
@@ -1112,11 +1113,11 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
so->currPos.moreRight = true;
/*
- * Walk left to the next page with data. This is much more
- * complex than the walk-right case because of the possibility
- * that the page to our left splits while we are in flight to it,
- * plus the possibility that the page we were on gets deleted
- * after we leave it. See nbtree/README for details.
+ * Walk left to the next page with data. This is much more complex
+ * than the walk-right case because of the possibility that the page
+ * to our left splits while we are in flight to it, plus the
+ * possibility that the page we were on gets deleted after we leave
+ * it. See nbtree/README for details.
*/
for (;;)
{
@@ -1136,9 +1137,9 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
return false;
/*
- * Okay, we managed to move left to a non-deleted page.
- * Done if it's not half-dead and contains matching tuples.
- * Else loop back and do it all again.
+ * Okay, we managed to move left to a non-deleted page. Done if
+ * it's not half-dead and contains matching tuples. Else loop back
+ * and do it all again.
*/
page = BufferGetPage(so->currPos.buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);

src/backend/access/nbtree/nbtsort.c

@@ -57,7 +57,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.106 2006/07/14 14:52:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.107 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,7 +125,7 @@ static void _bt_slideleft(Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
IndexTuple itup, OffsetNumber itup_off);
static void _bt_buildadd(BTWriteState *wstate, BTPageState *state,
- IndexTuple itup);
+ IndexTuple itup);
static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state);
static void _bt_load(BTWriteState *wstate,
BTSpool *btspool, BTSpool *btspool2);
@@ -351,7 +351,7 @@ _bt_pagestate(BTWriteState *wstate, uint32 level)
state->btps_full = (BLCKSZ * (100 - BTREE_NONLEAF_FILLFACTOR) / 100);
else
state->btps_full = RelationGetTargetPageFreeSpace(wstate->index,
- BTREE_DEFAULT_FILLFACTOR);
+ BTREE_DEFAULT_FILLFACTOR);
/* no parent level, yet */
state->btps_next = NULL;
@@ -464,8 +464,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
Size itupsz;
/*
- * This is a handy place to check for cancel interrupts during the
- * btree load phase of index creation.
+ * This is a handy place to check for cancel interrupts during the btree
+ * load phase of index creation.
*/
CHECK_FOR_INTERRUPTS();
@@ -499,10 +499,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
"or use full text indexing.")));
/*
- * Check to see if page is "full". It's definitely full if the item
- * won't fit. Otherwise, compare to the target freespace derived from
- * the fillfactor. However, we must put at least two items on each
- * page, so disregard fillfactor if we don't have that many.
+ * Check to see if page is "full". It's definitely full if the item won't
+ * fit. Otherwise, compare to the target freespace derived from the
+ * fillfactor. However, we must put at least two items on each page, so
+ * disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
{

src/backend/access/nbtree/nbtutils.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.78 2006/07/25 19:13:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.79 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,8 +28,8 @@
static void _bt_mark_scankey_required(ScanKey skey);
static bool _bt_check_rowcompare(ScanKey skey,
- IndexTuple tuple, TupleDesc tupdesc,
- ScanDirection dir, bool *continuescan);
+ IndexTuple tuple, TupleDesc tupdesc,
+ ScanDirection dir, bool *continuescan);
/*
@@ -83,7 +83,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@@ -388,7 +388,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@@ -461,7 +461,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns.
@@ -472,12 +472,12 @@ _bt_preprocess_keys(IndexScanDesc scan)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
{
- int addflags;
+ int addflags;
switch (skey->sk_strategy)
{
@@ -503,8 +503,8 @@ _bt_mark_scankey_required(ScanKey skey)
if (skey->sk_flags & SK_ROW_HEADER)
{
- ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
- AttrNumber attno = skey->sk_attno;
+ ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
+ AttrNumber attno = skey->sk_attno;
/* First subkey should be same as the header says */
Assert(subkey->sk_attno == attno);
@@ -558,12 +558,12 @@ _bt_checkkeys(IndexScanDesc scan,
*continuescan = true; /* default assumption */
/*
- * If the scan specifies not to return killed tuples, then we treat
- * a killed tuple as not passing the qual. Most of the time, it's a
- * win to not bother examining the tuple's index keys, but just return
+ * If the scan specifies not to return killed tuples, then we treat a
+ * killed tuple as not passing the qual. Most of the time, it's a win to
+ * not bother examining the tuple's index keys, but just return
* immediately with continuescan = true to proceed to the next tuple.
- * However, if this is the last tuple on the page, we should check
- * the index keys to prevent uselessly advancing to the next page.
+ * However, if this is the last tuple on the page, we should check the
+ * index keys to prevent uselessly advancing to the next page.
*/
if (scan->ignore_killed_tuples && ItemIdDeleted(iid))
{
@@ -580,9 +580,10 @@ _bt_checkkeys(IndexScanDesc scan,
if (offnum > P_FIRSTDATAKEY(opaque))
return false;
}
/*
- * OK, we want to check the keys, but we'll return FALSE even
- * if the tuple passes the key tests.
+ * OK, we want to check the keys, but we'll return FALSE even if the
+ * tuple passes the key tests.
*/
tuple_valid = false;
}
@@ -734,10 +735,9 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
- * columns are required for the scan direction, we can stop
- * the scan, because there can't be another tuple that will
- * succeed.
+ * But it can never match. If all the earlier row comparison
+ * columns are required for the scan direction, we can stop the
+ * scan, because there can't be another tuple that will succeed.
*/
if (subkey != (ScanKey) DatumGetPointer(skey->sk_argument))
subkey--;
@@ -771,7 +771,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
*/
switch (subkey->sk_strategy)
{
- /* EQ and NE cases aren't allowed here */
+ /* EQ and NE cases aren't allowed here */
case BTLessStrategyNumber:
result = (cmpresult < 0);
break;
@@ -795,8 +795,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Tuple fails this qual. If it's a required qual for the current
- * scan direction, then we can conclude no further tuples will
- * pass, either. Note we have to look at the deciding column, not
+ * scan direction, then we can conclude no further tuples will pass,
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -822,7 +822,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DELETE hint bits.
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@@ -856,9 +856,9 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
for (i = 0; i < so->numKilled; i++)
{
- int itemIndex = so->killedItems[i];
- BTScanPosItem *kitem = &so->currPos.items[itemIndex];
- OffsetNumber offnum = kitem->indexOffset;
+ int itemIndex = so->killedItems[i];
+ BTScanPosItem *kitem = &so->currPos.items[itemIndex];
+ OffsetNumber offnum = kitem->indexOffset;
Assert(itemIndex >= so->currPos.firstItem &&
itemIndex <= so->currPos.lastItem);
@@ -881,9 +881,9 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
}
/*
- * Since this can be redone later if needed, it's treated the same
- * as a commit-hint-bit status update for heap tuples: we mark the
- * buffer dirty but don't make a WAL log entry.
+ * Since this can be redone later if needed, it's treated the same as a
+ * commit-hint-bit status update for heap tuples: we mark the buffer dirty
+ * but don't make a WAL log entry.
*
* Whenever we mark anything LP_DELETEd, we also set the page's
* BTP_HAS_GARBAGE flag, which is likewise just a hint.
@@ -898,8 +898,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
/*
- * Always reset the scan state, so we don't look for same items
- * on other pages.
+ * Always reset the scan state, so we don't look for same items on other
+ * pages.
*/
so->numKilled = 0;
}
@@ -908,8 +908,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
@@ -987,7 +987,8 @@ _bt_start_vacuum(Relation rel)
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/* Assign the next cycle ID, being careful to avoid zero */
- do {
+ do
+ {
result = ++(btvacinfo->cycle_ctr);
} while (result == 0);

src/backend/access/nbtree/nbtxlog.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.37 2006/08/07 16:57:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.38 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,7 +82,7 @@ forget_matching_split(RelFileNode node, BlockNumber downlink, bool is_root)
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
- * eye on. Is it worth changing just on general principles?
+ * eye on. Is it worth changing just on general principles?
*/
static void
_bt_restore_page(Page page, char *from, int len)
@@ -155,7 +155,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
char *datapos;
int datalen;
xl_btree_metadata md;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
datapos = (char *) xlrec + SizeOfBtreeInsert;
datalen = record->xl_len - SizeOfBtreeInsert;
@@ -180,7 +180,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (BufferIsValid(buffer))
{
@@ -193,7 +193,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
else
{
if (PageAddItem(page, (Item) datapos, datalen,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
elog(PANIC, "btree_insert_redo: failed to add item");
@@ -225,7 +225,7 @@ btree_xlog_split(bool onleft, bool isroot,
OffsetNumber targetoff;
BlockNumber leftsib;
BlockNumber rightsib;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -376,8 +376,8 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
}
/*
- * Mark the page as not containing any LP_DELETE items --- see comments
- * in _bt_delitems().
+ * Mark the page as not containing any LP_DELETE items --- see comments in
+ * _bt_delitems().
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
@@ -543,7 +543,7 @@ btree_xlog_newroot(XLogRecPtr lsn, XLogRecord *record)
Buffer buffer;
Page page;
BTPageOpaque pageop;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
reln = XLogOpenRelation(xlrec->node);
buffer = XLogReadBuffer(reln, xlrec->rootblk, true);
@@ -637,9 +637,9 @@ static void
out_target(StringInfo buf, xl_btreetid *target)
{
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
void
@@ -680,7 +680,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_l: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R:
@@ -690,7 +690,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_r: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_L_ROOT:
@@ -700,7 +700,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_l_root: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R_ROOT:
@@ -710,7 +710,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_r_root: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_DELETE:
@@ -718,8 +718,8 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
appendStringInfo(buf, "delete: rel %u/%u/%u; blk %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->block);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode, xlrec->block);
break;
}
case XLOG_BTREE_DELETE_PAGE:
@@ -730,7 +730,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "delete_page: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; dead %u; left %u; right %u",
- xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
+ xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_NEWROOT:
@@ -738,9 +738,9 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
appendStringInfo(buf, "newroot: rel %u/%u/%u; root %u lev %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode,
- xlrec->rootblk, xlrec->level);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode,
+ xlrec->rootblk, xlrec->level);
break;
}
default: