
Pgindent run for 8.0.

Bruce Momjian
2004-08-29 05:07:03 +00:00
parent 90cb9c3051
commit b6b71b85bc
527 changed files with 20550 additions and 18283 deletions

src/backend/access/nbtree/nbtinsert.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.115 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.116 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -200,26 +200,26 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
* We can skip items that are marked killed.
*
* Formerly, we applied _bt_isequal() before checking the kill
* flag, so as to fall out of the item loop as soon as possible.
* However, in the presence of heavy update activity an index
* may contain many killed items with the same key; running
* _bt_isequal() on each killed item gets expensive. Furthermore
* it is likely that the non-killed version of each key appears
* first, so that we didn't actually get to exit any sooner anyway.
* So now we just advance over killed items as quickly as we can.
* We only apply _bt_isequal() when we get to a non-killed item or
* the end of the page.
* flag, so as to fall out of the item loop as soon as
* possible. However, in the presence of heavy update activity
* an index may contain many killed items with the same key;
* running _bt_isequal() on each killed item gets expensive.
* Furthermore it is likely that the non-killed version of
* each key appears first, so that we didn't actually get to
* exit any sooner anyway. So now we just advance over killed
* items as quickly as we can. We only apply _bt_isequal()
* when we get to a non-killed item or the end of the page.
*/
if (!ItemIdDeleted(curitemid))
{
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
* how we handling NULLs - and so we must not use _bt_compare
* in real comparison, but only for ordering/finding items on
* pages. - vadim 03/24/97
* _bt_compare returns 0 for (1,NULL) and (1,NULL) -
* this's how we handling NULLs - and so we must not use
* _bt_compare in real comparison, but only for
* ordering/finding items on pages. - vadim 03/24/97
*/
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
break; /* we're past all the equal tuples */
/* okay, we gotta fetch the heap tuple ... */
cbti = (BTItem) PageGetItem(page, curitemid);
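An aside on the logic this rewrapped comment describes: the loop skips killed items before paying for any key comparison. The sketch below is an illustrative reconstruction of that shape, not the verbatim nbtinsert.c code; the declarations of offset, maxoff, and opaque are assumed from context.

	/* Sketch: advance over killed items cheaply; apply _bt_isequal() only
	 * at a non-killed item or the end of the page (illustrative). */
	for (offset = P_FIRSTDATAKEY(opaque);
		 offset <= maxoff;
		 offset = OffsetNumberNext(offset))
	{
		ItemId		curitemid = PageGetItemId(page, offset);

		if (ItemIdDeleted(curitemid))
			continue;			/* killed item: skip without comparing */

		if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
			break;				/* past all the equal tuples */

		/* ... fetch the heap tuple and run the uniqueness check ... */
	}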

src/backend/access/nbtree/nbtpage.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.79 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.80 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -276,8 +276,8 @@ _bt_getroot(Relation rel, int access)
rootlevel = metad->btm_fastlevel;
/*
* We are done with the metapage; arrange to release it via
* first _bt_relandgetbuf call
* We are done with the metapage; arrange to release it via first
* _bt_relandgetbuf call
*/
rootbuf = metabuf;
@@ -368,8 +368,8 @@ _bt_gettrueroot(Relation rel)
rootlevel = metad->btm_level;
/*
* We are done with the metapage; arrange to release it via
* first _bt_relandgetbuf call
* We are done with the metapage; arrange to release it via first
* _bt_relandgetbuf call
*/
rootbuf = metabuf;
@@ -433,21 +433,22 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* page could have been re-used between the time the last VACUUM
* scanned it and the time the VACUUM made its FSM updates.)
*
* In fact, it's worse than that: we can't even assume that it's
* safe to take a lock on the reported page. If somebody else
* has a lock on it, or even worse our own caller does, we could
* In fact, it's worse than that: we can't even assume that it's safe
* to take a lock on the reported page. If somebody else has a
* lock on it, or even worse our own caller does, we could
* deadlock. (The own-caller scenario is actually not improbable.
* Consider an index on a serial or timestamp column. Nearly all
* splits will be at the rightmost page, so it's entirely likely
* that _bt_split will call us while holding a lock on the page most
* recently acquired from FSM. A VACUUM running concurrently with
* the previous split could well have placed that page back in FSM.)
* that _bt_split will call us while holding a lock on the page
* most recently acquired from FSM. A VACUUM running concurrently
* with the previous split could well have placed that page back
* in FSM.)
*
* To get around that, we ask for only a conditional lock on the
* reported page. If we fail, then someone else is using the page,
* and we may reasonably assume it's not free. (If we happen to be
* wrong, the worst consequence is the page will be lost to use till
* the next VACUUM, which is no big problem.)
* reported page. If we fail, then someone else is using the
* page, and we may reasonably assume it's not free. (If we
* happen to be wrong, the worst consequence is the page will be
* lost to use till the next VACUUM, which is no big problem.)
*/
for (;;)
{
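The loop body that the hunk cuts off implements the conditional-lock idea just described. A hedged sketch of its shape, using helper names (GetFreeIndexPage, _bt_page_recyclable) from the surrounding sources of this era; treat it as illustrative rather than the exact committed code:

		blkno = GetFreeIndexPage(&rel->rd_node);
		if (blkno == InvalidBlockNumber)
			break;				/* FSM is empty; extend the relation instead */
		buf = ReadBuffer(rel, blkno);
		if (ConditionalLockBuffer(buf))
		{
			page = BufferGetPage(buf);
			if (_bt_page_recyclable(page))
			{
				/* okay to recycle: reinitialize the page and return it */
				_bt_pageinit(page, BufferGetPageSize(buf));
				return buf;
			}
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
		}
		/* couldn't get the lock: someone is using the page; not free */
		ReleaseBuffer(buf);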

src/backend/access/nbtree/nbtsearch.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.88 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.89 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -155,15 +155,16 @@ _bt_moveright(Relation rel,
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* When nextkey = false (normal case): if the scan key that brought us to
* this page is > the high key stored on the page, then the page has split
* and we need to move right. (If the scan key is equal to the high key,
* we might or might not need to move right; have to scan the page first
* anyway.)
* When nextkey = false (normal case): if the scan key that brought us
* to this page is > the high key stored on the page, then the page
* has split and we need to move right. (If the scan key is equal to
* the high key, we might or might not need to move right; have to
* scan the page first anyway.)
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
* The page could even have split more than once, so scan as far as needed.
* The page could even have split more than once, so scan as far as
* needed.
*
* We also have to move right if we followed a link that brought us to a
* dead page.
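The two cases in this comment collapse into a single test by picking cmpval as 1 for nextkey = false and 0 for nextkey = true. A minimal sketch of the resulting loop condition (illustrative, with the step-right bookkeeping elided):

	cmpval = nextkey ? 0 : 1;
	while (!P_RIGHTMOST(opaque) &&
		   (P_IGNORE(opaque) ||		/* dead page: must move right */
			_bt_compare(rel, keysz, scankey, page, P_HIKEY) >= cmpval))
	{
		/* follow opaque->btpo_next to the right sibling, reread the page */
	}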
@@ -253,13 +254,11 @@ _bt_binsrch(Relation rel,
* Binary search to find the first key on the page >= scan key, or
* first key > scankey when nextkey is true.
*
* For nextkey=false (cmpval=1), the loop invariant is: all slots
* before 'low' are < scan key, all slots at or after 'high'
* are >= scan key.
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
* For nextkey=true (cmpval=0), the loop invariant is: all slots
* before 'low' are <= scan key, all slots at or after 'high'
* are > scan key.
* For nextkey=true (cmpval=0), the loop invariant is: all slots before
* 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
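Restated as code, the invariant gives the usual lower-bound binary search; cmpval is 1 for nextkey = false and 0 for nextkey = true, as above. A minimal sketch (the real _bt_binsrch handles empty-page edge cases separately):

	low = P_FIRSTDATAKEY(opaque);
	high = PageGetMaxOffsetNumber(page) + 1;	/* one past the last slot */
	while (high > low)
	{
		OffsetNumber mid = low + ((high - low) / 2);

		if (_bt_compare(rel, keysz, scankey, page, mid) >= cmpval)
			low = mid + 1;		/* item at mid is below the boundary */
		else
			high = mid;			/* item at mid is at or above it */
	}
	/* high == low: first slot >= scan key (resp. > scan key) */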
@@ -285,15 +284,15 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
* On a leaf page, we always return the first key >= scan key (resp.
* > scan key), which could be the last slot + 1.
* On a leaf page, we always return the first key >= scan key (resp. >
* scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
/*
* On a non-leaf page, return the last key < scan key (resp. <= scan key).
* There must be one if _bt_compare() is playing by the rules.
* On a non-leaf page, return the last key < scan key (resp. <= scan
* key). There must be one if _bt_compare() is playing by the rules.
*/
Assert(low > P_FIRSTDATAKEY(opaque));
@@ -382,10 +381,10 @@ _bt_compare(Relation rel,
{
/*
* The sk_func needs to be passed the index value as left arg
* and the sk_argument as right arg (they might be of different
* types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item,
* we have to flip the sign of the comparison result.
* and the sk_argument as right arg (they might be of
* different types). Since it is convenient for callers to
* think of _bt_compare as comparing the scankey to the index
* item, we have to flip the sign of the comparison result.
*
* Note: curious-looking coding is to avoid overflow if
* comparison function returns INT_MIN. There is no risk of
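The hunk ends mid-sentence, but the "curious-looking coding" it mentions flips the comparison sign by branching rather than negating, since -result overflows when result is INT_MIN. A sketch of the pattern:

	result = DatumGetInt32(FunctionCall2(&scankey->sk_func,
										 datum,
										 scankey->sk_argument));
	if (result > 0)
		result = -1;			/* index value > scankey: report "scankey <" */
	else if (result < 0)
		result = 1;				/* index value < scankey: report "scankey >" */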
@@ -497,7 +496,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
bool goback;
bool continuescan;
ScanKey scankeys;
ScanKey *startKeys = NULL;
ScanKey *startKeys = NULL;
int keysCount = 0;
int i;
StrategyNumber strat_total;
@@ -521,7 +520,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
* the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@@ -554,13 +553,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKey cur;
startKeys = (ScanKey *) palloc(so->numberOfKeys * sizeof(ScanKey));
/*
* chosen is the so-far-chosen key for the current attribute, if any.
* We don't cast the decision in stone until we reach keys for the
* next attribute.
* chosen is the so-far-chosen key for the current attribute, if
* any. We don't cast the decision in stone until we reach keys
* for the next attribute.
*/
curattr = 1;
chosen = NULL;
/*
* Loop iterates from 0 to numberOfKeys inclusive; we use the last
* pass to handle after-last-key processing. Actual exit from the
@@ -578,8 +579,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (chosen == NULL)
break;
startKeys[keysCount++] = chosen;
/*
* Adjust strat_total, and quit if we have stored a > or < key.
* Adjust strat_total, and quit if we have stored a > or <
* key.
*/
strat = chosen->sk_strategy;
if (strat != BTEqualStrategyNumber)
@@ -589,11 +592,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
strat == BTLessStrategyNumber)
break;
}
/*
* Done if that was the last attribute.
*/
if (i >= so->numberOfKeys)
break;
/*
* Reset for next attr, which should be in sequence.
*/
@@ -646,8 +651,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKey cur = startKeys[i];
/*
* _bt_preprocess_keys disallows it, but it's place to add some code
* later
* _bt_preprocess_keys disallows it, but it's place to add some
* code later
*/
if (cur->sk_flags & SK_ISNULL)
{
@@ -656,10 +661,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
elog(ERROR, "btree doesn't support is(not)null, yet");
return false;
}
/*
* If scankey operator is of default subtype, we can use the
* cached comparison procedure; otherwise gotta look it up in
* the catalogs.
* cached comparison procedure; otherwise gotta look it up in the
* catalogs.
*/
if (cur->sk_subtype == InvalidOid)
{
@@ -695,43 +701,46 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Examine the selected initial-positioning strategy to determine
* exactly where we need to start the scan, and set flag variables
* to control the code below.
* exactly where we need to start the scan, and set flag variables to
* control the code below.
*
* If nextkey = false, _bt_search and _bt_binsrch will locate the
* first item >= scan key. If nextkey = true, they will locate the
* first item > scan key.
* If nextkey = false, _bt_search and _bt_binsrch will locate the first
* item >= scan key. If nextkey = true, they will locate the first
* item > scan key.
*
* If goback = true, we will then step back one item, while if
* goback = false, we will start the scan on the located item.
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
*
* it's yet other place to add some code later for is(not)null ...
*/
switch (strat_total)
{
case BTLessStrategyNumber:
/*
* Find first item >= scankey, then back up one to arrive at last
* item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
* Find first item >= scankey, then back up one to arrive at
* last item < scankey. (Note: this positioning strategy is
* only used for a backward scan, so that is always the
* correct starting position.)
*/
nextkey = false;
goback = true;
break;
case BTLessEqualStrategyNumber:
/*
* Find first item > scankey, then back up one to arrive at last
* item <= scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
* Find first item > scankey, then back up one to arrive at
* last item <= scankey. (Note: this positioning strategy is
* only used for a backward scan, so that is always the
* correct starting position.)
*/
nextkey = true;
goback = true;
break;
case BTEqualStrategyNumber:
/*
* If a backward scan was specified, need to start with last
* equal item not first one.
@@ -739,8 +748,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (ScanDirectionIsBackward(dir))
{
/*
* This is the same as the <= strategy. We will check
* at the end whether the found item is actually =.
* This is the same as the <= strategy. We will check at
* the end whether the found item is actually =.
*/
nextkey = true;
goback = true;
@@ -748,8 +757,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
else
{
/*
* This is the same as the >= strategy. We will check
* at the end whether the found item is actually =.
* This is the same as the >= strategy. We will check at
* the end whether the found item is actually =.
*/
nextkey = false;
goback = false;
@@ -757,18 +766,20 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterEqualStrategyNumber:
/*
* Find first item >= scankey. (This is only used for
* forward scans.)
* Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
goback = false;
break;
case BTGreaterStrategyNumber:
/*
* Find first item > scankey. (This is only used for
* forward scans.)
* Find first item > scankey. (This is only used for forward
* scans.)
*/
nextkey = true;
goback = false;
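Collecting the cases, the switch reduces to the following mapping (a summary derived from the comments above, not new behavior):

	strategy   scan dir   nextkey  goback   scan starts at
	<          backward   false    true     last item < scankey
	<=         backward   true     true     last item <= scankey
	=          backward   true     true     last equal item (verified later)
	=          forward    false    false    first equal item (verified later)
	>=         forward    false    false    first item >= scankey
	>          forward    true     false    first item > scankey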
@@ -814,23 +825,23 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
pfree(scankeys);
/*
* If nextkey = false, we are positioned at the first item >= scan key,
* or possibly at the end of a page on which all the existing items are
* less than the scan key and we know that everything on later pages
* is greater than or equal to scan key.
* If nextkey = false, we are positioned at the first item >= scan
* key, or possibly at the end of a page on which all the existing
* items are less than the scan key and we know that everything on
* later pages is greater than or equal to scan key.
*
* If nextkey = true, we are positioned at the first item > scan key,
* or possibly at the end of a page on which all the existing items are
* If nextkey = true, we are positioned at the first item > scan key, or
* possibly at the end of a page on which all the existing items are
* less than or equal to the scan key and we know that everything on
* later pages is greater than scan key.
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page
* or the last item on this page. We apply _bt_step if needed to get to
* the right place.
* one, or in the end-of-page case it's the first item on the next
* page or the last item on this page. We apply _bt_step if needed to
* get to the right place.
*
* If _bt_step fails (meaning we fell off the end of the index in
* one direction or the other), then there are no matches so we just
* If _bt_step fails (meaning we fell off the end of the index in one
* direction or the other), then there are no matches so we just
* return false.
*/
if (goback)
@@ -1292,7 +1303,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
itup = &(btitem->bti_itup);
/*
* Okay, we are on the first or last tuple. Does it pass all the quals?
* Okay, we are on the first or last tuple. Does it pass all the
* quals?
*/
if (_bt_checkkeys(scan, itup, dir, &continuescan))
{

src/backend/access/nbtree/nbtsort.c

@@ -41,11 +41,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
* steps of the build. After completing the index build, we can just sync
* steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
* just after the index build. However, it is clearly not sufficient if the
* just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
@@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.87 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.88 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ struct BTSpool
typedef struct BTPageState
{
Page btps_page; /* workspace for page building */
BlockNumber btps_blkno; /* block # to write this page at */
BlockNumber btps_blkno; /* block # to write this page at */
BTItem btps_minkey; /* copy of minimum key (first item) on
* page */
OffsetNumber btps_lastoff; /* last item offset loaded */
@@ -114,10 +114,10 @@ typedef struct BTPageState
typedef struct BTWriteState
{
Relation index;
bool btws_use_wal; /* dump pages to WAL? */
BlockNumber btws_pages_alloced; /* # pages allocated */
BlockNumber btws_pages_written; /* # pages written out */
Page btws_zeropage; /* workspace for filling zeroes */
bool btws_use_wal; /* dump pages to WAL? */
BlockNumber btws_pages_alloced; /* # pages allocated */
BlockNumber btws_pages_written; /* # pages written out */
Page btws_zeropage; /* workspace for filling zeroes */
} BTWriteState;
@@ -136,7 +136,7 @@ static void _bt_sortaddtup(Page page, Size itemsize,
static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti);
static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state);
static void _bt_load(BTWriteState *wstate,
BTSpool *btspool, BTSpool *btspool2);
BTSpool *btspool, BTSpool *btspool2);
/*
@@ -157,12 +157,12 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead)
btspool->isunique = isunique;
/*
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
* unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
* We size the sort area as maintenance_work_mem rather than work_mem
* to speed index creation. This should be OK since a single backend
* can't run multiple index creations in parallel. Note that creation
* of a unique index actually requires two BTSpool objects. We expect
* that the second one (for dead tuples) won't get very full, so we
* give it only work_mem.
*/
btKbytes = isdead ? work_mem : maintenance_work_mem;
btspool->sortstate = tuplesort_begin_index(index, isunique,
@@ -205,7 +205,7 @@ _bt_spool(BTItem btitem, BTSpool *btspool)
void
_bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
{
BTWriteState wstate;
BTWriteState wstate;
#ifdef BTREE_BUILD_STATS
if (log_btree_build_stats)
@@ -220,6 +220,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
tuplesort_performsort(btspool2->sortstate);
wstate.index = btspool->index;
/*
* We need to log index creation in WAL iff WAL archiving is enabled
* AND it's not a temp index.
@@ -229,7 +230,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
/* reserve the metapage */
wstate.btws_pages_alloced = BTREE_METAPAGE + 1;
wstate.btws_pages_written = 0;
wstate.btws_zeropage = NULL; /* until needed */
wstate.btws_zeropage = NULL; /* until needed */
_bt_load(&wstate, btspool, btspool2);
}
@@ -246,7 +247,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
static Page
_bt_blnewpage(uint32 level)
{
Page page;
Page page;
BTPageOpaque opaque;
page = (Page) palloc(BLCKSZ);
@@ -313,8 +314,8 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
* If we have to write pages nonsequentially, fill in the space with
* zeroes until we come back and overwrite. This is not logically
* necessary on standard Unix filesystems (unwritten space will read
* as zeroes anyway), but it should help to avoid fragmentation.
* The dummy pages aren't WAL-logged though.
* as zeroes anyway), but it should help to avoid fragmentation. The
* dummy pages aren't WAL-logged though.
*/
while (blkno > wstate->btws_pages_written)
{
@@ -326,9 +327,9 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
}
/*
* Now write the page. We say isTemp = true even if it's not a
* temp index, because there's no need for smgr to schedule an fsync
* for this write; we'll do it ourselves before ending the build.
* Now write the page. We say isTemp = true even if it's not a temp
* index, because there's no need for smgr to schedule an fsync for
* this write; we'll do it ourselves before ending the build.
*/
smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true);
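A hedged sketch of the fill-then-write behavior these two hunks describe; simplified, and omitting the WAL logging the real _bt_blwritepage performs when btws_use_wal is set. The lazy zero-page allocation is an assumption inferred from btws_zeropage being initialized to NULL "until needed":

	/* fill any gap with zero pages so the file stays dense on disk */
	while (blkno > wstate->btws_pages_written)
	{
		if (wstate->btws_zeropage == NULL)
			wstate->btws_zeropage = (Page) palloc0(BLCKSZ);	/* lazy alloc */
		smgrwrite(wstate->index->rd_smgr, wstate->btws_pages_written++,
				  (char *) wstate->btws_zeropage, true);
	}

	/* isTemp = true: no fsync scheduling; we sync ourselves at the end */
	smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true);
	if (blkno == wstate->btws_pages_written)
		wstate->btws_pages_written++;	/* sequential extension */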
@@ -468,7 +469,7 @@ static void
_bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
{
Page npage;
BlockNumber nblkno;
BlockNumber nblkno;
OffsetNumber last_off;
Size pgspc;
Size btisz;
@@ -506,7 +507,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
* already. Finish off the page and write it out.
*/
Page opage = npage;
BlockNumber oblkno = nblkno;
BlockNumber oblkno = nblkno;
ItemId ii;
ItemId hii;
BTItem obti;
@@ -539,8 +540,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/*
* Link the old page into its parent, using its minimum key. If
* we don't have a parent, we have to create one; this adds a new
* Link the old page into its parent, using its minimum key. If we
* don't have a parent, we have to create one; this adds a new
* btree level.
*/
if (state->btps_next == NULL)
@@ -572,8 +573,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
}
/*
* Write out the old page. We never need to touch it again,
* so we can free the opage workspace too.
* Write out the old page. We never need to touch it again, so we
* can free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
@@ -613,7 +614,7 @@ static void
_bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
{
BTPageState *s;
BlockNumber rootblkno = P_NONE;
BlockNumber rootblkno = P_NONE;
uint32 rootlevel = 0;
Page metapage;
@@ -663,9 +664,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
/*
* As the last step in the process, construct the metapage and make it
* point to the new root (unless we had no data at all, in which case it's
* set to point to "P_NONE"). This changes the index to the "valid"
* state by filling in a valid magic number in the metapage.
* point to the new root (unless we had no data at all, in which case
* it's set to point to "P_NONE"). This changes the index to the
* "valid" state by filling in a valid magic number in the metapage.
*/
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, rootblkno, rootlevel);
@@ -744,7 +745,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
compare = DatumGetInt32(FunctionCall2(&entry->sk_func,
attrDatum1,
attrDatum2));
attrDatum2));
if (compare > 0)
{
load1 = false;
@@ -768,7 +769,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
if (should_free)
pfree((void *) bti);
bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
true, &should_free);
true, &should_free);
}
else
{
@@ -776,7 +777,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
if (should_free2)
pfree((void *) bti2);
bti2 = (BTItem) tuplesort_getindextuple(btspool2->sortstate,
true, &should_free2);
true, &should_free2);
}
}
_bt_freeskey(indexScanKey);
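The unique-index path above is a textbook two-way merge over two sorted streams (the main spool and the dead-tuples spool). A self-contained illustration in plain C, with int arrays standing in for the tuplesort streams; every name here is invented for the example, none of it is PostgreSQL API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Emit the union of two sorted arrays in order, preferring stream a
	 * on ties, which is the shape _bt_load uses for its two spools. */
	static void
	merge_streams(const int *a, int na, const int *b, int nb)
	{
		int		i = 0,
				j = 0;

		while (i < na || j < nb)
		{
			bool	take_a;

			if (i >= na)
				take_a = false;		/* stream a exhausted */
			else if (j >= nb)
				take_a = true;		/* stream b exhausted */
			else
				take_a = (a[i] <= b[j]);	/* smaller key first */

			printf("%d\n", take_a ? a[i++] : b[j++]);
		}
	}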
@@ -785,7 +786,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
/* merge is unnecessary */
while ((bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
true, &should_free)) != NULL)
true, &should_free)) != NULL)
{
/* When we see first tuple, create first index page */
if (state == NULL)
@@ -802,18 +803,18 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
/*
* If the index isn't temp, we must fsync it down to disk before it's
* safe to commit the transaction. (For a temp index we don't care
* safe to commit the transaction. (For a temp index we don't care
* since the index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build.
* It's less obvious that we have to do it even if we did WAL-log the
* index pages. The reason is that since we're building outside
* shared buffers, a CHECKPOINT occurring during the build has no way
* to flush the previously written data to disk (indeed it won't know
* the index even exists). A crash later on would replay WAL from the
* It's obvious that we must do this when not WAL-logging the build. It's
* less obvious that we have to do it even if we did WAL-log the index
* pages. The reason is that since we're building outside shared
* buffers, a CHECKPOINT occurring during the build has no way to
* flush the previously written data to disk (indeed it won't know the
* index even exists). A crash later on would replay WAL from the
* checkpoint, therefore it wouldn't replay our earlier WAL entries.
* If we do not fsync those pages here, they might still not be on disk
* when the crash occurs.
* If we do not fsync those pages here, they might still not be on
* disk when the crash occurs.
*/
if (!wstate->index->rd_istemp)
smgrimmedsync(wstate->index->rd_smgr);
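Two lines tie this reasoning together: the archiving test quoted earlier in this file ("log index creation in WAL iff WAL archiving is enabled AND it's not a temp index") and this closing sync. Sketched side by side as a paraphrase (the first line lives in _bt_leafbuild, the second in _bt_load):

	/* WAL-log the build only if archiving is on and the index isn't temp */
	wstate.btws_use_wal = XLogArchivingActive() && !wstate.index->rd_istemp;

	/* a CHECKPOINT can't flush pages it never saw, so force them down */
	if (!wstate->index->rd_istemp)
		smgrimmedsync(wstate->index->rd_smgr);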

src/backend/access/nbtree/nbtutils.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.59 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.60 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,8 +48,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
bool null;
/*
* We can use the cached (default) support procs since no cross-type
* comparison can be needed.
* We can use the cached (default) support procs since no
* cross-type comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
arg = index_getattr(itup, i + 1, itupdesc, &null);
@@ -68,7 +68,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
/*
* _bt_mkscankey_nodata
* Build a scan key that contains comparator routines appropriate to
* the key datatypes, but no comparison data. The comparison data
* the key datatypes, but no comparison data. The comparison data
* ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(). Currently this
@@ -93,8 +93,8 @@ _bt_mkscankey_nodata(Relation rel)
FmgrInfo *procinfo;
/*
* We can use the cached (default) support procs since no cross-type
* comparison can be needed.
* We can use the cached (default) support procs since no
* cross-type comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
ScanKeyEntryInitializeWithInfo(&skey[i],
@@ -163,12 +163,12 @@ _bt_formitem(IndexTuple itup)
* _bt_preprocess_keys() -- Preprocess scan keys
*
* The caller-supplied keys (in scan->keyData[]) are copied to
* so->keyData[] with possible transformation. scan->numberOfKeys is
* so->keyData[] with possible transformation. scan->numberOfKeys is
* the number of input keys, so->numberOfKeys gets the number of output
* keys (possibly less, never greater).
*
* The primary purpose of this routine is to discover how many scan keys
* must be satisfied to continue the scan. It also attempts to eliminate
* must be satisfied to continue the scan. It also attempts to eliminate
* redundant keys and detect contradictory keys. At present, redundant and
* contradictory keys can only be detected for same-data-type comparisons,
* but that's the usual case so it seems worth doing.
@@ -198,7 +198,7 @@ _bt_formitem(IndexTuple itup)
* or one or two boundary-condition keys for each attr.) However, we can
* only detect redundant keys when the right-hand datatypes are all equal
* to the index datatype, because we do not know suitable operators for
* comparing right-hand values of two different datatypes. (In theory
* comparing right-hand values of two different datatypes. (In theory
* we could handle comparison of a RHS of the index datatype with a RHS of
* another type, but that seems too much pain for too little gain.) So,
* keys whose operator has a nondefault subtype (ie, its RHS is not of the
@@ -285,9 +285,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
*
* xform[i] points to the currently best scan key of strategy type i+1,
* if any is found with a default operator subtype; it is NULL if we
* haven't yet found such a key for this attr. Scan keys of nondefault
* subtypes are transferred to the output with no processing except for
* noting if they are of "=" type.
* haven't yet found such a key for this attr. Scan keys of
* nondefault subtypes are transferred to the output with no
* processing except for noting if they are of "=" type.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
@@ -361,7 +361,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* If no "=" for this key, we're done with required keys
*/
if (! hasOtherTypeEqual)
if (!hasOtherTypeEqual)
allEqualSoFar = false;
}
@@ -369,8 +369,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
if (xform[BTLessStrategyNumber - 1]
&& xform[BTLessEqualStrategyNumber - 1])
{
ScanKey lt = xform[BTLessStrategyNumber - 1];
ScanKey le = xform[BTLessEqualStrategyNumber - 1];
ScanKey lt = xform[BTLessStrategyNumber - 1];
ScanKey le = xform[BTLessEqualStrategyNumber - 1];
test = FunctionCall2(&le->sk_func,
lt->sk_argument,
@@ -385,8 +385,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
if (xform[BTGreaterStrategyNumber - 1]
&& xform[BTGreaterEqualStrategyNumber - 1])
{
ScanKey gt = xform[BTGreaterStrategyNumber - 1];
ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1];
ScanKey gt = xform[BTGreaterStrategyNumber - 1];
ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1];
test = FunctionCall2(&ge->sk_func,
gt->sk_argument,
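Both tests above compare the right-hand constants of the strict and non-strict bounds using the non-strict operator's support function, then keep only the tighter key. Completing the ">" versus ">=" case as a sketch consistent with the xform[] convention used here:

		test = FunctionCall2(&ge->sk_func,
							 gt->sk_argument,
							 ge->sk_argument);
		if (DatumGetBool(test))
			/* gt's bound >= ge's bound: "x > gt" implies "x >= ge" */
			xform[BTGreaterEqualStrategyNumber - 1] = NULL;
		else
			xform[BTGreaterStrategyNumber - 1] = NULL;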
@@ -545,21 +545,23 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
{
/*
* Tuple fails this qual. If it's a required qual, then we
* may be able to conclude no further tuples will pass, either.
* We have to look at the scan direction and the qual type.
* may be able to conclude no further tuples will pass,
* either. We have to look at the scan direction and the qual
* type.
*
* Note: the only case in which we would keep going after failing
* a required qual is if there are partially-redundant quals that
* _bt_preprocess_keys() was unable to eliminate. For example,
* given "x > 4 AND x > 10" where both are cross-type comparisons
* and so not removable, we might start the scan at the x = 4
* boundary point. The "x > 10" condition will fail until we
* pass x = 10, but we must not stop the scan on its account.
* a required qual is if there are partially-redundant quals
* that _bt_preprocess_keys() was unable to eliminate. For
* example, given "x > 4 AND x > 10" where both are cross-type
* comparisons and so not removable, we might start the scan
* at the x = 4 boundary point. The "x > 10" condition will
* fail until we pass x = 10, but we must not stop the scan on
* its account.
*
* Note: because we stop the scan as soon as any required equality
* qual fails, it is critical that equality quals be used for the
* initial positioning in _bt_first() when they are available.
* See comments in _bt_first().
* Note: because we stop the scan as soon as any required
* equality qual fails, it is critical that equality quals be
* used for the initial positioning in _bt_first() when they
* are available. See comments in _bt_first().
*/
if (ikey < so->numberOfRequiredKeys)
{
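The decision the truncated hunk leads into depends on the qual type versus the scan direction, as the rewrapped comment explains. An illustrative reconstruction; treat the exact case structure as an assumption rather than the verbatim code:

		switch (key->sk_strategy)
		{
			case BTLessStrategyNumber:
			case BTLessEqualStrategyNumber:
				if (ScanDirectionIsForward(dir))
					*continuescan = false;	/* later keys only grow */
				break;
			case BTEqualStrategyNumber:
				*continuescan = false;		/* past the equal-key range */
				break;
			case BTGreaterEqualStrategyNumber:
			case BTGreaterStrategyNumber:
				if (ScanDirectionIsBackward(dir))
					*continuescan = false;	/* later keys only shrink */
				break;
		}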

src/backend/access/nbtree/nbtxlog.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.17 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.18 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -770,7 +770,7 @@ static void
out_target(char *buf, xl_btreetid *target)
{
sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
target->node.spcNode, target->node.dbNode, target->node.relNode,
target->node.spcNode, target->node.dbNode, target->node.relNode,
ItemPointerGetBlockNumber(&(target->tid)),
ItemPointerGetOffsetNumber(&(target->tid)));
}