
pgindent run for 8.3.

Bruce Momjian
2007-11-15 21:14:46 +00:00
parent 3adc760fb9
commit fdf5a5efb7
486 changed files with 10044 additions and 9664 deletions
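
Nearly every hunk below is whitespace-only: pgindent reflows block comments to fill the project's standard line width and re-aligns variable declarations into tab columns. A minimal before/after illustration of those two transformations (an invented snippet, not taken from this diff):

/* before: comment wrapped at arbitrary points, declaration
 * separated from its name by a single space */
int olddataitemstotal;  /* space taken by old items */

/*
 * after: comment reflowed to fill the standard width, declaration
 * name and trailing comment aligned to tab stops
 */
int         olddataitemstotal;  /* space taken by old items */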

src/backend/access/nbtree/nbtinsert.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.160 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.161 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@ typedef struct
OffsetNumber newitemoff; /* where the new item is to be inserted */
int leftspace; /* space available for items on left page */
int rightspace; /* space available for items on right page */
int olddataitemstotal; /* space taken by old items */
int olddataitemstotal; /* space taken by old items */
bool have_split; /* found a valid split? */
@@ -222,7 +222,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
if (!ItemIdIsDead(curitemid))
{
ItemPointerData htid;
bool all_dead;
bool all_dead;
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
@@ -239,8 +239,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* We check the whole HOT-chain to see if there is any tuple
* that satisfies SnapshotDirty. This is necessary because
* we have just a single index entry for the entire chain.
* that satisfies SnapshotDirty. This is necessary because we
* have just a single index entry for the entire chain.
*/
if (heap_hot_search(&htid, heapRel, &SnapshotDirty, &all_dead))
{
@@ -267,15 +267,16 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* is itself now committed dead --- if so, don't complain.
* This is a waste of time in normal scenarios but we must
* do it to support CREATE INDEX CONCURRENTLY.
*
*
* We must follow HOT-chains here because during
* concurrent index build, we insert the root TID though
* the actual tuple may be somewhere in the HOT-chain.
* While following the chain we might not stop at the exact
* tuple which triggered the insert, but that's OK because
* if we find a live tuple anywhere in this chain, we have
* a unique key conflict. The other live tuple is not part
* of this chain because it had a different index entry.
* While following the chain we might not stop at the
* exact tuple which triggered the insert, but that's OK
* because if we find a live tuple anywhere in this chain,
* we have a unique key conflict. The other live tuple is
* not part of this chain because it had a different index
* entry.
*/
htid = itup->t_tid;
if (heap_hot_search(&htid, heapRel, SnapshotSelf, NULL))
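
The rewrapped comment above is worth a concrete model: heap_hot_search starts from the root TID stored in the index and walks the whole HOT chain, and any live member is a uniqueness conflict. A self-contained toy version of that walk (ToyTuple and chain_has_live_tuple are invented stand-ins, not PostgreSQL types):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for heap tuples linked into a HOT chain. */
typedef struct ToyTuple
{
    bool    live;   /* visible to our snapshot? */
    int     next;   /* index of next chain member, or -1 at chain end */
} ToyTuple;

/*
 * Walk the chain starting at the root tuple; report true if any member
 * is still live.  The index carries a single entry pointing at the chain
 * root, so uniqueness checking must consider every member, not just the
 * root -- exactly what the comment above describes.
 */
static bool
chain_has_live_tuple(const ToyTuple *heap, int root)
{
    for (int i = root; i != -1; i = heap[i].next)
    {
        if (heap[i].live)
            return true;
    }
    return false;
}

int
main(void)
{
    ToyTuple heap[] = {
        {false, 1},     /* dead root, updated in place (HOT) */
        {false, 2},     /* dead intermediate version */
        {true, -1}      /* current live version */
    };

    printf("conflict: %s\n", chain_has_live_tuple(heap, 0) ? "yes" : "no");
    return 0;
}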
@@ -293,8 +294,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
ereport(ERROR,
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel))));
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel))));
}
else if (all_dead)
{
@@ -372,7 +373,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* On entry, *buf and *offsetptr point to the first legal position
* where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search the right
* InvalidOffsetNumber, in which case the function will search the right
* location within the page if needed. On exit, they point to the chosen
* insert location. If findinsertloc decided to move right, the lock and
* pin on the original page will be released and the new page returned to
@@ -389,11 +390,12 @@ _bt_findinsertloc(Relation rel,
ScanKey scankey,
IndexTuple newtup)
{
Buffer buf = *bufptr;
Page page = BufferGetPage(buf);
Size itemsz;
Buffer buf = *bufptr;
Page page = BufferGetPage(buf);
Size itemsz;
BTPageOpaque lpageop;
bool movedright, vacuumed;
bool movedright,
vacuumed;
OffsetNumber newitemoff;
OffsetNumber firstlegaloff = *offsetptr;
@@ -447,19 +449,21 @@ _bt_findinsertloc(Relation rel,
Buffer rbuf;
/*
* before considering moving right, see if we can obtain enough
* space by erasing LP_DEAD items
* before considering moving right, see if we can obtain enough space
* by erasing LP_DEAD items
*/
if (P_ISLEAF(lpageop) && P_HAS_GARBAGE(lpageop))
{
_bt_vacuum_one_page(rel, buf);
/* remember that we vacuumed this page, because that makes
* the hint supplied by the caller invalid */
/*
* remember that we vacuumed this page, because that makes the
* hint supplied by the caller invalid
*/
vacuumed = true;
if (PageGetFreeSpace(page) >= itemsz)
break; /* OK, now we have enough space */
break; /* OK, now we have enough space */
}
/*
@@ -473,11 +477,10 @@ _bt_findinsertloc(Relation rel,
/*
* step right to next non-dead page
*
* must write-lock that page before releasing write lock on
* current page; else someone else's _bt_check_unique scan could
* fail to see our insertion. write locks on intermediate dead
* pages won't do because we don't know when they will get
* de-linked from the tree.
* must write-lock that page before releasing write lock on current
* page; else someone else's _bt_check_unique scan could fail to see
* our insertion. write locks on intermediate dead pages won't do
* because we don't know when they will get de-linked from the tree.
*/
rbuf = InvalidBuffer;
@@ -501,17 +504,16 @@ _bt_findinsertloc(Relation rel,
}
/*
* Now we are on the right page, so find the insert position. If we
* moved right at all, we know we should insert at the start of the
* page. If we didn't move right, we can use the firstlegaloff hint
* if the caller supplied one, unless we vacuumed the page which
* might have moved tuples around making the hint invalid. If we
* didn't move right or can't use the hint, find the position
* by searching.
* Now we are on the right page, so find the insert position. If we moved
* right at all, we know we should insert at the start of the page. If we
* didn't move right, we can use the firstlegaloff hint if the caller
* supplied one, unless we vacuumed the page which might have moved tuples
* around making the hint invalid. If we didn't move right or can't use
* the hint, find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
else if(firstlegaloff != InvalidOffsetNumber && !vacuumed)
else if (firstlegaloff != InvalidOffsetNumber && !vacuumed)
newitemoff = firstlegaloff;
else
newitemoff = _bt_binsrch(rel, buf, keysz, scankey, false);
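
The hunk above ends at the fallback case: no move-right shortcut and no usable hint, so _bt_findinsertloc asks _bt_binsrch for the position. A self-contained sketch of that kind of search over plain integers (toy_binsrch is an invented name; the real function compares scan keys against index tuples):

#include <stddef.h>
#include <stdio.h>

/*
 * Return the first offset at which 'key' can be inserted while keeping
 * 'items' sorted -- the toy analogue of _bt_binsrch finding the first
 * position whose item is >= the scan key.
 */
static size_t
toy_binsrch(const int *items, size_t nitems, int key)
{
    size_t  lo = 0;
    size_t  hi = nitems;

    while (lo < hi)
    {
        size_t  mid = lo + (hi - lo) / 2;

        if (items[mid] < key)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}

int
main(void)
{
    int     page[] = {10, 20, 20, 30};

    printf("insert 20 at offset %zu\n", toy_binsrch(page, 4, 20)); /* 1 */
    return 0;
}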
@@ -982,8 +984,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* the data by reinserting it into a new left page. (XXX the latter
* comment is probably obsolete.)
*
* We need to do this before writing the WAL record, so that XLogInsert can
* WAL log an image of the page if necessary.
* We need to do this before writing the WAL record, so that XLogInsert
* can WAL log an image of the page if necessary.
*/
PageRestoreTempPage(leftpage, origpage);
@@ -1033,10 +1035,10 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* Log the new item and its offset, if it was inserted on the left
* page. (If it was put on the right page, we don't need to explicitly
* WAL log it because it's included with all the other items on the
* right page.) Show the new item as belonging to the left page buffer,
* so that it is not stored if XLogInsert decides it needs a full-page
* image of the left page. We store the offset anyway, though, to
* support archive compression of these records.
* right page.) Show the new item as belonging to the left page
* buffer, so that it is not stored if XLogInsert decides it needs a
* full-page image of the left page. We store the offset anyway,
* though, to support archive compression of these records.
*/
if (newitemonleft)
{
@@ -1052,31 +1054,31 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = (char *) newitem;
lastrdata->len = MAXALIGN(newitemsz);
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
else
{
/*
* Although we don't need to WAL-log the new item, we still
* need XLogInsert to consider storing a full-page image of the
* left page, so make an empty entry referencing that buffer.
* This also ensures that the left page is always backup block 1.
* Although we don't need to WAL-log the new item, we still need
* XLogInsert to consider storing a full-page image of the left
* page, so make an empty entry referencing that buffer. This also
* ensures that the left page is always backup block 1.
*/
lastrdata->next = lastrdata + 1;
lastrdata++;
lastrdata->data = NULL;
lastrdata->len = 0;
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
/*
* Log the contents of the right page in the format understood by
* _bt_restore_page(). We set lastrdata->buffer to InvalidBuffer,
* because we're going to recreate the whole page anyway, so it
* should never be stored by XLogInsert.
* because we're going to recreate the whole page anyway, so it should
* never be stored by XLogInsert.
*
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
@@ -1101,7 +1103,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = NULL;
lastrdata->len = 0;
lastrdata->buffer = sbuf; /* backup block 2 */
lastrdata->buffer = sbuf; /* backup block 2 */
lastrdata->buffer_std = true;
}
@@ -1275,9 +1277,10 @@ _bt_findsplitloc(Relation rel,
olddataitemstoleft += itemsz;
}
/* If the new item goes as the last item, check for splitting so that
* all the old items go to the left page and the new item goes to the
* right page.
/*
* If the new item goes as the last item, check for splitting so that all
* the old items go to the left page and the new item goes to the right
* page.
*/
if (newitemoff > maxoff && !goodenoughfound)
_bt_checksplitloc(&state, newitemoff, false, olddataitemstotal, 0);
@@ -1314,16 +1317,16 @@ _bt_checksplitloc(FindSplitData *state,
int olddataitemstoleft,
Size firstoldonrightsz)
{
int leftfree,
rightfree;
Size firstrightitemsz;
bool newitemisfirstonright;
int leftfree,
rightfree;
Size firstrightitemsz;
bool newitemisfirstonright;
/* Is the new item going to be the first item on the right page? */
newitemisfirstonright = (firstoldonright == state->newitemoff
&& !newitemonleft);
if(newitemisfirstonright)
if (newitemisfirstonright)
firstrightitemsz = state->newitemsz;
else
firstrightitemsz = firstoldonrightsz;
@@ -1334,9 +1337,8 @@ _bt_checksplitloc(FindSplitData *state,
(state->olddataitemstotal - olddataitemstoleft);
/*
* The first item on the right page becomes the high key of the
* left page; therefore it counts against left space as well as right
* space.
* The first item on the right page becomes the high key of the left page;
* therefore it counts against left space as well as right space.
*/
leftfree -= firstrightitemsz;
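
The accounting the rewrapped comment describes can be shown in miniature: whichever item becomes first on the right page is also copied back to the left page as its new high key, so its size is charged against the left side's free space as well. A toy version (toy_checksplit and TOY_PAGE_SPACE are invented; real pages also carry headers and special space):

#include <stdio.h>

#define TOY_PAGE_SPACE 8192

/*
 * Toy free-space check for one candidate split point.  The right side
 * already accounts for the first right item in its data total; the left
 * side must additionally absorb a copy of it as the new high key.
 */
static void
toy_checksplit(int leftspace, int rightspace,
               int olddataitemstoleft, int olddataitemstotal,
               int firstrightitemsz)
{
    int     leftfree = leftspace - olddataitemstoleft;
    int     rightfree = rightspace -
        (olddataitemstotal - olddataitemstoleft);

    leftfree -= firstrightitemsz;   /* doubles as the left high key */

    printf("leftfree=%d rightfree=%d\n", leftfree, rightfree);
}

int
main(void)
{
    toy_checksplit(TOY_PAGE_SPACE, TOY_PAGE_SPACE, 4000, 7900, 120);
    return 0;
}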
@@ -1875,8 +1877,8 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* Scan over all items to see which ones need to be deleted
* according to LP_DEAD flags.
* Scan over all items to see which ones need to be deleted according to
* LP_DEAD flags.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);

src/backend/access/nbtree/nbtpage.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.103 2007/09/12 22:10:26 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.104 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -751,8 +751,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
/*
* In recovery mode, assume the deletion being replayed is valid. We
* can't always check it because we won't have a full search stack,
* and we should complain if there's a problem, anyway.
* can't always check it because we won't have a full search stack, and we
* should complain if there's a problem, anyway.
*/
if (InRecovery)
return true;
@@ -781,8 +781,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
{
/*
* It's only child, so safe if parent would itself be removable.
* We have to check the parent itself, and then recurse to
* test the conditions at the parent's parent.
* We have to check the parent itself, and then recurse to test
* the conditions at the parent's parent.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque))
{
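
The recursion sketched in that comment is simple to model: an only child may be deleted only if its parent is itself removable, checked all the way up. A self-contained toy (ToyNode is invented; the rightmost-child restriction from the surrounding code is deliberately omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

/* Toy parent chain: each node records whether it is an only child. */
typedef struct ToyNode
{
    bool    only_child; /* sole child of its parent? */
    int     parent;     /* index of parent node, or -1 at the root */
} ToyNode;

/*
 * Mirror of the rule in the comment above: a node with siblings can go
 * without disturbing its parent; an only child is deletable only if its
 * parent would itself be removable, tested recursively upward.
 */
static bool
toy_deletion_safe(const ToyNode *nodes, int target)
{
    if (!nodes[target].only_child)
        return true;            /* siblings remain; parent survives */
    if (nodes[target].parent == -1)
        return false;           /* would have to delete the root */
    return toy_deletion_safe(nodes, nodes[target].parent);
}

int
main(void)
{
    ToyNode nodes[] = {
        {false, -1},    /* 0: root level, other children remain */
        {true, 0},      /* 1: only child of node 0 */
        {true, 1}       /* 2: only child of node 1 */
    };

    printf("safe: %s\n", toy_deletion_safe(nodes, 2) ? "yes" : "no");
    return 0;
}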
@@ -887,18 +887,18 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
/*
* To avoid deadlocks, we'd better drop the target page lock before
* going further.
* To avoid deadlocks, we'd better drop the target page lock before going
* further.
*/
_bt_relbuf(rel, buf);
/*
* We need an approximate pointer to the page's parent page. We use
* the standard search mechanism to search for the page's high key; this
* will give us a link to either the current parent or someplace to its
* left (if there are multiple equal high keys). In recursion cases,
* the caller already generated a search stack and we can just re-use
* that work.
* We need an approximate pointer to the page's parent page. We use the
* standard search mechanism to search for the page's high key; this will
* give us a link to either the current parent or someplace to its left
* (if there are multiple equal high keys). In recursion cases, the
* caller already generated a search stack and we can just re-use that
* work.
*/
if (stack == NULL)
{
@@ -933,11 +933,11 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
* facilities not available in recovery mode). Instead, just
* set up a dummy stack pointing to the left end of the parent
* tree level, from which _bt_getstackbuf will walk right to the
* parent page. Painful, but we don't care too much about
* performance in this scenario.
* facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
* this scenario.
*/
pbuf = _bt_get_endpoint(rel, targetlevel + 1, false);
stack = (BTStack) palloc(sizeof(BTStackData));
@@ -951,10 +951,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* We cannot delete a page that is the rightmost child of its immediate
* parent, unless it is the only child --- in which case the parent has
* to be deleted too, and the same condition applies recursively to it.
* We have to check this condition all the way up before trying to delete.
* We don't need to re-test when deleting a non-leaf page, though.
* parent, unless it is the only child --- in which case the parent has to
* be deleted too, and the same condition applies recursively to it. We
* have to check this condition all the way up before trying to delete. We
* don't need to re-test when deleting a non-leaf page, though.
*/
if (targetlevel == 0 &&
!_bt_parent_deletion_safe(rel, target, stack))
@@ -1072,8 +1072,8 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
* might be possible to push the fast root even further down, but the odds
* of doing so are slim, and the locking considerations daunting.)
*
* We don't support handling this in the case where the parent is
* becoming half-dead, even though it theoretically could occur.
* We don't support handling this in the case where the parent is becoming
* half-dead, even though it theoretically could occur.
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@@ -1287,10 +1287,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
_bt_relbuf(rel, lbuf);
/*
* If parent became half dead, recurse to delete it. Otherwise, if
* right sibling is empty and is now the last child of the parent, recurse
* to try to delete it. (These cases cannot apply at the same time,
* though the second case might itself recurse to the first.)
* If parent became half dead, recurse to delete it. Otherwise, if right
* sibling is empty and is now the last child of the parent, recurse to
* try to delete it. (These cases cannot apply at the same time, though
* the second case might itself recurse to the first.)
*
* When recursing to parent, we hold the lock on the target page until
* done. This delays any insertions into the keyspace that was just

src/backend/access/nbtree/nbtsearch.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.113 2007/05/27 03:50:39 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.114 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -637,17 +637,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
* scheme. But, by the same token, if we aren't able to use all
* scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition,
* and so we'd better adjust strat_total accordingly.
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
*/
if (i == keysCount - 1)
{
bool used_all_subkeys = false;
Assert(!(subkey->sk_flags & SK_ROW_END));
for(;;)
for (;;)
{
subkey++;
Assert(subkey->sk_flags & SK_ROW_MEMBER);
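
The strat_total adjustment described above is easier to see with concrete semantics. For a two-column row comparison (a,b) > (x,y), every member before the last behaves like ">="; if the last member can't be used, only the ">=" prefix remains for positioning. A toy model (row_gt and row_gt_prefix_only are invented names):

#include <stdbool.h>
#include <stdio.h>

/*
 * Full row comparison (a,b) > (x,y): true when a > x, or when
 * a == x and b > y.  Note the first member alone is tested with
 * equality, i.e. effectively ">=".
 */
static bool
row_gt(int a, int b, int x, int y)
{
    if (a != x)
        return a > x;
    return b > y;
}

/*
 * When the second member can't be used, the usable prefix degrades
 * to ">=", which is why strat_total must be adjusted.
 */
static bool
row_gt_prefix_only(int a, int x)
{
    return a >= x;
}

int
main(void)
{
    printf("%d %d\n", row_gt(1, 5, 1, 3), row_gt_prefix_only(1, 1));
    return 0;
}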

src/backend/access/nbtree/nbtutils.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.86 2007/09/12 22:10:26 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.87 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -205,7 +205,7 @@ _bt_freestack(BTStack stack)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
* we cannot eliminate either. If there are two such keys of the same
* we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -265,13 +265,13 @@ _bt_preprocess_keys(IndexScanDesc scan)
{
/*
* We treat all btree operators as strict (even if they're not so
* marked in pg_proc). This means that it is impossible for an
* operator condition with a NULL comparison constant to succeed,
* and we can reject it right away.
* marked in pg_proc). This means that it is impossible for an
* operator condition with a NULL comparison constant to succeed, and
* we can reject it right away.
*
* However, we now also support "x IS NULL" clauses as search
* conditions, so in that case keep going. The planner has not
* filled in any particular strategy in this case, so set it to
* conditions, so in that case keep going. The planner has not filled
* in any particular strategy in this case, so set it to
* BTEqualStrategyNumber --- we can treat IS NULL as an equality
* operator for purposes of search strategy.
*/
@@ -303,8 +303,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Initialize for processing of keys for attr 1.
*
* xform[i] points to the currently best scan key of strategy type i+1;
* it is NULL if we haven't yet found such a key for this attr.
* xform[i] points to the currently best scan key of strategy type i+1; it
* is NULL if we haven't yet found such a key for this attr.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
@@ -464,6 +464,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
memcpy(outkey, cur, sizeof(ScanKeyData));
if (numberOfEqualCols == attno - 1)
_bt_mark_scankey_required(outkey);
/*
* We don't support RowCompare using equality; such a qual would
* mess up the numberOfEqualCols tracking.
@@ -514,9 +515,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
else
{
/*
* We can't determine which key is more restrictive. Keep
* the previous one in xform[j] and push this one directly
* to the output array.
* We can't determine which key is more restrictive. Keep the
* previous one in xform[j] and push this one directly to the
* output array.
*/
ScanKey outkey = &outkeys[new_numberOfKeys++];
@@ -542,7 +543,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
* may not be able to make the comparison. If we can make the comparison
* may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -608,8 +609,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
* indexscan initiated by syscache lookup will use cross-data-type
* operators.)
*
* If the sk_strategy was flipped by _bt_mark_scankey_with_indoption,
* we have to un-flip it to get the correct opfamily member.
* If the sk_strategy was flipped by _bt_mark_scankey_with_indoption, we
* have to un-flip it to get the correct opfamily member.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_DESC)
@@ -654,7 +655,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
static void
_bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
{
int addflags;
int addflags;
addflags = indoption[skey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;
if ((addflags & SK_BT_DESC) && !(skey->sk_flags & SK_BT_DESC))
@@ -874,8 +875,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
* one of the "must match" subset. On a forward scan,
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQBKWD) &&
@@ -887,8 +888,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQFWD) &&
@@ -978,7 +979,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
* one of the "must match" subset. On a forward scan,
* one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQBKWD) &&
@@ -991,7 +992,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1264,8 +1265,8 @@ _bt_start_vacuum(Relation rel)
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/*
* Assign the next cycle ID, being careful to avoid zero as well as
* the reserved high values.
* Assign the next cycle ID, being careful to avoid zero as well as the
* reserved high values.
*/
result = ++(btvacinfo->cycle_ctr);
if (result == 0 || result > MAX_BT_CYCLE_ID)
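
The cycle-ID assignment just above must skip zero and the reserved high range when the counter wraps. A toy version of the same pattern (TOY_MAX_CYCLE_ID is a stand-in value, not the real MAX_BT_CYCLE_ID):

#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_CYCLE_ID 0xFF7F /* stand-in for the reserved-range cutoff */

/*
 * Advance the cycle counter, wrapping past zero and the reserved high
 * values, as the comment above describes.
 */
static uint16_t
next_cycle_id(uint16_t *ctr)
{
    uint16_t    result = ++(*ctr);

    if (result == 0 || result > TOY_MAX_CYCLE_ID)
        result = *ctr = 1;
    return result;
}

int
main(void)
{
    uint16_t    ctr = TOY_MAX_CYCLE_ID;

    printf("%u\n", next_cycle_id(&ctr));    /* wraps around to 1 */
    return 0;
}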

src/backend/access/nbtree/nbtxlog.c

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.46 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.47 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ typedef struct bt_incomplete_action
BlockNumber rightblk; /* right half of split */
/* these fields are for a delete: */
BlockNumber delblk; /* parent block to be deleted */
} bt_incomplete_action;
} bt_incomplete_action;
static List *incomplete_actions;
@@ -271,8 +271,8 @@ btree_xlog_split(bool onleft, bool isroot,
char *datapos;
int datalen;
OffsetNumber newitemoff = 0;
Item newitem = NULL;
Size newitemsz = 0;
Item newitem = NULL;
Size newitemsz = 0;
reln = XLogOpenRelation(xlrec->node);
@@ -343,15 +343,15 @@ btree_xlog_split(bool onleft, bool isroot,
* Reconstruct left (original) sibling if needed. Note that this code
* ensures that the items remaining on the left page are in the correct
* item number order, but it does not reproduce the physical order they
* would have had. Is this worth changing? See also _bt_restore_page().
* would have had. Is this worth changing? See also _bt_restore_page().
*/
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
if (BufferIsValid(lbuf))
{
Page lpage = (Page) BufferGetPage(lbuf);
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
if (!XLByteLE(lsn, PageGetLSN(lpage)))
@@ -359,19 +359,20 @@ btree_xlog_split(bool onleft, bool isroot,
OffsetNumber off;
OffsetNumber maxoff = PageGetMaxOffsetNumber(lpage);
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable = 0;
ItemId hiItemId;
Item hiItem;
int ndeletable = 0;
ItemId hiItemId;
Item hiItem;
/*
* Remove the items from the left page that were copied to
* the right page. Also remove the old high key, if any.
* (We must remove everything before trying to insert any
* items, else we risk not having enough space.)
* Remove the items from the left page that were copied to the
* right page. Also remove the old high key, if any. (We must
* remove everything before trying to insert any items, else
* we risk not having enough space.)
*/
if (!P_RIGHTMOST(lopaque))
{
deletable[ndeletable++] = P_HIKEY;
/*
* newitemoff is given to us relative to the original
* page's item numbering, so adjust it for this deletion.
@@ -421,11 +422,11 @@ btree_xlog_split(bool onleft, bool isroot,
/* Fix left-link of the page to the right of the new right sibling */
if (xlrec->rnext != P_NONE && !(record->xl_info & XLR_BKP_BLOCK_2))
{
Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
if (BufferIsValid(buffer))
{
Page page = (Page) BufferGetPage(buffer);
Page page = (Page) BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -795,7 +796,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -807,7 +808,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -819,7 +820,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -831,7 +832,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}