
pgindent run.

Bruce Momjian
2002-09-04 20:31:48 +00:00
parent c91ceec21d
commit e50f52a074
446 changed files with 14942 additions and 13363 deletions


@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.91 2002/06/20 20:29:25 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.92 2002/09/04 20:31:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -315,24 +315,28 @@ btgettuple(PG_FUNCTION_ARGS)
      * buffer, too.
      */
     _bt_restscan(scan);
     /*
      * Check to see if we should kill the previously-fetched tuple.
      */
     if (scan->kill_prior_tuple)
     {
         /*
-         * Yes, so mark it by setting the LP_DELETE bit in the item flags.
+         * Yes, so mark it by setting the LP_DELETE bit in the item
+         * flags.
          */
         offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
         page = BufferGetPage(so->btso_curbuf);
         PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
         /*
          * Since this can be redone later if needed, it's treated the
-         * same as a commit-hint-bit status update for heap tuples:
-         * we mark the buffer dirty but don't make a WAL log entry.
+         * same as a commit-hint-bit status update for heap tuples: we
+         * mark the buffer dirty but don't make a WAL log entry.
          */
         SetBufferCommitInfoNeedsSave(so->btso_curbuf);
     }
     /*
      * Now continue the scan.
      */
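The hunk above is btgettuple()'s kill_prior_tuple handling. Below is a condensed sketch of that path; the standalone helper framing and the header list are illustrative, but the calls are the ones visible in the diff: set the LP_DELETE hint bit on the previously returned item and dirty the buffer without writing WAL.

#include "postgres.h"
#include "access/nbtree.h"
#include "storage/bufmgr.h"

/* Illustrative condensation of the kill_prior_tuple path shown above. */
static void
kill_prior_index_tuple(IndexScanDesc scan, BTScanOpaque so)
{
    OffsetNumber offnum;
    Page         page;

    /* locate the item returned by the previous btgettuple() call */
    offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
    page = BufferGetPage(so->btso_curbuf);

    /* mark it dead via the LP_DELETE line-pointer flag */
    PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;

    /*
     * Like a commit-hint-bit update on a heap tuple: mark the buffer
     * dirty but emit no WAL record, since the bit can simply be set
     * again later if this update is lost.
     */
    SetBufferCommitInfoNeedsSave(so->btso_curbuf);
}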
@@ -645,15 +649,15 @@ btbulkdelete(PG_FUNCTION_ARGS)
             /*
              * If this is first deletion on this page, trade in read
              * lock for a really-exclusive write lock. Then, step
-             * back one and re-examine the item, because other backends
-             * might have inserted item(s) while we weren't holding
-             * the lock!
+             * back one and re-examine the item, because other
+             * backends might have inserted item(s) while we weren't
+             * holding the lock!
              *
              * We assume that only concurrent insertions, not deletions,
-             * can occur while we're not holding the page lock (the caller
-             * should hold a suitable relation lock to ensure this).
-             * Therefore, the item we want to delete is either in the
-             * same slot as before, or some slot to its right.
+             * can occur while we're not holding the page lock (the
+             * caller should hold a suitable relation lock to ensure
+             * this). Therefore, the item we want to delete is either
+             * in the same slot as before, or some slot to its right.
              * Rechecking the same slot is necessary and sufficient to
              * get back in sync after any insertions.
              */
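The rewrapped comment above describes btbulkdelete()'s lock trade-in. The sketch below is a schematic of that step only, not the function's actual code; it assumes the caller already holds a pin and a shared content lock on buf.

#include "postgres.h"
#include "storage/bufmgr.h"

/* Schematic of the lock upgrade described in the comment above. */
static void
upgrade_to_cleanup_lock(Buffer buf)
{
    /* give up the shared content lock, keeping the buffer pin */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);

    /*
     * Re-acquire as a "really-exclusive" lock: an exclusive content
     * lock that also waits for every other backend's pin to go away.
     */
    LockBufferForCleanup(buf);

    /*
     * Concurrent backends may have inserted items while the lock was
     * not held, so the caller must step back one slot and re-examine
     * it (the ip_posid adjustment in the next hunk).
     */
}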
@@ -675,19 +679,19 @@ btbulkdelete(PG_FUNCTION_ARGS)
             }
             /*
-             * In either case, we now need to back up the scan one item,
-             * so that the next cycle will re-examine the same offnum on
-             * this page.
+             * In either case, we now need to back up the scan one
+             * item, so that the next cycle will re-examine the same
+             * offnum on this page.
              *
              * For now, just hack the current-item index. Will need to
              * be smarter when deletion includes removal of empty
              * index pages.
              *
              * We must decrement ip_posid in all cases but one: if the
-             * page was formerly rightmost but was split while we didn't
-             * hold the lock, and ip_posid is pointing to item 1, then
-             * ip_posid now points at the high key not a valid data item.
-             * In this case we do want to step forward.
+             * page was formerly rightmost but was split while we
+             * didn't hold the lock, and ip_posid is pointing to item
+             * 1, then ip_posid now points at the high key not a valid
+             * data item. In this case we do want to step forward.
              */
             opaque = (BTPageOpaque) PageGetSpecialPointer(page);
             if (current->ip_posid >= P_FIRSTDATAKEY(opaque))
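The comment rewrapped in this last hunk governs how the scan is backed up. A sketch of that step follows; the variable names and the helper framing come from the surrounding btbulkdelete() code, and the decrement in the if-body is inferred from the comment, since the hunk ends at the if-test.

#include "postgres.h"
#include "access/nbtree.h"
#include "storage/bufpage.h"
#include "storage/itemptr.h"

/*
 * Sketch of the back-up step described above; the decrement is inferred
 * from the comment, not shown in the hunk.
 */
static void
back_up_scan_item(ItemPointer current, Page page)
{
    BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);

    if (current->ip_posid >= P_FIRSTDATAKEY(opaque))
        current->ip_posid--;    /* next cycle re-examines the same slot */

    /*
     * Otherwise ip_posid points at the high key of a formerly-rightmost
     * page that was split under us; per the comment, we want to step
     * forward in that case, so no decrement is made.
     */
}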