
pgindent run.

Author: Bruce Momjian
Date:   2002-09-04 20:31:48 +00:00
parent c91ceec21d
commit e50f52a074
446 changed files with 14942 additions and 13363 deletions

src/backend/access/nbtree/nbtinsert.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.95 2002/08/06 02:36:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.96 2002/09/04 20:31:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -119,14 +119,14 @@ top:
  *
  * NOTE: obviously, _bt_check_unique can only detect keys that are
  * already in the index; so it cannot defend against concurrent
- * insertions of the same key. We protect against that by means
- * of holding a write lock on the target page. Any other would-be
+ * insertions of the same key. We protect against that by means of
+ * holding a write lock on the target page. Any other would-be
  * inserter of the same key must acquire a write lock on the same
  * target page, so only one would-be inserter can be making the check
- * at one time. Furthermore, once we are past the check we hold
- * write locks continuously until we have performed our insertion,
- * so no later inserter can fail to see our insertion. (This
- * requires some care in _bt_insertonpg.)
+ * at one time. Furthermore, once we are past the check we hold write
+ * locks continuously until we have performed our insertion, so no
+ * later inserter can fail to see our insertion. (This requires some
+ * care in _bt_insertonpg.)
  *
  * If we must wait for another xact, we release the lock while waiting,
  * and then must start over completely.
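
The protocol in this comment is the classic check-then-insert pattern: the
uniqueness probe is only sound because a single write lock covers both the
check and the insertion that follows it. A minimal standalone sketch of the
idea in plain C, with a pthread mutex standing in for the target-page write
lock (insert_unique, page_keys, and page_lock are invented names, not
PostgreSQL APIs):

    #include <pthread.h>
    #include <stdbool.h>

    #define MAX_KEYS 64

    /* One mutex plays the role of the target page's write lock. */
    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
    static int page_keys[MAX_KEYS];
    static int nkeys = 0;

    /* Returns true if inserted, false if a duplicate already existed. */
    static bool
    insert_unique(int key)
    {
        /* Every competing inserter of this key must take the same lock,
         * so only one check can be in progress at a time. */
        pthread_mutex_lock(&page_lock);

        for (int i = 0; i < nkeys; i++)
        {
            if (page_keys[i] == key)
            {
                pthread_mutex_unlock(&page_lock);
                return false;   /* duplicate found */
            }
        }

        /* Still holding the lock: no later inserter can miss this. */
        if (nkeys < MAX_KEYS)
            page_keys[nkeys++] = key;

        pthread_mutex_unlock(&page_lock);
        return true;
    }

Because the lock is held continuously from probe to insert, the window
between "no duplicate found" and "tuple inserted" is closed.
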
@@ -205,15 +205,16 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
 		if (offset <= maxoff)
 		{
 			/*
-			 * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's how we
-			 * handling NULLs - and so we must not use _bt_compare in real
-			 * comparison, but only for ordering/finding items on pages. -
-			 * vadim 03/24/97
+			 * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
+			 * how we handling NULLs - and so we must not use _bt_compare
+			 * in real comparison, but only for ordering/finding items on
+			 * pages. - vadim 03/24/97
 			 */
 			if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
 				break;			/* we're past all the equal tuples */
 
 			curitemid = PageGetItemId(page, offset);
+
 			/*
 			 * We can skip the heap fetch if the item is marked killed.
 			 */
@@ -226,10 +227,11 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
 			{
 				/* it is a duplicate */
 				TransactionId xwait =
-				(TransactionIdIsValid(SnapshotDirty->xmin)) ?
-				SnapshotDirty->xmin : SnapshotDirty->xmax;
+					(TransactionIdIsValid(SnapshotDirty->xmin)) ?
+					SnapshotDirty->xmin : SnapshotDirty->xmax;
 
 				ReleaseBuffer(hbuffer);
+
 				/*
 				 * If this tuple is being updated by other transaction
 				 * then we have to wait for its commit/abort.
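
The ternary above picks which transaction to wait on: the still-in-progress
inserter (xmin) when SnapshotDirty reports one as valid, otherwise the
deleter/updater (xmax). Combined with the rule stated in the header comment
(release the lock while waiting, then start over completely), the control
flow is roughly the following sketch; conflicting_xact and wait_for_xact
are stubbed-out stand-ins, not backend calls:

    #include <pthread.h>

    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stubs so the sketch compiles; the real logic is backend-internal. */
    static unsigned conflicting_xact(void) { return 0; } /* 0: no conflict */
    static void wait_for_xact(unsigned xid) { (void) xid; }
    static void do_insert(void) { }

    static void
    insert_with_wait(void)
    {
        for (;;)
        {
            unsigned xwait;

            pthread_mutex_lock(&page_lock);
            xwait = conflicting_xact(); /* xmin if still valid, else xmax */
            if (xwait == 0)
                break;          /* no in-doubt duplicate: keep the lock */

            pthread_mutex_unlock(&page_lock);   /* never sleep holding it */
            wait_for_xact(xwait);               /* returns at commit/abort */
            /* start over completely: the page may have changed meanwhile */
        }
        do_insert();            /* still holding the write lock */
        pthread_mutex_unlock(&page_lock);
    }
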
@@ -252,8 +254,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
 			{
 				/*
 				 * Hmm, if we can't see the tuple, maybe it can be
-				 * marked killed. This logic should match index_getnext
-				 * and btgettuple.
+				 * marked killed. This logic should match
+				 * index_getnext and btgettuple.
 				 */
 				uint16		sv_infomask;
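
The sv_infomask declaration begins what is presumably the usual hint-bit
pattern: save the tuple's flag word, run a visibility check that may set
hint bits as a side effect, and dirty the buffer only if the flags actually
changed. A compilable toy version, with every name invented for
illustration:

    #include <stdbool.h>
    #include <stdint.h>

    struct tuple  { uint16_t infomask; };
    struct buffer { bool dirty; };

    #define HINT_XMIN_COMMITTED 0x0001  /* toy hint bit, not the real flag */

    /* A visibility check that may record what it learned as a hint bit. */
    static bool
    tuple_is_dead(struct tuple *tup)
    {
        tup->infomask |= HINT_XMIN_COMMITTED;
        return true;
    }

    static void
    check_and_maybe_dirty(struct tuple *tup, struct buffer *buf)
    {
        uint16_t sv_infomask = tup->infomask;  /* save flags before check */
        bool dead = tuple_is_dead(tup);

        /* Dirty the buffer only if the check changed any hint bits;
         * like a commit-hint-bit update, this writes no WAL. */
        if (sv_infomask != tup->infomask)
            buf->dirty = true;

        (void) dead;
    }
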
@@ -421,7 +423,7 @@ _bt_insertonpg(Relation rel,
 	{
 		/* step right one page */
 		BlockNumber rblkno = lpageop->btpo_next;
-		Buffer rbuf;
+		Buffer		rbuf;
 
 		/*
 		 * must write-lock next page before releasing write lock on
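
The rule the comment is stating (write-lock the right sibling before
releasing the lock on the current page) is hand-over-hand lock coupling:
there is never an instant at which the walker holds neither lock, so no
other backend can change the chain between the two pages mid-step. Sketched
with two pthread mutexes; cur_lock and next_lock are illustrative names:

    #include <pthread.h>

    static pthread_mutex_t cur_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t next_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    step_right(void)
    {
        pthread_mutex_lock(&cur_lock);      /* holding the current page */

        /* ... find no room here; must move to the right sibling ... */

        pthread_mutex_lock(&next_lock);     /* lock the next page FIRST */
        pthread_mutex_unlock(&cur_lock);    /* only now give up this one */

        /* ... insert onto the right sibling ... */

        pthread_mutex_unlock(&next_lock);
    }
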

src/backend/access/nbtree/nbtree.c

@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.91 2002/06/20 20:29:25 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.92 2002/09/04 20:31:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -315,24 +315,28 @@ btgettuple(PG_FUNCTION_ARGS)
 	 * buffer, too.
 	 */
 	_bt_restscan(scan);
+
 	/*
 	 * Check to see if we should kill the previously-fetched tuple.
 	 */
 	if (scan->kill_prior_tuple)
 	{
 		/*
-		 * Yes, so mark it by setting the LP_DELETE bit in the item flags.
+		 * Yes, so mark it by setting the LP_DELETE bit in the item
+		 * flags.
 		 */
 		offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
 		page = BufferGetPage(so->btso_curbuf);
 		PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
+
 		/*
 		 * Since this can be redone later if needed, it's treated the
-		 * same as a commit-hint-bit status update for heap tuples:
-		 * we mark the buffer dirty but don't make a WAL log entry.
+		 * same as a commit-hint-bit status update for heap tuples: we
+		 * mark the buffer dirty but don't make a WAL log entry.
 		 */
 		SetBufferCommitInfoNeedsSave(so->btso_curbuf);
 	}
+
 	/*
 	 * Now continue the scan.
 	 */
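
Setting LP_DELETE is a lossy, redoable hint, which is exactly why the
buffer is dirtied without writing WAL: if the bit is lost in a crash, a
later scan simply derives and sets it again. A toy model of the update (the
flag value and structs are simplifications, not the real item-id layout):

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_LP_DELETE 0x0004        /* stand-in for the LP_DELETE bit */

    struct item_id { uint16_t lp_flags; };
    struct buffer  { bool dirty; };

    static void
    kill_prior_tuple(struct item_id *itemid, struct buffer *buf)
    {
        itemid->lp_flags |= TOY_LP_DELETE;  /* mark the index entry dead */

        /* Treated like a commit-hint-bit update on a heap tuple:
         * mark the buffer dirty, but emit no WAL record. */
        buf->dirty = true;
    }
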
@@ -645,15 +649,15 @@ btbulkdelete(PG_FUNCTION_ARGS)
 					/*
 					 * If this is first deletion on this page, trade in read
 					 * lock for a really-exclusive write lock. Then, step
-					 * back one and re-examine the item, because other backends
-					 * might have inserted item(s) while we weren't holding
-					 * the lock!
+					 * back one and re-examine the item, because other
+					 * backends might have inserted item(s) while we weren't
+					 * holding the lock!
 					 *
 					 * We assume that only concurrent insertions, not deletions,
-					 * can occur while we're not holding the page lock (the caller
-					 * should hold a suitable relation lock to ensure this).
-					 * Therefore, the item we want to delete is either in the
-					 * same slot as before, or some slot to its right.
+					 * can occur while we're not holding the page lock (the
+					 * caller should hold a suitable relation lock to ensure
+					 * this). Therefore, the item we want to delete is either
+					 * in the same slot as before, or some slot to its right.
 					 * Rechecking the same slot is necessary and sufficient to
 					 * get back in sync after any insertions.
 					 */
@@ -675,19 +679,19 @@ btbulkdelete(PG_FUNCTION_ARGS)
 				}
 
 				/*
-				 * In either case, we now need to back up the scan one item,
-				 * so that the next cycle will re-examine the same offnum on
-				 * this page.
+				 * In either case, we now need to back up the scan one
+				 * item, so that the next cycle will re-examine the same
+				 * offnum on this page.
 				 *
 				 * For now, just hack the current-item index. Will need to
 				 * be smarter when deletion includes removal of empty
 				 * index pages.
 				 *
 				 * We must decrement ip_posid in all cases but one: if the
-				 * page was formerly rightmost but was split while we didn't
-				 * hold the lock, and ip_posid is pointing to item 1, then
-				 * ip_posid now points at the high key not a valid data item.
-				 * In this case we do want to step forward.
+				 * page was formerly rightmost but was split while we
+				 * didn't hold the lock, and ip_posid is pointing to item
+				 * 1, then ip_posid now points at the high key not a valid
+				 * data item. In this case we do want to step forward.
 				 */
 				opaque = (BTPageOpaque) PageGetSpecialPointer(page);
 				if (current->ip_posid >= P_FIRSTDATAKEY(opaque))
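
The decrement rule reduces to a single comparison: back the position up
unless a concurrent split of a formerly-rightmost page has left ip_posid
pointing at the new high key in slot 1, in which case not decrementing
amounts to stepping forward. A condensed sketch with simplified stand-ins
for P_FIRSTDATAKEY and the page state:

    #include <stdbool.h>

    /* On a non-rightmost page slot 1 holds the high key, so data items
     * start at slot 2; a rightmost page has no high key. */
    static int
    first_data_key(bool rightmost)
    {
        return rightmost ? 1 : 2;
    }

    static int
    back_up_scan(int ip_posid, bool page_is_rightmost)
    {
        if (ip_posid >= first_data_key(page_is_rightmost))
            return ip_posid - 1;    /* re-examine the same item next cycle */

        /* The split demoted old item 1 to the high key: leaving the
         * position alone effectively steps forward to real data. */
        return ip_posid;
    }
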

src/backend/access/nbtree/nbtutils.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.50 2002/06/20 20:29:25 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.51 2002/09/04 20:31:12 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -22,7 +22,7 @@
 #include "executor/execdebug.h"
 
-static int _bt_getstrategynumber(RegProcedure sk_procedure, StrategyMap map);
+static int	_bt_getstrategynumber(RegProcedure sk_procedure, StrategyMap map);
 
 /*
@@ -178,7 +178,7 @@ _bt_formitem(IndexTuple itup)
  * example.
  *
  * Furthermore, we detect the case where the index is unique and we have
- * equality quals for all columns. In this case there can be at most one
+ * equality quals for all columns.  In this case there can be at most one
  * (visible) matching tuple. index_getnext uses this to avoid uselessly
  * continuing the scan after finding one match.
  *
@@ -439,8 +439,8 @@ _bt_orderkeys(IndexScanDesc scan)
 	so->numberOfKeys = new_numberOfKeys;
 
 	/*
-	 * If unique index and we have equality keys for all columns,
-	 * set keys_are_unique flag for higher levels.
+	 * If unique index and we have equality keys for all columns, set
+	 * keys_are_unique flag for higher levels.
 	 */
 	if (allEqualSoFar && relation->rd_index->indisunique &&
 		relation->rd_rel->relnatts == new_numberOfKeys)
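
The keys_are_unique flag exists for the consumer's benefit: a unique index
constrained by equality on every column can yield at most one visible
tuple, so the scan may stop after its first match instead of walking on. A
sketch of that consumer-side shortcut (struct scan and get_next are
invented here, not the real index_getnext machinery):

    #include <stdbool.h>

    struct scan
    {
        bool keys_are_unique;   /* set when a unique index has equality
                                 * quals on all of its columns */
        bool got_match;
    };

    /* Returns false when the scan is exhausted. */
    static bool
    get_next(struct scan *scan)
    {
        /* At most one visible tuple can match: once it has been
         * returned, continuing the scan would be useless work. */
        if (scan->keys_are_unique && scan->got_match)
            return false;

        /* ... fetch the next matching tuple here ... */
        scan->got_match = true;
        return true;
    }
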