pgindent run on all C files. Java run to follow. initdb/regression
tests pass.
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.42 2001/05/03 19:00:36 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.43 2001/10/25 05:49:21 momjian Exp $
 *
 * NOTES
 *
@@ -25,7 +25,7 @@
 * NOTE: although any negative int32 is acceptable for reporting "<",
 * and any positive int32 is acceptable for reporting ">", routines
 * that work on 32-bit or wider datatypes can't just return "a - b".
-* That could overflow and give the wrong answer. Also, one should not
+* That could overflow and give the wrong answer. Also, one should not
 * return INT_MIN to report "<", since some callers will negate the result.
 *
 * NOTE: it is critical that the comparison function impose a total order

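The note in this hunk is why btree comparison routines report order with a plain -1/0/+1 (or any sign-correct value) rather than a subtraction. A minimal, self-contained sketch of an overflow-safe int32 comparator in the spirit of that note follows; it is not the actual nbtcompare.c code, which goes through the fmgr PG_FUNCTION_ARGS interface.

#include <stdint.h>
#include <stdio.h>

/*
 * Overflow-safe three-way comparison for 32-bit integers (illustrative,
 * not the fmgr-based btint4cmp).  Returns <0, 0, or >0, but never INT_MIN
 * and never "a - b", so callers may safely negate the result to reverse
 * the sort order.
 */
static int
int32_cmp(int32_t a, int32_t b)
{
	if (a > b)
		return 1;
	if (a < b)
		return -1;
	return 0;
}

int
main(void)
{
	/* "a - b" would overflow in both cases and report the wrong sign. */
	printf("%d\n", int32_cmp(INT32_MIN, 1) < 0);			/* prints 1 */
	printf("%d\n", int32_cmp(2000000000, -2000000000) > 0);	/* prints 1 */
	return 0;
}
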
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.86 2001/09/29 23:49:51 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.87 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -1100,7 +1100,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
 * If we are not on the leaf level, we will be able to discard the key
 * data from the first item that winds up on the right page.
 */
-if (! state->is_leaf)
+if (!state->is_leaf)
 rightfree += (int) firstrightitemsz -
 (int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));

@@ -1115,7 +1115,8 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
 {
 /*
 * On a rightmost page, try to equalize right free space with
-* twice the left free space. See comments for _bt_findsplitloc.
+* twice the left free space. See comments for
+* _bt_findsplitloc.
 */
 delta = (2 * leftfree) - rightfree;
 }

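The hunk above touches the split-point scoring the comment describes: on the rightmost page the code prefers to leave roughly twice as much free space on the right half as on the left, since new keys usually keep arriving at the high end. Below is a small, hypothetical worked example of that scoring rule; leftfree, rightfree, and delta mirror the names in the diff, while the rest of the _bt_checksplitloc bookkeeping is omitted and the non-rightmost "even split" case is this sketch's assumption.

#include <stdio.h>
#include <stdlib.h>

/*
 * Score one candidate split point the way the comment describes: on a
 * rightmost page aim for rightfree ~= 2 * leftfree, otherwise aim for an
 * even split (sketch's assumption; see _bt_findsplitloc for the real rules).
 * A smaller |delta| means a better split.
 */
static int
split_delta(int leftfree, int rightfree, int is_rightmost)
{
	int		delta;

	if (is_rightmost)
		delta = (2 * leftfree) - rightfree;	/* same formula as the hunk */
	else
		delta = leftfree - rightfree;

	return abs(delta);
}

int
main(void)
{
	/* Rightmost page: leaving ~2x free space on the right scores well. */
	printf("%d\n", split_delta(2000, 4100, 1));	/* 100 */
	/* Interior page: an even split scores well. */
	printf("%d\n", split_delta(3000, 3100, 0));	/* 100 */
	return 0;
}
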
@@ -1618,7 +1619,6 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)

 for (;;)
 {
-
 /*
 * Read up to 2 more child pages and look for pointers to them in
 * *saved* parent page

--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.53 2001/07/15 22:48:16 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.54 2001/10/25 05:49:21 momjian Exp $
 *
 * NOTES
 * Postgres btree pages look like ordinary relation pages. The opaque
@@ -153,7 +153,6 @@ _bt_getroot(Relation rel, int access)
 */
 if (metad->btm_root == P_NONE)
 {
-
 /*
 * Get, initialize, write, and leave a lock of the appropriate
 * type on the new root page. Since this is the first page in
@@ -209,7 +208,6 @@ _bt_getroot(Relation rel, int access)
 }
 else
 {
-
 /*
 * Metadata initialized by someone else. In order to
 * guarantee no deadlocks, we have to release the metadata
@@ -237,7 +235,6 @@ _bt_getroot(Relation rel, int access)

 if (!P_ISROOT(rootopaque))
 {
-
 /*
 * It happened, but if root page splitter failed to create new
 * root page then we'll go in loop trying to call _bt_getroot
@@ -402,7 +399,6 @@ _bt_wrtnorelbuf(Relation rel, Buffer buf)
 void
 _bt_pageinit(Page page, Size size)
 {
-
 /*
 * Cargo_cult programming -- don't really need this to be zero, but
 * creating new pages is an infrequent occurrence and it makes me feel

--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.82 2001/07/15 22:48:16 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.83 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -37,6 +37,7 @@ typedef struct
 bool haveDead;
 Relation heapRel;
 BTSpool *spool;
+
 /*
 * spool2 is needed only when the index is an unique index. Dead
 * tuples are put into spool2 instead of spool in order to avoid
@@ -58,11 +59,11 @@ bool FixBTree = true;

 static void _bt_restscan(IndexScanDesc scan);
 static void btbuildCallback(Relation index,
-HeapTuple htup,
-Datum *attdata,
-char *nulls,
-bool tupleIsAlive,
-void *state);
+HeapTuple htup,
+Datum *attdata,
+char *nulls,
+bool tupleIsAlive,
+void *state);


 /*
@@ -134,6 +135,7 @@ btbuild(PG_FUNCTION_ARGS)
 if (buildstate.usefast)
 {
 buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique);
+
 /*
 * Different from spool, the uniqueness isn't checked for spool2.
 */
@@ -214,7 +216,7 @@ btbuildCallback(Relation index,
 bool tupleIsAlive,
 void *state)
 {
-BTBuildState *buildstate = (BTBuildState *) state;
+BTBuildState *buildstate = (BTBuildState *) state;
 IndexTuple itup;
 BTItem btitem;
 InsertIndexResult res;
@@ -226,9 +228,9 @@ btbuildCallback(Relation index,
 btitem = _bt_formitem(itup);

 /*
-* if we are doing bottom-up btree build, we insert the index into
-* a spool file for subsequent processing. otherwise, we insert
-* into the btree.
+* if we are doing bottom-up btree build, we insert the index into a
+* spool file for subsequent processing. otherwise, we insert into
+* the btree.
 */
 if (buildstate->usefast)
 {

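The btbuildCallback comment in this hunk explains the bottom-up build: during a bulk build each index entry is written into a spool for later sorting and page-at-a-time loading instead of being inserted into the tree one key at a time. A toy illustration of that split follows; it is a minimal sketch, not PostgreSQL's spool/tuplesort machinery, and the "spool" here is just an in-memory array that is sorted once at the end.

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy "bottom-up build": instead of inserting keys into a tree one at a
 * time, collect ("spool") them, sort once, and emit them in order -- an
 * ordered stream is what lets a real btree build fill pages left to right.
 */
static int
cmp_int(const void *a, const void *b)
{
	int		x = *(const int *) a;
	int		y = *(const int *) b;

	return (x > y) - (x < y);	/* overflow-safe, per the nbtcompare note */
}

int
main(void)
{
	int		spool[] = {42, 7, 19, 3, 88};	/* output of the "callback" phase */
	size_t	n = sizeof(spool) / sizeof(spool[0]);
	size_t	i;

	qsort(spool, n, sizeof(int), cmp_int);	/* "load" phase: sort once */
	for (i = 0; i < n; i++)
		printf("%d ", spool[i]);	/* pages would be built from this stream */
	printf("\n");
	return 0;
}
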
@@ -305,7 +307,6 @@ btgettuple(PG_FUNCTION_ARGS)

 if (ItemPointerIsValid(&(scan->currentItemData)))
 {
-
 /*
 * Restore scan position using heap TID returned by previous call
 * to btgettuple(). _bt_restscan() re-grabs the read lock on the
@@ -321,7 +322,7 @@ btgettuple(PG_FUNCTION_ARGS)
 * Save heap TID to use it in _bt_restscan. Then release the read
 * lock on the buffer so that we aren't blocking other backends.
 *
-* NOTE: we do keep the pin on the buffer! This is essential to ensure
+* NOTE: we do keep the pin on the buffer! This is essential to ensure
 * that someone else doesn't delete the index entry we are stopped on.
 */
 if (res)

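The NOTE above distinguishes two levels of protection on a shared buffer: the content lock, which is released so other backends can use the page, and the pin, which is kept so the page and the entry the scan is stopped on cannot be removed underneath it. The following is only a rough analogy in plain C, with a made-up Buffer struct whose reference count stands in for the pin and a POSIX read-write lock stands in for the content lock; it is not the PostgreSQL buffer manager API.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a shared buffer: refcount ~ pin, rwlock ~ content lock. */
typedef struct
{
	int				pin_count;		/* > 0 means the page may not be recycled */
	pthread_rwlock_t content_lock;	/* serializes access to page contents */
} Buffer;

static void
pause_scan(Buffer *buf)
{
	/* Keep the pin, but release the content lock so others can proceed. */
	pthread_rwlock_unlock(&buf->content_lock);
	printf("lock released, pin still held (pin_count=%d)\n", buf->pin_count);
}

int
main(void)
{
	Buffer	buf;

	buf.pin_count = 0;
	pthread_rwlock_init(&buf.content_lock, NULL);

	buf.pin_count++;							/* pin: page stays resident */
	pthread_rwlock_rdlock(&buf.content_lock);	/* read lock to examine it */
	pause_scan(&buf);							/* stop between btgettuple calls */

	/* ...later, when the scan resumes, re-take the lock, then unpin at the end. */
	pthread_rwlock_rdlock(&buf.content_lock);
	pthread_rwlock_unlock(&buf.content_lock);
	buf.pin_count--;
	pthread_rwlock_destroy(&buf.content_lock);
	return 0;
}
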
@@ -362,7 +363,6 @@ btrescan(PG_FUNCTION_ARGS)

 #ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
 bool fromEnd = PG_GETARG_BOOL(1);
-
 #endif
 ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
 ItemPointer iptr;
@@ -547,7 +547,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
 IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
 void *callback_state = (void *) PG_GETARG_POINTER(2);
 IndexBulkDeleteResult *result;
-BlockNumber num_pages;
+BlockNumber num_pages;
 double tuples_removed;
 double num_index_tuples;
 RetrieveIndexResult res;
@@ -559,15 +559,16 @@ btbulkdelete(PG_FUNCTION_ARGS)
 num_index_tuples = 0;

 /*
-* We use a standard IndexScanDesc scan object, but to speed up the loop,
-* we skip most of the wrapper layers of index_getnext and instead call
-* _bt_step directly. This implies holding buffer lock on a target page
-* throughout the loop over the page's tuples. Initially, we have a read
-* lock acquired by _bt_step when we stepped onto the page. If we find
-* a tuple we need to delete, we trade in the read lock for an exclusive
-* write lock; after that, we hold the write lock until we step off the
-* page (fortunately, _bt_relbuf doesn't care which kind of lock it's
-* releasing). This should minimize the amount of work needed per page.
+* We use a standard IndexScanDesc scan object, but to speed up the
+* loop, we skip most of the wrapper layers of index_getnext and
+* instead call _bt_step directly. This implies holding buffer lock
+* on a target page throughout the loop over the page's tuples.
+* Initially, we have a read lock acquired by _bt_step when we stepped
+* onto the page. If we find a tuple we need to delete, we trade in
+* the read lock for an exclusive write lock; after that, we hold the
+* write lock until we step off the page (fortunately, _bt_relbuf
+* doesn't care which kind of lock it's releasing). This should
+* minimize the amount of work needed per page.
 */
 scan = index_beginscan(rel, false, 0, (ScanKey) NULL);
 so = (BTScanOpaque) scan->opaque;

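This comment, together with the "first deletion on this page" hunk further down, describes a lock-trading protocol: scan a page under a shared read lock, and only when a tuple on that page actually needs deleting give up the read lock, take an exclusive lock, and back up one item to re-check, because another backend may have changed the page in the unlocked window. Below is a rough, self-contained sketch of that pattern using a POSIX read-write lock in place of PostgreSQL's buffer locks; the page contents and the needs_delete test are made up for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t page_lock = PTHREAD_RWLOCK_INITIALIZER;
static int	page_items[] = {10, -3, 7, -8, 5};	/* negative = "dead", delete it */
#define NITEMS	((int) (sizeof(page_items) / sizeof(page_items[0])))

static bool
needs_delete(int item)
{
	return item < 0;
}

int
main(void)
{
	bool	have_write_lock = false;
	int		i;

	pthread_rwlock_rdlock(&page_lock);	/* enter the page under a read lock */
	for (i = 0; i < NITEMS; i++)
	{
		if (needs_delete(page_items[i]) && !have_write_lock)
		{
			/*
			 * First deletion on this page: trade the read lock for an
			 * exclusive lock, then step back one item and re-examine it,
			 * since someone may have changed the page while we held no lock.
			 */
			pthread_rwlock_unlock(&page_lock);
			pthread_rwlock_wrlock(&page_lock);
			have_write_lock = true;
			i--;				/* analogous to current->ip_posid-- */
			continue;
		}
		if (needs_delete(page_items[i]))
			printf("delete item %d (%d)\n", i, page_items[i]);
	}
	pthread_rwlock_unlock(&page_lock);	/* either kind of lock is released here */
	return 0;
}
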
@@ -579,7 +580,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
 if (res != NULL)
 {
 Buffer buf;
-BlockNumber lockedBlock = InvalidBlockNumber;
+BlockNumber lockedBlock = InvalidBlockNumber;

 pfree(res);
 /* we have the buffer pinned and locked */
@@ -589,11 +590,11 @@ btbulkdelete(PG_FUNCTION_ARGS)
 do
 {
 Page page;
-BlockNumber blkno;
+BlockNumber blkno;
 OffsetNumber offnum;
 BTItem btitem;
 IndexTuple itup;
-ItemPointer htup;
+ItemPointer htup;

 /* current is the next index tuple */
 blkno = ItemPointerGetBlockNumber(current);
@@ -607,9 +608,10 @@ btbulkdelete(PG_FUNCTION_ARGS)
 {
 /*
 * If this is first deletion on this page, trade in read
-* lock for a really-exclusive write lock. Then, step back
-* one and re-examine the item, because someone else might
-* have inserted an item while we weren't holding the lock!
+* lock for a really-exclusive write lock. Then, step
+* back one and re-examine the item, because someone else
+* might have inserted an item while we weren't holding
+* the lock!
 */
 if (blkno != lockedBlock)
 {
@@ -632,8 +634,8 @@ btbulkdelete(PG_FUNCTION_ARGS)
 * We need to back up the scan one item so that the next
 * cycle will re-examine the same offnum on this page.
 *
-* For now, just hack the current-item index. Will need
-* to be smarter when deletion includes removal of empty
+* For now, just hack the current-item index. Will need to
+* be smarter when deletion includes removal of empty
 * index pages.
 */
 current->ip_posid--;

--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.68 2001/10/06 23:21:43 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.69 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -589,10 +589,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)

 /*
 * At this point we are positioned at the first item >= scan key, or
-* possibly at the end of a page on which all the existing items are
-* greater than the scan key and we know that everything on later pages
-* is less than or equal to scan key.
-*
+* possibly at the end of a page on which all the existing items are
+* greater than the scan key and we know that everything on later
+* pages is less than or equal to scan key.
+*
 * We could step forward in the latter case, but that'd be a waste of
 * time if we want to scan backwards. So, it's now time to examine
 * the scan strategy to find the exact place to start the scan.

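The comment in this hunk describes the state after the initial descent: the scan sits on the first index item greater than or equal to the scan key, or just past the end of a page of larger items. The same positioning rule, in miniature, is a lower-bound binary search over a sorted array; here is a small self-contained illustration, not the actual _bt_binsrch code.

#include <stdio.h>

/*
 * Return the index of the first element >= key in a sorted array, or n if
 * every element is smaller -- the "first item >= scan key" position the
 * comment talks about.
 */
static int
lower_bound(const int *a, int n, int key)
{
	int		lo = 0;
	int		hi = n;

	while (lo < hi)
	{
		int		mid = lo + (hi - lo) / 2;

		if (a[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

int
main(void)
{
	int		page[] = {3, 7, 7, 12, 19, 25};
	int		n = sizeof(page) / sizeof(page[0]);

	printf("%d\n", lower_bound(page, n, 7));	/* 1: first of the 7s */
	printf("%d\n", lower_bound(page, n, 13));	/* 4: first item >= 13 is 19 */
	printf("%d\n", lower_bound(page, n, 99));	/* 6: past the end of the "page" */
	return 0;
}
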
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -35,7 +35,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.61 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -108,7 +108,7 @@ static void _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2);
 /*
 * create and initialize a spool structure
 */
-BTSpool *
+BTSpool *
 _bt_spoolinit(Relation index, bool isunique)
 {
 BTSpool *btspool = (BTSpool *) palloc(sizeof(BTSpool));
@@ -354,7 +354,6 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)

 if (pgspc < btisz || pgspc < state->btps_full)
 {
-
 /*
 * Item won't fit on this page, or we feel the page is full enough
 * already. Finish off the page and write it out.
@@ -544,7 +543,6 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)

 if (merge)
 {
-
 /*
 * Another BTSpool for dead tuples exists. Now we have to merge
 * btspool and btspool2.

--- a/src/backend/access/nbtree/nbtstrat.c
+++ b/src/backend/access/nbtree/nbtstrat.c
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtstrat.c,v 1.14 2001/05/30 19:53:40 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtstrat.c,v 1.15 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -134,5 +134,4 @@ _bt_invokestrat(Relation rel,
 return (RelationInvokeStrategy(rel, &BTEvaluationData, attno, strat,
 left, right));
 }
-
 #endif

--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.46 2001/10/06 23:21:43 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.47 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -221,7 +221,6 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
 /* We can short-circuit most of the work if there's just one key */
 if (numberOfKeys == 1)
 {
-
 /*
 * We don't use indices for 'A is null' and 'A is not null'
 * currently and 'A < = > <> NULL' will always fail - so qual is
@@ -317,7 +316,6 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
 }
 else
 {
-
 /*
 * No "=" for this key, so we're done with required keys
 */