pgindent run on all C files. Java run to follow. initdb/regression tests pass.
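
For readers skimming the hunks below: pgindent is PostgreSQL's mechanical source-code formatter, so most of the paired old/new lines differ only in whitespace (re-indented continuation lines, reflowed comment blocks, blank lines dropped after an opening brace or before #endif, and a blank line added before #ifdef blocks that follow declarations). A hypothetical before/after fragment, not taken from this commit, illustrating the kind of change involved:

#include <stddef.h>

/*
 * Hypothetical illustration only (names and style details invented):
 * pgindent-style output aligns declared names into a fixed column and
 * drops the blank line after the opening brace.
 */
int
example_before(void)
{

	int x = 0;
	char *name = "hash";

	return x + (name != NULL);
}

int
example_after(void)
{
	int			x = 0;
	char	   *name = "hash";

	return x + (name != NULL);
}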
src/backend/access/hash/hash.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.52 2001/07/15 22:48:15 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.53 2001/10/25 05:49:20 momjian Exp $
 *
 * NOTES
 * This file contains only the public interface routines.

@@ -37,11 +37,11 @@ typedef struct
 } HashBuildState;

 static void hashbuildCallback(Relation index,
-HeapTuple htup,
-Datum *attdata,
-char *nulls,
-bool tupleIsAlive,
-void *state);
+HeapTuple htup,
+Datum *attdata,
+char *nulls,
+bool tupleIsAlive,
+void *state);


 /*

@@ -80,7 +80,7 @@ hashbuild(PG_FUNCTION_ARGS)

 /* do the heap scan */
 reltuples = IndexBuildHeapScan(heap, index, indexInfo,
-hashbuildCallback, (void *) &buildstate);
+hashbuildCallback, (void *) &buildstate);

 /* all done */
 BuildingHash = false;

@@ -121,7 +121,7 @@ hashbuildCallback(Relation index,
 bool tupleIsAlive,
 void *state)
 {
-HashBuildState *buildstate = (HashBuildState *) state;
+HashBuildState *buildstate = (HashBuildState *) state;
 IndexTuple itup;
 HashItem hitem;
 InsertIndexResult res;

@@ -164,6 +164,7 @@ hashinsert(PG_FUNCTION_ARGS)
 Datum *datum = (Datum *) PG_GETARG_POINTER(1);
 char *nulls = (char *) PG_GETARG_POINTER(2);
 ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
 #ifdef NOT_USED
 Relation heapRel = (Relation) PG_GETARG_POINTER(4);
 #endif

@@ -176,14 +177,13 @@ hashinsert(PG_FUNCTION_ARGS)
 itup->t_tid = *ht_ctid;

 /*
-* If the single index key is null, we don't insert it into the
-* index. Hash tables support scans on '='. Relational algebra
-* says that A = B returns null if either A or B is null. This
-* means that no qualification used in an index scan could ever
-* return true on a null attribute. It also means that indices
-* can't be used by ISNULL or NOTNULL scans, but that's an
-* artifact of the strategy map architecture chosen in 1986, not
-* of the way nulls are handled here.
+* If the single index key is null, we don't insert it into the index.
+* Hash tables support scans on '='. Relational algebra says that A =
+* B returns null if either A or B is null. This means that no
+* qualification used in an index scan could ever return true on a
+* null attribute. It also means that indices can't be used by ISNULL
+* or NOTNULL scans, but that's an artifact of the strategy map
+* architecture chosen in 1986, not of the way nulls are handled here.
 */
 if (IndexTupleHasNulls(itup))
 {

@@ -262,7 +262,6 @@ hashrescan(PG_FUNCTION_ARGS)

 #ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
 bool fromEnd = PG_GETARG_BOOL(1);
-
 #endif
 ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
 ItemPointer iptr;

@@ -412,7 +411,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
 IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
 void *callback_state = (void *) PG_GETARG_POINTER(2);
 IndexBulkDeleteResult *result;
-BlockNumber num_pages;
+BlockNumber num_pages;
 double tuples_removed;
 double num_index_tuples;
 RetrieveIndexResult res;
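
The hash.c hunks above pass through hashbuild(), hashbuildCallback() and the comment explaining why null keys are never inserted: a hash index only supports '=' scans, and A = B is null whenever either side is null, so a null key can never satisfy a qual. A standalone sketch of that callback-with-opaque-state pattern, including the null-key skip; the types and function names here (BuildState, scan_heap, build_callback) are invented for illustration and are not PostgreSQL's internal API:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for HashBuildState: running count threaded through the scan */
typedef struct
{
	double		indtuples;
} BuildState;

typedef void (*BuildCallback) (long key, bool isnull, void *state);

/* stand-in for the heap scan: invoke the callback once per heap tuple */
static void
scan_heap(const long *keys, const bool *nulls, int ntuples,
		  BuildCallback callback, void *state)
{
	for (int i = 0; i < ntuples; i++)
		callback(keys[i], nulls[i], state);
}

/* per-tuple callback, in the spirit of hashbuildCallback() */
static void
build_callback(long key, bool isnull, void *state)
{
	BuildState *buildstate = (BuildState *) state;	/* recover typed state */

	if (isnull)
		return;					/* a null key can never match an '=' qual */

	/* a real AM would form and insert an index tuple for key here */
	(void) key;
	buildstate->indtuples += 1;
}

int
main(void)
{
	long		keys[] = {42, 7, 7, 13};
	bool		nulls[] = {false, true, false, false};
	BuildState	buildstate = {0};

	scan_heap(keys, nulls, 4, build_callback, &buildstate);
	printf("indexed %.0f of 4 tuples\n", buildstate.indtuples);
	return 0;
}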
src/backend/access/hash/hashinsert.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.22 2001/03/07 21:20:26 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.23 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -133,13 +133,11 @@ _hash_insertonpg(Relation rel,

 while (PageGetFreeSpace(page) < itemsz)
 {
-
 /*
 * no space on this page; check for an overflow page
 */
 if (BlockNumberIsValid(pageopaque->hasho_nextblkno))
 {
-
 /*
 * ovfl page exists; go get it. if it doesn't have room,
 * we'll find out next pass through the loop test above.

@@ -152,7 +150,6 @@ _hash_insertonpg(Relation rel,
 }
 else
 {
-
 /*
 * we're at the end of the bucket chain and we haven't found a
 * page with enough room. allocate a new overflow page.

@@ -184,7 +181,6 @@ _hash_insertonpg(Relation rel,

 if (res != NULL)
 {
-
 /*
 * Increment the number of keys in the table. We switch lock
 * access type just for a moment to allow greater accessibility to
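
The _hash_insertonpg() hunks above only drop blank lines, but the surrounding comments describe the insertion loop: while the current page lacks room for the item, follow the bucket's overflow-page chain, and allocate a fresh overflow page once the chain runs out. A minimal sketch of that loop using an invented in-memory page chain (no buffers, block numbers, or error handling):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_CAPACITY 8192		/* usable bytes per page */

typedef struct Page
{
	size_t		used;			/* bytes already consumed on this page */
	struct Page *next;			/* next overflow page, like hasho_nextblkno */
} Page;

/* append a new, empty overflow page at the end of the bucket chain */
static Page *
add_overflow_page(Page *tail)
{
	Page	   *ovfl = calloc(1, sizeof(Page));

	tail->next = ovfl;
	return ovfl;
}

/* walk the chain until a page with room for itemsz bytes is found */
static Page *
page_with_room(Page *page, size_t itemsz)
{
	while (PAGE_CAPACITY - page->used < itemsz)
	{
		if (page->next != NULL)
			page = page->next;	/* ovfl page exists; go get it */
		else
			page = add_overflow_page(page);		/* end of chain; allocate */
	}
	return page;
}

int
main(void)
{
	Page		bucket = {PAGE_CAPACITY - 10, NULL};	/* nearly full base page */
	Page	   *target = page_with_room(&bucket, 100);

	target->used += 100;
	printf("item placed on %s page\n",
		   target == &bucket ? "the base" : "an overflow");
	free(bucket.next);
	return 0;
}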
src/backend/access/hash/hashovfl.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.30 2001/07/15 22:48:15 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.31 2001/10/25 05:49:21 momjian Exp $
 *
 * NOTES
 * Overflow pages look like ordinary relation pages.

@@ -208,7 +208,6 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
 }
 else
 {
-
 /*
 * Free_bit addresses the last used bit. Bump it to address the
 * first available bit.
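
The _hash_getovfladdr() hunk above sits beside the comment about the overflow-page bitmap: free_bit addresses the last bit already in use, so it must be bumped to point at the first available bit before a new overflow page can be handed out. A small sketch of that bookkeeping over a single 32-bit word (an invented layout, not the on-disk hash bitmap format):

#include <stdint.h>
#include <stdio.h>

static uint32_t bitmap = 0;		/* one bit per overflow page slot */
static int	free_bit = -1;		/* last used bit; -1 means none allocated yet */

static int
alloc_overflow_slot(void)
{
	free_bit++;					/* bump to address the first available bit */
	if (free_bit >= 32)
		return -1;				/* this bitmap word is full */
	bitmap |= (UINT32_C(1) << free_bit);
	return free_bit;
}

int
main(void)
{
	for (int i = 0; i < 3; i++)
		printf("allocated overflow slot %d\n", alloc_overflow_slot());
	printf("bitmap = 0x%08x\n", (unsigned) bitmap);
	return 0;
}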
src/backend/access/hash/hashpage.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.32 2001/07/15 22:48:15 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.33 2001/10/25 05:49:21 momjian Exp $
 *
 * NOTES
 * Postgres hash pages look like ordinary relation pages. The opaque

@@ -50,7 +50,7 @@ static void _hash_splitpage(Relation rel, Buffer metabuf, Bucket obucket, Bucket
 * system catalogs anyway.
 *
 * Note that our page locks are actual lockmanager locks, not buffer
-* locks (as are used by btree, for example). This is a good idea because
+* locks (as are used by btree, for example). This is a good idea because
 * the algorithms are not deadlock-free, and we'd better be able to detect
 * and recover from deadlocks.
 *

@@ -325,7 +325,7 @@ _hash_setpagelock(Relation rel,
 {
 switch (access)
 {
-case HASH_WRITE:
+case HASH_WRITE:
 LockPage(rel, blkno, ExclusiveLock);
 break;
 case HASH_READ:

@@ -349,7 +349,7 @@ _hash_unsetpagelock(Relation rel,
 {
 switch (access)
 {
-case HASH_WRITE:
+case HASH_WRITE:
 UnlockPage(rel, blkno, ExclusiveLock);
 break;
 case HASH_READ:

@@ -369,7 +369,7 @@ _hash_unsetpagelock(Relation rel,
 * It is safe to delete an item after acquiring a regular WRITE lock on
 * the page, because no other backend can hold a READ lock on the page,
 * and that means no other backend currently has an indexscan stopped on
-* any item of the item being deleted. Our own backend might have such
+* any item of the item being deleted. Our own backend might have such
 * an indexscan (in fact *will*, since that's how VACUUM found the item
 * in the first place), but _hash_adjscans will fix the scan position.
 */
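
The two switch (access) hunks above show the hash AM taking page-level lock-manager locks rather than plain buffer locks: HASH_WRITE maps to an ExclusiveLock via LockPage()/UnlockPage(), and the HASH_READ branch, cut off by the hunk, presumably takes a share-level lock. The file's comment explains why lock-manager locks are used: the algorithms are not deadlock-free, and the lock manager can detect and recover from deadlocks. A toy sketch of that access-mode-to-lock-mode mapping, using invented stand-in types rather than PostgreSQL's lock manager:

#include <stdio.h>

/* invented stand-ins for the access and lock modes involved */
typedef enum {HASH_READ, HASH_WRITE} HashAccess;
typedef enum {ShareLock, ExclusiveLock} LockMode;

/* stand-in for LockPage(): just report what would be locked */
static void
lock_page(unsigned blkno, LockMode mode)
{
	printf("page %u: %s lock\n", blkno,
		   mode == ExclusiveLock ? "exclusive" : "share");
}

/* shape of _hash_setpagelock()'s switch: map access mode to lock mode */
static void
set_page_lock(unsigned blkno, HashAccess access)
{
	switch (access)
	{
		case HASH_WRITE:
			lock_page(blkno, ExclusiveLock);
			break;
		case HASH_READ:
			lock_page(blkno, ShareLock);
			break;
	}
}

int
main(void)
{
	set_page_lock(3, HASH_WRITE);
	set_page_lock(3, HASH_READ);
	return 0;
}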
@@ -532,7 +532,6 @@ _hash_splitpage(Relation rel,
 _hash_relbuf(rel, obuf, HASH_WRITE);
 if (!BlockNumberIsValid(oblkno))
 {
-
 /*
 * the old bucket is completely empty; of course, the new
 * bucket will be as well, but since it's a base bucket page

@@ -559,7 +558,6 @@ _hash_splitpage(Relation rel,
 omaxoffnum = PageGetMaxOffsetNumber(opage);
 for (;;)
 {
-
 /*
 * at each iteration through this loop, each of these variables
 * should be up-to-date: obuf opage oopaque ooffnum omaxoffnum

@@ -572,7 +570,6 @@ _hash_splitpage(Relation rel,
 oblkno = oopaque->hasho_nextblkno;
 if (BlockNumberIsValid(oblkno))
 {
-
 /*
 * we ran out of tuples on this particular page, but we
 * have more overflow pages; re-init values.

@@ -594,7 +591,6 @@ _hash_splitpage(Relation rel,
 }
 else
 {
-
 /*
 * we're at the end of the bucket chain, so now we're
 * really done with everything. before quitting, call

@@ -618,7 +614,6 @@ _hash_splitpage(Relation rel,

 if (bucket == nbucket)
 {
-
 /*
 * insert the tuple into the new bucket. if it doesn't fit on
 * the current page in the new bucket, we must allocate a new

@@ -695,7 +690,6 @@ _hash_splitpage(Relation rel,
 }
 else
 {
-
 /*
 * the tuple stays on this page. we didn't move anything, so
 * we didn't delete anything and therefore we don't have to
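
The remaining hashpage.c hunks all fall inside _hash_splitpage(), whose comments outline the algorithm: walk every tuple in the old bucket's page chain, recompute which bucket it belongs to, move it if it now hashes to the new bucket, and leave it where it is otherwise. A compact sketch of that redistribution over in-memory arrays, with invented structures and a simple modulus in place of the real bucket computation, ignoring pages, overflow chains and locking:

#include <stdio.h>

#define MAXKEYS 8

/* invented in-memory stand-in for one bucket's tuple chain */
typedef struct
{
	unsigned	keys[MAXKEYS];
	int			nkeys;
} Bucket;

/* redistribute the old bucket's keys after the bucket count doubles */
static void
split_bucket(Bucket *obucket, Bucket *nbucket,
			 unsigned new_nbuckets, unsigned new_bucket_no)
{
	int			kept = 0;

	for (int i = 0; i < obucket->nkeys; i++)
	{
		unsigned	key = obucket->keys[i];

		if (key % new_nbuckets == new_bucket_no)
			nbucket->keys[nbucket->nkeys++] = key;	/* tuple moves */
		else
			obucket->keys[kept++] = key;	/* tuple stays on this page */
	}
	obucket->nkeys = kept;
}

int
main(void)
{
	/* going from 2 to 4 buckets: old bucket 1 splits into buckets 1 and 3 */
	Bucket		old_bucket = {{1, 3, 5, 7, 9, 11}, 6};
	Bucket		new_bucket = {{0}, 0};

	split_bucket(&old_bucket, &new_bucket, 4, 3);
	printf("old bucket keeps %d keys, new bucket gets %d\n",
		   old_bucket.nkeys, new_bucket.nkeys);
	return 0;
}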
src/backend/access/hash/hashsearch.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.26 2001/03/23 04:49:51 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.27 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -36,7 +36,6 @@ _hash_search(Relation rel,
 if (scankey == (ScanKey) NULL ||
 (keyDatum = scankey[0].sk_argument) == (Datum) NULL)
 {
-
 /*
 * If the scankey argument is NULL, all tuples will satisfy the
 * scan so we start the scan at the first bucket (bucket 0).
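
The _hash_search() hunk above keeps the comment describing scan startup: with no scankey (or a NULL key datum) every tuple satisfies the scan, so scanning starts at bucket 0; otherwise the key is hashed to pick the starting bucket. A tiny sketch of that decision, with an invented toy hash instead of the real _hash_call()/metapage logic:

#include <stdbool.h>
#include <stdio.h>

#define NBUCKETS 16

/* pick the bucket a scan should start at, in the spirit of _hash_search() */
static unsigned
start_bucket(bool have_key, unsigned long key_datum)
{
	if (!have_key)
		return 0;				/* no qual: every tuple matches, start at bucket 0 */

	return (unsigned) (key_datum * 2654435761UL % NBUCKETS);	/* toy hash */
}

int
main(void)
{
	printf("full scan starts at bucket %u\n", start_bucket(false, 0));
	printf("key 42 starts at bucket %u\n", start_bucket(true, 42));
	return 0;
}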
src/backend/access/hash/hashstrat.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.18 2001/05/30 19:53:40 tgl Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.19 2001/10/25 05:49:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -46,7 +46,6 @@ static StrategyEvaluationData HTEvaluationData = {
 (StrategyTransformMap) HTNegateCommute,
 HTEvaluationExpressions
 };
-
 #endif

 /* ----------------------------------------------------------------

@@ -68,7 +67,6 @@ _hash_getstrat(Relation rel,

 return strat;
 }
-
 #endif

 #ifdef NOT_USED

@@ -82,5 +80,4 @@ _hash_invokestrat(Relation rel,
 return (RelationInvokeStrategy(rel, &HTEvaluationData, attno, strat,
 left, right));
 }
-
 #endif