
Standard pgindent run for 8.1.

Bruce Momjian
2005-10-15 02:49:52 +00:00
parent 790c01d280
commit 1dc3498251
770 changed files with 34334 additions and 32507 deletions

src/backend/access/hash/hash.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.80 2005/06/06 17:01:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.81 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -55,8 +55,8 @@ hashbuild(PG_FUNCTION_ARGS)
HashBuildState buildstate;
/*
- * We expect to be called exactly once for any index relation. If
- * that's not the case, big trouble's what we have.
+ * We expect to be called exactly once for any index relation. If that's
+ * not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
@@ -70,7 +70,7 @@ hashbuild(PG_FUNCTION_ARGS)
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo,
- hashbuildCallback, (void *) &buildstate);
+ hashbuildCallback, (void *) &buildstate);
/* since we just counted the # of tuples, may as well update stats */
IndexCloseAndUpdateStats(heap, reltuples, index, buildstate.indtuples);
@@ -141,12 +141,12 @@ hashinsert(PG_FUNCTION_ARGS)
/*
* If the single index key is null, we don't insert it into the index.
- * Hash tables support scans on '='. Relational algebra says that A =
- * B returns null if either A or B is null. This means that no
- * qualification used in an index scan could ever return true on a
- * null attribute. It also means that indices can't be used by ISNULL
- * or NOTNULL scans, but that's an artifact of the strategy map
- * architecture chosen in 1986, not of the way nulls are handled here.
+ * Hash tables support scans on '='. Relational algebra says that A = B
+ * returns null if either A or B is null. This means that no
+ * qualification used in an index scan could ever return true on a null
+ * attribute. It also means that indices can't be used by ISNULL or
+ * NOTNULL scans, but that's an artifact of the strategy map architecture
+ * chosen in 1986, not of the way nulls are handled here.
*/
if (IndexTupleHasNulls(itup))
{
@@ -180,16 +180,16 @@ hashgettuple(PG_FUNCTION_ARGS)
bool res;
/*
- * We hold pin but not lock on current buffer while outside the hash
- * AM. Reacquire the read lock here.
+ * We hold pin but not lock on current buffer while outside the hash AM.
+ * Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
/*
- * If we've already initialized this scan, we can just advance it in
- * the appropriate direction. If we haven't done so yet, we call a
- * routine to get the first item in the scan.
+ * If we've already initialized this scan, we can just advance it in the
+ * appropriate direction. If we haven't done so yet, we call a routine to
+ * get the first item in the scan.
*/
if (ItemPointerIsValid(&(scan->currentItemData)))
{
@@ -199,17 +199,16 @@ hashgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
- * Yes, so mark it by setting the LP_DELETE bit in the item
- * flags.
+ * Yes, so mark it by setting the LP_DELETE bit in the item flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->hashso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
/*
- * Since this can be redone later if needed, it's treated the
- * same as a commit-hint-bit status update for heap tuples: we
- * mark the buffer dirty but don't make a WAL log entry.
+ * Since this can be redone later if needed, it's treated the same
+ * as a commit-hint-bit status update for heap tuples: we mark the
+ * buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->hashso_curbuf);
}
@@ -256,7 +255,7 @@ Datum
hashgetmulti(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
+ ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
int32 max_tids = PG_GETARG_INT32(2);
int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3);
HashScanOpaque so = (HashScanOpaque) scan->opaque;
@@ -265,8 +264,8 @@ hashgetmulti(PG_FUNCTION_ARGS)
int32 ntids = 0;
/*
- * We hold pin but not lock on current buffer while outside the hash
- * AM. Reacquire the read lock here.
+ * We hold pin but not lock on current buffer while outside the hash AM.
+ * Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
@@ -280,6 +279,7 @@ hashgetmulti(PG_FUNCTION_ARGS)
res = _hash_next(scan, ForwardScanDirection);
else
res = _hash_first(scan, ForwardScanDirection);
/*
* Skip killed tuples if asked to.
*/
@@ -505,12 +505,12 @@ hashbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples = 0;
/*
- * Read the metapage to fetch original bucket and tuple counts. Also,
- * we keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a
- * bit hokey but perfectly safe, since the interesting entries in the
- * spares array cannot change under us; and it beats rereading the
- * metapage for each bucket.
+ * Read the metapage to fetch original bucket and tuple counts. Also, we
+ * keep a copy of the last-seen metapage so that we can use its
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hokey but perfectly safe, since the interesting entries in the spares
+ * array cannot change under us; and it beats rereading the metapage for
+ * each bucket.
*/
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
metap = (HashMetaPage) BufferGetPage(metabuf);
@@ -569,7 +569,7 @@ loop_top:
ItemPointer htup;
hitem = (HashItem) PageGetItem(page,
- PageGetItemId(page, offno));
+ PageGetItemId(page, offno));
htup = &(hitem->hash_itup.t_tid);
if (callback(htup, callback_state))
{
@@ -641,8 +641,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by
- * dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
*/
if (metap->hashm_ntuples > tuples_removed)
metap->hashm_ntuples -= tuples_removed;

src/backend/access/hash/hashfunc.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.44 2005/05/25 21:40:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.45 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -46,11 +46,11 @@ hashint8(PG_FUNCTION_ARGS)
{
/*
* The idea here is to produce a hash value compatible with the values
- * produced by hashint4 and hashint2 for logically equivalent inputs;
- * this is necessary if we ever hope to support cross-type hash joins
- * across these input types. Since all three types are signed, we can
- * xor the high half of the int8 value if the sign is positive, or the
- * complement of the high half when the sign is negative.
+ * produced by hashint4 and hashint2 for logically equivalent inputs; this
+ * is necessary if we ever hope to support cross-type hash joins across
+ * these input types. Since all three types are signed, we can xor the
+ * high half of the int8 value if the sign is positive, or the complement
+ * of the high half when the sign is negative.
*/
#ifndef INT64_IS_BUSTED
int64 val = PG_GETARG_INT64(0);
@@ -78,9 +78,9 @@ hashfloat4(PG_FUNCTION_ARGS)
float4 key = PG_GETARG_FLOAT4(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit
- * patterns but should compare as equal. We must ensure that they
- * have the same hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit patterns
+ * but should compare as equal. We must ensure that they have the same
+ * hash value, which is most easily done this way:
*/
if (key == (float4) 0)
PG_RETURN_UINT32(0);
@@ -94,9 +94,9 @@ hashfloat8(PG_FUNCTION_ARGS)
float8 key = PG_GETARG_FLOAT8(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit
- * patterns but should compare as equal. We must ensure that they
- * have the same hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit patterns
+ * but should compare as equal. We must ensure that they have the same
+ * hash value, which is most easily done this way:
*/
if (key == (float8) 0)
PG_RETURN_UINT32(0);
@@ -126,8 +126,7 @@ hashname(PG_FUNCTION_ARGS)
char *key = NameStr(*PG_GETARG_NAME(0));
int keylen = strlen(key);
- Assert(keylen < NAMEDATALEN); /* else it's not truncated
- * correctly */
+ Assert(keylen < NAMEDATALEN); /* else it's not truncated correctly */
return hash_any((unsigned char *) key, keylen);
}
@@ -139,8 +138,8 @@ hashtext(PG_FUNCTION_ARGS)
Datum result;
/*
- * Note: this is currently identical in behavior to hashvarlena, but
- * it seems likely that we may need to do something different in non-C
+ * Note: this is currently identical in behavior to hashvarlena, but it
+ * seems likely that we may need to do something different in non-C
* locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA(key),

src/backend/access/hash/hashinsert.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.37 2005/08/10 21:36:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.38 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,8 +50,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
bool isnull;
/*
- * Compute the hash key for the item. We do this first so as not to
- * need to hold any locks while running the hash function.
+ * Compute the hash key for the item. We do this first so as not to need
+ * to hold any locks while running the hash function.
*/
itup = &(hitem->hash_itup);
if (rel->rd_rel->relnatts != 1)
@@ -64,12 +64,12 @@ _hash_doinsert(Relation rel, HashItem hitem)
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
- itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but
- * we need to be consistent */
+ itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
+ * need to be consistent */
/*
- * Acquire shared split lock so we can compute the target bucket
- * safely (see README).
+ * Acquire shared split lock so we can compute the target bucket safely
+ * (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
@@ -79,9 +79,9 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check whether the item can fit on a hash page at all. (Eventually,
- * we ought to try to apply TOAST methods if not.) Note that at this
- * point, itemsz doesn't include the ItemId.
+ * Check whether the item can fit on a hash page at all. (Eventually, we
+ * ought to try to apply TOAST methods if not.) Note that at this point,
+ * itemsz doesn't include the ItemId.
*/
if (itemsz > HashMaxItemSize((Page) metap))
ereport(ERROR,
@@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
errmsg("index row size %lu exceeds hash maximum %lu",
(unsigned long) itemsz,
(unsigned long) HashMaxItemSize((Page) metap)),
- errhint("Values larger than a buffer page cannot be indexed.")));
+ errhint("Values larger than a buffer page cannot be indexed.")));
/*
* Compute the target bucket number, and convert to block number.
@@ -105,8 +105,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
/*
- * Acquire share lock on target bucket; then we can release split
- * lock.
+ * Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
@@ -130,8 +129,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
if (BlockNumberIsValid(nextblkno))
{
/*
- * ovfl page exists; go get it. if it doesn't have room,
- * we'll find out next pass through the loop test above.
+ * ovfl page exists; go get it. if it doesn't have room, we'll
+ * find out next pass through the loop test above.
*/
_hash_relbuf(rel, buf);
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);

src/backend/access/hash/hashovfl.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.46 2005/05/11 01:26:01 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.47 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -44,8 +44,8 @@ bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum)
/* loop */ ;
/*
- * Convert to absolute page number by adding the number of bucket
- * pages that exist before this split point.
+ * Convert to absolute page number by adding the number of bucket pages
+ * that exist before this split point.
*/
return (BlockNumber) ((1 << i) + ovflbitnum);
}
@@ -252,10 +252,10 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
/*
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
- * immediately: the bitmap page itself, and the following page
- * which is the one we return to the caller. Both of these are
- * correctly marked "in use". Subsequent pages do not exist yet,
- * but it is convenient to pre-mark them as "in use" too.
+ * immediately: the bitmap page itself, and the following page which
+ * is the one we return to the caller. Both of these are correctly
+ * marked "in use". Subsequent pages do not exist yet, but it is
+ * convenient to pre-mark them as "in use" too.
*/
_hash_initbitmap(rel, metap, bitno_to_blkno(metap, bit));
@@ -265,8 +265,8 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
else
{
/*
- * Nothing to do here; since the page was past the last used page,
- * we know its bitmap bit was preinitialized to "in use".
+ * Nothing to do here; since the page was past the last used page, we
+ * know its bitmap bit was preinitialized to "in use".
*/
}
@@ -275,8 +275,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
- * changing it if someone moved it while we were searching bitmap
- * pages.
+ * changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
metap->hashm_firstfree = bit + 1;
@@ -305,8 +304,7 @@ found:
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
- * changing it if someone moved it while we were searching bitmap
- * pages.
+ * changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
{
@@ -394,10 +392,10 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
_hash_wrtbuf(rel, ovflbuf);
/*
- * Fix up the bucket chain. this is a doubly-linked list, so we must
- * fix up the bucket chain members behind and ahead of the overflow
- * page being deleted. No concurrency issues since we hold exclusive
- * lock on the entire bucket.
+ * Fix up the bucket chain. this is a doubly-linked list, so we must fix
+ * up the bucket chain members behind and ahead of the overflow page being
+ * deleted. No concurrency issues since we hold exclusive lock on the
+ * entire bucket.
*/
if (BlockNumberIsValid(prevblkno))
{
@@ -488,12 +486,11 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
/*
* It is okay to write-lock the new bitmap page while holding metapage
- * write lock, because no one else could be contending for the new
- * page.
+ * write lock, because no one else could be contending for the new page.
*
- * There is some loss of concurrency in possibly doing I/O for the new
- * page while holding the metapage lock, but this path is taken so
- * seldom that it's not worth worrying about.
+ * There is some loss of concurrency in possibly doing I/O for the new page
+ * while holding the metapage lock, but this path is taken so seldom that
+ * it's not worth worrying about.
*/
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
pg = BufferGetPage(buf);
@@ -586,8 +583,8 @@ _hash_squeezebucket(Relation rel,
}
/*
- * find the last page in the bucket chain by starting at the base
- * bucket page and working forward.
+ * find the last page in the bucket chain by starting at the base bucket
+ * page and working forward.
*/
ropaque = wopaque;
do
@@ -655,22 +652,21 @@ _hash_squeezebucket(Relation rel,
/*
* delete the tuple from the "read" page. PageIndexTupleDelete
- * repacks the ItemId array, so 'roffnum' will be "advanced"
- * to the "next" ItemId.
+ * repacks the ItemId array, so 'roffnum' will be "advanced" to
+ * the "next" ItemId.
*/
PageIndexTupleDelete(rpage, roffnum);
}
/*
* if the "read" page is now empty because of the deletion (or
* because it was empty when we got to it), free it.
* if the "read" page is now empty because of the deletion (or because
* it was empty when we got to it), free it.
*
* Tricky point here: if our read and write pages are adjacent in the
* bucket chain, our write lock on wbuf will conflict with
* _hash_freeovflpage's attempt to update the sibling links of the
- * removed page. However, in that case we are done anyway, so we
- * can simply drop the write lock before calling
- * _hash_freeovflpage.
+ * removed page. However, in that case we are done anyway, so we can
+ * simply drop the write lock before calling _hash_freeovflpage.
*/
if (PageIsEmpty(rpage))
{

src/backend/access/hash/hashpage.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.51 2005/06/09 21:01:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.52 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -240,13 +240,13 @@ _hash_metapinit(Relation rel)
RelationGetRelationName(rel));
/*
- * Determine the target fill factor (tuples per bucket) for this
- * index. The idea is to make the fill factor correspond to pages
- * about 3/4ths full. We can compute it exactly if the index datatype
- * is fixed-width, but for var-width there's some guessing involved.
+ * Determine the target fill factor (tuples per bucket) for this index.
+ * The idea is to make the fill factor correspond to pages about 3/4ths
+ * full. We can compute it exactly if the index datatype is fixed-width,
+ * but for var-width there's some guessing involved.
*/
data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
- RelationGetDescr(rel)->attrs[0]->atttypmod);
+ RelationGetDescr(rel)->attrs[0]->atttypmod);
item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
sizeof(ItemIdData); /* include the line pointer */
ffactor = (BLCKSZ * 3 / 4) / item_width;
@@ -289,9 +289,8 @@ _hash_metapinit(Relation rel)
metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
/*
- * We initialize the index with two buckets, 0 and 1, occupying
- * physical blocks 1 and 2. The first freespace bitmap page is in
- * block 3.
+ * We initialize the index with two buckets, 0 and 1, occupying physical
+ * blocks 1 and 2. The first freespace bitmap page is in block 3.
*/
metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */
metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */
@@ -321,8 +320,8 @@ _hash_metapinit(Relation rel)
}
/*
- * Initialize first bitmap page. Can't do this until we create the
- * first two buckets, else smgr will complain.
+ * Initialize first bitmap page. Can't do this until we create the first
+ * two buckets, else smgr will complain.
*/
_hash_initbitmap(rel, metap, 3);
@@ -367,15 +366,14 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Obtain the page-zero lock to assert the right to begin a split (see
* README).
*
- * Note: deadlock should be impossible here. Our own backend could only
- * be holding bucket sharelocks due to stopped indexscans; those will
- * not block other holders of the page-zero lock, who are only
- * interested in acquiring bucket sharelocks themselves. Exclusive
- * bucket locks are only taken here and in hashbulkdelete, and neither
- * of these operations needs any additional locks to complete. (If,
- * due to some flaw in this reasoning, we manage to deadlock anyway,
- * it's okay to error out; the index will be left in a consistent
- * state.)
+ * Note: deadlock should be impossible here. Our own backend could only be
+ * holding bucket sharelocks due to stopped indexscans; those will not
+ * block other holders of the page-zero lock, who are only interested in
+ * acquiring bucket sharelocks themselves. Exclusive bucket locks are
+ * only taken here and in hashbulkdelete, and neither of these operations
+ * needs any additional locks to complete. (If, due to some flaw in this
+ * reasoning, we manage to deadlock anyway, it's okay to error out; the
+ * index will be left in a consistent state.)
*/
_hash_getlock(rel, 0, HASH_EXCLUSIVE);
@@ -386,8 +384,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check to see if split is still needed; someone else might have
- * already done one while we waited for the lock.
+ * Check to see if split is still needed; someone else might have already
+ * done one while we waited for the lock.
*
* Make sure this stays in sync with _hash_doinsert()
*/
@@ -402,11 +400,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
- * Ideally we would lock the new bucket too before proceeding, but if we
- * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
- * isn't correct yet. For simplicity we update the metapage first and
- * then lock. This should be okay because no one else should be
- * trying to lock the new bucket yet...
+ * Ideally we would lock the new bucket too before proceeding, but if we are
+ * about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
+ * correct yet. For simplicity we update the metapage first and then
+ * lock. This should be okay because no one else should be trying to lock
+ * the new bucket yet...
*/
new_bucket = metap->hashm_maxbucket + 1;
old_bucket = (new_bucket & metap->hashm_lowmask);
@@ -420,14 +418,13 @@ _hash_expandtable(Relation rel, Buffer metabuf)
goto fail;
/*
- * Okay to proceed with split. Update the metapage bucket mapping
- * info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
- * Since we are scribbling on the metapage data right in the shared
- * buffer, any failure in this next little bit leaves us with a big
- * problem: the metapage is effectively corrupt but could get written
- * back to disk. We don't really expect any failure, but just to be
- * sure, establish a critical section.
+ * Since we are scribbling on the metapage data right in the shared buffer,
+ * any failure in this next little bit leaves us with a big problem: the
+ * metapage is effectively corrupt but could get written back to disk. We
+ * don't really expect any failure, but just to be sure, establish a
+ * critical section.
*/
START_CRIT_SECTION();
@@ -443,8 +440,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* If the split point is increasing (hashm_maxbucket's log base 2
* increases), we need to adjust the hashm_spares[] array and
- * hashm_ovflpoint so that future overflow pages will be created
- * beyond this new batch of bucket pages.
+ * hashm_ovflpoint so that future overflow pages will be created beyond
+ * this new batch of bucket pages.
*
* XXX should initialize new bucket pages to prevent out-of-order page
* creation? Don't wanna do it right here though.
@@ -471,10 +468,9 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
- * split lock, other splits could begin, so these values might be out
- * of date before _hash_splitbucket finishes. That's okay, since all
- * it needs is to tell which of these two buckets to map hashkeys
- * into.
+ * split lock, other splits could begin, so these values might be out of
+ * date before _hash_splitbucket finishes. That's okay, since all it
+ * needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
highmask = metap->hashm_highmask;
@@ -554,9 +550,9 @@ _hash_splitbucket(Relation rel,
TupleDesc itupdesc = RelationGetDescr(rel);
/*
- * It should be okay to simultaneously write-lock pages from each
- * bucket, since no one else can be trying to acquire buffer lock on
- * pages of either bucket.
+ * It should be okay to simultaneously write-lock pages from each bucket,
+ * since no one else can be trying to acquire buffer lock on pages of
+ * either bucket.
*/
oblkno = start_oblkno;
nblkno = start_nblkno;
@@ -578,17 +574,17 @@ _hash_splitbucket(Relation rel,
nopaque->hasho_filler = HASHO_FILL;
/*
- * Partition the tuples in the old bucket between the old bucket and
- * the new bucket, advancing along the old bucket's overflow bucket
- * chain and adding overflow pages to the new bucket as needed.
+ * Partition the tuples in the old bucket between the old bucket and the
+ * new bucket, advancing along the old bucket's overflow bucket chain and
+ * adding overflow pages to the new bucket as needed.
*/
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
for (;;)
{
/*
- * at each iteration through this loop, each of these variables
- * should be up-to-date: obuf opage oopaque ooffnum omaxoffnum
+ * at each iteration through this loop, each of these variables should
+ * be up-to-date: obuf opage oopaque ooffnum omaxoffnum
*/
/* check if we're at the end of the page */
@@ -600,8 +596,8 @@ _hash_splitbucket(Relation rel,
break;
/*
- * we ran out of tuples on this particular page, but we have
- * more overflow pages; advance to next page.
+ * we ran out of tuples on this particular page, but we have more
+ * overflow pages; advance to next page.
*/
_hash_wrtbuf(rel, obuf);
@@ -618,8 +614,7 @@ _hash_splitbucket(Relation rel,
* Re-hash the tuple to determine which bucket it now belongs in.
*
* It is annoying to call the hash function while holding locks, but
- * releasing and relocking the page for each tuple is unappealing
- * too.
+ * releasing and relocking the page for each tuple is unappealing too.
*/
hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
itup = &(hitem->hash_itup);
@@ -632,9 +627,9 @@ _hash_splitbucket(Relation rel,
if (bucket == nbucket)
{
/*
- * insert the tuple into the new bucket. if it doesn't fit on
- * the current page in the new bucket, we must allocate a new
- * overflow page and place the tuple on that page instead.
+ * insert the tuple into the new bucket. if it doesn't fit on the
+ * current page in the new bucket, we must allocate a new overflow
+ * page and place the tuple on that page instead.
*/
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
@@ -659,13 +654,13 @@ _hash_splitbucket(Relation rel,
RelationGetRelationName(rel));
/*
- * now delete the tuple from the old bucket. after this
- * section of code, 'ooffnum' will actually point to the
- * ItemId to which we would point if we had advanced it before
- * the deletion (PageIndexTupleDelete repacks the ItemId
- * array). this also means that 'omaxoffnum' is exactly one
- * less than it used to be, so we really can just decrement it
- * instead of calling PageGetMaxOffsetNumber.
+ * now delete the tuple from the old bucket. after this section
+ * of code, 'ooffnum' will actually point to the ItemId to which
+ * we would point if we had advanced it before the deletion
+ * (PageIndexTupleDelete repacks the ItemId array). this also
+ * means that 'omaxoffnum' is exactly one less than it used to be,
+ * so we really can just decrement it instead of calling
+ * PageGetMaxOffsetNumber.
*/
PageIndexTupleDelete(opage, ooffnum);
omaxoffnum = OffsetNumberPrev(omaxoffnum);
@@ -673,9 +668,9 @@ _hash_splitbucket(Relation rel,
else
{
/*
- * the tuple stays on this page. we didn't move anything, so
- * we didn't delete anything and therefore we don't have to
- * change 'omaxoffnum'.
+ * the tuple stays on this page. we didn't move anything, so we
+ * didn't delete anything and therefore we don't have to change
+ * 'omaxoffnum'.
*/
Assert(bucket == obucket);
ooffnum = OffsetNumberNext(ooffnum);
@@ -683,11 +678,10 @@ _hash_splitbucket(Relation rel,
}
/*
- * We're at the end of the old bucket chain, so we're done
- * partitioning the tuples. Before quitting, call _hash_squeezebucket
- * to ensure the tuples remaining in the old bucket (including the
- * overflow pages) are packed as tightly as possible. The new bucket
- * is already tight.
+ * We're at the end of the old bucket chain, so we're done partitioning
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * tuples remaining in the old bucket (including the overflow pages) are
+ * packed as tightly as possible. The new bucket is already tight.
*/
_hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);

src/backend/access/hash/hashscan.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.38 2004/12/31 21:59:13 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.39 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,9 +44,9 @@ ReleaseResources_hash(void)
HashScanList next;
/*
- * Note: this should be a no-op during normal query shutdown. However,
- * in an abort situation ExecutorEnd is not called and so there may be
- * open index scans to clean up.
+ * Note: this should be a no-op during normal query shutdown. However, in
+ * an abort situation ExecutorEnd is not called and so there may be open
+ * index scans to clean up.
*/
prev = NULL;

src/backend/access/hash/hashsearch.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.39 2005/10/06 02:29:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.40 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -137,33 +137,32 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
ItemPointerSetInvalid(current);
/*
- * We do not support hash scans with no index qualification, because
- * we would have to read the whole index rather than just one bucket.
- * That creates a whole raft of problems, since we haven't got a
- * practical way to lock all the buckets against splits or
- * compactions.
+ * We do not support hash scans with no index qualification, because we
+ * would have to read the whole index rather than just one bucket. That
+ * creates a whole raft of problems, since we haven't got a practical way
+ * to lock all the buckets against splits or compactions.
*/
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("hash indexes do not support whole-index scans")));
+ errmsg("hash indexes do not support whole-index scans")));
/*
- * If the constant in the index qual is NULL, assume it cannot match
- * any items in the index.
+ * If the constant in the index qual is NULL, assume it cannot match any
+ * items in the index.
*/
if (scan->keyData[0].sk_flags & SK_ISNULL)
return false;
/*
- * Okay to compute the hash key. We want to do this before acquiring
- * any locks, in case a user-defined hash function happens to be slow.
+ * Okay to compute the hash key. We want to do this before acquiring any
+ * locks, in case a user-defined hash function happens to be slow.
*/
hashkey = _hash_datum2hashkey(rel, scan->keyData[0].sk_argument);
/*
- * Acquire shared split lock so we can compute the target bucket
- * safely (see README).
+ * Acquire shared split lock so we can compute the target bucket safely
+ * (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
@@ -186,8 +185,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
_hash_relbuf(rel, metabuf);
/*
- * Acquire share lock on target bucket; then we can release split
- * lock.
+ * Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
@@ -263,9 +261,9 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
bucket = opaque->hasho_bucket;
/*
- * If _hash_step is called from _hash_first, current will not be
- * valid, so we can't dereference it. However, in that case, we
- * presumably want to start at the beginning/end of the page...
+ * If _hash_step is called from _hash_first, current will not be valid, so
+ * we can't dereference it. However, in that case, we presumably want to
+ * start at the beginning/end of the page...
*/
maxoff = PageGetMaxOffsetNumber(page);
if (ItemPointerIsValid(current))
@@ -276,8 +274,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
/*
* 'offnum' now points to the last tuple we have seen (if any).
*
- * continue to step through tuples until: 1) we get to the end of the
- * bucket chain or 2) we find a valid tuple.
+ * continue to step through tuples until: 1) we get to the end of the bucket
+ * chain or 2) we find a valid tuple.
*/
do
{