mirror of https://github.com/postgres/postgres.git
pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
@@ -78,7 +78,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
* tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*

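The comment above explains the sort-before-insert strategy. As a rough illustration only (hypothetical names, not the code touched by this commit), the decision and the sort key amount to something like:

#include <stdbool.h>
#include <stdint.h>

/* Sort the tuples first only when the initial index won't fit in shared buffers. */
static bool
should_presort(uint32_t num_buckets, uint32_t nbuffers)
{
    return num_buckets >= nbuffers;
}

/* Expected bucket number for a hash code, used as the sort key. */
static uint32_t
expected_bucket(uint32_t hashcode, uint32_t hash_mask)
{
    return hashcode & hash_mask;    /* hash_mask ~ 2^ceil(log2(num_buckets)) - 1 */
}
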
@@ -248,7 +248,7 @@ hashgettuple(PG_FUNCTION_ARGS)
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
* for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)

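To make the re-find step concrete, a standalone sketch with simplified types (not the server code): scan forward from the remembered offset until the saved heap TID matches, relying on the guarantee above that the item can only have moved to a higher offset.

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint32_t block; uint16_t offset; } tid_t;   /* simplified TID */

static bool
tid_equal(tid_t a, tid_t b)
{
    return a.block == b.block && a.offset == b.offset;
}

/* Return the new position of prev_tid, or -1 if it is unexpectedly gone. */
static int
refind_position(const tid_t *items, int nitems, int prev_off, tid_t prev_tid)
{
    for (int off = prev_off; off < nitems; off++)
    {
        if (tid_equal(items[off], prev_tid))
            return off;
    }
    return -1;
}
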
@@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
* hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.

@@ -655,7 +655,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
* double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/

@@ -11,7 +11,7 @@
* src/backend/access/hash/hashfunc.c
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various

@@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
* different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));

@@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
* direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------

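For reference, the 32-bit left-rotate this style of mixing relies on can be written as below (a sketch; k must stay between 1 and 31 to avoid undefined behavior):

#include <stdint.h>

/* Rotate x left by k bits; most compilers turn this into a single rotate instruction. */
static inline uint32_t
rot32(uint32_t x, unsigned k)
{
    return (x << k) | (x >> (32 - k));
}
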
@@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but is does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
* bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------

@@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
* Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).

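A caller-side sketch of the last point: because every input bit affects every output bit, a power-of-two table can be indexed with a simple mask instead of a modulo by a prime (illustrative only):

#include <stdint.h>

/* table_size must be a power of two; the low bits of the hash are as good as any. */
static inline uint32_t
bucket_for(uint32_t hashcode, uint32_t table_size)
{
    return hashcode & (table_size - 1);
}
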
@@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)

/*
* If the previous iteration of this loop locked what is still the
* correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)

@@ -80,7 +80,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.

@@ -89,12 +89,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
* no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
* immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer

@@ -157,7 +157,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
* Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*

@@ -253,7 +253,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
* is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/

@@ -284,7 +284,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;

/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)

@@ -313,7 +313,7 @@ found:
blkno = bitno_to_blkno(metap, bit);

/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)

@@ -494,7 +494,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
* Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.

@@ -49,7 +49,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
* take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))

@@ -136,7 +136,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
* _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the

@@ -344,7 +344,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
* as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);

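As a back-of-the-envelope illustration of that computation (assumed sizes: 8 kB page, 24-byte page header, 4-byte line pointer, 8-byte index tuple header; the server uses its own constants and alignment rules):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    const int page_size   = 8192;   /* assumed block size */
    const int page_header = 24;     /* assumed page header size */
    const int fillfactor  = 75;     /* user-settable, in percent */

    int data_width = sizeof(uint32_t);            /* fixed-width hash key */
    int item_width = 8 /* tuple header */ + data_width + 4 /* line pointer */;
    int usable     = (page_size - page_header) * fillfactor / 100;
    int ffactor    = usable / item_width;         /* target tuples per bucket */

    printf("target fill factor: %d tuples per bucket\n", ffactor);
    return 0;
}
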
@@ -377,7 +377,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
* calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);

@@ -545,7 +545,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)

/*
* Determine which bucket is to be split, and attempt to lock the old
* bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.

@@ -603,7 +603,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}

/*
* Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big

@@ -641,7 +641,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
* date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;

@@ -876,7 +876,7 @@ _hash_splitbucket(Relation rel,

/*
* We're at the end of the old bucket chain, so we're done partitioning
* the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/

@@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)

/*
* If the previous iteration of this loop locked what is still the
* correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)

@@ -269,7 +269,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
* false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.

@@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
* bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.

@@ -52,7 +52,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
hspool->index = index;

/*
* Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*

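The mask computation the comment refers to amounts to rounding num_buckets up to the next power of two and subtracting one, so a masked hash code always names a bucket that exists now or will exist soon. A minimal standalone sketch:

#include <stdint.h>

/* Assumes num_buckets <= 2^31. */
static uint32_t
hash_mask_for(uint32_t num_buckets)
{
    uint32_t log2 = 0;

    /* ceil(log2(num_buckets)) */
    while (((uint32_t) 1 << log2) < num_buckets)
        log2++;
    return ((uint32_t) 1 << log2) - 1;
}
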
@@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))

@@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
* existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber

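The contract described above is a classic lower-bound search. A generic standalone sketch over an array of hash keys (not the server routine):

#include <stddef.h>
#include <stdint.h>

/* Return the index of the first element with key >= hash_value, or nkeys if none. */
static size_t
first_key_at_or_above(const uint32_t *keys, size_t nkeys, uint32_t hash_value)
{
    size_t lower = 0;
    size_t upper = nkeys;

    while (lower < upper)
    {
        size_t mid = lower + (upper - lower) / 2;

        if (keys[mid] < hash_value)
            lower = mid + 1;
        else
            upper = mid;
    }
    return lower;               /* suitable place to start a search or insert */
}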