
pgindent run for 8.3.

Bruce Momjian
2007-11-15 21:14:46 +00:00
parent 3adc760fb9
commit fdf5a5efb7
486 changed files with 10044 additions and 9664 deletions

--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.96 2007/09/12 22:10:25 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.97 2007/11/15 21:14:32 momjian Exp $
  *
  * NOTES
  *	  This file contains only the public interface routines.
@@ -548,7 +548,7 @@ loop_top:
 		vacuum_delay_point();
 
 		buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
-										 LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
+										 LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
 										 info->strategy);
 		page = BufferGetPage(buf);
 		opaque = (HashPageOpaque) PageGetSpecialPointer(page);

--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.53 2007/09/21 22:52:52 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.54 2007/11/15 21:14:32 momjian Exp $
  *
  * NOTES
  *	  These functions are stored in pg_amproc. For each operator class
@@ -103,8 +103,8 @@ hashfloat4(PG_FUNCTION_ARGS)
	 * To support cross-type hashing of float8 and float4, we want to return
	 * the same hash value hashfloat8 would produce for an equal float8 value.
	 * So, widen the value to float8 and hash that. (We must do this rather
-	 * than have hashfloat8 try to narrow its value to float4; that could
-	 * fail on overflow.)
+	 * than have hashfloat8 try to narrow its value to float4; that could fail
+	 * on overflow.)
	 */
	key8 = key;

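A note on the hashfloat4() comment reflowed above: the invariant it describes is that hashfloat4 must return the same value hashfloat8 would produce for an equal float8, and widening is the safe direction because every float4 value is exactly representable as a float8, while narrowing a float8 can overflow. A minimal standalone sketch of that idea, using a stand-in byte hash in place of the backend's hash_any() (all names here are illustrative, not PostgreSQL's):

#include <stddef.h>
#include <stdint.h>

/* Stand-in for the backend's hash_any(); any byte hash serves the demo. */
static uint32_t
hash_bytes(const unsigned char *k, size_t len)
{
    uint32_t h = 2166136261u;       /* FNV-1a offset basis */

    while (len-- > 0)
    {
        h ^= *k++;
        h *= 16777619u;
    }
    return h;
}

static uint32_t
hashfloat8_sketch(double key)
{
    return hash_bytes((const unsigned char *) &key, sizeof(key));
}

static uint32_t
hashfloat4_sketch(float key)
{
    double key8 = key;              /* widen: exact for every float4 value */

    /*
     * Hashing the widened value makes hashfloat4_sketch(x) equal to
     * hashfloat8_sketch((double) x), which is what cross-type hash joins
     * on float4 = float8 require.  Narrowing inside hashfloat8 instead
     * could overflow the float4 range.
     */
    return hash_bytes((const unsigned char *) &key8, sizeof(key8));
}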
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.60 2007/09/20 17:56:30 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.61 2007/11/15 21:14:32 momjian Exp $
  *
  * NOTES
  *	  Overflow pages look like ordinary relation pages.
@@ -156,7 +156,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
 /*
  *	_hash_getovflpage()
  *
- *	Find an available overflow page and return it. The returned buffer
+ *	Find an available overflow page and return it. The returned buffer
  *	is pinned and write-locked, and has had _hash_pageinit() applied,
  *	but it is caller's responsibility to fill the special space.
  *
@@ -402,9 +402,9 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
	bucket = ovflopaque->hasho_bucket;
 
	/*
-	 * Zero the page for debugging's sake; then write and release it.
-	 * (Note: if we failed to zero the page here, we'd have problems
-	 * with the Assert in _hash_pageinit() when the page is reused.)
+	 * Zero the page for debugging's sake; then write and release it. (Note:
+	 * if we failed to zero the page here, we'd have problems with the Assert
+	 * in _hash_pageinit() when the page is reused.)
	 */
	MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf));
	_hash_wrtbuf(rel, ovflbuf);
@@ -420,7 +420,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
		Buffer		prevbuf = _hash_getbuf_with_strategy(rel,
															 prevblkno,
															 HASH_WRITE,
-											  LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
+											  LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
															 bstrategy);
		Page		prevpage = BufferGetPage(prevbuf);
		HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);

--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.70 2007/09/20 17:56:30 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.71 2007/11/15 21:14:32 momjian Exp $
  *
  * NOTES
  *	  Postgres hash pages look like ordinary relation pages. The opaque
@@ -37,7 +37,7 @@
 static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
-					uint32 nblocks);
+					uint32 nblocks);
 static void _hash_splitbucket(Relation rel, Buffer metabuf,
				  Bucket obucket, Bucket nbucket,
				  BlockNumber start_oblkno,
@@ -138,7 +138,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
  *
  *		This must be used only to fetch pages that are known to be before
  *		the index's filesystem EOF, but are to be filled from scratch.
- *		_hash_pageinit() is applied automatically. Otherwise it has
+ *		_hash_pageinit() is applied automatically. Otherwise it has
  *		effects similar to _hash_getbuf() with access = HASH_WRITE.
  *
  *		When this routine returns, a write lock is set on the
@@ -184,7 +184,7 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
 Buffer
 _hash_getnewbuf(Relation rel, BlockNumber blkno)
 {
-	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
+	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
	Buffer		buf;
 
	if (blkno == P_NEW)
@@ -354,10 +354,10 @@ _hash_metapinit(Relation rel)
		ffactor = 10;
 
	/*
-	 * We initialize the metapage, the first two bucket pages, and the
-	 * first bitmap page in sequence, using _hash_getnewbuf to cause
-	 * smgrextend() calls to occur. This ensures that the smgr level
-	 * has the right idea of the physical index length.
+	 * We initialize the metapage, the first two bucket pages, and the first
+	 * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
+	 * calls to occur. This ensures that the smgr level has the right idea of
+	 * the physical index length.
	 */
	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
	pg = BufferGetPage(metabuf);
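The ordering argument in the comment above deserves a concrete model: if pages are allocated strictly in block order, the storage manager's notion of relation length stays equal to the number of initialized pages at every step. A toy sketch of that invariant (hypothetical types and names, not the backend's smgr API):

#include <assert.h>
#include <stdint.h>

/* Toy relation: nothing but the page count the storage layer believes in. */
typedef struct
{
    uint32_t nblocks;
} ToyRelation;

/*
 * Allocate block blkno, extending the file by exactly one page.  Because
 * callers initialize pages strictly in block order, blkno always equals
 * the current length, so the storage layer's idea of the physical index
 * length never lags the pages the index has created -- the property the
 * comment above relies on.
 */
static void
toy_getnewbuf(ToyRelation *rel, uint32_t blkno)
{
    assert(blkno == rel->nblocks);  /* out-of-order init would break this */
    rel->nblocks++;                 /* models the smgrextend() call */
}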
@@ -501,15 +501,16 @@ _hash_expandtable(Relation rel, Buffer metabuf)
		goto fail;
 
	/*
-	 * Can't split anymore if maxbucket has reached its maximum possible value.
+	 * Can't split anymore if maxbucket has reached its maximum possible
+	 * value.
	 *
	 * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
	 * the calculation maxbucket+1 mustn't overflow). Currently we restrict
	 * to half that because of overflow looping in _hash_log2() and
	 * insufficient space in hashm_spares[]. It's moot anyway because an
-	 * index with 2^32 buckets would certainly overflow BlockNumber and
-	 * hence _hash_alloc_buckets() would fail, but if we supported buckets
-	 * smaller than a disk block then this would be an independent constraint.
+	 * index with 2^32 buckets would certainly overflow BlockNumber and hence
+	 * _hash_alloc_buckets() would fail, but if we supported buckets smaller
+	 * than a disk block then this would be an independent constraint.
	 */
	if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
		goto fail;
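The "overflow looping in _hash_log2()" that this comment cites is concrete: a ceiling-log2 computed by repeated doubling never terminates once the doubled limit wraps past 2^32, which is why maxbucket is capped at 0x7FFFFFFE, roughly half of UINT_MAX. A sketch of the failure mode (the loop shape is assumed from the comment; see the real _hash_log2() in hashutil.c):

#include <stdint.h>

/*
 * Ceiling log2 by repeated doubling, in the shape the comment above
 * alludes to (assumed, not copied from hashutil.c).  For num > 2^31,
 * "limit <<= 1" eventually wraps to 0; 0 < num then stays true and the
 * loop never exits.  Capping maxbucket at 0x7FFFFFFE keeps every input
 * this routine would see safely below 2^31.
 */
static uint32_t
log2_ceil_sketch(uint32_t num)
{
    uint32_t i = 0;
    uint32_t limit;

    for (limit = 1; limit < num; limit <<= 1)
        i++;
    return i;
}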
@@ -536,10 +537,10 @@ _hash_expandtable(Relation rel, Buffer metabuf)
	/*
	 * Likewise lock the new bucket (should never fail).
	 *
-	 * Note: it is safe to compute the new bucket's blkno here, even though
-	 * we may still need to update the BUCKET_TO_BLKNO mapping. This is
-	 * because the current value of hashm_spares[hashm_ovflpoint] correctly
-	 * shows where we are going to put a new splitpoint's worth of buckets.
+	 * Note: it is safe to compute the new bucket's blkno here, even though we
+	 * may still need to update the BUCKET_TO_BLKNO mapping. This is because
+	 * the current value of hashm_spares[hashm_ovflpoint] correctly shows
+	 * where we are going to put a new splitpoint's worth of buckets.
	 */
	start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
@@ -557,11 +558,12 @@ _hash_expandtable(Relation rel, Buffer metabuf)
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		Assert(spare_ndx == metap->hashm_ovflpoint + 1);
+
		/*
-		 * The number of buckets in the new splitpoint is equal to the
-		 * total number already in existence, i.e. new_bucket. Currently
-		 * this maps one-to-one to blocks required, but someday we may need
-		 * a more complicated calculation here.
+		 * The number of buckets in the new splitpoint is equal to the total
+		 * number already in existence, i.e. new_bucket. Currently this maps
+		 * one-to-one to blocks required, but someday we may need a more
+		 * complicated calculation here.
		 */
		if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
		{
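The doubling rule in the comment above can be made concrete: under the splitpoint scheme it implies, splitpoint 0 covers only bucket 0, and each later splitpoint s covers the 2^(s-1) buckets numbered 2^(s-1) through 2^s - 1, so the splitpoint opened by bucket new_bucket needs exactly new_bucket blocks while the mapping stays one-to-one. A small illustration (hypothetical helper, not the backend's macros):

#include <stdint.h>

/*
 * Buckets covered by a splitpoint under the scheme the comment above
 * implies: splitpoint 0 holds bucket 0 alone, and splitpoint s >= 1
 * holds the 2^(s-1) buckets numbered 2^(s-1) .. 2^s - 1.  That count
 * equals the total number of buckets that existed before the splitpoint
 * opened, i.e. new_bucket, and maps one-to-one to blocks required.
 */
static uint32_t
splitpoint_nbuckets(uint32_t splitpoint)
{
    return (splitpoint == 0) ? 1 : (uint32_t) 1 << (splitpoint - 1);
}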
@@ -673,14 +675,14 @@ fail:
 static bool
 _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
 {
-	BlockNumber lastblock;
+	BlockNumber lastblock;
	char		zerobuf[BLCKSZ];
 
	lastblock = firstblock + nblocks - 1;
 
	/*
-	 * Check for overflow in block number calculation; if so, we cannot
-	 * extend the index anymore.
+	 * Check for overflow in block number calculation; if so, we cannot extend
+	 * the index anymore.
	 */
	if (lastblock < firstblock || lastblock == InvalidBlockNumber)
		return false;
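The overflow test at the end of this hunk relies on unsigned wraparound: BlockNumber is a uint32, so when firstblock + nblocks - 1 exceeds 2^32 - 1 the sum wraps and comes out smaller than firstblock. A self-contained version of the check (BlockNumber and InvalidBlockNumber mirrored from the backend's block.h):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

/*
 * True if blocks firstblock .. firstblock + nblocks - 1 are all valid
 * block numbers.  Unsigned overflow wraps, so a wrapped lastblock is
 * strictly less than firstblock; 0xFFFFFFFF is rejected because it is
 * reserved as InvalidBlockNumber.
 */
static bool
block_range_ok(BlockNumber firstblock, uint32_t nblocks)
{
    BlockNumber lastblock = firstblock + nblocks - 1;

    return !(lastblock < firstblock || lastblock == InvalidBlockNumber);
}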