
Remove un-needed braces around single statements.

This commit is contained in:
Bruce Momjian
1998-06-15 19:30:31 +00:00
parent 27db9ecd0b
commit 6bd323c6b3
224 changed files with 221 additions and 2504 deletions
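As a minimal sketch of what the cleanup does (illustrative only; the function and variable names below are hypothetical, not taken from the patch): a single controlled statement keeps its own line and indentation while the enclosing braces are dropped, so each hunk below removes only a "{" line and a "}" line.

#include <stdio.h>

/* Before the cleanup: braces around a single statement. */
static void check_old(int nkeys, int maxbucket, int ffactor)
{
    if (nkeys / (maxbucket + 1) > ffactor)
    {
        printf("bucket needs a split\n");
    }
}

/* After the cleanup: the braces are gone, the statement is unchanged. */
static void check_new(int nkeys, int maxbucket, int ffactor)
{
    if (nkeys / (maxbucket + 1) > ffactor)
        printf("bucket needs a split\n");
}

int main(void)
{
    check_old(100, 3, 10);
    check_new(100, 3, 10);
    return 0;
}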

src/backend/access/hash/hashinsert.c

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.12 1998/01/07 21:00:56 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.13 1998/06/15 19:27:48 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -204,9 +204,7 @@ _hash_insertonpg(Relation rel,
 if (do_expand ||
 (metap->hashm_nkeys / (metap->hashm_maxbucket + 1))
 > metap->hashm_ffactor)
-{
 _hash_expandtable(rel, metabuf);
-}
 _hash_relbuf(rel, metabuf, HASH_READ);
 return (res);
 }

src/backend/access/hash/hashovfl.c

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.15 1998/01/07 21:01:00 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.16 1998/06/15 19:27:49 momjian Exp $
 *
 * NOTES
 * Overflow pages look like ordinary relation pages.
@@ -64,9 +64,7 @@ _hash_addovflpage(Relation rel, Buffer *metabufp, Buffer buf)
 /* allocate an empty overflow page */
 oaddr = _hash_getovfladdr(rel, metabufp);
 if (oaddr == InvalidOvflAddress)
-{
 elog(ERROR, "_hash_addovflpage: problem with _hash_getovfladdr.");
-}
 ovflblkno = OADDR_TO_BLKNO(OADDR_OF(SPLITNUM(oaddr), OPAGENUM(oaddr)));
 Assert(BlockNumberIsValid(ovflblkno));
 ovflbuf = _hash_getbuf(rel, ovflblkno, HASH_WRITE);
@@ -171,9 +169,7 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
 if (offset > SPLITMASK)
 {
 if (++splitnum >= NCACHED)
-{
 elog(ERROR, OVMSG);
-}
 metap->OVFL_POINT = splitnum;
 metap->SPARES[splitnum] = metap->SPARES[splitnum - 1];
 metap->SPARES[splitnum - 1]--;
@@ -189,9 +185,7 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
 free_page++;
 if (free_page >= NCACHED)
-{
 elog(ERROR, OVMSG);
-}
 /*
 * This is tricky. The 1 indicates that you want the new page
@@ -205,17 +199,13 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
 */
 if (_hash_initbitmap(rel, metap, OADDR_OF(splitnum, offset),
 1, free_page))
-{
 elog(ERROR, "overflow_page: problem with _hash_initbitmap.");
-}
 metap->SPARES[splitnum]++;
 offset++;
 if (offset > SPLITMASK)
 {
 if (++splitnum >= NCACHED)
-{
 elog(ERROR, OVMSG);
-}
 metap->OVFL_POINT = splitnum;
 metap->SPARES[splitnum] = metap->SPARES[splitnum - 1];
 metap->SPARES[splitnum - 1]--;
@@ -252,18 +242,14 @@ found:
 bit = 1 + bit + (i * BMPGSZ_BIT(metap));
 if (bit >= metap->LAST_FREED)
-{
 metap->LAST_FREED = bit - 1;
-}
 /* Calculate the split number for this page */
 for (i = 0; (i < splitnum) && (bit > metap->SPARES[i]); i++)
 ;
 offset = (i ? bit - metap->SPARES[i - 1] : bit);
 if (offset >= SPLITMASK)
-{
 elog(ERROR, OVMSG);
-}
 /* initialize this page */
 oaddr = OADDR_OF(i, offset);
@@ -381,9 +367,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
 (splitnum ? metap->SPARES[splitnum - 1] : 0) + (addr & SPLITMASK) - 1;
 if (ovflpgno < metap->LAST_FREED)
-{
 metap->LAST_FREED = ovflpgno;
-}
 bitmappage = (ovflpgno >> (metap->BSHIFT + BYTE_TO_BIT));
 bitmapbit = ovflpgno & (BMPGSZ_BIT(metap) - 1);
@@ -403,13 +387,9 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
 * return that buffer with a write lock.
 */
 if (BlockNumberIsValid(nextblkno))
-{
 return (_hash_getbuf(rel, nextblkno, HASH_WRITE));
-}
 else
-{
 return (InvalidBuffer);
-}
 }
@@ -543,9 +523,7 @@ _hash_squeezebucket(Relation rel,
 {
 rblkno = ropaque->hasho_nextblkno;
 if (ropaque != wopaque)
-{
 _hash_relbuf(rel, rbuf, HASH_WRITE);
-}
 rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
 rpage = BufferGetPage(rbuf);
 _hash_checkpage(rpage, LH_OVERFLOW_PAGE);
@@ -621,9 +599,7 @@ _hash_squeezebucket(Relation rel,
 */
 rbuf = _hash_freeovflpage(rel, rbuf);
 if (BufferIsValid(rbuf))
-{
 _hash_relbuf(rel, rbuf, HASH_WRITE);
-}
 if (rblkno == wblkno)
 {

src/backend/access/hash/hashpage.c

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.15 1998/01/07 21:01:08 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.16 1998/06/15 19:27:49 momjian Exp $
 *
 * NOTES
 * Postgres hash pages look like ordinary relation pages. The opaque
@@ -106,9 +106,7 @@ _hash_metapinit(Relation rel)
 if ((1 << i) < (metap->hashm_bsize -
 (DOUBLEALIGN(sizeof(PageHeaderData)) +
 DOUBLEALIGN(sizeof(HashPageOpaqueData)))))
-{
 break;
-}
 }
 Assert(i);
 metap->hashm_bmsize = 1 << i;
@@ -191,9 +189,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
 Buffer buf;
 if (blkno == P_NEW)
-{
 elog(ERROR, "_hash_getbuf: internal error: hash AM does not use P_NEW");
-}
 switch (access)
 {
 case HASH_WRITE:
@@ -395,14 +391,10 @@ _hash_pagedel(Relation rel, ItemPointer tid)
 {
 buf = _hash_freeovflpage(rel, buf);
 if (BufferIsValid(buf))
-{
 _hash_relbuf(rel, buf, HASH_WRITE);
-}
 }
 else
-{
 _hash_relbuf(rel, buf, HASH_WRITE);
-}
 metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
 metap = (HashMetaPage) BufferGetPage(metabuf);
@@ -545,9 +537,7 @@ _hash_splitpage(Relation rel,
 opage = BufferGetPage(obuf);
 _hash_checkpage(opage, LH_OVERFLOW_PAGE);
 if (PageIsEmpty(opage))
-{
 elog(ERROR, "_hash_splitpage: empty overflow page %d", oblkno);
-}
 oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
 }

src/backend/access/hash/hashscan.c

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.13 1998/01/07 21:01:13 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.14 1998/06/15 19:27:50 momjian Exp $
 *
 * NOTES
 * Because we can be doing an index scan on a relation while we
@@ -71,9 +71,7 @@ _hash_dropscan(IndexScanDesc scan)
 for (chk = HashScans;
 chk != (HashScanList) NULL && chk->hashsl_scan != scan;
 chk = chk->hashsl_next)
-{
 last = chk;
-}
 if (chk == (HashScanList) NULL)
 elog(ERROR, "hash scan list trashed; can't find 0x%lx", scan);

src/backend/access/hash/hashsearch.c

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.14 1997/09/08 21:40:52 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.15 1998/06/15 19:27:50 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -49,9 +49,7 @@ _hash_search(Relation rel,
 bucket = 0;
 }
 else
-{
 bucket = _hash_call(rel, metap, keyDatum);
-}
 blkno = BUCKET_TO_BLKNO(bucket);
@@ -109,9 +107,7 @@ _hash_next(IndexScanDesc scan, ScanDirection dir)
 * next tuple, we come back with a lock on that buffer.
 */
 if (!_hash_step(scan, &buf, dir, metabuf))
-{
 return ((RetrieveIndexResult) NULL);
-}
 /* if we're here, _hash_step found a valid tuple */
 current = &(scan->currentItemData);
@@ -225,9 +221,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 if (PageIsEmpty(page))
 {
 if (BlockNumberIsValid(opaque->hasho_nextblkno))
-{
 _hash_readnext(rel, &buf, &page, &opaque);
-}
 else
 {
 ItemPointerSetInvalid(current);
@@ -249,15 +243,11 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 if (ScanDirectionIsBackward(dir))
 {
 while (BlockNumberIsValid(opaque->hasho_nextblkno))
-{
 _hash_readnext(rel, &buf, &page, &opaque);
-}
 }
 if (!_hash_step(scan, &buf, dir, metabuf))
-{
 return ((RetrieveIndexResult) NULL);
-}
 /* if we're here, _hash_step found a valid tuple */
 current = &(scan->currentItemData);
@@ -321,13 +311,9 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
 */
 maxoff = PageGetMaxOffsetNumber(page);
 if (ItemPointerIsValid(current))
-{
 offnum = ItemPointerGetOffsetNumber(current);
-}
 else
-{
 offnum = InvalidOffsetNumber;
-}
 /*
 * 'offnum' now points to the last tuple we have seen (if any).
@@ -371,9 +357,7 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
 Assert(opaque->hasho_bucket == bucket);
 while (PageIsEmpty(page) &&
 BlockNumberIsValid(opaque->hasho_nextblkno))
-{
 _hash_readnext(rel, &buf, &page, &opaque);
-}
 maxoff = PageGetMaxOffsetNumber(page);
 offnum = FirstOffsetNumber;
 }
@@ -420,9 +404,7 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
 opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 Assert(opaque->hasho_bucket == bucket);
 while (BlockNumberIsValid(opaque->hasho_nextblkno))
-{
 _hash_readnext(rel, &buf, &page, &opaque);
-}
 maxoff = offnum = PageGetMaxOffsetNumber(page);
 }
 else