
8.4 pgindent run, with new combined Linux/FreeBSD/MinGW typedef list
provided by Andrew.
Bruce Momjian
2009-06-11 14:49:15 +00:00
parent 4e86efb4e5
commit d747140279
654 changed files with 11900 additions and 11387 deletions

src/backend/storage/buffer/bufmgr.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.251 2009/04/03 18:17:43 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.252 2009/06/11 14:49:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -81,9 +81,9 @@ static volatile BufferDesc *PinCountWaitBuf = NULL;
static Buffer ReadBuffer_common(SMgrRelation reln, bool isLocalBuf,
ForkNumber forkNum, BlockNumber blockNum,
ReadBufferMode mode , BufferAccessStrategy strategy,
bool *hit);
ForkNumber forkNum, BlockNumber blockNum,
ReadBufferMode mode, BufferAccessStrategy strategy,
bool *hit);
static bool PinBuffer(volatile BufferDesc *buf, BufferAccessStrategy strategy);
static void PinBuffer_Locked(volatile BufferDesc *buf);
static void UnpinBuffer(volatile BufferDesc *buf, bool fixOwner);
@ -106,7 +106,7 @@ static void AtProcExit_Buffers(int code, Datum arg);
* PrefetchBuffer -- initiate asynchronous read of a block of a relation
*
* This is named by analogy to ReadBuffer but doesn't actually allocate a
* buffer. Instead it tries to ensure that a future ReadBuffer for the given
* buffer. Instead it tries to ensure that a future ReadBuffer for the given
* block will not be delayed by the I/O. Prefetching is optional.
* No-op if prefetching isn't compiled in.
*/
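A minimal caller-side sketch of the usage pattern the comment above describes, assuming the usual backend headers (storage/bufmgr.h); the scan function and the prefetch distance are invented for illustration and are not part of this commit:

/*
 * Sketch only, not part of the patch: prefetch a fixed distance ahead of a
 * sequential scan.  PrefetchBuffer is purely advisory; a build without
 * USE_PREFETCH simply ignores it, so the ReadBuffer below is still required.
 */
static void
scan_with_prefetch(Relation rel, BlockNumber nblocks)
{
	BlockNumber blkno;
	BlockNumber prefetch_distance = 32;		/* assumed tuning value */

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		Buffer		buf;

		if (blkno + prefetch_distance < nblocks)
			PrefetchBuffer(rel, MAIN_FORKNUM, blkno + prefetch_distance);

		buf = ReadBuffer(rel, blkno);
		/* ... examine the page ... */
		ReleaseBuffer(buf);
	}
}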
@ -126,16 +126,16 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
if (RELATION_IS_OTHER_TEMP(reln))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary tables of other sessions")));
errmsg("cannot access temporary tables of other sessions")));
/* pass it off to localbuf.c */
LocalPrefetchBuffer(reln->rd_smgr, forkNum, blockNum);
}
else
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
LWLockId newPartitionLock; /* buffer partition lock for it */
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
LWLockId newPartitionLock; /* buffer partition lock for it */
int buf_id;
/* create a tag so we can lookup the buffer */
@ -156,17 +156,17 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
/*
* If the block *is* in buffers, we do nothing. This is not really
* ideal: the block might be just about to be evicted, which would
* be stupid since we know we are going to need it soon. But the
* only easy answer is to bump the usage_count, which does not seem
* like a great solution: when the caller does ultimately touch the
* block, usage_count would get bumped again, resulting in too much
* favoritism for blocks that are involved in a prefetch sequence.
* A real fix would involve some additional per-buffer state, and
* it's not clear that there's enough of a problem to justify that.
* ideal: the block might be just about to be evicted, which would be
* stupid since we know we are going to need it soon. But the only
* easy answer is to bump the usage_count, which does not seem like a
* great solution: when the caller does ultimately touch the block,
* usage_count would get bumped again, resulting in too much
* favoritism for blocks that are involved in a prefetch sequence. A
* real fix would involve some additional per-buffer state, and it's
* not clear that there's enough of a problem to justify that.
*/
}
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */
}
@ -202,7 +202,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* for non-critical data, where the caller is prepared to repair errors.
*
* In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
* with zeros instead of reading it from disk. Useful when the caller is
* with zeros instead of reading it from disk. Useful when the caller is
* going to fill the page from scratch, since this saves I/O and avoids
* unnecessary failure if the page-on-disk has corrupt page headers.
* Caution: do not use this mode to read a page that is beyond the relation's
@ -216,16 +216,16 @@ Buffer
ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
ReadBufferMode mode, BufferAccessStrategy strategy)
{
bool hit;
Buffer buf;
bool hit;
Buffer buf;
/* Open it at the smgr level if not already done */
RelationOpenSmgr(reln);
/*
* Reject attempts to read non-local temporary relations; we would
* be likely to get wrong data since we have no visibility into the
* owning session's local buffers.
* Reject attempts to read non-local temporary relations; we would be
* likely to get wrong data since we have no visibility into the owning
* session's local buffers.
*/
if (RELATION_IS_OTHER_TEMP(reln))
ereport(ERROR,
@ -233,8 +233,8 @@ ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
errmsg("cannot access temporary tables of other sessions")));
/*
* Read the buffer, and update pgstat counters to reflect a cache
* hit or miss.
* Read the buffer, and update pgstat counters to reflect a cache hit or
* miss.
*/
pgstat_count_buffer_read(reln);
buf = ReadBuffer_common(reln->rd_smgr, reln->rd_istemp, forkNum, blockNum,
@ -256,9 +256,10 @@ ReadBufferWithoutRelcache(RelFileNode rnode, bool isTemp,
ForkNumber forkNum, BlockNumber blockNum,
ReadBufferMode mode, BufferAccessStrategy strategy)
{
bool hit;
bool hit;
SMgrRelation smgr = smgropen(rnode);
return ReadBuffer_common(smgr, isTemp, forkNum, blockNum, mode, strategy,
&hit);
}
@ -357,9 +358,9 @@ ReadBuffer_common(SMgrRelation smgr, bool isLocalBuf, ForkNumber forkNum,
bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
if (!PageIsNew((Page) bufBlock))
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
blockNum, relpath(smgr->smgr_rnode, forkNum)),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
(errmsg("unexpected data beyond EOF in block %u of relation %s",
blockNum, relpath(smgr->smgr_rnode, forkNum)),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
* We *must* do smgrextend before succeeding, else the page will not
@ -439,9 +440,9 @@ ReadBuffer_common(SMgrRelation smgr, bool isLocalBuf, ForkNumber forkNum,
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page header in block %u of relation %s",
blockNum,
relpath(smgr->smgr_rnode, forkNum))));
errmsg("invalid page header in block %u of relation %s",
blockNum,
relpath(smgr->smgr_rnode, forkNum))));
}
}
}
@ -631,17 +632,17 @@ BufferAlloc(SMgrRelation smgr, ForkNumber forkNum,
/* OK, do the I/O */
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
smgr->smgr_rnode.spcNode,
smgr->smgr_rnode.dbNode,
smgr->smgr_rnode.relNode);
smgr->smgr_rnode.spcNode,
smgr->smgr_rnode.dbNode,
smgr->smgr_rnode.relNode);
FlushBuffer(buf, NULL);
LWLockRelease(buf->content_lock);
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
smgr->smgr_rnode.spcNode,
smgr->smgr_rnode.spcNode,
smgr->smgr_rnode.dbNode,
smgr->smgr_rnode.relNode);
smgr->smgr_rnode.relNode);
}
else
{
@ -983,7 +984,7 @@ ReleaseAndReadBuffer(Buffer buffer,
Relation relation,
BlockNumber blockNum)
{
ForkNumber forkNum = MAIN_FORKNUM;
ForkNumber forkNum = MAIN_FORKNUM;
volatile BufferDesc *bufHdr;
if (BufferIsValid(buffer))
@ -2708,7 +2709,8 @@ AbortBufferIO(void)
if (sv_flags & BM_IO_ERROR)
{
/* Buffer is pinned, so we can read tag without spinlock */
char *path = relpath(buf->tag.rnode, buf->tag.forkNum);
char *path = relpath(buf->tag.rnode, buf->tag.forkNum);
ereport(WARNING,
(errcode(ERRCODE_IO_ERROR),
errmsg("could not write block %u of %s",
@ -2732,7 +2734,8 @@ buffer_write_error_callback(void *arg)
/* Buffer is pinned, so we can read the tag without locking the spinlock */
if (bufHdr != NULL)
{
char *path = relpath(bufHdr->tag.rnode, bufHdr->tag.forkNum);
char *path = relpath(bufHdr->tag.rnode, bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
pfree(path);

src/backend/storage/buffer/localbuf.c

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.86 2009/01/12 05:10:44 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.87 2009/06/11 14:49:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -85,7 +85,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
/* Not in buffers, so initiate prefetch */
smgrprefetch(smgr, forkNum, blockNum);
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */
}
@ -149,7 +149,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
smgr->smgr_rnode.relNode, forkNum, blockNum, -nextFreeLocalBuf - 1);
smgr->smgr_rnode.relNode, forkNum, blockNum, -nextFreeLocalBuf - 1);
#endif
/*

src/backend/storage/file/buffile.c

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/file/buffile.c,v 1.33 2009/01/01 17:23:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/file/buffile.c,v 1.34 2009/06/11 14:49:01 momjian Exp $
*
* NOTES:
*
@ -125,7 +125,7 @@ extendBufFile(BufFile *file)
file->files = (File *) repalloc(file->files,
(file->numFiles + 1) * sizeof(File));
file->offsets = (off_t *) repalloc(file->offsets,
(file->numFiles + 1) * sizeof(off_t));
(file->numFiles + 1) * sizeof(off_t));
file->files[file->numFiles] = pfile;
file->offsets[file->numFiles] = 0L;
file->numFiles++;

src/backend/storage/file/fd.c

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.148 2009/03/04 09:12:49 petere Exp $
* $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.149 2009/06/11 14:49:01 momjian Exp $
*
* NOTES:
*
@ -128,13 +128,13 @@ static int max_safe_fds = 32; /* default if not changed */
* Flag to tell whether it's worth scanning VfdCache looking for temp files to
* close
*/
static bool have_xact_temporary_files = false;
static bool have_xact_temporary_files = false;
typedef struct vfd
{
int fd; /* current FD, or VFD_CLOSED if none */
unsigned short fdstate; /* bitflags for VFD's state */
SubTransactionId create_subid; /* for TEMPORARY fds, creating subxact */
SubTransactionId create_subid; /* for TEMPORARY fds, creating subxact */
File nextFree; /* link to next free VFD, if in freelist */
File lruMoreRecently; /* doubly linked recency-of-use list */
File lruLessRecently;
@ -364,6 +364,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
int used = 0;
int highestfd = 0;
int j;
#ifdef HAVE_GETRLIMIT
struct rlimit rlim;
int getrlimit_status;
@ -373,14 +374,14 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
fd = (int *) palloc(size * sizeof(int));
#ifdef HAVE_GETRLIMIT
# ifdef RLIMIT_NOFILE /* most platforms use RLIMIT_NOFILE */
#ifdef RLIMIT_NOFILE /* most platforms use RLIMIT_NOFILE */
getrlimit_status = getrlimit(RLIMIT_NOFILE, &rlim);
# else /* but BSD doesn't ... */
#else /* but BSD doesn't ... */
getrlimit_status = getrlimit(RLIMIT_OFILE, &rlim);
# endif /* RLIMIT_NOFILE */
#endif /* RLIMIT_NOFILE */
if (getrlimit_status != 0)
ereport(WARNING, (errmsg("getrlimit failed: %m")));
#endif /* HAVE_GETRLIMIT */
#endif /* HAVE_GETRLIMIT */
/* dup until failure or probe limit reached */
for (;;)
@ -388,7 +389,11 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
int thisfd;
#ifdef HAVE_GETRLIMIT
/* don't go beyond RLIMIT_NOFILE; causes irritating kernel logs on some platforms */
/*
* don't go beyond RLIMIT_NOFILE; causes irritating kernel logs on
* some platforms
*/
if (getrlimit_status == 0 && highestfd >= rlim.rlim_cur - 1)
break;
#endif
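The getrlimit() dance above is easier to see standalone. A small self-contained sketch of the same call pattern (nothing here is from the patch; RLIMIT_OFILE is the older BSD spelling handled by the #ifdef a few hunks up):

#include <stdio.h>
#include <sys/resource.h>

/*
 * Sketch: query the per-process open-file limit, as count_usable_fds() does,
 * and fall back gracefully if the call fails.
 */
int
main(void)
{
	struct rlimit rlim;

	if (getrlimit(RLIMIT_NOFILE, &rlim) != 0)
	{
		perror("getrlimit");
		return 1;
	}
	printf("soft fd limit: %ld, hard fd limit: %ld\n",
		   (long) rlim.rlim_cur, (long) rlim.rlim_max);
	return 0;
}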
@ -1069,7 +1074,7 @@ FilePrefetch(File file, off_t offset, int amount)
int returnCode;
Assert(FileIsValid(file));
DO_DB(elog(LOG, "FilePrefetch: %d (%s) " INT64_FORMAT " %d",
file, VfdCache[file].fileName,
(int64) offset, amount));

src/backend/storage/freespace/freespace.c

@ -8,16 +8,16 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.72 2009/01/20 18:59:37 heikki Exp $
* $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.73 2009/06/11 14:49:01 momjian Exp $
*
*
* NOTES:
*
* Free Space Map keeps track of the amount of free space on pages, and
* allows quickly searching for a page with enough free space. The FSM is
* stored in a dedicated relation fork of all heap relations, and those
* index access methods that need it (see also indexfsm.c). See README for
* more information.
* Free Space Map keeps track of the amount of free space on pages, and
* allows quickly searching for a page with enough free space. The FSM is
* stored in a dedicated relation fork of all heap relations, and those
* index access methods that need it (see also indexfsm.c). See README for
* more information.
*
*-------------------------------------------------------------------------
*/
@ -49,10 +49,10 @@
* look like this
*
*
* Range        Category
* 0    -   31         0
* 32   -   63         1
* ...      ...      ...
* 8096 - 8127        253
* 8128 - 8163        254
* 8164 - 8192        255
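A sketch of the arithmetic behind the table above, assuming the default BLCKSZ of 8192 and 256 categories (a 32-byte step); the constant and function names are illustrative rather than the actual macros in freespace.c, and the real code adjusts the top categories slightly, as the 8128-8163 and 8164-8192 rows show:

#define SKETCH_BLCKSZ		8192
#define SKETCH_CATEGORIES	256
#define SKETCH_CAT_STEP		(SKETCH_BLCKSZ / SKETCH_CATEGORIES)	/* 32 bytes */

/* Map an amount of free space to its category, per the table above. */
static int
sketch_space_avail_to_cat(int avail)
{
	int			cat = avail / SKETCH_CAT_STEP;	/* e.g. 40 bytes -> category 1 */

	if (cat > SKETCH_CATEGORIES - 1)
		cat = SKETCH_CATEGORIES - 1;
	return cat;
}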
@ -86,12 +86,12 @@
*/
typedef struct
{
int level; /* level */
int logpageno; /* page number within the level */
int level; /* level */
int logpageno; /* page number within the level */
} FSMAddress;
/* Address of the root page. */
static const FSMAddress FSM_ROOT_ADDRESS = { FSM_ROOT_LEVEL, 0 };
static const FSMAddress FSM_ROOT_ADDRESS = {FSM_ROOT_LEVEL, 0};
/* functions to navigate the tree */
static FSMAddress fsm_get_child(FSMAddress parent, uint16 slot);
@ -106,11 +106,11 @@ static void fsm_extend(Relation rel, BlockNumber fsm_nblocks);
/* functions to convert amount of free space to a FSM category */
static uint8 fsm_space_avail_to_cat(Size avail);
static uint8 fsm_space_needed_to_cat(Size needed);
static Size fsm_space_cat_to_avail(uint8 cat);
static Size fsm_space_cat_to_avail(uint8 cat);
/* workhorse functions for various operations */
static int fsm_set_and_search(Relation rel, FSMAddress addr, uint16 slot,
uint8 newValue, uint8 minValue);
uint8 newValue, uint8 minValue);
static BlockNumber fsm_search(Relation rel, uint8 min_cat);
static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
@ -133,7 +133,8 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
BlockNumber
GetPageWithFreeSpace(Relation rel, Size spaceNeeded)
{
uint8 min_cat = fsm_space_needed_to_cat(spaceNeeded);
uint8 min_cat = fsm_space_needed_to_cat(spaceNeeded);
return fsm_search(rel, min_cat);
}
@ -259,7 +260,7 @@ GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk)
void
FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks)
{
BlockNumber new_nfsmblocks;
BlockNumber new_nfsmblocks;
FSMAddress first_removed_address;
uint16 first_removed_slot;
Buffer buf;
@ -278,15 +279,15 @@ FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks)
/*
* Zero out the tail of the last remaining FSM page. If the slot
* representing the first removed heap block is at a page boundary, as
* the first slot on the FSM page that first_removed_address points to,
* we can just truncate that page altogether.
* representing the first removed heap block is at a page boundary, as the
* first slot on the FSM page that first_removed_address points to, we can
* just truncate that page altogether.
*/
if (first_removed_slot > 0)
{
buf = fsm_readbuf(rel, first_removed_address, false);
if (!BufferIsValid(buf))
return; /* nothing to do; the FSM was already smaller */
return; /* nothing to do; the FSM was already smaller */
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
fsm_truncate_avail(BufferGetPage(buf), first_removed_slot);
MarkBufferDirty(buf);
@ -298,15 +299,15 @@ FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks)
{
new_nfsmblocks = fsm_logical_to_physical(first_removed_address);
if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks)
return; /* nothing to do; the FSM was already smaller */
return; /* nothing to do; the FSM was already smaller */
}
/* Truncate the unused FSM pages */
smgrtruncate(rel->rd_smgr, FSM_FORKNUM, new_nfsmblocks, rel->rd_istemp);
/*
* Need to invalidate the relcache entry, because rd_fsm_nblocks
* seen by other backends is no longer valid.
* Need to invalidate the relcache entry, because rd_fsm_nblocks seen by
* other backends is no longer valid.
*/
if (!InRecovery)
CacheInvalidateRelcache(rel);
@ -320,7 +321,7 @@ FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks)
void
FreeSpaceMapVacuum(Relation rel)
{
bool dummy;
bool dummy;
/*
* Traverse the tree in depth-first order. The tree is stored physically
@ -337,7 +338,7 @@ FreeSpaceMapVacuum(Relation rel)
static uint8
fsm_space_avail_to_cat(Size avail)
{
int cat;
int cat;
Assert(avail < BLCKSZ);
@ -377,12 +378,12 @@ fsm_space_cat_to_avail(uint8 cat)
static uint8
fsm_space_needed_to_cat(Size needed)
{
int cat;
int cat;
/* Can't ask for more space than the highest category represents */
if (needed > MaxFSMRequestSize)
elog(ERROR, "invalid FSM request size %lu",
(unsigned long) needed);
elog(ERROR, "invalid FSM request size %lu",
(unsigned long) needed);
if (needed == 0)
return 1;
@ -402,8 +403,8 @@ static BlockNumber
fsm_logical_to_physical(FSMAddress addr)
{
BlockNumber pages;
int leafno;
int l;
int leafno;
int l;
/*
* Calculate the logical page number of the first leaf page below the
@ -422,8 +423,8 @@ fsm_logical_to_physical(FSMAddress addr)
}
/*
* If the page we were asked for wasn't at the bottom level, subtract
* the additional lower level pages we counted above.
* If the page we were asked for wasn't at the bottom level, subtract the
* additional lower level pages we counted above.
*/
pages -= addr.level;
@ -437,7 +438,7 @@ fsm_logical_to_physical(FSMAddress addr)
static FSMAddress
fsm_get_location(BlockNumber heapblk, uint16 *slot)
{
FSMAddress addr;
FSMAddress addr;
addr.level = FSM_BOTTOM_LEVEL;
addr.logpageno = heapblk / SlotsPerFSMPage;
@ -463,7 +464,7 @@ fsm_get_heap_blk(FSMAddress addr, uint16 slot)
static FSMAddress
fsm_get_parent(FSMAddress child, uint16 *slot)
{
FSMAddress parent;
FSMAddress parent;
Assert(child.level < FSM_ROOT_LEVEL);
@ -481,7 +482,7 @@ fsm_get_parent(FSMAddress child, uint16 *slot)
static FSMAddress
fsm_get_child(FSMAddress parent, uint16 slot)
{
FSMAddress child;
FSMAddress child;
Assert(parent.level > FSM_BOTTOM_LEVEL);
@ -501,7 +502,7 @@ static Buffer
fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
{
BlockNumber blkno = fsm_logical_to_physical(addr);
Buffer buf;
Buffer buf;
RelationOpenSmgr(rel);
@ -545,20 +546,20 @@ static void
fsm_extend(Relation rel, BlockNumber fsm_nblocks)
{
BlockNumber fsm_nblocks_now;
Page pg;
Page pg;
pg = (Page) palloc(BLCKSZ);
PageInit(pg, BLCKSZ, 0);
/*
* We use the relation extension lock to lock out other backends
* trying to extend the FSM at the same time. It also locks out
* extension of the main fork, unnecessarily, but extending the
* FSM happens seldom enough that it doesn't seem worthwhile to
* have a separate lock tag type for it.
* We use the relation extension lock to lock out other backends trying to
* extend the FSM at the same time. It also locks out extension of the
* main fork, unnecessarily, but extending the FSM happens seldom enough
* that it doesn't seem worthwhile to have a separate lock tag type for
* it.
*
* Note that another backend might have extended or created the
* relation before we get the lock.
* Note that another backend might have extended or created the relation
* before we get the lock.
*/
LockRelationForExtension(rel, ExclusiveLock);
@ -631,14 +632,14 @@ fsm_set_and_search(Relation rel, FSMAddress addr, uint16 slot,
static BlockNumber
fsm_search(Relation rel, uint8 min_cat)
{
int restarts = 0;
FSMAddress addr = FSM_ROOT_ADDRESS;
int restarts = 0;
FSMAddress addr = FSM_ROOT_ADDRESS;
for (;;)
{
int slot;
Buffer buf;
uint8 max_avail = 0;
int slot;
Buffer buf;
uint8 max_avail = 0;
/* Read the FSM page. */
buf = fsm_readbuf(rel, addr, false);
@ -678,8 +679,8 @@ fsm_search(Relation rel, uint8 min_cat)
}
else
{
uint16 parentslot;
FSMAddress parent;
uint16 parentslot;
FSMAddress parent;
/*
* At lower level, failure can happen if the value in the upper-
@ -697,11 +698,11 @@ fsm_search(Relation rel, uint8 min_cat)
fsm_set_and_search(rel, parent, parentslot, max_avail, 0);
/*
* If the upper pages are badly out of date, we might need to
* loop quite a few times, updating them as we go. Any
* inconsistencies should eventually be corrected and the loop
* should end. Looping indefinitely is nevertheless scary, so
* provide an emergency valve.
* If the upper pages are badly out of date, we might need to loop
* quite a few times, updating them as we go. Any inconsistencies
* should eventually be corrected and the loop should end. Looping
* indefinitely is nevertheless scary, so provide an emergency
* valve.
*/
if (restarts++ > 10000)
return InvalidBlockNumber;
@ -719,9 +720,9 @@ fsm_search(Relation rel, uint8 min_cat)
static uint8
fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof_p)
{
Buffer buf;
Page page;
uint8 max_avail;
Buffer buf;
Page page;
uint8 max_avail;
/* Read the page if it exists, or return EOF */
buf = fsm_readbuf(rel, addr, false);
@ -736,17 +737,17 @@ fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof_p)
page = BufferGetPage(buf);
/*
* Recurse into children, and fix the information stored about them
* at this level.
* Recurse into children, and fix the information stored about them at
* this level.
*/
if (addr.level > FSM_BOTTOM_LEVEL)
{
int slot;
bool eof = false;
int slot;
bool eof = false;
for (slot = 0; slot < SlotsPerFSMPage; slot++)
{
int child_avail;
int child_avail;
/* After we hit end-of-file, just clear the rest of the slots */
if (!eof)

src/backend/storage/freespace/fsmpage.c

@ -8,15 +8,15 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/freespace/fsmpage.c,v 1.4 2009/01/01 17:23:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/freespace/fsmpage.c,v 1.5 2009/06/11 14:49:01 momjian Exp $
*
* NOTES:
*
* The public functions in this file form an API that hides the internal
* structure of a FSM page. This allows freespace.c to treat each FSM page
* as a black box with SlotsPerPage "slots". fsm_set_avail() and
* fsm_get_avail() let you get/set the value of a slot, and
* fsm_search_avail() lets you search for a slot with value >= X.
* The public functions in this file form an API that hides the internal
* structure of a FSM page. This allows freespace.c to treat each FSM page
* as a black box with SlotsPerPage "slots". fsm_set_avail() and
* fsm_get_avail() let you get/set the value of a slot, and
* fsm_search_avail() lets you search for a slot with value >= X.
*
*-------------------------------------------------------------------------
*/
@ -43,9 +43,9 @@ rightneighbor(int x)
x++;
/*
* Check if we stepped to the leftmost node at next level, and correct
* if so. The leftmost nodes at each level are numbered x = 2^level - 1,
* so check if (x + 1) is a power of two, using a standard
* Check if we stepped to the leftmost node at next level, and correct if
* so. The leftmost nodes at each level are numbered x = 2^level - 1, so
* check if (x + 1) is a power of two, using a standard
* twos-complement-arithmetic trick.
*/
if (((x + 1) & x) == 0)
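The twos-complement trick used in that test can be verified in isolation; a tiny standalone sketch (not from the patch):

#include <stdio.h>

/*
 * ((x + 1) & x) == 0 holds exactly when x + 1 is a power of two,
 * i.e. when x has the form 2^level - 1 (a leftmost node in the tree).
 */
static int
is_leftmost_node(int x)
{
	return ((x + 1) & x) == 0;
}

int
main(void)
{
	int			x;

	for (x = 0; x < 20; x++)
		if (is_leftmost_node(x))
			printf("%d is a leftmost node (2^k - 1)\n", x);
	/* prints 0, 1, 3, 7, 15 */
	return 0;
}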
@ -62,9 +62,9 @@ rightneighbor(int x)
bool
fsm_set_avail(Page page, int slot, uint8 value)
{
int nodeno = NonLeafNodesPerPage + slot;
FSMPage fsmpage = (FSMPage) PageGetContents(page);
uint8 oldvalue;
int nodeno = NonLeafNodesPerPage + slot;
FSMPage fsmpage = (FSMPage) PageGetContents(page);
uint8 oldvalue;
Assert(slot < LeafNodesPerPage);
@ -77,14 +77,14 @@ fsm_set_avail(Page page, int slot, uint8 value)
fsmpage->fp_nodes[nodeno] = value;
/*
* Propagate up, until we hit the root or a node that doesn't
* need to be updated.
* Propagate up, until we hit the root or a node that doesn't need to be
* updated.
*/
do
{
uint8 newvalue = 0;
int lchild;
int rchild;
uint8 newvalue = 0;
int lchild;
int rchild;
nodeno = parentof(nodeno);
lchild = leftchild(nodeno);
@ -103,8 +103,8 @@ fsm_set_avail(Page page, int slot, uint8 value)
} while (nodeno > 0);
/*
* sanity check: if the new value is (still) higher than the value
* at the top, the tree is corrupt. If so, rebuild.
* sanity check: if the new value is (still) higher than the value at the
* top, the tree is corrupt. If so, rebuild.
*/
if (value > fsmpage->fp_nodes[0])
fsm_rebuild_page(page);
@ -121,7 +121,7 @@ fsm_set_avail(Page page, int slot, uint8 value)
uint8
fsm_get_avail(Page page, int slot)
{
FSMPage fsmpage = (FSMPage) PageGetContents(page);
FSMPage fsmpage = (FSMPage) PageGetContents(page);
Assert(slot < LeafNodesPerPage);
@ -137,7 +137,7 @@ fsm_get_avail(Page page, int slot)
uint8
fsm_get_max_avail(Page page)
{
FSMPage fsmpage = (FSMPage) PageGetContents(page);
FSMPage fsmpage = (FSMPage) PageGetContents(page);
return fsmpage->fp_nodes[0];
}
@ -158,16 +158,17 @@ int
fsm_search_avail(Buffer buf, uint8 minvalue, bool advancenext,
bool exclusive_lock_held)
{
Page page = BufferGetPage(buf);
FSMPage fsmpage = (FSMPage) PageGetContents(page);
int nodeno;
int target;
uint16 slot;
Page page = BufferGetPage(buf);
FSMPage fsmpage = (FSMPage) PageGetContents(page);
int nodeno;
int target;
uint16 slot;
restart:
restart:
/*
* Check the root first, and exit quickly if there's no leaf with
* enough free space
* Check the root first, and exit quickly if there's no leaf with enough
* free space
*/
if (fsmpage->fp_nodes[0] < minvalue)
return -1;
@ -184,13 +185,13 @@ fsm_search_avail(Buffer buf, uint8 minvalue, bool advancenext,
/*----------
* Start the search from the target slot. At every step, move one
* node to the right, then climb up to the parent. Stop when we reach
* node to the right, then climb up to the parent. Stop when we reach
* a node with enough free space (as we must, since the root has enough
* space).
*
* The idea is to gradually expand our "search triangle", that is, all
* nodes covered by the current node, and to be sure we search to the
* right from the start point. At the first step, only the target slot
* right from the start point. At the first step, only the target slot
* is examined. When we move up from a left child to its parent, we are
* adding the right-hand subtree of that parent to the search triangle.
* When we move right then up from a right child, we are dropping the
@ -207,11 +208,11 @@ fsm_search_avail(Buffer buf, uint8 minvalue, bool advancenext,
*
* For example, consider this tree:
*
*                 7
*         7               6
*     5       7       6       5
*   4   5   5   7   2   6   5   2
*                       T
*
* Assume that the target node is the node indicated by the letter T,
* and we're searching for a node with value of 6 or higher. The search
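For reference while reading that example, a sketch of the array-heap addressing it relies on, written here as functions for clarity; the real definitions are equivalent macros in the FSM headers:

/*
 * Sketch: root is node 0, and the leftmost node of each level is 2^level - 1.
 */
static int leftchild(int x)	{ return 2 * x + 1; }
static int rightchild(int x)	{ return 2 * x + 2; }
static int parentof(int x)	{ return (x - 1) / 2; }

/* climbing step from the search loop: move right along the level, then up */
/* nodeno = parentof(rightneighbor(nodeno)); */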
@ -230,8 +231,8 @@ fsm_search_avail(Buffer buf, uint8 minvalue, bool advancenext,
break;
/*
* Move to the right, wrapping around on same level if necessary,
* then climb up.
* Move to the right, wrapping around on same level if necessary, then
* climb up.
*/
nodeno = parentof(rightneighbor(nodeno));
}
@ -243,7 +244,7 @@ fsm_search_avail(Buffer buf, uint8 minvalue, bool advancenext,
*/
while (nodeno < NonLeafNodesPerPage)
{
int childnodeno = leftchild(nodeno);
int childnodeno = leftchild(nodeno);
if (childnodeno < NodesPerPage &&
fsmpage->fp_nodes[childnodeno] >= minvalue)
@ -260,17 +261,16 @@ fsm_search_avail(Buffer buf, uint8 minvalue, bool advancenext,
else
{
/*
* Oops. The parent node promised that either left or right
* child has enough space, but neither actually did. This can
* happen in case of a "torn page", IOW if we crashed earlier
* while writing the page to disk, and only part of the page
* made it to disk.
* Oops. The parent node promised that either left or right child
* has enough space, but neither actually did. This can happen in
* case of a "torn page", IOW if we crashed earlier while writing
* the page to disk, and only part of the page made it to disk.
*
* Fix the corruption and restart.
*/
RelFileNode rnode;
RelFileNode rnode;
ForkNumber forknum;
BlockNumber blknum;
BlockNumber blknum;
BufferGetTag(buf, &rnode, &forknum, &blknum);
elog(DEBUG1, "fixing corrupt FSM block %u, relation %u/%u/%u",
@ -312,9 +312,9 @@ fsm_search_avail(Buffer buf, uint8 minvalue, bool advancenext,
bool
fsm_truncate_avail(Page page, int nslots)
{
FSMPage fsmpage = (FSMPage) PageGetContents(page);
uint8 *ptr;
bool changed = false;
FSMPage fsmpage = (FSMPage) PageGetContents(page);
uint8 *ptr;
bool changed = false;
Assert(nslots >= 0 && nslots < LeafNodesPerPage);
@ -341,9 +341,9 @@ fsm_truncate_avail(Page page, int nslots)
bool
fsm_rebuild_page(Page page)
{
FSMPage fsmpage = (FSMPage) PageGetContents(page);
bool changed = false;
int nodeno;
FSMPage fsmpage = (FSMPage) PageGetContents(page);
bool changed = false;
int nodeno;
/*
* Start from the lowest non-leaf level, at last node, working our way
@ -351,9 +351,9 @@ fsm_rebuild_page(Page page)
*/
for (nodeno = NonLeafNodesPerPage - 1; nodeno >= 0; nodeno--)
{
int lchild = leftchild(nodeno);
int rchild = lchild + 1;
uint8 newvalue = 0;
int lchild = leftchild(nodeno);
int rchild = lchild + 1;
uint8 newvalue = 0;
/* The first few nodes we examine might have zero or one child. */
if (lchild < NodesPerPage)

src/backend/storage/freespace/indexfsm.c

@ -8,15 +8,15 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/freespace/indexfsm.c,v 1.4 2009/01/01 17:23:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/freespace/indexfsm.c,v 1.5 2009/06/11 14:49:01 momjian Exp $
*
*
* NOTES:
*
* This is similar to the FSM used for heap, in freespace.c, but instead
* of tracking the amount of free space on pages, we only track whether
* pages are completely free or in-use. We use the same FSM implementation
* as for heaps, using BLCKSZ - 1 to denote used pages, and 0 for unused.
* This is similar to the FSM used for heap, in freespace.c, but instead
* of tracking the amount of free space on pages, we only track whether
* pages are completely free or in-use. We use the same FSM implementation
* as for heaps, using BLCKSZ - 1 to denote used pages, and 0 for unused.
*
*-------------------------------------------------------------------------
*/
@ -38,7 +38,7 @@
BlockNumber
GetFreeIndexPage(Relation rel)
{
BlockNumber blkno = GetPageWithFreeSpace(rel, BLCKSZ/2);
BlockNumber blkno = GetPageWithFreeSpace(rel, BLCKSZ / 2);
if (blkno != InvalidBlockNumber)
RecordUsedIndexPage(rel, blkno);

src/backend/storage/ipc/ipc.c

@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.104 2009/05/15 15:56:39 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.105 2009/06/11 14:49:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -83,7 +83,7 @@ static int on_proc_exit_index,
* -cim 2/6/90
*
* Unfortunately, we can't really guarantee that add-on code
* obeys the rule of not calling exit() directly. So, while
* obeys the rule of not calling exit() directly. So, while
* this is the preferred way out of the system, we also register
* an atexit callback that will make sure cleanup happens.
* ----------------------------------------------------------------
@ -113,10 +113,10 @@ proc_exit(int code)
*
* Note that we do this here instead of in an on_proc_exit() callback
* because we want to ensure that this code executes last - we don't
* want to interfere with any other on_proc_exit() callback. For
* the same reason, we do not include it in proc_exit_prepare ...
* so if you are exiting in the "wrong way" you won't drop your profile
* in a nice place.
* want to interfere with any other on_proc_exit() callback. For the
* same reason, we do not include it in proc_exit_prepare ... so if
* you are exiting in the "wrong way" you won't drop your profile in a
* nice place.
*/
char gprofDirName[32];
@ -229,8 +229,7 @@ atexit_callback(void)
/* ... too bad we don't know the real exit code ... */
proc_exit_prepare(-1);
}
#else /* assume we have on_exit instead */
#else /* assume we have on_exit instead */
static void
atexit_callback(int exitstatus, void *arg)
@ -238,8 +237,7 @@ atexit_callback(int exitstatus, void *arg)
/* Clean up everything that must be cleaned up */
proc_exit_prepare(exitstatus);
}
#endif /* HAVE_ATEXIT */
#endif /* HAVE_ATEXIT */
/* ----------------------------------------------------------------
* on_proc_exit

src/backend/storage/ipc/pmsignal.c

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.27 2009/05/05 19:59:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.28 2009/06/11 14:49:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -41,7 +41,7 @@
* have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is
* available for assignment. An ASSIGNED slot is associated with a postmaster
* child process, but either the process has not touched shared memory yet,
* or it has successfully cleaned up after itself. A ACTIVE slot means the
* or it has successfully cleaned up after itself. A ACTIVE slot means the
* process is actively using shared memory. The slots are assigned to
* child processes at random, and postmaster.c is responsible for tracking
* which one goes with which PID.
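A sketch of the three slot states this paragraph describes, expressed as an enum; the actual constants in pmsignal.c may be spelled differently:

/* Sketch only: life cycle of a postmaster child slot. */
typedef enum
{
	SLOT_UNUSED,				/* free for assignment */
	SLOT_ASSIGNED,				/* reserved for a child; shared memory untouched or cleaned up */
	SLOT_ACTIVE					/* child is actively using shared memory */
} ChildSlotSketch;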
@ -57,8 +57,8 @@ struct PMSignalData
/* per-reason flags */
sig_atomic_t PMSignalFlags[NUM_PMSIGNALS];
/* per-child-process flags */
int num_child_flags; /* # of entries in PMChildFlags[] */
int next_child_flag; /* next slot to try to assign */
int num_child_flags; /* # of entries in PMChildFlags[] */
int next_child_flag; /* next slot to try to assign */
sig_atomic_t PMChildFlags[1]; /* VARIABLE LENGTH ARRAY */
};
@ -181,6 +181,7 @@ ReleasePostmasterChildSlot(int slot)
Assert(slot > 0 && slot <= PMSignalState->num_child_flags);
slot--;
/*
* Note: the slot state might already be unused, because the logic in
* postmaster.c is such that this might get called twice when a child

src/backend/storage/ipc/procarray.c

@ -23,7 +23,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.49 2009/04/04 17:40:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.50 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -829,8 +829,8 @@ GetSnapshotData(Snapshot snapshot)
snapshot->curcid = GetCurrentCommandId(false);
/*
* This is a new snapshot, so set both refcounts are zero, and mark it
* as not copied in persistent memory.
* This is a new snapshot, so set both refcounts are zero, and mark it as
* not copied in persistent memory.
*/
snapshot->active_count = 0;
snapshot->regd_count = 0;
@ -1038,7 +1038,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
* other backend could have set its xmin *before* we look. We know however
* other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@ -1133,9 +1133,9 @@ CountActiveBackends(void)
*
* If someone just decremented numProcs, 'proc' could also point to a
* PGPROC entry that's no longer in the array. It still points to a
* PGPROC struct, though, because freed PGPPROC entries just go to
* the free list and are recycled. Its contents are nonsense in that
* case, but that's acceptable for this function.
* PGPROC struct, though, because freed PGPPROC entries just go to the
* free list and are recycled. Its contents are nonsense in that case,
* but that's acceptable for this function.
*/
if (proc != NULL)
continue;
@ -1235,7 +1235,8 @@ bool
CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
{
ProcArrayStruct *arrayP = procArray;
#define MAXAUTOVACPIDS 10 /* max autovacs to SIGTERM per iteration */
#define MAXAUTOVACPIDS 10 /* max autovacs to SIGTERM per iteration */
int autovac_pids[MAXAUTOVACPIDS];
int tries;
@ -1280,10 +1281,10 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
return false; /* no conflicting backends, so done */
/*
* Send SIGTERM to any conflicting autovacuums before sleeping.
* We postpone this step until after the loop because we don't
* want to hold ProcArrayLock while issuing kill().
* We have no idea what might block kill() inside the kernel...
* Send SIGTERM to any conflicting autovacuums before sleeping. We
* postpone this step until after the loop because we don't want to
* hold ProcArrayLock while issuing kill(). We have no idea what might
* block kill() inside the kernel...
*/
for (index = 0; index < nautovacs; index++)
(void) kill(autovac_pids[index], SIGTERM); /* ignore any error */

src/backend/storage/ipc/sinval.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.89 2009/01/01 17:23:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.90 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -65,7 +65,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
* inval messages have been processed before it exits. This is the reason
* inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@ -77,9 +77,10 @@ ReceiveSharedInvalidMessages(
{
#define MAXINVALMSGS 32
static SharedInvalidationMessage messages[MAXINVALMSGS];
/*
* We use volatile here to prevent bugs if a compiler doesn't realize
* that recursion is a possibility ...
* We use volatile here to prevent bugs if a compiler doesn't realize that
* recursion is a possibility ...
*/
static volatile int nextmsg = 0;
static volatile int nummsgs = 0;
@ -121,18 +122,18 @@ ReceiveSharedInvalidMessages(
}
/*
* We only need to loop if the last SIGetDataEntries call (which
* might have been within a recursive call) returned a full buffer.
* We only need to loop if the last SIGetDataEntries call (which might
* have been within a recursive call) returned a full buffer.
*/
} while (nummsgs == MAXINVALMSGS);
/*
* We are now caught up. If we received a catchup signal, reset that
* flag, and call SICleanupQueue(). This is not so much because we
* need to flush dead messages right now, as that we want to pass on
* the catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for
* what should be just a background maintenance activity.
* flag, and call SICleanupQueue(). This is not so much because we need
* to flush dead messages right now, as that we want to pass on the
* catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for what
* should be just a background maintenance activity.
*/
if (catchupInterruptOccurred)
{

src/backend/storage/ipc/sinvaladt.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.77 2009/01/01 17:23:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.78 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -57,7 +57,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
* needed. This avoids undue contention from multiple backends all trying
* needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@ -88,7 +88,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
* to write maxMsgNum. (The exact rule is that you need the spinlock to
* to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@ -146,8 +146,8 @@ typedef struct ProcState
/*
* Next LocalTransactionId to use for each idle backend slot. We keep
* this here because it is indexed by BackendId and it is convenient to
* copy the value to and from local memory when MyBackendId is set.
* It's meaningless in an active ProcState entry.
* copy the value to and from local memory when MyBackendId is set. It's
* meaningless in an active ProcState entry.
*/
LocalTransactionId nextLXID;
} ProcState;
@ -235,8 +235,8 @@ CreateSharedInvalidationState(void)
/* Mark all backends inactive, and initialize nextLXID */
for (i = 0; i < shmInvalBuffer->maxBackends; i++)
{
shmInvalBuffer->procState[i].procPid = 0; /* inactive */
shmInvalBuffer->procState[i].nextMsgNum = 0; /* meaningless */
shmInvalBuffer->procState[i].procPid = 0; /* inactive */
shmInvalBuffer->procState[i].nextMsgNum = 0; /* meaningless */
shmInvalBuffer->procState[i].resetState = false;
shmInvalBuffer->procState[i].signaled = false;
shmInvalBuffer->procState[i].nextLXID = InvalidLocalTransactionId;
@ -255,11 +255,11 @@ SharedInvalBackendInit(void)
SISeg *segP = shmInvalBuffer;
/*
* This can run in parallel with read operations, and for that matter
* with write operations; but not in parallel with additions and removals
* of backends, nor in parallel with SICleanupQueue. It doesn't seem
* worth having a third lock, so we choose to use SInvalWriteLock to
* serialize additions/removals.
* This can run in parallel with read operations, and for that matter with
* write operations; but not in parallel with additions and removals of
* backends, nor in parallel with SICleanupQueue. It doesn't seem worth
* having a third lock, so we choose to use SInvalWriteLock to serialize
* additions/removals.
*/
LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);
@ -394,7 +394,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;
/*
* N can be arbitrarily large. We divide the work into groups of no more
* N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
@ -404,9 +404,9 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
*/
while (n > 0)
{
int nthistime = Min(n, WRITE_QUANTUM);
int numMsgs;
int max;
int nthistime = Min(n, WRITE_QUANTUM);
int numMsgs;
int max;
n -= nthistime;
@ -416,7 +416,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
* fullness threshold. We have to loop and recheck the buffer state
* fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
@ -458,9 +458,9 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* get next SI message(s) for current backend, if there are any
*
* Possible return values:
* 0: no SI message available
* 0: no SI message available
* n>0: next n SI messages have been extracted into data[]
* -1: SI reset message extracted
* -1: SI reset message extracted
*
* If the return value is less than the array size "datasize", the caller
* can assume that there are no more SI messages after the one(s) returned.
@ -470,11 +470,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
* in shared mode. Note that this is not exactly the normal (read-only)
* in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
* NB: this can also run in parallel with SIInsertDataEntries. It is not
* NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@ -488,7 +488,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
ProcState *stateP;
int max;
int n;
LWLockAcquire(SInvalReadLock, LW_SHARED);
segP = shmInvalBuffer;
@ -557,7 +557,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
* free message slots at exit. Caller must recheck and perhaps retry.
* free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
@ -576,9 +576,9 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
LWLockAcquire(SInvalReadLock, LW_EXCLUSIVE);
/*
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify
* the furthest-back backend that needs signaling (if any), and reset
* any backends that are too far back.
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
* furthest-back backend that needs signaling (if any), and reset any
* backends that are too far back.
*/
min = segP->maxMsgNum;
minsig = min - SIG_THRESHOLD;
@ -587,15 +587,15 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
for (i = 0; i < segP->lastBackend; i++)
{
ProcState *stateP = &segP->procState[i];
int n = stateP->nextMsgNum;
int n = stateP->nextMsgNum;
/* Ignore if inactive or already in reset state */
if (stateP->procPid == 0 || stateP->resetState)
continue;
/*
* If we must free some space and this backend is preventing it,
* force him into reset state and then ignore until he catches up.
* If we must free some space and this backend is preventing it, force
* him into reset state and then ignore until he catches up.
*/
if (n < lowbound)
{
@ -619,8 +619,8 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
/*
* When minMsgNum gets really large, decrement all message counters so as
* to forestall overflow of the counters. This happens seldom enough
* that folding it into the previous loop would be a loser.
* to forestall overflow of the counters. This happens seldom enough that
* folding it into the previous loop would be a loser.
*/
if (min >= MSGNUMWRAPAROUND)
{
@ -649,7 +649,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
*/
if (needSig)
{
pid_t his_pid = needSig->procPid;
pid_t his_pid = needSig->procPid;
needSig->signaled = true;
LWLockRelease(SInvalReadLock);

src/backend/storage/large_object/inv_api.c

@ -24,7 +24,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.137 2009/01/01 17:23:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.138 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -248,12 +248,12 @@ inv_open(Oid lobjId, int flags, MemoryContext mcxt)
else if (flags & INV_READ)
{
/*
* We must register the snapshot in TopTransaction's resowner,
* because it must stay alive until the LO is closed rather than until
* the current portal shuts down.
* We must register the snapshot in TopTransaction's resowner, because
* it must stay alive until the LO is closed rather than until the
* current portal shuts down.
*/
retval->snapshot = RegisterSnapshotOnOwner(GetActiveSnapshot(),
TopTransactionResourceOwner);
TopTransactionResourceOwner);
retval->flags = IFS_RDLOCK;
}
else
@ -641,7 +641,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes)
values[Anum_pg_largeobject_data - 1] = PointerGetDatum(&workbuf);
replace[Anum_pg_largeobject_data - 1] = true;
newtup = heap_modify_tuple(oldtuple, RelationGetDescr(lo_heap_r),
values, nulls, replace);
values, nulls, replace);
simple_heap_update(lo_heap_r, &newtup->t_self, newtup);
CatalogIndexInsert(indstate, newtup);
heap_freetuple(newtup);
@ -810,7 +810,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
values[Anum_pg_largeobject_data - 1] = PointerGetDatum(&workbuf);
replace[Anum_pg_largeobject_data - 1] = true;
newtup = heap_modify_tuple(oldtuple, RelationGetDescr(lo_heap_r),
values, nulls, replace);
values, nulls, replace);
simple_heap_update(lo_heap_r, &newtup->t_self, newtup);
CatalogIndexInsert(indstate, newtup);
heap_freetuple(newtup);

src/backend/storage/lmgr/deadlock.c

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.56 2009/01/01 17:23:47 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.57 2009/06/11 14:49:02 momjian Exp $
*
* Interface:
*
@ -933,7 +933,7 @@ DeadLockReport(void)
appendStringInfo(&logbuf,
_("Process %d: %s"),
info->pid,
pgstat_get_backend_current_activity(info->pid, false));
pgstat_get_backend_current_activity(info->pid, false));
}
ereport(ERROR,

src/backend/storage/lmgr/lock.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.187 2009/03/23 01:52:38 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.188 2009/06/11 14:49:02 momjian Exp $
*
* NOTES
* A lock table is a shared memory hash table. When
@ -1112,7 +1112,7 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
{
LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
LockMethod lockMethodTable = LockMethods[lockmethodid];
char * volatile new_status = NULL;
char *volatile new_status = NULL;
LOCK_PRINT("WaitOnLock: sleeping on lock",
locallock->lock, locallock->tag.mode);
@ -1145,20 +1145,20 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
* the locktable state must fully reflect the fact that we own the lock;
* we can't do additional work on return.
*
* We can and do use a PG_TRY block to try to clean up after failure,
* but this still has a major limitation: elog(FATAL) can occur while
* waiting (eg, a "die" interrupt), and then control won't come back here.
* So all cleanup of essential state should happen in LockWaitCancel,
* not here. We can use PG_TRY to clear the "waiting" status flags,
* since doing that is unimportant if the process exits.
* We can and do use a PG_TRY block to try to clean up after failure, but
* this still has a major limitation: elog(FATAL) can occur while waiting
* (eg, a "die" interrupt), and then control won't come back here. So all
* cleanup of essential state should happen in LockWaitCancel, not here.
* We can use PG_TRY to clear the "waiting" status flags, since doing that
* is unimportant if the process exits.
*/
PG_TRY();
{
if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
{
/*
* We failed as a result of a deadlock, see CheckDeadLock().
* Quit now.
* We failed as a result of a deadlock, see CheckDeadLock(). Quit
* now.
*/
awaitedLock = NULL;
LOCK_PRINT("WaitOnLock: aborting on lock",
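A sketch of the general PG_TRY shape the comment above refers to; the helper names are made up, and the point is that only nonessential cleanup belongs in the catch block, since elog(FATAL) can exit without reaching it:

/* Sketch only, not code from this file. */
PG_TRY();
{
	do_wait();					/* hypothetical: the part that may error out */
}
PG_CATCH();
{
	clear_waiting_status();		/* hypothetical nonessential cleanup */
	PG_RE_THROW();
}
PG_END_TRY();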

src/backend/storage/lmgr/proc.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.206 2009/05/05 19:59:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.207 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -277,8 +277,8 @@ InitProcess(void)
/*
* Now that we have a PGPROC, mark ourselves as an active postmaster
* child; this is so that the postmaster can detect it if we exit
* without cleaning up.
* child; this is so that the postmaster can detect it if we exit without
* cleaning up.
*/
if (IsUnderPostmaster)
MarkPostmasterChildActive();
@ -1184,8 +1184,8 @@ CheckDeadLock(void)
* Check to see if we've been awoken by anyone in the interim.
*
* If we have, we can return and resume our transaction -- happy day.
* Before we are awoken the process releasing the lock grants it to us
* so we know that we don't have to wait anymore.
* Before we are awoken the process releasing the lock grants it to us so
* we know that we don't have to wait anymore.
*
* We check by looking to see if we've been unlinked from the wait queue.
* This is quicker than checking our semaphore's state, since no kernel

src/backend/storage/smgr/md.c

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.145 2009/03/11 23:19:25 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.146 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -120,7 +120,7 @@ static MemoryContext MdCxt; /* context for all md.c allocations */
typedef struct
{
RelFileNode rnode; /* the targeted relation */
ForkNumber forknum;
ForkNumber forknum;
BlockNumber segno; /* which segment */
} PendingOperationTag;
@ -154,18 +154,18 @@ typedef enum /* behavior for mdopen & _mdfd_getseg */
} ExtensionBehavior;
/* local routines */
static MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum,
ExtensionBehavior behavior);
static MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum,
ExtensionBehavior behavior);
static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
MdfdVec *seg);
MdfdVec *seg);
static void register_unlink(RelFileNode rnode);
static MdfdVec *_fdvec_alloc(void);
static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forkno,
BlockNumber segno, int oflags);
BlockNumber segno, int oflags);
static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forkno,
BlockNumber blkno, bool isTemp, ExtensionBehavior behavior);
static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum,
MdfdVec *seg);
MdfdVec *seg);
/*
@ -204,7 +204,7 @@ mdinit(void)
}
/*
* mdexists() -- Does the physical file exist?
* mdexists() -- Does the physical file exist?
*
* Note: this will return true for lingering files, with pending deletions
*/
@ -212,8 +212,8 @@ bool
mdexists(SMgrRelation reln, ForkNumber forkNum)
{
/*
* Close it first, to ensure that we notice if the fork has been
* unlinked since we opened it.
* Close it first, to ensure that we notice if the fork has been unlinked
* since we opened it.
*/
mdclose(reln, forkNum);
@ -369,8 +369,8 @@ mdunlink(RelFileNode rnode, ForkNumber forkNum, bool isRedo)
if (errno != ENOENT)
ereport(WARNING,
(errcode_for_file_access(),
errmsg("could not remove segment %u of relation %s: %m",
segno, path)));
errmsg("could not remove segment %u of relation %s: %m",
segno, path)));
break;
}
}
@ -420,7 +420,8 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
v = _mdfd_getseg(reln, forknum, blocknum, isTemp, EXTENSION_CREATE);
seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));
Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
/*
@ -535,7 +536,7 @@ mdclose(SMgrRelation reln, ForkNumber forknum)
if (v == NULL)
return;
reln->md_fd[forknum] = NULL; /* prevent dangling pointer after error */
reln->md_fd[forknum] = NULL; /* prevent dangling pointer after error */
while (v != NULL)
{
@ -562,11 +563,12 @@ mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL);
seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));
Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
(void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ);
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */
}
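A worked example of the segment/offset arithmetic that recurs in these md.c hunks, assuming the default build values BLCKSZ = 8192 and RELSEG_SIZE = 131072 blocks (1 GB segment files):

#include <stdio.h>

/* Sketch: which segment file a block lives in, and at what byte offset. */
int
main(void)
{
	unsigned int blcksz = 8192;
	unsigned int relseg_size = 131072;
	unsigned int blocknum = 200000;

	unsigned int segno = blocknum / relseg_size;	/* segment 1 */
	long long	seekpos = (long long) blcksz * (blocknum % relseg_size);

	printf("block %u -> segment %u, byte offset %lld\n",
		   blocknum, segno, seekpos);				/* 68928 * 8192 = 564658176 */
	return 0;
}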
@ -588,7 +590,8 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL);
seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));
Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
@ -611,8 +614,8 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
if (nbytes < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read block %u of relation %s: %m",
blocknum, relpath(reln->smgr_rnode, forknum))));
errmsg("could not read block %u of relation %s: %m",
blocknum, relpath(reln->smgr_rnode, forknum))));
/*
* Short read: we are at or past EOF, or we read a partial block at
@ -660,7 +663,8 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
v = _mdfd_getseg(reln, forknum, blocknum, isTemp, EXTENSION_FAIL);
seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));
Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
@ -683,8 +687,8 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
if (nbytes < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write block %u of relation %s: %m",
blocknum, relpath(reln->smgr_rnode, forknum))));
errmsg("could not write block %u of relation %s: %m",
blocknum, relpath(reln->smgr_rnode, forknum))));
/* short write: complain appropriately */
ereport(ERROR,
(errcode(ERRCODE_DISK_FULL),
@ -757,9 +761,9 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
if (v->mdfd_chain == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open segment %u of relation %s: %m",
segno,
relpath(reln->smgr_rnode, forknum))));
errmsg("could not open segment %u of relation %s: %m",
segno,
relpath(reln->smgr_rnode, forknum))));
}
v = v->mdfd_chain;
@ -812,13 +816,14 @@ mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks,
if (FileTruncate(v->mdfd_vfd, 0) < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not truncate relation %s to %u blocks: %m",
relpath(reln->smgr_rnode, forknum),
nblocks)));
errmsg("could not truncate relation %s to %u blocks: %m",
relpath(reln->smgr_rnode, forknum),
nblocks)));
if (!isTemp)
register_dirty_segment(reln, forknum, v);
v = v->mdfd_chain;
Assert(ov != reln->md_fd[forknum]); /* we never drop the 1st segment */
Assert(ov != reln->md_fd[forknum]); /* we never drop the 1st
* segment */
pfree(ov);
}
else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
@ -836,9 +841,9 @@ mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks,
if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ) < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not truncate relation %s to %u blocks: %m",
relpath(reln->smgr_rnode, forknum),
nblocks)));
errmsg("could not truncate relation %s to %u blocks: %m",
relpath(reln->smgr_rnode, forknum),
nblocks)));
if (!isTemp)
register_dirty_segment(reln, forknum, v);
v = v->mdfd_chain;
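The two mdtruncate() hunks above cover the interesting cases when shrinking a relation: a segment lying wholly beyond the new length is truncated to zero and (except for the first segment, per the Assert) dropped from the chain, while the segment that straddles the new EOF is truncated to the remaining block count and kept. A hedged sketch of just that per-segment decision, detached from the MdfdVec chain (classify_segment and SegAction are illustrative names only, not md.c's API):

#define RELSEG_SIZE 131072		/* blocks per segment, as in pg_config.h */

typedef unsigned int BlockNumber;

typedef enum
{
	SEG_KEEP,					/* segment untouched */
	SEG_PARTIAL,				/* truncate within this segment */
	SEG_DROP					/* segment is wholly past the new EOF */
} SegAction;

/*
 * Given the first block covered by a segment and the new relation length
 * in blocks, classify what mdtruncate()-style code does with the segment.
 * On SEG_PARTIAL, *keep_blocks is how many blocks survive in it.  (Real
 * md.c never actually drops the first segment, even when it returns empty.)
 */
static SegAction
classify_segment(BlockNumber priorblocks, BlockNumber nblocks,
				 BlockNumber *keep_blocks)
{
	if (priorblocks > nblocks)
	{
		*keep_blocks = 0;
		return SEG_DROP;
	}
	if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
	{
		*keep_blocks = nblocks - priorblocks;
		return SEG_PARTIAL;
	}
	*keep_blocks = RELSEG_SIZE;
	return SEG_KEEP;
}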
@ -1055,8 +1060,8 @@ mdsync(void)
failures > 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not fsync segment %u of relation %s: %m",
entry->tag.segno, path)));
errmsg("could not fsync segment %u of relation %s: %m",
entry->tag.segno, path)));
else
ereport(DEBUG1,
(errcode_for_file_access(),
@ -1268,7 +1273,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
hash_seq_init(&hstat, pendingOpsTable);
while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
{
if (RelFileNodeEquals(entry->tag.rnode, rnode) &&
if (RelFileNodeEquals(entry->tag.rnode, rnode) &&
entry->tag.forknum == forknum)
{
/* Okay, cancel this entry */
@ -1281,7 +1286,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/* Remove any pending requests for the entire database */
HASH_SEQ_STATUS hstat;
PendingOperationEntry *entry;
ListCell *cell,
ListCell *cell,
*prev,
*next;
@ -1295,7 +1300,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
entry->canceled = true;
}
}
/* Remove unlink requests */
prev = NULL;
for (cell = list_head(pendingUnlinks); cell; cell = next)
@ -1303,7 +1308,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
next = lnext(cell);
if (entry->rnode.dbNode == rnode.dbNode)
if (entry->rnode.dbNode == rnode.dbNode)
{
pendingUnlinks = list_delete_cell(pendingUnlinks, cell, prev);
pfree(entry);
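The pendingUnlinks loop in this hunk uses the pre-9.x List deletion idiom: fetch lnext(cell) before the cell can be freed, and carry a prev pointer so list_delete_cell() can splice the cell out. The same shape on a plain singly-linked list, independent of pg_list.h (Cell and delete_matching are invented for illustration):

#include <stdlib.h>

typedef struct Cell
{
	int			dbNode;			/* stand-in for entry->rnode.dbNode */
	struct Cell *next;
} Cell;

/* Remove every cell whose dbNode matches, returning the new list head. */
static Cell *
delete_matching(Cell *head, int dbNode)
{
	Cell	   *prev = NULL;
	Cell	   *cell = head;

	while (cell != NULL)
	{
		Cell	   *next = cell->next;	/* grab before freeing, like lnext() */

		if (cell->dbNode == dbNode)
		{
			if (prev)
				prev->next = next;		/* splice out, like list_delete_cell() */
			else
				head = next;
			free(cell);
		}
		else
			prev = cell;				/* advance prev only over kept cells */

		cell = next;
	}
	return head;
}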
@ -1569,8 +1574,8 @@ _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
if (len < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not seek to end of segment %u of relation %s: %m",
seg->mdfd_segno, relpath(reln->smgr_rnode, forknum))));
errmsg("could not seek to end of segment %u of relation %s: %m",
seg->mdfd_segno, relpath(reln->smgr_rnode, forknum))));
/* note that this calculation will ignore any partial block at EOF */
return (BlockNumber) (len / BLCKSZ);
}
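_mdnblocks() above measures a segment by seeking to its end and dividing by BLCKSZ, so a trailing partial block is deliberately not counted. A standalone version of that calculation with plain lseek() in place of the fd.c FileSeek() wrapper (segment_nblocks is an illustrative name; assumes the default 8192-byte block size):

#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

#define BLCKSZ 8192				/* default PostgreSQL block size */

typedef unsigned int BlockNumber;

/* Return the number of complete blocks in an already-opened segment file. */
static BlockNumber
segment_nblocks(int fd)
{
	off_t		len = lseek(fd, 0, SEEK_END);

	if (len < 0)
		return 0;				/* real code would ereport(ERROR) here */

	/* e.g. an 81925-byte file reports 10 blocks; the stray 5 bytes are ignored */
	return (BlockNumber) (len / BLCKSZ);
}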


@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.116 2009/01/12 05:10:44 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.117 2009/06/11 14:49:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -42,21 +42,21 @@ typedef struct f_smgr
void (*smgr_shutdown) (void); /* may be NULL */
void (*smgr_close) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_create) (SMgrRelation reln, ForkNumber forknum,
bool isRedo);
bool isRedo);
bool (*smgr_exists) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_unlink) (RelFileNode rnode, ForkNumber forknum,
bool isRedo);
bool isRedo);
void (*smgr_extend) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool isTemp);
void (*smgr_prefetch) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum);
BlockNumber blocknum);
void (*smgr_read) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer);
void (*smgr_write) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool isTemp);
BlockNumber blocknum, char *buffer);
void (*smgr_write) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool isTemp);
BlockNumber (*smgr_nblocks) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_truncate) (SMgrRelation reln, ForkNumber forknum,
BlockNumber nblocks, bool isTemp);
BlockNumber nblocks, bool isTemp);
void (*smgr_immedsync) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_pre_ckpt) (void); /* may be NULL */
void (*smgr_sync) (void); /* may be NULL */
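The f_smgr struct reindented above is a function-pointer dispatch table: md.c supplies one row, and every smgr.c entry point calls through smgrsw[reln->smgr_which]. A stripped-down sketch of the pattern with just two operations (mini_smgr, md_like_* and the table contents are invented; only the dispatch shape mirrors smgr.c):

#include <stdio.h>

/* One row of a miniature storage-manager switch table. */
typedef struct mini_smgr
{
	void		(*smgr_init) (void);	/* may be NULL */
	void		(*smgr_read) (unsigned blocknum, char *buffer);
} mini_smgr;

static void
md_like_init(void)
{
	puts("md-like manager initialized");
}

static void
md_like_read(unsigned blocknum, char *buffer)
{
	(void) buffer;
	printf("read block %u\n", blocknum);
}

static const mini_smgr smgrsw[] = {
	/* magnetic disk */
	{md_like_init, md_like_read},
};

int
main(void)
{
	char		buffer[8192];
	int			which = 0;		/* plays the role of reln->smgr_which */

	if (smgrsw[which].smgr_init)	/* honour the "may be NULL" contract */
		(*(smgrsw[which].smgr_init)) ();
	(*(smgrsw[which].smgr_read)) (42, buffer);
	return 0;
}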
@ -82,8 +82,8 @@ static HTAB *SMgrRelationHash = NULL;
/* local function prototypes */
static void smgrshutdown(int code, Datum arg);
static void smgr_internal_unlink(RelFileNode rnode, ForkNumber forknum,
int which, bool isTemp, bool isRedo);
static void smgr_internal_unlink(RelFileNode rnode, ForkNumber forknum,
int which, bool isTemp, bool isRedo);
/*
@ -156,14 +156,14 @@ smgropen(RelFileNode rnode)
/* Initialize it if not present before */
if (!found)
{
int forknum;
int forknum;
/* hash_search already filled in the lookup key */
reln->smgr_owner = NULL;
reln->smgr_which = 0; /* we only have md.c at present */
/* mark it not open */
for(forknum = 0; forknum <= MAX_FORKNUM; forknum++)
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
reln->md_fd[forknum] = NULL;
}
@ -209,7 +209,7 @@ void
smgrclose(SMgrRelation reln)
{
SMgrRelation *owner;
ForkNumber forknum;
ForkNumber forknum;
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
(*(smgrsw[reln->smgr_which].smgr_close)) (reln, forknum);
@ -286,9 +286,9 @@ void
smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
{
/*
* Exit quickly in WAL replay mode if we've already opened the file.
* If it's open, it surely must exist.
*/
* Exit quickly in WAL replay mode if we've already opened the file. If
* it's open, it surely must exist.
*/
if (isRedo && reln->md_fd[forknum] != NULL)
return;
@ -334,7 +334,7 @@ smgrdounlink(SMgrRelation reln, ForkNumber forknum, bool isTemp, bool isRedo)
* Shared subroutine that actually does the unlink ...
*/
static void
smgr_internal_unlink(RelFileNode rnode, ForkNumber forknum,
smgr_internal_unlink(RelFileNode rnode, ForkNumber forknum,
int which, bool isTemp, bool isRedo)
{
/*
@ -370,7 +370,7 @@ smgr_internal_unlink(RelFileNode rnode, ForkNumber forknum,
* causes intervening file space to become filled with zeroes.
*/
void
smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char *buffer, bool isTemp)
{
(*(smgrsw[reln->smgr_which].smgr_extend)) (reln, forknum, blocknum,
@ -395,7 +395,7 @@ smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
* return pages in the format that POSTGRES expects.
*/
void
smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char *buffer)
{
(*(smgrsw[reln->smgr_which].smgr_read)) (reln, forknum, blocknum, buffer);
@ -417,7 +417,7 @@ smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
* made to fsync the write before checkpointing.
*/
void
smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char *buffer, bool isTemp)
{
(*(smgrsw[reln->smgr_which].smgr_write)) (reln, forknum, blocknum,
@ -527,4 +527,3 @@ smgrpostckpt(void)
(*(smgrsw[i].smgr_post_ckpt)) ();
}
}
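smgrpostckpt() at the end of the file is the other half of that dispatch pattern: walk every registered row and call the optional hook only when it is non-NULL, as the "may be NULL" comments promise. A tiny self-contained sketch of the loop (ckpt_hooks and md_post_ckpt are invented names):

#include <stddef.h>
#include <stdio.h>

typedef struct
{
	void		(*smgr_pre_ckpt) (void);	/* may be NULL */
	void		(*smgr_post_ckpt) (void);	/* may be NULL */
} ckpt_hooks;

static void
md_post_ckpt(void)
{
	puts("md: post-checkpoint cleanup");
}

static const ckpt_hooks hooks[] = {
	{NULL, md_post_ckpt},		/* md.c-like manager: no pre-checkpoint hook */
};

#define NHooks (sizeof(hooks) / sizeof(hooks[0]))

int
main(void)
{
	size_t		i;

	/* same shape as smgrpostckpt(): call every non-NULL post hook */
	for (i = 0; i < NHooks; i++)
		if (hooks[i].smgr_post_ckpt)
			(*(hooks[i].smgr_post_ckpt)) ();
	return 0;
}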