pgindent run for release 9.3
This is the first run of the Perl-based pgindent script. Also update pgindent instructions.
@@ -110,7 +110,7 @@ static volatile BufferDesc *BufferAlloc(SMgrRelation smgr,
 bool *foundPtr);
 static void FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln);
 static void AtProcExit_Buffers(int code, Datum arg);
-static int rnode_comparator(const void *p1, const void *p2);
+static int rnode_comparator(const void *p1, const void *p2);


 /*
@@ -476,9 +476,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 else
 ereport(ERROR,
 (errcode(ERRCODE_DATA_CORRUPTED),
-errmsg("invalid page in block %u of relation %s",
-blockNum,
-relpath(smgr->smgr_rnode, forkNum))));
+errmsg("invalid page in block %u of relation %s",
+blockNum,
+relpath(smgr->smgr_rnode, forkNum))));
 }
 }
 }
@@ -1220,7 +1220,8 @@ BufferSync(int flags)

 /*
 * Unless this is a shutdown checkpoint, we write only permanent, dirty
-* buffers. But at shutdown or end of recovery, we write all dirty buffers.
+* buffers. But at shutdown or end of recovery, we write all dirty
+* buffers.
 */
 if (!((flags & CHECKPOINT_IS_SHUTDOWN) || (flags & CHECKPOINT_END_OF_RECOVERY)))
 mask |= BM_PERMANENT;
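The hunk above re-wraps the comment describing which dirty buffers a checkpoint writes. As a minimal standalone sketch of that selection rule (the flag values and the must_write() helper are illustrative stand-ins, not PostgreSQL's real definitions):

#include <stdio.h>

#define BM_DIRTY      (1 << 0)
#define BM_VALID      (1 << 1)
#define BM_PERMANENT  (1 << 2)

#define CHECKPOINT_IS_SHUTDOWN      (1 << 0)
#define CHECKPOINT_END_OF_RECOVERY  (1 << 1)

static int
must_write(int buf_flags, int ckpt_flags)
{
    int mask = BM_DIRTY;

    /* Ordinary checkpoints skip non-permanent (unlogged) buffers. */
    if (!((ckpt_flags & CHECKPOINT_IS_SHUTDOWN) ||
          (ckpt_flags & CHECKPOINT_END_OF_RECOVERY)))
        mask |= BM_PERMANENT;

    /* A buffer is written only if every bit in the mask is set. */
    return (buf_flags & mask) == mask;
}

int
main(void)
{
    int unlogged_dirty = BM_VALID | BM_DIRTY;   /* no BM_PERMANENT */
    int permanent_dirty = BM_VALID | BM_DIRTY | BM_PERMANENT;

    printf("ordinary ckpt, unlogged dirty:  %d\n", must_write(unlogged_dirty, 0));
    printf("ordinary ckpt, permanent dirty: %d\n", must_write(permanent_dirty, 0));
    printf("shutdown ckpt, unlogged dirty:  %d\n",
           must_write(unlogged_dirty, CHECKPOINT_IS_SHUTDOWN));
    return 0;
}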
@@ -1918,7 +1919,7 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
 instr_time io_start,
 io_time;
 Block bufBlock;
-char *bufToWrite;
+char *bufToWrite;

 /*
 * Acquire the buffer's io_in_progress lock. If StartBufferIO returns
@@ -1964,14 +1965,14 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
 * However, this rule does not apply to unlogged relations, which will be
 * lost after a crash anyway. Most unlogged relation pages do not bear
 * LSNs since we never emit WAL records for them, and therefore flushing
-* up through the buffer LSN would be useless, but harmless. However, GiST
-* indexes use LSNs internally to track page-splits, and therefore unlogged
-* GiST pages bear "fake" LSNs generated by GetFakeLSNForUnloggedRel. It
-* is unlikely but possible that the fake LSN counter could advance past
-* the WAL insertion point; and if it did happen, attempting to flush WAL
-* through that location would fail, with disastrous system-wide
-* consequences. To make sure that can't happen, skip the flush if the
-* buffer isn't permanent.
+* up through the buffer LSN would be useless, but harmless. However,
+* GiST indexes use LSNs internally to track page-splits, and therefore
+* unlogged GiST pages bear "fake" LSNs generated by
+* GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
+* LSN counter could advance past the WAL insertion point; and if it did
+* happen, attempting to flush WAL through that location would fail, with
+* disastrous system-wide consequences. To make sure that can't happen,
+* skip the flush if the buffer isn't permanent.
 */
 if (buf->flags & BM_PERMANENT)
 XLogFlush(recptr);
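The rewrapped comment above explains why FlushBuffer() forces WAL only for permanent buffers. A compilable sketch of just that guard, with stub types and functions standing in for the real WAL and storage-manager calls (everything here is illustrative):

#include <stdio.h>

typedef unsigned long XLogRecPtr;   /* stand-in for PostgreSQL's type */

#define BM_PERMANENT (1 << 2)       /* illustrative flag value */

static void
XLogFlush_stub(XLogRecPtr recptr)   /* stands in for XLogFlush() */
{
    printf("flush WAL through %lu\n", recptr);
}

static void
flush_buffer_sketch(int buf_flags, XLogRecPtr page_lsn)
{
    /*
     * WAL-before-data rule: WAL must reach disk up to the page LSN before
     * the page itself is written -- but only for permanent buffers. An
     * unlogged GiST page may carry a "fake" LSN ahead of the real WAL
     * insertion point, and flushing through it could fail.
     */
    if (buf_flags & BM_PERMANENT)
        XLogFlush_stub(page_lsn);

    printf("write page to storage\n");  /* the smgrwrite() step, elided */
}

int
main(void)
{
    flush_buffer_sketch(BM_PERMANENT, 1234);  /* flushes WAL, then writes */
    flush_buffer_sketch(0, 99999);            /* fake LSN: skip WAL flush */
    return 0;
}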
@@ -2076,8 +2077,8 @@ XLogRecPtr
 BufferGetLSNAtomic(Buffer buffer)
 {
 volatile BufferDesc *bufHdr = &BufferDescriptors[buffer - 1];
-char *page = BufferGetPage(buffer);
-XLogRecPtr lsn;
+char *page = BufferGetPage(buffer);
+XLogRecPtr lsn;

 /*
 * If we don't need locking for correctness, fastpath out.
@@ -2181,7 +2182,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber forkNum,
 void
 DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 {
-int i,
+int i,
 n = 0;
 RelFileNode *nodes;
 bool use_bsearch;
@@ -2189,7 +2190,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 if (nnodes == 0)
 return;

-nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */
+nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */

 /* If it's a local relation, it's localbuf.c's problem. */
 for (i = 0; i < nnodes; i++)
@@ -2204,8 +2205,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 }

 /*
-* If there are no non-local relations, then we're done. Release the memory
-* and return.
+* If there are no non-local relations, then we're done. Release the
+* memory and return.
 */
 if (n == 0)
 {
@@ -2215,8 +2216,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)

 /*
 * For low number of relations to drop just use a simple walk through, to
-* save the bsearch overhead. The threshold to use is rather a guess than a
-* exactly determined value, as it depends on many factors (CPU and RAM
+* save the bsearch overhead. The threshold to use is rather a guess than
+* a exactly determined value, as it depends on many factors (CPU and RAM
 * speeds, amount of shared buffers etc.).
 */
 use_bsearch = n > DROP_RELS_BSEARCH_THRESHOLD;
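The comment above motivates the linear-walk-versus-bsearch choice. A self-contained sketch of that pattern using the C standard library, with keys simplified to ints and an illustrative threshold (the real code compares RelFileNode structs via rnode_comparator):

#include <stdio.h>
#include <stdlib.h>

#define DROP_RELS_BSEARCH_THRESHOLD 20  /* illustrative guess, as in the comment */

static int
int_comparator(const void *p1, const void *p2)
{
    int a = *(const int *) p1;
    int b = *(const int *) p2;

    return (a > b) - (a < b);
}

static int
key_matches(int key, const int *nodes, int n, int use_bsearch)
{
    if (!use_bsearch)
    {
        int j;

        /* Few keys: a simple walk avoids the bsearch call overhead. */
        for (j = 0; j < n; j++)
            if (nodes[j] == key)
                return 1;
        return 0;
    }
    /* Many keys: the array was sorted once up front, so bsearch wins. */
    return bsearch(&key, nodes, n, sizeof(int), int_comparator) != NULL;
}

int
main(void)
{
    int nodes[] = {3, 9, 17, 42};
    int n = 4;
    int use_bsearch = n > DROP_RELS_BSEARCH_THRESHOLD;

    if (use_bsearch)
        qsort(nodes, n, sizeof(int), int_comparator);

    printf("17 present: %d\n", key_matches(17, nodes, n, use_bsearch));
    printf("5 present: %d\n", key_matches(5, nodes, n, use_bsearch));
    return 0;
}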
@@ -2237,7 +2238,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)

 if (!use_bsearch)
 {
-int j;
+int j;

 for (j = 0; j < n; j++)
 {
@@ -2397,8 +2398,8 @@ FlushRelationBuffers(Relation rel)
 if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
 (bufHdr->flags & BM_VALID) && (bufHdr->flags & BM_DIRTY))
 {
-ErrorContextCallback errcallback;
-Page localpage;
+ErrorContextCallback errcallback;
+Page localpage;

 localpage = (char *) LocalBufHdrGetBlock(bufHdr);

@@ -2575,17 +2576,17 @@ IncrBufferRefCount(Buffer buffer)
 * This is essentially the same as MarkBufferDirty, except:
 *
 * 1. The caller does not write WAL; so if checksums are enabled, we may need
-* to write an XLOG_HINT WAL record to protect against torn pages.
+* to write an XLOG_HINT WAL record to protect against torn pages.
 * 2. The caller might have only share-lock instead of exclusive-lock on the
-* buffer's content lock.
+* buffer's content lock.
 * 3. This function does not guarantee that the buffer is always marked dirty
-* (due to a race condition), so it cannot be used for important changes.
+* (due to a race condition), so it cannot be used for important changes.
 */
 void
 MarkBufferDirtyHint(Buffer buffer)
 {
 volatile BufferDesc *bufHdr;
-Page page = BufferGetPage(buffer);
+Page page = BufferGetPage(buffer);

 if (!BufferIsValid(buffer))
 elog(ERROR, "bad buffer ID: %d", buffer);
@@ -2605,13 +2606,13 @@ MarkBufferDirtyHint(Buffer buffer)
 /*
 * This routine might get called many times on the same page, if we are
 * making the first scan after commit of an xact that added/deleted many
-* tuples. So, be as quick as we can if the buffer is already dirty. We do
-* this by not acquiring spinlock if it looks like the status bits are
+* tuples. So, be as quick as we can if the buffer is already dirty. We
+* do this by not acquiring spinlock if it looks like the status bits are
 * already set. Since we make this test unlocked, there's a chance we
 * might fail to notice that the flags have just been cleared, and failed
 * to reset them, due to memory-ordering issues. But since this function
-* is only intended to be used in cases where failing to write out the data
-* would be harmless anyway, it doesn't really matter.
+* is only intended to be used in cases where failing to write out the
+* data would be harmless anyway, it doesn't really matter.
 */
 if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
 (BM_DIRTY | BM_JUST_DIRTIED))
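The comment above describes an unlocked pre-check: read the flag bits without the spinlock and return early when the buffer already looks dirty. A simplified sketch of that pattern (the lock field and flag values are stand-ins; the real code uses LockBufHdr/UnlockBufHdr on the buffer header spinlock):

#include <stdio.h>

#define BM_DIRTY        (1 << 0)
#define BM_JUST_DIRTIED (1 << 1)

struct buf_hdr_sketch
{
    int flags;
    int locked;      /* stand-in for the buffer header spinlock */
};

static void
mark_dirty_hint_sketch(struct buf_hdr_sketch *buf)
{
    /*
     * Unlocked fast path: if both bits already look set, do nothing.
     * This test can race with a concurrent clear, but hint-bit writes
     * are allowed to be lost, so that is harmless.
     */
    if ((buf->flags & (BM_DIRTY | BM_JUST_DIRTIED)) ==
        (BM_DIRTY | BM_JUST_DIRTIED))
        return;

    /* Slow path: take the header lock and set the bits for real. */
    buf->locked = 1;
    buf->flags |= BM_DIRTY | BM_JUST_DIRTIED;
    buf->locked = 0;
    printf("buffer dirtied\n");
}

int
main(void)
{
    struct buf_hdr_sketch buf = {0, 0};

    mark_dirty_hint_sketch(&buf);   /* slow path: dirties the buffer */
    mark_dirty_hint_sketch(&buf);   /* fast path: already dirty */
    return 0;
}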
@@ -2622,21 +2623,20 @@ MarkBufferDirtyHint(Buffer buffer)

 /*
 * If checksums are enabled, and the buffer is permanent, then a full
-* page image may be required even for some hint bit updates to protect
-* against torn pages. This full page image is only necessary if the
-* hint bit update is the first change to the page since the last
-* checkpoint.
+* page image may be required even for some hint bit updates to
+* protect against torn pages. This full page image is only necessary
+* if the hint bit update is the first change to the page since the
+* last checkpoint.
 *
-* We don't check full_page_writes here because that logic is
-* included when we call XLogInsert() since the value changes
-* dynamically.
+* We don't check full_page_writes here because that logic is included
+* when we call XLogInsert() since the value changes dynamically.
 */
 if (DataChecksumsEnabled() && (bufHdr->flags & BM_PERMANENT))
 {
 /*
 * If we're in recovery we cannot dirty a page because of a hint.
-* We can set the hint, just not dirty the page as a result so
-* the hint is lost when we evict the page or shutdown.
+* We can set the hint, just not dirty the page as a result so the
+* hint is lost when we evict the page or shutdown.
 *
 * See src/backend/storage/page/README for longer discussion.
 */
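The reflowed comments above encode a small decision tree: a full page image is needed only when checksums are on, the buffer is permanent, and the hint is the first change since the last checkpoint; during recovery the hint can be set but the page must not be dirtied. A sketch of that control flow, where every predicate is a hypothetical stand-in for the real check:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real conditions. */
static bool checksums_enabled = true;
static bool buffer_is_permanent = true;
static bool in_recovery = false;
static bool first_change_since_checkpoint = true;

static void
hint_bit_update_sketch(void)
{
    bool dirtied = true;   /* normally a hint update dirties the page */

    if (checksums_enabled && buffer_is_permanent)
    {
        if (in_recovery)
        {
            /*
             * In recovery: set the hint but don't dirty the page, so the
             * hint is simply lost on eviction or shutdown.
             */
            dirtied = false;
        }
        else if (first_change_since_checkpoint)
        {
            /* First change since the checkpoint: emit an XLOG_HINT FPI. */
            printf("write XLOG_HINT full page image\n");
        }
    }

    printf("set hint bit; dirtied = %s\n", dirtied ? "true" : "false");
}

int
main(void)
{
    hint_bit_update_sketch();
    return 0;
}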
@@ -2646,21 +2646,21 @@ MarkBufferDirtyHint(Buffer buffer)
 /*
 * If the block is already dirty because we either made a change
 * or set a hint already, then we don't need to write a full page
-* image. Note that aggressive cleaning of blocks
-* dirtied by hint bit setting would increase the call rate.
-* Bulk setting of hint bits would reduce the call rate...
+* image. Note that aggressive cleaning of blocks dirtied by hint
+* bit setting would increase the call rate. Bulk setting of hint
+* bits would reduce the call rate...
 *
 * We must issue the WAL record before we mark the buffer dirty.
-* Otherwise we might write the page before we write the WAL.
-* That causes a race condition, since a checkpoint might occur
-* between writing the WAL record and marking the buffer dirty.
-* We solve that with a kluge, but one that is already in use
-* during transaction commit to prevent race conditions.
-* Basically, we simply prevent the checkpoint WAL record from
-* being written until we have marked the buffer dirty. We don't
-* start the checkpoint flush until we have marked dirty, so our
-* checkpoint must flush the change to disk successfully or the
-* checkpoint never gets written, so crash recovery will fix.
+* Otherwise we might write the page before we write the WAL. That
+* causes a race condition, since a checkpoint might occur between
+* writing the WAL record and marking the buffer dirty. We solve
+* that with a kluge, but one that is already in use during
+* transaction commit to prevent race conditions. Basically, we
+* simply prevent the checkpoint WAL record from being written
+* until we have marked the buffer dirty. We don't start the
+* checkpoint flush until we have marked dirty, so our checkpoint
+* must flush the change to disk successfully or the checkpoint
+* never gets written, so crash recovery will fix.
 *
 * It's possible we may enter here without an xid, so it is
 * essential that CreateCheckpoint waits for virtual transactions
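The long comment re-wrapped above is about ordering: the XLOG_HINT record must be issued before the buffer is marked dirty, or a checkpoint landing between the two steps could write the page ahead of its WAL. A sketch of the safe ordering (function names here are hypothetical):

#include <stdio.h>

typedef unsigned long XLogRecPtr;

/* Hypothetical stand-ins for the WAL and buffer-header operations. */
static XLogRecPtr
emit_xlog_hint_fpi(void)
{
    printf("1. issue XLOG_HINT WAL record (backup block)\n");
    return 42;  /* LSN of the record, illustrative */
}

static void
mark_buffer_dirty(XLogRecPtr lsn)
{
    /* Holding the buffer header lock here serialises against readers. */
    printf("2. set page LSN to %lu and mark buffer dirty\n", lsn);
}

int
main(void)
{
    /*
     * WAL first, dirty second. The reverse order would let a checkpoint
     * write the page before its WAL record exists; the "kluge" in the
     * comment (holding off the checkpoint's own WAL record until the
     * buffer is marked dirty) closes the remaining window.
     */
    XLogRecPtr lsn = emit_xlog_hint_fpi();
    mark_buffer_dirty(lsn);
    return 0;
}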
@@ -2677,13 +2677,13 @@ MarkBufferDirtyHint(Buffer buffer)
 dirtied = true; /* Means "will be dirtied by this action" */

 /*
-* Set the page LSN if we wrote a backup block. We aren't
-* supposed to set this when only holding a share lock but
-* as long as we serialise it somehow we're OK. We choose to
-* set LSN while holding the buffer header lock, which causes
-* any reader of an LSN who holds only a share lock to also
-* obtain a buffer header lock before using PageGetLSN(),
-* which is enforced in BufferGetLSNAtomic().
+* Set the page LSN if we wrote a backup block. We aren't supposed
+* to set this when only holding a share lock but as long as we
+* serialise it somehow we're OK. We choose to set LSN while
+* holding the buffer header lock, which causes any reader of an
+* LSN who holds only a share lock to also obtain a buffer header
+* lock before using PageGetLSN(), which is enforced in
+* BufferGetLSNAtomic().
 *
 * If checksums are enabled, you might think we should reset the
 * checksum here. That will happen when the page is written
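Finally, the comment above pins down the locking protocol for the page LSN: the writer sets it while holding the buffer header lock, so a share-locked reader must take the same lock before PageGetLSN(), which is what BufferGetLSNAtomic() enforces. A reader-side sketch, with the lock and page types as simplified stand-ins:

#include <stdio.h>

typedef unsigned long XLogRecPtr;

struct page_sketch
{
    XLogRecPtr lsn;
    int hdr_locked;   /* stand-in for the buffer header spinlock */
};

/*
 * Reader-side sketch of BufferGetLSNAtomic(): take the buffer header
 * lock around the LSN read so it cannot interleave with a writer that
 * sets the LSN under the same lock while holding only a share lock on
 * the buffer content.
 */
static XLogRecPtr
get_lsn_atomic_sketch(struct page_sketch *page)
{
    XLogRecPtr lsn;

    page->hdr_locked = 1;        /* LockBufHdr() in the real code */
    lsn = page->lsn;             /* PageGetLSN() in the real code */
    page->hdr_locked = 0;        /* UnlockBufHdr() */
    return lsn;
}

int
main(void)
{
    struct page_sketch page = {1234, 0};

    printf("page LSN = %lu\n", get_lsn_atomic_sketch(&page));
    return 0;
}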