Mirror of https://github.com/postgres/postgres.git
pgindent run for release 9.3
This is the first run of the Perl-based pgindent script. Also update pgindent instructions.
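For orientation, here is a small, hypothetical C fragment (not taken from the PostgreSQL tree) illustrating the layout conventions this pgindent run applies throughout the diff below: comment blocks re-wrapped to the standard width, one declaration per line with a blank line separating declarations from statements, and operators spaced consistently.

/*
 * Hypothetical example only: a function laid out the way pgindent
 * formats code.  Comment text is re-wrapped, each declaration gets its
 * own line, and a blank line follows the declaration block.
 */
static int
example_sum(const int *values, int nvalues)
{
	int			i;
	int			total = 0;

	/* walk the array and accumulate the total */
	for (i = 0; i < nvalues; i++)
		total += values[i];

	return total;
}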
@@ -110,7 +110,7 @@ static volatile BufferDesc *BufferAlloc(SMgrRelation smgr,
 bool *foundPtr);
 static void FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln);
 static void AtProcExit_Buffers(int code, Datum arg);
-static int rnode_comparator(const void *p1, const void *p2);
+static int rnode_comparator(const void *p1, const void *p2);


 /*
@@ -476,9 +476,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 else
 ereport(ERROR,
 (errcode(ERRCODE_DATA_CORRUPTED),
-errmsg("invalid page in block %u of relation %s",
-blockNum,
-relpath(smgr->smgr_rnode, forkNum))));
+errmsg("invalid page in block %u of relation %s",
+blockNum,
+relpath(smgr->smgr_rnode, forkNum))));
 }
 }
 }
@@ -1220,7 +1220,8 @@ BufferSync(int flags)

 /*
 * Unless this is a shutdown checkpoint, we write only permanent, dirty
-* buffers. But at shutdown or end of recovery, we write all dirty buffers.
+* buffers. But at shutdown or end of recovery, we write all dirty
+* buffers.
 */
 if (!((flags & CHECKPOINT_IS_SHUTDOWN) || (flags & CHECKPOINT_END_OF_RECOVERY)))
 mask |= BM_PERMANENT;
@@ -1918,7 +1919,7 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
 instr_time io_start,
 io_time;
 Block bufBlock;
-char *bufToWrite;
+char *bufToWrite;

 /*
 * Acquire the buffer's io_in_progress lock. If StartBufferIO returns
@@ -1964,14 +1965,14 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
 * However, this rule does not apply to unlogged relations, which will be
 * lost after a crash anyway. Most unlogged relation pages do not bear
 * LSNs since we never emit WAL records for them, and therefore flushing
-* up through the buffer LSN would be useless, but harmless. However, GiST
-* indexes use LSNs internally to track page-splits, and therefore unlogged
-* GiST pages bear "fake" LSNs generated by GetFakeLSNForUnloggedRel. It
-* is unlikely but possible that the fake LSN counter could advance past
-* the WAL insertion point; and if it did happen, attempting to flush WAL
-* through that location would fail, with disastrous system-wide
-* consequences. To make sure that can't happen, skip the flush if the
-* buffer isn't permanent.
+* up through the buffer LSN would be useless, but harmless. However,
+* GiST indexes use LSNs internally to track page-splits, and therefore
+* unlogged GiST pages bear "fake" LSNs generated by
+* GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
+* LSN counter could advance past the WAL insertion point; and if it did
+* happen, attempting to flush WAL through that location would fail, with
+* disastrous system-wide consequences. To make sure that can't happen,
+* skip the flush if the buffer isn't permanent.
 */
 if (buf->flags & BM_PERMANENT)
 XLogFlush(recptr);
@@ -2076,8 +2077,8 @@ XLogRecPtr
 BufferGetLSNAtomic(Buffer buffer)
 {
 volatile BufferDesc *bufHdr = &BufferDescriptors[buffer - 1];
-char *page = BufferGetPage(buffer);
-XLogRecPtr lsn;
+char *page = BufferGetPage(buffer);
+XLogRecPtr lsn;

 /*
 * If we don't need locking for correctness, fastpath out.
@@ -2181,7 +2182,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber forkNum,
 void
 DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 {
-int i,
+int i,
 n = 0;
 RelFileNode *nodes;
 bool use_bsearch;
@@ -2189,7 +2190,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 if (nnodes == 0)
 return;

-nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */
+nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */

 /* If it's a local relation, it's localbuf.c's problem. */
 for (i = 0; i < nnodes; i++)
@@ -2204,8 +2205,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 }

 /*
-* If there are no non-local relations, then we're done. Release the memory
-* and return.
+* If there are no non-local relations, then we're done. Release the
+* memory and return.
 */
 if (n == 0)
 {
@@ -2215,8 +2216,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)

 /*
 * For low number of relations to drop just use a simple walk through, to
-* save the bsearch overhead. The threshold to use is rather a guess than a
-* exactly determined value, as it depends on many factors (CPU and RAM
+* save the bsearch overhead. The threshold to use is rather a guess than
+* a exactly determined value, as it depends on many factors (CPU and RAM
 * speeds, amount of shared buffers etc.).
 */
 use_bsearch = n > DROP_RELS_BSEARCH_THRESHOLD;
@@ -2237,7 +2238,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)

 if (!use_bsearch)
 {
-int j;
+int j;

 for (j = 0; j < n; j++)
 {
@@ -2397,8 +2398,8 @@ FlushRelationBuffers(Relation rel)
 if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
 (bufHdr->flags & BM_VALID) && (bufHdr->flags & BM_DIRTY))
 {
-ErrorContextCallback errcallback;
-Page localpage;
+ErrorContextCallback errcallback;
+Page localpage;

 localpage = (char *) LocalBufHdrGetBlock(bufHdr);

@@ -2575,17 +2576,17 @@ IncrBufferRefCount(Buffer buffer)
 * This is essentially the same as MarkBufferDirty, except:
 *
 * 1. The caller does not write WAL; so if checksums are enabled, we may need
-* to write an XLOG_HINT WAL record to protect against torn pages.
+* to write an XLOG_HINT WAL record to protect against torn pages.
 * 2. The caller might have only share-lock instead of exclusive-lock on the
-* buffer's content lock.
+* buffer's content lock.
 * 3. This function does not guarantee that the buffer is always marked dirty
-* (due to a race condition), so it cannot be used for important changes.
+* (due to a race condition), so it cannot be used for important changes.
 */
 void
 MarkBufferDirtyHint(Buffer buffer)
 {
 volatile BufferDesc *bufHdr;
-Page page = BufferGetPage(buffer);
+Page page = BufferGetPage(buffer);

 if (!BufferIsValid(buffer))
 elog(ERROR, "bad buffer ID: %d", buffer);
@@ -2605,13 +2606,13 @@ MarkBufferDirtyHint(Buffer buffer)
 /*
 * This routine might get called many times on the same page, if we are
 * making the first scan after commit of an xact that added/deleted many
-* tuples. So, be as quick as we can if the buffer is already dirty. We do
-* this by not acquiring spinlock if it looks like the status bits are
+* tuples. So, be as quick as we can if the buffer is already dirty. We
+* do this by not acquiring spinlock if it looks like the status bits are
 * already set. Since we make this test unlocked, there's a chance we
 * might fail to notice that the flags have just been cleared, and failed
 * to reset them, due to memory-ordering issues. But since this function
-* is only intended to be used in cases where failing to write out the data
-* would be harmless anyway, it doesn't really matter.
+* is only intended to be used in cases where failing to write out the
+* data would be harmless anyway, it doesn't really matter.
 */
 if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
 (BM_DIRTY | BM_JUST_DIRTIED))
@@ -2622,21 +2623,20 @@ MarkBufferDirtyHint(Buffer buffer)

 /*
 * If checksums are enabled, and the buffer is permanent, then a full
-* page image may be required even for some hint bit updates to protect
-* against torn pages. This full page image is only necessary if the
-* hint bit update is the first change to the page since the last
-* checkpoint.
+* page image may be required even for some hint bit updates to
+* protect against torn pages. This full page image is only necessary
+* if the hint bit update is the first change to the page since the
+* last checkpoint.
 *
-* We don't check full_page_writes here because that logic is
-* included when we call XLogInsert() since the value changes
-* dynamically.
+* We don't check full_page_writes here because that logic is included
+* when we call XLogInsert() since the value changes dynamically.
 */
 if (DataChecksumsEnabled() && (bufHdr->flags & BM_PERMANENT))
 {
 /*
 * If we're in recovery we cannot dirty a page because of a hint.
-* We can set the hint, just not dirty the page as a result so
-* the hint is lost when we evict the page or shutdown.
+* We can set the hint, just not dirty the page as a result so the
+* hint is lost when we evict the page or shutdown.
 *
 * See src/backend/storage/page/README for longer discussion.
 */
@@ -2646,21 +2646,21 @@ MarkBufferDirtyHint(Buffer buffer)
 /*
 * If the block is already dirty because we either made a change
 * or set a hint already, then we don't need to write a full page
-* image. Note that aggressive cleaning of blocks
-* dirtied by hint bit setting would increase the call rate.
-* Bulk setting of hint bits would reduce the call rate...
+* image. Note that aggressive cleaning of blocks dirtied by hint
+* bit setting would increase the call rate. Bulk setting of hint
+* bits would reduce the call rate...
 *
 * We must issue the WAL record before we mark the buffer dirty.
-* Otherwise we might write the page before we write the WAL.
-* That causes a race condition, since a checkpoint might occur
-* between writing the WAL record and marking the buffer dirty.
-* We solve that with a kluge, but one that is already in use
-* during transaction commit to prevent race conditions.
-* Basically, we simply prevent the checkpoint WAL record from
-* being written until we have marked the buffer dirty. We don't
-* start the checkpoint flush until we have marked dirty, so our
-* checkpoint must flush the change to disk successfully or the
-* checkpoint never gets written, so crash recovery will fix.
+* Otherwise we might write the page before we write the WAL. That
+* causes a race condition, since a checkpoint might occur between
+* writing the WAL record and marking the buffer dirty. We solve
+* that with a kluge, but one that is already in use during
+* transaction commit to prevent race conditions. Basically, we
+* simply prevent the checkpoint WAL record from being written
+* until we have marked the buffer dirty. We don't start the
+* checkpoint flush until we have marked dirty, so our checkpoint
+* must flush the change to disk successfully or the checkpoint
+* never gets written, so crash recovery will fix.
 *
 * It's possible we may enter here without an xid, so it is
 * essential that CreateCheckpoint waits for virtual transactions
@@ -2677,13 +2677,13 @@ MarkBufferDirtyHint(Buffer buffer)
 dirtied = true; /* Means "will be dirtied by this action" */

 /*
-* Set the page LSN if we wrote a backup block. We aren't
-* supposed to set this when only holding a share lock but
-* as long as we serialise it somehow we're OK. We choose to
-* set LSN while holding the buffer header lock, which causes
-* any reader of an LSN who holds only a share lock to also
-* obtain a buffer header lock before using PageGetLSN(),
-* which is enforced in BufferGetLSNAtomic().
+* Set the page LSN if we wrote a backup block. We aren't supposed
+* to set this when only holding a share lock but as long as we
+* serialise it somehow we're OK. We choose to set LSN while
+* holding the buffer header lock, which causes any reader of an
+* LSN who holds only a share lock to also obtain a buffer header
+* lock before using PageGetLSN(), which is enforced in
+* BufferGetLSNAtomic().
 *
 * If checksums are enabled, you might think we should reset the
 * checksum here. That will happen when the page is written
@@ -196,8 +196,8 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
 */
 if (bufHdr->flags & BM_DIRTY)
 {
-SMgrRelation oreln;
-Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
+SMgrRelation oreln;
+Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);

 /* Find smgr relation for buffer */
 oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
@@ -509,7 +509,7 @@ AtEOXact_LocalBuffers(bool isCommit)
 {
 if (LocalRefCount[i] != 0)
 {
-Buffer b = -i - 1;
+Buffer b = -i - 1;

 PrintBufferLeakWarning(b);
 RefCountErrors++;
@@ -541,7 +541,7 @@ AtProcExit_LocalBuffers(void)
 {
 if (LocalRefCount[i] != 0)
 {
-Buffer b = -i - 1;
+Buffer b = -i - 1;

 PrintBufferLeakWarning(b);
 RefCountErrors++;
@@ -400,7 +400,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
 pgxact->xmin = InvalidTransactionId;
 /* must be cleared with xid/xmin: */
 pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+pgxact->delayChkpt = false; /* be sure this is cleared in abort */
 proc->recoveryConflictPending = false;

 /* Clear the subtransaction-XID cache too while holding the lock */
@@ -427,7 +427,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
 pgxact->xmin = InvalidTransactionId;
 /* must be cleared with xid/xmin: */
 pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+pgxact->delayChkpt = false; /* be sure this is cleared in abort */
 proc->recoveryConflictPending = false;

 Assert(pgxact->nxids == 0);
@@ -1429,11 +1429,11 @@ GetSnapshotData(Snapshot snapshot)
 * depending upon when the snapshot was taken, or change normal
 * snapshot processing so it matches.
 *
-* Note: It is possible for recovery to end before we finish taking the
-* snapshot, and for newly assigned transaction ids to be added to the
-* ProcArray. xmax cannot change while we hold ProcArrayLock, so those
-* newly added transaction ids would be filtered away, so we need not
-* be concerned about them.
+* Note: It is possible for recovery to end before we finish taking
+* the snapshot, and for newly assigned transaction ids to be added to
+* the ProcArray. xmax cannot change while we hold ProcArrayLock, so
+* those newly added transaction ids would be filtered away, so we
+* need not be concerned about them.
 */
 subcount = KnownAssignedXidsGetAndSetXmin(snapshot->subxip, &xmin,
 xmax);
@@ -1688,8 +1688,8 @@ GetRunningTransactionData(void)

 /*
 * Top-level XID of a transaction is always less than any of
-* its subxids, so we don't need to check if any of the subxids
-* are smaller than oldestRunningXid
+* its subxids, so we don't need to check if any of the
+* subxids are smaller than oldestRunningXid
 */
 }
 }
@@ -1811,9 +1811,9 @@ GetVirtualXIDsDelayingChkpt(int *nvxids)

 for (index = 0; index < arrayP->numProcs; index++)
 {
-int pgprocno = arrayP->pgprocnos[index];
-volatile PGPROC *proc = &allProcs[pgprocno];
-volatile PGXACT *pgxact = &allPgXact[pgprocno];
+int pgprocno = arrayP->pgprocnos[index];
+volatile PGPROC *proc = &allProcs[pgprocno];
+volatile PGXACT *pgxact = &allPgXact[pgprocno];

 if (pgxact->delayChkpt)
 {
@@ -1853,9 +1853,9 @@ HaveVirtualXIDsDelayingChkpt(VirtualTransactionId *vxids, int nvxids)
 {
 for (index = 0; index < arrayP->numProcs; index++)
 {
-int pgprocno = arrayP->pgprocnos[index];
-volatile PGPROC *proc = &allProcs[pgprocno];
-volatile PGXACT *pgxact = &allPgXact[pgprocno];
+int pgprocno = arrayP->pgprocnos[index];
+volatile PGPROC *proc = &allProcs[pgprocno];
+volatile PGXACT *pgxact = &allPgXact[pgprocno];
 VirtualTransactionId vxid;

 GET_VXID_FROM_PGPROC(vxid, *proc);
@@ -443,10 +443,10 @@ ResolveRecoveryConflictWithBufferPin(void)
 ProcWaitForSignal();

 /*
-* Clear any timeout requests established above. We assume here that
-* the Startup process doesn't have any other timeouts than what this
-* function uses. If that stops being true, we could cancel the
-* timeouts individually, but that'd be slower.
+* Clear any timeout requests established above. We assume here that the
+* Startup process doesn't have any other timeouts than what this function
+* uses. If that stops being true, we could cancel the timeouts
+* individually, but that'd be slower.
 */
 disable_all_timeouts(false);
 }
@@ -1210,7 +1210,7 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
 static void
 RemoveLocalLock(LOCALLOCK *locallock)
 {
-int i;
+int i;

 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 {
@@ -1988,7 +1988,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;

 /* If session lock is above array position 0, move it down to 0 */
-for (i = 0; i < locallock->numLockOwners ; i++)
+for (i = 0; i < locallock->numLockOwners; i++)
 {
 if (lockOwners[i].owner == NULL)
 lockOwners[0] = lockOwners[i];
@@ -2214,7 +2214,7 @@ LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 }
 else
 {
-int i;
+int i;

 for (i = nlocks - 1; i >= 0; i--)
 ReleaseLockIfHeld(locallocks[i], false);
@@ -2313,7 +2313,7 @@ LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 }
 else
 {
-int i;
+int i;

 for (i = nlocks - 1; i >= 0; i--)
 LockReassignOwner(locallocks[i], parent);
@@ -2333,8 +2333,8 @@ LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
 int ip = -1;

 /*
-* Scan to see if there are any locks belonging to current owner or
-* its parent
+* Scan to see if there are any locks belonging to current owner or its
+* parent
 */
 lockOwners = locallock->lockOwners;
 for (i = locallock->numLockOwners - 1; i >= 0; i--)
@@ -2346,7 +2346,7 @@ LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
 }

 if (ic < 0)
-return; /* no current locks */
+return; /* no current locks */

 if (ip < 0)
 {
@@ -2690,9 +2690,9 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
 LWLockAcquire(proc->backendLock, LW_SHARED);

 /*
-* If the target backend isn't referencing the same database as the
-* lock, then we needn't examine the individual relation IDs at
-* all; none of them can be relevant.
+* If the target backend isn't referencing the same database as
+* the lock, then we needn't examine the individual relation IDs
+* at all; none of them can be relevant.
 *
 * See FastPathTransferLocks() for discussion of why we do this
 * test after acquiring the lock.
@@ -3158,15 +3158,15 @@ PostPrepare_Locks(TransactionId xid)
 /*
 * We cannot simply modify proclock->tag.myProc to reassign
 * ownership of the lock, because that's part of the hash key and
-* the proclock would then be in the wrong hash chain. Instead
+* the proclock would then be in the wrong hash chain. Instead
 * use hash_update_hash_key. (We used to create a new hash entry,
 * but that risks out-of-memory failure if other processes are
-* busy making proclocks too.) We must unlink the proclock from
+* busy making proclocks too.) We must unlink the proclock from
 * our procLink chain and put it into the new proc's chain, too.
 *
 * Note: the updated proclock hash key will still belong to the
-* same hash partition, cf proclock_hash(). So the partition
-* lock we already hold is sufficient for this.
+* same hash partition, cf proclock_hash(). So the partition lock
+* we already hold is sufficient for this.
 */
 SHMQueueDelete(&proclock->procLink);

@@ -3177,9 +3177,9 @@ PostPrepare_Locks(TransactionId xid)
 proclocktag.myProc = newproc;

 /*
-* Update the proclock. We should not find any existing entry
-* for the same hash key, since there can be only one entry for
-* any given lock with my own proc.
+* Update the proclock. We should not find any existing entry for
+* the same hash key, since there can be only one entry for any
+* given lock with my own proc.
 */
 if (!hash_update_hash_key(LockMethodProcLockHash,
 (void *) proclock,
@@ -1575,8 +1575,8 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)

 /*
 * Can't use serializable mode while recovery is still active, as it is,
-* for example, on a hot standby. We could get here despite the check
-* in check_XactIsoLevel() if default_transaction_isolation is set to
+* for example, on a hot standby. We could get here despite the check in
+* check_XactIsoLevel() if default_transaction_isolation is set to
 * serializable, so phrase the hint accordingly.
 */
 if (RecoveryInProgress())
@@ -186,8 +186,8 @@ InitProcGlobal(void)
 * five separate consumers: (1) normal backends, (2) autovacuum workers
 * and the autovacuum launcher, (3) background workers, (4) auxiliary
 * processes, and (5) prepared transactions. Each PGPROC structure is
-* dedicated to exactly one of these purposes, and they do not move between
-* groups.
+* dedicated to exactly one of these purposes, and they do not move
+* between groups.
 */
 procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
 ProcGlobal->allProcs = procs;
@@ -291,7 +291,7 @@ InitProcess(void)
 elog(ERROR, "you already exist");

 /*
-* Initialize process-local latch support. This could fail if the kernel
+* Initialize process-local latch support. This could fail if the kernel
 * is low on resources, and if so we want to exit cleanly before acquiring
 * any shared-memory resources.
 */
@@ -476,7 +476,7 @@ InitAuxiliaryProcess(void)
 elog(ERROR, "you already exist");

 /*
-* Initialize process-local latch support. This could fail if the kernel
+* Initialize process-local latch support. This could fail if the kernel
 * is low on resources, and if so we want to exit cleanly before acquiring
 * any shared-memory resources.
 */
@@ -1153,25 +1153,25 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
 {
 int pid = autovac->pid;
 StringInfoData locktagbuf;
-StringInfoData logbuf; /* errdetail for server log */
+StringInfoData logbuf; /* errdetail for server log */

 initStringInfo(&locktagbuf);
 initStringInfo(&logbuf);
 DescribeLockTag(&locktagbuf, &lock->tag);
 appendStringInfo(&logbuf,
-_("Process %d waits for %s on %s."),
-MyProcPid,
-GetLockmodeName(lock->tag.locktag_lockmethodid,
-lockmode),
-locktagbuf.data);
+_("Process %d waits for %s on %s."),
+MyProcPid,
+GetLockmodeName(lock->tag.locktag_lockmethodid,
+lockmode),
+locktagbuf.data);

 /* release lock as quickly as possible */
 LWLockRelease(ProcArrayLock);

 ereport(LOG,
-(errmsg("sending cancel to blocking autovacuum PID %d",
-pid),
-errdetail_log("%s", logbuf.data)));
+(errmsg("sending cancel to blocking autovacuum PID %d",
+pid),
+errdetail_log("%s", logbuf.data)));

 pfree(logbuf.data);
 pfree(locktagbuf.data);
@@ -51,7 +51,7 @@ SpinlockSemas(void)
 int
 SpinlockSemas(void)
 {
-int nsemas;
+int nsemas;

 /*
 * It would be cleaner to distribute this logic into the affected modules,
@@ -18,9 +18,9 @@
 #include "access/xlog.h"
 #include "storage/checksum.h"

-bool ignore_checksum_failure = false;
+bool ignore_checksum_failure = false;

-static char pageCopyData[BLCKSZ]; /* for checksum calculation */
+static char pageCopyData[BLCKSZ]; /* for checksum calculation */
 static Page pageCopy = pageCopyData;

 static uint16 PageCalcChecksum16(Page page, BlockNumber blkno);
@@ -101,16 +101,16 @@ PageIsVerified(Page page, BlockNumber blkno)
 }

 /*
-* The following checks don't prove the header is correct,
-* only that it looks sane enough to allow into the buffer pool.
-* Later usage of the block can still reveal problems,
-* which is why we offer the checksum option.
+* The following checks don't prove the header is correct, only that
+* it looks sane enough to allow into the buffer pool. Later usage of
+* the block can still reveal problems, which is why we offer the
+* checksum option.
 */
 if ((p->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
-p->pd_lower <= p->pd_upper &&
-p->pd_upper <= p->pd_special &&
-p->pd_special <= BLCKSZ &&
-p->pd_special == MAXALIGN(p->pd_special))
+p->pd_lower <= p->pd_upper &&
+p->pd_upper <= p->pd_special &&
+p->pd_special <= BLCKSZ &&
+p->pd_special == MAXALIGN(p->pd_special))
 header_sane = true;

 if (header_sane && !checksum_failure)
@@ -905,10 +905,10 @@ PageSetChecksumCopy(Page page, BlockNumber blkno)

 /*
 * We make a copy iff we need to calculate a checksum because other
-* backends may set hint bits on this page while we write, which
-* would mean the checksum differs from the page contents. It doesn't
-* matter if we include or exclude hints during the copy, as long
-* as we write a valid page and associated checksum.
+* backends may set hint bits on this page while we write, which would
+* mean the checksum differs from the page contents. It doesn't matter if
+* we include or exclude hints during the copy, as long as we write a
+* valid page and associated checksum.
 */
 memcpy((char *) pageCopy, (char *) page, BLCKSZ);
 PageSetChecksumInplace(pageCopy, blkno);
@@ -931,6 +931,7 @@ PageSetChecksumInplace(Page page, BlockNumber blkno)
 if (DataChecksumsEnabled())
 {
 PageHeader p = (PageHeader) page;
+
 p->pd_checksum = PageCalcChecksum16(page, blkno);
 }

@@ -949,7 +950,7 @@ PageSetChecksumInplace(Page page, BlockNumber blkno)
 static uint16
 PageCalcChecksum16(Page page, BlockNumber blkno)
 {
-PageHeader phdr = (PageHeader) page;
+PageHeader phdr = (PageHeader) page;
 uint16 save_checksum;
 uint32 checksum;

@@ -958,9 +959,8 @@ PageCalcChecksum16(Page page, BlockNumber blkno)

 /*
 * Save pd_checksum and set it to zero, so that the checksum calculation
-* isn't affected by the checksum stored on the page. We do this to
-* allow optimization of the checksum calculation on the whole block
-* in one go.
+* isn't affected by the checksum stored on the page. We do this to allow
+* optimization of the checksum calculation on the whole block in one go.
 */
 save_checksum = phdr->pd_checksum;
 phdr->pd_checksum = 0;
@@ -23,7 +23,7 @@
 * for Fowler/Noll/Vo) The primitive of a plain FNV-1a hash folds in data 1
 * byte at a time according to the formula:
 *
-* hash = (hash ^ value) * FNV_PRIME
+* hash = (hash ^ value) * FNV_PRIME
 *
 * FNV-1a algorithm is described at http://www.isthe.com/chongo/tech/comp/fnv/
 *
@@ -36,7 +36,7 @@
 * avalanche into lower positions. For performance reasons we choose to combine
 * 4 bytes at a time. The actual hash formula used as the basis is:
 *
-* hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17)
+* hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17)
 *
 * The main bottleneck in this calculation is the multiplication latency. To
 * hide the latency and to make use of SIMD parallelism multiple hash values
@@ -131,19 +131,20 @@ static const uint32 checksumBaseOffsets[N_SUMS] = {
 uint32
 checksum_block(char *data, uint32 size)
 {
-uint32 sums[N_SUMS];
-uint32 (*dataArr)[N_SUMS] = (uint32 (*)[N_SUMS]) data;
-uint32 result = 0;
-int i, j;
+uint32 sums[N_SUMS];
+uint32 (*dataArr)[N_SUMS] = (uint32 (*)[N_SUMS]) data;
+uint32 result = 0;
+int i,
+j;

 /* ensure that the size is compatible with the algorithm */
-Assert((size % (sizeof(uint32)*N_SUMS)) == 0);
+Assert((size % (sizeof(uint32) * N_SUMS)) == 0);

 /* initialize partial checksums to their corresponding offsets */
 memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets));

 /* main checksum calculation */
-for (i = 0; i < size/sizeof(uint32)/N_SUMS; i++)
+for (i = 0; i < size / sizeof(uint32) / N_SUMS; i++)
 for (j = 0; j < N_SUMS; j++)
 CHECKSUM_COMP(sums[j], dataArr[i][j]);

@@ -435,16 +435,16 @@ smgrdounlink(SMgrRelation reln, bool isRedo)
 void
 smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
 {
-int i = 0;
+int i = 0;
 RelFileNodeBackend *rnodes;
-ForkNumber forknum;
+ForkNumber forknum;

 if (nrels == 0)
 return;

 /*
-* create an array which contains all relations to be dropped, and
-* close each relation's forks at the smgr level while at it
+* create an array which contains all relations to be dropped, and close
+* each relation's forks at the smgr level while at it
 */
 rnodes = palloc(sizeof(RelFileNodeBackend) * nrels);
 for (i = 0; i < nrels; i++)
@@ -460,14 +460,14 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
 }

 /*
-* Get rid of any remaining buffers for the relations. bufmgr will just
+* Get rid of any remaining buffers for the relations. bufmgr will just
 * drop them without bothering to write the contents.
 */
 DropRelFileNodesAllBuffers(rnodes, nrels);

 /*
-* It'd be nice to tell the stats collector to forget them immediately, too.
-* But we can't because we don't know the OIDs.
+* It'd be nice to tell the stats collector to forget them immediately,
+* too. But we can't because we don't know the OIDs.
 */

 /*
@@ -475,8 +475,8 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
 * dangling smgr references they may have for these rels. We should do
 * this before starting the actual unlinking, in case we fail partway
 * through that step. Note that the sinval messages will eventually come
-* back to this backend, too, and thereby provide a backstop that we closed
-* our own smgr rel.
+* back to this backend, too, and thereby provide a backstop that we
+* closed our own smgr rel.
 */
 for (i = 0; i < nrels; i++)
 CacheInvalidateSmgr(rnodes[i]);
@@ -491,7 +491,8 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)

 for (i = 0; i < nrels; i++)
 {
-int which = rels[i]->smgr_which;
+int which = rels[i]->smgr_which;
+
 for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
 (*(smgrsw[which].smgr_unlink)) (rnodes[i], forknum, isRedo);
 }