
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.

commit 927d61eeff
parent 60801944fa
Author: Bruce Momjian
Date:   2012-06-10 15:20:04 -04:00

494 changed files with 7343 additions and 7046 deletions

src/backend/storage/buffer/bufmgr.c

@@ -1325,7 +1325,7 @@ BufferSync(int flags)
* This is called periodically by the background writer process.
*
* Returns true if it's appropriate for the bgwriter process to go into
* low-power hibernation mode. (This happens if the strategy clock sweep
* low-power hibernation mode. (This happens if the strategy clock sweep
* has been "lapped" and no buffer allocations have occurred recently,
* or if the bgwriter has been effectively disabled by setting
* bgwriter_lru_maxpages to 0.)
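The hibernation handshake this comment describes can be pictured with a short sketch. The loop below is illustrative only (the real bgwriter main loop is more involved), and sleep_long()/sleep_short() are hypothetical helpers standing in for the latch-based waits:

    /* Illustrative only: acting on BgBufferSync()'s return value.
     * sleep_long()/sleep_short() are hypothetical helpers. */
    for (;;)
    {
        bool        can_hibernate = BgBufferSync();

        if (can_hibernate)
            sleep_long();       /* wait on latch; StrategyNotifyBgWriter
                                 * wakes us when allocations resume */
        else
            sleep_short();      /* normal bgwriter_delay pacing */
    }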
@@ -1510,8 +1510,8 @@ BgBufferSync(void)
/*
* If recent_alloc remains at zero for many cycles, smoothed_alloc will
* eventually underflow to zero, and the underflows produce annoying
* kernel warnings on some platforms. Once upcoming_alloc_est has gone
* to zero, there's no point in tracking smaller and smaller values of
* kernel warnings on some platforms. Once upcoming_alloc_est has gone to
* zero, there's no point in tracking smaller and smaller values of
* smoothed_alloc, so just reset it to exactly zero to avoid this
* syndrome. It will pop back up as soon as recent_alloc increases.
*/
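What the comment calls the "syndrome" is a decaying average chasing zero; the fix clamps it once its integer consumer has rounded to zero. A paraphrased sketch of the surrounding arithmetic, using the variable names from the comment (not a verbatim quote of bufmgr.c):

    /* Sketch of the underflow guard described above (paraphrased). */
    smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
        smoothing_samples;                  /* decaying moving average */
    upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
    if (upcoming_alloc_est == 0)
        smoothed_alloc = 0;     /* stop tracking ever-smaller denormals */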
@@ -2006,11 +2006,11 @@ BufferIsPermanent(Buffer buffer)
Assert(BufferIsPinned(buffer));
/*
* BM_PERMANENT can't be changed while we hold a pin on the buffer, so
* we need not bother with the buffer header spinlock. Even if someone
* else changes the buffer header flags while we're doing this, we assume
* that changing an aligned 2-byte BufFlags value is atomic, so we'll read
* the old value or the new value, but not random garbage.
* BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
* need not bother with the buffer header spinlock. Even if someone else
* changes the buffer header flags while we're doing this, we assume that
* changing an aligned 2-byte BufFlags value is atomic, so we'll read the
* old value or the new value, but not random garbage.
*/
bufHdr = &BufferDescriptors[buffer - 1];
return (bufHdr->flags & BM_PERMANENT) != 0;
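The assumption stated here is narrow: a naturally aligned 2-byte load returns either the old or the new flag word, never a torn mix of both. A minimal sketch of the unlocked-read pattern, with a hypothetical helper name:

    /* Hypothetical helper: unlocked test of one bit in the buffer flags. */
    static bool
    flag_is_set_unlocked(volatile BufferDesc *bufHdr, BufFlags bit)
    {
        BufFlags    flags = bufHdr->flags;  /* one aligned 16-bit load */

        return (flags & bit) != 0;          /* old or new value, no garbage */
    }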
@@ -2461,10 +2461,10 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
* tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
already set. Since we make this test unlocked, there's a chance we might
* fail to notice that the flags have just been cleared, and failed to reset
* them, due to memory-ordering issues. But since this function is only
* intended to be used in cases where failing to write out the data would
* be harmless anyway, it doesn't really matter.
* fail to notice that the flags have just been cleared, and failed to
* reset them, due to memory-ordering issues. But since this function is
* only intended to be used in cases where failing to write out the data
* would be harmless anyway, it doesn't really matter.
*/
if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
(BM_DIRTY | BM_JUST_DIRTIED))
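For context, the unlocked test above is only the first half of a check-lock-recheck pattern; a sketch of how the locked half plausibly completes it (paraphrased, not quoted):

    if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
        (BM_DIRTY | BM_JUST_DIRTIED))
    {
        LockBufHdr(bufHdr);         /* take the buffer header spinlock... */
        Assert(bufHdr->refcount > 0);
        bufHdr->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
        UnlockBufHdr(bufHdr);       /* ...and set the flags under it */
    }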

src/backend/storage/buffer/freelist.c

@@ -294,7 +294,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
* StrategyNotifyBgWriter -- set or clear allocation notification latch
*
* If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will
* set that latch. Pass NULL to clear the pending notification before it
* set that latch. Pass NULL to clear the pending notification before it
* happens. This feature is used by the bgwriter process to wake itself up
* from hibernation, and is not meant for anybody else to use.
*/

src/backend/storage/file/fd.c

@@ -164,7 +164,7 @@ static bool have_pending_fd_cleanup = false;
/*
* Tracks the total size of all temporary files. Note: when temp_file_limit
* is being enforced, this cannot overflow since the limit cannot be more
* than INT_MAX kilobytes. When not enforcing, it could theoretically
* than INT_MAX kilobytes. When not enforcing, it could theoretically
* overflow, but we don't care.
*/
static uint64 temporary_files_size = 0;
@@ -685,7 +685,7 @@ LruInsert(File file)
/* seek to the right position */
if (vfdP->seekPos != (off_t) 0)
{
off_t returnValue PG_USED_FOR_ASSERTS_ONLY;
off_t returnValue PG_USED_FOR_ASSERTS_ONLY;
returnValue = lseek(vfdP->fd, vfdP->seekPos, SEEK_SET);
Assert(returnValue != (off_t) -1);
@@ -1046,7 +1046,7 @@ OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError)
void
FileSetTransient(File file)
{
Vfd *vfdP;
Vfd *vfdP;
Assert(FileIsValid(file));
@@ -1255,7 +1255,7 @@ FileWrite(File file, char *buffer, int amount)
/*
* If enforcing temp_file_limit and it's a temp file, check to see if the
* write would overrun temp_file_limit, and throw error if so. Note: it's
* write would overrun temp_file_limit, and throw error if so. Note: it's
* really a modularity violation to throw error here; we should set errno
* and return -1. However, there's no way to report a suitable error
* message if we do that. All current callers would just throw error
@@ -1263,18 +1263,18 @@ FileWrite(File file, char *buffer, int amount)
*/
if (temp_file_limit >= 0 && (VfdCache[file].fdstate & FD_TEMPORARY))
{
off_t newPos = VfdCache[file].seekPos + amount;
off_t newPos = VfdCache[file].seekPos + amount;
if (newPos > VfdCache[file].fileSize)
{
uint64 newTotal = temporary_files_size;
uint64 newTotal = temporary_files_size;
newTotal += newPos - VfdCache[file].fileSize;
if (newTotal > (uint64) temp_file_limit * (uint64) 1024)
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("temporary file size exceeds temp_file_limit (%dkB)",
temp_file_limit)));
errmsg("temporary file size exceeds temp_file_limit (%dkB)",
temp_file_limit)));
}
}
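Units are the subtle point in this check: temp_file_limit is configured in kilobytes while seek positions are bytes, hence the multiply by 1024, and the uint64 casts keep that multiply from overflowing a 32-bit int. Isolated as a self-contained sketch (helper name hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper isolating the limit test above. */
    static bool
    temp_limit_exceeded(uint64_t total_bytes, int temp_file_limit_kb)
    {
        return total_bytes > (uint64_t) temp_file_limit_kb * (uint64_t) 1024;
    }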
@@ -1293,7 +1293,7 @@ retry:
/* maintain fileSize and temporary_files_size if it's a temp file */
if (VfdCache[file].fdstate & FD_TEMPORARY)
{
off_t newPos = VfdCache[file].seekPos;
off_t newPos = VfdCache[file].seekPos;
if (newPos > VfdCache[file].fileSize)
{
@@ -1915,8 +1915,8 @@ CleanupTempFiles(bool isProcExit)
/*
* If we're in the process of exiting a backend process,
* close all temporary files. Otherwise, only close
* temporary files local to the current transaction.
* They should be closed by the ResourceOwner mechanism
* temporary files local to the current transaction. They
* should be closed by the ResourceOwner mechanism
* already, so this is just a debugging cross-check.
*/
if (isProcExit)
@@ -1924,7 +1924,7 @@ CleanupTempFiles(bool isProcExit)
else if (fdstate & FD_XACT_TEMPORARY)
{
elog(WARNING,
"temporary file %s not closed at end-of-transaction",
"temporary file %s not closed at end-of-transaction",
VfdCache[i].fileName);
FileClose(i);
}

src/backend/storage/ipc/pmsignal.c

@@ -272,8 +272,8 @@ bool
PostmasterIsAlive(void)
{
#ifndef WIN32
char c;
ssize_t rc;
char c;
ssize_t rc;
rc = read(postmaster_alive_fds[POSTMASTER_FD_WATCH], &c, 1);
if (rc < 0)
@@ -287,7 +287,6 @@ PostmasterIsAlive(void)
elog(FATAL, "unexpected data in postmaster death monitoring pipe");
return false;
#else /* WIN32 */
return (WaitForSingleObject(PostmasterHandle, 0) == WAIT_TIMEOUT);
#endif /* WIN32 */
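Typical usage, for orientation: auxiliary processes poll this in their main loops and exit when the pipe reports the postmaster gone. A minimal sketch (the actual exit path varies by process):

    /* Sketch: periodic liveness check inside an auxiliary process loop. */
    if (!PostmasterIsAlive())
        exit(1);                /* postmaster died; bail out */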

src/backend/storage/ipc/procarray.c

@@ -82,10 +82,10 @@ typedef struct ProcArrayStruct
TransactionId lastOverflowedXid;
/*
* We declare pgprocnos[] as 1 entry because C wants a fixed-size array, but
* actually it is maxProcs entries long.
* We declare pgprocnos[] as 1 entry because C wants a fixed-size array,
* but actually it is maxProcs entries long.
*/
int pgprocnos[1]; /* VARIABLE LENGTH ARRAY */
int pgprocnos[1]; /* VARIABLE LENGTH ARRAY */
} ProcArrayStruct;
static ProcArrayStruct *procArray;
@@ -282,22 +282,22 @@ ProcArrayAdd(PGPROC *proc)
* locality of references much better. This is useful while traversing the
* ProcArray because there is a increased likelihood of finding the next
* PGPROC structure in the cache.
*
*
* Since the occurrence of adding/removing a proc is much lower than the
* access to the ProcArray itself, the overhead should be marginal
*/
for (index = 0; index < arrayP->numProcs; index++)
{
/*
* If we are the first PGPROC or if we have found our right position in
* the array, break
* If we are the first PGPROC or if we have found our right position
* in the array, break
*/
if ((arrayP->pgprocnos[index] == -1) || (arrayP->pgprocnos[index] > proc->pgprocno))
break;
}
memmove(&arrayP->pgprocnos[index + 1], &arrayP->pgprocnos[index],
(arrayP->numProcs - index) * sizeof (int));
(arrayP->numProcs - index) * sizeof(int));
arrayP->pgprocnos[index] = proc->pgprocno;
arrayP->numProcs++;
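The memmove idiom here is an ordinary sorted-array insert. The same technique as a self-contained sketch, outside the ProcArray context:

    #include <string.h>

    /* Insert 'value' into the sorted array arr[0..*n), shifting the
     * tail one slot to the right, then bump the count. */
    static void
    sorted_insert(int *arr, int *n, int value)
    {
        int         idx;

        for (idx = 0; idx < *n; idx++)
            if (arr[idx] > value)
                break;
        memmove(&arr[idx + 1], &arr[idx], (*n - idx) * sizeof(int));
        arr[idx] = value;
        (*n)++;
    }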
@@ -349,8 +349,8 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
{
/* Keep the PGPROC array sorted. See notes above */
memmove(&arrayP->pgprocnos[index], &arrayP->pgprocnos[index + 1],
(arrayP->numProcs - index - 1) * sizeof (int));
arrayP->pgprocnos[arrayP->numProcs - 1] = -1; /* for debugging */
(arrayP->numProcs - index - 1) * sizeof(int));
arrayP->pgprocnos[arrayP->numProcs - 1] = -1; /* for debugging */
arrayP->numProcs--;
LWLockRelease(ProcArrayLock);
return;
@@ -380,7 +380,7 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
void
ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
{
PGXACT *pgxact = &allPgXact[proc->pgprocno];
PGXACT *pgxact = &allPgXact[proc->pgprocno];
if (TransactionIdIsValid(latestXid))
{
@@ -399,7 +399,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
pgxact->inCommit = false; /* be sure this is cleared in abort */
pgxact->inCommit = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */
@@ -426,7 +426,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
pgxact->inCommit = false; /* be sure this is cleared in abort */
pgxact->inCommit = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
Assert(pgxact->nxids == 0);
@@ -446,7 +446,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
void
ProcArrayClearTransaction(PGPROC *proc)
{
PGXACT *pgxact = &allPgXact[proc->pgprocno];
PGXACT *pgxact = &allPgXact[proc->pgprocno];
/*
* We can skip locking ProcArrayLock here, because this action does not
@@ -511,9 +511,9 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
/*
* If our initial RunningTransactionsData had an overflowed snapshot then
* we knew we were missing some subxids from our snapshot. If we continue
* to see overflowed snapshots then we might never be able to start up,
* so we make another test to see if our snapshot is now valid. We know
* that the missing subxids are equal to or earlier than nextXid. After we
* to see overflowed snapshots then we might never be able to start up, so
* we make another test to see if our snapshot is now valid. We know that
* the missing subxids are equal to or earlier than nextXid. After we
* initialise we continue to apply changes during recovery, so once the
* oldestRunningXid is later than the nextXid from the initial snapshot we
* know that we no longer have missing information and can mark the
@@ -522,8 +522,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
if (standbyState == STANDBY_SNAPSHOT_PENDING)
{
/*
* If the snapshot isn't overflowed or if its empty we can
* reset our pending state and use this snapshot instead.
* If the snapshot isn't overflowed or if its empty we can reset our
* pending state and use this snapshot instead.
*/
if (!running->subxid_overflow || running->xcnt == 0)
{
@@ -545,8 +545,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
}
else
elog(trace_recovery(DEBUG1),
"recovery snapshot waiting for non-overflowed snapshot or "
"until oldest active xid on standby is at least %u (now %u)",
"recovery snapshot waiting for non-overflowed snapshot or "
"until oldest active xid on standby is at least %u (now %u)",
standbySnapshotPendingXmin,
running->oldestRunningXid);
return;
@@ -673,7 +673,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
* ShmemVariableCache->nextXid must be beyond any observed xid.
*
* We don't expect anyone else to modify nextXid, hence we don't need to
* hold a lock while examining it. We still acquire the lock to modify
* hold a lock while examining it. We still acquire the lock to modify
* it, though.
*/
nextXid = latestObservedXid;
@@ -861,10 +861,10 @@ TransactionIdIsInProgress(TransactionId xid)
/* No shortcuts, gotta grovel through the array */
for (i = 0; i < arrayP->numProcs; i++)
{
int pgprocno = arrayP->pgprocnos[i];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
int pgprocno = arrayP->pgprocnos[i];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
/* Ignore my own proc --- dealt with it above */
if (proc == MyProc)
@@ -1017,10 +1017,10 @@ TransactionIdIsActive(TransactionId xid)
for (i = 0; i < arrayP->numProcs; i++)
{
int pgprocno = arrayP->pgprocnos[i];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
int pgprocno = arrayP->pgprocnos[i];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
/* Fetch xid just once - see GetNewTransactionId */
pxid = pgxact->xid;
@@ -1115,9 +1115,9 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
if (ignoreVacuum && (pgxact->vacuumFlags & PROC_IN_VACUUM))
continue;
@@ -1141,7 +1141,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
* have an Xmin but not (yet) an Xid; conversely, if it has an
* Xid, that could determine some not-yet-set Xmin.
*/
xid = pgxact->xmin; /* Fetch just once */
xid = pgxact->xmin; /* Fetch just once */
if (TransactionIdIsNormal(xid) &&
TransactionIdPrecedes(xid, result))
result = xid;
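The recurring "fetch just once" comments mark a deliberate idiom: read a concurrently mutable shared field through a volatile pointer into a local exactly once, so two uses of it cannot observe two different values. In isolation:

    volatile PGXACT *pgxact = &allPgXact[pgprocno];
    TransactionId xid = pgxact->xmin;   /* single volatile read */

    if (TransactionIdIsNormal(xid) &&
        TransactionIdPrecedes(xid, result))
        result = xid;                   /* both tests saw the same value */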
@@ -1318,7 +1318,7 @@ GetSnapshotData(Snapshot snapshot)
if (!snapshot->takenDuringRecovery)
{
int *pgprocnos = arrayP->pgprocnos;
int *pgprocnos = arrayP->pgprocnos;
int numProcs;
/*
@@ -1329,32 +1329,32 @@ GetSnapshotData(Snapshot snapshot)
numProcs = arrayP->numProcs;
for (index = 0; index < numProcs; index++)
{
int pgprocno = pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
int pgprocno = pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
/* Ignore procs running LAZY VACUUM */
if (pgxact->vacuumFlags & PROC_IN_VACUUM)
continue;
/* Update globalxmin to be the smallest valid xmin */
xid = pgxact->xmin; /* fetch just once */
xid = pgxact->xmin; /* fetch just once */
if (TransactionIdIsNormal(xid) &&
NormalTransactionIdPrecedes(xid, globalxmin))
globalxmin = xid;
globalxmin = xid;
/* Fetch xid just once - see GetNewTransactionId */
xid = pgxact->xid;
/*
* If the transaction has no XID assigned, we can skip it; it won't
* have sub-XIDs either. If the XID is >= xmax, we can also skip
* it; such transactions will be treated as running anyway (and any
* sub-XIDs will also be >= xmax).
* If the transaction has no XID assigned, we can skip it; it
* won't have sub-XIDs either. If the XID is >= xmax, we can also
* skip it; such transactions will be treated as running anyway
* (and any sub-XIDs will also be >= xmax).
*/
if (!TransactionIdIsNormal(xid)
|| !NormalTransactionIdPrecedes(xid, xmax))
continue;
continue;
/*
* We don't include our own XIDs (if any) in the snapshot, but we
@@ -1394,6 +1394,7 @@ GetSnapshotData(Snapshot snapshot)
if (nxids > 0)
{
volatile PGPROC *proc = &allProcs[pgprocno];
memcpy(snapshot->subxip + subcount,
(void *) proc->subxids.xids,
nxids * sizeof(TransactionId));
@@ -1498,23 +1499,23 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
/* Ignore procs running LAZY VACUUM */
if (pgxact->vacuumFlags & PROC_IN_VACUUM)
continue;
xid = pgxact->xid; /* fetch just once */
xid = pgxact->xid; /* fetch just once */
if (xid != sourcexid)
continue;
/*
* We check the transaction's database ID for paranoia's sake: if
* it's in another DB then its xmin does not cover us. Caller should
* have detected this already, so we just treat any funny cases as
* We check the transaction's database ID for paranoia's sake: if it's
* in another DB then its xmin does not cover us. Caller should have
* detected this already, so we just treat any funny cases as
* "transaction not found".
*/
if (proc->databaseId != MyDatabaseId)
@@ -1523,7 +1524,7 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
/*
* Likewise, let's just make real sure its xmin does cover us.
*/
xid = pgxact->xmin; /* fetch just once */
xid = pgxact->xmin; /* fetch just once */
if (!TransactionIdIsNormal(xid) ||
!TransactionIdPrecedesOrEquals(xid, xmin))
continue;
@@ -1531,8 +1532,8 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
/*
* We're good. Install the new xmin. As in GetSnapshotData, set
* TransactionXmin too. (Note that because snapmgr.c called
* GetSnapshotData first, we'll be overwriting a valid xmin here,
* so we don't check that.)
* GetSnapshotData first, we'll be overwriting a valid xmin here, so
* we don't check that.)
*/
MyPgXact->xmin = TransactionXmin = xmin;
@@ -1626,7 +1627,7 @@ GetRunningTransactionData(void)
*/
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
@@ -1726,7 +1727,7 @@ GetOldestActiveTransactionId(void)
*/
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
int pgprocno = arrayP->pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
@@ -1783,7 +1784,7 @@ GetTransactionsInCommit(TransactionId **xids_p)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
int pgprocno = arrayP->pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
@@ -1820,9 +1821,9 @@ HaveTransactionsInCommit(TransactionId *xids, int nxids)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
int pgprocno = arrayP->pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
/* Fetch xid just once - see GetNewTransactionId */
pxid = pgxact->xid;
@@ -1911,9 +1912,9 @@ BackendXidGetPid(TransactionId xid)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
if (pgxact->xid == xid)
{
@@ -1981,9 +1982,9 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
if (proc == MyProc)
continue;
@@ -2078,9 +2079,9 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
/* Exclude prepared transactions */
if (proc->pid == 0)
@@ -2134,9 +2135,9 @@ CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
VirtualTransactionId procvxid;
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
VirtualTransactionId procvxid;
GET_VXID_FROM_PGPROC(procvxid, *proc);
@@ -2189,9 +2190,9 @@ MinimumActiveBackends(int min)
*/
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
/*
* Since we're not holding a lock, need to check that the pointer is
@@ -2237,7 +2238,7 @@ CountDBBackends(Oid databaseid)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
if (proc->pid == 0)
@@ -2267,7 +2268,7 @@ CancelDBBackends(Oid databaseid, ProcSignalReason sigmode, bool conflictPending)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
if (databaseid == InvalidOid || proc->databaseId == databaseid)
@@ -2306,7 +2307,7 @@ CountUserBackends(Oid roleid)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
if (proc->pid == 0)
@@ -2367,7 +2368,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
for (index = 0; index < arrayP->numProcs; index++)
{
int pgprocno = arrayP->pgprocnos[index];
int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];

src/backend/storage/ipc/sinval.c

@@ -22,7 +22,7 @@
#include "utils/inval.h"
uint64 SharedInvalidMessageCounter;
uint64 SharedInvalidMessageCounter;
/*

src/backend/storage/ipc/sinvaladt.c

@@ -467,15 +467,16 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
}
/*
* Now that the maxMsgNum change is globally visible, we give
* everyone a swift kick to make sure they read the newly added
* messages. Releasing SInvalWriteLock will enforce a full memory
* barrier, so these (unlocked) changes will be committed to memory
* before we exit the function.
* Now that the maxMsgNum change is globally visible, we give everyone
* a swift kick to make sure they read the newly added messages.
* Releasing SInvalWriteLock will enforce a full memory barrier, so
* these (unlocked) changes will be committed to memory before we exit
* the function.
*/
for (i = 0; i < segP->lastBackend; i++)
{
ProcState *stateP = &segP->procState[i];
stateP->hasMessages = true;
}
@@ -524,12 +525,12 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
/*
* Before starting to take locks, do a quick, unlocked test to see whether
* there can possibly be anything to read. On a multiprocessor system,
* it's possible that this load could migrate backwards and occur before we
* actually enter this function, so we might miss a sinval message that
* was just added by some other processor. But they can't migrate
* backwards over a preceding lock acquisition, so it should be OK. If
* we haven't acquired a lock preventing against further relevant
* there can possibly be anything to read. On a multiprocessor system,
* it's possible that this load could migrate backwards and occur before
* we actually enter this function, so we might miss a sinval message that
* was just added by some other processor. But they can't migrate
* backwards over a preceding lock acquisition, so it should be OK. If we
* haven't acquired a lock preventing against further relevant
* invalidations, any such occurrence is not much different than if the
* invalidation had arrived slightly later in the first place.
*/
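The read side pairs that unlocked write-side kick with an unlocked fast exit; a paraphrased sketch of what the pre-test in SIGetDataEntries plausibly looks like:

    /* Unlocked fast path: if nobody has flagged new messages for us,
     * return without taking SInvalReadLock at all. */
    stateP = &segP->procState[MyBackendId - 1];
    if (!stateP->hasMessages)
        return 0;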

src/backend/storage/ipc/standby.c

@@ -467,7 +467,7 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason)
* determine whether an actual deadlock condition is present: the lock we
* need to wait for might be unrelated to any held by the Startup process.
* Sooner or later, this mechanism should get ripped out in favor of somehow
* accounting for buffer locks in DeadLockCheck(). However, errors here
* accounting for buffer locks in DeadLockCheck(). However, errors here
* seem to be very low-probability in practice, so for now it's not worth
* the trouble.
*/
@@ -658,7 +658,7 @@ StandbyReleaseOldLocks(int nxids, TransactionId *xids)
for (cell = list_head(RecoveryLockList); cell; cell = next)
{
xl_standby_lock *lock = (xl_standby_lock *) lfirst(cell);
bool remove = false;
bool remove = false;
next = lnext(cell);
@@ -668,8 +668,8 @@ StandbyReleaseOldLocks(int nxids, TransactionId *xids)
remove = false;
else
{
int i;
bool found = false;
int i;
bool found = false;
for (i = 0; i < nxids; i++)
{
@@ -1009,8 +1009,8 @@ LogAccessExclusiveLockPrepare(void)
* RecordTransactionAbort() do not optimise away the transaction
* completion record which recovery relies upon to release locks. It's a
* hack, but for a corner case not worth adding code for into the main
* commit path. Second, we must assign an xid before the lock is
* recorded in shared memory, otherwise a concurrently executing
* commit path. Second, we must assign an xid before the lock is recorded
* in shared memory, otherwise a concurrently executing
* GetRunningTransactionLocks() might see a lock associated with an
* InvalidTransactionId which we later assert cannot happen.
*/

src/backend/storage/lmgr/lock.c

@@ -164,7 +164,7 @@ typedef struct TwoPhaseLockRecord
* our locks to the primary lock table, but it can never be lower than the
* real value, since only we can acquire locks on our own behalf.
*/
static int FastPathLocalUseCount = 0;
static int FastPathLocalUseCount = 0;
/* Macros for manipulating proc->fpLockBits */
#define FAST_PATH_BITS_PER_SLOT 3
@@ -186,7 +186,7 @@ static int FastPathLocalUseCount = 0;
/*
* The fast-path lock mechanism is concerned only with relation locks on
* unshared relations by backends bound to a database. The fast-path
* unshared relations by backends bound to a database. The fast-path
* mechanism exists mostly to accelerate acquisition and release of locks
* that rarely conflict. Because ShareUpdateExclusiveLock is
* self-conflicting, it can't use the fast-path mechanism; but it also does
@@ -207,7 +207,7 @@ static int FastPathLocalUseCount = 0;
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
const LOCKTAG *locktag, uint32 hashcode);
const LOCKTAG *locktag, uint32 hashcode);
static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
static void VirtualXactLockTableCleanup(void);
@@ -234,8 +234,8 @@ static void VirtualXactLockTableCleanup(void);
typedef struct
{
slock_t mutex;
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
slock_t mutex;
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
} FastPathStrongRelationLockData;
FastPathStrongRelationLockData *FastPathStrongRelationLocks;
@@ -339,7 +339,7 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
static uint32 proclock_hash(const void *key, Size keysize);
static void RemoveLocalLock(LOCALLOCK *locallock);
static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
static void FinishStrongLockAcquire(void);
@@ -425,7 +425,7 @@ InitLocks(void)
*/
FastPathStrongRelationLocks =
ShmemInitStruct("Fast Path Strong Relation Lock Data",
sizeof(FastPathStrongRelationLockData), &found);
sizeof(FastPathStrongRelationLockData), &found);
if (!found)
SpinLockInit(&FastPathStrongRelationLocks->mutex);
@@ -713,12 +713,12 @@ LockAcquireExtended(const LOCKTAG *locktag,
if (EligibleForRelationFastPath(locktag, lockmode)
&& FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
{
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
bool acquired;
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
bool acquired;
/*
* LWLockAcquire acts as a memory sequencing point, so it's safe
* to assume that any strong locker whose increment to
* LWLockAcquire acts as a memory sequencing point, so it's safe to
* assume that any strong locker whose increment to
* FastPathStrongRelationLocks->counts becomes visible after we test
* it has yet to begin to transfer fast-path locks.
*/
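Concretely, the test the comment refers to sits between acquiring and releasing the backend's own LWLock; a paraphrased sketch of the sequence:

    LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
    if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
        acquired = false;           /* a strong locker got there first */
    else
        acquired = FastPathGrantRelationLock(locktag->locktag_field2,
                                             lockmode);
    LWLockRelease(MyProc->backendLock);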
@@ -744,7 +744,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
*/
if (ConflictsWithRelationFastPath(locktag, lockmode))
{
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
BeginStrongLockAcquire(locallock, fasthashcode);
if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
@@ -762,9 +762,9 @@ LockAcquireExtended(const LOCKTAG *locktag,
}
/*
* We didn't find the lock in our LOCALLOCK table, and we didn't manage
* to take it via the fast-path, either, so we've got to mess with the
* shared lock table.
* We didn't find the lock in our LOCALLOCK table, and we didn't manage to
* take it via the fast-path, either, so we've got to mess with the shared
* lock table.
*/
partitionLock = LockHashPartitionLock(hashcode);
@@ -1102,7 +1102,8 @@ RemoveLocalLock(LOCALLOCK *locallock)
locallock->lockOwners = NULL;
if (locallock->holdsStrongLockCount)
{
uint32 fasthashcode;
uint32 fasthashcode;
fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
@@ -1367,9 +1368,9 @@ BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Assert(locallock->holdsStrongLockCount == FALSE);
/*
* Adding to a memory location is not atomic, so we take a
* spinlock to ensure we don't collide with someone else trying
* to bump the count at the same time.
* Adding to a memory location is not atomic, so we take a spinlock to
* ensure we don't collide with someone else trying to bump the count at
* the same time.
*
* XXX: It might be worth considering using an atomic fetch-and-add
* instruction here, on architectures where that is supported.
@@ -1399,9 +1400,9 @@ FinishStrongLockAcquire(void)
void
AbortStrongLockAcquire(void)
{
uint32 fasthashcode;
uint32 fasthashcode;
LOCALLOCK *locallock = StrongLockInProgress;
if (locallock == NULL)
return;
@@ -1699,11 +1700,11 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
if (EligibleForRelationFastPath(locktag, lockmode)
&& FastPathLocalUseCount > 0)
{
bool released;
bool released;
/*
* We might not find the lock here, even if we originally entered
* it here. Another backend may have moved it to the main table.
* We might not find the lock here, even if we originally entered it
* here. Another backend may have moved it to the main table.
*/
LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
released = FastPathUnGrantRelationLock(locktag->locktag_field2,
@@ -1816,8 +1817,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
#endif
/*
* Get rid of our fast-path VXID lock, if appropriate. Note that this
* is the only way that the lock we hold on our own VXID can ever get
* Get rid of our fast-path VXID lock, if appropriate. Note that this is
* the only way that the lock we hold on our own VXID can ever get
* released: it is always and only released when a toplevel transaction
* ends.
*/
@@ -1898,8 +1899,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* If we don't currently hold the LWLock that protects our
* fast-path data structures, we must acquire it before
* attempting to release the lock via the fast-path.
* fast-path data structures, we must acquire it before attempting
* to release the lock via the fast-path.
*/
if (!have_fast_path_lwlock)
{
@@ -1917,7 +1918,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Our lock, originally taken via the fast path, has been
* transferred to the main lock table. That's going to require
* transferred to the main lock table. That's going to require
* some extra work, so release our fast-path lock before starting.
*/
LWLockRelease(MyProc->backendLock);
@@ -1926,7 +1927,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Now dump the lock. We haven't got a pointer to the LOCK or
* PROCLOCK in this case, so we have to handle this a bit
* differently than a normal lock release. Unfortunately, this
* differently than a normal lock release. Unfortunately, this
* requires an extra LWLock acquire-and-release cycle on the
* partitionLock, but hopefully it shouldn't happen often.
*/
@@ -2268,16 +2269,16 @@ FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
*/
static bool
FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
uint32 hashcode)
uint32 hashcode)
{
LWLockId partitionLock = LockHashPartitionLock(hashcode);
Oid relid = locktag->locktag_field2;
uint32 i;
LWLockId partitionLock = LockHashPartitionLock(hashcode);
Oid relid = locktag->locktag_field2;
uint32 i;
/*
* Every PGPROC that can potentially hold a fast-path lock is present
* in ProcGlobal->allProcs. Prepared transactions are not, but
* any outstanding fast-path locks held by prepared transactions are
* Every PGPROC that can potentially hold a fast-path lock is present in
* ProcGlobal->allProcs. Prepared transactions are not, but any
* outstanding fast-path locks held by prepared transactions are
* transferred to the main lock table.
*/
for (i = 0; i < ProcGlobal->allProcCount; i++)
@@ -2288,19 +2289,19 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
LWLockAcquire(proc->backendLock, LW_EXCLUSIVE);
/*
* If the target backend isn't referencing the same database as we are,
* then we needn't examine the individual relation IDs at all; none of
* them can be relevant.
* If the target backend isn't referencing the same database as we
* are, then we needn't examine the individual relation IDs at all;
* none of them can be relevant.
*
* proc->databaseId is set at backend startup time and never changes
* thereafter, so it might be safe to perform this test before
* acquiring proc->backendLock. In particular, it's certainly safe to
* assume that if the target backend holds any fast-path locks, it must
* have performed a memory-fencing operation (in particular, an LWLock
* acquisition) since setting proc->databaseId. However, it's less
* clear that our backend is certain to have performed a memory fencing
* operation since the other backend set proc->databaseId. So for now,
* we test it after acquiring the LWLock just to be safe.
* assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
* LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory
* fencing operation since the other backend set proc->databaseId. So
* for now, we test it after acquiring the LWLock just to be safe.
*/
if (proc->databaseId != MyDatabaseId)
{
@@ -2319,7 +2320,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
/* Find or create lock object. */
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
lockmode < FAST_PATH_LOCKNUMBER_OFFSET+FAST_PATH_BITS_PER_SLOT;
lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
++lockmode)
{
PROCLOCK *proclock;
@@ -2346,17 +2347,17 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
/*
* FastPathGetLockEntry
* Return the PROCLOCK for a lock originally taken via the fast-path,
* transferring it to the primary lock table if necessary.
* transferring it to the primary lock table if necessary.
*/
static PROCLOCK *
FastPathGetRelationLockEntry(LOCALLOCK *locallock)
{
LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
LOCKTAG *locktag = &locallock->tag.lock;
PROCLOCK *proclock = NULL;
LWLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
Oid relid = locktag->locktag_field2;
uint32 f;
LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
LOCKTAG *locktag = &locallock->tag.lock;
PROCLOCK *proclock = NULL;
LWLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
Oid relid = locktag->locktag_field2;
uint32 f;
LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
@@ -2383,7 +2384,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase max_locks_per_transaction.")));
errhint("You might need to increase max_locks_per_transaction.")));
}
GrantLock(proclock->tag.myLock, proclock, lockmode);
FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
@@ -2397,7 +2398,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
if (proclock == NULL)
{
LOCK *lock;
PROCLOCKTAG proclocktag;
PROCLOCKTAG proclocktag;
uint32 proclock_hashcode;
LWLockAcquire(partitionLock, LW_SHARED);
@@ -2495,15 +2496,15 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
{
int i;
Oid relid = locktag->locktag_field2;
VirtualTransactionId vxid;
VirtualTransactionId vxid;
/*
* Iterate over relevant PGPROCs. Anything held by a prepared
* transaction will have been transferred to the primary lock table,
* so we need not worry about those. This is all a bit fuzzy,
* because new locks could be taken after we've visited a particular
* partition, but the callers had better be prepared to deal with
* that anyway, since the locks could equally well be taken between the
* so we need not worry about those. This is all a bit fuzzy, because
* new locks could be taken after we've visited a particular
* partition, but the callers had better be prepared to deal with that
* anyway, since the locks could equally well be taken between the
* time we return the value and the time the caller does something
* with it.
*/
@@ -2520,8 +2521,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
/*
* If the target backend isn't referencing the same database as we
* are, then we needn't examine the individual relation IDs at all;
* none of them can be relevant.
* are, then we needn't examine the individual relation IDs at
* all; none of them can be relevant.
*
* See FastPathTransferLocks() for discussion of why we do this
* test after acquiring the lock.
@@ -2545,9 +2546,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
/*
* There can only be one entry per relation, so if we found
* it and it doesn't conflict, we can skip the rest of the
* slots.
* There can only be one entry per relation, so if we found it
* and it doesn't conflict, we can skip the rest of the slots.
*/
if ((lockmask & conflictMask) == 0)
break;
@@ -2621,7 +2621,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
*/
if (VirtualTransactionIdIsValid(vxid))
{
int i;
int i;
/* Avoid duplicate entries. */
for (i = 0; i < fast_count; ++i)
@@ -2650,7 +2650,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
* responsibility to verify that this is a sane thing to do. (For example, it
* would be bad to release a lock here if there might still be a LOCALLOCK
* object with pointers to it.)
*
*
* We currently use this in two situations: first, to release locks held by
* prepared transactions on commit (see lock_twophase_postcommit); and second,
* to release locks taken via the fast-path, transferred to the main hash
@@ -2725,13 +2725,14 @@ LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
LWLockRelease(partitionLock);
/*
/*
* Decrement strong lock count. This logic is needed only for 2PC.
*/
if (decrement_strong_lock_count
&& ConflictsWithRelationFastPath(&lock->tag, lockmode))
{
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
FastPathStrongRelationLocks->count[fasthashcode]--;
SpinLockRelease(&FastPathStrongRelationLocks->mutex);
@@ -2760,8 +2761,8 @@ AtPrepare_Locks(void)
/*
* For the most part, we don't need to touch shared memory for this ---
* all the necessary state information is in the locallock table.
* Fast-path locks are an exception, however: we move any such locks
* to the main table before allowing PREPARE TRANSACTION to succeed.
* Fast-path locks are an exception, however: we move any such locks to
* the main table before allowing PREPARE TRANSACTION to succeed.
*/
hash_seq_init(&status, LockMethodLocalHash);
@@ -2799,7 +2800,7 @@ AtPrepare_Locks(void)
continue;
/*
* If we have both session- and transaction-level locks, fail. This
* If we have both session- and transaction-level locks, fail. This
* should never happen with regular locks, since we only take those at
* session level in some special operations like VACUUM. It's
* possible to hit this with advisory locks, though.
@@ -2808,7 +2809,7 @@ AtPrepare_Locks(void)
* the transactional hold to the prepared xact. However, that would
* require two PROCLOCK objects, and we cannot be sure that another
* PROCLOCK will be available when it comes time for PostPrepare_Locks
* to do the deed. So for now, we error out while we can still do so
* to do the deed. So for now, we error out while we can still do so
* safely.
*/
if (haveSessionLock)
@@ -2819,7 +2820,8 @@ AtPrepare_Locks(void)
/*
* If the local lock was taken via the fast-path, we need to move it
* to the primary lock table, or just get a pointer to the existing
* primary lock table entry if by chance it's already been transferred.
* primary lock table entry if by chance it's already been
* transferred.
*/
if (locallock->proclock == NULL)
{
@@ -2829,8 +2831,8 @@ AtPrepare_Locks(void)
/*
* Arrange to not release any strong lock count held by this lock
* entry. We must retain the count until the prepared transaction
* is committed or rolled back.
* entry. We must retain the count until the prepared transaction is
* committed or rolled back.
*/
locallock->holdsStrongLockCount = FALSE;
@@ -3114,12 +3116,12 @@ GetLockStatusData(void)
/*
* First, we iterate through the per-backend fast-path arrays, locking
* them one at a time. This might produce an inconsistent picture of the
* them one at a time. This might produce an inconsistent picture of the
* system state, but taking all of those LWLocks at the same time seems
* impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
* matter too much, because none of these locks can be involved in lock
* conflicts anyway - anything that might must be present in the main
* lock table.
* conflicts anyway - anything that might must be present in the main lock
* table.
*/
for (i = 0; i < ProcGlobal->allProcCount; ++i)
{
@@ -3130,7 +3132,7 @@ GetLockStatusData(void)
for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
{
LockInstanceData *instance;
LockInstanceData *instance;
uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
/* Skip unallocated slots. */
@@ -3159,8 +3161,8 @@ GetLockStatusData(void)
if (proc->fpVXIDLock)
{
VirtualTransactionId vxid;
LockInstanceData *instance;
VirtualTransactionId vxid;
LockInstanceData *instance;
if (el >= els)
{
@@ -3219,7 +3221,7 @@ GetLockStatusData(void)
{
PGPROC *proc = proclock->tag.myProc;
LOCK *lock = proclock->tag.myLock;
LockInstanceData *instance = &data->locks[el];
LockInstanceData *instance = &data->locks[el];
memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
instance->holdMask = proclock->holdMask;
@@ -3304,10 +3306,10 @@ GetRunningTransactionLocks(int *nlocks)
TransactionId xid = pgxact->xid;
/*
* Don't record locks for transactions if we know they have already
* issued their WAL record for commit but not yet released lock.
* It is still possible that we see locks held by already complete
* transactions, if they haven't yet zeroed their xids.
* Don't record locks for transactions if we know they have
* already issued their WAL record for commit but not yet released
* lock. It is still possible that we see locks held by already
* complete transactions, if they haven't yet zeroed their xids.
*/
if (!TransactionIdIsValid(xid))
continue;
@@ -3607,13 +3609,14 @@ lock_twophase_recover(TransactionId xid, uint16 info,
*/
GrantLock(lock, proclock, lockmode);
/*
/*
* Bump strong lock count, to make sure any fast-path lock requests won't
* be granted without consulting the primary lock table.
*/
if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
{
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
FastPathStrongRelationLocks->count[fasthashcode]++;
SpinLockRelease(&FastPathStrongRelationLocks->mutex);
@@ -3701,7 +3704,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info,
* as MyProc->lxid, you might wonder if we really need both. The
* difference is that MyProc->lxid is set and cleared unlocked, and
* examined by procarray.c, while fpLocalTransactionId is protected by
* backendLock and is used only by the locking subsystem. Doing it this
* backendLock and is used only by the locking subsystem. Doing it this
* way makes it easier to verify that there are no funny race conditions.
*
* We don't bother recording this lock in the local lock table, since it's
@@ -3734,8 +3737,8 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid)
static void
VirtualXactLockTableCleanup()
{
bool fastpath;
LocalTransactionId lxid;
bool fastpath;
LocalTransactionId lxid;
Assert(MyProc->backendId != InvalidBackendId);
@@ -3757,8 +3760,8 @@ VirtualXactLockTableCleanup()
*/
if (!fastpath && LocalTransactionIdIsValid(lxid))
{
VirtualTransactionId vxid;
LOCKTAG locktag;
VirtualTransactionId vxid;
LOCKTAG locktag;
vxid.backendId = MyBackendId;
vxid.localTransactionId = lxid;
@@ -3766,7 +3769,7 @@ VirtualXactLockTableCleanup()
LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
&locktag, ExclusiveLock, false);
}
}
}
/*
@@ -3802,8 +3805,8 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
/*
* We must acquire this lock before checking the backendId and lxid
* against the ones we're waiting for. The target backend will only
* set or clear lxid while holding this lock.
* against the ones we're waiting for. The target backend will only set
* or clear lxid while holding this lock.
*/
LWLockAcquire(proc->backendLock, LW_EXCLUSIVE);
@@ -3841,7 +3844,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase max_locks_per_transaction.")));
errhint("You might need to increase max_locks_per_transaction.")));
GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
proc->fpVXIDLock = false;
}

src/backend/storage/lmgr/lwlock.c

@@ -574,7 +574,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
/*
* LWLockAcquireOrWait - Acquire lock, or wait until it's free
*
* The semantics of this function are a bit funky. If the lock is currently
* The semantics of this function are a bit funky. If the lock is currently
* free, it is acquired in the given mode, and the function returns true. If
* the lock isn't immediately free, the function waits until it is released
* and returns false, but does not acquire the lock.
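A usage sketch of those semantics, in the WAL-flush style this primitive suits; do_flush(), work_already_done(), and retry() are hypothetical helpers:

    if (LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
    {
        do_flush();                 /* lock was free: do the work ourselves */
        LWLockRelease(WALWriteLock);
    }
    else
    {
        /* Lock was held and has now been released: the previous holder
         * may already have covered our request, so re-check first. */
        if (!work_already_done())
            retry();
    }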
@@ -769,7 +769,7 @@ LWLockRelease(LWLockId lockid)
/*
* Remove the to-be-awakened PGPROCs from the queue.
*/
bool releaseOK = true;
bool releaseOK = true;
proc = head;
@@ -797,6 +797,7 @@ LWLockRelease(LWLockId lockid)
/* proc is now the last PGPROC to be released */
lock->head = proc->lwWaitLink;
proc->lwWaitLink = NULL;
/*
* Prevent additional wakeups until retryer gets to run. Backends
* that are just waiting for the lock to become free don't retry

src/backend/storage/lmgr/predicate.c

@@ -1509,7 +1509,7 @@ GetSafeSnapshot(Snapshot origSnapshot)
* one passed to it, but we avoid assuming that here.
*/
snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
InvalidTransactionId);
InvalidTransactionId);
if (MySerializableXact == InvalidSerializableXact)
return snapshot; /* no concurrent r/w xacts; it's safe */
@@ -1600,9 +1600,9 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
/*
* We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
* import snapshots, since there's no way to wait for a safe snapshot
* when we're using the snap we're told to. (XXX instead of throwing
* an error, we could just ignore the XactDeferrable flag?)
* import snapshots, since there's no way to wait for a safe snapshot when
* we're using the snap we're told to. (XXX instead of throwing an error,
* we could just ignore the XactDeferrable flag?)
*/
if (XactReadOnly && XactDeferrable)
ereport(ERROR,
@@ -1646,11 +1646,11 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
*
* We must hold SerializableXactHashLock when taking/checking the snapshot
* to avoid race conditions, for much the same reasons that
* GetSnapshotData takes the ProcArrayLock. Since we might have to release
* SerializableXactHashLock to call SummarizeOldestCommittedSxact, this
* means we have to create the sxact first, which is a bit annoying (in
* particular, an elog(ERROR) in procarray.c would cause us to leak the
* sxact). Consider refactoring to avoid this.
* GetSnapshotData takes the ProcArrayLock. Since we might have to
* release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
* this means we have to create the sxact first, which is a bit annoying
* (in particular, an elog(ERROR) in procarray.c would cause us to leak
* the sxact). Consider refactoring to avoid this.
*/
#ifdef TEST_OLDSERXID
SummarizeOldestCommittedSxact();
@@ -1678,8 +1678,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not import the requested snapshot"),
errdetail("The source transaction %u is not running anymore.",
sourcexid)));
errdetail("The source transaction %u is not running anymore.",
sourcexid)));
}
/*
@@ -2704,8 +2704,8 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
newpredlock = (PREDICATELOCK *)
hash_search_with_hash_value(PredicateLockHash,
&newpredlocktag,
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
newtargettaghash),
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
newtargettaghash),
HASH_ENTER_NULL,
&found);
if (!newpredlock)
@@ -2945,8 +2945,8 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
newpredlock = (PREDICATELOCK *)
hash_search_with_hash_value(PredicateLockHash,
&newpredlocktag,
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
heaptargettaghash),
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
heaptargettaghash),
HASH_ENTER,
&found);
if (!found)
@@ -3253,6 +3253,7 @@ ReleasePredicateLocks(bool isCommit)
*/
MySerializableXact->flags |= SXACT_FLAG_DOOMED;
MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;
/*
* If the transaction was previously prepared, but is now failing due
* to a ROLLBACK PREPARED or (hopefully very rare) error after the
@@ -3544,9 +3545,9 @@ ClearOldPredicateLocks(void)
else
{
/*
* A read-write transaction can only be partially
* cleared. We need to keep the SERIALIZABLEXACT but
* can release the SIREAD locks and conflicts in.
* A read-write transaction can only be partially cleared. We
* need to keep the SERIALIZABLEXACT but can release the
* SIREAD locks and conflicts in.
*/
ReleaseOneSerializableXact(finishedSxact, true, false);
}
@@ -4003,7 +4004,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to read/write dependencies among transactions"),
errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
errhint("The transaction might succeed if retried.")));
}
}
@@ -4507,7 +4508,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(writer)
|| t2->prepareSeqNo <= writer->commitSeqNo)
&& (!SxactIsReadOnly(reader)
|| t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
|| t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
{
failure = true;
break;
@@ -4552,7 +4553,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(t0)
|| t0->commitSeqNo >= writer->prepareSeqNo)
&& (!SxactIsReadOnly(t0)
|| t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
|| t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
{
failure = true;
break;
@@ -4730,10 +4731,10 @@ AtPrepare_PredicateLocks(void)
xactRecord->flags = MySerializableXact->flags;
/*
* Note that we don't include the list of conflicts in our out in
* the statefile, because new conflicts can be added even after the
* transaction prepares. We'll just make a conservative assumption
* during recovery instead.
* Note that we don't include the list of conflicts in our out in the
* statefile, because new conflicts can be added even after the
* transaction prepares. We'll just make a conservative assumption during
* recovery instead.
*/
RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
@@ -4891,10 +4892,9 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
}
/*
* We don't know whether the transaction had any conflicts or
* not, so we'll conservatively assume that it had both a
* conflict in and a conflict out, and represent that with the
* summary conflict flags.
* We don't know whether the transaction had any conflicts or not, so
* we'll conservatively assume that it had both a conflict in and a
* conflict out, and represent that with the summary conflict flags.
*/
SHMQueueInit(&(sxact->outConflicts));
SHMQueueInit(&(sxact->inConflicts));

src/backend/storage/lmgr/proc.c

@@ -70,9 +70,9 @@ PGXACT *MyPgXact = NULL;
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
/* Pointers to shared-memory structures */
PROC_HDR *ProcGlobal = NULL;
PROC_HDR *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
PGPROC *PreparedXactProcs = NULL;
PGPROC *PreparedXactProcs = NULL;
/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;
@@ -222,9 +222,9 @@ InitProcGlobal(void)
/* Common initialization for all PGPROCs, regardless of type. */
/*
* Set up per-PGPROC semaphore, latch, and backendLock. Prepared
* xact dummy PGPROCs don't need these though - they're never
* associated with a real process
* Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
* dummy PGPROCs don't need these though - they're never associated
* with a real process
*/
if (i < MaxBackends + NUM_AUXILIARY_PROCS)
{
@@ -235,12 +235,12 @@ InitProcGlobal(void)
procs[i].pgprocno = i;
/*
* Newly created PGPROCs for normal backends or for autovacuum must
* be queued up on the appropriate free list. Because there can only
* ever be a small, fixed number of auxiliary processes, no free
* list is used in that case; InitAuxiliaryProcess() instead uses a
* linear search. PGPROCs for prepared transactions are added to a
* free list by TwoPhaseShmemInit().
* Newly created PGPROCs for normal backends or for autovacuum must be
* queued up on the appropriate free list. Because there can only
* ever be a small, fixed number of auxiliary processes, no free list
* is used in that case; InitAuxiliaryProcess() instead uses a linear
* search. PGPROCs for prepared transactions are added to a free list
* by TwoPhaseShmemInit().
*/
if (i < MaxConnections)
{
@@ -261,8 +261,8 @@ InitProcGlobal(void)
}
/*
* Save pointers to the blocks of PGPROC structures reserved for
* auxiliary processes and prepared transactions.
* Save pointers to the blocks of PGPROC structures reserved for auxiliary
* processes and prepared transactions.
*/
AuxiliaryProcs = &procs[MaxBackends];
PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
@@ -340,8 +340,8 @@ InitProcess(void)
MarkPostmasterChildActive();
/*
* Initialize all fields of MyProc, except for those previously initialized
* by InitProcGlobal.
* Initialize all fields of MyProc, except for those previously
* initialized by InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
@@ -366,7 +366,7 @@ InitProcess(void)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
int i;
int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
@@ -500,8 +500,8 @@ InitAuxiliaryProcess(void)
SpinLockRelease(ProcStructLock);
/*
* Initialize all fields of MyProc, except for those previously initialized
* by InitProcGlobal.
* Initialize all fields of MyProc, except for those previously
* initialized by InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
@@ -521,7 +521,7 @@ InitAuxiliaryProcess(void)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
int i;
int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
@@ -751,7 +751,7 @@ ProcKill(int code, Datum arg)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
int i;
int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
@@ -1031,8 +1031,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* Also, now that we will successfully clean up after an ereport, it's
* safe to check to see if there's a buffer pin deadlock against the
* Startup process. Of course, that's only necessary if we're doing
* Hot Standby and are not the Startup process ourselves.
* Startup process. Of course, that's only necessary if we're doing Hot
* Standby and are not the Startup process ourselves.
*/
if (RecoveryInProgress() && !InRecovery)
CheckRecoveryConflictDeadlock();

src/backend/storage/lmgr/s_lock.c

@@ -20,7 +20,7 @@
#include "storage/s_lock.h"
slock_t dummy_spinlock;
slock_t dummy_spinlock;
static int spins_per_delay = DEFAULT_SPINS_PER_DELAY;

src/backend/storage/smgr/md.c

@@ -325,7 +325,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
* relfilenode number from being recycled. Also, we do not carefully
* relfilenode number from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
@@ -767,9 +767,10 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
* NOTE: this assumption could only be wrong if another backend has
* truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
* relcache flush. (Since the checkpointer doesn't participate in relcache
* flush, it could have segment chain entries for inactive segments;
* that's OK because the checkpointer never needs to compute relation size.)
* relcache flush. (Since the checkpointer doesn't participate in
* relcache flush, it could have segment chain entries for inactive
* segments; that's OK because the checkpointer never needs to compute
* relation size.)
*/
while (v->mdfd_chain != NULL)
{
@@ -1072,12 +1073,13 @@ mdsync(void)
* say "but an unreferenced SMgrRelation is still a leak!" Not
* really, because the only case in which a checkpoint is done
* by a process that isn't about to shut down is in the
* checkpointer, and it will periodically do smgrcloseall(). This
* fact justifies our not closing the reln in the success path
* either, which is a good thing since in non-checkpointer cases
* we couldn't safely do that.) Furthermore, in many cases
* the relation will have been dirtied through this same smgr
* relation, and so we can save a file open/close cycle.
* checkpointer, and it will periodically do smgrcloseall().
* This fact justifies our not closing the reln in the success
* path either, which is a good thing since in
* non-checkpointer cases we couldn't safely do that.)
* Furthermore, in many cases the relation will have been
* dirtied through this same smgr relation, and so we can save
* a file open/close cycle.
*/
reln = smgropen(entry->tag.rnode.node,
entry->tag.rnode.backend);
@@ -1470,8 +1472,8 @@ ForgetRelationFsyncRequests(RelFileNodeBackend rnode, ForkNumber forknum)
pg_usleep(10000L); /* 10 msec seems a good number */
/*
* Note we don't wait for the checkpointer to actually absorb the revoke
* message; see mdsync() for the implications.
* Note we don't wait for the checkpointer to actually absorb the
* revoke message; see mdsync() for the implications.
*/
}
}

src/backend/storage/smgr/smgr.c

@@ -405,8 +405,8 @@ smgrdounlinkfork(SMgrRelation reln, ForkNumber forknum, bool isRedo)
(*(smgrsw[which].smgr_close)) (reln, forknum);
/*
* Get rid of any remaining buffers for the fork. bufmgr will just
* drop them without bothering to write the contents.
* Get rid of any remaining buffers for the fork. bufmgr will just drop
* them without bothering to write the contents.
*/
DropRelFileNodeBuffers(rnode, forknum, 0);