
pgindent run before PG 9.1 beta 1.

Bruce Momjian
2011-04-10 11:42:00 -04:00
parent 9a8b73147c
commit bf50caf105
446 changed files with 5737 additions and 5258 deletions
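
This is a mechanical whitespace commit: pgindent reflows comment blocks to the project's line-width convention, adds spaces around binary operators, splits comma-separated declarations onto separate lines, and column-aligns declared names. A minimal before/after sketch of the kinds of changes in the hunks below (drawn from the looks_like_temp_rel_name() hunk; the exact alignment width is illustrative):

    /* before pgindent */
    int forkchar = forkname_chars(&name[pos+1], NULL);

    /* after pgindent: spaces around '+', declaration name aligned */
    int         forkchar = forkname_chars(&name[pos + 1], NULL);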

View File

@@ -644,17 +644,17 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/* OK, do the I/O */
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
- smgr->smgr_rnode.node.spcNode,
- smgr->smgr_rnode.node.dbNode,
- smgr->smgr_rnode.node.relNode);
+ smgr->smgr_rnode.node.spcNode,
+ smgr->smgr_rnode.node.dbNode,
+ smgr->smgr_rnode.node.relNode);
FlushBuffer(buf, NULL);
LWLockRelease(buf->content_lock);
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
- smgr->smgr_rnode.node.spcNode,
- smgr->smgr_rnode.node.dbNode,
- smgr->smgr_rnode.node.relNode);
+ smgr->smgr_rnode.node.spcNode,
+ smgr->smgr_rnode.node.dbNode,
+ smgr->smgr_rnode.node.relNode);
}
else
{
@@ -2029,7 +2029,7 @@ PrintBufferDescs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
- relpathbackend(buf->tag.rnode, InvalidBackendId, buf->tag.forkNum),
+ relpathbackend(buf->tag.rnode, InvalidBackendId, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, PrivateRefCount[i]);
}
@@ -2765,7 +2765,7 @@ local_buffer_write_error_callback(void *arg)
if (bufHdr != NULL)
{
char *path = relpathbackend(bufHdr->tag.rnode, MyBackendId,
- bufHdr->tag.forkNum);
+ bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);

View File

@@ -77,7 +77,7 @@ typedef struct BufferAccessStrategyData
* struct.
*/
Buffer buffers[1]; /* VARIABLE SIZE ARRAY */
- } BufferAccessStrategyData;
+ } BufferAccessStrategyData;
/* Prototypes for internal functions */

View File

@@ -150,8 +150,8 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
- smgr->smgr_rnode.node.relNode, forkNum, blockNum,
- -nextFreeLocalBuf - 1);
+ smgr->smgr_rnode.node.relNode, forkNum, blockNum,
+ -nextFreeLocalBuf - 1);
#endif
/*
@@ -311,7 +311,7 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
relpathbackend(bufHdr->tag.rnode, MyBackendId,
- bufHdr->tag.forkNum),
+ bufHdr->tag.forkNum),
LocalRefCount[i]);
/* Remove entry from hashtable */
hresult = (LocalBufferLookupEnt *)
@@ -413,7 +413,7 @@ GetLocalBufferStorage(void)
/*
* We allocate local buffers in a context of their own, so that the
* space eaten for them is easily recognizable in MemoryContextStats
- * output. Create the context on first use.
+ * output. Create the context on first use.
*/
if (LocalBufferContext == NULL)
LocalBufferContext =

View File

@@ -1063,15 +1063,15 @@ FileClose(File file)
* If we get an error, as could happen within the ereport/elog calls,
* we'll come right back here during transaction abort. Reset the
* flag to ensure that we can't get into an infinite loop. This code
- * is arranged to ensure that the worst-case consequence is failing
- * to emit log message(s), not failing to attempt the unlink.
+ * is arranged to ensure that the worst-case consequence is failing to
+ * emit log message(s), not failing to attempt the unlink.
*/
vfdP->fdstate &= ~FD_TEMPORARY;
if (log_temp_files >= 0)
{
struct stat filestats;
- int stat_errno;
+ int stat_errno;
/* first try the stat() */
if (stat(vfdP->fileName, &filestats))
@@ -1900,7 +1900,7 @@ RemovePgTempFiles(void)
RemovePgTempFilesInDir(temp_path);
snprintf(temp_path, sizeof(temp_path), "pg_tblspc/%s/%s",
- spc_de->d_name, TABLESPACE_VERSION_DIRECTORY);
+ spc_de->d_name, TABLESPACE_VERSION_DIRECTORY);
RemovePgTempRelationFiles(temp_path);
}
@@ -1977,7 +1977,7 @@ RemovePgTempRelationFiles(const char *tsdirname)
while ((de = ReadDir(ts_dir, tsdirname)) != NULL)
{
- int i = 0;
+ int i = 0;
/*
* We're only interested in the per-database directories, which have
@@ -2023,7 +2023,7 @@ RemovePgTempRelationFilesInDbspace(const char *dbspacedirname)
snprintf(rm_path, sizeof(rm_path), "%s/%s",
dbspacedirname, de->d_name);
- unlink(rm_path); /* note we ignore any error */
+ unlink(rm_path); /* note we ignore any error */
}
FreeDir(dbspace_dir);
@@ -2055,15 +2055,17 @@ looks_like_temp_rel_name(const char *name)
/* We might have _forkname or .segment or both. */
if (name[pos] == '_')
{
- int forkchar = forkname_chars(&name[pos+1], NULL);
+ int forkchar = forkname_chars(&name[pos + 1], NULL);
if (forkchar <= 0)
return false;
pos += forkchar + 1;
}
if (name[pos] == '.')
{
- int segchar;
- for (segchar = 1; isdigit((unsigned char) name[pos+segchar]); ++segchar)
+ int segchar;
+ for (segchar = 1; isdigit((unsigned char) name[pos + segchar]); ++segchar)
;
if (segchar <= 1)
return false;

View File

@@ -24,14 +24,15 @@
#include "utils/memutils.h"
static void ResetUnloggedRelationsInTablespaceDir(const char *tsdirname,
- int op);
+ int op);
static void ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname,
int op);
static bool parse_filename_for_nontemp_relation(const char *name,
int *oidchars, ForkNumber *fork);
- typedef struct {
- char oid[OIDCHARS+1];
+ typedef struct
+ {
+ char oid[OIDCHARS + 1];
} unlogged_relation_entry;
/*
@@ -49,13 +50,14 @@ ResetUnloggedRelations(int op)
char temp_path[MAXPGPATH];
DIR *spc_dir;
struct dirent *spc_de;
- MemoryContext tmpctx, oldctx;
+ MemoryContext tmpctx,
+ oldctx;
/* Log it. */
ereport(DEBUG1,
(errmsg("resetting unlogged relations: cleanup %d init %d",
- (op & UNLOGGED_RELATION_CLEANUP) != 0,
- (op & UNLOGGED_RELATION_INIT) != 0)));
+ (op & UNLOGGED_RELATION_CLEANUP) != 0,
+ (op & UNLOGGED_RELATION_INIT) != 0)));
/*
* Just to be sure we don't leak any memory, let's create a temporary
@@ -85,7 +87,7 @@ ResetUnloggedRelations(int op)
continue;
snprintf(temp_path, sizeof(temp_path), "pg_tblspc/%s/%s",
- spc_de->d_name, TABLESPACE_VERSION_DIRECTORY);
+ spc_de->d_name, TABLESPACE_VERSION_DIRECTORY);
ResetUnloggedRelationsInTablespaceDir(temp_path, op);
}
@@ -119,7 +121,7 @@ ResetUnloggedRelationsInTablespaceDir(const char *tsdirname, int op)
while ((de = ReadDir(ts_dir, tsdirname)) != NULL)
{
- int i = 0;
+ int i = 0;
/*
* We're only interested in the per-database directories, which have
@@ -184,8 +186,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
/* Scan the directory. */
while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL)
{
- ForkNumber forkNum;
- int oidchars;
+ ForkNumber forkNum;
+ int oidchars;
unlogged_relation_entry ent;
/* Skip anything that doesn't look like a relation data file. */
@@ -198,8 +200,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
continue;
/*
- * Put the OID portion of the name into the hash table, if it isn't
- * already.
+ * Put the OID portion of the name into the hash table, if it
+ * isn't already.
*/
memset(ent.oid, 0, sizeof(ent.oid));
memcpy(ent.oid, de->d_name, oidchars);
@@ -236,9 +238,9 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
/* Scan the directory. */
while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL)
{
- ForkNumber forkNum;
- int oidchars;
- bool found;
+ ForkNumber forkNum;
+ int oidchars;
+ bool found;
unlogged_relation_entry ent;
/* Skip anything that doesn't look like a relation data file. */
@@ -262,7 +264,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
if (found)
{
snprintf(rm_path, sizeof(rm_path), "%s/%s",
- dbspacedirname, de->d_name);
+ dbspacedirname, de->d_name);
/*
* It's tempting to actually throw an error here, but since
* this code gets run during database startup, that could
@@ -284,9 +287,9 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
/*
* Initialization happens after cleanup is complete: we copy each init
* fork file to the corresponding main fork file. Note that if we are
- * asked to do both cleanup and init, we may never get here: if the cleanup
- * code determines that there are no init forks in this dbspace, it will
- * return before we get to this point.
+ * asked to do both cleanup and init, we may never get here: if the
+ * cleanup code determines that there are no init forks in this dbspace,
+ * it will return before we get to this point.
*/
if ((op & UNLOGGED_RELATION_INIT) != 0)
{
@@ -304,11 +307,11 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
/* Scan the directory. */
while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL)
{
- ForkNumber forkNum;
- int oidchars;
- char oidbuf[OIDCHARS+1];
- char srcpath[MAXPGPATH];
- char dstpath[MAXPGPATH];
+ ForkNumber forkNum;
+ int oidchars;
+ char oidbuf[OIDCHARS + 1];
+ char srcpath[MAXPGPATH];
+ char dstpath[MAXPGPATH];
/* Skip anything that doesn't look like a relation data file. */
if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars,
@@ -370,9 +373,9 @@ parse_filename_for_nontemp_relation(const char *name, int *oidchars,
*fork = MAIN_FORKNUM;
else
{
- int forkchar;
+ int forkchar;
- forkchar = forkname_chars(&name[pos+1], fork);
+ forkchar = forkname_chars(&name[pos + 1], fork);
if (forkchar <= 0)
return false;
pos += forkchar + 1;
@@ -381,8 +384,9 @@ parse_filename_for_nontemp_relation(const char *name, int *oidchars,
/* Check for a segment number. */
if (name[pos] == '.')
{
- int segchar;
- for (segchar = 1; isdigit((unsigned char) name[pos+segchar]); ++segchar)
+ int segchar;
+ for (segchar = 1; isdigit((unsigned char) name[pos + segchar]); ++segchar)
;
if (segchar <= 1)
return false;

View File

@@ -279,7 +279,7 @@ PostmasterIsAlive(bool amDirectChild)
#ifndef WIN32
if (amDirectChild)
{
- pid_t ppid = getppid();
+ pid_t ppid = getppid();
/* If the postmaster is still our parent, it must be alive. */
if (ppid == PostmasterPid)
@@ -297,10 +297,10 @@ PostmasterIsAlive(bool amDirectChild)
}
/*
- * Use kill() to see if the postmaster is still alive. This can
- * sometimes give a false positive result, since the postmaster's PID
- * may get recycled, but it is good enough for existing uses by
- * indirect children and in debugging environments.
+ * Use kill() to see if the postmaster is still alive. This can sometimes
+ * give a false positive result, since the postmaster's PID may get
+ * recycled, but it is good enough for existing uses by indirect children
+ * and in debugging environments.
*/
return (kill(PostmasterPid, 0) == 0);
#else /* WIN32 */

View File

@@ -475,10 +475,10 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
return;
/*
- * If our initial RunningTransactionsData had an overflowed snapshot then we knew
- * we were missing some subxids from our snapshot. We can use this data as
- * an initial snapshot, but we cannot yet mark it valid. We know that the
- * missing subxids are equal to or earlier than nextXid. After we
+ * If our initial RunningTransactionsData had an overflowed snapshot then
+ * we knew we were missing some subxids from our snapshot. We can use this
+ * data as an initial snapshot, but we cannot yet mark it valid. We know
+ * that the missing subxids are equal to or earlier than nextXid. After we
* initialise we continue to apply changes during recovery, so once the
* oldestRunningXid is later than the nextXid from the initial snapshot we
* know that we no longer have missing information and can mark the
@@ -510,8 +510,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
*/
/*
- * Release any locks belonging to old transactions that are not
- * running according to the running-xacts record.
+ * Release any locks belonging to old transactions that are not running
+ * according to the running-xacts record.
*/
StandbyReleaseOldLocks(running->nextXid);
@@ -582,9 +582,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
* Now we've got the running xids we need to set the global values that
* are used to track snapshots as they evolve further.
*
- * - latestCompletedXid which will be the xmax for snapshots
- * - lastOverflowedXid which shows whether snapshots overflow
- * - nextXid
+ * - latestCompletedXid which will be the xmax for snapshots -
+ * lastOverflowedXid which shows whether snapshots overflow - nextXid
*
* If the snapshot overflowed, then we still initialise with what we know,
* but the recovery snapshot isn't fully valid yet because we know there
@@ -611,9 +610,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
/*
* If a transaction wrote a commit record in the gap between taking and
- * logging the snapshot then latestCompletedXid may already be higher
- * than the value from the snapshot, so check before we use the incoming
- * value.
+ * logging the snapshot then latestCompletedXid may already be higher than
+ * the value from the snapshot, so check before we use the incoming value.
*/
if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
running->latestCompletedXid))
@@ -1048,7 +1046,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
if (allDbs ||
proc->databaseId == MyDatabaseId ||
- proc->databaseId == 0) /* include WalSender */
+ proc->databaseId == 0) /* include WalSender */
{
/* Fetch xid just once - see GetNewTransactionId */
TransactionId xid = proc->xid;
@@ -1075,8 +1073,8 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
if (RecoveryInProgress())
{
/*
- * Check to see whether KnownAssignedXids contains an xid value
- * older than the main procarray.
+ * Check to see whether KnownAssignedXids contains an xid value older
+ * than the main procarray.
*/
TransactionId kaxmin = KnownAssignedXidsGetOldestXmin();
@@ -1084,7 +1082,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
if (TransactionIdIsNormal(kaxmin) &&
TransactionIdPrecedes(kaxmin, result))
- result = kaxmin;
+ result = kaxmin;
}
else
{
@@ -1100,9 +1098,9 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
* vacuum_defer_cleanup_age provides some additional "slop" for the
* benefit of hot standby queries on slave servers. This is quick and
* dirty, and perhaps not all that useful unless the master has a
- * predictable transaction rate, but it's what we've got. Note that we
- * are assuming vacuum_defer_cleanup_age isn't large enough to cause
- * wraparound --- so guc.c should limit it to no more than the
+ * predictable transaction rate, but it's what we've got. Note that
+ * we are assuming vacuum_defer_cleanup_age isn't large enough to
+ * cause wraparound --- so guc.c should limit it to no more than the
* xidStopLimit threshold in varsup.c.
*/
result -= vacuum_defer_cleanup_age;
@@ -1483,9 +1481,9 @@ GetRunningTransactionData(void)
suboverflowed = true;
/*
- * Top-level XID of a transaction is always less than any of
- * its subxids, so we don't need to check if any of the subxids
- * are smaller than oldestRunningXid
+ * Top-level XID of a transaction is always less than any of its
+ * subxids, so we don't need to check if any of the subxids are
+ * smaller than oldestRunningXid
*/
}
}

View File

@@ -193,7 +193,7 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
return;
waitStart = GetCurrentTimestamp();
- new_status = NULL; /* we haven't changed the ps display */
+ new_status = NULL; /* we haven't changed the ps display */
while (VirtualTransactionIdIsValid(*waitlist))
{
@@ -963,14 +963,14 @@ void
LogAccessExclusiveLockPrepare(void)
{
/*
- * Ensure that a TransactionId has been assigned to this transaction,
- * for two reasons, both related to lock release on the standby.
- * First, we must assign an xid so that RecordTransactionCommit() and
+ * Ensure that a TransactionId has been assigned to this transaction, for
+ * two reasons, both related to lock release on the standby. First, we
+ * must assign an xid so that RecordTransactionCommit() and
* RecordTransactionAbort() do not optimise away the transaction
- * completion record which recovery relies upon to release locks. It's
- * a hack, but for a corner case not worth adding code for into the
- * main commit path. Second, must must assign an xid before the lock
- * is recorded in shared memory, otherwise a concurrently executing
+ * completion record which recovery relies upon to release locks. It's a
+ * hack, but for a corner case not worth adding code for into the main
+ * commit path. Second, must must assign an xid before the lock is
+ * recorded in shared memory, otherwise a concurrently executing
* GetRunningTransactionLocks() might see a lock associated with an
* InvalidTransactionId which we later assert cannot happen.
*/

View File

@@ -844,8 +844,8 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
{
/*
* If the first page we found was after the truncation point, we're in
- * a hole that we'll fill, but we need to delete the later page because
- * the loop below won't visit it again.
+ * a hole that we'll fill, but we need to delete the later page
+ * because the loop below won't visit it again.
*/
if (olddata != NULL)
{

View File

@@ -586,7 +586,8 @@ LockAcquireExtended(const LOCKTAG *locktag,
* standby server. Only AccessExclusiveLocks can conflict with lock types
* that read-only transactions can acquire in a standby server.
*
- * Make sure this definition matches the one in GetRunningTransactionLocks().
+ * Make sure this definition matches the one in
+ * GetRunningTransactionLocks().
*
* First we prepare to log, then after lock acquired we issue log record.
*/

View File

@@ -327,7 +327,7 @@ typedef struct OldSerXidControlData
TransactionId headXid; /* newest valid Xid in the SLRU */
TransactionId tailXid; /* oldest xmin we might be interested in */
bool warningIssued;
- } OldSerXidControlData;
+ } OldSerXidControlData;
typedef struct OldSerXidControlData *OldSerXidControl;
@@ -477,7 +477,7 @@ ReleasePredXact(SERIALIZABLEXACT *sxact)
ptle = (PredXactListElement)
(((char *) sxact)
- offsetof(PredXactListElementData, sxact)
- +offsetof(PredXactListElementData, link));
+ + offsetof(PredXactListElementData, link));
SHMQueueDelete(&ptle->link);
SHMQueueInsertBefore(&PredXact->availableList, &ptle->link);
}
@@ -507,7 +507,7 @@ NextPredXact(SERIALIZABLEXACT *sxact)
ptle = (PredXactListElement)
(((char *) sxact)
- offsetof(PredXactListElementData, sxact)
- +offsetof(PredXactListElementData, link));
+ + offsetof(PredXactListElementData, link));
ptle = (PredXactListElement)
SHMQueueNext(&PredXact->activeList,
&ptle->link,
@@ -746,10 +746,10 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
Assert(TransactionIdIsValid(tailXid));
/*
- * If the SLRU is currently unused, zero out the whole active region
- * from tailXid to headXid before taking it into use. Otherwise zero
- * out only any new pages that enter the tailXid-headXid range as we
- * advance headXid.
+ * If the SLRU is currently unused, zero out the whole active region from
+ * tailXid to headXid before taking it into use. Otherwise zero out only
+ * any new pages that enter the tailXid-headXid range as we advance
+ * headXid.
*/
if (oldSerXidControl->headPage < 0)
{
@@ -855,8 +855,8 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
/*
* When no sxacts are active, nothing overlaps, set the xid values to
* invalid to show that there are no valid entries. Don't clear headPage,
- * though. A new xmin might still land on that page, and we don't want
- * to repeatedly zero out the same page.
+ * though. A new xmin might still land on that page, and we don't want to
+ * repeatedly zero out the same page.
*/
if (!TransactionIdIsValid(xid))
{
@@ -901,7 +901,7 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
void
CheckPointPredicate(void)
{
- int tailPage;
+ int tailPage;
LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
@@ -921,16 +921,15 @@ CheckPointPredicate(void)
{
/*
* The SLRU is no longer needed. Truncate everything. If we try to
- * leave the head page around to avoid re-zeroing it, we might not
- * use the SLRU again until we're past the wrap-around point, which
- * makes SLRU unhappy.
+ * leave the head page around to avoid re-zeroing it, we might not use
+ * the SLRU again until we're past the wrap-around point, which makes
+ * SLRU unhappy.
*
* While the API asks you to specify truncation by page, it silently
- * ignores the request unless the specified page is in a segment
- * past some allocated portion of the SLRU. We don't care which
- * page in a later segment we hit, so just add the number of pages
- * per segment to the head page to land us *somewhere* in the next
- * segment.
+ * ignores the request unless the specified page is in a segment past
+ * some allocated portion of the SLRU. We don't care which page in a
+ * later segment we hit, so just add the number of pages per segment
+ * to the head page to land us *somewhere* in the next segment.
*/
tailPage = oldSerXidControl->headPage + SLRU_PAGES_PER_SEGMENT;
oldSerXidControl->headPage = -1;
@@ -1329,12 +1328,12 @@ SummarizeOldestCommittedSxact(void)
/*
* This function is only called if there are no sxact slots available.
* Some of them must belong to old, already-finished transactions, so
- * there should be something in FinishedSerializableTransactions list
- * that we can summarize. However, there's a race condition: while we
- * were not holding any locks, a transaction might have ended and cleaned
- * up all the finished sxact entries already, freeing up their sxact
- * slots. In that case, we have nothing to do here. The caller will find
- * one of the slots released by the other backend when it retries.
+ * there should be something in FinishedSerializableTransactions list that
+ * we can summarize. However, there's a race condition: while we were not
+ * holding any locks, a transaction might have ended and cleaned up all
+ * the finished sxact entries already, freeing up their sxact slots. In
+ * that case, we have nothing to do here. The caller will find one of the
+ * slots released by the other backend when it retries.
*/
if (SHMQueueEmpty(FinishedSerializableTransactions))
{
@@ -2213,7 +2212,7 @@ PredicateLockTuple(const Relation relation, const HeapTuple tuple)
*/
if (relation->rd_index == NULL)
{
- TransactionId myxid;
+ TransactionId myxid;
targetxmin = HeapTupleHeaderGetXmin(tuple->t_data);
@@ -2223,6 +2222,7 @@ PredicateLockTuple(const Relation relation, const HeapTuple tuple)
if (TransactionIdFollowsOrEquals(targetxmin, TransactionXmin))
{
TransactionId xid = SubTransGetTopmostTransaction(targetxmin);
+
if (TransactionIdEquals(xid, myxid))
{
/* We wrote it; we already have a write lock. */
@@ -2272,7 +2272,7 @@ PredicateLockTupleRowVersionLink(const Relation relation,
PREDICATELOCKTARGETTAG oldtupletag;
PREDICATELOCKTARGETTAG oldpagetag;
PREDICATELOCKTARGETTAG newtupletag;
- BlockNumber oldblk,
+ BlockNumber oldblk,
newblk;
OffsetNumber oldoff,
newoff;
@@ -2308,10 +2308,10 @@ PredicateLockTupleRowVersionLink(const Relation relation,
/*
* A page-level lock on the page containing the old tuple counts too.
- * Anyone holding a lock on the page is logically holding a lock on
- * the old tuple, so we need to acquire a lock on his behalf on the
- * new tuple too. However, if the new tuple is on the same page as the
- * old one, the old page-level lock already covers the new tuple.
+ * Anyone holding a lock on the page is logically holding a lock on the
+ * old tuple, so we need to acquire a lock on his behalf on the new tuple
+ * too. However, if the new tuple is on the same page as the old one, the
+ * old page-level lock already covers the new tuple.
*
* A relation-level lock always covers both tuple versions, so we don't
* need to worry about those here.
@@ -2668,10 +2668,10 @@ PredicateLockPageSplit(const Relation relation, const BlockNumber oldblkno,
/*
* Move the locks to the parent. This shouldn't fail.
*
- * Note that here we are removing locks held by other
- * backends, leading to a possible inconsistency in their
- * local lock hash table. This is OK because we're replacing
- * it with a lock that covers the old one.
+ * Note that here we are removing locks held by other backends,
+ * leading to a possible inconsistency in their local lock hash table.
+ * This is OK because we're replacing it with a lock that covers the
+ * old one.
*/
success = TransferPredicateLocksToNewTarget(oldtargettag,
newtargettag,
@@ -2696,16 +2696,15 @@ PredicateLockPageCombine(const Relation relation, const BlockNumber oldblkno,
const BlockNumber newblkno)
{
/*
- * Page combines differ from page splits in that we ought to be
- * able to remove the locks on the old page after transferring
- * them to the new page, instead of duplicating them. However,
- * because we can't edit other backends' local lock tables,
- * removing the old lock would leave them with an entry in their
- * LocalPredicateLockHash for a lock they're not holding, which
- * isn't acceptable. So we wind up having to do the same work as a
- * page split, acquiring a lock on the new page and keeping the old
- * page locked too. That can lead to some false positives, but
- * should be rare in practice.
+ * Page combines differ from page splits in that we ought to be able to
+ * remove the locks on the old page after transferring them to the new
+ * page, instead of duplicating them. However, because we can't edit other
+ * backends' local lock tables, removing the old lock would leave them
+ * with an entry in their LocalPredicateLockHash for a lock they're not
+ * holding, which isn't acceptable. So we wind up having to do the same
+ * work as a page split, acquiring a lock on the new page and keeping the
+ * old page locked too. That can lead to some false positives, but should
+ * be rare in practice.
*/
PredicateLockPageSplit(relation, oldblkno, newblkno);
}
@@ -3652,15 +3651,15 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
/*
* If we're getting a write lock on the tuple and we're not in a
* subtransaction, we don't need a predicate (SIREAD) lock. We
- * can't use this optimization within a subtransaction because
- * the subtransaction could be rolled back, and we would be left
+ * can't use this optimization within a subtransaction because the
+ * subtransaction could be rolled back, and we would be left
* without any lock at the top level.
- *
+ *
* At this point our transaction already has an ExclusiveRowLock
- * on the relation, so we are OK to drop the predicate lock on
- * the tuple, if found, without fearing that another write
- * against the tuple will occur before the MVCC information
- * makes it to the buffer.
+ * on the relation, so we are OK to drop the predicate lock on the
+ * tuple, if found, without fearing that another write against the
+ * tuple will occur before the MVCC information makes it to the
+ * buffer.
*/
if (!IsSubTransaction()
&& GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
@@ -3722,8 +3721,8 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
/*
* Remove entry in local lock table if it exists and has
* no children. It's OK if it doesn't exist; that means
- * the lock was transferred to a new target by a
- * different backend.
+ * the lock was transferred to a new target by a different
+ * backend.
*/
if (locallock != NULL)
{
@@ -3733,8 +3732,8 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
{
rmlocallock = (LOCALPREDICATELOCK *)
hash_search_with_hash_value(LocalPredicateLockHash,
- targettag, targettaghash,
- HASH_REMOVE, NULL);
+ targettag, targettaghash,
+ HASH_REMOVE, NULL);
Assert(rmlocallock == locallock);
}
}
@@ -3772,9 +3771,9 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
LWLockAcquire(partitionLock, LW_SHARED);
/*
- * The list may have been altered by another process
- * while we weren't holding the partition lock. Start
- * over at the front.
+ * The list may have been altered by another process while
+ * we weren't holding the partition lock. Start over at
+ * the front.
*/
nextpredlock = (PREDICATELOCK *)
SHMQueueNext(&(target->predicateLocks),
@@ -3862,8 +3861,8 @@ CheckForSerializableConflictIn(const Relation relation, const HeapTuple tuple,
relation->rd_node.dbNode,
relation->rd_id,
ItemPointerGetBlockNumber(&(tuple->t_data->t_ctid)),
- ItemPointerGetOffsetNumber(&(tuple->t_data->t_ctid)),
- HeapTupleHeaderGetXmin(tuple->t_data));
+ ItemPointerGetOffsetNumber(&(tuple->t_data->t_ctid)),
+ HeapTupleHeaderGetXmin(tuple->t_data));
CheckTargetForConflictsIn(&targettag);
}

View File

@@ -938,7 +938,7 @@ mdsync(void)
int processed = 0;
instr_time sync_start,
sync_end,
- sync_diff;
+ sync_diff;
uint64 elapsed;
uint64 longest = 0;
uint64 total_elapsed = 0;
@@ -1094,7 +1094,7 @@ mdsync(void)
if (seg != NULL &&
FileSync(seg->mdfd_vfd) >= 0)
{
- if (log_checkpoints && (! INSTR_TIME_IS_ZERO(sync_start)))
+ if (log_checkpoints && (!INSTR_TIME_IS_ZERO(sync_start)))
{
INSTR_TIME_SET_CURRENT(sync_end);
sync_diff = sync_end;
@@ -1104,8 +1104,8 @@ mdsync(void)
longest = elapsed;
total_elapsed += elapsed;
processed++;
- elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
- processed, FilePathName(seg->mdfd_vfd), (double) elapsed / 1000);
+ elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
+ processed, FilePathName(seg->mdfd_vfd), (double) elapsed / 1000);
}
break; /* success; break out of retry loop */
@@ -1268,7 +1268,7 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
return; /* passed it off successfully */
ereport(DEBUG1,
(errmsg("could not forward fsync request because request queue is full")));
(errmsg("could not forward fsync request because request queue is full")));
if (FileSync(seg->mdfd_vfd) < 0)
ereport(ERROR,

View File

@@ -48,16 +48,16 @@ typedef struct f_smgr
void (*smgr_unlink) (RelFileNodeBackend rnode, ForkNumber forknum,
bool isRedo);
void (*smgr_extend) (SMgrRelation reln, ForkNumber forknum,
- BlockNumber blocknum, char *buffer, bool skipFsync);
+ BlockNumber blocknum, char *buffer, bool skipFsync);
void (*smgr_prefetch) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum);
void (*smgr_read) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer);
void (*smgr_write) (SMgrRelation reln, ForkNumber forknum,
- BlockNumber blocknum, char *buffer, bool skipFsync);
+ BlockNumber blocknum, char *buffer, bool skipFsync);
BlockNumber (*smgr_nblocks) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_truncate) (SMgrRelation reln, ForkNumber forknum,
- BlockNumber nblocks);
+ BlockNumber nblocks);
void (*smgr_immedsync) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_pre_ckpt) (void); /* may be NULL */
void (*smgr_sync) (void); /* may be NULL */