mirror of https://github.com/postgres/postgres.git

Phase 3 of pgindent updates.

Don't move parenthesized lines to the left, even if that means they
flow past the right margin.

By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis.  However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent.  That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.

This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
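To illustrate, a minimal sketch of the layout change, using the PrefetchBuffer ereport() call from the first hunk below (column positions approximate, not taken verbatim from the diff):

    /* Before: indent pulled the continuation line back to the left, just
     * far enough to stay inside the 80-column margin, so it no longer
     * lined up with the opening parenthesis. */
    ereport(ERROR,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
        errmsg("cannot access temporary tables of other sessions")));

    /* After (-lpl): the continuation line stays aligned just to the right
     * of the preceding left parenthesis, even though it now runs past the
     * right margin. */
    ereport(ERROR,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("cannot access temporary tables of other sessions")));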

This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.

Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
Tom Lane
2017-06-21 15:35:54 -04:00
parent c7b8998ebb
commit 382ceffdf7
568 changed files with 4747 additions and 4745 deletions

View File

@@ -541,7 +541,7 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
if (RELATION_IS_OTHER_TEMP(reln))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary tables of other sessions")));
errmsg("cannot access temporary tables of other sessions")));
/* pass it off to localbuf.c */
LocalPrefetchBuffer(reln->rd_smgr, forkNum, blockNum);
@@ -804,9 +804,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
if (!PageIsNew((Page) bufBlock))
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
blockNum, relpath(smgr->smgr_rnode, forkNum)),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
(errmsg("unexpected data beyond EOF in block %u of relation %s",
blockNum, relpath(smgr->smgr_rnode, forkNum)),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
* We *must* do smgrextend before succeeding, else the page will not
@@ -1133,9 +1133,9 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/* OK, do the I/O */
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
FlushBuffer(buf, NULL);
LWLockRelease(BufferDescriptorGetContentLock(buf));
@@ -1144,9 +1144,9 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
&buf->tag);
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
}
else
{
@@ -3092,7 +3092,7 @@ PrintBufferDescs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
relpathbackend(buf->tag.rnode, InvalidBackendId, buf->tag.forkNum),
relpathbackend(buf->tag.rnode, InvalidBackendId, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}

View File

@@ -674,7 +674,7 @@ StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf)
/* Don't muck with behavior of normal buffer-replacement strategy */
if (!strategy->current_was_in_ring ||
strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
return false;
/*

View File

@@ -189,7 +189,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
/* Found a usable buffer */
LocalRefCount[b]++;
ResourceOwnerRememberBuffer(CurrentResourceOwner,
BufferDescriptorGetBuffer(bufHdr));
BufferDescriptorGetBuffer(bufHdr));
break;
}
}

View File

@@ -522,7 +522,7 @@ pg_flush_data(int fd, off_t offset, off_t nbytes)
/* FATAL error because mapping would remain */
ereport(FATAL,
(errcode_for_file_access(),
errmsg("could not munmap() while flushing data: %m")));
errmsg("could not munmap() while flushing data: %m")));
}
return;
@@ -1757,8 +1757,8 @@ FileWrite(File file, char *buffer, int amount, uint32 wait_event_info)
if (newTotal > (uint64) temp_file_limit * (uint64) 1024)
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("temporary file size exceeds temp_file_limit (%dkB)",
temp_file_limit)));
errmsg("temporary file size exceeds temp_file_limit (%dkB)",
temp_file_limit)));
}
}
@@ -2728,7 +2728,7 @@ RemovePgTempFiles(void)
continue;
snprintf(temp_path, sizeof(temp_path), "pg_tblspc/%s/%s/%s",
spc_de->d_name, TABLESPACE_VERSION_DIRECTORY, PG_TEMP_FILES_DIR);
spc_de->d_name, TABLESPACE_VERSION_DIRECTORY, PG_TEMP_FILES_DIR);
RemovePgTempFilesInDir(temp_path);
snprintf(temp_path, sizeof(temp_path), "pg_tblspc/%s/%s",

View File

@@ -429,7 +429,7 @@ dsm_backend_startup(void)
&dsm_control_mapped_size, WARNING);
ereport(FATAL,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("dynamic shared memory control segment is not valid")));
errmsg("dynamic shared memory control segment is not valid")));
}
}
#endif
@@ -935,7 +935,7 @@ dsm_unpin_segment(dsm_handle handle)
* dsm_impl_unpin_segment.
*/
dsm_impl_unpin_segment(handle,
&dsm_control->item[control_slot].impl_private_pm_handle);
&dsm_control->item[control_slot].impl_private_pm_handle);
/* Note that 1 means no references (0 means unused slot). */
if (--dsm_control->item[control_slot].refcnt == 1)

View File

@@ -258,8 +258,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
return false;
}
*mapped_address = NULL;
@@ -268,8 +268,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
return false;
}
return true;
@@ -358,8 +358,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
return false;
}
*mapped_address = NULL;
@@ -530,8 +530,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
return false;
}
*mapped_address = NULL;
@@ -540,8 +540,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
return false;
}
return true;
@@ -645,8 +645,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
_dosmaperr(GetLastError());
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
return false;
}
if (*impl_private != NULL
@@ -655,8 +655,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
_dosmaperr(GetLastError());
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
return false;
}
@@ -711,8 +711,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
_dosmaperr(errcode);
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not create shared memory segment \"%s\": %m",
name)));
errmsg("could not create shared memory segment \"%s\": %m",
name)));
return false;
}
}
@@ -816,8 +816,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
return false;
}
*mapped_address = NULL;
@@ -826,8 +826,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
errmsg("could not remove shared memory segment \"%s\": %m",
name)));
return false;
}
return true;
@@ -960,8 +960,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
errmsg("could not unmap shared memory segment \"%s\": %m",
name)));
return false;
}
*mapped_address = NULL;
@@ -1026,8 +1026,8 @@ dsm_impl_pin_segment(dsm_handle handle, void *impl_private,
_dosmaperr(GetLastError());
ereport(ERROR,
(errcode_for_dynamic_shared_memory(),
errmsg("could not duplicate handle for \"%s\": %m",
name)));
errmsg("could not duplicate handle for \"%s\": %m",
name)));
}
/*
@@ -1074,8 +1074,8 @@ dsm_impl_unpin_segment(dsm_handle handle, void **impl_private)
_dosmaperr(GetLastError());
ereport(ERROR,
(errcode_for_dynamic_shared_memory(),
errmsg("could not duplicate handle for \"%s\": %m",
name)));
errmsg("could not duplicate handle for \"%s\": %m",
name)));
}
*impl_private = NULL;

View File

@@ -198,7 +198,7 @@ proc_exit_prepare(int code)
*/
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
on_proc_exit_list[on_proc_exit_index].arg);
on_proc_exit_list[on_proc_exit_index].arg);
on_proc_exit_index = 0;
}
@@ -226,7 +226,7 @@ shmem_exit(int code)
code, before_shmem_exit_index);
while (--before_shmem_exit_index >= 0)
(*before_shmem_exit_list[before_shmem_exit_index].function) (code,
before_shmem_exit_list[before_shmem_exit_index].arg);
before_shmem_exit_list[before_shmem_exit_index].arg);
before_shmem_exit_index = 0;
/*
@@ -259,7 +259,7 @@ shmem_exit(int code)
code, on_shmem_exit_index);
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_index = 0;
}

View File

@@ -1214,7 +1214,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
}
}
else if (cur_event->events == WL_POSTMASTER_DEATH &&
(cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
(cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
{
/*
* We expect an POLLHUP when the remote end is closed, but because

View File

@@ -732,8 +732,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
}
else
elog(trace_recovery(DEBUG1),
"recovery snapshot waiting for non-overflowed snapshot or "
"until oldest active xid on standby is at least %u (now %u)",
"recovery snapshot waiting for non-overflowed snapshot or "
"until oldest active xid on standby is at least %u (now %u)",
standbySnapshotPendingXmin,
running->oldestRunningXid);
return;
@@ -3243,7 +3243,7 @@ RecordKnownAssignedTransactionIds(TransactionId xid)
*/
void
ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids,
TransactionId *subxids, TransactionId max_xid)
TransactionId *subxids, TransactionId max_xid)
{
Assert(standbyState >= STANDBY_INITIALIZED);

View File

@@ -253,6 +253,6 @@ Size
shm_toc_estimate(shm_toc_estimator *e)
{
return add_size(offsetof(shm_toc, toc_entry),
add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)),
e->space_for_chunks));
add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)),
e->space_for_chunks));
}

View File

@@ -418,8 +418,8 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("could not create ShmemIndex entry for data structure \"%s\"",
name)));
errmsg("could not create ShmemIndex entry for data structure \"%s\"",
name)));
}
if (*foundPtr)
@@ -433,9 +433,9 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
{
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errmsg("ShmemIndex entry size is wrong for data structure"
" \"%s\": expected %zu, actual %zu",
name, size, result->size)));
(errmsg("ShmemIndex entry size is wrong for data structure"
" \"%s\": expected %zu, actual %zu",
name, size, result->size)));
}
structPtr = result->location;
}

View File

@@ -69,7 +69,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
*/
void
ReceiveSharedInvalidMessages(
void (*invalFunction) (SharedInvalidationMessage *msg),
void (*invalFunction) (SharedInvalidationMessage *msg),
void (*resetFunction) (void))
{
#define MAXINVALMSGS 32

View File

@@ -284,7 +284,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode
node.dbNode);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
}
void
@@ -312,7 +312,7 @@ ResolveRecoveryConflictWithTablespace(Oid tsid)
temp_file_users = GetConflictingVirtualXIDs(InvalidTransactionId,
InvalidOid);
ResolveRecoveryConflictWithVirtualXIDs(temp_file_users,
PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
}
void
@@ -376,7 +376,7 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag)
backends = GetLockConflicts(&locktag, AccessExclusiveLock);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_LOCK);
PROCSIG_RECOVERY_CONFLICT_LOCK);
}
else
{
@@ -529,7 +529,7 @@ CheckRecoveryConflictDeadlock(void)
ereport(ERROR,
(errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
errmsg("canceling statement due to conflict with recovery"),
errdetail("User transaction caused buffer deadlock with recovery.")));
errdetail("User transaction caused buffer deadlock with recovery.")));
}
@@ -986,7 +986,7 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
/* array of TransactionIds */
if (xlrec.xcnt > 0)
XLogRegisterData((char *) CurrRunningXacts->xids,
(xlrec.xcnt + xlrec.subxcnt) * sizeof(TransactionId));
(xlrec.xcnt + xlrec.subxcnt) * sizeof(TransactionId));
recptr = XLogInsert(RM_STANDBY_ID, XLOG_RUNNING_XACTS);

View File

@@ -445,8 +445,8 @@ inv_seek(LargeObjectDesc *obj_desc, int64 offset, int whence)
if (newoffset < 0 || newoffset > MAX_LARGE_OBJECT_SIZE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg_internal("invalid large object seek target: " INT64_FORMAT,
newoffset)));
errmsg_internal("invalid large object seek target: " INT64_FORMAT,
newoffset)));
obj_desc->offset = newoffset;
return newoffset;

View File

@@ -527,8 +527,8 @@ FindLockCycleRecurse(PGPROC *checkProc,
if (memberProc->links.next != NULL && memberProc->waitLock != NULL &&
memberProc != checkProc &&
FindLockCycleRecurseMember(memberProc, checkProc, depth, softEdges,
nSoftEdges))
FindLockCycleRecurseMember(memberProc, checkProc, depth, softEdges,
nSoftEdges))
return true;
}
@@ -1030,7 +1030,7 @@ TopoSort(LOCK *lock,
for (c = 0; c <= last; ++c)
{
if (topoProcs[c] == proc || (topoProcs[c] != NULL &&
topoProcs[c]->lockGroupLeader == proc))
topoProcs[c]->lockGroupLeader == proc))
{
ordering[i - nmatches] = topoProcs[c];
topoProcs[c] = NULL;
@@ -1106,7 +1106,7 @@ DeadLockReport(void)
appendStringInfoChar(&clientbuf, '\n');
appendStringInfo(&clientbuf,
_("Process %d waits for %s on %s; blocked by process %d."),
_("Process %d waits for %s on %s; blocked by process %d."),
info->pid,
GetLockmodeName(info->locktag.locktag_lockmethodid,
info->lockmode),
@@ -1127,7 +1127,7 @@ DeadLockReport(void)
appendStringInfo(&logbuf,
_("Process %d: %s"),
info->pid,
pgstat_get_backend_current_activity(info->pid, false));
pgstat_get_backend_current_activity(info->pid, false));
}
pgstat_report_deadlock();

View File

@@ -401,7 +401,7 @@ InitLocks(void)
init_table_size,
max_table_size,
&info,
HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
/* Assume an average of 2 holders per lock */
max_table_size *= 2;
@@ -420,7 +420,7 @@ InitLocks(void)
init_table_size,
max_table_size,
&info,
HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
/*
* Allocate fast-path structures.
@@ -772,7 +772,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
locallock->lockOwners = NULL; /* in case next line fails */
locallock->lockOwners = (LOCALLOCKOWNER *)
MemoryContextAlloc(TopMemoryContext,
locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
}
else
{
@@ -2207,7 +2207,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
offsetof(PROCLOCK, procLink));
offsetof(PROCLOCK, procLink));
proclock;
proclock = nextplock)
{
@@ -2605,7 +2605,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
/* Find or create lock object. */
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
++lockmode)
{
PROCLOCK *proclock;
@@ -2772,7 +2772,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
if (vxids == NULL)
vxids = (VirtualTransactionId *)
MemoryContextAlloc(TopMemoryContext,
sizeof(VirtualTransactionId) * (MaxBackends + 1));
sizeof(VirtualTransactionId) * (MaxBackends + 1));
}
else
vxids = (VirtualTransactionId *)
@@ -3270,7 +3270,7 @@ PostPrepare_Locks(TransactionId xid)
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
offsetof(PROCLOCK, procLink));
offsetof(PROCLOCK, procLink));
proclock;
proclock = nextplock)
{
@@ -4002,7 +4002,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase max_locks_per_transaction.")));
errhint("You might need to increase max_locks_per_transaction.")));
}
/*
@@ -4067,7 +4067,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase max_locks_per_transaction.")));
errhint("You might need to increase max_locks_per_transaction.")));
}
/*
@@ -4156,8 +4156,8 @@ lock_twophase_standby_recover(TransactionId xid, uint16 info,
locktag->locktag_type == LOCKTAG_RELATION)
{
StandbyAcquireAccessExclusiveLock(xid,
locktag->locktag_field1 /* dboid */ ,
locktag->locktag_field2 /* reloid */ );
locktag->locktag_field1 /* dboid */ ,
locktag->locktag_field2 /* reloid */ );
}
}

View File

@@ -1474,7 +1474,7 @@ SummarizeOldestCommittedSxact(void)
/* Add to SLRU summary information. */
if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
OldSerXidAdd(sxact->topXid, SxactHasConflictOut(sxact)
? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);
? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);
/* Summarize and release the detail. */
ReleaseOneSerializableXact(sxact, false, true);
@@ -1754,8 +1754,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not import the requested snapshot"),
errdetail("The source process with pid %d is not running anymore.",
sourcepid)));
errdetail("The source process with pid %d is not running anymore.",
sourcepid)));
}
/*
@@ -1987,17 +1987,17 @@ GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
case PREDLOCKTAG_PAGE:
/* parent lock is relation lock */
SET_PREDICATELOCKTARGETTAG_RELATION(*parent,
GET_PREDICATELOCKTARGETTAG_DB(*tag),
GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
GET_PREDICATELOCKTARGETTAG_DB(*tag),
GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
return true;
case PREDLOCKTAG_TUPLE:
/* parent lock is page lock */
SET_PREDICATELOCKTARGETTAG_PAGE(*parent,
GET_PREDICATELOCKTARGETTAG_DB(*tag),
GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
GET_PREDICATELOCKTARGETTAG_DB(*tag),
GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
return true;
}
@@ -2393,7 +2393,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
locktag.myXact = sxact;
lock = (PREDICATELOCK *)
hash_search_with_hash_value(PredicateLockHash, &locktag,
PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
HASH_ENTER_NULL, &found);
if (!lock)
ereport(ERROR,
@@ -2774,8 +2774,8 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
hash_search_with_hash_value
(PredicateLockHash,
&oldpredlock->tag,
PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
oldtargettaghash),
PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
oldtargettaghash),
HASH_REMOVE, &found);
Assert(found);
}
@@ -2783,8 +2783,8 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
newpredlock = (PREDICATELOCK *)
hash_search_with_hash_value(PredicateLockHash,
&newpredlocktag,
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
newtargettaghash),
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
newtargettaghash),
HASH_ENTER_NULL,
&found);
if (!newpredlock)
@@ -3024,8 +3024,8 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
newpredlock = (PREDICATELOCK *)
hash_search_with_hash_value(PredicateLockHash,
&newpredlocktag,
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
heaptargettaghash),
PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
heaptargettaghash),
HASH_ENTER,
&found);
if (!found)
@@ -3605,7 +3605,7 @@ ClearOldPredicateLocks(void)
LWLockAcquire(SerializableXactHashLock, LW_SHARED);
}
else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
&& finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
&& finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
{
/*
* Any active transactions that took their snapshot before this
@@ -3690,8 +3690,8 @@ ClearOldPredicateLocks(void)
SHMQueueDelete(&(predlock->xactLink));
hash_search_with_hash_value(PredicateLockHash, &tag,
PredicateLockHashCodeFromTargetHashCode(&tag,
targettaghash),
PredicateLockHashCodeFromTargetHashCode(&tag,
targettaghash),
HASH_REMOVE, NULL);
RemoveTargetIfNoLongerUsed(target, targettaghash);
@@ -3774,8 +3774,8 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
SHMQueueDelete(targetLink);
hash_search_with_hash_value(PredicateLockHash, &tag,
PredicateLockHashCodeFromTargetHashCode(&tag,
targettaghash),
PredicateLockHashCodeFromTargetHashCode(&tag,
targettaghash),
HASH_REMOVE, NULL);
if (summarize)
{
@@ -3784,8 +3784,8 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
/* Fold into dummy transaction list. */
tag.myXact = OldCommittedSxact;
predlock = hash_search_with_hash_value(PredicateLockHash, &tag,
PredicateLockHashCodeFromTargetHashCode(&tag,
targettaghash),
PredicateLockHashCodeFromTargetHashCode(&tag,
targettaghash),
HASH_ENTER_NULL, &found);
if (!predlock)
ereport(ERROR,
@@ -4036,7 +4036,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to read/write dependencies among transactions"),
errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
errhint("The transaction might succeed if retried.")));
errhint("The transaction might succeed if retried.")));
if (SxactHasSummaryConflictIn(MySerializableXact)
|| !SHMQueueEmpty(&MySerializableXact->inConflicts))
@@ -4044,7 +4044,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to read/write dependencies among transactions"),
errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
errhint("The transaction might succeed if retried.")));
errhint("The transaction might succeed if retried.")));
MySerializableXact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
}
@@ -4344,8 +4344,8 @@ CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
relation->rd_node.dbNode,
relation->rd_id,
ItemPointerGetBlockNumber(&(tuple->t_self)),
ItemPointerGetOffsetNumber(&(tuple->t_self)));
ItemPointerGetBlockNumber(&(tuple->t_self)),
ItemPointerGetOffsetNumber(&(tuple->t_self)));
CheckTargetForConflictsIn(&targettag);
}
@@ -4460,7 +4460,7 @@ CheckTableForSerializableConflictIn(Relation relation)
offsetof(PREDICATELOCK, targetLink));
if (predlock->tag.myXact != MySerializableXact
&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
{
FlagRWConflict(predlock->tag.myXact, MySerializableXact);
}
@@ -4541,7 +4541,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
*------------------------------------------------------------------------
*/
if (SxactIsCommitted(writer)
&& (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
&& (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
failure = true;
/*------------------------------------------------------------------------
@@ -4585,7 +4585,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(writer)
|| t2->prepareSeqNo <= writer->commitSeqNo)
&& (!SxactIsReadOnly(reader)
|| t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
|| t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
{
failure = true;
break;
@@ -4630,7 +4630,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(t0)
|| t0->commitSeqNo >= writer->prepareSeqNo)
&& (!SxactIsReadOnly(t0)
|| t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
|| t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
{
failure = true;
break;

View File

@@ -1299,8 +1299,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
appendStringInfo(&logbuf,
_("Process %d waits for %s on %s."),
MyProcPid,
GetLockmodeName(lock->tag.locktag_lockmethodid,
lockmode),
GetLockmodeName(lock->tag.locktag_lockmethodid,
lockmode),
locktagbuf.data);
/* release lock as quickly as possible */
@@ -1308,9 +1308,9 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/* send the autovacuum worker Back to Old Kent Road */
ereport(DEBUG1,
(errmsg("sending cancel to blocking autovacuum PID %d",
pid),
errdetail_log("%s", logbuf.data)));
(errmsg("sending cancel to blocking autovacuum PID %d",
pid),
errdetail_log("%s", logbuf.data)));
if (kill(pid, SIGINT) < 0)
{
@@ -1326,8 +1326,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
*/
if (errno != ESRCH)
ereport(WARNING,
(errmsg("could not send signal to process %d: %m",
pid)));
(errmsg("could not send signal to process %d: %m",
pid)));
}
pfree(logbuf.data);
@@ -1385,7 +1385,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
procLocks = &(lock->procLocks);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
offsetof(PROCLOCK, lockLink));
offsetof(PROCLOCK, lockLink));
while (proclock)
{
@@ -1421,7 +1421,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
}
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
offsetof(PROCLOCK, lockLink));
offsetof(PROCLOCK, lockLink));
}
LWLockRelease(partitionLock);
@@ -1431,7 +1431,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
(errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
else if (deadlock_state == DS_HARD_DEADLOCK)
{
@@ -1446,7 +1446,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
(errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
}
@@ -1455,12 +1455,12 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
(errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
else if (myWaitStatus == STATUS_OK)
ereport(LOG,
(errmsg("process %d acquired %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs)));
(errmsg("process %d acquired %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs)));
else
{
Assert(myWaitStatus == STATUS_ERROR);
@@ -1476,9 +1476,9 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (deadlock_state != DS_HARD_DEADLOCK)
ereport(LOG,
(errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
}

View File

@@ -145,7 +145,7 @@ perform_spin_delay(SpinDelayStatus *status)
/* increase delay by a random fraction between 1X and 2X */
status->cur_delay += (int) (status->cur_delay *
((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (status->cur_delay > MAX_DELAY_USEC)
status->cur_delay = MIN_DELAY_USEC;

View File

@@ -557,8 +557,8 @@ PageRepairFragmentation(Page page)
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted item lengths: total %u, available space %u",
(unsigned int) totallen, pd_special - pd_lower)));
errmsg("corrupted item lengths: total %u, available space %u",
(unsigned int) totallen, pd_special - pd_lower)));
compactify_tuples(itemidbase, nstorage, page);
}
@@ -902,8 +902,8 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
offset != MAXALIGN(offset))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted item pointer: offset = %u, length = %u",
offset, (unsigned int) size)));
errmsg("corrupted item pointer: offset = %u, length = %u",
offset, (unsigned int) size)));
if (nextitm < nitems && offnum == itemnos[nextitm])
{
@@ -929,8 +929,8 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted item lengths: total %u, available space %u",
(unsigned int) totallen, pd_special - pd_lower)));
errmsg("corrupted item lengths: total %u, available space %u",
(unsigned int) totallen, pd_special - pd_lower)));
/*
* Looks good. Overwrite the line pointers with the copy, from which we've

View File

@@ -472,7 +472,7 @@ mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
if (errno != ENOENT)
ereport(WARNING,
(errcode_for_file_access(),
errmsg("could not remove file \"%s\": %m", segpath)));
errmsg("could not remove file \"%s\": %m", segpath)));
break;
}
}
@@ -997,9 +997,9 @@ mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not truncate file \"%s\" to %u blocks: %m",
FilePathName(v->mdfd_vfd),
nblocks)));
errmsg("could not truncate file \"%s\" to %u blocks: %m",
FilePathName(v->mdfd_vfd),
nblocks)));
if (!SmgrIsTemp(reln))
register_dirty_segment(reln, forknum, v);
}
@@ -1225,7 +1225,7 @@ mdsync(void)
/* Attempt to open and fsync the target segment */
seg = _mdfd_getseg(reln, forknum,
(BlockNumber) segno * (BlockNumber) RELSEG_SIZE,
(BlockNumber) segno * (BlockNumber) RELSEG_SIZE,
false,
EXTENSION_RETURN_NULL
| EXTENSION_DONT_CHECK_SIZE);
@@ -1233,7 +1233,7 @@ mdsync(void)
INSTR_TIME_SET_CURRENT(sync_start);
if (seg != NULL &&
FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) >= 0)
FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) >= 0)
{
/* Success; update statistics about sync timing */
INSTR_TIME_SET_CURRENT(sync_end);
@@ -1279,8 +1279,8 @@ mdsync(void)
else
ereport(DEBUG1,
(errcode_for_file_access(),
errmsg("could not fsync file \"%s\" but retrying: %m",
path)));
errmsg("could not fsync file \"%s\" but retrying: %m",
path)));
pfree(path);
/*
@@ -1925,9 +1925,9 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
return NULL;
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open file \"%s\" (target block %u): %m",
_mdfd_segpath(reln, forknum, nextsegno),
blkno)));
errmsg("could not open file \"%s\" (target block %u): %m",
_mdfd_segpath(reln, forknum, nextsegno),
blkno)));
}
}

View File

@@ -46,13 +46,13 @@ typedef struct f_smgr
void (*smgr_unlink) (RelFileNodeBackend rnode, ForkNumber forknum,
bool isRedo);
void (*smgr_extend) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
BlockNumber blocknum, char *buffer, bool skipFsync);
void (*smgr_prefetch) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum);
void (*smgr_read) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer);
void (*smgr_write) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
BlockNumber blocknum, char *buffer, bool skipFsync);
void (*smgr_writeback) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, BlockNumber nblocks);
BlockNumber (*smgr_nblocks) (SMgrRelation reln, ForkNumber forknum);