
pgindent run for 9.6

Author: Robert Haas
Date:   2016-06-09 18:02:36 -04:00
parent 9164deea2f
commit 4bc424b968
252 changed files with 2670 additions and 2558 deletions
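
Nearly every hunk below is one of two mechanical transformations that pgindent applies: re-filling multi-line comments toward the 78-column limit, and re-aligning variable declarations on tab stops. A minimal, self-contained C sketch of the before/after shapes (the stub typedefs are assumptions for illustration, not PostgreSQL's real definitions):

/* pgindent_demo.c -- illustration only */
typedef int Buffer;               /* stub standing in for a PostgreSQL type */
typedef unsigned int BlockNumber; /* likewise a stub */

static void
before_pgindent(void)
{
	/*
	 * We would like to prevent concurrent cleanup process. For
	 * that we will lock metapage in exclusive mode using LockPage()
	 * call.
	 */
	Buffer buffer = 0;            /* declarations not column-aligned */
	BlockNumber regpgno = 0;

	(void) buffer;
	(void) regpgno;
}

static void
after_pgindent(void)
{
	/*
	 * We would like to prevent concurrent cleanup process. For that we
	 * will lock metapage in exclusive mode using LockPage() call.
	 */
	Buffer		buffer = 0;       /* names aligned on a common column */
	BlockNumber regpgno = 0;

	(void) buffer;
	(void) regpgno;
}

int
main(void)
{
	before_pgindent();
	after_pgindent();
	return 0;
}

Because this rendering drops leading whitespace, hunks that change only indentation appear below as identical "-"/"+" line pairs.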

View File

@@ -47,7 +47,7 @@ brin_xlog_insert_update(XLogReaderState *record,
{
XLogRecPtr lsn = record->EndRecPtr;
Buffer buffer;
- BlockNumber regpgno;
+ BlockNumber regpgno;
Page page;
XLogRedoAction action;

View File

@@ -101,7 +101,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs table pages only to this percentage",
RELOPT_KIND_HEAP,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
},
@@ -110,7 +111,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs btree index pages only to this percentage",
RELOPT_KIND_BTREE,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
},
@@ -119,7 +121,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs hash index pages only to this percentage",
RELOPT_KIND_HASH,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
},
@@ -128,7 +131,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs gist index pages only to this percentage",
RELOPT_KIND_GIST,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
},
@@ -137,7 +141,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs spgist index pages only to this percentage",
RELOPT_KIND_SPGIST,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
},
@@ -1475,8 +1480,8 @@ tablespace_reloptions(Datum reloptions, bool validate)
LOCKMODE
AlterTableGetRelOptionsLockLevel(List *defList)
{
- LOCKMODE lockmode = NoLock;
- ListCell *cell;
+ LOCKMODE lockmode = NoLock;
+ ListCell *cell;
if (defList == NIL)
return AccessExclusiveLock;
@@ -1486,8 +1491,8 @@ AlterTableGetRelOptionsLockLevel(List *defList)
foreach(cell, defList)
{
- DefElem *def = (DefElem *) lfirst(cell);
- int i;
+ DefElem *def = (DefElem *) lfirst(cell);
+ int i;
for (i = 0; relOpts[i]; i++)
{

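In practice the ShareUpdateExclusiveLock entries above mean that, because fillfactor only affects how full later inserts pack a page, ALTER TABLE ... SET (fillfactor = 70) can proceed without blocking concurrent reads and writes. AlterTableGetRelOptionsLockLevel returns AccessExclusiveLock when defList is NIL, as the visible code shows; the loop over relOpts (truncated here) presumably picks the strongest lock level required by any option named in the list.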
View File

@@ -524,7 +524,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
int64 nDeletedHeapTuples = 0;
ginxlogDeleteListPages data;
Buffer buffers[GIN_NDELETE_AT_ONCE];
- BlockNumber freespace[GIN_NDELETE_AT_ONCE];
+ BlockNumber freespace[GIN_NDELETE_AT_ONCE];
data.ndeleted = 0;
while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
@@ -745,30 +745,29 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
bool inVacuum = (stats == NULL);
/*
- * We would like to prevent concurrent cleanup process. For
- * that we will lock metapage in exclusive mode using LockPage()
- * call. Nobody other will use that lock for metapage, so
- * we keep possibility of concurrent insertion into pending list
+ * We would like to prevent concurrent cleanup process. For that we will
+ * lock metapage in exclusive mode using LockPage() call. Nobody other
+ * will use that lock for metapage, so we keep possibility of concurrent
+ * insertion into pending list
*/
if (inVacuum)
{
/*
- * We are called from [auto]vacuum/analyze or
- * gin_clean_pending_list() and we would like to wait
- * concurrent cleanup to finish.
+ * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
+ * and we would like to wait concurrent cleanup to finish.
*/
LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
workMemory =
(IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ?
- autovacuum_work_mem : maintenance_work_mem;
+ autovacuum_work_mem : maintenance_work_mem;
}
else
{
/*
- * We are called from regular insert and if we see
- * concurrent cleanup just exit in hope that concurrent
- * process will clean up pending list.
+ * We are called from regular insert and if we see concurrent cleanup
+ * just exit in hope that concurrent process will clean up pending
+ * list.
*/
if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
return;
@@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
Assert(!GinPageIsDeleted(page));
/*
- * Are we walk through the page which as we remember was a tail when we
- * start our cleanup? But if caller asks us to clean up whole pending
- * list then ignore old tail, we will work until list becomes empty.
+ * Are we walk through the page which as we remember was a tail when
+ * we start our cleanup? But if caller asks us to clean up whole
+ * pending list then ignore old tail, we will work until list becomes
+ * empty.
*/
if (blkno == blknoFinish && full_clean == false)
cleanupFinish = true;
@@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
* locking */
/*
- * remove read pages from pending list, at this point all
- * content of read pages is in regular structure
+ * remove read pages from pending list, at this point all content
+ * of read pages is in regular structure
*/
shiftList(index, metabuffer, blkno, fill_fsm, stats);
@@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
ReleaseBuffer(metabuffer);
/*
- * As pending list pages can have a high churn rate, it is
- * desirable to recycle them immediately to the FreeSpace Map when
- * ordinary backends clean the list.
+ * As pending list pages can have a high churn rate, it is desirable to
+ * recycle them immediately to the FreeSpace Map when ordinary backends
+ * clean the list.
*/
if (fsm_vac && fill_fsm)
IndexFreeSpaceMapVacuum(index);
@@ -989,7 +989,7 @@ gin_clean_pending_list(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("recovery is in progress"),
- errhint("GIN pending list cannot be cleaned up during recovery.")));
+ errhint("GIN pending list cannot be cleaned up during recovery.")));
/* Must be a GIN index */
if (indexRel->rd_rel->relkind != RELKIND_INDEX ||

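The two lock calls visible above encode the policy the reflowed comments describe: cleanup driven by [auto]vacuum, analyze, or gin_clean_pending_list() waits on LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock) until any concurrent cleanup finishes, while cleanup piggy-backed on a regular insert uses ConditionalLockPage() and simply returns when the lock is already taken, leaving the pending list to whichever backend holds it.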
View File

@@ -281,7 +281,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
&htup->t_self);
/* If we've maxed out our available memory, dump everything to the index */
- if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L)
+ if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
{
ItemPointerData *list;
Datum key;

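The guard above works because maintenance_work_mem is a kB-valued setting, so multiplying by 1024L yields bytes: with maintenance_work_mem = 65536 (64MB), accumulated GIN entries are dumped to the index once they occupy 64 MB.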
View File

@@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
{
/* Yes, so initialize stats to zeroes */
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
/*
- * and cleanup any pending inserts */
+ * and cleanup any pending inserts
+ */
ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
false, stats);
}

View File

@@ -1498,8 +1498,9 @@ static void
gistvacuumpage(Relation rel, Page page, Buffer buffer)
{
OffsetNumber deletable[MaxIndexTuplesPerPage];
- int ndeletable = 0;
- OffsetNumber offnum, maxoff;
+ int ndeletable = 0;
+ OffsetNumber offnum,
+ maxoff;
Assert(GistPageIsLeaf(page));

View File

@@ -36,13 +36,13 @@
static void
gistkillitems(IndexScanDesc scan)
{
- GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId iid;
- int i;
- bool killedsomething = false;
+ GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId iid;
+ int i;
+ bool killedsomething = false;
Assert(so->curBlkno != InvalidBlockNumber);
Assert(!XLogRecPtrIsInvalid(so->curPageLSN));
@@ -57,21 +57,22 @@ gistkillitems(IndexScanDesc scan)
page = BufferGetPage(buffer);
/*
- * If page LSN differs it means that the page was modified since the last read.
- * killedItems could be not valid so LP_DEAD hints applying is not safe.
+ * If page LSN differs it means that the page was modified since the last
+ * read. killedItems could be not valid so LP_DEAD hints applying is not
+ * safe.
*/
- if(PageGetLSN(page) != so->curPageLSN)
+ if (PageGetLSN(page) != so->curPageLSN)
{
UnlockReleaseBuffer(buffer);
- so->numKilled = 0; /* reset counter */
+ so->numKilled = 0; /* reset counter */
return;
}
Assert(GistPageIsLeaf(page));
/*
- * Mark all killedItems as dead. We need no additional recheck,
- * because, if page was modified, pageLSN must have changed.
+ * Mark all killedItems as dead. We need no additional recheck, because,
+ * if page was modified, pageLSN must have changed.
*/
for (i = 0; i < so->numKilled; i++)
{
@@ -390,7 +391,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- ItemId iid = PageGetItemId(page, i);
+ ItemId iid = PageGetItemId(page, i);
IndexTuple it;
bool match;
bool recheck;
@@ -400,10 +401,11 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
* If the scan specifies not to return killed tuples, then we treat a
* killed tuple as not passing the qual.
*/
- if(scan->ignore_killed_tuples && ItemIdIsDead(iid))
+ if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
continue;
it = (IndexTuple) PageGetItem(page, iid);
/*
* Must call gistindex_keytest in tempCxt, and clean up any leftover
* junk afterward.
@@ -665,11 +667,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
- * sizeof(OffsetNumber));
+ * sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
@@ -702,11 +704,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
- * sizeof(OffsetNumber));
+ * sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}

View File

@@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
ScanKey skey = scan->keyData + i;
/*
- * Copy consistent support function to ScanKey structure
- * instead of function implementing filtering operator.
+ * Copy consistent support function to ScanKey structure instead
+ * of function implementing filtering operator.
*/
fmgr_info_copy(&(skey->sk_func),
&(so->giststate->consistentFn[skey->sk_attno - 1]),
@@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
/*
- * Copy distance support function to ScanKey structure
- * instead of function implementing ordering operator.
+ * Copy distance support function to ScanKey structure instead of
+ * function implementing ordering operator.
*/
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);

View File

@@ -1687,7 +1687,7 @@ heap_parallelscan_nextpage(HeapScanDesc scan)
{
BlockNumber page = InvalidBlockNumber;
BlockNumber sync_startpage = InvalidBlockNumber;
- BlockNumber report_page = InvalidBlockNumber;
+ BlockNumber report_page = InvalidBlockNumber;
ParallelHeapScanDesc parallel_scan;
Assert(scan->rs_parallel);

View File

@@ -178,7 +178,7 @@ static void
RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
{
Page page;
- BlockNumber blockNum = InvalidBlockNumber,
+ BlockNumber blockNum = InvalidBlockNumber,
firstBlock = InvalidBlockNumber;
int extraBlocks = 0;
int lockWaiters = 0;
@@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
return;
/*
- * It might seem like multiplying the number of lock waiters by as much
- * as 20 is too aggressive, but benchmarking revealed that smaller numbers
- * were insufficient. 512 is just an arbitrary cap to prevent pathological
- * results.
+ * It might seem like multiplying the number of lock waiters by as much as
+ * 20 is too aggressive, but benchmarking revealed that smaller numbers
+ * were insufficient. 512 is just an arbitrary cap to prevent
+ * pathological results.
*/
extraBlocks = Min(512, lockWaiters * 20);
@@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
}
/*
- * Updating the upper levels of the free space map is too expensive
- * to do for every block, but it's worth doing once at the end to make
- * sure that subsequent insertion activity sees all of those nifty free
- * pages we just inserted.
+ * Updating the upper levels of the free space map is too expensive to do
+ * for every block, but it's worth doing once at the end to make sure that
+ * subsequent insertion activity sees all of those nifty free pages we
+ * just inserted.
*
* Note that we're using the freespace value that was reported for the
* last block we added as if it were the freespace value for every block
@@ -547,8 +547,8 @@ loop:
}
/*
- * In addition to whatever extension we performed above, we always add
- * at least one block to satisfy our own request.
+ * In addition to whatever extension we performed above, we always add at
+ * least one block to satisfy our own request.
*
* XXX This does an lseek - rather expensive - but at the moment it is the
* only way to accurately determine how many blocks are in a relation. Is

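Worked through, the heuristic extends the relation by Min(512, 20 * lockWaiters) blocks: 10 waiters yield 200 extra blocks, and the arbitrary 512-block cap takes over once 26 or more backends are waiting.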
View File

@@ -105,8 +105,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
OldestXmin = RecentGlobalXmin;
else
OldestXmin =
- TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
- relation);
+ TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
+ relation);
Assert(TransactionIdIsValid(OldestXmin));

View File

@@ -272,7 +272,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
Page page;
- uint8 *map;
+ uint8 *map;
#ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
@@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
page = BufferGetPage(vmBuf);
- map = (uint8 *)PageGetContents(page);
+ map = (uint8 *) PageGetContents(page);
LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))

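The mapByte/mapOffset arithmetic above addresses individual status bits inside the map page. A hedged sketch of that addressing, with the 9.6 constants (two status bits per heap block, so four heap blocks per map byte) restated here as assumptions, and the map-block indirection of the real HEAPBLK_TO_MAPBYTE macro omitted for brevity:

#include <stdio.h>

#define BITS_PER_HEAPBLOCK			2	/* all-visible bit + all-frozen bit */
#define HEAPBLOCKS_PER_BYTE			4	/* 8 bits / 2 bits per block */
#define VISIBILITYMAP_VALID_BITS	0x03

int
main(void)
{
	unsigned int heapBlk = 42;	/* arbitrary heap block number */
	unsigned int mapByte = heapBlk / HEAPBLOCKS_PER_BYTE;
	unsigned int mapOffset = (heapBlk % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK;
	unsigned char map[16] = {0};	/* toy stand-in for the map page contents */

	/* set both bits for heapBlk, then read them back as visibilitymap_set does */
	map[mapByte] |= (unsigned char) (VISIBILITYMAP_VALID_BITS << mapOffset);
	printf("block %u -> byte %u, offset %u, flags 0x%x\n",
		   heapBlk, mapByte, mapOffset,
		   (unsigned) (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS));
	return 0;
}

For heapBlk = 42 this prints "block 42 -> byte 10, offset 4, flags 0x3".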
View File

@@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* Check for a conflict-in as we would if we were going to
* write to this page. We aren't actually going to write,
* but we want a chance to report SSI conflicts that would
- * otherwise be masked by this unique constraint violation.
+ * otherwise be masked by this unique constraint
+ * violation.
*/
CheckForSerializableConflictIn(rel, NULL, buf);

View File

@@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/*
* Check to see if we need to issue one final WAL record for this index,
- * which may be needed for correctness on a hot standby node when
- * non-MVCC index scans could take place.
+ * which may be needed for correctness on a hot standby node when non-MVCC
+ * index scans could take place.
*
* If the WAL is replayed in hot standby, the replay process needs to get
* cleanup locks on all index leaf pages, just as we've been doing here.
@@ -1025,13 +1025,13 @@ restart:
if (ndeletable > 0)
{
/*
- * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all
- * information to the replay code to allow it to get a cleanup lock
- * on all pages between the previous lastBlockVacuumed and this page.
- * This ensures that WAL replay locks all leaf pages at some point,
- * which is important should non-MVCC scans be requested.
- * This is currently unused on standby, but we record it anyway, so
- * that the WAL contains the required information.
+ * Notice that the issued XLOG_BTREE_VACUUM WAL record includes
+ * all information to the replay code to allow it to get a cleanup
+ * lock on all pages between the previous lastBlockVacuumed and
+ * this page. This ensures that WAL replay locks all leaf pages at
+ * some point, which is important should non-MVCC scans be
+ * requested. This is currently unused on standby, but we record
+ * it anyway, so that the WAL contains the required information.
*
* Since we can visit leaf pages out-of-order when recursing,
* replay might end up locking such pages an extra time, but it

View File

@@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record)
xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
- * This section of code is thought to be no longer needed, after
- * analysis of the calling paths. It is retained to allow the code
- * to be reinstated if a flaw is revealed in that thinking.
+ * This section of code is thought to be no longer needed, after analysis
+ * of the calling paths. It is retained to allow the code to be reinstated
+ * if a flaw is revealed in that thinking.
*
* If we are running non-MVCC scans using this index we need to do some
* additional work to ensure correctness, which is known as a "pin scan"
* described in more detail in next paragraphs. We used to do the extra
- * work in all cases, whereas we now avoid that work in most cases.
- * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
+ * work in all cases, whereas we now avoid that work in most cases. If
+ * lastBlockVacuumed is set to InvalidBlockNumber then we skip the
* additional work required for the pin scan.
*
* Avoiding this extra work is important since it requires us to touch

View File

@@ -29,8 +29,8 @@ generic_desc(StringInfo buf, XLogReaderState *record)
while (ptr < end)
{
- OffsetNumber offset,
- length;
+ OffsetNumber offset,
+ length;
memcpy(&offset, ptr, sizeof(offset));
ptr += sizeof(offset);

View File

@@ -26,7 +26,7 @@ logicalmsg_desc(StringInfo buf, XLogReaderState *record)
xl_logical_message *xlrec = (xl_logical_message *) rec;
appendStringInfo(buf, "%s message size %zu bytes",
- xlrec->transactional ? "transactional" : "nontransactional",
+ xlrec->transactional ? "transactional" : "nontransactional",
xlrec->message_size);
}
}

View File

@@ -100,7 +100,7 @@ standby_desc_invalidations(StringInfo buf,
Oid dbId, Oid tsId,
bool relcacheInitFileInval)
{
- int i;
+ int i;
if (relcacheInitFileInval)
appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",

View File

@@ -205,8 +205,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
if (parsed.nmsgs > 0)
{
standby_desc_invalidations(
- buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
- XactCompletionRelcacheInitFileInval(parsed.xinfo));
+ buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
+ XactCompletionRelcacheInitFileInval(parsed.xinfo));
}
if (XactCompletionForceSyncCommit(parsed.xinfo))

View File

@@ -26,8 +26,8 @@
const struct config_enum_entry wal_level_options[] = {
{"minimal", WAL_LEVEL_MINIMAL, false},
{"replica", WAL_LEVEL_REPLICA, false},
{"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"logical", WAL_LEVEL_LOGICAL, false},
{NULL, 0, false}
};

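As I read config_enum_entry, the third field of each entry marks it hidden: a postgresql.conf still saying wal_level = archive or wal_level = hot_standby is accepted and silently mapped to WAL_LEVEL_REPLICA, but the deprecated spellings are kept out of listings of valid values.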
View File

@@ -92,7 +92,7 @@ typedef struct CommitTimestampShared
{
TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
- bool commitTsActive;
+ bool commitTsActive;
} CommitTimestampShared;
CommitTimestampShared *commitTsShared;
@@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
* No-op if the module is not active.
*
* An unlocked read here is fine, because in a standby (the only place
- * where the flag can change in flight) this routine is only called by
- * the recovery process, which is also the only process which can change
- * the flag.
+ * where the flag can change in flight) this routine is only called by the
+ * recovery process, which is also the only process which can change the
+ * flag.
*/
if (!commitTsShared->commitTsActive)
return;
@@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact)
int pageno;
/*
- * Nothing to do if module not enabled. Note we do an unlocked read of the
- * flag here, which is okay because this routine is only called from
+ * Nothing to do if module not enabled. Note we do an unlocked read of
+ * the flag here, which is okay because this routine is only called from
* GetNewTransactionId, which is never called in a standby.
*/
Assert(!InRecovery);
@@ -855,7 +855,7 @@ AdvanceOldestCommitTsXid(TransactionId oldestXact)
{
LWLockAcquire(CommitTsLock, LW_EXCLUSIVE);
if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId &&
- TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
+ TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
ShmemVariableCache->oldestCommitTsXid = oldestXact;
LWLockRelease(CommitTsLock);
}

View File

@@ -52,9 +52,8 @@ typedef struct
Buffer buffer; /* registered buffer */
int flags; /* flags for this buffer */
int deltaLen; /* space consumed in delta field */
- char *image; /* copy of page image for modification,
- * do not do it in-place to have aligned
- * memory chunk */
+ char *image; /* copy of page image for modification, do not
+ * do it in-place to have aligned memory chunk */
char delta[MAX_DELTA_SIZE]; /* delta between page images */
} PageData;

View File

@@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
char *oldest_datname = get_database_name(oldest_datoid);
/*
- * Immediately kick autovacuum into action as we're already
- * in ERROR territory.
+ * Immediately kick autovacuum into action as we're already in
+ * ERROR territory.
*/
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
@@ -1134,8 +1134,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used",
"database with OID %u must be vacuumed before %d more multixact members are used",
- MultiXactState->offsetStopLimit - nextOffset + nmembers,
- MultiXactState->oldestMultiXactDB,
+ MultiXactState->offsetStopLimit - nextOffset + nmembers,
+ MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers),
errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));

View File

@@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
nworkers = 0;
/*
- * If we are running under serializable isolation, we can't use
- * parallel workers, at least not until somebody enhances that mechanism
- * to be parallel-aware.
+ * If we are running under serializable isolation, we can't use parallel
+ * workers, at least not until somebody enhances that mechanism to be
+ * parallel-aware.
*/
if (IsolationIsSerializable())
nworkers = 0;
@@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt)
}
/*
- * We can't finish transaction commit or abort until all of the
- * workers have exited. This means, in particular, that we can't respond
- * to interrupts at this stage.
+ * We can't finish transaction commit or abort until all of the workers
+ * have exited. This means, in particular, that we can't respond to
+ * interrupts at this stage.
*/
HOLD_INTERRUPTS();
WaitForParallelWorkersToExit(pcxt);
@@ -918,7 +918,7 @@ ParallelWorkerMain(Datum main_arg)
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid magic number in dynamic shared memory segment")));
errmsg("invalid magic number in dynamic shared memory segment")));
/* Look up fixed parallel state. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
@@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg)
*/
/*
- * Join locking group. We must do this before anything that could try
- * to acquire a heavyweight lock, because any heavyweight locks acquired
- * to this point could block either directly against the parallel group
+ * Join locking group. We must do this before anything that could try to
+ * acquire a heavyweight lock, because any heavyweight locks acquired to
+ * this point could block either directly against the parallel group
* leader or against some process which in turn waits for a lock that
* conflicts with the parallel group leader, causing an undetected
* deadlock. (If we can't join the lock group, the leader has gone away,

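Concretely, the serializable-isolation comment above means a session running at SERIALIZABLE gets nworkers forced to 0, so a parallel plan quietly degrades to single-process execution instead of erroring out.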
View File

@@ -152,7 +152,7 @@ SimpleLruShmemSize(int nslots, int nlsns)
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
+ sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
@@ -224,7 +224,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
for (slotno = 0; slotno < nslots; slotno++)
{
LWLockInitialize(&shared->buffer_locks[slotno].lock,
- shared->lwlock_tranche_id);
+ shared->lwlock_tranche_id);
shared->page_buffer[slotno] = ptr;
shared->page_status[slotno] = SLRU_PAGE_EMPTY;

View File

@@ -257,7 +257,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID)
startPage++;
/* must account for wraparound */
if (startPage > TransactionIdToPage(MaxTransactionId))
- startPage=0;
+ startPage = 0;
}
(void) ZeroSUBTRANSPage(startPage);

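For example, once startPage passes the SUBTRANS page that holds MaxTransactionId (0xFFFFFFFF), the fixed line wraps it to page 0 rather than zeroing a page past the end of the addressable range.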
View File

@@ -140,13 +140,13 @@ typedef struct GlobalTransactionData
TimestampTz prepared_at; /* time of preparation */
/*
- * Note that we need to keep track of two LSNs for each GXACT.
- * We keep track of the start LSN because this is the address we must
- * use to read state data back from WAL when committing a prepared GXACT.
- * We keep track of the end LSN because that is the LSN we need to wait
- * for prior to commit.
+ * Note that we need to keep track of two LSNs for each GXACT. We keep
+ * track of the start LSN because this is the address we must use to read
+ * state data back from WAL when committing a prepared GXACT. We keep
+ * track of the end LSN because that is the LSN we need to wait for prior
+ * to commit.
*/
- XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
+ XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
Oid owner; /* ID of user that executed the xact */
@@ -980,7 +980,7 @@ StartPrepare(GlobalTransaction gxact)
hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels);
hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs,
&hdr.initfileinval);
- hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
+ hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
save_state_data(gxact->gid, hdr.gidlen);
@@ -1259,28 +1259,28 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
errdetail("Failed while allocating an XLog reading processor.")));
errdetail("Failed while allocating an XLog reading processor.")));
record = XLogReadRecord(xlogreader, lsn, &errormsg);
if (record == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read two-phase state from xlog at %X/%X",
- (uint32) (lsn >> 32),
- (uint32) lsn)));
+ (uint32) (lsn >> 32),
+ (uint32) lsn)));
if (XLogRecGetRmid(xlogreader) != RM_XACT_ID ||
(XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("expected two-phase state data is not present in xlog at %X/%X",
- (uint32) (lsn >> 32),
- (uint32) lsn)));
+ (uint32) (lsn >> 32),
+ (uint32) lsn)));
if (len != NULL)
*len = XLogRecGetDataLen(xlogreader);
- *buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader));
+ *buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));
XLogReaderFree(xlogreader);
@@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
xid = pgxact->xid;
/*
- * Read and validate 2PC state data.
- * State data will typically be stored in WAL files if the LSN is after the
- * last checkpoint record, or moved to disk if for some reason they have
- * lived for a long time.
+ * Read and validate 2PC state data. State data will typically be stored
+ * in WAL files if the LSN is after the last checkpoint record, or moved
+ * to disk if for some reason they have lived for a long time.
*/
if (gxact->ondisk)
buf = ReadTwoPhaseFile(xid, true);
@@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();
/*
- * We are expecting there to be zero GXACTs that need to be
- * copied to disk, so we perform all I/O while holding
- * TwoPhaseStateLock for simplicity. This prevents any new xacts
- * from preparing while this occurs, which shouldn't be a problem
- * since the presence of long-lived prepared xacts indicates the
- * transaction manager isn't active.
+ * We are expecting there to be zero GXACTs that need to be copied to
+ * disk, so we perform all I/O while holding TwoPhaseStateLock for
+ * simplicity. This prevents any new xacts from preparing while this
+ * occurs, which shouldn't be a problem since the presence of long-lived
+ * prepared xacts indicates the transaction manager isn't active.
*
- * It's also possible to move I/O out of the lock, but on
- * every error we should check whether somebody committed our
- * transaction in different backend. Let's leave this optimisation
- * for future, if somebody will spot that this place cause
- * bottleneck.
+ * It's also possible to move I/O out of the lock, but on every error we
+ * should check whether somebody committed our transaction in different
+ * backend. Let's leave this optimisation for future, if somebody will
+ * spot that this place cause bottleneck.
*
- * Note that it isn't possible for there to be a GXACT with
- * a prepare_end_lsn set prior to the last checkpoint yet
- * is marked invalid, because of the efforts with delayChkpt.
+ * Note that it isn't possible for there to be a GXACT with a
+ * prepare_end_lsn set prior to the last checkpoint yet is marked invalid,
+ * because of the efforts with delayChkpt.
*/
LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
@@ -1633,7 +1630,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
gxact->prepare_end_lsn <= redo_horizon)
{
char *buf;
- int len;
+ int len;
XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len);
RecreateTwoPhaseFile(pgxact->xid, buf, len);
@@ -1920,7 +1917,7 @@ RecoverPreparedTransactions(void)
TwoPhaseFileHeader *hdr;
TransactionId *subxids;
GlobalTransaction gxact;
- const char *gid;
+ const char *gid;
int i;
xid = (TransactionId) strtoul(clde->d_name, NULL, 16);

View File

@@ -1166,19 +1166,19 @@ RecordTransactionCommit(void)
/*
* Transactions without an assigned xid can contain invalidation
* messages (e.g. explicit relcache invalidations or catcache
- * invalidations for inplace updates); standbys need to process
- * those. We can't emit a commit record without an xid, and we don't
- * want to force assigning an xid, because that'd be problematic for
- * e.g. vacuum. Hence we emit a bespoke record for the
- * invalidations. We don't want to use that in case a commit record is
- * emitted, so they happen synchronously with commits (besides not
- * wanting to emit more WAL recoreds).
+ * invalidations for inplace updates); standbys need to process those.
+ * We can't emit a commit record without an xid, and we don't want to
+ * force assigning an xid, because that'd be problematic for e.g.
+ * vacuum. Hence we emit a bespoke record for the invalidations. We
+ * don't want to use that in case a commit record is emitted, so they
+ * happen synchronously with commits (besides not wanting to emit more
+ * WAL recoreds).
*/
if (nmsgs != 0)
{
LogStandbyInvalidations(nmsgs, invalMessages,
RelcacheInitFileInval);
- wrote_xlog = true; /* not strictly necessary */
+ wrote_xlog = true; /* not strictly necessary */
}
/*
@@ -1272,8 +1272,8 @@ RecordTransactionCommit(void)
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
* KnownAssignedXids machinery requires tracking every XID assignment. It
- * might be OK to skip it only when wal_level < replica, but for now
- * we don't.)
+ * might be OK to skip it only when wal_level < replica, but for now we
+ * don't.)
*
* However, if we're doing cleanup of any non-temp rels or committing any
* command that wanted to force sync commit, then we must flush XLOG
@@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
/*
* If asked by the primary (because someone is waiting for a synchronous
- * commit = remote_apply), we will need to ask walreceiver to send a
- * reply immediately.
+ * commit = remote_apply), we will need to ask walreceiver to send a reply
+ * immediately.
*/
if (XactCompletionApplyFeedback(parsed->xinfo))
XLogRequestWalReceiverReply();

View File

@@ -5004,9 +5004,9 @@ readRecoveryCommandFile(void)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target_action",
item->value),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target_action",
item->value),
errhint("Valid values are \"pause\", \"promote\", and \"shutdown\".")));
ereport(DEBUG2,
@@ -5087,9 +5087,9 @@ readRecoveryCommandFile(void)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target",
item->value),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target",
item->value),
errhint("The only allowed value is \"immediate\".")));
ereport(DEBUG2,
(errmsg_internal("recovery_target = '%s'",
@@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void)
}
/*
- * For Hot Standby, the WAL must be generated with 'replica' mode, and
- * we must have at least as many backend slots as the primary.
+ * For Hot Standby, the WAL must be generated with 'replica' mode, and we
+ * must have at least as many backend slots as the primary.
*/
if (ArchiveRecoveryRequested && EnableHotStandby)
{
@@ -6163,26 +6163,26 @@ StartupXLOG(void)
* is no use of such file. There is no harm in retaining it, but it
* is better to get rid of the map file so that we don't have any
* redundant file in data directory and it will avoid any sort of
- * confusion. It seems prudent though to just rename the file out
- * of the way rather than delete it completely, also we ignore any
- * error that occurs in rename operation as even if map file is
- * present without backup_label file, it is harmless.
+ * confusion. It seems prudent though to just rename the file out of
+ * the way rather than delete it completely, also we ignore any error
+ * that occurs in rename operation as even if map file is present
+ * without backup_label file, it is harmless.
*/
if (stat(TABLESPACE_MAP, &st) == 0)
{
unlink(TABLESPACE_MAP_OLD);
if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0)
ereport(LOG,
- (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
- TABLESPACE_MAP, BACKUP_LABEL_FILE),
- errdetail("File \"%s\" was renamed to \"%s\".",
- TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
+ (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
+ TABLESPACE_MAP, BACKUP_LABEL_FILE),
+ errdetail("File \"%s\" was renamed to \"%s\".",
+ TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
else
ereport(LOG,
- (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
- TABLESPACE_MAP, BACKUP_LABEL_FILE),
- errdetail("Could not rename file \"%s\" to \"%s\": %m.",
- TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
+ (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
+ TABLESPACE_MAP, BACKUP_LABEL_FILE),
+ errdetail("Could not rename file \"%s\" to \"%s\": %m.",
+ TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
}
/*
@@ -6314,24 +6314,24 @@ StartupXLOG(void)
ereport(DEBUG1,
(errmsg_internal("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
- wasShutdown ? "TRUE" : "FALSE")));
+ wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg_internal("next transaction ID: %u:%u; next OID: %u",
- checkPoint.nextXidEpoch, checkPoint.nextXid,
- checkPoint.nextOid)));
+ checkPoint.nextXidEpoch, checkPoint.nextXid,
+ checkPoint.nextOid)));
ereport(DEBUG1,
(errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u",
- checkPoint.nextMulti, checkPoint.nextMultiOffset)));
+ checkPoint.nextMulti, checkPoint.nextMultiOffset)));
ereport(DEBUG1,
- (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
- checkPoint.oldestXid, checkPoint.oldestXidDB)));
+ (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
+ checkPoint.oldestXid, checkPoint.oldestXidDB)));
ereport(DEBUG1,
(errmsg_internal("oldest MultiXactId: %u, in database %u",
- checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
+ checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
ereport(DEBUG1,
(errmsg_internal("commit timestamp Xid oldest/newest: %u/%u",
- checkPoint.oldestCommitTsXid,
- checkPoint.newestCommitTsXid)));
+ checkPoint.oldestCommitTsXid,
+ checkPoint.newestCommitTsXid)));
if (!TransactionIdIsNormal(checkPoint.nextXid))
ereport(PANIC,
(errmsg("invalid next transaction ID")));
@@ -6883,8 +6883,8 @@ StartupXLOG(void)
SpinLockRelease(&XLogCtl->info_lck);
/*
- * If rm_redo called XLogRequestWalReceiverReply, then we
- * wake up the receiver so that it notices the updated
+ * If rm_redo called XLogRequestWalReceiverReply, then we wake
+ * up the receiver so that it notices the updated
* lastReplayedEndRecPtr and sends a reply to the master.
*/
if (doRequestWalReceiverReply)

View File

@@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
MemoryContext oldcontext;
/*
- * Label file and tablespace map file need to be long-lived, since they
- * are read in pg_stop_backup.
+ * Label file and tablespace map file need to be long-lived, since
+ * they are read in pg_stop_backup.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
label_file = makeStringInfo();
@@ -113,7 +113,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(oldcontext);
startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file,
- dir, NULL, tblspc_map_file, false, true);
+ dir, NULL, tblspc_map_file, false, true);
nonexclusive_backup_running = true;
before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0);
@@ -138,8 +138,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
* Note: different from CancelBackup which just cancels online backup mode.
*
* Note: this version is only called to stop an exclusive backup. The function
- * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
- * stop non-exclusive backups.
+ * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
+ * stop non-exclusive backups.
*
* Permission checking for this function is managed through the normal
* GRANT system.
@@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS)
errhint("Did you mean to use pg_stop_backup('f')?")));
/*
- * Exclusive backups were typically started in a different connection,
- * so don't try to verify that exclusive_backup_running is set in this one.
- * Actual verification that an exclusive backup is in fact running is handled
- * inside do_pg_stop_backup.
+ * Exclusive backups were typically started in a different connection, so
+ * don't try to verify that exclusive_backup_running is set in this one.
+ * Actual verification that an exclusive backup is in fact running is
+ * handled inside do_pg_stop_backup.
*/
stoppoint = do_pg_stop_backup(NULL, true, NULL);
@@ -182,16 +182,16 @@ pg_stop_backup(PG_FUNCTION_ARGS)
Datum
pg_stop_backup_v2(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- Datum values[3];
- bool nulls[3];
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ Datum values[3];
+ bool nulls[3];
- bool exclusive = PG_GETARG_BOOL(0);
- XLogRecPtr stoppoint;
+ bool exclusive = PG_GETARG_BOOL(0);
+ XLogRecPtr stoppoint;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
errhint("Did you mean to use pg_stop_backup('t')?")));
/*
- * Stop the non-exclusive backup. Return a copy of the backup
- * label and tablespace map so they can be written to disk by
- * the caller.
+ * Stop the non-exclusive backup. Return a copy of the backup label
+ * and tablespace map so they can be written to disk by the caller.
*/
stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
nonexclusive_backup_running = false;
@@ -269,7 +268,7 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
}
/* Stoppoint is included on both exclusive and nonexclusive backups */
- values[0] = LSNGetDatum(stoppoint);
+ values[0] = LSNGetDatum(stoppoint);
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
tuplestore_donestoring(typstore);

View File

@@ -322,7 +322,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
if (total_len < SizeOfXLogRecord)
{
report_invalid_record(state,
"invalid record length at %X/%X: wanted %u, got %u",
"invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, total_len);
goto err;
@@ -621,7 +621,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
if (record->xl_tot_len < SizeOfXLogRecord)
{
report_invalid_record(state,
"invalid record length at %X/%X: wanted %u, got %u",
"invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, record->xl_tot_len);
return false;
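
Both messages above print a WAL position with the %X/%X convention used throughout PostgreSQL: the high and low 32-bit halves of the 64-bit LSN, each in hex. A self-contained illustration (the sample value is made up):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t	RecPtr = UINT64_C(0x00000001A2B3C4D5);	/* made-up LSN */

	/* prints "1/A2B3C4D5", mirroring the (uint32) casts in the hunks above */
	printf("%X/%X\n", (unsigned int) (RecPtr >> 32), (unsigned int) RecPtr);
	return 0;
}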