Mirror of https://github.com/postgres/postgres.git

Run pgindent on 9.2 source tree in preparation for first 9.3 commit-fest.
Bruce Momjian
2012-06-10 15:20:04 -04:00
parent 60801944fa
commit 927d61eeff
494 changed files with 7343 additions and 7046 deletions

src/backend/access/transam/clog.c

@@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
* Testing during the PostgreSQL 9.2 development cycle revealed that on a
* large multi-processor system, it was possible to have more CLOG page
* requests in flight at one time than the number of CLOG buffers which existed
- * at that time, which was hardcoded to 8. Further testing revealed that
+ * at that time, which was hardcoded to 8. Further testing revealed that
* performance dropped off with more than 32 CLOG buffers, possibly because
* the linear buffer search algorithm doesn't scale well.
*
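The comment above is the motivation for 9.2's move from 8 to 32 CLOG buffers, and for stopping there: every page lookup walks the slot array linearly. As a rough standalone sketch of that access pattern (hypothetical names and types, not the actual slru.c code):

/* Each CLOG buffer slot remembers which page it currently holds. */
typedef struct
{
    int     page_number;    /* CLOG page cached in this slot */
    int     lru_count;      /* bumped on access, used for eviction */
} ClogSlot;

/*
 * Linear search, as described above: a miss touches every slot, so the
 * per-lookup cost grows with the buffer count, which is part of why
 * performance dropped off beyond 32 buffers.
 */
int
clog_find_page(ClogSlot *slots, int nslots, int pageno)
{
    for (int i = 0; i < nslots; i++)
    {
        if (slots[i].page_number == pageno)
            return i;
    }
    return -1;              /* miss: caller must evict a slot and read */
}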

src/backend/access/transam/slru.c

@@ -903,12 +903,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
int slotno;
int cur_count;
- int bestvalidslot = 0; /* keep compiler quiet */
+ int bestvalidslot = 0; /* keep compiler quiet */
int best_valid_delta = -1;
- int best_valid_page_number = 0; /* keep compiler quiet */
- int bestinvalidslot = 0; /* keep compiler quiet */
+ int best_valid_page_number = 0; /* keep compiler quiet */
+ int bestinvalidslot = 0; /* keep compiler quiet */
int best_invalid_delta = -1;
- int best_invalid_page_number = 0; /* keep compiler quiet */
+ int best_invalid_page_number = 0; /* keep compiler quiet */
/* See if page already has a buffer assigned */
for (slotno = 0; slotno < shared->num_slots; slotno++)
@@ -920,15 +920,15 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If we find any EMPTY slot, just select that one. Else choose a
- * victim page to replace. We normally take the least recently used
+ * victim page to replace. We normally take the least recently used
* valid page, but we will never take the slot containing
- * latest_page_number, even if it appears least recently used. We
+ * latest_page_number, even if it appears least recently used. We
* will select a slot that is already I/O busy only if there is no
* other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is
* inferior to just picking some other slot. Testing shows the slot
- * we pick instead will often be clean, allowing us to begin a read
- * at once.
+ * we pick instead will often be clean, allowing us to begin a read at
+ * once.
*
* Normally the page_lru_count values will all be different and so
* there will be a well-defined LRU page. But since we allow
@@ -997,10 +997,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If all pages (except possibly the latest one) are I/O busy, we'll
- * have to wait for an I/O to complete and then retry. In that unhappy
- * case, we choose to wait for the I/O on the least recently used slot,
- * on the assumption that it was likely initiated first of all the I/Os
- * in progress and may therefore finish first.
+ * have to wait for an I/O to complete and then retry. In that
+ * unhappy case, we choose to wait for the I/O on the least recently
+ * used slot, on the assumption that it was likely initiated first of
+ * all the I/Os in progress and may therefore finish first.
*/
if (best_valid_delta < 0)
{
@@ -1168,20 +1168,20 @@ restart:;
/*
* SlruScanDirectory callback
- * This callback reports true if there's any segment prior to the one
- * containing the page passed as "data".
+ * This callback reports true if there's any segment prior to the one
+ * containing the page passed as "data".
*/
bool
SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
{
- int cutoffPage = *(int *) data;
+ int cutoffPage = *(int *) data;
cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
if (ctl->PagePrecedes(segpage, cutoffPage))
- return true; /* found one; don't iterate any more */
+ return true; /* found one; don't iterate any more */
- return false; /* keep going */
+ return false; /* keep going */
}
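The two return statements above are the entire callback contract: returning true stops the directory scan early, returning false keeps it going. A minimal standalone model of that contract (simplified signature; the real callbacks also receive the SlruCtl, and PagePrecedes is more subtle than a plain comparison):

#include <stdbool.h>

typedef bool (*ScanCallback) (const char *filename, int segpage, void *data);

/* Returns true if some callback asked to stop, false if we scanned all. */
bool
scan_segments(const char **files, const int *segpages, int n,
              ScanCallback cb, void *data)
{
    for (int i = 0; i < n; i++)
    {
        if (cb(files[i], segpages[i], data))
            return true;    /* found one; don't iterate any more */
    }
    return false;           /* keep going; ran out of segments */
}

/* Analogue of SlruScanDirCbReportPresence: stop at the first segment
 * before the cutoff page passed through "data". */
bool
report_presence(const char *filename, int segpage, void *data)
{
    int     cutoffPage = *(int *) data;

    return segpage < cutoffPage;    /* simplified PagePrecedes test */
}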
/*
@@ -1191,8 +1191,8 @@ SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data
static bool
SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
{
- char path[MAXPGPATH];
- int cutoffPage = *(int *) data;
+ char path[MAXPGPATH];
+ int cutoffPage = *(int *) data;
if (ctl->PagePrecedes(segpage, cutoffPage))
{
@@ -1202,7 +1202,7 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
unlink(path);
}
- return false; /* keep going */
+ return false; /* keep going */
}
/*
@@ -1212,14 +1212,14 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
bool
SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, filename);
ereport(DEBUG2,
(errmsg("removing file \"%s\"", path)));
unlink(path);
- return false; /* keep going */
+ return false; /* keep going */
}
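The policy reflowed in the @@ -920 comment above boils down to a few rules: take any EMPTY slot immediately, never evict the slot holding latest_page_number, and pass over I/O-busy slots unless there is no other choice. A standalone sketch under those rules (hypothetical types; the real code tracks recency through cur_lru_count deltas and also remembers page numbers to break ties):

#include <stdbool.h>

typedef enum {SLOT_EMPTY, SLOT_VALID, SLOT_IO_BUSY} SlotStatus;

typedef struct
{
    SlotStatus  status;
    int         page_number;
    int         recency;        /* larger = less recently used here */
} Slot;

/* Returns the victim slot index, or -1 if everything usable is I/O busy
 * (the unhappy case above: wait for an I/O to finish and retry). */
int
select_victim(const Slot *slots, int nslots, int latest_page_number)
{
    int     best = -1;
    int     best_recency = -1;

    for (int i = 0; i < nslots; i++)
    {
        if (slots[i].status == SLOT_EMPTY)
            return i;                       /* just select that one */
        if (slots[i].page_number == latest_page_number)
            continue;                       /* never take this slot */
        if (slots[i].status == SLOT_IO_BUSY)
            continue;                       /* only if no other choice */
        if (slots[i].recency > best_recency)
        {
            best_recency = slots[i].recency;
            best = i;                       /* least recently used so far */
        }
    }
    return best;
}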
/*

src/backend/access/transam/twophase.c

@@ -360,8 +360,9 @@ static void
GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts,
TransactionId *children)
{
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
/* We need no extra lock since the GXACT isn't valid yet */
if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS)
{
@@ -410,7 +411,7 @@ LockGXact(const char *gid, Oid user)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
/* Ignore not-yet-valid GIDs */
if (!gxact->valid)
@@ -523,7 +524,7 @@ TransactionIdIsPrepared(TransactionId xid)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid && pgxact->xid == xid)
{
@@ -648,8 +649,8 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
while (status->array != NULL && status->currIdx < status->ngxacts)
{
GlobalTransaction gxact = &status->array[status->currIdx++];
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
Datum values[5];
bool nulls[5];
HeapTuple tuple;
@@ -719,7 +720,7 @@ TwoPhaseGetDummyProc(TransactionId xid)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (pgxact->xid == xid)
{
@@ -850,8 +851,8 @@ save_state_data(const void *data, uint32 len)
void
StartPrepare(GlobalTransaction gxact)
{
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
TransactionId xid = pgxact->xid;
TwoPhaseFileHeader hdr;
TransactionId *children;
@@ -1063,9 +1064,9 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks MyPgXact
- * as not running our XID (which it will do immediately after this
- * function returns), others can commit/rollback the xact.
+ * Mark the prepared transaction as valid. As soon as xact.c marks
+ * MyPgXact as not running our XID (which it will do immediately after
+ * this function returns), others can commit/rollback the xact.
*
* NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyPgXact,
@@ -1551,7 +1552,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid &&
XLByteLE(gxact->prepare_lsn, redo_horizon))
@@ -1707,7 +1708,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
* XID, and they may force us to advance nextXid.
*
* We don't expect anyone else to modify nextXid, hence we don't
- * need to hold a lock while examining it. We still acquire the
+ * need to hold a lock while examining it. We still acquire the
* lock to modify it, though.
*/
subxids = (TransactionId *)
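Nearly every hunk in this file re-wraps the same idiom: 9.2 split the snapshot-hot fields of PGPROC into a dense, parallel PGXACT array, and both arrays are indexed by the same pgprocno. A schematic model of that indirection (struct bodies reduced to stubs; the real declarations live in proc.h):

typedef unsigned int TransactionId;

typedef struct { int pid; /* ... cold, rarely-scanned fields ... */ } PGPROC;
typedef struct { TransactionId xid; /* ... hot snapshot fields ... */ } PGXACT;

typedef struct
{
    PGPROC     *allProcs;       /* one entry per backend or dummy proc */
    PGXACT     *allPgXact;      /* parallel array, kept small and dense */
} ProcGlobalData;

ProcGlobalData *ProcGlobal;

/* The repeated lookup pattern from the hunks above: one index, two
 * parallel arrays. */
void
gxact_lookup(int pgprocno, PGPROC **proc, PGXACT **pgxact)
{
    *proc = &ProcGlobal->allProcs[pgprocno];
    *pgxact = &ProcGlobal->allPgXact[pgprocno];
}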

src/backend/access/transam/varsup.c

@@ -174,8 +174,8 @@ GetNewTransactionId(bool isSubXact)
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
- * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we are
- * relying on fetch/store of an xid to be atomic, else other backends
+ * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
+ * are relying on fetch/store of an xid to be atomic, else other backends
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
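The XXX note above rests on one assumption: a TransactionId store is atomic, so a reader holding no lock can never observe a partially-set xid. Restated with C11 atomics purely as an illustration of the hazard (9.2 predates C11 and relies on the hardware word-store guarantee instead):

#include <stdatomic.h>

typedef unsigned int TransactionId;

/* Stand-in for MyPgXact->xid; if this store could tear, a concurrent
 * snapshot builder might see half of the new xid. */
static _Atomic TransactionId published_xid;

void
publish_xid(TransactionId xid)
{
    atomic_store_explicit(&published_xid, xid, memory_order_relaxed);
}

TransactionId
observe_xid(void)
{
    return atomic_load_explicit(&published_xid, memory_order_relaxed);
}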

src/backend/access/transam/xact.c

@@ -1019,6 +1019,7 @@ RecordTransactionCommit(void)
XLogRecData rdata[4];
int lastrdata = 0;
xl_xact_commit xlrec;
/*
* Set flags required for recovery processing of commits.
*/
@@ -1073,7 +1074,8 @@ RecordTransactionCommit(void)
{
XLogRecData rdata[2];
int lastrdata = 0;
- xl_xact_commit_compact xlrec;
+ xl_xact_commit_compact xlrec;
xlrec.xact_time = xactStopTimestamp;
xlrec.nsubxacts = nchildren;
rdata[0].data = (char *) (&xlrec);
@@ -2102,7 +2104,7 @@ PrepareTransaction(void)
if (XactHasExportedSnapshots())
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot PREPARE a transaction that has exported snapshots")));
+ errmsg("cannot PREPARE a transaction that has exported snapshots")));
/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
@@ -2602,10 +2604,10 @@ CommitTransactionCommand(void)
break;
/*
- * We were issued a RELEASE command, so we end the
- * current subtransaction and return to the parent transaction.
- * The parent might be ended too, so repeat till we find an
- * INPROGRESS transaction or subtransaction.
+ * We were issued a RELEASE command, so we end the current
+ * subtransaction and return to the parent transaction. The parent
+ * might be ended too, so repeat till we find an INPROGRESS
+ * transaction or subtransaction.
*/
case TBLOCK_SUBRELEASE:
do
@@ -2623,9 +2625,9 @@ CommitTransactionCommand(void)
* hierarchy and perform final commit. We do this by rolling up
* any subtransactions into their parent, which leads to O(N^2)
* operations with respect to resource owners - this isn't that
- * bad until we approach a thousands of savepoints but is necessary
- * for correctness should after triggers create new resource
- * owners.
+ * bad until we approach a thousands of savepoints but is
+ * necessary for correctness should after triggers create new
+ * resource owners.
*/
case TBLOCK_SUBCOMMIT:
do
@@ -4551,11 +4553,11 @@ xactGetCommittedChildren(TransactionId **ptr)
*/
static void
xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
- TransactionId *sub_xids, int nsubxacts,
- SharedInvalidationMessage *inval_msgs, int nmsgs,
- RelFileNode *xnodes, int nrels,
- Oid dbId, Oid tsId,
- uint32 xinfo)
+ TransactionId *sub_xids, int nsubxacts,
+ SharedInvalidationMessage *inval_msgs, int nmsgs,
+ RelFileNode *xnodes, int nrels,
+ Oid dbId, Oid tsId,
+ uint32 xinfo)
{
TransactionId max_xid;
int i;
@@ -4659,12 +4661,13 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
XLogFlush(lsn);
}
/*
* Utility function to call xact_redo_commit_internal after breaking down xlrec
*/
static void
xact_redo_commit(xl_xact_commit *xlrec,
- TransactionId xid, XLogRecPtr lsn)
+ TransactionId xid, XLogRecPtr lsn)
{
TransactionId *subxacts;
SharedInvalidationMessage *inval_msgs;
@@ -4675,11 +4678,11 @@ xact_redo_commit(xl_xact_commit *xlrec,
inval_msgs = (SharedInvalidationMessage *) &(subxacts[xlrec->nsubxacts]);
xact_redo_commit_internal(xid, lsn, subxacts, xlrec->nsubxacts,
- inval_msgs, xlrec->nmsgs,
- xlrec->xnodes, xlrec->nrels,
- xlrec->dbId,
- xlrec->tsId,
- xlrec->xinfo);
+ inval_msgs, xlrec->nmsgs,
+ xlrec->xnodes, xlrec->nrels,
+ xlrec->dbId,
+ xlrec->tsId,
+ xlrec->xinfo);
}
/*
@@ -4687,14 +4690,14 @@ xact_redo_commit(xl_xact_commit *xlrec,
*/
static void
xact_redo_commit_compact(xl_xact_commit_compact *xlrec,
- TransactionId xid, XLogRecPtr lsn)
+ TransactionId xid, XLogRecPtr lsn)
{
xact_redo_commit_internal(xid, lsn, xlrec->subxacts, xlrec->nsubxacts,
- NULL, 0, /* inval msgs */
- NULL, 0, /* relfilenodes */
- InvalidOid, /* dbId */
- InvalidOid, /* tsId */
- 0); /* xinfo */
+ NULL, 0, /* inval msgs */
+ NULL, 0, /* relfilenodes */
+ InvalidOid, /* dbId */
+ InvalidOid, /* tsId */
+ 0); /* xinfo */
}
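xact_redo_commit above recovers variable-length arrays from one flat WAL payload: the subxact XIDs begin after the fixed fields, and the invalidation messages begin where the subxacts end. A standalone sketch of that layout arithmetic (hypothetical record struct; the real xl_xact_commit carries more fields, including the relfilenodes ahead of the subxacts):

#include <stddef.h>

typedef unsigned int TransactionId;
typedef struct { int id; } InvalMsg;    /* stand-in for SharedInvalidationMessage */

typedef struct
{
    int     nsubxacts;
    int     nmsgs;
    /* in memory, followed by: TransactionId subxacts[nsubxacts];
     * then InvalMsg msgs[nmsgs]; */
} CommitRecord;

/* Mirror of the pointer arithmetic in xact_redo_commit above: each tail
 * array begins where the previous one ends. */
void
parse_commit_record(CommitRecord *rec, TransactionId **subxacts, InvalMsg **msgs)
{
    *subxacts = (TransactionId *) (rec + 1);
    *msgs = (InvalMsg *) (*subxacts + rec->nsubxacts);
}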
/*

src/backend/access/transam/xlog.c

@@ -344,10 +344,10 @@ typedef struct XLogCtlInsert
/*
* fullPageWrites is the master copy used by all backends to determine
- * whether to write full-page to WAL, instead of using process-local
- * one. This is required because, when full_page_writes is changed
- * by SIGHUP, we must WAL-log it before it actually affects
- * WAL-logging by backends. Checkpointer sets at startup or after SIGHUP.
+ * whether to write full-page to WAL, instead of using process-local one.
+ * This is required because, when full_page_writes is changed by SIGHUP,
+ * we must WAL-log it before it actually affects WAL-logging by backends.
+ * Checkpointer sets at startup or after SIGHUP.
*/
bool fullPageWrites;
@@ -455,8 +455,11 @@ typedef struct XLogCtlData
XLogRecPtr recoveryLastRecPtr;
/* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
TimestampTz recoveryLastXTime;
- /* timestamp of when we started replaying the current chunk of WAL data,
- * only relevant for replication or archive recovery */
+ /*
+ * timestamp of when we started replaying the current chunk of WAL data,
+ * only relevant for replication or archive recovery
+ */
TimestampTz currentChunkStartTime;
/* end of the last record restored from the archive */
XLogRecPtr restoreLastRecPtr;
@@ -580,7 +583,7 @@ static bool updateMinRecoveryPoint = true;
* to replay all the WAL, so reachedConsistency is never set. During archive
* recovery, the database is consistent once minRecoveryPoint is reached.
*/
- bool reachedConsistency = false;
+ bool reachedConsistency = false;
static bool InRedo = false;
@@ -750,8 +753,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* insert lock, but it seems better to avoid doing CRC calculations while
* holding the lock.
*
- * We add entries for backup blocks to the chain, so that they don't
- * need any special treatment in the critical section where the chunks are
+ * We add entries for backup blocks to the chain, so that they don't need
+ * any special treatment in the critical section where the chunks are
* copied into the WAL buffers. Those entries have to be unlinked from the
* chain if we have to loop back here.
*/
@@ -896,10 +899,10 @@ begin:;
/*
* Calculate CRC of the data, including all the backup blocks
*
- * Note that the record header isn't added into the CRC initially since
- * we don't know the prev-link yet. Thus, the CRC will represent the CRC
- * of the whole record in the order: rdata, then backup blocks, then
- * record header.
+ * Note that the record header isn't added into the CRC initially since we
+ * don't know the prev-link yet. Thus, the CRC will represent the CRC of
+ * the whole record in the order: rdata, then backup blocks, then record
+ * header.
*/
INIT_CRC32(rdata_crc);
for (rdt = rdata; rdt != NULL; rdt = rdt->next)
@@ -948,10 +951,10 @@ begin:;
}
/*
- * Also check to see if fullPageWrites or forcePageWrites was just turned on;
- * if we weren't already doing full-page writes then go back and recompute.
- * (If it was just turned off, we could recompute the record without full pages,
- * but we choose not to bother.)
+ * Also check to see if fullPageWrites or forcePageWrites was just turned
+ * on; if we weren't already doing full-page writes then go back and
+ * recompute. (If it was just turned off, we could recompute the record
+ * without full pages, but we choose not to bother.)
*/
if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites)
{
@@ -1575,15 +1578,15 @@ AdvanceXLInsertBuffer(bool new_segment)
* WAL records beginning in this page have removable backup blocks. This
* allows the WAL archiver to know whether it is safe to compress archived
* WAL data by transforming full-block records into the non-full-block
- * format. It is sufficient to record this at the page level because we
+ * format. It is sufficient to record this at the page level because we
* force a page switch (in fact a segment switch) when starting a backup,
* so the flag will be off before any records can be written during the
- * backup. At the end of a backup, the last page will be marked as all
+ * backup. At the end of a backup, the last page will be marked as all
* unsafe when perhaps only part is unsafe, but at worst the archiver
* would miss the opportunity to compress a few records.
*/
if (!Insert->forcePageWrites)
- NewPage->xlp_info |= XLP_BKP_REMOVABLE;
+ NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
/*
* If first page of an XLOG segment file, make it a long header.
@@ -1827,11 +1830,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
Write->lastSegSwitchTime = (pg_time_t) time(NULL);
/*
- * Request a checkpoint if we've consumed too
- * much xlog since the last one. For speed, we first check
- * using the local copy of RedoRecPtr, which might be out of
- * date; if it looks like a checkpoint is needed, forcibly
- * update RedoRecPtr and recheck.
+ * Request a checkpoint if we've consumed too much xlog since
+ * the last one. For speed, we first check using the local
+ * copy of RedoRecPtr, which might be out of date; if it looks
+ * like a checkpoint is needed, forcibly update RedoRecPtr and
+ * recheck.
*/
if (IsUnderPostmaster &&
XLogCheckpointNeeded(openLogId, openLogSeg))
@@ -1931,7 +1934,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
/*
* If the WALWriter is sleeping, we should kick it to make it come out of
- * low-power mode. Otherwise, determine whether there's a full page of
+ * low-power mode. Otherwise, determine whether there's a full page of
* WAL available to write.
*/
if (!sleeping)
@@ -1945,9 +1948,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
}
/*
- * Nudge the WALWriter: it has a full page of WAL to write, or we want
- * it to come out of low-power mode so that this async commit will reach
- * disk within the expected amount of time.
+ * Nudge the WALWriter: it has a full page of WAL to write, or we want it
+ * to come out of low-power mode so that this async commit will reach disk
+ * within the expected amount of time.
*/
if (ProcGlobal->walwriterLatch)
SetLatch(ProcGlobal->walwriterLatch);
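The nudge above is the standard latch handshake: the WAL writer resets its latch before checking for work, and any backend wanting service sets it, so a wakeup that races with the check is never lost. A toy model of that ordering (the real API is SetLatch/ResetLatch/WaitLatch, which blocks in the kernel rather than spinning):

#include <stdbool.h>

typedef struct { volatile bool is_set; } ToyLatch;

/* Backend side: the moral equivalent of SetLatch(walwriterLatch). */
void
toy_set_latch(ToyLatch *latch)
{
    latch->is_set = true;       /* the real code also signals the sleeper */
}

/* WAL-writer side: reset first, then work, then wait.  Resetting before
 * the work check is what keeps a concurrent nudge from being lost. */
void
toy_walwriter_cycle(ToyLatch *latch, bool (*have_work) (void),
                    void (*write_wal) (void))
{
    latch->is_set = false;      /* ResetLatch */
    if (have_work())
        write_wal();
    while (!latch->is_set)
        ;                       /* WaitLatch would block here instead */
}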
@@ -2076,8 +2079,8 @@ XLogFlush(XLogRecPtr record)
WriteRqstPtr = record;
/*
- * Now wait until we get the write lock, or someone else does the
- * flush for us.
+ * Now wait until we get the write lock, or someone else does the flush
+ * for us.
*/
for (;;)
{
@@ -2182,7 +2185,7 @@ XLogFlush(XLogRecPtr record)
* block, and flush through the latest one of those. Thus, if async commits
* are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (When flushing complete blocks, we allow XLogWrite to write
+ * one or two. (When flushing complete blocks, we allow XLogWrite to write
* "flexibly", meaning it can stop at the end of the buffer ring; this makes a
* difference only with very high load or long wal_writer_delay, but imposes
* one extra cycle for the worst case for async commits.)
@@ -2273,7 +2276,8 @@ XLogBackgroundFlush(void)
/*
* If we wrote something then we have something to send to standbys also,
- * otherwise the replication delay become around 7s with just async commit.
+ * otherwise the replication delay become around 7s with just async
+ * commit.
*/
if (wrote_something)
WalSndWakeup();
@@ -2776,17 +2780,17 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
}
/*
- * If the segment was fetched from archival storage, replace
- * the existing xlog segment (if any) with the archival version.
+ * If the segment was fetched from archival storage, replace the existing
+ * xlog segment (if any) with the archival version.
*/
if (source == XLOG_FROM_ARCHIVE)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr endptr;
- char xlogfpath[MAXPGPATH];
- bool reload = false;
- struct stat statbuf;
+ XLogRecPtr endptr;
+ char xlogfpath[MAXPGPATH];
+ bool reload = false;
+ struct stat statbuf;
XLogFilePath(xlogfpath, tli, log, seg);
if (stat(xlogfpath, &statbuf) == 0)
@@ -2801,9 +2805,9 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
if (rename(path, xlogfpath) < 0)
ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not rename file \"%s\" to \"%s\": %m",
- path, xlogfpath)));
+ (errcode_for_file_access(),
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ path, xlogfpath)));
/*
* If the existing segment was replaced, since walsenders might have
@@ -3812,7 +3816,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt)
RecPtr = &tmpRecPtr;
/*
- * RecPtr is pointing to end+1 of the previous WAL record. We must
+ * RecPtr is pointing to end+1 of the previous WAL record. We must
* advance it if necessary to where the next record starts. First,
* align to next page if no more records can fit on the current page.
*/
@@ -5389,10 +5393,10 @@ readRecoveryCommandFile(void)
}
if (rtli)
ereport(DEBUG2,
- (errmsg_internal("recovery_target_timeline = %u", rtli)));
+ (errmsg_internal("recovery_target_timeline = %u", rtli)));
else
ereport(DEBUG2,
- (errmsg_internal("recovery_target_timeline = latest")));
+ (errmsg_internal("recovery_target_timeline = latest")));
}
else if (strcmp(item->name, "recovery_target_xid") == 0)
{
@@ -5404,7 +5408,7 @@ readRecoveryCommandFile(void)
item->value)));
ereport(DEBUG2,
(errmsg_internal("recovery_target_xid = %u",
- recoveryTargetXid)));
+ recoveryTargetXid)));
recoveryTarget = RECOVERY_TARGET_XID;
}
else if (strcmp(item->name, "recovery_target_time") == 0)
@@ -5428,7 +5432,7 @@ readRecoveryCommandFile(void)
Int32GetDatum(-1)));
ereport(DEBUG2,
(errmsg_internal("recovery_target_time = '%s'",
- timestamptz_to_str(recoveryTargetTime))));
+ timestamptz_to_str(recoveryTargetTime))));
}
else if (strcmp(item->name, "recovery_target_name") == 0)
{
@@ -5576,13 +5580,13 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
}
/*
- * If we are establishing a new timeline, we have to copy data from
- * the last WAL segment of the old timeline to create a starting WAL
- * segment for the new timeline.
+ * If we are establishing a new timeline, we have to copy data from the
+ * last WAL segment of the old timeline to create a starting WAL segment
+ * for the new timeline.
*
- * Notify the archiver that the last WAL segment of the old timeline
- * is ready to copy to archival storage. Otherwise, it is not archived
- * for a while.
+ * Notify the archiver that the last WAL segment of the old timeline is
+ * ready to copy to archival storage. Otherwise, it is not archived for a
+ * while.
*/
if (endTLI != ThisTimeLineID)
{
@@ -5604,8 +5608,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
XLogArchiveCleanup(xlogpath);
/*
- * Since there might be a partial WAL segment named RECOVERYXLOG,
- * get rid of it.
+ * Since there might be a partial WAL segment named RECOVERYXLOG, get rid
+ * of it.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
unlink(recoveryPath); /* ignore any error */
@@ -6323,11 +6327,11 @@ StartupXLOG(void)
/*
* Set backupStartPoint if we're starting recovery from a base backup.
*
- * Set backupEndPoint and use minRecoveryPoint as the backup end location
- * if we're starting recovery from a base backup which was taken from
- * the standby. In this case, the database system status in pg_control must
- * indicate DB_IN_ARCHIVE_RECOVERY. If not, which means that backup
- * is corrupted, so we cancel recovery.
+ * Set backupEndPoint and use minRecoveryPoint as the backup end
+ * location if we're starting recovery from a base backup which was
+ * taken from the standby. In this case, the database system status in
+ * pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, which
+ * means that backup is corrupted, so we cancel recovery.
*/
if (haveBackupLabel)
{
@@ -6340,7 +6344,7 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("backup_label contains inconsistent data with control file"),
errhint("This means that the backup is corrupted and you will "
- "have to use another backup for recovery.")));
+ "have to use another backup for recovery.")));
ControlFile->backupEndPoint = ControlFile->minRecoveryPoint;
}
}
@@ -6383,15 +6387,15 @@ StartupXLOG(void)
/*
* We're in recovery, so unlogged relations may be trashed and must be
- * reset. This should be done BEFORE allowing Hot Standby connections,
- * so that read-only backends don't try to read whatever garbage is
- * left over from before.
+ * reset. This should be done BEFORE allowing Hot Standby
+ * connections, so that read-only backends don't try to read whatever
+ * garbage is left over from before.
*/
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
/*
- * Likewise, delete any saved transaction snapshot files that got
- * left behind by crashed backends.
+ * Likewise, delete any saved transaction snapshot files that got left
+ * behind by crashed backends.
*/
DeleteAllExportedSnapshotFiles();
@@ -6489,10 +6493,11 @@ StartupXLOG(void)
/*
* Let postmaster know we've started redo now, so that it can launch
- * checkpointer to perform restartpoints. We don't bother during crash
- * recovery as restartpoints can only be performed during archive
- * recovery. And we'd like to keep crash recovery simple, to avoid
- * introducing bugs that could affect you when recovering after crash.
+ * checkpointer to perform restartpoints. We don't bother during
+ * crash recovery as restartpoints can only be performed during
+ * archive recovery. And we'd like to keep crash recovery simple, to
+ * avoid introducing bugs that could affect you when recovering after
+ * crash.
*
* After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are
@@ -6649,8 +6654,8 @@ StartupXLOG(void)
{
/*
* We have reached the end of base backup, the point where
- * the minimum recovery point in pg_control indicates.
- * The data on disk is now consistent. Reset backupStartPoint
+ * the minimum recovery point in pg_control indicates. The
+ * data on disk is now consistent. Reset backupStartPoint
* and backupEndPoint.
*/
elog(DEBUG1, "end of backup reached");
@@ -6863,9 +6868,9 @@ StartupXLOG(void)
oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
/*
- * Update full_page_writes in shared memory and write an
- * XLOG_FPW_CHANGE record before resource manager writes cleanup
- * WAL records or checkpoint record is written.
+ * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
+ * record before resource manager writes cleanup WAL records or checkpoint
+ * record is written.
*/
Insert->fullPageWrites = lastFullPageWrites;
LocalSetXLogInsertAllowed();
@@ -6954,8 +6959,8 @@ StartupXLOG(void)
LWLockRelease(ProcArrayLock);
/*
- * Start up the commit log and subtrans, if not already done for
- * hot standby.
+ * Start up the commit log and subtrans, if not already done for hot
+ * standby.
*/
if (standbyState == STANDBY_DISABLED)
{
@@ -7705,9 +7710,9 @@ CreateCheckPoint(int flags)
checkPoint.time = (pg_time_t) time(NULL);
/*
- * For Hot Standby, derive the oldestActiveXid before we fix the redo pointer.
- * This allows us to begin accumulating changes to assemble our starting
- * snapshot of locks and transactions.
+ * For Hot Standby, derive the oldestActiveXid before we fix the redo
+ * pointer. This allows us to begin accumulating changes to assemble our
+ * starting snapshot of locks and transactions.
*/
if (!shutdown && XLogStandbyInfoActive())
checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
@@ -8062,7 +8067,7 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
volatile XLogCtlData *xlogctl = XLogCtl;
/*
- * Is it safe to restartpoint? We must ask each of the resource managers
+ * Is it safe to restartpoint? We must ask each of the resource managers
* whether they have any partial state information that might prevent a
* correct restart from this point. If so, we skip this opportunity, but
* return at the next checkpoint record for another try.
@@ -8082,10 +8087,11 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
- * Also refrain from creating a restartpoint if we have seen any references
- * to non-existent pages. Restarting recovery from the restartpoint would
- * not see the references, so we would lose the cross-check that the pages
- * belonged to a relation that was dropped later.
+ * Also refrain from creating a restartpoint if we have seen any
+ * references to non-existent pages. Restarting recovery from the
+ * restartpoint would not see the references, so we would lose the
+ * cross-check that the pages belonged to a relation that was dropped
+ * later.
*/
if (XLogHaveInvalidPages())
{
@@ -8098,8 +8104,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
- * Copy the checkpoint record to shared memory, so that checkpointer
- * can work out the next time it wants to perform a restartpoint.
+ * Copy the checkpoint record to shared memory, so that checkpointer can
+ * work out the next time it wants to perform a restartpoint.
*/
SpinLockAcquire(&xlogctl->info_lck);
XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
@@ -8493,8 +8499,8 @@ UpdateFullPageWrites(void)
* Do nothing if full_page_writes has not been changed.
*
* It's safe to check the shared full_page_writes without the lock,
- * because we assume that there is no concurrently running process
- * which can update it.
+ * because we assume that there is no concurrently running process which
+ * can update it.
*/
if (fullPageWrites == Insert->fullPageWrites)
return;
@@ -8505,8 +8511,8 @@ UpdateFullPageWrites(void)
* It's always safe to take full page images, even when not strictly
* required, but not the other round. So if we're setting full_page_writes
* to true, first set it true and then write the WAL record. If we're
- * setting it to false, first write the WAL record and then set the
- * global flag.
+ * setting it to false, first write the WAL record and then set the global
+ * flag.
*/
if (fullPageWrites)
{
@@ -8516,12 +8522,12 @@ UpdateFullPageWrites(void)
}
/*
- * Write an XLOG_FPW_CHANGE record. This allows us to keep
- * track of full_page_writes during archive recovery, if required.
+ * Write an XLOG_FPW_CHANGE record. This allows us to keep track of
+ * full_page_writes during archive recovery, if required.
*/
if (XLogStandbyInfoActive() && !RecoveryInProgress())
{
- XLogRecData rdata;
+ XLogRecData rdata;
rdata.data = (char *) (&fullPageWrites);
rdata.len = sizeof(bool);
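The two comments in UpdateFullPageWrites above encode one asymmetry: an unnecessary full-page image is harmless, a missing one is not. Hence the flag is set before logging when enabling, and cleared after logging when disabling. A standalone sketch of that ordering (stubbed WAL call; not the real function body):

#include <stdbool.h>

static bool shared_fpw;         /* stand-in for Insert->fullPageWrites */

static void
log_fpw_change(bool fpw)        /* stand-in for the XLOG_FPW_CHANGE insert */
{
    (void) fpw;
}

void
update_full_page_writes(bool fpw)
{
    if (fpw == shared_fpw)
        return;                 /* nothing to do */

    if (fpw)
        shared_fpw = true;      /* err on the safe side first */

    log_fpw_change(fpw);        /* WAL-log the transition */

    if (!fpw)
        shared_fpw = false;     /* clear only once it is on record */
}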
@@ -8561,7 +8567,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* We used to try to take the maximum of ShmemVariableCache->nextOid
* and the recorded nextOid, but that fails if the OID counter wraps
- * around. Since no OID allocation should be happening during replay
+ * around. Since no OID allocation should be happening during replay
* anyway, better to just believe the record exactly. We still take
* OidGenLock while setting the variable, just in case.
*/
@@ -8597,7 +8603,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
!XLogRecPtrIsInvalid(ControlFile->backupStartPoint) &&
XLogRecPtrIsInvalid(ControlFile->backupEndPoint))
ereport(PANIC,
- (errmsg("online backup was canceled, recovery cannot continue")));
+ (errmsg("online backup was canceled, recovery cannot continue")));
/*
* If we see a shutdown checkpoint, we know that nothing was running
@@ -8797,9 +8803,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
/*
- * Update the LSN of the last replayed XLOG_FPW_CHANGE record
- * so that do_pg_start_backup() and do_pg_stop_backup() can check
- * whether full_page_writes has been disabled during online backup.
+ * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
+ * do_pg_start_backup() and do_pg_stop_backup() can check whether
+ * full_page_writes has been disabled during online backup.
*/
if (!fpw)
{
@@ -8825,7 +8831,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
CheckPoint *checkpoint = (CheckPoint *) rec;
appendStringInfo(buf, "checkpoint: redo %X/%X; "
- "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
+ "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest running xid %u; %s",
checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
checkpoint->ThisTimeLineID,
@@ -9115,8 +9121,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
errhint("WAL control functions cannot be executed during recovery.")));
/*
- * During recovery, we don't need to check WAL level. Because, if WAL level
- * is not sufficient, it's impossible to get here during recovery.
+ * During recovery, we don't need to check WAL level. Because, if WAL
+ * level is not sufficient, it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@@ -9179,7 +9185,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
* old timeline IDs. That would otherwise happen if you called
* pg_start_backup() right after restoring from a PITR archive: the
* first WAL segment containing the startup checkpoint has pages in
- * the beginning with the old timeline ID. That can cause trouble at
+ * the beginning with the old timeline ID. That can cause trouble at
* recovery: we won't have a history file covering the old timeline if
* pg_xlog directory was not included in the base backup and the WAL
* archive was cleared too before starting the backup.
@@ -9202,17 +9208,18 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
bool checkpointfpw;
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
*
- * During recovery, establish a restartpoint if possible. We use the last
- * restartpoint as the backup starting checkpoint. This means that two
- * successive backup runs can have same checkpoint positions.
+ * During recovery, establish a restartpoint if possible. We use
+ * the last restartpoint as the backup starting checkpoint. This
+ * means that two successive backup runs can have same checkpoint
+ * positions.
*
- * Since the fact that we are executing do_pg_start_backup() during
- * recovery means that checkpointer is running, we can use
+ * Since the fact that we are executing do_pg_start_backup()
+ * during recovery means that checkpointer is running, we can use
* RequestCheckpoint() to establish a restartpoint.
*
* We use CHECKPOINT_IMMEDIATE only if requested by user (via
@@ -9237,12 +9244,12 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
/*
- * Check to see if all WAL replayed during online backup (i.e.,
- * since last restartpoint used as backup starting checkpoint)
- * contain full-page writes.
+ * Check to see if all WAL replayed during online backup
+ * (i.e., since last restartpoint used as backup starting
+ * checkpoint) contain full-page writes.
*/
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastFpwDisableRecPtr;
@@ -9250,20 +9257,20 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
if (!checkpointfpw || XLByteLE(startpoint, recptr))
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
- "since last restartpoint"),
- errhint("This means that the backup being taken on standby "
- "is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the master, "
- "and then try an online backup again.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("WAL generated with full_page_writes=off was replayed "
+ "since last restartpoint"),
+ errhint("This means that the backup being taken on standby "
+ "is corrupt and should not be used. "
+ "Enable full_page_writes and run CHECKPOINT on the master, "
+ "and then try an online backup again.")));
/*
* During recovery, since we don't use the end-of-backup WAL
- * record and don't write the backup history file, the starting WAL
- * location doesn't need to be unique. This means that two base
- * backups started at the same time might use the same checkpoint
- * as starting locations.
+ * record and don't write the backup history file, the
+ * starting WAL location doesn't need to be unique. This means
+ * that two base backups started at the same time might use
+ * the same checkpoint as starting locations.
*/
gotUniqueStartpoint = true;
}
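This hunk sits inside a retry loop: on a primary, checkpoints are forced until the backup start point differs from any previous backup's, while during recovery (the case the re-wrapped comment describes) uniqueness is waived outright. A standalone approximation of that control flow (stubbed checkpoint and a scalar stand-in for XLogRecPtr):

#include <stdbool.h>

typedef unsigned long RecPtr;   /* scalar stand-in for XLogRecPtr */

static RecPtr
force_checkpoint(void)          /* stub: pretend the redo pointer advances */
{
    static RecPtr redo;
    return ++redo;
}

RecPtr
choose_backup_startpoint(RecPtr prev_backup_start, bool in_recovery)
{
    RecPtr  startpoint;
    bool    gotUniqueStartpoint = false;

    do
    {
        startpoint = force_checkpoint();
        /* during recovery the start point need not be unique */
        if (in_recovery || startpoint != prev_backup_start)
            gotUniqueStartpoint = true;
    } while (!gotUniqueStartpoint);

    return startpoint;
}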
@@ -9443,8 +9450,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
errhint("WAL control functions cannot be executed during recovery.")));
/*
- * During recovery, we don't need to check WAL level. Because, if WAL level
- * is not sufficient, it's impossible to get here during recovery.
+ * During recovery, we don't need to check WAL level. Because, if WAL
+ * level is not sufficient, it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@@ -9537,9 +9544,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
/*
- * Parse the BACKUP FROM line. If we are taking an online backup from
- * the standby, we confirm that the standby has not been promoted
- * during the backup.
+ * Parse the BACKUP FROM line. If we are taking an online backup from the
+ * standby, we confirm that the standby has not been promoted during the
+ * backup.
*/
ptr = strstr(remaining, "BACKUP FROM:");
if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
@@ -9555,30 +9562,30 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
"Try taking another online backup.")));
/*
- * During recovery, we don't write an end-of-backup record. We assume
- * that pg_control was backed up last and its minimum recovery
- * point can be available as the backup end location. Since we don't
- * have an end-of-backup record, we use the pg_control value to check
- * whether we've reached the end of backup when starting recovery from
- * this backup. We have no way of checking if pg_control wasn't backed
- * up last however.
+ * During recovery, we don't write an end-of-backup record. We assume that
+ * pg_control was backed up last and its minimum recovery point can be
+ * available as the backup end location. Since we don't have an
+ * end-of-backup record, we use the pg_control value to check whether
+ * we've reached the end of backup when starting recovery from this
+ * backup. We have no way of checking if pg_control wasn't backed up last
+ * however.
*
* We don't force a switch to new WAL file and wait for all the required
- * files to be archived. This is okay if we use the backup to start
- * the standby. But, if it's for an archive recovery, to ensure all the
- * required files are available, a user should wait for them to be archived,
- * or include them into the backup.
+ * files to be archived. This is okay if we use the backup to start the
+ * standby. But, if it's for an archive recovery, to ensure all the
+ * required files are available, a user should wait for them to be
+ * archived, or include them into the backup.
*
* We return the current minimum recovery point as the backup end
* location. Note that it would be bigger than the exact backup end
- * location if the minimum recovery point is updated since the backup
- * of pg_control. This is harmless for current uses.
+ * location if the minimum recovery point is updated since the backup of
+ * pg_control. This is harmless for current uses.
*
* XXX currently a backup history file is for informational and debug
* purposes only. It's not essential for an online backup. Furthermore,
* even if it's created, it will not be archived during recovery because
- * an archiver is not invoked. So it doesn't seem worthwhile to write
- * a backup history file during recovery.
+ * an archiver is not invoked. So it doesn't seem worthwhile to write a
+ * backup history file during recovery.
*/
if (backup_started_in_recovery)
{
@@ -9597,12 +9604,12 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
if (XLByteLE(startpoint, recptr))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
- "during online backup"),
- errhint("This means that the backup being taken on standby "
- "is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the master, "
- "and then try an online backup again.")));
+ errmsg("WAL generated with full_page_writes=off was replayed "
+ "during online backup"),
+ errhint("This means that the backup being taken on standby "
+ "is corrupt and should not be used. "
+ "Enable full_page_writes and run CHECKPOINT on the master, "
+ "and then try an online backup again.")));
LWLockAcquire(ControlFileLock, LW_SHARED);
@@ -9905,10 +9912,11 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
/*
- * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't
- * restore from an older backup anyway, but since the information on it
- * is not strictly required, don't error out if it's missing for some reason.
+ * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore
+ * from an older backup anyway, but since the information on it is not
+ * strictly required, don't error out if it's missing for some reason.
*/
if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
{
@@ -10050,8 +10058,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{
/*
- * Request a restartpoint if we've replayed too much
- * xlog since the last one.
+ * Request a restartpoint if we've replayed too much xlog since the
+ * last one.
*/
if (StandbyMode && bgwriterLaunched)
{

src/backend/access/transam/xlogutils.c

@@ -80,10 +80,10 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/*
* Once recovery has reached a consistent state, the invalid-page table
* should be empty and remain so. If a reference to an invalid page is
- * found after consistency is reached, PANIC immediately. This might
- * seem aggressive, but it's better than letting the invalid reference
- * linger in the hash table until the end of recovery and PANIC there,
- * which might come only much later if this is a standby server.
+ * found after consistency is reached, PANIC immediately. This might seem
+ * aggressive, but it's better than letting the invalid reference linger
+ * in the hash table until the end of recovery and PANIC there, which
+ * might come only much later if this is a standby server.
*/
if (reachedConsistency)
{
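The policy above has two phases: before consistency is reached an invalid page reference is merely recorded, afterwards it is fatal on the spot. A standalone model of that decision (stand-in identifiers; the real table is a hash keyed by relation, fork, and block, required to be empty by the end of recovery):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool reachedConsistencyModel;    /* set once recovery is consistent */

void
log_invalid_page_model(unsigned relnode, unsigned blkno)
{
    if (reachedConsistencyModel)
    {
        /* fail immediately rather than letting the entry linger */
        fprintf(stderr, "PANIC: invalid page: rel %u, block %u\n",
                relnode, blkno);
        abort();
    }
    /* otherwise: remember (relnode, blkno) in the invalid-page table,
     * to be re-checked and required empty at the end of recovery */
}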