
pgindent run for release 9.3

This is the first run of the Perl-based pgindent script.  Also update
pgindent instructions.
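[Editor's note: for context, a whole-tree pgindent run of this kind roughly follows the workflow sketched below. This is a hypothetical illustration, not the updated instructions themselves; the authoritative steps live in src/tools/pgindent/README, and the typedefs URL and exact invocation here are assumptions to verify against that README.]

    # Hypothetical sketch of a whole-tree pgindent run; see
    # src/tools/pgindent/README for the authoritative procedure.
    cd postgres                      # top of the source tree

    # Fetch a current typedefs list (commonly taken from the buildfarm;
    # verify this URL against the README).
    curl -o src/tools/pgindent/typedefs.list \
        https://buildfarm.postgresql.org/cgi-bin/typedefs.pl

    # Re-indent the tree in place with the Perl-based script; argument
    # conventions have varied across releases, so check its usage notes.
    src/tools/pgindent/pgindent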
Bruce Momjian
2013-05-29 16:58:43 -04:00
parent 07ab261ef3
commit 9af4159fce
367 changed files with 4222 additions and 3829 deletions

View File

@ -173,7 +173,6 @@ void
ginFindParents(GinBtree btree, GinBtreeStack *stack,
BlockNumber rootBlkno)
{
-
Page page;
Buffer buffer;
BlockNumber blkno,

View File

@ -610,9 +610,9 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
newtup = gistgetadjusted(indexrel, idxtuple, itup, giststate);
if (newtup)
{
- blkno = gistbufferinginserttuples(buildstate, buffer, level,
- &newtup, 1, childoffnum,
- InvalidBlockNumber, InvalidOffsetNumber);
+ blkno = gistbufferinginserttuples(buildstate, buffer, level,
+ &newtup, 1, childoffnum,
+ InvalidBlockNumber, InvalidOffsetNumber);
/* gistbufferinginserttuples() released the buffer */
}
else
@ -680,7 +680,7 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
GISTBuildBuffers *gfbb = buildstate->gfbb;
List *splitinfo;
bool is_split;
- BlockNumber placed_to_blk = InvalidBlockNumber;
+ BlockNumber placed_to_blk = InvalidBlockNumber;
is_split = gistplacetopage(buildstate->indexrel,
buildstate->freespace,

View File

@ -364,8 +364,9 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
item->blkno = ItemPointerGetBlockNumber(&it->t_tid);
/*
- * LSN of current page is lsn of parent page for child. We only
- * have a shared lock, so we need to get the LSN atomically.
+ * LSN of current page is lsn of parent page for child. We
+ * only have a shared lock, so we need to get the LSN
+ * atomically.
*/
item->data.parentlsn = BufferGetLSNAtomic(buffer);
}

View File

@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
@ -810,8 +810,8 @@ gistGetFakeLSN(Relation rel)
if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
{
/*
- * Temporary relations are only accessible in our session, so a
- * simple backend-local counter will do.
+ * Temporary relations are only accessible in our session, so a simple
+ * backend-local counter will do.
*/
return counter++;
}

View File

@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.

View File

@ -90,7 +90,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)

View File

@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)

View File

@ -120,32 +120,34 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi,
static const struct
{
LOCKMODE hwlock;
- MultiXactStatus lockstatus;
- MultiXactStatus updstatus;
+ MultiXactStatus lockstatus;
+ MultiXactStatus updstatus;
}
- tupleLockExtraInfo[MaxLockTupleMode + 1] =
+ tupleLockExtraInfo[MaxLockTupleMode + 1] =
{
- { /* LockTupleKeyShare */
+ { /* LockTupleKeyShare */
AccessShareLock,
MultiXactStatusForKeyShare,
- -1 /* KeyShare does not allow updating tuples */
+ -1 /* KeyShare does not allow updating tuples */
},
- { /* LockTupleShare */
+ { /* LockTupleShare */
RowShareLock,
MultiXactStatusForShare,
- -1 /* Share does not allow updating tuples */
+ -1 /* Share does not allow updating tuples */
},
- { /* LockTupleNoKeyExclusive */
+ { /* LockTupleNoKeyExclusive */
ExclusiveLock,
MultiXactStatusForNoKeyUpdate,
MultiXactStatusNoKeyUpdate
},
- { /* LockTupleExclusive */
+ { /* LockTupleExclusive */
AccessExclusiveLock,
MultiXactStatusForUpdate,
MultiXactStatusUpdate
}
};
/* Get the LOCKMODE for a given MultiXactStatus */
#define LOCKMODE_from_mxstatus(status) \
(tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
@ -168,12 +170,12 @@ tupleLockExtraInfo[MaxLockTupleMode + 1] =
*/
static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
{
- LockTupleKeyShare, /* ForKeyShare */
- LockTupleShare, /* ForShare */
- LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
- LockTupleExclusive, /* ForUpdate */
- LockTupleNoKeyExclusive, /* NoKeyUpdate */
- LockTupleExclusive /* Update */
+ LockTupleKeyShare, /* ForKeyShare */
+ LockTupleShare, /* ForShare */
+ LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
+ LockTupleExclusive, /* ForUpdate */
+ LockTupleNoKeyExclusive, /* NoKeyUpdate */
+ LockTupleExclusive /* Update */
};
/* Get the LockTupleMode for a given MultiXactStatus */
@ -365,10 +367,10 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
* page. That's how index-only scans work fine in hot standby. A crucial
* difference between index-only scans and heap scans is that the
* index-only scan completely relies on the visibility map where as heap
- * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if the
- * page-level flag can be trusted in the same way, because it might get
- * propagated somehow without being explicitly WAL-logged, e.g. via a full
- * page write. Until we can prove that beyond doubt, let's check each
+ * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
+ * the page-level flag can be trusted in the same way, because it might
+ * get propagated somehow without being explicitly WAL-logged, e.g. via a
+ * full page write. Until we can prove that beyond doubt, let's check each
* tuple for visibility the hard way.
*/
all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
@ -1880,7 +1882,7 @@ heap_get_latest_tid(Relation relation,
* tuple. Check for XMIN match.
*/
if (TransactionIdIsValid(priorXmax) &&
- !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
+ !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
{
UnlockReleaseBuffer(buffer);
break;
@ -2488,7 +2490,7 @@ compute_infobits(uint16 infomask, uint16 infomask2)
((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
- /* note we ignore HEAP_XMAX_SHR_LOCK here */
+ /* note we ignore HEAP_XMAX_SHR_LOCK here */
((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
XLHL_KEYS_UPDATED : 0);
@ -2730,13 +2732,12 @@ l1:
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@ -2846,7 +2847,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd);
switch (result)
{
@ -2936,7 +2937,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
bool checked_lockers;
bool locker_remains;
TransactionId xmax_new_tuple,
- xmax_old_tuple;
+ xmax_old_tuple;
uint16 infomask_old_tuple,
infomask2_old_tuple,
infomask_new_tuple,
@ -3006,13 +3007,13 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* If we're not updating any "key" column, we can grab a weaker lock type.
- * This allows for more concurrency when we are running simultaneously with
- * foreign key checks.
+ * This allows for more concurrency when we are running simultaneously
+ * with foreign key checks.
*
- * Note that if a column gets detoasted while executing the update, but the
- * value ends up being the same, this test will fail and we will use the
- * stronger lock. This is acceptable; the important case to optimize is
- * updates that don't manipulate key columns, not those that
+ * Note that if a column gets detoasted while executing the update, but
+ * the value ends up being the same, this test will fail and we will use
+ * the stronger lock. This is acceptable; the important case to optimize
+ * is updates that don't manipulate key columns, not those that
* serendipitiously arrive at the same key values.
*/
HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs,
@ -3026,12 +3027,12 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
}
@ -3064,7 +3065,7 @@ l2:
}
else if (result == HeapTupleBeingUpdated && wait)
{
- TransactionId xwait;
+ TransactionId xwait;
uint16 infomask;
bool can_continue = false;
@ -3073,13 +3074,14 @@ l2:
/*
* XXX note that we don't consider the "no wait" case here. This
* isn't a problem currently because no caller uses that case, but it
- * should be fixed if such a caller is introduced. It wasn't a problem
- * previously because this code would always wait, but now that some
- * tuple locks do not conflict with one of the lock modes we use, it is
- * possible that this case is interesting to handle specially.
+ * should be fixed if such a caller is introduced. It wasn't a
+ * problem previously because this code would always wait, but now
+ * that some tuple locks do not conflict with one of the lock modes we
+ * use, it is possible that this case is interesting to handle
+ * specially.
*
- * This may cause failures with third-party code that calls heap_update
- * directly.
+ * This may cause failures with third-party code that calls
+ * heap_update directly.
*/
/* must copy state data before unlocking buffer */
@ -3109,15 +3111,15 @@ l2:
* gone (or even not sleep at all in some cases); we need to preserve
* it as locker, unless it is gone completely.
*
- * If it's not a multi, we need to check for sleeping conditions
- * before actually going to sleep. If the update doesn't conflict with the
- * locks, we just continue without sleeping (but making sure it is
- * preserved).
+ * If it's not a multi, we need to check for sleeping conditions
+ * before actually going to sleep. If the update doesn't conflict
+ * with the locks, we just continue without sleeping (but making sure
+ * it is preserved).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId update_xact;
- int remain;
+ TransactionId update_xact;
+ int remain;
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain,
@ -3135,18 +3137,18 @@ l2:
goto l2;
/*
- * Note that the multixact may not be done by now. It could have
+ * Note that the multixact may not be done by now. It could have
* surviving members; our own xact or other subxacts of this
* backend, and also any other concurrent transaction that locked
- * the tuple with KeyShare if we only got TupleLockUpdate. If this
- * is the case, we have to be careful to mark the updated tuple
- * with the surviving members in Xmax.
+ * the tuple with KeyShare if we only got TupleLockUpdate. If
+ * this is the case, we have to be careful to mark the updated
+ * tuple with the surviving members in Xmax.
*
- * Note that there could have been another update in the MultiXact.
- * In that case, we need to check whether it committed or aborted.
- * If it aborted we are safe to update it again; otherwise there is
- * an update conflict, and we have to return HeapTupleUpdated
- * below.
+ * Note that there could have been another update in the
+ * MultiXact. In that case, we need to check whether it committed
+ * or aborted. If it aborted we are safe to update it again;
+ * otherwise there is an update conflict, and we have to return
+ * HeapTupleUpdated below.
*
* In the LockTupleExclusive case, we still need to preserve the
* surviving members: those would include the tuple locks we had
@ -3167,21 +3169,21 @@ l2:
else
{
/*
- * If it's just a key-share locker, and we're not changing the
- * key columns, we don't need to wait for it to end; but we
- * need to preserve it as locker.
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
*/
if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * recheck the locker; if someone else changed the tuple while we
- * weren't looking, start over.
+ * recheck the locker; if someone else changed the tuple while
+ * we weren't looking, start over.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
can_continue = true;
@ -3194,13 +3196,13 @@ l2:
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then some
- * other xact could update this tuple before we get to this point.
- * Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then
+ * some other xact could update this tuple before we get to
+ * this point. Check for xmax change, and start over if so.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
/* Otherwise check if it committed or aborted */
@ -3247,8 +3249,8 @@ l2:
* visible while we were busy locking the buffer, or during some
* subsequent window during which we had it unlocked, we'll have to unlock
* and re-lock, to avoid holding the buffer lock across an I/O. That's a
- * bit unfortunate, especially since we'll now have to recheck whether
- * the tuple has been locked or updated under us, but hopefully it won't
+ * bit unfortunate, especially since we'll now have to recheck whether the
+ * tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@ -3656,9 +3658,9 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do a
- * single heap_deform_tuple call on each tuple, instead? But that doesn't
- * work for system columns ...
+ * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
+ * a single heap_deform_tuple call on each tuple, instead? But that
+ * doesn't work for system columns ...
*/
value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
@ -3720,12 +3722,12 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
bool *satisfies_hot, bool *satisfies_key,
HeapTuple oldtup, HeapTuple newtup)
{
- int next_hot_attnum;
- int next_key_attnum;
- bool hot_result = true;
- bool key_result = true;
- bool key_done = false;
- bool hot_done = false;
+ int next_hot_attnum;
+ int next_key_attnum;
+ bool hot_result = true;
+ bool key_result = true;
+ bool key_done = false;
+ bool hot_done = false;
next_hot_attnum = bms_first_member(hot_attrs);
if (next_hot_attnum == -1)
@ -3743,8 +3745,8 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
for (;;)
{
- int check_now;
- bool changed;
+ int check_now;
+ bool changed;
/* both bitmapsets are now empty */
if (key_done && hot_done)
@ -3813,7 +3815,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
result = heap_update(relation, otid, tup,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd, &lockmode);
switch (result)
{
@ -3843,7 +3845,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
static MultiXactStatus
get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
{
- MultiXactStatus retval;
+ MultiXactStatus retval;
if (is_update)
retval = tupleLockExtraInfo[mode].updstatus;
@ -3933,7 +3935,7 @@ l3:
uint16 infomask;
uint16 infomask2;
bool require_sleep;
- ItemPointerData t_ctid;
+ ItemPointerData t_ctid;
/* must copy state data before unlocking buffer */
xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
@ -3944,22 +3946,22 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
/*
- * If any subtransaction of the current top transaction already holds a
- * lock as strong or stronger than what we're requesting, we
+ * If any subtransaction of the current top transaction already holds
+ * a lock as strong or stronger than what we're requesting, we
* effectively hold the desired lock already. We *must* succeed
- * without trying to take the tuple lock, else we will deadlock against
- * anyone wanting to acquire a stronger lock.
+ * without trying to take the tuple lock, else we will deadlock
+ * against anyone wanting to acquire a stronger lock.
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int i;
- int nmembers;
+ int i;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been the
- * case, HeapTupleSatisfiesUpdate would have returned MayBeUpdated
- * and we wouldn't be here.
+ * We don't need to allow old multixacts here; if that had been
+ * the case, HeapTupleSatisfiesUpdate would have returned
+ * MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
@ -3967,7 +3969,7 @@ l3:
{
if (TransactionIdIsCurrentTransactionId(members[i].xid))
{
- LockTupleMode membermode;
+ LockTupleMode membermode;
membermode = TUPLOCK_from_mxstatus(members[i].status);
@ -4001,8 +4003,8 @@ l3:
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
RelationGetRelationName(relation))));
errmsg("could not obtain lock on row in relation \"%s\"",
RelationGetRelationName(relation))));
}
else
LockTupleTuplock(relation, tid, mode);
@ -4023,34 +4025,34 @@ l3:
* continue if the key hasn't been modified.
*
* However, if there are updates, we need to walk the update chain
- * to mark future versions of the row as locked, too. That way, if
- * somebody deletes that future version, we're protected against
- * the key going away. This locking of future versions could block
- * momentarily, if a concurrent transaction is deleting a key; or
- * it could return a value to the effect that the transaction
- * deleting the key has already committed. So we do this before
- * re-locking the buffer; otherwise this would be prone to
- * deadlocks.
+ * to mark future versions of the row as locked, too. That way,
+ * if somebody deletes that future version, we're protected
+ * against the key going away. This locking of future versions
+ * could block momentarily, if a concurrent transaction is
+ * deleting a key; or it could return a value to the effect that
+ * the transaction deleting the key has already committed. So we
+ * do this before re-locking the buffer; otherwise this would be
+ * prone to deadlocks.
*
* Note that the TID we're locking was grabbed before we unlocked
- * the buffer. For it to change while we're not looking, the other
- * properties we're testing for below after re-locking the buffer
- * would also change, in which case we would restart this loop
- * above.
+ * the buffer. For it to change while we're not looking, the
+ * other properties we're testing for below after re-locking the
+ * buffer would also change, in which case we would restart this
+ * loop above.
*/
if (!(infomask2 & HEAP_KEYS_UPDATED))
{
- bool updated;
+ bool updated;
updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
/*
- * If there are updates, follow the update chain; bail out
- * if that cannot be done.
+ * If there are updates, follow the update chain; bail out if
+ * that cannot be done.
*/
if (follow_updates && updated)
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@ -4069,8 +4071,9 @@ l3:
/*
* Make sure it's still an appropriate lock, else start over.
* Also, if it wasn't updated before we released the lock, but
- * is updated now, we start over too; the reason is that we now
- * need to follow the update chain to lock the new versions.
+ * is updated now, we start over too; the reason is that we
+ * now need to follow the update chain to lock the new
+ * versions.
*/
if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
@ -4114,20 +4117,20 @@ l3:
{
/*
* If we're requesting NoKeyExclusive, we might also be able to
- * avoid sleeping; just ensure that there's no other lock type than
- * KeyShare. Note that this is a bit more involved than just
+ * avoid sleeping; just ensure that there's no other lock type
+ * than KeyShare. Note that this is a bit more involved than just
* checking hint bits -- we need to expand the multixact to figure
* out lock modes for each one (unless there was only one such
* locker).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int nmembers;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been
- * the case, HeapTupleSatisfiesUpdate would have returned
+ * We don't need to allow old multixacts here; if that had
+ * been the case, HeapTupleSatisfiesUpdate would have returned
* MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
@ -4135,15 +4138,15 @@ l3:
if (nmembers <= 0)
{
/*
- * No need to keep the previous xmax here. This is unlikely
- * to happen.
+ * No need to keep the previous xmax here. This is
+ * unlikely to happen.
*/
require_sleep = false;
}
else
{
- int i;
- bool allowed = true;
+ int i;
+ bool allowed = true;
for (i = 0; i < nmembers; i++)
{
@ -4180,8 +4183,8 @@ l3:
/* if the xmax changed in the meantime, start over */
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/* otherwise, we're good */
require_sleep = false;
@ -4221,7 +4224,7 @@ l3:
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@ -4243,15 +4246,15 @@ l3:
* for xmax change, and start over if so.
*/
if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
* Of course, the multixact might not be done here: if we're
* requesting a light lock mode, other transactions with light
* locks could still be alive, as well as locks owned by our
- * own xact or other subxacts of this backend. We need to
+ * own xact or other subxacts of this backend. We need to
* preserve the surviving MultiXact members. Note that it
* isn't absolutely necessary in the latter case, but doing so
* is simpler.
@ -4275,7 +4278,7 @@ l3:
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@ -4294,15 +4297,15 @@ l3:
/*
* xwait is done, but if xwait had just locked the tuple then
* some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * this point. Check for xmax change, and start over if so.
*/
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
- * Otherwise check if it committed or aborted. Note we cannot
+ * Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that should have been handled above. So
* that transaction must necessarily be gone by now.
@ -4355,8 +4358,8 @@ failed:
* for cases where it is a plain TransactionId.
*
* Note in particular that this covers the case where we already hold
- * exclusive lock on the tuple and the caller only wants key share or share
- * lock. It would certainly not do to give up the exclusive lock.
+ * exclusive lock on the tuple and the caller only wants key share or
+ * share lock. It would certainly not do to give up the exclusive lock.
*/
if (!(old_infomask & (HEAP_XMAX_INVALID |
HEAP_XMAX_COMMITTED |
@ -4379,13 +4382,12 @@ failed:
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@ -4419,11 +4421,11 @@ failed:
HeapTupleHeaderSetXmax(tuple->t_data, xid);
/*
- * Make sure there is no forward chain link in t_ctid. Note that in the
+ * Make sure there is no forward chain link in t_ctid. Note that in the
* cases where the tuple has been updated, we must not overwrite t_ctid,
* because it was set by the updater. Moreover, if the tuple has been
- * updated, we need to follow the update chain to lock the new versions
- * of the tuple as well.
+ * updated, we need to follow the update chain to lock the new versions of
+ * the tuple as well.
*/
if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
tuple->t_data->t_ctid = *tid;
@ -4514,9 +4516,9 @@ compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
TransactionId *result_xmax, uint16 *result_infomask,
uint16 *result_infomask2)
{
- TransactionId new_xmax;
- uint16 new_infomask,
- new_infomask2;
+ TransactionId new_xmax;
+ uint16 new_infomask,
+ new_infomask2;
l5:
new_infomask = 0;
@ -4562,11 +4564,11 @@ l5:
}
else if (old_infomask & HEAP_XMAX_IS_MULTI)
{
- MultiXactStatus new_status;
+ MultiXactStatus new_status;
/*
- * Currently we don't allow XMAX_COMMITTED to be set for multis,
- * so cross-check.
+ * Currently we don't allow XMAX_COMMITTED to be set for multis, so
+ * cross-check.
*/
Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
@ -4587,10 +4589,11 @@ l5:
/*
* If the XMAX is already a MultiXactId, then we need to expand it to
- * include add_to_xmax; but if all the members were lockers and are all
- * gone, we can do away with the IS_MULTI bit and just set add_to_xmax
- * as the only locker/updater. If all lockers are gone and we have an
- * updater that aborted, we can also do without a multi.
+ * include add_to_xmax; but if all the members were lockers and are
+ * all gone, we can do away with the IS_MULTI bit and just set
+ * add_to_xmax as the only locker/updater. If all lockers are gone
+ * and we have an updater that aborted, we can also do without a
+ * multi.
*
* The cost of doing GetMultiXactIdMembers would be paid by
* MultiXactIdExpand if we weren't to do this, so this check is not
@ -4624,8 +4627,8 @@ l5:
* It's a committed update, so we need to preserve him as updater of
* the tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
@ -4633,6 +4636,7 @@ l5:
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
@ -4648,8 +4652,8 @@ l5:
* create a new MultiXactId that includes both the old locker or
* updater and our own TransactionId.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
{
@ -4668,8 +4672,8 @@ l5:
{
/*
* LOCK_ONLY can be present alone only when a page has been
- * upgraded by pg_upgrade. But in that case,
- * TransactionIdIsInProgress() should have returned false. We
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
* assume it's no longer locked in this case.
*/
elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
@ -4696,8 +4700,8 @@ l5:
*/
if (xmax == add_to_xmax)
{
- LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
- bool old_isupd = ISUPDATE_from_mxstatus(status);
+ LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
+ bool old_isupd = ISUPDATE_from_mxstatus(status);
/*
* We can do this if the new LockTupleMode is higher or equal than
@ -4728,8 +4732,8 @@ l5:
* It's a committed update, so we gotta preserve him as updater of the
* tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
@ -4737,6 +4741,7 @@ l5:
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
@ -4774,14 +4779,14 @@ static HTSU_Result
heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
LockTupleMode mode)
{
- ItemPointerData tupid;
- HeapTupleData mytup;
- Buffer buf;
- uint16 new_infomask,
- new_infomask2,
- old_infomask;
- TransactionId xmax,
- new_xmax;
+ ItemPointerData tupid;
+ HeapTupleData mytup;
+ Buffer buf;
+ uint16 new_infomask,
+ new_infomask2,
+ old_infomask;
+ TransactionId xmax,
+ new_xmax;
ItemPointerCopy(tid, &tupid);
@ -4802,16 +4807,17 @@ l4:
xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
/*
- * If this tuple is updated and the key has been modified (or deleted),
- * what we do depends on the status of the updating transaction: if
- * it's live, we sleep until it finishes; if it has committed, we have
- * to fail (i.e. return HeapTupleUpdated); if it aborted, we ignore it.
- * For updates that didn't touch the key, we can just plough ahead.
+ * If this tuple is updated and the key has been modified (or
+ * deleted), what we do depends on the status of the updating
+ * transaction: if it's live, we sleep until it finishes; if it has
+ * committed, we have to fail (i.e. return HeapTupleUpdated); if it
+ * aborted, we ignore it. For updates that didn't touch the key, we
+ * can just plough ahead.
*/
if (!(old_infomask & HEAP_XMAX_INVALID) &&
(mytup.t_data->t_infomask2 & HEAP_KEYS_UPDATED))
{
- TransactionId update_xid;
+ TransactionId update_xid;
/*
* Note: we *must* check TransactionIdIsInProgress before
@ -4832,7 +4838,7 @@ l4:
goto l4;
}
else if (TransactionIdDidAbort(update_xid))
- ; /* okay to proceed */
+ ; /* okay to proceed */
else if (TransactionIdDidCommit(update_xid))
{
UnlockReleaseBuffer(buf);
@ -4861,7 +4867,7 @@ l4:
{
xl_heap_lock_updated xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.target.node = rel->rd_node;
@ -4889,7 +4895,7 @@ l4:
/* if we find the end of update chain, we're done. */
if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
- ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
+ ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
HeapTupleHeaderIsOnlyLocked(mytup.t_data))
{
UnlockReleaseBuffer(buf);
@ -4904,13 +4910,13 @@ l4:
/*
* heap_lock_updated_tuple
- * Follow update chain when locking an updated tuple, acquiring locks (row
- * marks) on the updated versions.
+ * Follow update chain when locking an updated tuple, acquiring locks (row
+ * marks) on the updated versions.
*
* The initial tuple is assumed to be already locked.
*
* This function doesn't check visibility, it just inconditionally marks the
- * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
* concurrently (or updated with the key being modified), sleep until the
* transaction doing it is finished.
*
@ -4932,12 +4938,12 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
{
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@ -5117,9 +5123,9 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
/*
- * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
- * + LOCKED. Normalize to INVALID just to be sure no one gets
- * confused. Also get rid of the HEAP_KEYS_UPDATED bit.
+ * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * Also get rid of the HEAP_KEYS_UPDATED bit.
*/
tuple->t_infomask &= ~HEAP_XMAX_BITS;
tuple->t_infomask |= HEAP_XMAX_INVALID;
@ -5172,13 +5178,13 @@ static void
GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
uint16 *new_infomask2)
{
- int nmembers;
- MultiXactMember *members;
- int i;
- uint16 bits = HEAP_XMAX_IS_MULTI;
- uint16 bits2 = 0;
- bool has_update = false;
- LockTupleMode strongest = LockTupleKeyShare;
+ int nmembers;
+ MultiXactMember *members;
+ int i;
+ uint16 bits = HEAP_XMAX_IS_MULTI;
+ uint16 bits2 = 0;
+ bool has_update = false;
+ LockTupleMode strongest = LockTupleKeyShare;
/*
* We only use this in multis we just created, so they cannot be values
@ -5188,7 +5194,7 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
for (i = 0; i < nmembers; i++)
{
- LockTupleMode mode;
+ LockTupleMode mode;
/*
* Remember the strongest lock mode held by any member of the
@ -5249,22 +5255,22 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
static TransactionId
MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
{
- TransactionId update_xact = InvalidTransactionId;
- MultiXactMember *members;
- int nmembers;
+ TransactionId update_xact = InvalidTransactionId;
+ MultiXactMember *members;
+ int nmembers;
Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
Assert(t_infomask & HEAP_XMAX_IS_MULTI);
/*
- * Since we know the LOCK_ONLY bit is not set, this cannot be a
- * multi from pre-pg_upgrade.
+ * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
+ * pre-pg_upgrade.
*/
nmembers = GetMultiXactIdMembers(xmax, &members, false);
if (nmembers > 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
@ -5284,6 +5290,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
members[i].status == MultiXactStatusUpdate);
update_xact = members[i].xid;
#ifndef USE_ASSERT_CHECKING
+
/*
* in an assert-enabled build, walk the whole array to ensure
* there's no other updater.
@ -5300,7 +5307,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
/*
* HeapTupleGetUpdateXid
- * As above, but use a HeapTupleHeader
+ * As above, but use a HeapTupleHeader
*
* See also HeapTupleHeaderGetUpdateXid, which can be used without previously
* checking the hint bits.
@ -5314,7 +5321,7 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
/*
* Do_MultiXactIdWait
- * Actual implementation for the two functions below.
+ * Actual implementation for the two functions below.
*
* We do this by sleeping on each member using XactLockTableWait. Any
* members that belong to the current backend are *not* waited for, however;
@ -5432,7 +5439,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.
@ -6091,7 +6098,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
TransactionId cutoff_xid = xlrec->cutoff_xid;
- MultiXactId cutoff_multi = xlrec->cutoff_multi;
+ MultiXactId cutoff_multi = xlrec->cutoff_multi;
Buffer buffer;
Page page;
@ -6361,7 +6368,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@ -6729,7 +6736,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
goto newt;
page = (Page) BufferGetPage(obuffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
if (samepage)
{
@ -6931,7 +6938,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@ -6962,7 +6969,7 @@ static void
heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_lock_updated *xlrec =
- (xl_heap_lock_updated *) XLogRecGetData(record);
+ (xl_heap_lock_updated *) XLogRecGetData(record);
Buffer buffer;
Page page;
OffsetNumber offnum;
@ -6983,7 +6990,7 @@ heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@ -7033,7 +7040,7 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;

View File

@ -129,7 +129,7 @@ typedef struct RewriteStateData
* determine tuple visibility */
TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
* point */
- MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
+ MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
* cutoff point for multixacts */
MemoryContext rs_cxt; /* for hash tables and entries and tuples in
* them */

View File

@ -292,7 +292,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
*/
if (DataChecksumsEnabled())
{
- Page heapPage = BufferGetPage(heapBuf);
+ Page heapPage = BufferGetPage(heapBuf);
/* caller is expected to set PD_ALL_VISIBLE first */
Assert(PageIsAllVisible(heapPage));

View File

@ -532,8 +532,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
START_CRIT_SECTION();
/*
- * We don't do MarkBufferDirty here because we're about to initialise
- * the page, and nobody else can see it yet.
+ * We don't do MarkBufferDirty here because we're about to initialise the
+ * page, and nobody else can see it yet.
*/
/* XLOG stuff */
@ -552,8 +552,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
/*
- * We don't do PageSetLSN here because we're about to initialise
- * the page, so no need.
+ * We don't do PageSetLSN here because we're about to initialise the
+ * page, so no need.
*/
}

View File

@ -373,7 +373,7 @@ btree_xlog_split(bool onleft, bool isroot,
* Note that this code ensures that the items remaining on the
* left page are in the correct item number order, but it does not
* reproduce the physical order they would have had. Is this
- * worth changing? See also _bt_restore_page().
+ * worth changing? See also _bt_restore_page().
*/
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
@ -606,18 +606,18 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* In what follows, we have to examine the previous state of the index
- * page, as well as the heap page(s) it points to. This is only valid if
+ * page, as well as the heap page(s) it points to. This is only valid if
* WAL replay has reached a consistent database state; which means that
- * the preceding check is not just an optimization, but is *necessary*.
- * We won't have let in any user sessions before we reach consistency.
+ * the preceding check is not just an optimization, but is *necessary*. We
+ * won't have let in any user sessions before we reach consistency.
*/
if (!reachedConsistency)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
- * Get index page. If the DB is consistent, this should not fail, nor
+ * Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
@ -701,10 +701,10 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* XXX If all heap tuples were LP_DEAD then we will be returning
- * InvalidTransactionId here, causing conflict for all HS
- * transactions. That should happen very rarely (reasoning please?). Also
- * note that caller can't tell the difference between this case and the
- * fast path exit above. May need to change that in future.
+ * InvalidTransactionId here, causing conflict for all HS transactions.
+ * That should happen very rarely (reasoning please?). Also note that
+ * caller can't tell the difference between this case and the fast path
+ * exit above. May need to change that in future.
*/
return latestRemovedXid;
}
@ -721,7 +721,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
* If we have any conflict processing to do, it must happen before we
* update the page.
*
- * Btree delete records can conflict with standby queries. You might
+ * Btree delete records can conflict with standby queries. You might
* think that vacuum records would conflict as well, but we've handled
* that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
* cleaned by the vacuum of the heap and so we can resolve any conflicts

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* clogdesc.c
- * rmgr descriptor routines for access/transam/clog.c
+ * rmgr descriptor routines for access/transam/clog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/clogdesc.c
+ * src/backend/access/rmgrdesc/clogdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* dbasedesc.c
- * rmgr descriptor routines for commands/dbcommands.c
+ * rmgr descriptor routines for commands/dbcommands.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/dbasedesc.c
+ * src/backend/access/rmgrdesc/dbasedesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gindesc.c
- * rmgr descriptor routines for access/transam/gin/ginxlog.c
+ * rmgr descriptor routines for access/transam/gin/ginxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gindesc.c
+ * src/backend/access/rmgrdesc/gindesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gistdesc.c
- * rmgr descriptor routines for access/gist/gistxlog.c
+ * rmgr descriptor routines for access/gist/gistxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gistdesc.c
+ * src/backend/access/rmgrdesc/gistdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* hashdesc.c
- * rmgr descriptor routines for access/hash/hash.c
+ * rmgr descriptor routines for access/hash/hash.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/hashdesc.c
+ * src/backend/access/rmgrdesc/hashdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* heapdesc.c
- * rmgr descriptor routines for access/heap/heapam.c
+ * rmgr descriptor routines for access/heap/heapam.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/heapdesc.c
+ * src/backend/access/rmgrdesc/heapdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* mxactdesc.c
- * rmgr descriptor routines for access/transam/multixact.c
+ * rmgr descriptor routines for access/transam/multixact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/mxactdesc.c
+ * src/backend/access/rmgrdesc/mxactdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* nbtdesc.c
- * rmgr descriptor routines for access/nbtree/nbtxlog.c
+ * rmgr descriptor routines for access/nbtree/nbtxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/nbtdesc.c
+ * src/backend/access/rmgrdesc/nbtdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* relmapdesc.c
- * rmgr descriptor routines for utils/cache/relmapper.c
+ * rmgr descriptor routines for utils/cache/relmapper.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/relmapdesc.c
+ * src/backend/access/rmgrdesc/relmapdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* seqdesc.c
- * rmgr descriptor routines for commands/sequence.c
+ * rmgr descriptor routines for commands/sequence.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/seqdesc.c
+ * src/backend/access/rmgrdesc/seqdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* smgrdesc.c
- * rmgr descriptor routines for catalog/storage.c
+ * rmgr descriptor routines for catalog/storage.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/smgrdesc.c
+ * src/backend/access/rmgrdesc/smgrdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* spgdesc.c
- * rmgr descriptor routines for access/spgist/spgxlog.c
+ * rmgr descriptor routines for access/spgist/spgxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/spgdesc.c
+ * src/backend/access/rmgrdesc/spgdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* standbydesc.c
- * rmgr descriptor routines for storage/ipc/standby.c
+ * rmgr descriptor routines for storage/ipc/standby.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/standbydesc.c
+ * src/backend/access/rmgrdesc/standbydesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* tblspcdesc.c
- * rmgr descriptor routines for commands/tablespace.c
+ * rmgr descriptor routines for commands/tablespace.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/tblspcdesc.c
+ * src/backend/access/rmgrdesc/tblspcdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* xactdesc.c
- * rmgr descriptor routines for access/transam/xact.c
+ * rmgr descriptor routines for access/transam/xact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xactdesc.c
+ * src/backend/access/rmgrdesc/xactdesc.c
*
*-------------------------------------------------------------------------
*/

View File

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* xlogdesc.c
- * rmgr descriptor routines for access/transam/xlog.c
+ * rmgr descriptor routines for access/transam/xlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
- *
+ *
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xlogdesc.c
+ * src/backend/access/rmgrdesc/xlogdesc.c
*
*-------------------------------------------------------------------------
*/
@ -45,7 +45,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
"tli %u; prev tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest multi %u in DB %u; "
"oldest running xid %u; %s",
- (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
+ (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
checkpoint->ThisTimeLineID,
checkpoint->PrevTimeLineID,
checkpoint->fullPageWrites ? "true" : "false",
@ -84,7 +84,8 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
}
else if (info == XLOG_HINT)
{
- BkpBlock *bkp = (BkpBlock *) rec;
+ BkpBlock *bkp = (BkpBlock *) rec;
appendStringInfo(buf, "page hint: %s block %u",
relpathperm(bkp->node, bkp->fork),
bkp->block);

View File

@ -30,7 +30,7 @@
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
* already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
* it is always possible to get "SPGiST inner tuple size exceeds maximum"
* if there are too many distinct next-byte values at a given place in the

View File

@ -5,7 +5,7 @@
*
* The pg_multixact manager is a pg_clog-like manager that stores an array of
* MultiXactMember for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. Each MultiXactMember is comprised of a
+ * shared-row-lock implementation. Each MultiXactMember is comprised of a
* TransactionId and a set of flag bits. The name is a bit historical:
* originally, a MultiXactId consisted of more than one TransactionId (except
* in rare corner cases), hence "multi". Nowadays, however, it's perfectly
@ -50,7 +50,7 @@
* The minimum value in each database is stored in pg_database, and the
* global minimum is part of pg_control. Any vacuum that is able to
* advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments. When new multixactid
+ * and uses this value to truncate older segments. When new multixactid
* values are to be created, care is taken that the counter does not
* fall within the wraparound horizon considering the global minimum value.
*
@ -108,7 +108,7 @@
* additional flag bits for each TransactionId. To do this without getting
* into alignment issues, we store four bytes of flags, and then the
* corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group", and
* are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* per page. This wastes 12 bytes per page, but that's OK -- simplicity (and
* performance) trumps space efficiency here.
*
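
Editor's note: the 409-groups figure follows directly from the layout just described. A quick check, again assuming 8kB BLCKSZ and 4-byte TransactionIds:

#include <stdio.h>

int
main(void)
{
    int     blcksz = 8192;          /* assumed BLCKSZ */
    int     group = 4 + 4 * 4;      /* 4 flag bytes + four 4-byte Xids */
    int     per_page = blcksz / group;

    printf("%d groups/page, %d bytes wasted\n",
           per_page, blcksz - per_page * group);    /* 409 and 12 */
    return 0;
}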
@ -177,17 +177,17 @@ typedef struct MultiXactStateData
MultiXactId lastTruncationPoint;
/*
* oldest multixact that is still on disk. Anything older than this should
* not be consulted.
* oldest multixact that is still on disk. Anything older than this
* should not be consulted.
*/
MultiXactId oldestMultiXactId;
Oid oldestMultiXactDB;
MultiXactId oldestMultiXactId;
Oid oldestMultiXactDB;
/* support for anti-wraparound measures */
MultiXactId multiVacLimit;
MultiXactId multiWarnLimit;
MultiXactId multiStopLimit;
MultiXactId multiWrapLimit;
MultiXactId multiVacLimit;
MultiXactId multiWarnLimit;
MultiXactId multiStopLimit;
MultiXactId multiWrapLimit;
/*
* Per-backend data starts here. We have two arrays stored in the area
@ -252,7 +252,7 @@ static MultiXactId *OldestVisibleMXactId;
* so they will be uninteresting by the time our next transaction starts.
* (XXX not clear that this is correct --- other members of the MultiXact
* could hang around longer than we did. However, it's not clear what a
* better policy for flushing old cache entries would be.) FIXME actually
* better policy for flushing old cache entries would be.) FIXME actually
 * this is plain wrong now that multixacts may contain update Xids.
*
* We allocate the cache entries in a memory context that is deleted at
@ -291,7 +291,7 @@ static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
static MultiXactId GetNewMultiXactId(int nmembers, MultiXactOffset *offset);
/* MultiXact cache management */
static int mxactMemberComparator(const void *arg1, const void *arg2);
static int mxactMemberComparator(const void *arg1, const void *arg2);
static MultiXactId mXactCacheGetBySet(int nmembers, MultiXactMember *members);
static int mXactCacheGetById(MultiXactId multi, MultiXactMember **members);
static void mXactCachePut(MultiXactId multi, int nmembers,
@ -387,15 +387,15 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
multi, xid, mxstatus_to_string(status));
/*
* Note: we don't allow for old multis here. The reason is that the
* only caller of this function does a check that the multixact is
* no longer running.
* Note: we don't allow for old multis here. The reason is that the only
* caller of this function does a check that the multixact is no longer
* running.
*/
nmembers = GetMultiXactIdMembers(multi, &members, false);
if (nmembers < 0)
{
MultiXactMember member;
MultiXactMember member;
/*
* The MultiXactId is obsolete. This can only happen if all the
@ -430,14 +430,14 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
}
/*
* Determine which of the members of the MultiXactId are still of interest.
* This is any running transaction, and also any transaction that grabbed
* something stronger than just a lock and was committed. (An update that
* aborted is of no interest here.)
* Determine which of the members of the MultiXactId are still of
* interest. This is any running transaction, and also any transaction
* that grabbed something stronger than just a lock and was committed.
* (An update that aborted is of no interest here.)
*
* (Removing dead members is just an optimization, but a useful one.
* Note we have the same race condition here as above: j could be 0 at the
* end of the loop.)
* (Removing dead members is just an optimization, but a useful one. Note
* we have the same race condition here as above: j could be 0 at the end
* of the loop.)
*/
newMembers = (MultiXactMember *)
palloc(sizeof(MultiXactMember) * (nmembers + 1));
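
Editor's note: the comment above describes a keep-if filter over the member array: keep anything still running, plus any committed member that did more than lock. A schematic sketch with hypothetical stand-ins for the real checks (the types and predicates below are invented; the genuine tests live in multixact.c and the tuple-visibility code):

#include <stdio.h>

typedef struct
{
    unsigned    xid;
    int         is_committed_update;    /* stand-in for the status test */
} Member;

static int
still_running(unsigned xid)
{
    (void) xid;
    return 0;                   /* pretend nothing is running any more */
}

static int
filter_members(const Member *in, int n, Member *out)
{
    int         j = 0;

    for (int i = 0; i < n; i++)
        if (still_running(in[i].xid) || in[i].is_committed_update)
            out[j++] = in[i];
    return j;                   /* may be 0 -- the race the comment notes */
}

int
main(void)
{
    Member      in[] = {{10, 0}, {11, 1}, {12, 0}};
    Member      out[3];

    printf("kept %d of 3 members\n", filter_members(in, 3, out));
    return 0;
}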
@ -641,12 +641,12 @@ MultiXactIdSetOldestVisible(void)
/*
* ReadNextMultiXactId
* Return the next MultiXactId to be assigned, but don't allocate it
* Return the next MultiXactId to be assigned, but don't allocate it
*/
MultiXactId
ReadNextMultiXactId(void)
{
MultiXactId mxid;
MultiXactId mxid;
/* XXX we could presumably do this without a lock. */
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@ -722,9 +722,9 @@ CreateMultiXactId(int nmembers, MultiXactMember *members)
/*
* XXX Note: there's a lot of padding space in MultiXactMember. We could
* find a more compact representation of this Xlog record -- perhaps all the
* status flags in one XLogRecData, then all the xids in another one? Not
* clear that it's worth the trouble though.
* find a more compact representation of this Xlog record -- perhaps all
* the status flags in one XLogRecData, then all the xids in another one?
* Not clear that it's worth the trouble though.
*/
rdata[0].data = (char *) (&xlrec);
rdata[0].len = SizeOfMultiXactCreate;
@ -878,7 +878,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
/*----------
* Check to see if it's safe to assign another MultiXactId. This protects
* against catastrophic data loss due to multixact wraparound. The basic
* against catastrophic data loss due to multixact wraparound. The basic
* rules are:
*
* If we're past multiVacLimit, start trying to force autovacuum cycles.
@ -892,7 +892,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
{
/*
* For safety's sake, we release MultiXactGenLock while sending
* signals, warnings, etc. This is not so much because we care about
* signals, warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
* possibility of deadlock while doing get_database_name(). First,
* copy all the shared values we'll need in this path.
@ -923,15 +923,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database \"%s\"",
oldest_datname),
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
else
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u",
oldest_datoid),
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
}
else if (!MultiXactIdPrecedes(result, multiWarnLimit))
{
@ -943,15 +943,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errmsg("database \"%s\" must be vacuumed before %u more MultiXactIds are used",
oldest_datname,
multiWrapLimit - result),
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
else
ereport(WARNING,
(errmsg("database with OID %u must be vacuumed before %u more MultiXactIds are used",
oldest_datoid,
multiWrapLimit - result),
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
}
/* Re-acquire lock and start over */
@ -995,10 +995,10 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
*
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* or the first value on a segment-beginning page after this routine exits,
* so anyone else looking at the variable must be prepared to deal with
* either case. Similarly, nextOffset may be zero, but we won't use that
* as the actual start offset of the next multixact.
* or the first value on a segment-beginning page after this routine
* exits, so anyone else looking at the variable must be prepared to deal
* with either case. Similarly, nextOffset may be zero, but we won't use
* that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
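
Editor's note: the warning in the reflowed comment is about what readers of nextMXact must do after this bare increment, since the value can land on InvalidMultiXactId at wraparound. A sketch of the consumer-side normalization (the two #defines mirror what multixact.h is understood to use; treat them as assumptions):

#include <stdio.h>

typedef unsigned int MultiXactId;
#define InvalidMultiXactId  ((MultiXactId) 0)   /* assumed */
#define FirstMultiXactId    ((MultiXactId) 1)   /* assumed */

int
main(void)
{
    MultiXactId next = 0xFFFFFFFFu;     /* counter about to wrap */

    next++;                             /* wraps to 0 = InvalidMultiXactId */
    if (next < FirstMultiXactId)        /* readers must step over it */
        next = FirstMultiXactId;
    printf("next usable multi: %u\n", next);
    return 0;
}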
@ -1066,18 +1066,18 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it should have already been frozen by vacuum. We've truncated
* the on-disk structures anyway. Returning the wrong values could lead to
* an incorrect visibility result. However, to support pg_upgrade we need
* to allow an empty set to be returned regardless, if the caller is
* the on-disk structures anyway. Returning the wrong values could lead
* to an incorrect visibility result. However, to support pg_upgrade we
* need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
* tuple are consistent with the pg_upgrade scenario). If the caller is
* tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. This raises
* a hard error.
* seen, it implies undetected ID wraparound has occurred. This raises a
* hard error.
*
* Shared lock is enough here since we aren't modifying any global state.
* Acquire it just long enough to grab the current counter values. We may
@ -1095,8 +1095,8 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
{
ereport(allow_old ? DEBUG1 : ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
			errmsg("MultiXactId %u no longer exists -- apparent wraparound",
				   multi)));
				errmsg("MultiXactId %u no longer exists -- apparent wraparound",
					   multi)));
return -1;
}
@ -1349,7 +1349,7 @@ mXactCacheGetById(MultiXactId multi, MultiXactMember **members)
memcpy(ptr, entry->members, size);
debug_elog3(DEBUG2, "CacheGet: found %s",
mxid_to_string(multi, entry->nmembers, entry->members));
mxid_to_string(multi, entry->nmembers, entry->members));
return entry->nmembers;
}
}
@ -1423,8 +1423,8 @@ mxstatus_to_string(MultiXactStatus status)
char *
mxid_to_string(MultiXactId multi, int nmembers, MultiXactMember *members)
{
static char *str = NULL;
StringInfoData buf;
static char *str = NULL;
StringInfoData buf;
int i;
if (str != NULL)
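
Editor's note: mxid_to_string keeps its result in a static pointer and frees the previous result on the next call, so callers never own the string. The same pattern in miniature (malloc/free standing in for palloc/pfree; illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
fmt_id(unsigned id)
{
    static char *str = NULL;
    char        buf[32];

    if (str != NULL)
        free(str);              /* release the previous call's result */
    snprintf(buf, sizeof(buf), "multi %u", id);
    str = strdup(buf);
    return str;
}

int
main(void)
{
    printf("%s\n", fmt_id(7));
    printf("%s\n", fmt_id(42)); /* the first string is gone by now */
    return 0;
}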
@ -1721,7 +1721,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog)
*
* StartupXLOG has already established nextMXact/nextOffset by calling
* MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact, and the oldestMulti
* info from pg_control and/or MultiXactAdvanceOldest. Note that we may
* info from pg_control and/or MultiXactAdvanceOldest. Note that we may
* already have replayed WAL data into the SLRU files.
*
* We don't need any locks here, really; the SLRU locks are taken
@ -1883,17 +1883,17 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
void
SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
{
MultiXactId multiVacLimit;
MultiXactId multiWarnLimit;
MultiXactId multiStopLimit;
MultiXactId multiWrapLimit;
MultiXactId curMulti;
MultiXactId multiVacLimit;
MultiXactId multiWarnLimit;
MultiXactId multiStopLimit;
MultiXactId multiWrapLimit;
MultiXactId curMulti;
Assert(MultiXactIdIsValid(oldest_datminmxid));
/*
* The place where we actually get into deep trouble is halfway around
* from the oldest potentially-existing XID/multi. (This calculation is
* from the oldest potentially-existing XID/multi. (This calculation is
* probably off by one or two counts for Xids, because the special XIDs
* reduce the size of the loop a little bit. But we throw in plenty of
* slop below, so it doesn't matter.)
@ -1911,11 +1911,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
multiStopLimit -= FirstMultiXactId;
/*
* We'll start complaining loudly when we get within 10M multis of the stop
* point. This is kind of arbitrary, but if you let your gas gauge get
* down to 1% of full, would you be looking for the next gas station? We
* need to be fairly liberal about this number because there are lots of
* scenarios where most transactions are done by automatic clients that
* We'll start complaining loudly when we get within 10M multis of the
* stop point. This is kind of arbitrary, but if you let your gas gauge
* get down to 1% of full, would you be looking for the next gas station?
* We need to be fairly liberal about this number because there are lots
* of scenarios where most transactions are done by automatic clients that
* won't pay attention to warnings. (No, we're not gonna make this
* configurable. If you know enough to configure it, you know enough to
* not get in this kind of trouble in the first place.)
@ -1925,8 +1925,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
multiWarnLimit -= FirstMultiXactId;
/*
* We'll start trying to force autovacuums when oldest_datminmxid gets
* to be more than 200 million transactions old.
* We'll start trying to force autovacuums when oldest_datminmxid gets to
* be more than 200 million transactions old.
*/
multiVacLimit = oldest_datminmxid + 200000000;
if (multiVacLimit < FirstMultiXactId)
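
Editor's note: the hunks above reflow the comments around SetMultiXactIdLimit's arithmetic, where each limit is derived from the oldest surviving multi with plain unsigned wraparound and then nudged past the reserved IDs. A compressed sketch of the idea (the margins and the FirstMultiXactId value are illustrative assumptions, not copied from the source):

#include <stdio.h>

typedef unsigned int MultiXactId;
#define FirstMultiXactId ((MultiXactId) 1)  /* assumed */

int
main(void)
{
    MultiXactId oldest = 4000000000u;   /* hypothetical oldest datminmxid */
    MultiXactId wrap,
                stop,
                warn,
                vac;

    wrap = oldest + 0x80000000u;    /* halfway around the 2^32 circle */
    if (wrap < FirstMultiXactId)
        wrap += FirstMultiXactId;   /* step over the invalid ID */

    stop = wrap - 100;              /* refuse new multis a bit earlier */
    if (stop < FirstMultiXactId)
        stop -= FirstMultiXactId;

    warn = stop - 10000000;         /* start complaining 10M earlier */
    if (warn < FirstMultiXactId)
        warn -= FirstMultiXactId;

    vac = oldest + 200000000;       /* force autovacuum at 200M old */
    if (vac < FirstMultiXactId)
        vac += FirstMultiXactId;

    printf("wrap=%u stop=%u warn=%u vac=%u\n", wrap, stop, warn, vac);
    return 0;
}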
@ -1945,8 +1945,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/* Log the info */
ereport(DEBUG1,
(errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
multiWrapLimit, oldest_datoid)));
(errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
multiWrapLimit, oldest_datoid)));
/*
* If past the autovacuum force point, immediately signal an autovac
@ -2127,9 +2127,9 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
MultiXactId
GetOldestMultiXactId(void)
{
MultiXactId oldestMXact;
MultiXactId nextMXact;
int i;
MultiXactId oldestMXact;
MultiXactId nextMXact;
int i;
/*
* This is the oldest valid value among all the OldestMemberMXactId[] and
@ -2168,17 +2168,17 @@ GetOldestMultiXactId(void)
typedef struct mxtruncinfo
{
int earliestExistingPage;
int earliestExistingPage;
} mxtruncinfo;
/*
* SlruScanDirectory callback
* This callback determines the earliest existing page number.
* This callback determines the earliest existing page number.
*/
static bool
SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
{
mxtruncinfo *trunc = (mxtruncinfo *) data;
mxtruncinfo *trunc = (mxtruncinfo *) data;
if (trunc->earliestExistingPage == -1 ||
ctl->PagePrecedes(segpage, trunc->earliestExistingPage))
@ -2186,7 +2186,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
trunc->earliestExistingPage = segpage;
}
return false; /* keep going */
return false; /* keep going */
}
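
Editor's note: SlruScanDirCbFindEarliest is a fold over the directory scan: remember the smallest page seen, return false so the scan keeps going. The shape of it, minus the wraparound-aware PagePrecedes comparison (a plain < stands in here; illustrative only):

#include <stdio.h>

typedef struct
{
    int         earliest;       /* -1 until the first page is seen */
} ScanState;

static int
find_earliest_cb(int segpage, void *data)
{
    ScanState  *st = (ScanState *) data;

    if (st->earliest == -1 || segpage < st->earliest)
        st->earliest = segpage;
    return 0;                   /* false: keep scanning */
}

int
main(void)
{
    int         pages[] = {42, 7, 19};
    ScanState   st = {-1};

    for (int i = 0; i < 3; i++)
        (void) find_earliest_cb(pages[i], &st);
    printf("earliest page: %d\n", st.earliest); /* 7 */
    return 0;
}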
/*
@ -2200,16 +2200,16 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
void
TruncateMultiXact(MultiXactId oldestMXact)
{
MultiXactOffset oldestOffset;
mxtruncinfo trunc;
MultiXactId earliest;
MultiXactOffset oldestOffset;
mxtruncinfo trunc;
MultiXactId earliest;
/*
* Note we can't just plow ahead with the truncation; it's possible that
* there are no segments to truncate, which is a problem because we are
* going to attempt to read the offsets page to determine where to truncate
* the members SLRU. So we first scan the directory to determine the
* earliest offsets page number that we can read without error.
* going to attempt to read the offsets page to determine where to
* truncate the members SLRU. So we first scan the directory to determine
* the earliest offsets page number that we can read without error.
*/
trunc.earliestExistingPage = -1;
SlruScanDirectory(MultiXactOffsetCtl, SlruScanDirCbFindEarliest, &trunc);
@ -2220,9 +2220,9 @@ TruncateMultiXact(MultiXactId oldestMXact)
return;
/*
* First, compute the safe truncation point for MultiXactMember.
* This is the starting offset of the multixact we were passed
* as MultiXactOffset cutoff.
* First, compute the safe truncation point for MultiXactMember. This is
* the starting offset of the multixact we were passed as MultiXactOffset
* cutoff.
*/
{
int pageno;
@ -2380,7 +2380,7 @@ multixact_redo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
xl_multixact_create *xlrec =
(xl_multixact_create *) XLogRecGetData(record);
(xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid;
int i;
@ -2427,12 +2427,12 @@ pg_get_multixact_members(PG_FUNCTION_ARGS)
{
typedef struct
{
MultiXactMember *members;
int nmembers;
int iter;
MultiXactMember *members;
int nmembers;
int iter;
} mxact;
MultiXactId mxid = PG_GETARG_UINT32(0);
mxact *multi;
MultiXactId mxid = PG_GETARG_UINT32(0);
mxact *multi;
FuncCallContext *funccxt;
if (mxid < FirstMultiXactId)

View File

@ -15,7 +15,7 @@
* <parentTLI> <switchpoint> <reason>
*
* parentTLI ID of the parent timeline
* switchpoint XLogRecPtr of the WAL position where the switch happened
* switchpoint XLogRecPtr of the WAL position where the switch happened
* reason human-readable explanation of why the timeline was changed
*
* The fields are separated by tabs. Lines beginning with # are comments, and
@ -49,7 +49,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
{
char path[MAXPGPATH];
char histfname[MAXFNAMELEN];
TimeLineID tli;
TimeLineID tli;
for (tli = begin; tli < end; tli++)
{
@ -179,8 +179,8 @@ readTimeLineHistory(TimeLineID targetTLI)
errhint("Timeline IDs must be less than child timeline's ID.")));
/*
* Create one more entry for the "tip" of the timeline, which has no
* entry in the history file.
* Create one more entry for the "tip" of the timeline, which has no entry
* in the history file.
*/
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry->tli = targetTLI;
@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
/*
* Prefer link() to rename() here just to be really sure that we don't
* overwrite an existing file. However, there shouldn't be one, so
* overwrite an existing file. However, there shouldn't be one, so
* rename() is an acceptable substitute except for the truly paranoid.
*/
#if HAVE_WORKING_LINK
@ -530,7 +530,7 @@ writeTimeLineHistoryFile(TimeLineID tli, char *content, int size)
bool
tliInHistory(TimeLineID tli, List *expectedTLEs)
{
ListCell *cell;
ListCell *cell;
foreach(cell, expectedTLEs)
{
@ -548,11 +548,12 @@ tliInHistory(TimeLineID tli, List *expectedTLEs)
TimeLineID
tliOfPointInHistory(XLogRecPtr ptr, List *history)
{
ListCell *cell;
ListCell *cell;
foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) &&
(XLogRecPtrIsInvalid(tle->end) || ptr < tle->end))
{
@ -563,7 +564,7 @@ tliOfPointInHistory(XLogRecPtr ptr, List *history)
/* shouldn't happen. */
elog(ERROR, "timeline history was not contiguous");
return 0; /* keep compiler quiet */
return 0; /* keep compiler quiet */
}
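
Editor's note: tliOfPointInHistory is an interval lookup: the first history entry whose [begin, end) range contains the point wins, with an invalid bound treated as open. A self-contained sketch of the same containment test (the typedefs and the zero InvalidXLogRecPtr are assumptions):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;
#define InvalidXLogRecPtr ((XLogRecPtr) 0)  /* assumed */

typedef struct
{
    uint32_t    tli;
    XLogRecPtr  begin;
    XLogRecPtr  end;
} Entry;

static uint32_t
tli_of_point(XLogRecPtr ptr, const Entry *hist, int n)
{
    for (int i = 0; i < n; i++)
        if ((hist[i].begin == InvalidXLogRecPtr || hist[i].begin <= ptr) &&
            (hist[i].end == InvalidXLogRecPtr || ptr < hist[i].end))
            return hist[i].tli;
    return 0;                   /* history was not contiguous */
}

int
main(void)
{
    Entry       hist[] = {{1, 0, 0x1000}, {2, 0x1000, InvalidXLogRecPtr}};

    printf("tli = %u\n", (unsigned) tli_of_point(0x1500, hist, 2)); /* 2 */
    return 0;
}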
/*
@ -579,7 +580,7 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
if (nextTLI)
*nextTLI = 0;
foreach (cell, history)
foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
@ -592,5 +593,5 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
ereport(ERROR,
(errmsg("requested timeline %u is not in this server's history",
tli)));
return InvalidXLogRecPtr; /* keep compiler quiet */
return InvalidXLogRecPtr; /* keep compiler quiet */
}

View File

@ -1024,8 +1024,8 @@ RecordTransactionCommit(void)
*
* It's safe to change the delayChkpt flag of our own backend without
* holding the ProcArrayLock, since we're the only one modifying it.
* This makes checkpoint's determination of which xacts are delayChkpt a
* bit fuzzy, but it doesn't matter.
* This makes checkpoint's determination of which xacts are delayChkpt
* a bit fuzzy, but it doesn't matter.
*/
START_CRIT_SECTION();
MyPgXact->delayChkpt = true;
@ -4683,12 +4683,11 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
* from the template database, and then commit the transaction. If we
* crash after all the files have been copied but before the commit, you
* have files in the data directory without an entry in pg_database. To
* minimize the window
* for that, we use ForceSyncCommit() to rush the commit record to disk as
 * quickly as possible. We have the same window during recovery, and forcing
* an XLogFlush() (which updates minRecoveryPoint during recovery) helps
* to reduce that problem window, for any user that requested
* ForceSyncCommit().
* minimize the window for that, we use ForceSyncCommit() to rush the
 * commit record to disk as quickly as possible. We have the same window
* during recovery, and forcing an XLogFlush() (which updates
* minRecoveryPoint during recovery) helps to reduce that problem window,
* for any user that requested ForceSyncCommit().
*/
if (XactCompletionForceSyncCommit(xinfo))
XLogFlush(lsn);

File diff suppressed because it is too large

View File

@ -87,9 +87,9 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* of log segments that weren't yet transferred to the archive.
*
* Notice that we don't actually overwrite any files when we copy back
* from archive because the restore_command may inadvertently
* restore inappropriate xlogs, or they may be corrupt, so we may wish to
 * fall back to the segments remaining in current XLOGDIR later. The
* from archive because the restore_command may inadvertently restore
 * inappropriate xlogs, or they may be corrupt, so we may wish to fall
 * back to the segments remaining in current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
@ -433,19 +433,20 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
if (stat(xlogfpath, &statbuf) == 0)
{
char oldpath[MAXPGPATH];
char oldpath[MAXPGPATH];
#ifdef WIN32
static unsigned int deletedcounter = 1;
/*
 * On Windows, if another process (e.g. a walsender process) holds
* the file open in FILE_SHARE_DELETE mode, unlink will succeed,
* but the file will still show up in directory listing until the
* last handle is closed, and we cannot rename the new file in its
* place until that. To avoid that problem, rename the old file to
* a temporary name first. Use a counter to create a unique
* filename, because the same file might be restored from the
* archive multiple times, and a walsender could still be holding
* onto an old deleted version of it.
 * On Windows, if another process (e.g. a walsender process) holds the
* file open in FILE_SHARE_DELETE mode, unlink will succeed, but the
* file will still show up in directory listing until the last handle
* is closed, and we cannot rename the new file in its place until
* that. To avoid that problem, rename the old file to a temporary
* name first. Use a counter to create a unique filename, because the
* same file might be restored from the archive multiple times, and a
* walsender could still be holding onto an old deleted version of it.
*/
snprintf(oldpath, MAXPGPATH, "%s.deleted%u",
xlogfpath, deletedcounter++);
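
Editor's note: the rename-aside trick in this hunk is worth seeing in isolation: give the doomed file a unique ".deletedN" name first, so an open handle on Windows cannot block installing the replacement. A sketch with a made-up segment path (the rename/unlink calls are left as comments so it runs anywhere):

#include <stdio.h>

int
main(void)
{
    static unsigned deletedcounter = 1;
    const char *xlogfpath = "pg_xlog/000000010000000000000001"; /* made up */
    char        oldpath[1024];

    snprintf(oldpath, sizeof(oldpath), "%s.deleted%u",
             xlogfpath, deletedcounter++);
    /* rename(xlogfpath, oldpath); unlink(oldpath); errors ignorable */
    printf("rename %s -> %s, then unlink\n", xlogfpath, oldpath);
    return 0;
}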
@ -474,17 +475,17 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
path, xlogfpath)));
/*
* Create .done file forcibly to prevent the restored segment from
* being archived again later.
* Create .done file forcibly to prevent the restored segment from being
* archived again later.
*/
XLogArchiveForceDone(xlogfname);
/*
* If the existing file was replaced, since walsenders might have it
* open, request them to reload a currently-open segment. This is only
* required for WAL segments, walsenders don't hold other files open, but
* there's no harm in doing this too often, and we don't know what kind
* of a file we're dealing with here.
* If the existing file was replaced, since walsenders might have it open,
* request them to reload a currently-open segment. This is only required
* for WAL segments, walsenders don't hold other files open, but there's
* no harm in doing this too often, and we don't know what kind of a file
* we're dealing with here.
*/
if (reload)
WalSndRqstFileReload();

View File

@ -545,8 +545,8 @@ pg_xlog_location_diff(PG_FUNCTION_ARGS)
* XXX: this won't handle values higher than 2^63 correctly.
*/
result = DatumGetNumeric(DirectFunctionCall2(numeric_sub,
DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
PG_RETURN_NUMERIC(result);
}
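
Editor's note: the XXX remark above concerns the int64 casts feeding numeric_sub, since a byte position past 2^63 turns negative on the way in. A tiny demonstration of the cast that causes it (values invented; the wrap shown is how mainstream compilers behave, though strictly implementation-defined in C):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t    pos = UINT64_C(0x9000000000000000); /* > 2^63 */

    printf("unsigned %llu becomes int64 %lld\n",
           (unsigned long long) pos, (long long) pos);
    return 0;
}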
@ -584,7 +584,7 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m",
BACKUP_LABEL_FILE)));
BACKUP_LABEL_FILE)));
PG_RETURN_NULL();
}
@ -602,13 +602,13 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
if (ferror(lfp))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
/* Close the backup label file. */
if (FreeFile(lfp))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
if (strlen(backup_start_time) == 0)
ereport(ERROR,

View File

@ -221,9 +221,9 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
targetRecOff = RecPtr % XLOG_BLCKSZ;
/*
* Read the page containing the record into state->readBuf. Request
 * enough bytes to cover the whole record header, or at least the part of
* it that fits on the same page.
* Read the page containing the record into state->readBuf. Request enough
 * bytes to cover the whole record header, or at least the part of it that
* fits on the same page.
*/
readOff = ReadPageInternal(state,
targetPagePtr,