
pgindent run for 9.5

Author: Bruce Momjian
Date:   2015-05-23 21:35:49 -04:00
Parent: 225892552b
Commit: 807b9e0dff

414 changed files with 5810 additions and 5308 deletions

src/backend/access/heap/heapam.c

@@ -80,7 +80,7 @@ bool synchronize_seqscans = true;
 static HeapScanDesc heap_beginscan_internal(Relation relation,
 Snapshot snapshot,
 int nkeys, ScanKey key,
-bool allow_strat, bool allow_sync, bool allow_pagemode,
+bool allow_strat, bool allow_sync, bool allow_pagemode,
 bool is_bitmapscan, bool is_samplescan,
 bool temp_snap);
@@ -1366,8 +1366,8 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
 static HeapScanDesc
 heap_beginscan_internal(Relation relation, Snapshot snapshot,
 int nkeys, ScanKey key,
-bool allow_strat, bool allow_sync, bool allow_pagemode,
-bool is_bitmapscan, bool is_samplescan, bool temp_snap)
+bool allow_strat, bool allow_sync, bool allow_pagemode,
+bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 {
 HeapScanDesc scan;
@@ -2284,9 +2284,9 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
 {
 /*
 * For now, parallel operations are required to be strictly read-only.
-* Unlike heap_update() and heap_delete(), an insert should never create
-* a combo CID, so it might be possible to relax this restriction, but
-* not without more thought and testing.
+* Unlike heap_update() and heap_delete(), an insert should never create a
+* combo CID, so it might be possible to relax this restriction, but not
+* without more thought and testing.
 */
 if (IsInParallelMode())
 ereport(ERROR,
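
The rewrapped comment above documents a real restriction: heap_prepare_insert() rejects inserts while in parallel mode. A minimal standalone C sketch of that guard, with toy stand-ins for IsInParallelMode() and ereport() (nothing below is PostgreSQL's actual implementation):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for PostgreSQL's IsInParallelMode(). */
static bool in_parallel_mode = false;

static bool
prepare_insert(void)
{
    /*
     * Parallel operations are strictly read-only for now: unlike updates
     * and deletes, an insert should never need a combo CID, so this could
     * in principle be relaxed, but not without more thought and testing.
     */
    if (in_parallel_mode)
    {
        /* message mirrors the style of the ereport() in the hunk above */
        fprintf(stderr, "cannot insert tuples during a parallel operation\n");
        return false;
    }
    return true;
}

int
main(void)
{
    printf("serial insert ok: %d\n", prepare_insert());
    in_parallel_mode = true;
    printf("parallel insert ok: %d\n", prepare_insert());
    return 0;
}
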
@@ -2768,8 +2768,8 @@ l1:
 infomask = tp.t_data->t_infomask;
 /*
-* Sleep until concurrent transaction ends -- except when there's a single
-* locker and it's our own transaction. Note we don't care
+* Sleep until concurrent transaction ends -- except when there's a
+* single locker and it's our own transaction. Note we don't care
 * which lock mode the locker has, because we need the strongest one.
 *
 * Before sleeping, we need to acquire tuple lock to establish our
@@ -2822,8 +2822,8 @@ l1:
 else if (!TransactionIdIsCurrentTransactionId(xwait))
 {
 /*
-* Wait for regular transaction to end; but first, acquire
-* tuple lock.
+* Wait for regular transaction to end; but first, acquire tuple
+* lock.
 */
 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
@@ -3336,8 +3336,8 @@ l2:
 *
 * Before sleeping, we need to acquire tuple lock to establish our
 * priority for the tuple (see heap_lock_tuple). LockTuple will
-* release us when we are next-in-line for the tuple. Note we must not
-* acquire the tuple lock until we're sure we're going to sleep;
+* release us when we are next-in-line for the tuple. Note we must
+* not acquire the tuple lock until we're sure we're going to sleep;
 * otherwise we're open for race conditions with other transactions
 * holding the tuple lock which sleep on us.
 *
@@ -3374,8 +3374,8 @@ l2:
 */
 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
 infomask) ||
-!TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
-xwait))
+!TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+xwait))
 goto l2;
 }
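
The recheck-and-goto pattern around heapam.c's l2 label is worth seeing in isolation: note which locker we waited on, sleep, then start over if xmax changed in the meantime. A compilable toy sketch of the control flow (the sleep and the shared tuple state are simulated; none of these names are the real ones):

#include <stdio.h>

typedef unsigned int TransactionId;

static TransactionId tuple_xmax = 100;  /* pretend shared tuple state */
static int           wakeups;

/* Simulate another backend updating the tuple while we slept once. */
static void
sleep_on_locker(void)
{
    if (wakeups++ == 0)
        tuple_xmax = 101;
}

int
main(void)
{
    int restarts = 0;

l2: /* mirrors heapam.c's goto target */
    {
        TransactionId xwait = tuple_xmax;   /* locker we decided to wait on */

        sleep_on_locker();                  /* buffer lock is released here */

        /*
         * After re-acquiring the buffer lock, check whether xmax changed
         * while we slept; if so, someone else got there first: start over.
         */
        if (tuple_xmax != xwait)
        {
            restarts++;
            goto l2;
        }
    }
    printf("proceeding after %d restart(s)\n", restarts);
    return 0;
}
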
@@ -3425,9 +3425,9 @@ l2:
 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
 {
 /*
-* If it's just a key-share locker, and we're not changing the
-* key columns, we don't need to wait for it to end; but we
-* need to preserve it as locker.
+* If it's just a key-share locker, and we're not changing the key
+* columns, we don't need to wait for it to end; but we need to
+* preserve it as locker.
 */
 checked_lockers = true;
 locker_remains = true;
@@ -3436,8 +3436,8 @@ l2:
 else
 {
 /*
-* Wait for regular transaction to end; but first, acquire
-* tuple lock.
+* Wait for regular transaction to end; but first, acquire tuple
+* lock.
 */
 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
@@ -3454,7 +3454,7 @@ l2:
 */
 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
 !TransactionIdEquals(xwait,
-HeapTupleHeaderGetRawXmax(oldtup.t_data)))
+HeapTupleHeaderGetRawXmax(oldtup.t_data)))
 goto l2;
 /* Otherwise check if it committed or aborted */
@@ -3779,7 +3779,7 @@ l2:
 HeapTupleClearHeapOnly(newtup);
 }
-RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
+RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
 if (!already_marked)
 {
@@ -4477,7 +4477,7 @@ l3:
 if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
 TransactionIdIsCurrentTransactionId(xwait))
 {
-/* ... but if the xmax changed in the meantime, start over */
+/* ... but if the xmax changed in the meantime, start over */
 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4501,8 +4501,8 @@ l3:
 * for the tuple. We must do this even if we are share-locking.
 *
 * If we are forced to "start over" below, we keep the tuple lock;
-* this arranges that we stay at the head of the line while rechecking
-* tuple state.
+* this arranges that we stay at the head of the line while
+* rechecking tuple state.
 */
 if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
 &have_tuple_lock))
@@ -4530,11 +4530,11 @@ l3:
 {
 case LockWaitBlock:
 MultiXactIdWait((MultiXactId) xwait, status, infomask,
-relation, &tuple->t_self, XLTW_Lock, NULL);
+relation, &tuple->t_self, XLTW_Lock, NULL);
 break;
 case LockWaitSkip:
 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-status, infomask, relation,
+status, infomask, relation,
 NULL))
 {
 result = HeapTupleWouldBlock;
@@ -4545,12 +4545,12 @@ l3:
 break;
 case LockWaitError:
 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-status, infomask, relation,
+status, infomask, relation,
 NULL))
 ereport(ERROR,
 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
 errmsg("could not obtain lock on row in relation \"%s\"",
-RelationGetRelationName(relation))));
+RelationGetRelationName(relation))));
 break;
 }
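
These two hunks exercise the three LockWaitPolicy branches: block until the locker finishes, skip the row, or raise an error. A self-contained toy sketch of that dispatch (the lock itself is simulated; in heapam.c the skip branch yields HeapTupleWouldBlock and the error branch ereport()s):

#include <stdbool.h>
#include <stdio.h>

/* Names mirror PostgreSQL's LockWaitPolicy; everything else is a toy. */
typedef enum { LockWaitBlock, LockWaitSkip, LockWaitError } LockWaitPolicy;

static bool lock_held_by_other = true;   /* assumed contended tuple */

static bool
try_lock(void)
{
    return !lock_held_by_other;
}

/* Returns true if the lock was obtained, false if the row is given up. */
static bool
acquire_tuple_lock(LockWaitPolicy policy)
{
    switch (policy)
    {
        case LockWaitBlock:
            lock_held_by_other = false;  /* pretend the locker finished */
            return true;
        case LockWaitSkip:
            if (!try_lock())
                return false;            /* caller sees "would block" */
            return true;
        case LockWaitError:
            if (!try_lock())
            {
                fprintf(stderr,
                        "could not obtain lock on row in relation \"%s\"\n",
                        "toy_rel");      /* real code ereport()s here */
                return false;
            }
            return true;
    }
    return false;
}

int
main(void)
{
    printf("skip:  %s\n", acquire_tuple_lock(LockWaitSkip)  ? "locked" : "skipped");
    printf("error: %s\n", acquire_tuple_lock(LockWaitError) ? "locked" : "errored");
    printf("block: %s\n", acquire_tuple_lock(LockWaitBlock) ? "locked" : "skipped");
    return 0;
}
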
@@ -4588,7 +4588,7 @@ l3:
 ereport(ERROR,
 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
 errmsg("could not obtain lock on row in relation \"%s\"",
-RelationGetRelationName(relation))));
+RelationGetRelationName(relation))));
 break;
 }
 }
@@ -4613,9 +4613,9 @@ l3:
 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
 /*
-* xwait is done, but if xwait had just locked the tuple then
-* some other xact could update this tuple before we get to
-* this point. Check for xmax change, and start over if so.
+* xwait is done, but if xwait had just locked the tuple then some
+* other xact could update this tuple before we get to this point.
+* Check for xmax change, and start over if so.
 */
 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4628,9 +4628,9 @@ l3:
 * Otherwise check if it committed or aborted. Note we cannot
 * be here if the tuple was only locked by somebody who didn't
 * conflict with us; that would have been handled above. So
-* that transaction must necessarily be gone by now. But don't
-* check for this in the multixact case, because some locker
-* transactions might still be running.
+* that transaction must necessarily be gone by now. But
+* don't check for this in the multixact case, because some
+* locker transactions might still be running.
 */
 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
 }
@@ -4810,8 +4810,8 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
 if (!ConditionalLockTupleTuplock(relation, tid, mode))
 ereport(ERROR,
 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-errmsg("could not obtain lock on row in relation \"%s\"",
-RelationGetRelationName(relation))));
+errmsg("could not obtain lock on row in relation \"%s\"",
+RelationGetRelationName(relation))));
 break;
 }
 *have_tuple_lock = true;
@@ -5513,8 +5513,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
 MarkBufferDirty(buffer);
 /*
-* Replace the speculative insertion token with a real t_ctid,
-* pointing to itself like it does on regular tuples.
+* Replace the speculative insertion token with a real t_ctid, pointing to
+* itself like it does on regular tuples.
 */
 htup->t_ctid = tuple->t_self;
@@ -6447,23 +6447,23 @@ static bool
 DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
 LockTupleMode lockmode)
 {
-bool allow_old;
-int nmembers;
+bool allow_old;
+int nmembers;
 MultiXactMember *members;
-bool result = false;
-LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
+bool result = false;
+LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
 allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
 if (nmembers >= 0)
 {
-int i;
+int i;
 for (i = 0; i < nmembers; i++)
 {
-TransactionId memxid;
-LOCKMODE memlockmode;
+TransactionId memxid;
+LOCKMODE memlockmode;
 memlockmode = LOCKMODE_from_mxstatus(members[i].status);
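
DoesMultiXactIdConflict() walks the multixact's members and asks whether any member's lock mode conflicts with the mode we want. An illustrative standalone sketch with a toy conflict table (the table mirrors the documented row-lock conflict matrix, but all types and names here are stand-ins, not PostgreSQL's):

#include <stdbool.h>
#include <stdio.h>

typedef enum { LOCK_KEY_SHARE, LOCK_SHARE, LOCK_NO_KEY_UPDATE,
               LOCK_UPDATE, N_LOCKMODES } ToyLockMode;

/* Symmetric row-lock conflict matrix, per the documented semantics. */
static const bool conflicts[N_LOCKMODES][N_LOCKMODES] = {
    /* vs:           keyshare share  nokeyupd update */
    /* keyshare */ { false,   false, false,   true  },
    /* share    */ { false,   false, true,    true  },
    /* nokeyupd */ { false,   true,  true,    true  },
    /* update   */ { true,    true,  true,    true  },
};

/* Does any member of the (toy) multixact conflict with `wanted`? */
static bool
multixact_conflicts(const ToyLockMode *members, int nmembers,
                    ToyLockMode wanted)
{
    for (int i = 0; i < nmembers; i++)
        if (conflicts[members[i]][wanted])
            return true;          /* first conflicting member decides */
    return false;
}

int
main(void)
{
    ToyLockMode members[] = { LOCK_KEY_SHARE, LOCK_SHARE };

    printf("conflicts with NO KEY UPDATE: %s\n",
           multixact_conflicts(members, 2, LOCK_NO_KEY_UPDATE) ? "yes" : "no");
    printf("conflicts with KEY SHARE:     %s\n",
           multixact_conflicts(members, 2, LOCK_KEY_SHARE) ? "yes" : "no");
    return 0;
}
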
@@ -7093,7 +7093,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
 {
 XLogRegisterBufData(0,
 ((char *) newtup->t_data) + SizeofHeapTupleHeader,
-newtup->t_len - SizeofHeapTupleHeader - suffixlen);
+newtup->t_len - SizeofHeapTupleHeader - suffixlen);
 }
 else
 {
@@ -7105,8 +7105,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
 if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
 {
 XLogRegisterBufData(0,
-((char *) newtup->t_data) + SizeofHeapTupleHeader,
-newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+((char *) newtup->t_data) + SizeofHeapTupleHeader,
+newtup->t_data->t_hoff - SizeofHeapTupleHeader);
 }
 /* data after common prefix */
@@ -7289,8 +7289,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
 {
 /*
 * The OID column can appear in an index definition, but that's
-* OK, because we always copy the OID if present (see below). Other
-* system columns may not.
+* OK, because we always copy the OID if present (see below).
+* Other system columns may not.
 */
 if (attno == ObjectIdAttributeNumber)
 continue;
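
The comment spells out the rule: the OID column may appear in a replica-identity index because it is always copied when present, while other system columns may not. A toy filter expressing that rule (attribute numbering follows PostgreSQL's convention of negative system attnos; the helper name and demo values are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* ObjectIdAttributeNumber is -2 in PostgreSQL's attnum convention. */
#define ObjectIdAttributeNumber (-2)

/* May this index attribute appear in the extracted replica identity?
 * User columns always, the OID column specially, other system columns never. */
static bool
identity_column_allowed(int attno)
{
    if (attno == ObjectIdAttributeNumber)
        return true;             /* OID is always copied if present */
    if (attno < 0)
        return false;            /* other system columns may not appear */
    return true;                 /* ordinary user column */
}

int
main(void)
{
    int index_attnos[] = { 1, ObjectIdAttributeNumber, -1 /* e.g. ctid */ };

    for (int i = 0; i < 3; i++)
        printf("attno %d: %s\n", index_attnos[i],
               identity_column_allowed(index_attnos[i]) ? "allowed" : "rejected");
    return 0;
}
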

src/backend/access/heap/hio.c

@@ -60,9 +60,9 @@ RelationPutHeapTuple(Relation relation,
 ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
 /*
-* Insert the correct position into CTID of the stored tuple, too
-* (unless this is a speculative insertion, in which case the token is
-* held in CTID field instead)
+* Insert the correct position into CTID of the stored tuple, too (unless
+* this is a speculative insertion, in which case the token is held in
+* CTID field instead)
 */
 if (!token)
 {
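
RelationPutHeapTuple() sets the stored tuple's CTID to its own position unless a speculative-insertion token occupies the field; heap_finish_speculative() (see the earlier heapam.c hunk) later swaps the token for the real self-pointer. A compilable toy sketch of that convention (toy structs and an invented token value, not the real ItemPointerData/HeapTuple types):

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned blkno; unsigned offnum; } ToyItemPointer;
typedef struct { ToyItemPointer t_self; ToyItemPointer t_ctid; } ToyTuple;

/* After placing a tuple at (blkno, offnum): ordinary tuples get a CTID
 * pointing at themselves; speculative insertions keep a token in CTID
 * until it is later replaced with the real self-pointer. */
static void
put_tuple(ToyTuple *tup, unsigned blkno, unsigned offnum, bool token)
{
    tup->t_self.blkno = blkno;
    tup->t_self.offnum = offnum;
    if (!token)
        tup->t_ctid = tup->t_self;   /* regular tuple: self-pointer */
    /* else: leave the speculative token already held in t_ctid */
}

int
main(void)
{
    ToyTuple regular = {0};
    ToyTuple speculative = { .t_ctid = { 0, 0xFFFE } };  /* pretend token */

    put_tuple(&regular, 7, 3, false);
    put_tuple(&speculative, 7, 4, true);

    printf("regular ctid = (%u,%u)\n",
           regular.t_ctid.blkno, regular.t_ctid.offnum);
    printf("speculative ctid still holds token = (%u,%u)\n",
           speculative.t_ctid.blkno, speculative.t_ctid.offnum);
    return 0;
}
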