
pgindent run before PG 9.1 beta 1.

Bruce Momjian
2011-04-10 11:42:00 -04:00
parent 9a8b73147c
commit bf50caf105
446 changed files with 5737 additions and 5258 deletions


@@ -1070,7 +1070,7 @@ relation_close(Relation relation, LOCKMODE lockmode)
* This is essentially relation_open plus check that the relation
* is not an index nor a composite type. (The caller should also
* check that it's not a view or foreign table before assuming it has
* storage.)
* storage.)
* ----------------
*/
Relation
@@ -1922,8 +1922,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* We're about to do the actual insert -- check for conflict at the
* relation or buffer level first, to avoid possibly having to roll
* back work we've just done.
* relation or buffer level first, to avoid possibly having to roll back
* work we've just done.
*/
CheckForSerializableConflictIn(relation, NULL, buffer);
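(An aside, since the comment being re-wrapped here is easy to skim past: CheckForSerializableConflictIn() reports a conflict by raising an error, which is why it runs before the tuple is actually put on the page -- a failure at that point leaves nothing to roll back. Below is a minimal, self-contained sketch of that "check before you modify" ordering; page, serializable_conflict and insert_value are hypothetical stand-ins, not PostgreSQL code.)

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the heap page and the predicate-lock check. */
static int  page[16];
static int  page_used = 0;

static bool
serializable_conflict(int value)
{
    /* pretend that values divisible by 7 conflict with a concurrent reader */
    return value % 7 == 0;
}

/*
 * Check first, then modify: if the check fails, the page has not been
 * touched yet, so there is no work to undo.
 */
static bool
insert_value(int value)
{
    if (serializable_conflict(value))
    {
        fprintf(stderr, "serialization failure; nothing to roll back\n");
        return false;
    }
    page[page_used++] = value;  /* the actual "insert" happens only now */
    return true;
}

int
main(void)
{
    insert_value(3);
    insert_value(14);           /* rejected before the page is modified */
    printf("page holds %d value(s)\n", page_used);
    return 0;
}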
@@ -2228,8 +2228,8 @@ l1:
}
/*
* We're about to do the actual delete -- check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to do the actual delete -- check for conflict first, to
* avoid possibly having to roll back work we've just done.
*/
CheckForSerializableConflictIn(relation, &tp, buffer);
@@ -2587,8 +2587,8 @@ l2:
}
/*
* We're about to do the actual update -- check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to do the actual update -- check for conflict first, to
* avoid possibly having to roll back work we've just done.
*/
CheckForSerializableConflictIn(relation, &oldtup, buffer);
@@ -2737,8 +2737,8 @@ l2:
}
/*
* We're about to create the new tuple -- check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to create the new tuple -- check for conflict first, to
* avoid possibly having to roll back work we've just done.
*
* NOTE: For a tuple insert, we only need to check for table locks, since
* predicate locking at the index level will cover ranges for anything
@@ -3860,12 +3860,12 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
}
/*
* Ignore tuples inserted by an aborted transaction or
* if the tuple was updated/deleted by the inserting transaction.
* Ignore tuples inserted by an aborted transaction or if the tuple was
* updated/deleted by the inserting transaction.
*
* Look for a committed hint bit, or if no xmin bit is set, check clog.
* This needs to work on both master and standby, where it is used
* to assess btree delete records.
* This needs to work on both master and standby, where it is used to
* assess btree delete records.
*/
if ((tuple->t_infomask & HEAP_XMIN_COMMITTED) ||
(!(tuple->t_infomask & HEAP_XMIN_COMMITTED) &&
@@ -3874,7 +3874,7 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
{
if (xmax != xmin &&
TransactionIdFollows(xmax, *latestRemovedXid))
*latestRemovedXid = xmax;
*latestRemovedXid = xmax;
}
/* *latestRemovedXid may still be invalid at end */
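(A side note on the comparison in this hunk: TransactionIdFollows() treats 32-bit transaction IDs as a circular space, comparing them modulo 2^32 so that "newer" keeps working across XID wraparound. The sketch below shows that comparison and the advance-only-if-newer pattern the hunk uses; it is a simplified stand-alone illustration, not the backend code, and xid_follows/latest_removed are made-up names.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

/* Wraparound-aware "id1 is newer than id2": compare modulo 2^32. */
static bool
xid_follows(TransactionId id1, TransactionId id2)
{
    return (int32_t) (id1 - id2) > 0;
}

int
main(void)
{
    TransactionId latest_removed = 0;       /* 0 plays the role of "invalid" */
    TransactionId seen[] = {4294967290u, 4294967295u, 5};   /* 5 is post-wraparound */

    for (int i = 0; i < 3; i++)
    {
        /* advance only when the candidate is genuinely newer */
        if (latest_removed == 0 || xid_follows(seen[i], latest_removed))
            latest_removed = seen[i];
    }
    printf("latest removed xid: %u\n", (unsigned) latest_removed);  /* prints 5 */
    return 0;
}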
@@ -4158,8 +4158,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
/*
* The page may be uninitialized. If so, we can't set the LSN
* and TLI because that would corrupt the page.
* The page may be uninitialized. If so, we can't set the LSN and TLI
* because that would corrupt the page.
*/
if (!PageIsNew(page))
{
@@ -4352,8 +4352,8 @@ heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);
/*
* The page may be uninitialized. If so, we can't set the LSN
* and TLI because that would corrupt the page.
* The page may be uninitialized. If so, we can't set the LSN and TLI
* because that would corrupt the page.
*/
if (!PageIsNew(page))
{


@@ -150,7 +150,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
Buffer otherBuffer, int options,
struct BulkInsertStateData *bistate)
struct BulkInsertStateData * bistate)
{
bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
Buffer buffer = InvalidBuffer;
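(The only change in this hunk is the space that appears after the '*' in the new line. That spacing is introduced by the pgindent run itself: pgindent's pointer formatting depends on the list of typedef names it is given, and names it does not recognize tend to come out with this extra space, a known quirk of the tool, so the new form is presumably an indentation artifact rather than an intentional edit. The whitespace carries no meaning, as the small stand-alone example below illustrates; the struct body there is a placeholder invented for the example, not the real BulkInsertStateData.)

#include <stdio.h>

/* Placeholder definition, invented for this example only. */
struct BulkInsertStateData
{
    int     dummy;
};

/*
 * Both spellings declare exactly the same parameter type; the whitespace
 * around the '*' has no semantic effect, which is why an indentation tool
 * can flip it without changing behaviour.
 */
static int
with_space(struct BulkInsertStateData * state)
{
    return state->dummy;
}

static int
without_space(struct BulkInsertStateData *state)
{
    return state->dummy;
}

int
main(void)
{
    struct BulkInsertStateData s = {42};

    printf("%d %d\n", with_space(&s), without_space(&s));
    return 0;
}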


@@ -131,7 +131,7 @@ typedef struct RewriteStateData
* them */
HTAB *rs_unresolved_tups; /* unmatched A tuples */
HTAB *rs_old_new_tid_map; /* unmatched B tuples */
} RewriteStateData;
} RewriteStateData;
/*
* The lookup keys for the hash tables are tuple TID and xmin (we must check
@@ -277,7 +277,7 @@ end_heap_rewrite(RewriteState state)
}
/*
* If the rel is WAL-logged, must fsync before commit. We use heap_sync
* If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less