
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
Author: Bruce Momjian
Date: 2012-06-10 15:20:04 -04:00
Parent: 60801944fa
Commit: 927d61eeff

494 changed files with 7343 additions and 7046 deletions

src/backend/access/heap/heapam.c

@@ -223,9 +223,9 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
}
/*
* Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters
* many pages' worth of consecutive dead tuples.
* Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters many
* pages' worth of consecutive dead tuples.
*/
CHECK_FOR_INTERRUPTS();
@@ -997,8 +997,8 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
*
* Same as relation_openrv, but with an additional missing_ok argument
* allowing a NULL return rather than an error if the relation is not
* found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* ----------------
*/
Relation
@@ -1105,7 +1105,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
* by a RangeVar node
*
* As above, but optionally return NULL instead of failing for
* relation-not-found.
* relation-not-found.
* ----------------
*/
Relation
@@ -1588,10 +1588,10 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
/*
* When first_call is true (and thus, skip is initially false) we'll
* return the first tuple we find. But on later passes, heapTuple
* return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever),
* so we skip it and return the next match we find.
* Returning it again would be incorrect (and would loop forever), so
* we skip it and return the next match we find.
*/
if (!skip)
{
@@ -1651,7 +1651,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
{
bool result;
Buffer buffer;
HeapTupleData heapTuple;
HeapTupleData heapTuple;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1885,14 +1885,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
/*
* We're about to do the actual insert -- but check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to do the actual insert -- but check for conflict first, to
* avoid possibly having to roll back work we've just done.
*
* For a heap insert, we only need to check for table-level SSI locks.
* Our new tuple can't possibly conflict with existing tuple locks, and
* heap page locks are only consolidated versions of tuple locks; they do
* not lock "gaps" as index page locks do. So we don't need to identify
* a buffer before making the call.
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
* lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2123,11 +2123,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* We're about to do the actual inserts -- but check for conflict first,
* to avoid possibly having to roll back work we've just done.
*
* For a heap insert, we only need to check for table-level SSI locks.
* Our new tuple can't possibly conflict with existing tuple locks, and
* heap page locks are only consolidated versions of tuple locks; they do
* not lock "gaps" as index page locks do. So we don't need to identify
* a buffer before making the call.
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
* lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2137,12 +2137,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
bool all_visible_cleared = false;
int nthispage;
int nthispage;
/*
* Find buffer where at least the next tuple will fit. If the page
* is all-visible, this will also pin the requisite visibility map
* page.
* Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
InvalidBuffer, options, bistate,
@@ -2358,7 +2357,7 @@ heap_delete(Relation relation, ItemPointer tid,
ItemId lp;
HeapTupleData tp;
Page page;
BlockNumber block;
BlockNumber block;
Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
bool have_tuple_lock = false;
@@ -2372,10 +2371,10 @@ heap_delete(Relation relation, ItemPointer tid,
page = BufferGetPage(buffer);
/*
* Before locking the buffer, pin the visibility map page if it appears
* to be necessary. Since we haven't got the lock yet, someone else might
* be in the middle of changing this, so we'll need to recheck after
* we have the lock.
* Before locking the buffer, pin the visibility map page if it appears to
* be necessary. Since we haven't got the lock yet, someone else might be
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
@@ -2717,7 +2716,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
HeapTupleData oldtup;
HeapTuple heaptup;
Page page;
BlockNumber block;
BlockNumber block;
Buffer buffer,
newbuf,
vmbuffer = InvalidBuffer,
@@ -2753,10 +2752,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
page = BufferGetPage(buffer);
/*
* Before locking the buffer, pin the visibility map page if it appears
* to be necessary. Since we haven't got the lock yet, someone else might
* be in the middle of changing this, so we'll need to recheck after
* we have the lock.
* Before locking the buffer, pin the visibility map page if it appears to
* be necessary. Since we haven't got the lock yet, someone else might be
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
@@ -2900,11 +2899,11 @@ l2:
/*
* If we didn't pin the visibility map page and the page has become all
* visible while we were busy locking the buffer, or during some subsequent
* window during which we had it unlocked, we'll have to unlock and
* re-lock, to avoid holding the buffer lock across an I/O. That's a bit
* unfortunate, especially since we'll now have to recheck whether the
* tuple has been locked or updated under us, but hopefully it won't
* visible while we were busy locking the buffer, or during some
* subsequent window during which we had it unlocked, we'll have to unlock
* and re-lock, to avoid holding the buffer lock across an I/O. That's a
* bit unfortunate, especially since we'll now have to recheck whether
* the tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@@ -3196,11 +3195,11 @@ l2:
/*
* Mark old tuple for invalidation from system caches at next command
* boundary, and mark the new tuple for invalidation in case we abort.
* We have to do this before releasing the buffer because oldtup is in
* the buffer. (heaptup is all in local memory, but it's necessary to
* process both tuple versions in one call to inval.c so we can avoid
* redundant sinval messages.)
* boundary, and mark the new tuple for invalidation in case we abort. We
* have to do this before releasing the buffer because oldtup is in the
* buffer. (heaptup is all in local memory, but it's necessary to process
* both tuple versions in one call to inval.c so we can avoid redundant
* sinval messages.)
*/
CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
@@ -4069,7 +4068,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
*/
bool
heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
Buffer buf)
Buffer buf)
{
TransactionId xid;
@@ -4368,9 +4367,9 @@ log_heap_freeze(Relation reln, Buffer buffer,
}
/*
* Perform XLogInsert for a heap-visible operation. 'block' is the block
* Perform XLogInsert for a heap-visible operation. 'block' is the block
* being marked all-visible, and vm_buffer is the buffer containing the
* corresponding visibility map block. Both should have already been modified
* corresponding visibility map block. Both should have already been modified
* and dirtied.
*/
XLogRecPtr
@@ -4705,7 +4704,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
Page page;
/*
* Read the heap page, if it still exists. If the heap file has been
* Read the heap page, if it still exists. If the heap file has been
* dropped or truncated later in recovery, this might fail. In that case,
* there's no point in doing anything further, since the visibility map
* will have to be cleared out at the same time.
@@ -4731,17 +4730,16 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* We don't bump the LSN of the heap page when setting the visibility
* map bit, because that would generate an unworkable volume of
* full-page writes. This exposes us to torn page hazards, but since
* we're not inspecting the existing page contents in any way, we
* don't care.
* We don't bump the LSN of the heap page when setting the visibility map
* bit, because that would generate an unworkable volume of full-page
* writes. This exposes us to torn page hazards, but since we're not
* inspecting the existing page contents in any way, we don't care.
*
* However, all operations that clear the visibility map bit *do* bump
* the LSN, and those operations will only be replayed if the XLOG LSN
* follows the page LSN. Thus, if the page LSN has advanced past our
* XLOG record's LSN, we mustn't mark the page all-visible, because
* the subsequent update won't be replayed to clear the flag.
* However, all operations that clear the visibility map bit *do* bump the
* LSN, and those operations will only be replayed if the XLOG LSN follows
* the page LSN. Thus, if the page LSN has advanced past our XLOG
* record's LSN, we mustn't mark the page all-visible, because the
* subsequent update won't be replayed to clear the flag.
*/
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -4772,10 +4770,10 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* Don't set the bit if replay has already passed this point.
*
* It might be safe to do this unconditionally; if replay has passed
* this point, we'll replay at least as far this time as we did before,
* and if this bit needs to be cleared, the record responsible for
* doing so should be again replayed, and clear it. For right now,
* out of an abundance of conservatism, we use the same test here
* this point, we'll replay at least as far this time as we did
* before, and if this bit needs to be cleared, the record responsible
* for doing so should be again replayed, and clear it. For right
* now, out of an abundance of conservatism, we use the same test here
* we did for the heap page; if this results in a dropped bit, no real
* harm is done; and the next VACUUM will fix it.
*/
@@ -5183,7 +5181,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
if (xlrec->all_visible_cleared)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
@@ -5267,7 +5265,7 @@ newt:;
if (xlrec->new_all_visible_cleared)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
@@ -5690,7 +5688,7 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
else
appendStringInfo(buf, "multi-insert: ");
appendStringInfo(buf, "rel %u/%u/%u; blk %u; %d tuples",
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->blkno, xlrec->ntuples);
}
else

src/backend/access/heap/hio.c

@@ -109,8 +109,8 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
BlockNumber block1, BlockNumber block2,
Buffer *vmbuffer1, Buffer *vmbuffer2)
{
bool need_to_pin_buffer1;
bool need_to_pin_buffer2;
bool need_to_pin_buffer1;
bool need_to_pin_buffer2;
Assert(BufferIsValid(buffer1));
Assert(buffer2 == InvalidBuffer || buffer1 <= buffer2);
@@ -145,7 +145,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
/*
* If there are two buffers involved and we pinned just one of them,
* it's possible that the second one became all-visible while we were
* busy pinning the first one. If it looks like that's a possible
* busy pinning the first one. If it looks like that's a possible
* scenario, we'll need to make a second pass through this loop.
*/
if (buffer2 == InvalidBuffer || buffer1 == buffer2
@@ -302,11 +302,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
* block if one was given, taking suitable care with lock ordering and
* the possibility they are the same block.
*
* If the page-level all-visible flag is set, caller will need to clear
* both that and the corresponding visibility map bit. However, by the
* time we return, we'll have x-locked the buffer, and we don't want to
* do any I/O while in that state. So we check the bit here before
* taking the lock, and pin the page if it appears necessary.
* If the page-level all-visible flag is set, caller will need to
* clear both that and the corresponding visibility map bit. However,
* by the time we return, we'll have x-locked the buffer, and we don't
* want to do any I/O while in that state. So we check the bit here
* before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock.
*/
@@ -347,23 +347,24 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We now have the target page (and the other buffer, if any) pinned
* and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now
* be out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to give
* up our locks, go get the pin(s) we failed to get earlier, and
* and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now be
* out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to
* give up our locks, go get the pin(s) we failed to get earlier, and
* re-lock. That's pretty painful, but hopefully shouldn't happen
* often.
*
* Note that there's a small possibility that we didn't pin the
* page above but still have the correct page pinned anyway, either
* because we've already made a previous pass through this loop, or
* because caller passed us the right page anyway.
* Note that there's a small possibility that we didn't pin the page
* above but still have the correct page pinned anyway, either because
* we've already made a previous pass through this loop, or because
* caller passed us the right page anyway.
*
* Note also that it's possible that by the time we get the pin and
* retake the buffer locks, the visibility map bit will have been
* cleared by some other backend anyway. In that case, we'll have done
* a bit of extra work for no gain, but there's no real harm done.
* cleared by some other backend anyway. In that case, we'll have
* done a bit of extra work for no gain, but there's no real harm
* done.
*/
if (otherBuffer == InvalidBuffer || buffer <= otherBuffer)
GetVisibilityMapPins(relation, buffer, otherBuffer,

src/backend/access/heap/tuptoaster.c

@@ -75,7 +75,7 @@ do { \
static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options);
struct varlena * oldexternal, int options);
static bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
static bool toastid_valueid_exists(Oid toastrelid, Oid valueid);
static struct varlena *toast_fetch_datum(struct varlena * attr);
@@ -1233,7 +1233,7 @@ toast_compress_datum(Datum value)
*/
static Datum
toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options)
struct varlena * oldexternal, int options)
{
Relation toastrel;
Relation toastidx;
@@ -1353,7 +1353,7 @@ toast_save_datum(Relation rel, Datum value,
* those versions could easily reference the same toast value.
* When we copy the second or later version of such a row,
* reusing the OID will mean we select an OID that's already
* in the new toast table. Check for that, and if so, just
* in the new toast table. Check for that, and if so, just
* fall through without writing the data again.
*
* While annoying and ugly-looking, this is a good thing

src/backend/access/heap/visibilitymap.c

@@ -16,7 +16,7 @@
* visibilitymap_pin_ok - check whether correct map page is already pinned
* visibilitymap_set - set a bit in a previously pinned page
* visibilitymap_test - test if a bit is set
* visibilitymap_count - count number of bits set in visibility map
* visibilitymap_count - count number of bits set in visibility map
* visibilitymap_truncate - truncate the visibility map
*
* NOTES
@@ -27,7 +27,7 @@
* the sense that we make sure that whenever a bit is set, we know the
* condition is true, but if a bit is not set, it might or might not be true.
*
* Clearing a visibility map bit is not separately WAL-logged. The callers
* Clearing a visibility map bit is not separately WAL-logged. The callers
* must make sure that whenever a bit is cleared, the bit is cleared on WAL
* replay of the updating operation as well.
*
@@ -36,9 +36,9 @@
* it may still be the case that every tuple on the page is visible to all
* transactions; we just don't know that for certain. The difficulty is that
* there are two bits which are typically set together: the PD_ALL_VISIBLE bit
* on the page itself, and the visibility map bit. If a crash occurs after the
* on the page itself, and the visibility map bit. If a crash occurs after the
* visibility map page makes it to disk and before the updated heap page makes
* it to disk, redo must set the bit on the heap page. Otherwise, the next
* it to disk, redo must set the bit on the heap page. Otherwise, the next
* insert, update, or delete on the heap page will fail to realize that the
* visibility map bit must be cleared, possibly causing index-only scans to
* return wrong answers.
@@ -59,10 +59,10 @@
* the buffer lock over any I/O that may be required to read in the visibility
* map page. To avoid this, we examine the heap page before locking it;
* if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
* bit. Then, we lock the buffer. But this creates a race condition: there
* bit. Then, we lock the buffer. But this creates a race condition: there
* is a possibility that in the time it takes to lock the buffer, the
* PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
* buffer, pin the visibility map page, and relock the buffer. This shouldn't
* buffer, pin the visibility map page, and relock the buffer. This shouldn't
* happen often, because only VACUUM currently sets visibility map bits,
* and the race will only occur if VACUUM processes a given page at almost
* exactly the same time that someone tries to further modify it.
@@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
* visibilitymap_set - set a bit on a previously pinned page
*
* recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
* or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* one provided; in normal running, we generate a new XLOG record and set the
* page LSN to that value. cutoff_xid is the largest xmin on the page being
* page LSN to that value. cutoff_xid is the largest xmin on the page being
* marked all-visible; it is needed for Hot Standby, and can be
* InvalidTransactionId if the page contains no tuples.
*
@@ -295,10 +295,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
* releasing *buf after it's done testing and setting bits.
*
* NOTE: This function is typically called without a lock on the heap page,
* so somebody else could change the bit just after we look at it. In fact,
* so somebody else could change the bit just after we look at it. In fact,
* since we don't lock the visibility map page either, it's even possible that
* someone else could have changed the bit just before we look at it, but yet
* we might see the old value. It is the caller's responsibility to deal with
* we might see the old value. It is the caller's responsibility to deal with
* all concurrency issues!
*/
bool
@@ -344,7 +344,7 @@ visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf)
}
/*
* visibilitymap_count - count number of bits set in visibility map
* visibilitymap_count - count number of bits set in visibility map
*
* Note: we ignore the possibility of race conditions when the table is being
* extended concurrently with the call. New pages added to the table aren't
@@ -356,16 +356,16 @@ visibilitymap_count(Relation rel)
BlockNumber result = 0;
BlockNumber mapBlock;
for (mapBlock = 0; ; mapBlock++)
for (mapBlock = 0;; mapBlock++)
{
Buffer mapBuffer;
unsigned char *map;
int i;
/*
* Read till we fall off the end of the map. We assume that any
* extra bytes in the last page are zeroed, so we don't bother
* excluding them from the count.
* Read till we fall off the end of the map. We assume that any extra
* bytes in the last page are zeroed, so we don't bother excluding
* them from the count.
*/
mapBuffer = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(mapBuffer))
@@ -496,11 +496,11 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
Buffer buf;
/*
* We might not have opened the relation at the smgr level yet, or we might
* have been forced to close it by a sinval message. The code below won't
* necessarily notice relation extension immediately when extend = false,
* so we rely on sinval messages to ensure that our ideas about the size of
* the map aren't too far out of date.
* We might not have opened the relation at the smgr level yet, or we
* might have been forced to close it by a sinval message. The code below
* won't necessarily notice relation extension immediately when extend =
* false, so we rely on sinval messages to ensure that our ideas about the
* size of the map aren't too far out of date.
*/
RelationOpenSmgr(rel);
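
The comments reflowed above (in heap_delete, heap_update, RelationGetBufferForTuple, and the visibilitymap.c header) all describe the same pin-before-lock protocol: check PD_ALL_VISIBLE before taking the buffer content lock, pin the visibility map page if that check says it may be needed, and recheck after locking so that no visibility-map I/O ever happens while the content lock is held. The following is a minimal sketch of that pattern, assuming the standard bufmgr and visibilitymap APIs quoted in the hunks; the helper name and its exact shape are illustrative, not code from the tree.

#include "postgres.h"

#include "access/visibilitymap.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

/*
 * Hypothetical helper showing the pin-before-lock dance: pin the visibility
 * map page while holding no lock, then recheck under the lock and redo the
 * pin if we lost the race.  Caller passes *vmbuffer set to InvalidBuffer and
 * is responsible for releasing both buffers afterwards.
 */
static Buffer
lock_buffer_with_vm_pin(Relation relation, BlockNumber block, Buffer *vmbuffer)
{
	Buffer		buffer = ReadBuffer(relation, block);
	Page		page = BufferGetPage(buffer);

	/* Pin the map page first if the heap page looks all-visible ... */
	if (PageIsAllVisible(page))
		visibilitymap_pin(relation, block, vmbuffer);

	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * ... but the flag may have been set between our unlocked check and the
	 * lock acquisition.  In that case drop the lock, do the (possibly
	 * I/O-incurring) pin, and relock.  As the comments note, this should be
	 * rare, since only VACUUM currently sets visibility map bits.
	 */
	if (*vmbuffer == InvalidBuffer && PageIsAllVisible(page))
	{
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		visibilitymap_pin(relation, block, vmbuffer);
		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	}

	return buffer;
}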