mirror of https://github.com/postgres/postgres.git (synced 2025-10-31 10:30:33 +03:00)
pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
@@ -90,7 +90,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
  * By here, itup is filled in, including the TID.
  *
  * If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- * will allow duplicates.  Otherwise (UNIQUE_CHECK_YES or
+ * will allow duplicates.  Otherwise (UNIQUE_CHECK_YES or
  * UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
  * For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
  * don't actually insert.
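The four modes named in this hunk come from PostgreSQL's IndexUniqueCheck enum. As a rough standalone illustration of the contract the comment describes (should_insert() and is_duplicate are invented for the example; this is not the real _bt_doinsert() logic):

    #include <stdbool.h>

    typedef enum
    {
        UNIQUE_CHECK_NO,        /* no uniqueness checking at all */
        UNIQUE_CHECK_YES,       /* error on a committed duplicate */
        UNIQUE_CHECK_PARTIAL,   /* insert anyway, report possible dup */
        UNIQUE_CHECK_EXISTING   /* run the duplicate check only, no insert */
    } IndexUniqueCheck;

    /* Returns true if the caller should proceed with the physical insert. */
    static bool
    should_insert(IndexUniqueCheck checkUnique, bool is_duplicate)
    {
        switch (checkUnique)
        {
            case UNIQUE_CHECK_NO:
            case UNIQUE_CHECK_PARTIAL:
                return true;            /* duplicates are allowed here */
            case UNIQUE_CHECK_YES:
                return !is_duplicate;   /* real code ereport()s on a dup */
            case UNIQUE_CHECK_EXISTING:
                return false;           /* duplicate check only */
        }
        return false;
    }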
@@ -129,7 +129,7 @@ top:
  * If the page was split between the time that we surrendered our read
  * lock and acquired our write lock, then this page may no longer be the
  * right place for the key we want to insert.  In this case, we need to
- * move right in the tree.  See Lehman and Yao for an excruciatingly
+ * move right in the tree.  See Lehman and Yao for an excruciatingly
  * precise description.
  */
 buf = _bt_moveright(rel, buf, natts, itup_scankey, false,
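The Lehman & Yao move-right step referenced here reduces to a simple loop: a concurrent split can only have moved our target to the right, so step along right-links while the key exceeds the page's high key. A minimal sketch with toy types (the real _bt_moveright() also swaps buffer locks at each step):

    typedef struct ToyPage
    {
        int             highkey;    /* upper bound for keys on this page */
        struct ToyPage *rightlink;  /* right sibling; NULL at the rightmost page */
    } ToyPage;

    /* Step right while the insertion key exceeds this page's high key;
     * the rightmost page has no right sibling, so the loop stops there. */
    static ToyPage *
    move_right(ToyPage *page, int insertkey)
    {
        while (page->rightlink != NULL && insertkey > page->highkey)
            page = page->rightlink;
        return page;
    }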
@@ -211,7 +211,7 @@ top:
  * is the first tuple on the next page.
  *
  * Returns InvalidTransactionId if there is no conflict, else an xact ID
- * we must wait for to see if it commits a conflicting tuple.  If an actual
+ * we must wait for to see if it commits a conflicting tuple.  If an actual
  * conflict is detected, no return --- just ereport().
  *
  * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
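On the caller side this return convention becomes a wait-and-retry loop: keep re-running the check until no in-doubt transaction remains. A runnable sketch of that shape (check_unique() and wait_for_xact() are stand-ins for _bt_check_unique() and XactLockTableWait(), not the real loop in _bt_doinsert()):

    #include <stdio.h>

    typedef unsigned int TransactionId;
    #define InvalidTransactionId ((TransactionId) 0)

    static TransactionId
    check_unique(void)
    {
        return InvalidTransactionId;    /* stub: pretend no conflict */
    }

    static void
    wait_for_xact(TransactionId xid)
    {
        printf("waiting on xact %u\n", xid);
    }

    int
    main(void)
    {
        TransactionId xwait;

        /* Loop until no in-doubt transaction remains, then insert. */
        while ((xwait = check_unique()) != InvalidTransactionId)
            wait_for_xact(xwait);
        /* ... safe to proceed with the physical insertion ... */
        return 0;
    }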
@@ -293,7 +293,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,

 /*
  * If we are doing a recheck, we expect to find the tuple we
- * are rechecking.  It's not a duplicate, but we have to keep
+ * are rechecking.  It's not a duplicate, but we have to keep
  * scanning.
  */
 if (checkUnique == UNIQUE_CHECK_EXISTING &&
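The rule in this hunk is that, when rechecking, finding our own tuple is expected and is not a conflict, so the scan skips it and continues. A toy version of that skip test (invented names; the real code compares heap TIDs inside the scan loop):

    #include <stdbool.h>

    typedef struct { unsigned block; unsigned short offset; } ToyTid;

    static bool
    tid_equal(ToyTid a, ToyTid b)
    {
        return a.block == b.block && a.offset == b.offset;
    }

    /* Returns true when the scan should skip this match and keep going. */
    static bool
    skip_own_tuple(bool rechecking, ToyTid scanned, ToyTid own)
    {
        return rechecking && tid_equal(scanned, own);
    }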
@@ -482,7 +482,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
  * If the new key is equal to one or more existing keys, we can
  * legitimately place it anywhere in the series of equal keys --- in fact,
  * if the new key is equal to the page's "high key" we can place it on
- * the next page.  If it is equal to the high key, and there's not room
+ * the next page.  If it is equal to the high key, and there's not room
  * to insert the new tuple on the current page without splitting, then
  * we can move right hoping to find more free space and avoid a split.
  * (We should not move right indefinitely, however, since that leads to
@@ -494,7 +494,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
  * removing any LP_DEAD tuples.
  *
  * On entry, *buf and *offsetptr point to the first legal position
- * where the new tuple could be inserted.  The caller should hold an
+ * where the new tuple could be inserted.  The caller should hold an
  * exclusive lock on *buf.  *offsetptr can also be set to
  * InvalidOffsetNumber, in which case the function will search for the
  * right location within the page if needed.  On exit, they point to the
@@ -564,7 +564,7 @@ _bt_findinsertloc(Relation rel,
  * on every insert.  We implement "get tired" as a random choice,
  * since stopping after scanning a fixed number of pages wouldn't work
  * well (we'd never reach the right-hand side of previously split
- * pages).  Currently the probability of moving right is set at 0.99,
+ * pages).  Currently the probability of moving right is set at 0.99,
  * which may seem too high to change the behavior much, but it does an
  * excellent job of preventing O(N^2) behavior with many equal keys.
  *----------
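The 0.99 figure makes the walk a geometric process: with a 1% stop chance per step, the expected number of extra pages visited is (1 - 0.01)/0.01 = 99, and that is only an upper bound because the real loop also stops at the rightmost page or when keys stop being equal. A runnable toy demo of the random choice (the real code compares random() against a fraction of MAX_RANDOM_VALUE rather than using rand() % 100):

    #include <stdio.h>
    #include <stdlib.h>

    static int
    pages_moved_right(void)
    {
        int moves = 0;

        while (rand() % 100 != 0)   /* 99% chance per step: move right */
            moves++;                /* ~1% chance: "get tired" and stop */
        return moves;
    }

    int
    main(void)
    {
        srand(42);
        printf("moved right %d pages before getting tired\n",
               pages_moved_right());
        return 0;
    }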
@@ -574,7 +574,7 @@ _bt_findinsertloc(Relation rel,
 while (PageGetFreeSpace(page) < itemsz)
 {
     Buffer      rbuf;
-    BlockNumber rblkno;
+    BlockNumber rblkno;

     /*
      * before considering moving right, see if we can obtain enough space
@@ -620,10 +620,10 @@ _bt_findinsertloc(Relation rel,
 lpageop = (BTPageOpaque) PageGetSpecialPointer(page);

 /*
- * If this page was incompletely split, finish the split now.
- * We do this while holding a lock on the left sibling, which
- * is not good because finishing the split could be a fairly
- * lengthy operation.  But this should happen very seldom.
+ * If this page was incompletely split, finish the split now.  We
+ * do this while holding a lock on the left sibling, which is not
+ * good because finishing the split could be a fairly lengthy
+ * operation.  But this should happen very seldom.
  */
 if (P_INCOMPLETE_SPLIT(lpageop))
 {
@@ -681,7 +681,7 @@ _bt_findinsertloc(Relation rel,
  * + updates the metapage if a true root or fast root is split.
  *
  * On entry, we must have the correct buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked.  On return,
+ * insertion, and the buffer must be pinned and write-locked.  On return,
  * we will have dropped both the pin and the lock on the buffer.
  *
  * When inserting to a non-leaf page, 'cbuf' is the left-sibling of the
@@ -978,7 +978,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
  * origpage is the original page to be split.  leftpage is a temporary
  * buffer that receives the left-sibling data, which will be copied back
  * into origpage on success.  rightpage is the new page that receives the
- * right-sibling data.  If we fail before reaching the critical section,
+ * right-sibling data.  If we fail before reaching the critical section,
  * origpage hasn't been modified and leftpage is only workspace.  In
  * principle we shouldn't need to worry about rightpage either, because it
  * hasn't been linked into the btree page structure; but to avoid leaving
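The workspace pattern described here is: assemble the new left-page image in a scratch buffer, and copy it over the original only inside the critical section, where nothing can fail, so any earlier error leaves origpage untouched. A minimal sketch of that shape (memcpy over a fixed-size toy page stands in for PageRestoreTempPage()):

    #include <string.h>

    #define TOY_PAGE_SIZE 8192

    static void
    split_copy_back(char *origpage, const char *leftpage_scratch)
    {
        /* ... all failure-prone work happens before this point ... */
        /* critical section: no failures allowed beyond here */
        memcpy(origpage, leftpage_scratch, TOY_PAGE_SIZE);
    }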
@@ -1196,7 +1196,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
  * page.  If you're confused, imagine that page A splits to A B and
  * then again, yielding A C B, while vacuum is in progress.  Tuples
  * originally in A could now be in either B or C, hence vacuum must
- * examine both pages.  But if D, our right sibling, has a different
+ * examine both pages.  But if D, our right sibling, has a different
  * cycleid then it could not contain any tuples that were in A when
  * the vacuum started.
  */
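The cycleid reasoning above boils down to one comparison: a page can only hold tuples the current vacuum still needs to see if it was tagged with that vacuum's cycle id when it was split off. A toy version of the test (BTCycleId is the real type name; the helper is invented):

    #include <stdbool.h>

    typedef unsigned short BTCycleId;   /* 0 means "no vacuum in progress" */

    /* A split that overlaps a vacuum scan tags the new right page with the
     * vacuum's cycle id; a page with a different (or zero) cycle id cannot
     * contain tuples moved out of a page the vacuum already scanned. */
    static bool
    vacuum_must_visit(BTCycleId page_cycleid, BTCycleId vacuum_cycleid)
    {
        return page_cycleid != 0 && page_cycleid == vacuum_cycleid;
    }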
@@ -1330,11 +1330,10 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
 lastrdata++;

 /*
- * Although we don't need to WAL-log anything on the left page,
- * we still need XLogInsert to consider storing a full-page image
- * of the left page, so make an empty entry referencing that
- * buffer.  This also ensures that the left page is always backup
- * block 1.
+ * Although we don't need to WAL-log anything on the left page, we
+ * still need XLogInsert to consider storing a full-page image of
+ * the left page, so make an empty entry referencing that buffer.
+ * This also ensures that the left page is always backup block 1.
  */
 lastrdata->data = NULL;
 lastrdata->len = 0;
@@ -1448,7 +1447,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
  *
  * We return the index of the first existing tuple that should go on the
  * righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page.  The bool is necessary to disambiguate the case
+ * the left or right page.  The bool is necessary to disambiguate the case
  * where firstright == newitemoff.
  */
 static OffsetNumber
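Why the extra boolean: for existing items the offset alone decides which side of the split they land on, but when the new tuple's offset equals firstright the offset is ambiguous and only the flag can say. A toy consumer of that return pair (invented SplitChoice struct and helper, not the real _bt_findsplitloc() interface):

    #include <stdbool.h>

    typedef unsigned short OffsetNumber;

    typedef struct
    {
        OffsetNumber firstright;    /* first existing tuple sent right */
        bool         newitemonleft; /* which half receives the new tuple */
    } SplitChoice;

    /* Decide which page an item ends up on after the split. */
    static bool
    goes_left(SplitChoice c, OffsetNumber off, bool is_new_item)
    {
        if (is_new_item && off == c.firstright)
            return c.newitemonleft; /* the only case the flag decides */
        return off < c.firstright;
    }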
@@ -1684,7 +1683,7 @@ _bt_checksplitloc(FindSplitData *state,
  *
  * On entry, buf and rbuf are the left and right split pages, which we
  * still hold write locks on per the L&Y algorithm.  We release the
- * write locks once we have write lock on the parent page.  (Any sooner,
+ * write locks once we have write lock on the parent page.  (Any sooner,
  * and it'd be possible for some other process to try to split or delete
  * one of these pages, and get confused because it cannot find the downlink.)
  *
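The ordering rule in this hunk is acquire-before-release: take the parent's write lock first, and only then let go of the two split halves, so no concurrent splitter or deleter can see the pages while their downlink is missing. A much-simplified sketch of that ordering using pthread mutexes (PostgreSQL actually uses buffer content locks, which are reader/writer locks, not mutexes):

    #include <pthread.h>

    static void
    insert_parent_locked(pthread_mutex_t *left, pthread_mutex_t *right,
                         pthread_mutex_t *parent)
    {
        /* caller already holds left and right, per L&Y */
        pthread_mutex_lock(parent);     /* acquire parent first... */
        pthread_mutex_unlock(left);     /* ...only then release children */
        pthread_mutex_unlock(right);
        /* ... insert the downlink into the parent ... */
        pthread_mutex_unlock(parent);
    }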
@@ -1705,7 +1704,7 @@ _bt_insert_parent(Relation rel,
  * Here we have to do something Lehman and Yao don't talk about: deal with
  * a root split and construction of a new root.  If our stack is empty
  * then we have just split a node on what had been the root level when we
- * descended the tree.  If it was still the root then we perform a
+ * descended the tree.  If it was still the root then we perform a
  * new-root construction.  If it *wasn't* the root anymore, search to find
  * the next higher level that someone constructed meanwhile, and find the
  * right place to insert as for the normal case.
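The control flow this comment describes has three branches keyed off the descent stack. A toy rendering of just that branching (all names are invented stubs; none of this is the real _bt_insert_parent()):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct ToyStack ToyStack;   /* parent trail saved during descent */

    static bool still_the_root(void) { return true; }              /* stub */
    static void build_new_root(void) { /* make a new root page */ }
    static ToyStack *search_for_parent(void) { return NULL; }      /* stub */
    static void insert_downlink(ToyStack *parent) { (void) parent; }

    static void
    handle_parent_insert(ToyStack *stack)
    {
        if (stack == NULL)          /* we split what had been the root level */
        {
            if (still_the_root())
                build_new_root();                       /* new-root construction */
            else
                insert_downlink(search_for_parent());   /* a level grew meanwhile */
        }
        else
            insert_downlink(stack);                     /* normal case */
    }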
@@ -1917,7 +1916,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
 /*
  * These loops will check every item on the page --- but in an
  * order that's attuned to the probability of where it actually
- * is.  Scan to the right first, then to the left.
+ * is.  Scan to the right first, then to the left.
  */
 for (offnum = start;
      offnum <= maxoff;
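The search order here exploits the fact that concurrent splits move items to the right, so the target is more likely to be at or beyond the remembered position than before it. A runnable standalone sketch of the same right-then-left scan over a plain array:

    #include <stdio.h>

    static int
    find_item(const int *items, int nitems, int start, int target)
    {
        int i;

        for (i = start; i < nitems; i++)    /* scan right first... */
            if (items[i] == target)
                return i;
        for (i = start - 1; i >= 0; i--)    /* ...then fall back left */
            if (items[i] == target)
                return i;
        return -1;
    }

    int
    main(void)
    {
        int page[] = {10, 20, 30, 40, 50};

        printf("%d\n", find_item(page, 5, 3, 20));  /* falls back left: 1 */
        return 0;
    }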