
pgindent run for 9.4

This includes removing tabs after periods in C comments, which was
applied to back branches, so this change should not affect backpatching.
Bruce Momjian
2014-05-06 12:12:18 -04:00
parent fb85cd4320
commit 0a78320057
854 changed files with 7848 additions and 7368 deletions

View File

@@ -25,7 +25,7 @@
 /*
 * SPPageDesc tracks all info about a page we are inserting into. In some
 * situations it actually identifies a tuple, or even a specific node within
- * an inner tuple.	But any of the fields can be invalid.	If the buffer
+ * an inner tuple.  But any of the fields can be invalid.  If the buffer
 * field is valid, it implies we hold pin and exclusive lock on that buffer.
 * page pointer should be valid exactly when buffer is.
 */
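
For orientation, a minimal sketch of the descriptor this comment describes; the field names are assumptions drawn from the comment itself and PostgreSQL's usual page types, not a quote of the actual definition:

/* Hedged sketch only; the real SPPageDesc may differ. Any field can be
 * invalid; a valid buffer implies pin plus exclusive lock, and page is
 * valid exactly when buffer is. */
typedef struct SPPageDescSketch
{
	BlockNumber blkno;			/* block number, or InvalidBlockNumber */
	Buffer		buffer;			/* pinned/locked buffer, or InvalidBuffer */
	Page		page;			/* BufferGetPage(buffer), or NULL */
	OffsetNumber offnum;		/* identified tuple's offset, if any */
	int			node;			/* node index within an inner tuple, or -1 */
} SPPageDescSketch;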
@@ -249,7 +249,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
 else
 {
 /*
- * Tuple must be inserted into existing chain.	We mustn't change the
+ * Tuple must be inserted into existing chain.  We mustn't change the
 * chain's head address, but we don't need to chase the entire chain
 * to put the tuple at the end; we can insert it second.
 *
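
The "insert it second" trick is easiest to see on an ordinary singly linked list; a self-contained sketch (SP-GiST actually chains leaf tuples by offset number within a page, not by pointer):

#include <stddef.h>

typedef struct Leaf
{
	struct Leaf *next;
	/* ... tuple payload ... */
} Leaf;

/*
 * Link newLeaf directly behind head. The head's address, which the
 * parent downlink references, never changes, and no O(n) walk to the
 * chain's end is needed.
 */
static void
insert_second(Leaf *head, Leaf *newLeaf)
{
	newLeaf->next = head->next;
	head->next = newLeaf;
}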
@@ -814,7 +814,7 @@ doPickSplit(Relation index, SpGistState *state,
 * We may not actually insert new tuple because another picksplit may be
 * necessary due to too large value, but we will try to allocate enough
 * space to include it; and in any case it has to be included in the input
- * for the picksplit function.	So don't increment nToInsert yet.
+ * for the picksplit function.  So don't increment nToInsert yet.
 */
 in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
 heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@@ -872,7 +872,7 @@ doPickSplit(Relation index, SpGistState *state,
 /*
 * Check to see if the picksplit function failed to separate the values,
 * ie, it put them all into the same child node. If so, select allTheSame
- * mode and create a random split instead.	See comments for
+ * mode and create a random split instead.  See comments for
 * checkAllTheSame as to why we need to know if the new leaf tuples could
 * fit on one page.
 */
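
A hedged sketch of the "failed to separate" test; mapTuplesToNodes follows the picksplit output convention, but the helper itself is illustrative:

#include <stdbool.h>

/* True when picksplit assigned every input tuple to one child node,
 * the degenerate case that forces allTheSame mode. */
static bool
picksplit_failed(const int *mapTuplesToNodes, int nTuples)
{
	int			i;

	for (i = 1; i < nTuples; i++)
	{
		if (mapTuplesToNodes[i] != mapTuplesToNodes[0])
			return false;
	}
	return true;
}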
@@ -1037,7 +1037,7 @@ doPickSplit(Relation index, SpGistState *state,
 &xlrec.initDest);
 /*
- * Attempt to assign node groups to the two pages.	We might fail to
+ * Attempt to assign node groups to the two pages.  We might fail to
 * do so, even if totalLeafSizes is less than the available space,
 * because we can't split a group across pages.
 */
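
The failure mode is easiest to see as two-bin packing of indivisible groups; a self-contained sketch with illustrative names:

#include <stdbool.h>

/* Greedily place whole node groups on page A, overflowing to page B.
 * A group never straddles both pages, so freeA + freeB >= total does
 * not guarantee success -- exactly the failure the comment allows for. */
static bool
assign_groups(const int *groupSize, int nGroups,
			  int freeA, int freeB, bool *onPageA)
{
	int			i;

	for (i = 0; i < nGroups; i++)
	{
		if (groupSize[i] <= freeA)
		{
			onPageA[i] = true;
			freeA -= groupSize[i];
		}
		else if (groupSize[i] <= freeB)
		{
			onPageA[i] = false;
			freeB -= groupSize[i];
		}
		else
			return false;
	}
	return true;
}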
@@ -1917,7 +1917,7 @@ spgdoinsert(Relation index, SpGistState *state,
 if (current.blkno == InvalidBlockNumber)
 {
 /*
- * Create a leaf page.	If leafSize is too large to fit on a page,
+ * Create a leaf page.  If leafSize is too large to fit on a page,
 * we won't actually use the page yet, but it simplifies the API
 * for doPickSplit to always have a leaf page at hand; so just
 * quietly limit our request to a page size.
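
The "quietly limit" step amounts to clamping the request before asking for a buffer; a hedged fragment assuming SP-GiST's Min() idiom and SPGIST_PAGE_CAPACITY:

/* request at most one page's worth; an oversized leafSize is
 * doPickSplit's problem, but it still wants a leaf page in hand */
buffer = SpGistGetBuffer(index, GBUF_LEAF,
						 Min(leafSize, SPGIST_PAGE_CAPACITY), &isNew);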
@@ -2120,7 +2120,7 @@ spgdoinsert(Relation index, SpGistState *state,
 out.result.addNode.nodeLabel);
 /*
- * Retry insertion into the enlarged node.	We assume that
+ * Retry insertion into the enlarged node.  We assume that
 * we'll get a MatchNode result this time.
 */
 goto process_inner_tuple;

View File

@@ -163,7 +163,7 @@ spgbuildempty(PG_FUNCTION_ARGS)
 page = (Page) palloc(BLCKSZ);
 SpGistInitMetapage(page);
-/* Write the page.	If archiving/streaming, XLOG it. */
+/* Write the page.  If archiving/streaming, XLOG it. */
 PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
 smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
 (char *) page, true);
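
The hunk ends before the "XLOG it" half of the comment; as a hedged sketch only, the companion step in PostgreSQL code of this vintage typically looks like:

/* assumed continuation, for illustration; not a quote of the file */
if (XLogIsNeeded())
	log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM,
				SPGIST_METAPAGE_BLKNO, page, true);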
@@ -232,7 +232,7 @@ spginsert(PG_FUNCTION_ARGS)
 /*
 * We might have to repeat spgdoinsert() multiple times, if conflicts
 * occur with concurrent insertions. If so, reset the insertCtx each time
- * to avoid cumulative memory consumption.	That means we also have to
+ * to avoid cumulative memory consumption.  That means we also have to
 * redo initSpGistState(), but it's cheap enough not to matter.
 */
 while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull))
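
A hedged sketch of the loop body the comment implies; insertCtx and the reset-then-reinit order are assumptions:

while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull))
{
	/* a concurrent insertion conflicted: drop this attempt's memory */
	MemoryContextReset(insertCtx);
	/* cheap enough to redo from scratch */
	initSpGistState(&spgstate, index);
}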

View File

@@ -103,7 +103,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
 * Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
 *
 * The point here is to eliminate null-related considerations from what the
- * opclass consistent functions need to deal with.	We assume all SPGiST-
+ * opclass consistent functions need to deal with.  We assume all SPGiST-
 * indexable operators are strict, so any null RHS value makes the scan
 * condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL
 * conditions; their effect is reflected into searchNulls/searchNonNulls.
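
A hedged sketch of that preprocessing; SK_ISNULL, SK_SEARCHNULL and SK_SEARCHNOTNULL are stock scan-key flags, while the loop shape and qual_ok are illustrative:

for (i = 0; i < scan->numberOfKeys; i++)
{
	ScanKey		skey = &scan->keyData[i];

	if (skey->sk_flags & SK_SEARCHNULL)
		so->searchNulls = true;		/* IS NULL condition */
	else if (skey->sk_flags & SK_SEARCHNOTNULL)
		so->searchNonNulls = true;	/* IS NOT NULL condition */
	else if (skey->sk_flags & SK_ISNULL)
		qual_ok = false;			/* strict operator + null RHS */
	else
		so->keyData[so->numberOfKeys++] = *skey;	/* ordinary key */
}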
@@ -600,7 +600,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
 if (so->want_itup)
 {
 /*
- * Reconstruct desired IndexTuple.	We have to copy the datum out of
+ * Reconstruct desired IndexTuple.  We have to copy the datum out of
 * the temp context anyway, so we may as well create the tuple here.
 */
 so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,

View File

@@ -26,11 +26,11 @@
 * In the worst case, an inner tuple in a text radix tree could have as many
 * as 256 nodes (one for each possible byte value). Each node can take 16
 * bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page
- * of size BLCKSZ.	Rather than assuming we know the exact amount of overhead
+ * of size BLCKSZ.  Rather than assuming we know the exact amount of overhead
 * imposed by page headers, tuple headers, etc, we leave 100 bytes for that
 * (the actual overhead should be no more than 56 bytes at this writing, so
 * there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long.	Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long.  Unfortunately, because 256 * 16 is
 * already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
 * it is always possible to get "SPGiST inner tuple size exceeds maximum"
 * if there are too many distinct next-byte values at a given place in the
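
The budget is worth working through once; a hedged example assuming BLCKSZ = 8192 and MAXALIGN = 8:

/* worst-case node array: 256 * 16 = 4096 bytes
 * reserved overhead:                 100 bytes
 * safe prefix budget: 8192 - 4096 - 100 = 3996 bytes
 * at BLCKSZ = 4096 the formula gives -196, i.e. no safe prefix
 * length, matching the claim above about sub-8K block sizes */
#define SKETCH_MAX_PREFIX_LEN	(BLCKSZ - 256 * 16 - 100)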
@@ -327,7 +327,7 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
 }
 /*
- * Sort by label bytes so that we can group the values into nodes.	This
+ * Sort by label bytes so that we can group the values into nodes.  This
 * also ensures that the nodes are ordered by label value, allowing the
 * use of binary search in searchChar.
 */
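
The sort is a plain byte sort; a self-contained sketch of a comparator giving both the grouping and the ordering that binary search needs:

#include <stdlib.h>

/* illustrative comparator over one-byte node labels */
static int
cmp_label(const void *a, const void *b)
{
	return (int) *(const unsigned char *) a -
		   (int) *(const unsigned char *) b;
}

/* usage sketch: qsort(labels, nLabels, sizeof(unsigned char), cmp_label); */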
@@ -377,7 +377,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
 /*
 * Reconstruct values represented at this tuple, including parent data,
- * prefix of this tuple if any, and the node label if any.	in->level
+ * prefix of this tuple if any, and the node label if any.  in->level
 * should be the length of the previously reconstructed value, and the
 * number of bytes added here is prefixSize or prefixSize + 1.
 *
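
The "prefixSize or prefixSize + 1" bookkeeping, as a one-line hedged sketch with assumed names:

/* the value reconstructed so far has length in->level; this tuple adds
 * its prefix, plus one byte if we descended through a labeled node */
newLevel = in->level + prefixSize + (haveNodeLabel ? 1 : 0);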

View File

@@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
 *
 * When requesting an inner page, if we get one with the wrong parity,
 * we just release the buffer and try again. We will get a different page
- * because GetFreeIndexPage will have marked the page used in FSM.	The page
+ * because GetFreeIndexPage will have marked the page used in FSM.  The page
 * is entered in our local lastUsedPages cache, so there's some hope of
 * making use of it later in this session, but otherwise we rely on VACUUM
 * to eventually re-enter the page in FSM, making it available for recycling.
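
A hedged sketch of the retry; GBUF_INNER_PARITY and GBUF_PARITY_MASK are from SP-GiST's private header, but the loop shape is an assumption:

for (;;)
{
	blkno = GetFreeIndexPage(index);
	if (blkno == InvalidBlockNumber)
		break;				/* FSM empty: extend the relation instead */
	if (GBUF_INNER_PARITY(blkno) == (flags & GBUF_PARITY_MASK))
		break;				/* right parity: use this page */
	/* wrong parity: FSM already shows it used, so the next call gets a
	 * different page; remember this one in lastUsedPages and retry */
}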
@@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
 *
 * When we return a buffer to the caller, the page is *not* entered into
 * the lastUsedPages cache; we expect the caller will do so after it's taken
- * whatever space it will use.	This is because after the caller has used up
+ * whatever space it will use.  This is because after the caller has used up
 * some space, the page might have less space than whatever was cached already
 * so we'd rather not trash the old cache entry.
 */
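
In caller terms the contract reads roughly like this hedged sketch; SpGistSetLastUsedPage is the real cache-update helper, the sequence is illustrative:

buffer = SpGistGetBuffer(index, flags, needSpace, &isNew);
/* ... consume space: add the new tuple(s) to the page ... */
SpGistSetLastUsedPage(index, buffer);	/* cache the true remaining space */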
@@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
 /*
 * If possible, increase the space request to include relation's
- * fillfactor.	This ensures that when we add unrelated tuples to a page,
+ * fillfactor.  This ensures that when we add unrelated tuples to a page,
 * we try to keep 100-fillfactor% available for adding tuples that are
 * related to the ones already on it. But fillfactor mustn't cause an
 * error for requests that would otherwise be legal.
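
A hedged sketch of the adjustment; RelationGetTargetPageFreeSpace is the stock macro for this, the clamping line is an assumption:

needSpace += RelationGetTargetPageFreeSpace(index,
											SPGIST_DEFAULT_FILLFACTOR);
needSpace = Min(needSpace, SPGIST_PAGE_CAPACITY);	/* keep request legal */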

View File

@@ -211,7 +211,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
 * Figure out exactly what we have to do. We do this separately from
 * actually modifying the page, mainly so that we have a representation
 * that can be dumped into WAL and then the replay code can do exactly
- * the same thing.	The output of this step consists of six arrays
+ * the same thing.  The output of this step consists of six arrays
 * describing four kinds of operations, to be performed in this order:
 *
 * toDead[]: tuple numbers to be replaced with DEAD tuples
@@ -287,7 +287,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
 else
 {
 /*
- * Second or later live tuple.	Arrange to re-chain it to the
+ * Second or later live tuple.  Arrange to re-chain it to the
 * previous live one, if there was a gap.
 */
 if (interveningDeletable)

View File

@@ -41,7 +41,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
 }
 /*
- * Add a leaf tuple, or replace an existing placeholder tuple.	This is used
+ * Add a leaf tuple, or replace an existing placeholder tuple.  This is used
 * to replay SpGistPageAddNewItem() operations. If the offset points at an
 * existing tuple, it had better be a placeholder tuple.
 */
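
A hedged sketch of the add-or-replace step; names are assumed, and the real helper also verifies that any existing tuple is in fact a placeholder:

if (offnum <= PageGetMaxOffsetNumber(page))
	PageIndexTupleDelete(page, offnum);		/* remove the placeholder */
if (PageAddItem(page, (Item) tuple, size, offnum,
				false, false) != offnum)
	elog(ERROR, "failed to add item of size %u", (unsigned int) size);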
@@ -462,7 +462,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
 }
 /*
- * Update parent downlink.	Since parent could be in either of the
+ * Update parent downlink.  Since parent could be in either of the
 * previous two buffers, it's a bit tricky to determine which BKP bit
 * applies.
 */
@@ -799,7 +799,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 bbi++;
 /*
- * Now we can release the leaf-page locks.	It's okay to do this before
+ * Now we can release the leaf-page locks.  It's okay to do this before
 * updating the parent downlink.
 */
 if (BufferIsValid(srcBuffer))