
Change TRUE/FALSE to true/false

The lower-case spellings are the C and C++ standard ones and are used in
most parts of the PostgreSQL sources.  The upper-case spellings are only
used in some files/modules.  So standardize on the lower-case spellings.

The APIs for ICU, Perl, and Windows define their own TRUE and FALSE, so
those are left as is when using those APIs.

In code comments, we use the lower-case spelling for the C concepts and
keep the upper-case spelling for the SQL concepts.
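For illustration, the convention being adopted amounts to this minimal
sketch (stdbool.h is the standard home of the lower-case spellings; the
helper function here is invented for the example):

    #include <stdbool.h>

    /* C concept: plain true/false, no TRUE/FALSE macros */
    static bool
    is_even(int n)
    {
        return (n % 2) == 0;
    }

    /* API exception: when talking to e.g. the Windows API, its own
     * upper-case macros stay, as in: BOOL ok = TRUE; */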

Reviewed-by: Michael Paquier <michael.paquier@gmail.com>
Peter Eisentraut
2017-08-16 00:22:32 -04:00
parent 4497f2f3b3
commit 2eb4a831e5
216 changed files with 1168 additions and 1168 deletions

View File

@ -315,7 +315,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
*
* Index must be locked in ShareUpdateExclusiveLock mode.
*
-* Return FALSE if caller should retry.
+* Return false if caller should retry.
*/
bool
brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
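For context, a hedged sketch of how a caller might honor that retry
contract (loop shape assumed, not taken verbatim from brin.c):

    /* keep retrying until the revmap entry is gone */
    do
    {
        CHECK_FOR_INTERRUPTS();
    } while (!brinRevmapDesummarizeRange(idxrel, heapBlk));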

View File

@ -289,7 +289,7 @@ heap_fill_tuple(TupleDesc tupleDesc,
*/
/* ----------------
-* heap_attisnull - returns TRUE iff tuple attribute is not present
+* heap_attisnull - returns true iff tuple attribute is not present
* ----------------
*/
bool
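A hedged usage sketch of this test, assuming the two-argument form of
heap_attisnull() from this era (later releases add a tuple descriptor):

    /* true when attribute attnum of tuple is SQL NULL or absent */
    if (heap_attisnull(tuple, attnum))
        elog(DEBUG1, "attribute %d is null", attnum);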

View File

@ -41,7 +41,7 @@ ginTraverseLock(Buffer buffer, bool searchMode)
page = BufferGetPage(buffer);
if (GinPageIsLeaf(page))
{
-if (searchMode == FALSE)
+if (searchMode == false)
{
/* we should relock our page */
LockBuffer(buffer, GIN_UNLOCK);
@ -107,7 +107,7 @@ ginFindLeafPage(GinBtree btree, bool searchMode, Snapshot snapshot)
* ok, page is correctly locked, we should check to move right ..,
* root never has a right link, so small optimization
*/
-while (btree->fullScan == FALSE && stack->blkno != btree->rootBlkno &&
+while (btree->fullScan == false && stack->blkno != btree->rootBlkno &&
btree->isMoveRight(btree, page))
{
BlockNumber rightlink = GinPageGetOpaque(page)->rightlink;

View File

@ -52,7 +52,7 @@ ginCombineData(RBNode *existing, const RBNode *newdata, void *arg)
}
/* If item pointers are not ordered, they will need to be sorted later */
-if (eo->shouldSort == FALSE)
+if (eo->shouldSort == false)
{
int res;
@ -60,7 +60,7 @@ ginCombineData(RBNode *existing, const RBNode *newdata, void *arg)
Assert(res != 0);
if (res > 0)
-eo->shouldSort = TRUE;
+eo->shouldSort = true;
}
eo->list[eo->count] = en->list[0];
@ -176,7 +176,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
ea->key = getDatumCopy(accum, attnum, key);
ea->maxcount = DEF_NPTR;
ea->count = 1;
-ea->shouldSort = FALSE;
+ea->shouldSort = false;
ea->list =
(ItemPointerData *) palloc(sizeof(ItemPointerData) * DEF_NPTR);
ea->list[0] = *heapptr;

View File

@ -235,9 +235,9 @@ dataIsMoveRight(GinBtree btree, Page page)
ItemPointer iptr = GinDataPageGetRightBound(page);
if (GinPageRightMost(page))
-return FALSE;
+return false;
-return (ginCompareItemPointers(&btree->itemptr, iptr) > 0) ? TRUE : FALSE;
+return (ginCompareItemPointers(&btree->itemptr, iptr) > 0) ? true : false;
}
/*
@ -1875,9 +1875,9 @@ ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno)
btree->fillRoot = ginDataFillRoot;
btree->prepareDownlink = dataPrepareDownlink;
-btree->isData = TRUE;
-btree->fullScan = FALSE;
-btree->isBuild = FALSE;
+btree->isData = true;
+btree->fullScan = false;
+btree->isBuild = false;
}
/*
@ -1919,9 +1919,9 @@ ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno,
ginPrepareDataScan(btree, index, rootBlkno);
-btree->fullScan = TRUE;
+btree->fullScan = true;
-stack = ginFindLeafPage(btree, TRUE, snapshot);
+stack = ginFindLeafPage(btree, true, snapshot);
return stack;
}

View File

@ -30,7 +30,7 @@ static void entrySplitPage(GinBtree btree, Buffer origbuf,
* Form a tuple for entry tree.
*
* If the tuple would be too big to be stored, function throws a suitable
-* error if errorTooBig is TRUE, or returns NULL if errorTooBig is FALSE.
+* error if errorTooBig is true, or returns NULL if errorTooBig is false.
*
* See src/backend/access/gin/README for a description of the index tuple
* format that is being built here. We build on the assumption that we
@ -249,7 +249,7 @@ entryIsMoveRight(GinBtree btree, Page page)
GinNullCategory category;
if (GinPageRightMost(page))
-return FALSE;
+return false;
itup = getRightMostTuple(page);
attnum = gintuple_get_attrnum(btree->ginstate, itup);
@ -258,9 +258,9 @@ entryIsMoveRight(GinBtree btree, Page page)
if (ginCompareAttEntries(btree->ginstate,
btree->entryAttnum, btree->entryKey, btree->entryCategory,
attnum, key, category) > 0)
-return TRUE;
+return true;
-return FALSE;
+return false;
}
/*
@ -356,7 +356,7 @@ entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack)
if (btree->fullScan)
{
stack->off = FirstOffsetNumber;
-return TRUE;
+return true;
}
low = FirstOffsetNumber;
@ -762,9 +762,9 @@ ginPrepareEntryScan(GinBtree btree, OffsetNumber attnum,
btree->fillRoot = ginEntryFillRoot;
btree->prepareDownlink = entryPrepareDownlink;
-btree->isData = FALSE;
-btree->fullScan = FALSE;
-btree->isBuild = FALSE;
+btree->isData = false;
+btree->fullScan = false;
+btree->isBuild = false;
btree->entryAttnum = attnum;
btree->entryKey = key;

View File

@ -311,7 +311,7 @@ restartScanEntry:
entry->nlist = 0;
entry->matchBitmap = NULL;
entry->matchResult = NULL;
-entry->reduceResult = FALSE;
+entry->reduceResult = false;
entry->predictNumberResult = 0;
/*
@ -324,9 +324,9 @@ restartScanEntry:
stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
page = BufferGetPage(stackEntry->buffer);
/* ginFindLeafPage() will have already checked snapshot age. */
-needUnlock = TRUE;
+needUnlock = true;
-entry->isFinished = TRUE;
+entry->isFinished = true;
if (entry->isPartialMatch ||
entry->queryCategory == GIN_CAT_EMPTY_QUERY)
@ -363,7 +363,7 @@ restartScanEntry:
if (entry->matchBitmap && !tbm_is_empty(entry->matchBitmap))
{
entry->matchIterator = tbm_begin_iterate(entry->matchBitmap);
-entry->isFinished = FALSE;
+entry->isFinished = false;
}
}
else if (btreeEntry.findItem(&btreeEntry, stackEntry))
@ -385,7 +385,7 @@ restartScanEntry:
* root of posting tree.
*/
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
-needUnlock = FALSE;
+needUnlock = false;
stack = ginScanBeginPostingTree(&entry->btree, ginstate->index,
rootPostingTree, snapshot);
@ -410,7 +410,7 @@ restartScanEntry:
LockBuffer(entry->buffer, GIN_UNLOCK);
freeGinBtreeStack(stack);
-entry->isFinished = FALSE;
+entry->isFinished = false;
}
else if (GinGetNPosting(itup) > 0)
{
@ -418,7 +418,7 @@ restartScanEntry:
&entry->nlist);
entry->predictNumberResult = entry->nlist;
-entry->isFinished = FALSE;
+entry->isFinished = false;
}
}
@ -565,7 +565,7 @@ startScan(IndexScanDesc scan)
for (i = 0; i < so->totalentries; i++)
{
so->entries[i]->predictNumberResult /= so->totalentries;
-so->entries[i]->reduceResult = TRUE;
+so->entries[i]->reduceResult = true;
}
}
}
@ -666,7 +666,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
{
UnlockReleaseBuffer(entry->buffer);
entry->buffer = InvalidBuffer;
-entry->isFinished = TRUE;
+entry->isFinished = true;
return;
}
@ -728,7 +728,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
/*
* Sets entry->curItem to next heap item pointer > advancePast, for one entry
-* of one scan key, or sets entry->isFinished to TRUE if there are no more.
+* of one scan key, or sets entry->isFinished to true if there are no more.
*
* Item pointers are returned in ascending order.
*
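The contract just described suggests a consumer loop along these lines
(a hedged sketch; the real driver in ginget.c interleaves several such
streams):

    for (;;)
    {
        entryGetItem(ginstate, entry, advancePast, snapshot);
        if (entry->isFinished)
            break;
        /* entry->curItem is now the next item pointer > advancePast */
        advancePast = entry->curItem;
    }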
@ -775,7 +775,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
ItemPointerSetInvalid(&entry->curItem);
tbm_end_iterate(entry->matchIterator);
entry->matchIterator = NULL;
-entry->isFinished = TRUE;
+entry->isFinished = true;
break;
}
@ -835,7 +835,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
entry->matchResult->offsets[entry->offset]);
entry->offset++;
gotitem = true;
-} while (!gotitem || (entry->reduceResult == TRUE && dropItem(entry)));
+} while (!gotitem || (entry->reduceResult == true && dropItem(entry)));
}
else if (!BufferIsValid(entry->buffer))
{
@ -848,7 +848,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
if (entry->offset >= entry->nlist)
{
ItemPointerSetInvalid(&entry->curItem);
-entry->isFinished = TRUE;
+entry->isFinished = true;
break;
}
@ -876,7 +876,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
entry->curItem = entry->list[entry->offset++];
} while (ginCompareItemPointers(&entry->curItem, &advancePast) <= 0 ||
-(entry->reduceResult == TRUE && dropItem(entry)));
+(entry->reduceResult == true && dropItem(entry)));
}
}
@ -891,7 +891,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
* iff recheck is needed for this item pointer (including the case where the
* item pointer is a lossy page pointer).
*
-* If all entry streams are exhausted, sets key->isFinished to TRUE.
+* If all entry streams are exhausted, sets key->isFinished to true.
*
* Item pointers must be returned in ascending order.
*
@ -963,7 +963,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
if (allFinished)
{
/* all entries are finished */
-key->isFinished = TRUE;
+key->isFinished = true;
return;
}
@ -1051,7 +1051,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
* them. We could pass them as MAYBE as well, but if we're using the
* "shim" implementation of a tri-state consistent function (see
* ginlogic.c), it's better to pass as few MAYBEs as possible. So pass
-* them as TRUE.
+* them as true.
*
* Note that only lossy-page entries pointing to the current item's page
* should trigger this processing; we might have future lossy pages in the
@ -1064,7 +1064,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
for (i = 0; i < key->nentries; i++)
{
entry = key->scanEntry[i];
-if (entry->isFinished == FALSE &&
+if (entry->isFinished == false &&
ginCompareItemPointers(&entry->curItem, &curPageLossy) == 0)
{
if (i < key->nuserentries)
@ -1314,7 +1314,7 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
}
}
-return TRUE;
+return true;
}
@ -1508,7 +1508,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
memset(key->entryRes, GIN_FALSE, key->nentries);
}
-memset(pos->hasMatchKey, FALSE, so->nkeys);
+memset(pos->hasMatchKey, false, so->nkeys);
/*
* Outer loop iterates over multiple pending-list pages when a single heap

View File

@ -185,7 +185,7 @@ ginEntryInsert(GinState *ginstate,
IndexTuple itup;
Page page;
-insertdata.isDelete = FALSE;
+insertdata.isDelete = false;
/* During index build, count the to-be-inserted entry */
if (buildStats)
@ -221,7 +221,7 @@ ginEntryInsert(GinState *ginstate,
itup = addItemPointersToLeafTuple(ginstate, itup,
items, nitem, buildStats);
-insertdata.isDelete = TRUE;
+insertdata.isDelete = true;
}
else
{

View File

@ -235,7 +235,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
DataPageDeleteStack *me;
Buffer buffer;
Page page;
-bool meDelete = FALSE;
+bool meDelete = false;
bool isempty;
if (isRoot)
@ -274,7 +274,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
{
PostingItem *pitem = GinDataPageGetPostingItem(page, i);
-if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), FALSE, me, i))
+if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), false, me, i))
i--;
}
}
@ -291,7 +291,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
{
Assert(!isRoot);
ginDeletePage(gvs, blkno, me->leftBlkno, me->parent->blkno, myoff, me->parent->isRoot);
-meDelete = TRUE;
+meDelete = true;
}
}
@ -319,7 +319,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
{
Buffer buffer;
Page page;
-bool hasVoidPage = FALSE;
+bool hasVoidPage = false;
MemoryContext oldCxt;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
@ -339,7 +339,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
/* if root is a leaf page, we don't desire further processing */
if (GinDataLeafPageIsEmpty(page))
-hasVoidPage = TRUE;
+hasVoidPage = true;
UnlockReleaseBuffer(buffer);
@ -348,8 +348,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
else
{
OffsetNumber i;
-bool hasEmptyChild = FALSE;
-bool hasNonEmptyChild = FALSE;
+bool hasEmptyChild = false;
+bool hasNonEmptyChild = false;
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
@ -369,10 +369,10 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
-if (ginVacuumPostingTreeLeaves(gvs, children[i], FALSE))
-hasEmptyChild = TRUE;
+if (ginVacuumPostingTreeLeaves(gvs, children[i], false))
+hasEmptyChild = true;
else
-hasNonEmptyChild = TRUE;
+hasNonEmptyChild = true;
}
pfree(children);
@ -380,12 +380,12 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
vacuum_delay_point();
/*
-* All subtree is empty - just return TRUE to indicate that parent
+* All subtree is empty - just return true to indicate that parent
* must do a cleanup. Unless we are ROOT and there is a way to go upper.
*/
if (hasEmptyChild && !hasNonEmptyChild && !isRoot)
-return TRUE;
+return true;
if (hasEmptyChild)
{
@ -399,9 +399,9 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
memset(&root, 0, sizeof(DataPageDeleteStack));
root.leftBlkno = InvalidBlockNumber;
-root.isRoot = TRUE;
+root.isRoot = true;
-ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber);
+ginScanToDelete(gvs, blkno, true, &root, InvalidOffsetNumber);
ptr = root.child;
@ -416,14 +416,14 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
}
/* Here we have deleted all empty subtrees */
-return FALSE;
+return false;
}
}
static void
ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
{
-ginVacuumPostingTreeLeaves(gvs, rootBlkno, TRUE);
+ginVacuumPostingTreeLeaves(gvs, rootBlkno, true);
}
/*

View File

@ -1364,8 +1364,8 @@ gistSplit(Relation r,
IndexTupleSize(itup[0]), GiSTPageSize,
RelationGetRelationName(r))));
-memset(v.spl_lisnull, TRUE, sizeof(bool) * giststate->tupdesc->natts);
-memset(v.spl_risnull, TRUE, sizeof(bool) * giststate->tupdesc->natts);
+memset(v.spl_lisnull, true, sizeof(bool) * giststate->tupdesc->natts);
+memset(v.spl_risnull, true, sizeof(bool) * giststate->tupdesc->natts);
gistSplitByKey(r, page, itup, len, giststate, &v, 0);
/* form left and right vector */

View File

@ -197,7 +197,7 @@ gistindex_keytest(IndexScanDesc scan,
gistdentryinit(giststate, key->sk_attno - 1, &de,
datum, r, page, offset,
-FALSE, isNull);
+false, isNull);
/*
* Call the Consistent function to evaluate the test. The
@ -258,7 +258,7 @@ gistindex_keytest(IndexScanDesc scan,
gistdentryinit(giststate, key->sk_attno - 1, &de,
datum, r, page, offset,
-FALSE, isNull);
+false, isNull);
/*
* Call the Distance function to evaluate the distance. The

View File

@ -105,7 +105,7 @@ box_penalty(const BOX *original, const BOX *new)
* The GiST Consistent method for boxes
*
* Should return false if for all data items x below entry,
-* the predicate x op query must be FALSE, where op is the oper
+* the predicate x op query must be false, where op is the oper
* corresponding to strategy in the pg_amop table.
*/
Datum
@ -122,7 +122,7 @@ gist_box_consistent(PG_FUNCTION_ARGS)
*recheck = false;
if (DatumGetBoxP(entry->key) == NULL || query == NULL)
-PG_RETURN_BOOL(FALSE);
+PG_RETURN_BOOL(false);
/*
* if entry is not leaf, use rtree_internal_consistent, else use
@ -1056,7 +1056,7 @@ gist_poly_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
-entry->offset, FALSE);
+entry->offset, false);
}
else
retval = entry;
@ -1081,7 +1081,7 @@ gist_poly_consistent(PG_FUNCTION_ARGS)
*recheck = true;
if (DatumGetBoxP(entry->key) == NULL || query == NULL)
-PG_RETURN_BOOL(FALSE);
+PG_RETURN_BOOL(false);
/*
* Since the operators require recheck anyway, we can just use
@ -1124,7 +1124,7 @@ gist_circle_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
-entry->offset, FALSE);
+entry->offset, false);
}
else
retval = entry;
@ -1150,7 +1150,7 @@ gist_circle_consistent(PG_FUNCTION_ARGS)
*recheck = true;
if (DatumGetBoxP(entry->key) == NULL || query == NULL)
-PG_RETURN_BOOL(FALSE);
+PG_RETURN_BOOL(false);
/*
* Since the operators require recheck anyway, we can just use
@ -1186,7 +1186,7 @@ gist_point_compress(PG_FUNCTION_ARGS)
box->high = box->low = *point;
gistentryinit(*retval, BoxPGetDatum(box),
-entry->rel, entry->page, entry->offset, FALSE);
+entry->rel, entry->page, entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -1215,7 +1215,7 @@ gist_point_fetch(PG_FUNCTION_ARGS)
r->y = in->high.y;
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
-entry->offset, FALSE);
+entry->offset, false);
PG_RETURN_POINTER(retval);
}

View File

@ -125,7 +125,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
* check for nulls
*/
gistentryinit(entry, spl->splitVector.spl_rdatum, r, NULL,
-(OffsetNumber) 0, FALSE);
+(OffsetNumber) 0, false);
for (i = 0; i < spl->splitVector.spl_nleft; i++)
{
int j = spl->splitVector.spl_left[i];
@ -141,7 +141,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/* And conversely for the right-side tuples */
gistentryinit(entry, spl->splitVector.spl_ldatum, r, NULL,
-(OffsetNumber) 0, FALSE);
+(OffsetNumber) 0, false);
for (i = 0; i < spl->splitVector.spl_nright; i++)
{
int j = spl->splitVector.spl_right[i];
@ -177,7 +177,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
{
OffsetNumber ai = a[i];
-if (dontcare[ai] == FALSE)
+if (dontcare[ai] == false)
{
/* re-emit item into a[] */
*curwpos = ai;
@ -213,10 +213,10 @@ placeOne(Relation r, GISTSTATE *giststate, GistSplitVector *v,
rpenalty;
GISTENTRY entry;
-gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE);
+gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, false);
lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno],
identry + attno, isnull[attno]);
-gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE);
+gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, false);
rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno],
identry + attno, isnull[attno]);
@ -265,10 +265,10 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
entrySL,
entrySR;
-gistentryinit(entryL, oldL, r, NULL, 0, FALSE);
-gistentryinit(entryR, oldR, r, NULL, 0, FALSE);
-gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
-gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
+gistentryinit(entryL, oldL, r, NULL, 0, false);
+gistentryinit(entryR, oldR, r, NULL, 0, false);
+gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, false);
+gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, false);
if (sv->spl_ldatum_exists && sv->spl_rdatum_exists)
{
@ -320,8 +320,8 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
SWAPVAR(sv->spl_left, sv->spl_right, off);
SWAPVAR(sv->spl_nleft, sv->spl_nright, noff);
SWAPVAR(sv->spl_ldatum, sv->spl_rdatum, datum);
-gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
-gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
+gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, false);
+gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, false);
}
if (sv->spl_ldatum_exists)
@ -396,20 +396,20 @@ genericPickSplit(GISTSTATE *giststate, GistEntryVector *entryvec, GIST_SPLITVEC
* Calls user picksplit method for attno column to split tuples into
* two vectors.
*
-* Returns FALSE if split is complete (there are no more index columns, or
+* Returns false if split is complete (there are no more index columns, or
* there is no need to consider them because split is optimal already).
*
-* Returns TRUE and v->spl_dontcare = NULL if the picksplit result is
+* Returns true and v->spl_dontcare = NULL if the picksplit result is
* degenerate (all tuples seem to be don't-cares), so we should just
* disregard this column and split on the next column(s) instead.
*
-* Returns TRUE and v->spl_dontcare != NULL if there are don't-care tuples
+* Returns true and v->spl_dontcare != NULL if there are don't-care tuples
* that could be relocated based on the next column(s). The don't-care
* tuples have been removed from the split and must be reinserted by caller.
* There is at least one non-don't-care tuple on each side of the split,
* and union keys for all columns are updated to include just those tuples.
*
-* A TRUE result implies there is at least one more index column.
+* A true result implies there is at least one more index column.
*/
static bool
gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVector *v,
@ -610,7 +610,7 @@ gistSplitHalf(GIST_SPLITVEC *v, int len)
* attno: column we are working on (zero-based index)
*
* Outside caller must initialize v->spl_lisnull and v->spl_risnull arrays
-* to all-TRUE. On return, spl_left/spl_nleft contain indexes of tuples
+* to all-true. On return, spl_left/spl_nleft contain indexes of tuples
* to go left, spl_right/spl_nright contain indexes of tuples to go right,
* spl_lattr/spl_lisnull contain left-side union key values, and
* spl_rattr/spl_risnull contain right-side union key values. Other fields
@ -643,7 +643,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len,
&IsNull);
gistdentryinit(giststate, attno, &(entryvec->vector[i]),
datum, r, page, i,
-FALSE, IsNull);
+false, IsNull);
if (IsNull)
offNullTuples[nOffNullTuples++] = i;
}
@ -655,7 +655,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len,
* our attention to the next column. If there's no next column, just
* split page in half.
*/
-v->spl_risnull[attno] = v->spl_lisnull[attno] = TRUE;
+v->spl_risnull[attno] = v->spl_lisnull[attno] = true;
if (attno + 1 < giststate->tupdesc->natts)
gistSplitByKey(r, page, itup, len, giststate, v, attno + 1);
@ -672,7 +672,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len,
*/
v->splitVector.spl_right = offNullTuples;
v->splitVector.spl_nright = nOffNullTuples;
-v->spl_risnull[attno] = TRUE;
+v->spl_risnull[attno] = true;
v->splitVector.spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
v->splitVector.spl_nleft = 0;

View File

@ -179,7 +179,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len,
evec->vector + evec->n,
datum,
NULL, NULL, (OffsetNumber) 0,
-FALSE, IsNull);
+false, IsNull);
evec->n++;
}
@ -187,7 +187,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len,
if (evec->n == 0)
{
attr[i] = (Datum) 0;
-isnull[i] = TRUE;
+isnull[i] = true;
}
else
{
@ -204,7 +204,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len,
PointerGetDatum(evec),
PointerGetDatum(&attrsize));
-isnull[i] = FALSE;
+isnull[i] = false;
}
}
}
@ -246,17 +246,17 @@ gistMakeUnionKey(GISTSTATE *giststate, int attno,
if (isnull1 && isnull2)
{
-*dstisnull = TRUE;
+*dstisnull = true;
*dst = (Datum) 0;
}
else
{
-if (isnull1 == FALSE && isnull2 == FALSE)
+if (isnull1 == false && isnull2 == false)
{
evec->vector[0] = *entry1;
evec->vector[1] = *entry2;
}
-else if (isnull1 == FALSE)
+else if (isnull1 == false)
{
evec->vector[0] = *entry1;
evec->vector[1] = *entry1;
@ -267,7 +267,7 @@ gistMakeUnionKey(GISTSTATE *giststate, int attno,
evec->vector[1] = *entry2;
}
-*dstisnull = FALSE;
+*dstisnull = false;
*dst = FunctionCall2Coll(&giststate->unionFn[attno],
giststate->supportCollation[attno],
PointerGetDatum(evec),
@ -303,7 +303,7 @@ gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p,
datum = index_getattr(tuple, i + 1, giststate->tupdesc, &isnull[i]);
gistdentryinit(giststate, i, &attdata[i],
datum, r, p, o,
-FALSE, isnull[i]);
+false, isnull[i]);
}
}
@ -313,7 +313,7 @@ gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p,
IndexTuple
gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
{
-bool neednew = FALSE;
+bool neednew = false;
GISTENTRY oldentries[INDEX_MAX_KEYS],
addentries[INDEX_MAX_KEYS];
bool oldisnull[INDEX_MAX_KEYS],
@ -451,7 +451,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
/* Compute penalty for this column. */
datum = index_getattr(itup, j + 1, giststate->tupdesc, &IsNull);
gistdentryinit(giststate, j, &entry, datum, r, p, i,
-FALSE, IsNull);
+false, IsNull);
usize = gistpenalty(giststate, j, &entry, IsNull,
&identry[j], isnull[j]);
if (usize > 0)
@ -691,8 +691,8 @@ gistpenalty(GISTSTATE *giststate, int attno,
{
float penalty = 0.0;
-if (giststate->penaltyFn[attno].fn_strict == FALSE ||
-(isNullOrig == FALSE && isNullAdd == FALSE))
+if (giststate->penaltyFn[attno].fn_strict == false ||
+(isNullOrig == false && isNullAdd == false))
{
FunctionCall3Coll(&giststate->penaltyFn[attno],
giststate->supportCollation[attno],

View File

@ -992,7 +992,7 @@ fail:
* for the purpose. OTOH, adding a splitpoint is a very infrequent operation,
* so it may not be worth worrying about.
*
-* Returns TRUE if successful, or FALSE if allocation failed due to
+* Returns true if successful, or false if allocation failed due to
* BlockNumber overflow.
*/
static bool

View File

@ -39,7 +39,7 @@ static void _hash_readnext(IndexScanDesc scan, Buffer *bufp,
* On successful exit, scan->xs_ctup.t_self is set to the TID
* of the next heap tuple. so->currPos is updated as needed.
*
-* On failure exit (no more tuples), we return FALSE with pin
+* On failure exit (no more tuples), we return false with pin
* held on bucket page but no pins or locks held on overflow
* page.
*/
@ -283,7 +283,7 @@ _hash_readprev(IndexScanDesc scan,
* tuple(s) on the page has been loaded into so->currPos,
* scan->xs_ctup.t_self is set to the heap TID of the current tuple.
*
-* On failure exit (no more tuples), we return FALSE, with pin held on
+* On failure exit (no more tuples), we return false, with pin held on
* bucket page but no pins or locks held on overflow page.
*/
bool
@ -507,7 +507,7 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
/*
* Remember next and previous block numbers for scrollable
-* cursors to know the start position and return FALSE
+* cursors to know the start position and return false
* indicating that no more matching tuples were found. Also,
* don't reset currPage or lsn, because we expect
* _hash_kill_items to be called for the old page after this
@ -560,7 +560,7 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
/*
* Remember next and previous block numbers for scrollable
-* cursors to know the start position and return FALSE
+* cursors to know the start position and return false
* indicating that no more matching tuples were found. Also,
* don't reset currPage or lsn, because we expect
* _hash_kill_items to be called for the old page after this

View File

@ -1379,7 +1379,7 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
* heap_beginscan_strat offers an extended API that lets the caller control
* whether a nondefault buffer access strategy can be used, and whether
* syncscan can be chosen (possibly resulting in the scan not starting from
-* block zero). Both of these default to TRUE with plain heap_beginscan.
+* block zero). Both of these default to true with plain heap_beginscan.
*
* heap_beginscan_bm is an alternative entry point for setting up a
* HeapScanDesc for a bitmap heap scan. Although that scan technology is
@ -1842,16 +1842,16 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction)
* against the specified snapshot.
*
* If successful (tuple found and passes snapshot time qual), then *userbuf
-* is set to the buffer holding the tuple and TRUE is returned. The caller
+* is set to the buffer holding the tuple and true is returned. The caller
* must unpin the buffer when done with the tuple.
*
* If the tuple is not found (ie, item number references a deleted slot),
-* then tuple->t_data is set to NULL and FALSE is returned.
+* then tuple->t_data is set to NULL and false is returned.
*
-* If the tuple is found but fails the time qual check, then FALSE is returned
+* If the tuple is found but fails the time qual check, then false is returned
* but tuple->t_data is left pointing to the tuple.
*
-* keep_buf determines what is done with the buffer in the FALSE-result cases.
+* keep_buf determines what is done with the buffer in the false-result cases.
* When the caller specifies keep_buf = true, we retain the pin on the buffer
* and return it in *userbuf (so the caller must eventually unpin it); when
* keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
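A hedged sketch of the keep_buf = false calling pattern (the trailing
stats-relation argument of this era's heap_fetch() is passed as NULL):

    HeapTupleData tuple;
    Buffer buffer;

    tuple.t_self = tid;     /* TID to look up; assumed in scope */
    if (heap_fetch(relation, snapshot, &tuple, &buffer, false, NULL))
    {
        /* visible tuple; we hold a pin until we release it */
        ReleaseBuffer(buffer);
    }
    /* on the false path, no pin is held and buffer is InvalidBuffer */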
@ -1993,15 +1993,15 @@ heap_fetch(Relation relation,
* of a HOT chain), and buffer is the buffer holding this tuple. We search
* for the first chain member satisfying the given snapshot. If one is
* found, we update *tid to reference that tuple's offset number, and
-* return TRUE. If no match, return FALSE without modifying *tid.
+* return true. If no match, return false without modifying *tid.
*
* heapTuple is a caller-supplied buffer. When a match is found, we return
* the tuple here, in addition to updating *tid. If no match is found, the
* contents of this buffer on return are undefined.
*
* If all_dead is not NULL, we check non-visible tuples to see if they are
-* globally dead; *all_dead is set TRUE if all members of the HOT chain
-* are vacuumable, FALSE if not.
+* globally dead; *all_dead is set true if all members of the HOT chain
+* are vacuumable, false if not.
*
* Unlike heap_fetch, the caller must already have pin and (at least) share
* lock on the buffer; it is still pinned/locked at exit. Also unlike
@ -6594,7 +6594,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
* are older than the specified cutoff XID and cutoff MultiXactId. If so,
* setup enough state (in the *frz output argument) to later execute and
-* WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
+* WAL-log what we would need to do, and return true. Return false if nothing
* is to be changed. In addition, set *totally_frozen_p to true if the tuple
* will be totally frozen after these operations are performed and false if
* more freezing will eventually be required.
@ -7242,7 +7242,7 @@ heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
-* are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+* are older than the specified cutoff XID or MultiXactId. If so, return true.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.

View File

@ -39,7 +39,7 @@ typedef struct
OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
OffsetNumber nowdead[MaxHeapTuplesPerPage];
OffsetNumber nowunused[MaxHeapTuplesPerPage];
-/* marked[i] is TRUE if item i is entered in one of the above arrays */
+/* marked[i] is true if item i is entered in one of the above arrays */
bool marked[MaxHeapTuplesPerPage + 1];
} PruneState;
@ -170,7 +170,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
* or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
*
* If report_stats is true then we send the number of reclaimed heap-only
-* tuples to pgstats. (This must be FALSE during vacuum, since vacuum will
+* tuples to pgstats. (This must be false during vacuum, since vacuum will
* send its own new total to pgstats, and we don't want this delta applied
* on top of that.)
*

View File

@ -140,9 +140,9 @@ identify_opfamily_groups(CatCList *oprlist, CatCList *proclist)
/*
* Validate the signature (argument and result types) of an opclass support
-* function. Return TRUE if OK, FALSE if not.
+* function. Return true if OK, false if not.
*
* The "..." represents maxargs argument-type OIDs. If "exact" is TRUE, they
* The "..." represents maxargs argument-type OIDs. If "exact" is true, they
* must match the function arg types exactly, else only binary-coercibly.
* In any case the function result type must match restype exactly.
*/
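As a hedged example of the calling convention, a btree ordering support
function could be checked roughly the way the amvalidate code does:

    /* comparison proc must be exactly (lefttype, righttype) -> int4 */
    ok = check_amproc_signature(procform->amproc, INT4OID, true,
                                2, 2, procform->amproclefttype,
                                procform->amprocrighttype);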
@ -184,7 +184,7 @@ check_amproc_signature(Oid funcid, Oid restype, bool exact,
/*
* Validate the signature (argument and result types) of an opclass operator.
-* Return TRUE if OK, FALSE if not.
+* Return true if OK, false if not.
*
* Currently, we can hard-wire this as accepting only binary operators. Also,
* we can insist on exact type matches, since the given lefttype/righttype

View File

@ -784,7 +784,7 @@ index_can_return(Relation indexRelation, int attno)
{
RELATION_CHECKS;
-/* amcanreturn is optional; assume FALSE if not provided by AM */
+/* amcanreturn is optional; assume false if not provided by AM */
if (indexRelation->rd_amroutine->amcanreturn == NULL)
return false;

View File

@ -99,8 +99,8 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
* don't actually insert.
*
* The result value is only significant for UNIQUE_CHECK_PARTIAL:
-* it must be TRUE if the entry is known unique, else FALSE.
-* (In the current implementation we'll also return TRUE after a
+* it must be true if the entry is known unique, else false.
+* (In the current implementation we'll also return true after a
* successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but
* that's just a coding artifact.)
*/

View File

@ -524,7 +524,7 @@ _bt_compare(Relation rel,
* scan->xs_ctup.t_self is set to the heap TID of the current tuple,
* and if requested, scan->xs_itup points to a copy of the index tuple.
*
-* If there are no matching items in the index, we return FALSE, with no
+* If there are no matching items in the index, we return false, with no
* pins or locks held.
*
* Note that scan->keyData[], and the so->keyData[] scankey built from it,
@ -1336,7 +1336,7 @@ _bt_saveitem(BTScanOpaque so, int itemIndex,
*
* For success on a scan using a non-MVCC snapshot we hold a pin, but not a
* read lock, on that page. If we do not hold the pin, we set so->currPos.buf
-* to InvalidBuffer. We return TRUE to indicate success.
+* to InvalidBuffer. We return true to indicate success.
*/
static bool
_bt_steppage(IndexScanDesc scan, ScanDirection dir)
@ -1440,10 +1440,10 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
*
* On success exit, so->currPos is updated to contain data from the next
* interesting page. Caller is responsible to release lock and pin on
-* buffer on success. We return TRUE to indicate success.
+* buffer on success. We return true to indicate success.
*
* If there are no more matching records in the given direction, we drop all
-* locks and pins, set so->currPos.buf to InvalidBuffer, and return FALSE.
+* locks and pins, set so->currPos.buf to InvalidBuffer, and return false.
*/
static bool
_bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
@ -1608,7 +1608,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
/*
* _bt_parallel_readpage() -- Read current page containing valid data for scan
*
-* On success, release lock and maybe pin on buffer. We return TRUE to
+* On success, release lock and maybe pin on buffer. We return true to
* indicate success.
*/
static bool

View File

@ -540,8 +540,8 @@ _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
/*
* _bt_advance_array_keys() -- Advance to next set of array elements
*
-* Returns TRUE if there is another set of values to consider, FALSE if not.
-* On TRUE result, the scankeys are initialized with the next set of values.
+* Returns true if there is another set of values to consider, false if not.
+* On true result, the scankeys are initialized with the next set of values.
*/
bool
_bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
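Together with _bt_start_array_keys(), the intended driver loop is
roughly this hedged sketch:

    /* visit every combination of SK_SEARCHARRAY elements */
    _bt_start_array_keys(scan, ForwardScanDirection);
    do
    {
        /* ... run the scan with the current array elements ... */
    } while (_bt_advance_array_keys(scan, ForwardScanDirection));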
@ -724,7 +724,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* for a forward scan; or after the last match for a backward scan.)
*
* As a byproduct of this work, we can detect contradictory quals such
* as "x = 1 AND x > 2". If we see that, we return so->qual_ok = FALSE,
* as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false,
* indicating the scan need not be run at all since no tuples can match.
* (In this case we do not bother completing the output key array!)
* Again, missing cross-type operators might cause us to fail to prove the
@ -1020,7 +1020,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
*
* If the opfamily doesn't supply a complete set of cross-type operators we
* may not be able to make the comparison. If we can make the comparison
-* we store the operator result in *result and return TRUE. We return FALSE
+* we store the operator result in *result and return true. We return false
* if the comparison could not be made.
*
* Note: op always points at the same ScanKey as either leftarg or rightarg.
@ -1185,8 +1185,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
-* a NULL means that the qual cannot be satisfied. We return TRUE if the
-* comparison value isn't NULL, or FALSE if the scan should be abandoned.
+* a NULL means that the qual cannot be satisfied. We return true if the
+* comparison value isn't NULL, or false if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
* on a rescan we will be looking at already-processed scankeys. Hence

View File

@ -580,7 +580,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position,
* Test to see if the user-defined picksplit function failed to do its job,
* ie, it put all the leaf tuples into the same node.
* If so, randomly divide the tuples into several nodes (all with the same
-* label) and return TRUE to select allTheSame mode for this inner tuple.
+* label) and return true to select allTheSame mode for this inner tuple.
*
* (This code is also used to forcibly select allTheSame mode for nulls.)
*

View File

@ -727,7 +727,7 @@ BootStrapCLOG(void)
/*
* Initialize (or reinitialize) a page of CLOG to zeroes.
-* If writeXlog is TRUE, also emit an XLOG record saying we did this.
+* If writeXlog is true, also emit an XLOG record saying we did this.
*
* The page is not actually written, just set up in shared memory.
* The slot number of the new page is returned.

View File

@ -531,7 +531,7 @@ BootStrapCommitTs(void)
/*
* Initialize (or reinitialize) a page of CommitTs to zeroes.
-* If writeXlog is TRUE, also emit an XLOG record saying we did this.
+* If writeXlog is true, also emit an XLOG record saying we did this.
*
* The page is not actually written, just set up in shared memory.
* The slot number of the new page is returned.

View File

@ -1892,7 +1892,7 @@ BootStrapMultiXact(void)
/*
* Initialize (or reinitialize) a page of MultiXactOffset to zeroes.
-* If writeXlog is TRUE, also emit an XLOG record saying we did this.
+* If writeXlog is true, also emit an XLOG record saying we did this.
*
* The page is not actually written, just set up in shared memory.
* The slot number of the new page is returned.

View File

@ -629,7 +629,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno)
* Physical read of a (previously existing) page into a buffer slot
*
* On failure, we cannot just ereport(ERROR) since caller has put state in
-* shared memory that must be undone. So, we return FALSE and save enough
+* shared memory that must be undone. So, we return false and save enough
* info in static variables to let SlruReportIOError make the report.
*
* For now, assume it's not worth keeping a file pointer open across
@ -705,7 +705,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* Physical write of a page from a buffer slot
*
* On failure, we cannot just ereport(ERROR) since caller has put state in
-* shared memory that must be undone. So, we return FALSE and save enough
+* shared memory that must be undone. So, we return false and save enough
* info in static variables to let SlruReportIOError make the report.
*
* For now, assume it's not worth keeping a file pointer open across

View File

@ -170,9 +170,9 @@ typedef struct GlobalTransactionData
Oid owner; /* ID of user that executed the xact */
BackendId locking_backend; /* backend currently working on the xact */
-bool valid; /* TRUE if PGPROC entry is in proc array */
-bool ondisk; /* TRUE if prepare state file is on disk */
-bool inredo; /* TRUE if entry was added via xlog_redo */
+bool valid; /* true if PGPROC entry is in proc array */
+bool ondisk; /* true if prepare state file is on disk */
+bool inredo; /* true if entry was added via xlog_redo */
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
} GlobalTransactionData;

View File

@ -671,8 +671,8 @@ SubTransactionIsActive(SubTransactionId subxid)
/*
* GetCurrentCommandId
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
* "used" must be true if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. false means the ID is being fetched
* for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
@ -3470,7 +3470,7 @@ BeginTransactionBlock(void)
* This executes a PREPARE command.
*
* Since PREPARE may actually do a ROLLBACK, the result indicates what
-* happened: TRUE for PREPARE, FALSE for ROLLBACK.
+* happened: true for PREPARE, false for ROLLBACK.
*
* Note that we don't actually do anything here except change blockState.
* The real work will be done in the upcoming PrepareTransaction().
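A hedged sketch of how a caller can act on that result (the tag strings
are illustrative, not taken from the source):

    /* PREPARE TRANSACTION 'gid' may silently turn into a rollback */
    if (PrepareTransactionBlock(gid))
        tag = "PREPARE TRANSACTION";
    else
        tag = "ROLLBACK";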
@ -3522,7 +3522,7 @@ PrepareTransactionBlock(char *gid)
* This executes a COMMIT command.
*
* Since COMMIT may actually do a ROLLBACK, the result indicates what
-* happened: TRUE for COMMIT, FALSE for ROLLBACK.
+* happened: true for COMMIT, false for ROLLBACK.
*
* Note that we don't actually do anything here except change blockState.
* The real work will be done in the upcoming CommitTransactionCommand().

View File

@ -2324,7 +2324,7 @@ XLogCheckpointNeeded(XLogSegNo new_segno)
/*
* Write and/or fsync the log at least as far as WriteRqst indicates.
*
-* If flexible == TRUE, we don't have to write as far as WriteRqst, but
+* If flexible == true, we don't have to write as far as WriteRqst, but
* may stop at any convenient boundary (such as a cache or logfile boundary).
* This option allows us to avoid uselessly issuing multiple writes when a
* single one would do.
@ -2945,7 +2945,7 @@ XLogFlush(XLogRecPtr record)
*
* This routine is invoked periodically by the background walwriter process.
*
-* Returns TRUE if there was any work to do, even if we skipped flushing due
+* Returns true if there was any work to do, even if we skipped flushing due
* to wal_writer_delay/wal_writer_flush_after.
*/
bool
@ -3141,12 +3141,12 @@ XLogNeedsFlush(XLogRecPtr record)
*
* log, seg: identify segment to be created/opened.
*
-* *use_existent: if TRUE, OK to use a pre-existing file (else, any
-* pre-existing file will be deleted). On return, TRUE if a pre-existing
+* *use_existent: if true, OK to use a pre-existing file (else, any
+* pre-existing file will be deleted). On return, true if a pre-existing
* file was used.
*
-* use_lock: if TRUE, acquire ControlFileLock while moving file into
-* place. This should be TRUE except during bootstrap log creation. The
+* use_lock: if true, acquire ControlFileLock while moving file into
+* place. This should be true except during bootstrap log creation. The
* caller must *not* hold the lock at call.
*
* Returns FD of opened file.
@ -3441,24 +3441,24 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno,
* filename while it's being created) and to recycle an old segment.
*
* *segno: identify segment to install as (or first possible target).
-* When find_free is TRUE, this is modified on return to indicate the
+* When find_free is true, this is modified on return to indicate the
* actual installation location or last segment searched.
*
* tmppath: initial name of file to install. It will be renamed into place.
*
-* find_free: if TRUE, install the new segment at the first empty segno
-* number at or after the passed numbers. If FALSE, install the new segment
+* find_free: if true, install the new segment at the first empty segno
+* number at or after the passed numbers. If false, install the new segment
* exactly where specified, deleting any existing segment file there.
*
* max_segno: maximum segment number to install the new file as. Fail if no
* free slot is found between *segno and max_segno. (Ignored when find_free
-* is FALSE.)
+* is false.)
*
-* use_lock: if TRUE, acquire ControlFileLock while moving file into
-* place. This should be TRUE except during bootstrap log creation. The
+* use_lock: if true, acquire ControlFileLock while moving file into
+* place. This should be true except during bootstrap log creation. The
* caller must *not* hold the lock at call.
*
-* Returns TRUE if the file was installed successfully. FALSE indicates that
+* Returns true if the file was installed successfully. false indicates that
* max_segno limit was exceeded, or an error occurred while renaming the
* file into place.
*/
@ -5680,7 +5680,7 @@ getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime)
* For point-in-time recovery, this function decides whether we want to
* stop applying the XLOG before the current record.
*
-* Returns TRUE if we are stopping, FALSE otherwise. If stopping, some
+* Returns true if we are stopping, false otherwise. If stopping, some
* information is saved in recoveryStopXid et al for use in annotating the
* new timeline's history file.
*/
@ -6659,7 +6659,7 @@ StartupXLOG(void)
ereport(DEBUG1,
(errmsg_internal("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE")));
wasShutdown ? "true" : "false")));
ereport(DEBUG1,
(errmsg_internal("next transaction ID: %u:%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid,
@ -11192,11 +11192,11 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
*
-* Returns TRUE if a backup_label was found (and fills the checkpoint
+* Returns true if a backup_label was found (and fills the checkpoint
* location and its REDO location into *checkPointLoc and RedoStartLSN,
-* respectively); returns FALSE if not. If this backup_label came from a
-* streamed backup, *backupEndRequired is set to TRUE. If this backup_label
-* was created during recovery, *backupFromStandby is set to TRUE.
+* respectively); returns false if not. If this backup_label came from a
+* streamed backup, *backupEndRequired is set to true. If this backup_label
+* was created during recovery, *backupFromStandby is set to true.
*/
static bool
read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
@ -11279,8 +11279,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
* recovering from a backup dump file, and we therefore need to create symlinks
* as per the information present in tablespace_map file.
*
-* Returns TRUE if a tablespace_map file was found (and fills the link
-* information for all the tablespace links present in file); returns FALSE
+* Returns true if a tablespace_map file was found (and fills the link
+* information for all the tablespace links present in file); returns false
* if not.
*/
static bool
@ -11714,7 +11714,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* If primary_conninfo is set, launch walreceiver to try
* to stream the missing WAL.
*
-* If fetching_ckpt is TRUE, RecPtr points to the initial
+* If fetching_ckpt is true, RecPtr points to the initial
* checkpoint location. In that case, we use RedoStartLSN
* as the streaming start position instead of RecPtr, so
* that when we later jump backwards to start redo at

View File

@ -33,11 +33,11 @@
* Attempt to retrieve the specified file from off-line archival storage.
* If successful, fill "path" with its complete path (note that this will be
* a temp file name that doesn't follow the normal naming convention), and
-* return TRUE.
+* return true.
*
* If not successful, fill "path" with the name of the normal on-line file
* (which may or may not actually exist, but we'll try to use it), and return
-FALSE.
+false.
*
* For fixed-size files, the caller may pass the expected size as an
* additional crosscheck on successful recovery. If the file size is not

View File

@ -797,8 +797,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
/*
* Create a compressed version of a backup block image.
*
-* Returns FALSE if compression fails (i.e., compressed result is actually
-* bigger than original). Otherwise, returns TRUE and sets 'dlen' to
+* Returns false if compression fails (i.e., compressed result is actually
+* bigger than original). Otherwise, returns true and sets 'dlen' to
* the length of compressed block image.
*/
static bool
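The compress-or-give-up pattern described above can be sketched with
pglz (hedged; the real routine also removes the page's free-space
"hole" before compressing):

    int32 len = pglz_compress(source, orig_len, dest,
                              PGLZ_strategy_default);

    if (len < 0)
        return false;       /* result would not be smaller: give up */
    *dlen = (uint16) len;
    return true;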
@ -965,7 +965,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
* log_newpage_buffer instead.
*
* If the page follows the standard page layout, with a PageHeader and unused
-* space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows
+* space between pd_lower and pd_upper, set 'page_std' to true. That allows
* the unused space to be left out from the WAL record, making it smaller.
*/
XLogRecPtr
@ -1002,7 +1002,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
* function. This function will set the page LSN.
*
* If the page follows the standard page layout, with a PageHeader and unused
-* space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows
+* space between pd_lower and pd_upper, set 'page_std' to true. That allows
* the unused space to be left out from the WAL record, making it smaller.
*/
XLogRecPtr

View File

@ -1302,8 +1302,8 @@ err:
* Returns information about the block that a block reference refers to.
*
* If the WAL record contains a block reference with the given ID, *rnode,
-* *forknum, and *blknum are filled in (if not NULL), and returns TRUE.
-* Otherwise returns FALSE.
+* *forknum, and *blknum are filled in (if not NULL), and returns true.
+* Otherwise returns false.
*/
bool
XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,
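A hedged usage sketch from a redo routine's point of view:

    RelFileNode rnode;
    ForkNumber forknum;
    BlockNumber blkno;

    if (XLogRecGetBlockTag(record, 0, &rnode, &forknum, &blkno))
    {
        /* block reference 0 exists; all three identifiers are set */
    }
    else
    {
        /* this record carries no block reference with ID 0 */
    }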