
Change TRUE/FALSE to true/false

The lower-case spellings are standard in C and C++ and are used in most
parts of the PostgreSQL sources.  The upper-case spellings are only used
in some files/modules.  So standardize on the standard spellings.

The APIs for ICU, Perl, and Windows define their own TRUE and FALSE, so
those are left as is when using those APIs.

In code comments, we use the lower-case spelling for the C concepts and
keep the upper-case spelling for the SQL concepts.
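
For illustration only (the helper below is hypothetical, not part of this
commit), the convention reads like this in C code:

    #include <stdbool.h>

    static bool
    state_is_valid(int state)
    {
        bool    valid = false;  /* C code: standard lower-case spellings */

        if (state != 0)
            valid = true;
        return valid;
    }

    /*
     * When calling an API that defines its own macros (ICU, Perl, Windows),
     * the upper-case spelling stays, e.g. on Windows:
     *     SetHandleInformation(h, HANDLE_FLAG_INHERIT, TRUE);
     */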

Reviewed-by: Michael Paquier <michael.paquier@gmail.com>
Author: Peter Eisentraut
Date: 2017-08-16 00:22:32 -04:00
Parent: 4497f2f3b3
Commit: 2eb4a831e5
216 changed files with 1168 additions and 1168 deletions

File: src/backend/storage/buffer/bufmgr.c

@@ -975,7 +975,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
*
* The returned buffer is pinned and is already marked as holding the
* desired page. If it already did have the desired page, *foundPtr is
- * set TRUE. Otherwise, *foundPtr is set FALSE and the buffer is marked
+ * set true. Otherwise, *foundPtr is set false and the buffer is marked
* as IO_IN_PROGRESS; ReadBuffer will now need to do I/O to fill it.
*
* *foundPtr is actually redundant with the buffer's BM_VALID flag, but
@@ -1025,7 +1025,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/* Can release the mapping lock as soon as we've pinned it */
LWLockRelease(newPartitionLock);
- *foundPtr = TRUE;
+ *foundPtr = true;
if (!valid)
{
@@ -1042,7 +1042,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If we get here, previous attempts to read the buffer must
* have failed ... but we shall bravely try again.
*/
- *foundPtr = FALSE;
+ *foundPtr = false;
}
}
@@ -1237,7 +1237,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/* Can release the mapping lock as soon as we've pinned it */
LWLockRelease(newPartitionLock);
- *foundPtr = TRUE;
+ *foundPtr = true;
if (!valid)
{
@@ -1254,7 +1254,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If we get here, previous attempts to read the buffer
* must have failed ... but we shall bravely try again.
*/
- *foundPtr = FALSE;
+ *foundPtr = false;
}
}
@@ -1324,9 +1324,9 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* read it before we did, so there's nothing left for BufferAlloc() to do.
*/
if (StartBufferIO(buf, true))
- *foundPtr = FALSE;
+ *foundPtr = false;
else
- *foundPtr = TRUE;
+ *foundPtr = true;
return buf;
}
@@ -1564,7 +1564,7 @@ ReleaseAndReadBuffer(Buffer buffer,
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns true if buffer is BM_VALID, else false. This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
@@ -1688,7 +1688,7 @@ PinBuffer_Locked(BufferDesc *buf)
* This should be applied only to shared buffers, never local ones.
*
* Most but not all callers want CurrentResourceOwner to be adjusted.
- * Those that don't should pass fixOwner = FALSE.
+ * Those that don't should pass fixOwner = false.
*/
static void
UnpinBuffer(BufferDesc *buf, bool fixOwner)
@@ -3712,7 +3712,7 @@ HoldingBufferPinThatDelaysRecovery(void)
* ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
*
* We won't loop, but just check once to see if the pin count is OK. If
- * not, return FALSE with no lock held.
+ * not, return false with no lock held.
*/
bool
ConditionalLockBufferForCleanup(Buffer buffer)
@@ -3868,8 +3868,8 @@ WaitIO(BufferDesc *buf)
* and output operations only on buffers that are BM_VALID and BM_DIRTY,
* so we can always tell if the work is already done.
*
- * Returns TRUE if we successfully marked the buffer as I/O busy,
- * FALSE if someone else already did the work.
+ * Returns true if we successfully marked the buffer as I/O busy,
+ * false if someone else already did the work.
*/
static bool
StartBufferIO(BufferDesc *buf, bool forInput)
@@ -3929,7 +3929,7 @@ StartBufferIO(BufferDesc *buf, bool forInput)
* We hold the buffer's io_in_progress lock
* The buffer is Pinned
*
- * If clear_dirty is TRUE and BM_JUST_DIRTIED is not set, we clear the
+ * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
* buffer's BM_DIRTY flag. This is appropriate when terminating a
* successful write. The check on BM_JUST_DIRTIED is necessary to avoid
* marking the buffer clean if it was re-dirtied while we were writing.

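The *foundPtr contract in the hunks above follows a single caller-side
pattern; a simplified sketch of it (error handling and the actual read are
omitted, and the wrapper function is hypothetical):

    static void
    read_page_sketch(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
    {
        bool        found;
        BufferDesc *bufHdr;

        bufHdr = BufferAlloc(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
                             NULL /* default strategy */, &found);
        if (found)
            return;             /* buffer already held a valid copy: no I/O */

        /* found == false: buffer is IO_IN_PROGRESS; read the page here */
    }
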
File: src/backend/storage/buffer/localbuf.c

@@ -145,11 +145,11 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
ResourceOwnerRememberBuffer(CurrentResourceOwner,
BufferDescriptorGetBuffer(bufHdr));
if (buf_state & BM_VALID)
- *foundPtr = TRUE;
+ *foundPtr = true;
else
{
/* Previous read attempt must have failed; try again */
- *foundPtr = FALSE;
+ *foundPtr = false;
}
return bufHdr;
}
@@ -268,7 +268,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
buf_state += BUF_USAGECOUNT_ONE;
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
- *foundPtr = FALSE;
+ *foundPtr = false;
return bufHdr;
}

File: src/backend/storage/file/buffile.c

@@ -68,7 +68,7 @@ struct BufFile
* avoid making redundant FileSeek calls.
*/
- bool isTemp; /* can only add files if this is TRUE */
+ bool isTemp; /* can only add files if this is true */
bool isInterXact; /* keep open over transactions? */
bool dirty; /* does buffer need to be written? */

File: src/backend/storage/file/fd.c

@@ -2577,7 +2577,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces)
/*
* TempTablespacesAreSet
*
- * Returns TRUE if SetTempTablespaces has been called in current transaction.
+ * Returns true if SetTempTablespaces has been called in current transaction.
* (This is just so that tablespaces.c doesn't need its own per-transaction
* state.)
*/

File: src/backend/storage/ipc/procarray.c

@@ -1791,7 +1791,7 @@ GetSnapshotData(Snapshot snapshot)
* check that the source transaction is still running, and we'd better do
* that atomically with installing the new xmin.
*
- * Returns TRUE if successful, FALSE if source xact is no longer running.
+ * Returns true if successful, false if source xact is no longer running.
*/
bool
ProcArrayInstallImportedXmin(TransactionId xmin,
@@ -1866,7 +1866,7 @@ ProcArrayInstallImportedXmin(TransactionId xmin,
* PGPROC of the transaction from which we imported the snapshot, rather than
* an XID.
*
- * Returns TRUE if successful, FALSE if source xact is no longer running.
+ * Returns true if successful, false if source xact is no longer running.
*/
bool
ProcArrayInstallRestoredXmin(TransactionId xmin, PGPROC *proc)
@@ -2873,7 +2873,7 @@ CountUserBackends(Oid roleid)
* The current backend is always ignored; it is caller's responsibility to
* check whether the current backend uses the given DB, if it's important.
*
- * Returns TRUE if there are (still) other backends in the DB, FALSE if not.
+ * Returns true if there are (still) other backends in the DB, false if not.
* Also, *nbackends and *nprepared are set to the number of other backends
* and prepared transactions in the DB, respectively.
*

File: src/backend/storage/ipc/shmem.c

@@ -257,7 +257,7 @@ ShmemAllocUnlocked(Size size)
/*
* ShmemAddrIsValid -- test if an address refers to shared memory
*
- * Returns TRUE if the pointer points within the shared memory segment.
+ * Returns true if the pointer points within the shared memory segment.
*/
bool
ShmemAddrIsValid(const void *addr)
@@ -361,7 +361,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* for it. If it exists already, a pointer to the existing
* structure is returned.
*
- * Returns: pointer to the object. *foundPtr is set TRUE if the object was
+ * Returns: pointer to the object. *foundPtr is set true if the object was
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
@@ -388,7 +388,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
/* Must be initializing a (non-standalone) backend */
Assert(shmemseghdr->index != NULL);
structPtr = shmemseghdr->index;
- *foundPtr = TRUE;
+ *foundPtr = true;
}
else
{
@@ -403,7 +403,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Assert(shmemseghdr->index == NULL);
structPtr = ShmemAlloc(size);
shmemseghdr->index = structPtr;
- *foundPtr = FALSE;
+ *foundPtr = false;
}
LWLockRelease(ShmemIndexLock);
return structPtr;

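ShmemInitStruct's found flag drives the usual create-or-attach idiom; a
minimal sketch, assuming a hypothetical module struct and init function:

    typedef struct MySharedState
    {
        int         counter;
    } MySharedState;                /* hypothetical */

    static MySharedState *myState;

    void
    MyShmemInit(void)
    {
        bool        found;

        myState = (MySharedState *)
            ShmemInitStruct("My Module State", sizeof(MySharedState), &found);
        if (!found)
            myState->counter = 0;   /* first process here: initialize */
        /* if found, another process already set it up; just attach */
    }
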
File: src/backend/storage/ipc/shmqueue.c

@@ -40,7 +40,7 @@ SHMQueueInit(SHM_QUEUE *queue)
}
/*
- * SHMQueueIsDetached -- TRUE if element is not currently
+ * SHMQueueIsDetached -- true if element is not currently
* in a queue.
*/
bool
@@ -174,7 +174,7 @@ SHMQueuePrev(const SHM_QUEUE *queue, const SHM_QUEUE *curElem, Size linkOffset)
}
/*
- * SHMQueueEmpty -- TRUE if queue head is only element, FALSE otherwise
+ * SHMQueueEmpty -- true if queue head is only element, false otherwise
*/
bool
SHMQueueEmpty(const SHM_QUEUE *queue)
@@ -184,7 +184,7 @@ SHMQueueEmpty(const SHM_QUEUE *queue)
if (queue->prev == queue)
{
Assert(queue->next == queue);
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}

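The true/false results above fall out of the circular-list representation:
an initialized queue points at itself. A two-line sketch (the caller is
hypothetical):

    static void
    queue_sketch(SHM_QUEUE *q)
    {
        SHMQueueInit(q);            /* sets q->prev = q->next = q */
        Assert(SHMQueueEmpty(q));   /* head is the only element => true */
    }
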
File: src/backend/storage/ipc/sinvaladt.c

@@ -627,7 +627,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
* SICleanupQueue
* Remove messages that have been consumed by all active backends
*
- * callerHasWriteLock is TRUE if caller is holding SInvalWriteLock.
+ * callerHasWriteLock is true if caller is holding SInvalWriteLock.
* minFree is the minimum number of message slots to make free.
*
* Possible side effects of this routine include marking one or more

File: src/backend/storage/lmgr/deadlock.c

@@ -307,7 +307,7 @@ GetBlockingAutoVacuumPgproc(void)
* by an outer level of recursion. Add to this each possible solution
* constraint for any cycle detected at this level.
*
- * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
+ * Returns true if no solution exists. Returns false if a deadlock-free
* state is attainable, in which case waitOrders[] shows the required
* rearrangements of lock wait queues (if any).
*/
@@ -432,8 +432,8 @@ TestConfiguration(PGPROC *startProc)
* FindLockCycle -- basic check for deadlock cycles
*
* Scan outward from the given proc to see if there is a cycle in the
- * waits-for graph that includes this proc. Return TRUE if a cycle
- * is found, else FALSE. If a cycle is found, we return a list of
+ * waits-for graph that includes this proc. Return true if a cycle
+ * is found, else false. If a cycle is found, we return a list of
* the "soft edges", if any, included in the cycle. These edges could
* potentially be eliminated by rearranging wait queues. We also fill
* deadlockDetails[] with information about the detected cycle; this info
@@ -792,8 +792,8 @@ FindLockCycleRecurseMember(PGPROC *checkProc,
* of nWaitOrders WAIT_ORDER structs in waitOrders[], with PGPROC array
* workspace in waitOrderProcs[].
*
- * Returns TRUE if able to build an ordering that satisfies all the
- * constraints, FALSE if not (there are contradictory constraints).
+ * Returns true if able to build an ordering that satisfies all the
+ * constraints, false if not (there are contradictory constraints).
*/
static bool
ExpandConstraints(EDGE *constraints,
@@ -864,8 +864,8 @@ ExpandConstraints(EDGE *constraints,
* the "blocker" in the output array. The EDGE array may well contain
* edges associated with other locks; these should be ignored.
*
- * Returns TRUE if able to build an ordering that satisfies all the
- * constraints, FALSE if not (there are contradictory constraints).
+ * Returns true if able to build an ordering that satisfies all the
+ * constraints, false if not (there are contradictory constraints).
*/
static bool
TopoSort(LOCK *lock,

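TopoSort's contract (true iff an ordering satisfying every constraint
exists) is that of a constrained topological sort. For intuition, a generic
stand-alone sketch of the idea, not PostgreSQL's implementation (which also
handles groups of processes sharing a lock's wait queue):

    #include <stdbool.h>

    #define MAXNODES 8

    /* edges[i] = {a, b} means "a must come before b" */
    static bool
    topo_sort_sketch(int nnodes, int (*edges)[2], int nedges, int *order)
    {
        int     indegree[MAXNODES] = {0};
        bool    emitted[MAXNODES] = {false};
        int     nemitted = 0;

        for (int i = 0; i < nedges; i++)
            indegree[edges[i][1]]++;

        while (nemitted < nnodes)
        {
            int     next = -1;

            /* pick any not-yet-emitted node with no pending predecessors */
            for (int n = 0; n < nnodes; n++)
            {
                if (!emitted[n] && indegree[n] == 0)
                {
                    next = n;
                    break;
                }
            }
            if (next < 0)
                return false;   /* remaining nodes form a cycle */

            order[nemitted++] = next;
            emitted[next] = true;
            for (int i = 0; i < nedges; i++)
            {
                if (edges[i][0] == next)
                    indegree[edges[i][1]]--;
            }
        }
        return true;            /* order[] now satisfies all constraints */
    }
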
File: src/backend/storage/lmgr/lmgr.c

@@ -129,7 +129,7 @@ LockRelationOid(Oid relid, LOCKMODE lockmode)
* ConditionalLockRelationOid
*
* As above, but only lock if we can get the lock without blocking.
- * Returns TRUE iff the lock was acquired.
+ * Returns true iff the lock was acquired.
*
* NOTE: we do not currently need conditional versions of all the
* LockXXX routines in this file, but they could easily be added if needed.
@@ -344,7 +344,7 @@ LockRelationForExtension(Relation relation, LOCKMODE lockmode)
* ConditionalLockRelationForExtension
*
* As above, but only lock if we can get the lock without blocking.
- * Returns TRUE iff the lock was acquired.
+ * Returns true iff the lock was acquired.
*/
bool
ConditionalLockRelationForExtension(Relation relation, LOCKMODE lockmode)
@@ -413,7 +413,7 @@ LockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode)
* ConditionalLockPage
*
* As above, but only lock if we can get the lock without blocking.
- * Returns TRUE iff the lock was acquired.
+ * Returns true iff the lock was acquired.
*/
bool
ConditionalLockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode)
@@ -469,7 +469,7 @@ LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode)
* ConditionalLockTuple
*
* As above, but only lock if we can get the lock without blocking.
- * Returns TRUE iff the lock was acquired.
+ * Returns true iff the lock was acquired.
*/
bool
ConditionalLockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode)
@@ -601,7 +601,7 @@ XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid,
* ConditionalXactLockTableWait
*
* As above, but only lock if we can get the lock without blocking.
- * Returns TRUE if the lock was acquired.
+ * Returns true if the lock was acquired.
*/
bool
ConditionalXactLockTableWait(TransactionId xid)

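All the ConditionalLock* routines share the same calling convention; a
hedged usage sketch (the wrapper function is hypothetical):

    static bool
    try_exclusive_work(Oid relid)
    {
        if (!ConditionalLockRelationOid(relid, AccessExclusiveLock))
            return false;       /* lock busy: give up instead of blocking */

        /* ... do the work that needed the lock ... */

        UnlockRelationOid(relid, AccessExclusiveLock);
        return true;
    }
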
File: src/backend/storage/lmgr/lock.c

@@ -768,7 +768,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
locallock->nLocks = 0;
locallock->numLockOwners = 0;
locallock->maxLockOwners = 8;
- locallock->holdsStrongLockCount = FALSE;
+ locallock->holdsStrongLockCount = false;
locallock->lockOwners = NULL; /* in case next line fails */
locallock->lockOwners = (LOCALLOCKOWNER *)
MemoryContextAlloc(TopMemoryContext,
@@ -1264,7 +1264,7 @@ RemoveLocalLock(LOCALLOCK *locallock)
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
FastPathStrongRelationLocks->count[fasthashcode]--;
- locallock->holdsStrongLockCount = FALSE;
+ locallock->holdsStrongLockCount = false;
SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
@@ -1578,7 +1578,7 @@ static void
BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
{
Assert(StrongLockInProgress == NULL);
- Assert(locallock->holdsStrongLockCount == FALSE);
+ Assert(locallock->holdsStrongLockCount == false);
/*
* Adding to a memory location is not atomic, so we take a spinlock to
@@ -1591,7 +1591,7 @@ BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
FastPathStrongRelationLocks->count[fasthashcode]++;
- locallock->holdsStrongLockCount = TRUE;
+ locallock->holdsStrongLockCount = true;
StrongLockInProgress = locallock;
SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
@@ -1620,11 +1620,11 @@ AbortStrongLockAcquire(void)
return;
fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
- Assert(locallock->holdsStrongLockCount == TRUE);
+ Assert(locallock->holdsStrongLockCount == true);
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
FastPathStrongRelationLocks->count[fasthashcode]--;
- locallock->holdsStrongLockCount = FALSE;
+ locallock->holdsStrongLockCount = false;
StrongLockInProgress = NULL;
SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
@@ -1857,7 +1857,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
{
elog(WARNING, "you don't own a lock of type %s",
lockMethodTable->lockModeNames[lockmode]);
- return FALSE;
+ return false;
}
/*
@@ -1896,7 +1896,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
/* don't release a lock belonging to another owner */
elog(WARNING, "you don't own a lock of type %s",
lockMethodTable->lockModeNames[lockmode]);
- return FALSE;
+ return false;
}
}
@@ -1907,7 +1907,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
locallock->nLocks--;
if (locallock->nLocks > 0)
- return TRUE;
+ return true;
/* Attempt fast release of any lock eligible for the fast path. */
if (EligibleForRelationFastPath(locktag, lockmode) &&
@@ -1926,7 +1926,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
if (released)
{
RemoveLocalLock(locallock);
- return TRUE;
+ return true;
}
}
@@ -1984,7 +1984,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
elog(WARNING, "you don't own a lock of type %s",
lockMethodTable->lockModeNames[lockmode]);
RemoveLocalLock(locallock);
- return FALSE;
+ return false;
}
/*
@@ -1999,7 +1999,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
LWLockRelease(partitionLock);
RemoveLocalLock(locallock);
- return TRUE;
+ return true;
}
/*
@@ -3137,7 +3137,7 @@ AtPrepare_Locks(void)
* entry. We must retain the count until the prepared transaction is
* committed or rolled back.
*/
- locallock->holdsStrongLockCount = FALSE;
+ locallock->holdsStrongLockCount = false;
/*
* Create a 2PC record.

File: src/backend/storage/lmgr/lwlock.c

@@ -1281,7 +1281,7 @@ LWLockAcquire(LWLock *lock, LWLockMode mode)
/*
* LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
*
- * If the lock is not available, return FALSE with no side-effects.
+ * If the lock is not available, return false with no side-effects.
*
* If successful, cancel/die interrupts are held off until lock release.
*/