
Fix typos in comments.

Backpatch to all supported versions, where applicable, to make backpatching
of future fixes go more smoothly.

Josh Soref

Discussion: https://www.postgresql.org/message-id/CACZqfqCf+5qRztLPgmmosr-B0Ye4srWzzw_mo4c_8_B_mtjmJQ@mail.gmail.com
Committed by Heikki Linnakangas on 2017-02-06 11:33:58 +02:00
commit 3aee34d41d
parent e5e75ea288
105 changed files with 147 additions and 147 deletions

@@ -472,7 +472,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
  * it will point to a temporary buffer. This mostly avoids data copying in
  * the hoped-for case where messages are short compared to the buffer size,
  * while still allowing longer messages. In either case, the return value
- * remains valid until the next receive operation is perfomed on the queue.
+ * remains valid until the next receive operation is performed on the queue.
  *
  * When nowait = false, we'll wait on our process latch when the ring buffer
  * is empty and we have not yet received a full message. The sender will
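
For context, the comment corrected above documents the receive contract of the shared-memory queue: the pointer handed back may reference either the ring buffer or a temporary reassembly buffer, so it stays valid only until the next receive on the same queue. Below is a minimal, hedged sketch of a consumer loop that honors that rule; drain_queue() and its consume_message callback are invented for illustration, while shm_mq_receive(), palloc() and pfree() are the real PostgreSQL interfaces.

#include "postgres.h"
#include "storage/shm_mq.h"

/* Hypothetical helper: receive messages until the sender detaches. */
static void
drain_queue(shm_mq_handle *mqh, void (*consume_message) (const char *, Size))
{
	for (;;)
	{
		Size		nbytes;
		void	   *data;
		shm_mq_result res;

		/* nowait = false: sleep on our process latch until data arrives */
		res = shm_mq_receive(mqh, &nbytes, &data, false);
		if (res != SHM_MQ_SUCCESS)
			break;				/* e.g. SHM_MQ_DETACHED: the sender is gone */

		/*
		 * 'data' is only valid until the next shm_mq_receive(), so copy it
		 * (or finish processing it) before looping around.
		 */
		{
			char	   *copy = palloc(nbytes);

			memcpy(copy, data, nbytes);
			consume_message(copy, nbytes);
			pfree(copy);
		}
	}
}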

@@ -2697,7 +2697,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
 	vxids = (VirtualTransactionId *)
 		palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
-	/* Compute hash code and partiton lock, and look up conflicting modes. */
+	/* Compute hash code and partition lock, and look up conflicting modes. */
 	hashcode = LockTagHashCode(locktag);
 	partitionLock = LockHashPartitionLock(hashcode);
 	conflictMask = lockMethodTable->conflictTab[lockmode];

@@ -636,7 +636,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 				return false;
 			}
 			else
-				return true;	/* someobdy else has the lock */
+				return true;	/* somebody else has the lock */
 		}
 	}
 	pg_unreachable();
@@ -733,7 +733,7 @@ LWLockWakeup(LWLock *lock)
 		 * that happens before the list unlink happens, the list would end up
 		 * being corrupted.
 		 *
-		 * The barrier pairs with the SpinLockAcquire() when enqueing for
+		 * The barrier pairs with the SpinLockAcquire() when enqueuing for
 		 * another lock.
 		 */
 		pg_write_barrier();
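
The pairing that comment describes is the usual publish/observe barrier discipline: the waker's pg_write_barrier() makes its earlier stores visible before the flag change, and the acquire-type barrier implied by SpinLockAcquire() on the enqueuing side orders the reads that follow. A generic, hedged sketch of the same pattern is below; it is not the LWLock code, and the payload and ready variables are invented, but pg_write_barrier() and pg_read_barrier() are the real barrier primitives from port/atomics.h.

#include "postgres.h"
#include "port/atomics.h"

static int	payload;				/* hypothetical shared datum */
static volatile int ready = 0;		/* hypothetical published flag */

static void
publisher(void)
{
	payload = 42;			/* write the data... */
	pg_write_barrier();		/* ...force it to become visible... */
	ready = 1;				/* ...then publish the flag */
}

static void
observer(void)
{
	if (ready)
	{
		pg_read_barrier();		/* pairs with the publisher's write barrier */
		Assert(payload == 42);	/* the earlier store is now visible */
	}
}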
@@ -823,7 +823,7 @@ LWLockDequeueSelf(LWLock *lock)
 	/*
 	 * Can't just remove ourselves from the list, but we need to iterate over
-	 * all entries as somebody else could have unqueued us.
+	 * all entries as somebody else could have dequeued us.
 	 */
 	dlist_foreach_modify(iter, &lock->waiters)
 	{
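
The dlist_foreach_modify() seen in that hunk is the deletion-safe flavor of the intrusive-list iterator from lib/ilist.h: it caches the next node before each iteration, so the current element can be unlinked inside the loop without breaking the traversal. A hedged, stand-alone sketch of that idiom follows; the WaiterNode struct and remove_matching() function are invented for illustration, but dlist_foreach_modify(), dlist_container() and dlist_delete() are the real macros and functions.

#include "postgres.h"
#include "lib/ilist.h"

typedef struct WaiterNode		/* hypothetical list element */
{
	dlist_node	node;
	int			id;
} WaiterNode;

static void
remove_matching(dlist_head *waiters, int victim_id)
{
	dlist_mutable_iter iter;

	dlist_foreach_modify(iter, waiters)
	{
		WaiterNode *w = dlist_container(WaiterNode, node, iter.cur);

		if (w->id == victim_id)
			dlist_delete(&w->node); /* safe: iter already caches the next node */
	}
}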

@@ -3203,7 +3203,7 @@ ReleasePredicateLocks(bool isCommit)
 	/*
 	 * We can't trust XactReadOnly here, because a transaction which started
 	 * as READ WRITE can show as READ ONLY later, e.g., within
-	 * substransactions. We want to flag a transaction as READ ONLY if it
+	 * subtransactions. We want to flag a transaction as READ ONLY if it
 	 * commits without writing so that de facto READ ONLY transactions get the
 	 * benefit of some RO optimizations, so we will use this local variable to
 	 * get some cleanup logic right which is based on whether the transaction