
Fix misc typos.

Oskari Saarenmaa. Backpatch to stable branches where applicable.
Heikki Linnakangas
2015-09-05 11:35:49 +03:00
parent c39f5674df
commit c80b5f66c6
17 changed files with 25 additions and 25 deletions


@@ -127,7 +127,7 @@ brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
  * it's not long enough.
  *
  * The returned buffer is also recorded in the revmap struct; finishing that
- * releases the buffer, therefore the caller needn't do it explicitely.
+ * releases the buffer, therefore the caller needn't do it explicitly.
  */
 Buffer
 brinLockRevmapPageForUpdate(BrinRevmap *revmap, BlockNumber heapBlk)
@@ -314,7 +314,7 @@ revmap_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
  * Obtain and return a buffer containing the revmap page for the given heap
  * page. The revmap must have been previously extended to cover that page.
  * The returned buffer is also recorded in the revmap struct; finishing that
- * releases the buffer, therefore the caller needn't do it explicitely.
+ * releases the buffer, therefore the caller needn't do it explicitly.
  */
 static Buffer
 revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)


@@ -805,7 +805,7 @@ heap_modify_tuple(HeapTuple tuple,
  * repl information, as appropriate.
  *
  * NOTE: it's debatable whether to use heap_deform_tuple() here or just
- * heap_getattr() only the non-replaced colums. The latter could win if
+ * heap_getattr() only the non-replaced columns. The latter could win if
  * there are many replaced columns and few non-replaced ones. However,
  * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
  * O(N^2) if there are many non-replaced columns, so it seems better to


@@ -888,8 +888,8 @@ ginInsertCleanup(GinState *ginstate,
  * locking */
 /*
- * remove readed pages from pending list, at this point all
- * content of readed pages is in regular structure
+ * remove read pages from pending list, at this point all
+ * content of read pages is in regular structure
  */
 if (shiftList(index, metabuffer, blkno, stats))
 {


@@ -588,7 +588,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
  * We first consider splits where b is the lower bound of an entry.
  * We iterate through all entries, and for each b, calculate the
  * smallest possible a. Then we consider splits where a is the
- * uppper bound of an entry, and for each a, calculate the greatest
+ * upper bound of an entry, and for each a, calculate the greatest
  * possible b.
  *
  * In the above example, the first loop would consider splits:
@@ -638,7 +638,7 @@
 }
 /*
- * Iterate over upper bound of left group finding greates possible
+ * Iterate over upper bound of left group finding greatest possible
  * lower bound of right group.
  */
 i1 = nentries - 1;


@@ -5473,7 +5473,7 @@ l4:
  *
  * The initial tuple is assumed to be already locked.
  *
- * This function doesn't check visibility, it just inconditionally marks the
+ * This function doesn't check visibility, it just unconditionally marks the
  * tuple(s) as locked. If any tuple in the updated chain is being deleted
  * concurrently (or updated with the key being modified), sleep until the
  * transaction doing it is finished.
@@ -6187,7 +6187,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 /*
  * NB -- some of these transformations are only valid because we
  * know the return Xid is a tuple updater (i.e. not merely a
- * locker.) Also note that the only reason we don't explicitely
+ * locker.) Also note that the only reason we don't explicitly
  * worry about HEAP_KEYS_UPDATED is because it lives in
  * t_infomask2 rather than t_infomask.
  */


@@ -763,9 +763,9 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
  *
  * Crash-Safety: This module diverts from the usual patterns of doing WAL
  * since it cannot rely on checkpoint flushing out all buffers and thus
- * waiting for exlusive locks on buffers. Usually the XLogInsert() covering
+ * waiting for exclusive locks on buffers. Usually the XLogInsert() covering
  * buffer modifications is performed while the buffer(s) that are being
- * modified are exlusively locked guaranteeing that both the WAL record and
+ * modified are exclusively locked guaranteeing that both the WAL record and
  * the modified heap are on either side of the checkpoint. But since the
  * mapping files we log aren't in shared_buffers that interlock doesn't work.
  *


@@ -83,7 +83,7 @@ int synchronous_commit = SYNCHRONOUS_COMMIT_ON;
  * When running as a parallel worker, we place only a single
  * TransactionStateData on the parallel worker's state stack, and the XID
  * reflected there will be that of the *innermost* currently-active
- * subtransaction in the backend that initiated paralllelism. However,
+ * subtransaction in the backend that initiated parallelism. However,
  * GetTopTransactionId() and TransactionIdIsCurrentTransactionId()
  * need to return the same answers in the parallel worker as they would have
  * in the user backend, so we need some additional bookkeeping.


@@ -1039,7 +1039,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
 /*
  * The TID qual expressions will be computed once, any other baserestrict
- * quals once per retrived tuple.
+ * quals once per retrieved tuple.
  */
 cost_qual_eval(&tid_qual_cost, tidquals, root);


@@ -1049,7 +1049,7 @@ replorigin_session_setup(RepOriginId node)
 {
 ereport(ERROR,
 (errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("replication identiefer %d is already active for pid %d",
+ errmsg("replication identifier %d is already active for pid %d",
 curstate->roident, curstate->acquired_by)));
 }


@@ -855,7 +855,7 @@ format_operator_internal(Oid operator_oid, bool force_qualify)
 /*
  * Would this oper be found (given the right args) by regoperatorin?
- * If not, or if caller explicitely requests it, we need to qualify
+ * If not, or if caller explicitly requests it, we need to qualify
  * it.
  */
 if (force_qualify || !OperatorIsVisible(operator_oid))


@@ -54,13 +54,13 @@ typedef struct LWLock
 slock_t mutex; /* Protects LWLock and queue of PGPROCs */
 uint16 tranche; /* tranche ID */
-pg_atomic_uint32 state; /* state of exlusive/nonexclusive lockers */
+pg_atomic_uint32 state; /* state of exclusive/nonexclusive lockers */
 #ifdef LOCK_DEBUG
 pg_atomic_uint32 nwaiters; /* number of waiters */
 #endif
 dlist_head waiters; /* list of waiting PGPROCs */
 #ifdef LOCK_DEBUG
-struct PGPROC *owner; /* last exlusive owner of the lock */
+struct PGPROC *owner; /* last exclusive owner of the lock */
 #endif
 } LWLock;