diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c
index aa58489f496..cf169c35b97 100644
--- a/contrib/btree_gist/btree_ts.c
+++ b/contrib/btree_gist/btree_ts.c
@@ -374,7 +374,7 @@ gbt_ts_penalty(PG_FUNCTION_ARGS)
                 newdbl[2];
 
     /*
-     * We are allways using "double" timestamps here. Precision should be good
+     * We are always using "double" timestamps here. Precision should be good
      * enough.
      */
     orgdbl[0] = ((double) origentry->lower);
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index c7c6faafc67..225a945a7e5 100644
--- a/contrib/btree_gist/btree_utils_var.c
+++ b/contrib/btree_gist/btree_utils_var.c
@@ -52,7 +52,7 @@ gbt_var_decompress(PG_FUNCTION_ARGS)
     PG_RETURN_POINTER(entry);
 }
 
-/* Returns a better readable representaion of variable key ( sets pointer ) */
+/* Returns a better readable representation of variable key ( sets pointer ) */
 GBT_VARKEY_R
 gbt_var_key_readable(const GBT_VARKEY *k)
 {
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 770cc778b18..ea4dd24e6b4 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -807,7 +807,7 @@ heap_modify_tuple(HeapTuple tuple,
      * repl information, as appropriate.
      *
      * NOTE: it's debatable whether to use heap_deform_tuple() here or just
-     * heap_getattr() only the non-replaced colums. The latter could win if
+     * heap_getattr() only the non-replaced columns. The latter could win if
      * there are many replaced columns and few non-replaced ones. However,
      * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
      * O(N^2) if there are many non-replaced columns, so it seems better to
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index a0d6ae2a005..c34de3f160c 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -881,8 +881,8 @@ ginInsertCleanup(GinState *ginstate,
                                          * locking */
 
             /*
-             * remove readed pages from pending list, at this point all
-             * content of readed pages is in regular structure
+             * remove read pages from pending list, at this point all
+             * content of read pages is in regular structure
              */
             if (shiftList(index, metabuffer, blkno, stats))
             {
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 3a45781d0e1..a1ba58a23c1 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -578,7 +578,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
         * We first consider splits where b is the lower bound of an entry.
         * We iterate through all entries, and for each b, calculate the
         * smallest possible a. Then we consider splits where a is the
-        * uppper bound of an entry, and for each a, calculate the greatest
+        * upper bound of an entry, and for each a, calculate the greatest
         * possible b.
         *
         * In the above example, the first loop would consider splits:
@@ -628,7 +628,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
         }
 
         /*
-        * Iterate over upper bound of left group finding greates possible
+        * Iterate over upper bound of left group finding greatest possible
         * lower bound of right group.
         */
        i1 = nentries - 1;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9918c226c28..11542163ea4 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5115,7 +5115,7 @@ l4:
  *
  * The initial tuple is assumed to be already locked.
  *
- * This function doesn't check visibility, it just inconditionally marks the
+ * This function doesn't check visibility, it just unconditionally marks the
  * tuple(s) as locked. If any tuple in the updated chain is being deleted
  * concurrently (or updated with the key being modified), sleep until the
  * transaction doing it is finished.
@@ -5608,7 +5608,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
             /*
              * NB -- some of these transformations are only valid because
              * we know the return Xid is a tuple updater (i.e. not merely a
-             * locker.) Also note that the only reason we don't explicitely
+             * locker.) Also note that the only reason we don't explicitly
              * worry about HEAP_KEYS_UPDATED is because it lives in t_infomask2
              * rather than t_infomask.
              */
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 054b29ec040..5a0c4822762 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -931,7 +931,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
 
     /*
      * The TID qual expressions will be computed once, any other baserestrict
-     * quals once per retrived tuple.
+     * quals once per retrieved tuple.
      */
     cost_qual_eval(&tid_qual_cost, tidquals, root);
 
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 88b1c2b5648..37817583d35 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -698,7 +698,7 @@ format_operator_internal(Oid operator_oid, bool force_qualify)
 
     /*
      * Would this oper be found (given the right args) by regoperatorin?
-     * If not, or if caller explicitely requests it, we need to qualify
+     * If not, or if caller explicitly requests it, we need to qualify
      * it.
      */
     if (force_qualify || !OperatorIsVisible(operator_oid))