diff --git a/contrib/ltree/ltree.h b/contrib/ltree/ltree.h
index 10341d60212..564e4fa81b8 100644
--- a/contrib/ltree/ltree.h
+++ b/contrib/ltree/ltree.h
@@ -24,7 +24,7 @@
  * modified to look for -D compile flags in Makefiles, so here, in order to
  * get the historic behavior of LOWER_NODE not being defined on MSVC, we only
  * define it when not building in that environment. This is important as we
- * want to maintain the same LOWER_NODE behavior after a pg_update.
+ * want to maintain the same LOWER_NODE behavior after a pg_upgrade.
  */
 #ifndef _MSC_VER
 #define LOWER_NODE
diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c
index 82333752f1f..10d4f17bc6f 100644
--- a/src/backend/access/brin/brin_minmax_multi.c
+++ b/src/backend/access/brin/brin_minmax_multi.c
@@ -310,7 +310,7 @@ AssertCheckRanges(Ranges *ranges, FmgrInfo *cmpFn, Oid colloid)
 	 */
 	AssertArrayOrder(cmpFn, colloid, ranges->values, 2 * ranges->nranges);
 
-	/* then the single-point ranges (with nvalues boundar values ) */
+	/* then the single-point ranges (with nvalues boundary values) */
 	AssertArrayOrder(cmpFn, colloid, &ranges->values[2 * ranges->nranges],
 					 ranges->nsorted);
 
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 1ee985f6330..a03122df8d6 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1470,7 +1470,7 @@ heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
 		 * heap_set_tidrange will have used heap_setscanlimits to limit the
 		 * range of pages we scan to only ones that can contain the TID range
 		 * we're scanning for. Here we must filter out any tuples from these
-		 * pages that are outwith that range.
+		 * pages that are outside of that range.
 		 */
 		if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
 		{
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index a5f1a648d3d..b3e37003ac5 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -300,7 +300,7 @@ XLogReleasePreviousRecord(XLogReaderState *state)
 	/* Release the space. */
 	if (unlikely(record->oversized))
 	{
-		/* It's not in the the decode buffer, so free it to release space. */
+		/* It's not in the decode buffer, so free it to release space. */
 		pfree(record);
 	}
 	else
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 2e555f8573d..4ee29182ac8 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -2975,7 +2975,7 @@ ReadRecord(XLogPrefetcher *xlogprefetcher, int emode,
 		/*
 		 * When not in standby mode we find that WAL ends in an incomplete
 		 * record, keep track of that record. After recovery is done,
-		 * we'll write a record to indicate downstream WAL readers that
+		 * we'll write a record to indicate to downstream WAL readers that
 		 * that portion is to be ignored.
 		 */
 		if (!StandbyMode &&
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index ce776c53cad..1bbecfeddf6 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -386,7 +386,7 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
  * needs to be copied from the source database to the destination database,
  * and if so, construct a CreateDBRelInfo for it.
  *
- * Visbility checks are handled by the caller, so our job here is just
+ * Visibility checks are handled by the caller, so our job here is just
  * to assess the data stored in the tuple.
  */
 CreateDBRelInfo *
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 6b4f742578e..bbf3b69c57e 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -12,7 +12,7 @@
 * the memory space for storing dead items allocated in the DSM segment. We
 * launch parallel worker processes at the start of parallel index
 * bulk-deletion and index cleanup and once all indexes are processed, the
- * parallel worker processes exit. Each time we process indexes parallelly,
+ * parallel worker processes exit. Each time we process indexes in parallel,
 * the parallel context is re-initialized so that the same DSM can be used for
 * multiple passes of index bulk-deletion and index cleanup.
 *
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index ecf9052e038..c5c62fa5c78 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -100,7 +100,7 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
 
 	/*
 	 * When no run-time pruning is required and there's at least one
-	 * subplan, we can fill as_valid_subplans immediately, preventing
+	 * subplan, we can fill ms_valid_subplans immediately, preventing
 	 * later calls to ExecFindMatchingSubPlans.
 	 */
 	if (!prunestate->do_exec_prune && nplans > 0)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index ec3c23013a3..b787c6f81a8 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1976,8 +1976,8 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
 	 * by calling estimate_num_groups_incremental(), which estimates the
 	 * group size for "new" pathkeys.
 	 *
-	 * Note: estimate_num_groups_incremntal does not handle fake Vars, so use
-	 * a default estimate otherwise.
+	 * Note: estimate_num_groups_incremental does not handle fake Vars, so
+	 * use a default estimate otherwise.
 	 */
 	if (!has_fake_var)
 		nGroups = estimate_num_groups_incremental(root, pathkeyExprs,
@@ -6471,7 +6471,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
 	exact_pages = heap_pages - lossy_pages;
 
 	/*
-	 * If there are lossy pages then recompute the number of tuples 
+	 * If there are lossy pages then recompute the number of tuples
 	 * processed by the bitmap heap node. We assume here that the chance
 	 * of a given tuple coming from an exact page is the same as the
 	 * chance that a given page is exact. This might not be true, but
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 75fe03fd04b..91556910aec 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -2383,16 +2383,16 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
 * Count the number of pathkeys that are useful for grouping (instead of
 * explicit sort)
 *
- * Group pathkeys could be reordered to benefit from the odering. The ordering
- * may not be "complete" and may require incremental sort, but that's fine. So
- * we simply count prefix pathkeys with a matching group key, and stop once we
- * find the first pathkey without a match.
+ * Group pathkeys could be reordered to benefit from the ordering. The
+ * ordering may not be "complete" and may require incremental sort, but that's
+ * fine. So we simply count prefix pathkeys with a matching group key, and
+ * stop once we find the first pathkey without a match.
 *
 * So e.g. with pathkeys (a,b,c) and group keys (a,b,e) this determines (a,b)
 * pathkeys are useful for grouping, and we might do incremental sort to get
 * path ordered by (a,b,e).
 *
- * This logic is necessary to retain paths with ordeding not matching grouping
+ * This logic is necessary to retain paths with ordering not matching grouping
 * keys directly, without the reordering.
 *
 * Returns the length of pathkey prefix with matching group keys.
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index b6a2482f23a..3cbd5161528 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -3507,7 +3507,7 @@ transformJsonOutput(ParseState *pstate, const JsonOutput *output,
 }
 
 /*
- * Transform JSON output clause of JSON contructor functions.
+ * Transform JSON output clause of JSON constructor functions.
 *
 * Derive RETURNING type, if not specified, from argument types.
 */
diff --git a/src/backend/replication/basebackup_server.c b/src/backend/replication/basebackup_server.c
index bc16897b33f..f5d73301d82 100644
--- a/src/backend/replication/basebackup_server.c
+++ b/src/backend/replication/basebackup_server.c
@@ -195,9 +195,9 @@ bbsink_server_end_archive(bbsink *sink)
 
 	/*
 	 * We intentionally don't use data_sync_elevel here, because the server
-	 * shouldn't PANIC just because we can't guarantee the the backup has been
-	 * written down to disk. Running recovery won't fix anything in this case
-	 * anyway.
+	 * shouldn't PANIC just because we can't guarantee that the backup has
+	 * been written down to disk. Running recovery won't fix anything in this
+	 * case anyway.
 	 */
 	if (FileSync(mysink->file, WAIT_EVENT_BASEBACKUP_SYNC) < 0)
 		ereport(ERROR,
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 5adc016d449..6887dc23f61 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1876,7 +1876,7 @@ ReorderBufferStreamCommit(ReorderBuffer *rb, ReorderBufferTXN *txn)
 * xid 502 which is not visible to our snapshot. And when we will try to
 * decode with that catalog tuple, it can lead to a wrong result or a crash.
 * So, it is necessary to detect concurrent aborts to allow streaming of
- * in-progress transactions or decoding of prepared transactions. 
+ * in-progress transactions or decoding of prepared transactions.
 *
 * For detecting the concurrent abort we set CheckXidAlive to the current
 * (sub)transaction's xid for which this change belongs to. And, during
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index c35ea7c35bf..5c778f5333b 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -247,7 +247,7 @@ ReplicationSlotValidateName(const char *name, int elevel)
 * to be enabled only at the slot creation time. If we allow this option
 * to be changed during decoding then it is quite possible that we skip
 * prepare first time because this option was not enabled. Now next time
- * during getting changes, if the two_phase option is enabled it can skip 
+ * during getting changes, if the two_phase option is enabled it can skip
 * prepare because by that time start decoding point has been moved. So the
 * user will only get commit prepared.
 */
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 603f6aba71f..e184a3552c8 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -2338,7 +2338,7 @@ GetSnapshotData(Snapshot snapshot)
 
 			/*
 			 * We don't include our own XIDs (if any) in the snapshot. It
-			 * needs to be includeded in the xmin computation, but we did so
+			 * needs to be included in the xmin computation, but we did so
 			 * outside the loop.
 			 */
 			if (pgxactoff == mypgxactoff)
diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c
index bf4247048dd..fe469881612 100644
--- a/src/backend/tsearch/ts_parse.c
+++ b/src/backend/tsearch/ts_parse.c
@@ -248,7 +248,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 			dict = lookup_ts_dictionary_cache(ld->curDictId);
 
 			/*
-			 * Dictionary ld->curDictId asks us about following words 
+			 * Dictionary ld->curDictId asks us about following words
 			 */
 
 			while (ld->curSub)
diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c
index 88f279d1b31..2bf52192567 100644
--- a/src/backend/utils/adt/genfile.c
+++ b/src/backend/utils/adt/genfile.c
@@ -59,9 +59,9 @@ convert_and_check_filename(text *arg)
 	canonicalize_path(filename);	/* filename can change length here */
 
 	/*
-	 * Roles with privleges of the 'pg_read_server_files' role are allowed to access
-	 * any files on the server as the PG user, so no need to do any further checks
-	 * here.
+	 * Roles with privileges of the 'pg_read_server_files' role are allowed to
+	 * access any files on the server as the PG user, so no need to do any
+	 * further checks here.
 	 */
 	if (has_privs_of_role(GetUserId(), ROLE_PG_READ_SERVER_FILES))
 		return filename;
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 609aab2e651..b79705f8b3f 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -3878,7 +3878,7 @@ lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start)
 		Point		p;
 
 		/*
-		 * if X-intersection wasn't found then check central point of tested
+		 * if X-intersection wasn't found, then check central point of tested
 		 * segment. In opposite case we already check all subsegments
 		 */
 		p.x = float8_div(float8_pl(t.p[0].x, t.p[1].x), 2.0);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 12603b727cd..2c47dea3429 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -991,7 +991,7 @@ search_locale_enum(LPWSTR pStr, DWORD dwFlags, LPARAM lparam)
 						test_locale, LOCALE_NAME_MAX_LENGTH))
 		{
 			/*
-			 * If the enumerated locale does not have a hyphen ("en") OR the 
+			 * If the enumerated locale does not have a hyphen ("en") OR the
 			 * lc_message input does not have an underscore ("English"), we only
 			 * need to compare the tags.
 			 */
diff --git a/src/backend/utils/adt/tsquery.c b/src/backend/utils/adt/tsquery.c
index f0a95297b3e..f54f2988149 100644
--- a/src/backend/utils/adt/tsquery.c
+++ b/src/backend/utils/adt/tsquery.c
@@ -261,7 +261,7 @@ parse_or_operator(TSQueryParserState pstate)
 	/*
 	 * Suppose, we found an operand, but could be a not correct operand.
 	 * So we still treat OR literal as operation with possibly incorrect
-	 * operand and will not search it as lexeme 
+	 * operand and will not search it as lexeme
 	 */
 	if (!t_isspace(ptr))
 		break;
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index dee3387d026..75a3aedc5af 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -1044,7 +1044,7 @@ perform_relmap_update(bool shared, const RelMapFile *updates)
 					(shared ? "global" : DatabasePath));
 
 	/*
-	 * We succesfully wrote the updated file, so it's now safe to rely on the
+	 * We successfully wrote the updated file, so it's now safe to rely on the
 	 * new values in this process, too.
 	 */
 	if (shared)
@@ -1093,7 +1093,7 @@ relmap_redo(XLogReaderState *record)
 		 * an existing database as we do for creating a new database. In
 		 * the latter case, taking the relmap log and sending sinval messages
 		 * is unnecessary, but harmless. If we wanted to avoid it, we could
-		 * add a flag to the WAL record to indicate which opration is being
+		 * add a flag to the WAL record to indicate which operation is being
 		 * performed.
 		 */
 		LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
diff --git a/src/backend/utils/error/csvlog.c b/src/backend/utils/error/csvlog.c
index 89f78b447d9..5c49bc4209e 100644
--- a/src/backend/utils/error/csvlog.c
+++ b/src/backend/utils/error/csvlog.c
@@ -4,7 +4,7 @@
 *	  CSV logging
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of Californi
+ * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index bd4b2c19b1c..72778b896a2 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -2269,7 +2269,7 @@ write_console(const char *line, int len)
 	/*
 	 * Conversion on non-win32 platforms is not implemented yet. It requires
 	 * non-throw version of pg_do_encoding_conversion(), that converts
-	 * unconvertable characters to '?' without errors.
+	 * unconvertible characters to '?' without errors.
 	 *
 	 * XXX: We have a no-throw version now. It doesn't convert to '?' though.
 	 */
diff --git a/src/backend/utils/error/jsonlog.c b/src/backend/utils/error/jsonlog.c
index 843641c865f..f336c063e58 100644
--- a/src/backend/utils/error/jsonlog.c
+++ b/src/backend/utils/error/jsonlog.c
@@ -4,7 +4,7 @@
 *	  JSON logging
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of Californi
+ * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index d269662ad8e..9197b0f1e26 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -69,7 +69,7 @@ static TypeFuncClass get_type_func_class(Oid typid, Oid *base_typeid);
 * descriptor coming from expectedDesc, which is the tuple descriptor
 * expected by the caller. SRF_SINGLE_BLESS can be set to complete the
 * information associated to the tuple descriptor, which is necessary
- * in some cases where the tuple descriptor comes from a transient 
+ * in some cases where the tuple descriptor comes from a transient
 * RECORD datatype.
 */
 void
diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c
index 685688c1554..56ed496386e 100644
--- a/src/backend/utils/mmgr/generation.c
+++ b/src/backend/utils/mmgr/generation.c
@@ -27,7 +27,7 @@
 * context's 'freeblock' field. If the freeblock field is already occupied
 * by another free block we simply return the newly empty block to malloc.
 *
- * This approach to free blocks requires fewer malloc/free calls for truely
+ * This approach to free blocks requires fewer malloc/free calls for truly
 * first allocated, first free'd allocation patterns.
 *
 *-------------------------------------------------------------------------
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index c1f2253af2e..d96ae873032 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -1464,7 +1464,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
 					GetCopyDataEnd(r, copybuf, cursor);
 
 					/*
-					 * The server shouldn't send progres report messages too
+					 * The server shouldn't send progress report messages too
 					 * often, so we force an update each time we receive one.
 					 */
 					progress_report(state->tablespacenum, true, false);
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 8197da8546d..e63cea56a1c 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -3450,7 +3450,7 @@ discardUntilSync(CState *st)
 		PQclear(res);
 	}
 
-	/* exit pipline */
+	/* exit pipeline */
 	if (PQexitPipelineMode(st->con) != 1)
 	{
 		pg_log_error("client %d aborted: failed to exit pipeline mode for rolling back the failed transaction",
@@ -7261,7 +7261,7 @@ main(int argc, char **argv)
 
 	/*
 	 * All connections should be already closed in threadRun(), so this
-	 * disconnect_all() will be a no-op, but clean up the connecions just to
+	 * disconnect_all() will be a no-op, but clean up the connections just to
 	 * be sure. We don't need to measure the disconnection delays here.
 	 */
 	disconnect_all(state, nclients);
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index a5ceaec3ac7..424a429e1e2 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -653,7 +653,8 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
 			 *
 			 * Make sure there's always space for four more bytes in the
 			 * buffer, plus a NUL terminator. That way, an EOF marker is
-			 * never split across two fgets() calls, which simplies the logic.
+			 * never split across two fgets() calls, which simplifies the
+			 * logic.
 			 */
 			if (buflen >= COPYBUFSIZ - 5 || (copydone && buflen > 0))
 			{
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index e8919ab0be2..d04ba2b0290 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -3080,7 +3080,7 @@ describeOneTableDetails(const char *schemaname,
 	 * servers between v11 and v14, though these must still be shown to
 	 * the user. So we use another property that is true for such
 	 * inherited triggers to avoid them being hidden, which is their
-	 * dependendence on another trigger.
+	 * dependence on another trigger.
 	 */
 	if (pset.sversion >= 110000 && pset.sversion < 150000)
 		appendPQExpBufferStr(&buf, "(NOT t.tgisinternal OR (t.tgisinternal AND t.tgenabled = 'D') \n"
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 8f163fb7bac..10e8dbc9b1f 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -2257,7 +2257,7 @@ psql_completion(const char *text, int start, int end)
 		COMPLETE_WITH("COLUMN", "CONSTRAINT", "CHECK", "UNIQUE", "PRIMARY KEY",
 					  "EXCLUDE", "FOREIGN KEY");
 	}
-	/* ATER TABLE xxx ADD [COLUMN] yyy */
+	/* ALTER TABLE xxx ADD [COLUMN] yyy */
 	else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN", MatchAny) ||
 			 (Matches("ALTER", "TABLE", MatchAny, "ADD", MatchAny) &&
 			  !Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN|CONSTRAINT|CHECK|UNIQUE|PRIMARY|EXCLUDE|FOREIGN")))
diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h
index 60e5f9940d4..4f7c73f0aac 100644
--- a/src/include/utils/sortsupport.h
+++ b/src/include/utils/sortsupport.h
@@ -24,7 +24,7 @@
 * function will have a shim set up by sort support automatically. However,
 * opclasses that support the optional additional abbreviated key capability
 * must always provide an authoritative comparator used to tie-break
- * inconclusive abbreviated comparisons and also used when aborting 
+ * inconclusive abbreviated comparisons and also used when aborting
 * abbreviation. Furthermore, a converter and abort/costing function must be
 * provided.
 *
diff --git a/src/tools/mark_pgdllimport.pl b/src/tools/mark_pgdllimport.pl
index a09ec5a369e..83b90db6ef9 100755
--- a/src/tools/mark_pgdllimport.pl
+++ b/src/tools/mark_pgdllimport.pl
@@ -6,7 +6,7 @@
 # Perl script that tries to add PGDLLIMPORT markings to PostgreSQL
 # header files.
 #
-# This relies on a few idiosyncracies of the PostgreSQL cding style,
+# This relies on a few idiosyncrasies of the PostgreSQL coding style,
 # such as the fact that we always use "extern" in function
 # declarations, and that we don't use // comments. It's not very
 # smart and may not catch all cases.