Fix various typos and spelling mistakes in code comments
Author: Justin Pryzby
Discussion: https://postgr.es/m/20220411020336.GB26620@telsasoft.com
commit b0e5f02ddc
parent bba3c35b29
@@ -24,7 +24,7 @@
  * modified to look for -D compile flags in Makefiles, so here, in order to
  * get the historic behavior of LOWER_NODE not being defined on MSVC, we only
  * define it when not building in that environment. This is important as we
- * want to maintain the same LOWER_NODE behavior after a pg_update.
+ * want to maintain the same LOWER_NODE behavior after a pg_upgrade.
  */
 #ifndef _MSC_VER
 #define LOWER_NODE
@@ -310,7 +310,7 @@ AssertCheckRanges(Ranges *ranges, FmgrInfo *cmpFn, Oid colloid)
  */
 AssertArrayOrder(cmpFn, colloid, ranges->values, 2 * ranges->nranges);

-/* then the single-point ranges (with nvalues boundar values ) */
+/* then the single-point ranges (with nvalues boundary values ) */
 AssertArrayOrder(cmpFn, colloid, &ranges->values[2 * ranges->nranges],
                  ranges->nsorted);

@@ -1470,7 +1470,7 @@ heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
  * heap_set_tidrange will have used heap_setscanlimits to limit the
  * range of pages we scan to only ones that can contain the TID range
  * we're scanning for. Here we must filter out any tuples from these
- * pages that are outwith that range.
+ * pages that are outside of that range.
  */
 if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
 {
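The filtering this hunk documents is compact enough to sketch standalone. Below is a minimal, self-contained C illustration — the TidSketch type and all names are hypothetical, not the actual heapam code — of the block-first, offset-second ordering that ItemPointerCompare applies when a tuple's TID is checked against mintid/maxtid:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for PostgreSQL's ItemPointerData (block, offset). */
typedef struct TidSketch
{
	uint32_t	block;			/* heap page number */
	uint16_t	offset;			/* line pointer within the page */
} TidSketch;

/* Compare two TIDs the way ItemPointerCompare does: block first, then offset. */
static int
tid_compare(const TidSketch *a, const TidSketch *b)
{
	if (a->block != b->block)
		return (a->block < b->block) ? -1 : 1;
	if (a->offset != b->offset)
		return (a->offset < b->offset) ? -1 : 1;
	return 0;
}

/*
 * Page-level scan limits only guarantee a tuple's page *may* intersect the
 * range; tuples before mintid or after maxtid must still be filtered out.
 */
static bool
tid_in_range(const TidSketch *tid, const TidSketch *mintid, const TidSketch *maxtid)
{
	return tid_compare(tid, mintid) >= 0 && tid_compare(tid, maxtid) <= 0;
}

int
main(void)
{
	TidSketch	mintid = {10, 5};
	TidSketch	maxtid = {12, 1};
	TidSketch	candidate = {10, 2};	/* on the first page, but before mintid */

	printf("in range: %s\n", tid_in_range(&candidate, &mintid, &maxtid) ? "yes" : "no");
	return 0;
}
```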
@@ -300,7 +300,7 @@ XLogReleasePreviousRecord(XLogReaderState *state)
 /* Release the space. */
 if (unlikely(record->oversized))
 {
-    /* It's not in the the decode buffer, so free it to release space. */
+    /* It's not in the decode buffer, so free it to release space. */
     pfree(record);
 }
 else
@@ -2975,7 +2975,7 @@ ReadRecord(XLogPrefetcher *xlogprefetcher, int emode,
 /*
  * When not in standby mode we find that WAL ends in an incomplete
  * record, keep track of that record. After recovery is done,
- * we'll write a record to indicate downstream WAL readers that
+ * we'll write a record to indicate to downstream WAL readers that
  * that portion is to be ignored.
  */
 if (!StandbyMode &&
@@ -386,7 +386,7 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
  * needs to be copied from the source database to the destination database,
  * and if so, construct a CreateDBRelInfo for it.
  *
- * Visbility checks are handled by the caller, so our job here is just
+ * Visibility checks are handled by the caller, so our job here is just
  * to assess the data stored in the tuple.
  */
 CreateDBRelInfo *
@@ -12,7 +12,7 @@
  * the memory space for storing dead items allocated in the DSM segment. We
  * launch parallel worker processes at the start of parallel index
  * bulk-deletion and index cleanup and once all indexes are processed, the
- * parallel worker processes exit. Each time we process indexes parallelly,
+ * parallel worker processes exit. Each time we process indexes in parallel,
  * the parallel context is re-initialized so that the same DSM can be used for
  * multiple passes of index bulk-deletion and index cleanup.
  *
@@ -100,7 +100,7 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)

 /*
  * When no run-time pruning is required and there's at least one
- * subplan, we can fill as_valid_subplans immediately, preventing
+ * subplan, we can fill ms_valid_subplans immediately, preventing
  * later calls to ExecFindMatchingSubPlans.
  */
 if (!prunestate->do_exec_prune && nplans > 0)
@@ -1976,8 +1976,8 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
  * by calling estimate_num_groups_incremental(), which estimates the
  * group size for "new" pathkeys.
  *
- * Note: estimate_num_groups_incremntal does not handle fake Vars, so use
- * a default estimate otherwise.
+ * Note: estimate_num_groups_incremental does not handle fake Vars, so
+ * use a default estimate otherwise.
  */
 if (!has_fake_var)
     nGroups = estimate_num_groups_incremental(root, pathkeyExprs,
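As context for this hunk, the guard it documents reduces to: use the incremental estimator only when no fake Vars are present, otherwise fall back to a default group count. A hedged standalone sketch with invented names — treating tuples / DEFAULT_NUM_DISTINCT as one plausible default; the real code's fallback may differ:

```c
#include <math.h>
#include <stdio.h>

#define DEFAULT_NUM_DISTINCT 200.0	/* PostgreSQL's stock n_distinct fallback */

/* Invented stand-in for estimate_num_groups_incremental(). */
static double
estimate_groups_sketch(double input_tuples)
{
	return input_tuples / 10.0;		/* pretend every 10th row starts a group */
}

/*
 * Mirrors the guard in the hunk: fake Vars would feed the estimator
 * meaningless expressions, so use a default estimate instead.
 */
static double
group_count(int has_fake_var, double tuples)
{
	if (!has_fake_var)
		return estimate_groups_sketch(tuples);
	return ceil(tuples / DEFAULT_NUM_DISTINCT);
}

int
main(void)
{
	printf("%.0f groups\n", group_count(1, 100000.0));	/* 500, via the fallback */
	return 0;
}
```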
@@ -6471,7 +6471,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
 exact_pages = heap_pages - lossy_pages;

 /*
- * If there are lossy pages then recompute the number of tuples
+ * If there are lossy pages then recompute the number of tuples
  * processed by the bitmap heap node. We assume here that the chance
  * of a given tuple coming from an exact page is the same as the
  * chance that a given page is exact. This might not be true, but
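The recomputation this comment describes is a weighted split between exact and lossy pages: tuples on exact pages are filtered by the bitmap, while every tuple on a lossy page must be rechecked. A self-contained sketch of that arithmetic — illustrative names and numbers, not the actual costsize.c code:

```c
#include <stdio.h>

/*
 * Sketch of the estimate the comment describes.  Tuples on exact pages
 * are filtered by the bitmap, so only the selected ones are processed;
 * every tuple on a lossy page must be rechecked.  The key assumption,
 * as the comment says, is that the chance of a tuple lying on an exact
 * page equals the fraction of pages that are exact.
 */
static double
bitmap_tuples_processed(double selectivity, double rel_tuples,
						double exact_pages, double lossy_pages)
{
	double	heap_pages = exact_pages + lossy_pages;

	if (lossy_pages <= 0)
		return selectivity * rel_tuples;

	return selectivity * rel_tuples * (exact_pages / heap_pages) +
		   rel_tuples * (lossy_pages / heap_pages);
}

int
main(void)
{
	/* 1M-row table, 1% selectivity, 100 of 1000 touched pages lossy */
	printf("%.0f tuples\n",
		   bitmap_tuples_processed(0.01, 1000000.0, 900.0, 100.0));
	return 0;
}
```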
@@ -2383,16 +2383,16 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
  * Count the number of pathkeys that are useful for grouping (instead of
  * explicit sort)
  *
- * Group pathkeys could be reordered to benefit from the odering. The ordering
- * may not be "complete" and may require incremental sort, but that's fine. So
- * we simply count prefix pathkeys with a matching group key, and stop once we
- * find the first pathkey without a match.
+ * Group pathkeys could be reordered to benefit from the ordering. The
+ * ordering may not be "complete" and may require incremental sort, but that's
+ * fine. So we simply count prefix pathkeys with a matching group key, and
+ * stop once we find the first pathkey without a match.
  *
  * So e.g. with pathkeys (a,b,c) and group keys (a,b,e) this determines (a,b)
  * pathkeys are useful for grouping, and we might do incremental sort to get
  * path ordered by (a,b,e).
  *
- * This logic is necessary to retain paths with ordeding not matching grouping
+ * This logic is necessary to retain paths with ordering not matching grouping
  * keys directly, without the reordering.
  *
  * Returns the length of pathkey prefix with matching group keys.
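The counting rule spelled out in this comment — take pathkeys in order, keep those with a matching group key, stop at the first miss — is shown below as a small self-contained C sketch, with plain ints standing in for PathKey nodes; main() reproduces the comment's (a,b,c) vs (a,b,e) example:

```c
#include <stdio.h>

/*
 * Sketch of the rule in the comment: walk the path's sort keys in order,
 * count those with a matching group key, and stop at the first miss.
 */
static int
useful_prefix_for_grouping(const int *pathkeys, int npathkeys,
						   const int *groupkeys, int ngroupkeys)
{
	int		n = 0;

	for (int i = 0; i < npathkeys; i++)
	{
		int		found = 0;

		for (int j = 0; j < ngroupkeys; j++)
		{
			if (pathkeys[i] == groupkeys[j])
			{
				found = 1;
				break;
			}
		}
		if (!found)
			break;				/* first pathkey without a match ends the prefix */
		n++;
	}
	return n;
}

int
main(void)
{
	int		pathkeys[] = {'a', 'b', 'c'};
	int		groupkeys[] = {'a', 'b', 'e'};

	/* the comment's example: pathkeys (a,b,c) vs group keys (a,b,e) -> 2 */
	printf("%d\n", useful_prefix_for_grouping(pathkeys, 3, groupkeys, 3));
	return 0;
}
```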
@@ -3507,7 +3507,7 @@ transformJsonOutput(ParseState *pstate, const JsonOutput *output,
 }

 /*
- * Transform JSON output clause of JSON contructor functions.
+ * Transform JSON output clause of JSON constructor functions.
  *
  * Derive RETURNING type, if not specified, from argument types.
  */
@@ -195,9 +195,9 @@ bbsink_server_end_archive(bbsink *sink)

 /*
  * We intentionally don't use data_sync_elevel here, because the server
- * shouldn't PANIC just because we can't guarantee the the backup has been
- * written down to disk. Running recovery won't fix anything in this case
- * anyway.
+ * shouldn't PANIC just because we can't guarantee that the backup has
+ * been written down to disk. Running recovery won't fix anything in this
+ * case anyway.
  */
 if (FileSync(mysink->file, WAIT_EVENT_BASEBACKUP_SYNC) < 0)
     ereport(ERROR,
@@ -1876,7 +1876,7 @@ ReorderBufferStreamCommit(ReorderBuffer *rb, ReorderBufferTXN *txn)
  * xid 502 which is not visible to our snapshot. And when we will try to
  * decode with that catalog tuple, it can lead to a wrong result or a crash.
  * So, it is necessary to detect concurrent aborts to allow streaming of
- * in-progress transactions or decoding of prepared transactions.
+ * in-progress transactions or decoding of prepared transactions.
  *
  * For detecting the concurrent abort we set CheckXidAlive to the current
  * (sub)transaction's xid for which this change belongs to. And, during
@@ -247,7 +247,7 @@ ReplicationSlotValidateName(const char *name, int elevel)
  * to be enabled only at the slot creation time. If we allow this option
  * to be changed during decoding then it is quite possible that we skip
  * prepare first time because this option was not enabled. Now next time
- * during getting changes, if the two_phase option is enabled it can skip
+ * during getting changes, if the two_phase option is enabled it can skip
  * prepare because by that time start decoding point has been moved. So the
  * user will only get commit prepared.
  */
@@ -2338,7 +2338,7 @@ GetSnapshotData(Snapshot snapshot)

 /*
  * We don't include our own XIDs (if any) in the snapshot. It
- * needs to be includeded in the xmin computation, but we did so
+ * needs to be included in the xmin computation, but we did so
  * outside the loop.
  */
 if (pgxactoff == mypgxactoff)
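The comment's distinction is subtle enough to merit a toy model: our own xid participates in the xmin computation but is left out of the snapshot's xid array. A minimal sketch with invented types — real snapshots track much more, and xid comparison is modular, which this toy version ignores:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;

/*
 * Toy model of the rule in the comment: xmin is computed over every
 * running xid, our own included, but our own xid is skipped when the
 * snapshot's xid array is filled.
 */
static int
fill_snapshot(const Xid *running, int nrunning, int myindex,
			  Xid *xip, Xid *xmin_out)
{
	Xid		xmin = UINT32_MAX;
	int		count = 0;

	/* xmin pass: all entries participate, including ours */
	for (int i = 0; i < nrunning; i++)
		if (running[i] < xmin)
			xmin = running[i];

	/* snapshot pass: our own entry is excluded */
	for (int i = 0; i < nrunning; i++)
	{
		if (i == myindex)
			continue;
		xip[count++] = running[i];
	}

	*xmin_out = xmin;
	return count;
}

int
main(void)
{
	Xid		running[] = {731, 728, 735};
	Xid		xip[3];
	Xid		xmin;
	int		n = fill_snapshot(running, 3, 1, xip, &xmin);

	/* prints xmin=728 with 2 xids: our own xid (728) still bounds xmin */
	printf("xmin=%u, %d xids\n", (unsigned) xmin, n);
	return 0;
}
```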
@@ -248,7 +248,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 dict = lookup_ts_dictionary_cache(ld->curDictId);

 /*
- * Dictionary ld->curDictId asks us about following words
+ * Dictionary ld->curDictId asks us about following words
  */

 while (ld->curSub)
@@ -59,9 +59,9 @@ convert_and_check_filename(text *arg)
 canonicalize_path(filename); /* filename can change length here */

 /*
- * Roles with privleges of the 'pg_read_server_files' role are allowed to access
- * any files on the server as the PG user, so no need to do any further checks
- * here.
+ * Roles with privileges of the 'pg_read_server_files' role are allowed to
+ * access any files on the server as the PG user, so no need to do any
+ * further checks here.
  */
 if (has_privs_of_role(GetUserId(), ROLE_PG_READ_SERVER_FILES))
     return filename;
@@ -3878,7 +3878,7 @@ lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start)
 Point p;

 /*
- * if X-intersection wasn't found then check central point of tested
+ * if X-intersection wasn't found, then check central point of tested
  * segment. In opposite case we already check all subsegments
  */
 p.x = float8_div(float8_pl(t.p[0].x, t.p[1].x), 2.0);
@@ -991,7 +991,7 @@ search_locale_enum(LPWSTR pStr, DWORD dwFlags, LPARAM lparam)
 test_locale, LOCALE_NAME_MAX_LENGTH))
 {
 /*
- * If the enumerated locale does not have a hyphen ("en") OR the
+ * If the enumerated locale does not have a hyphen ("en") OR the
  * lc_message input does not have an underscore ("English"), we only
  * need to compare the <Language> tags.
  */
@@ -261,7 +261,7 @@ parse_or_operator(TSQueryParserState pstate)
 /*
  * Suppose, we found an operand, but could be a not correct operand.
  * So we still treat OR literal as operation with possibly incorrect
- * operand and will not search it as lexeme
+ * operand and will not search it as lexeme
  */
 if (!t_isspace(ptr))
     break;
src/backend/utils/cache/relmapper.c
@@ -1044,7 +1044,7 @@ perform_relmap_update(bool shared, const RelMapFile *updates)
 (shared ? "global" : DatabasePath));

 /*
- * We succesfully wrote the updated file, so it's now safe to rely on the
+ * We successfully wrote the updated file, so it's now safe to rely on the
  * new values in this process, too.
  */
 if (shared)
@@ -1093,7 +1093,7 @@ relmap_redo(XLogReaderState *record)
  * an existing database as we do for creating a new database. In
  * the latter case, taking the relmap log and sending sinval messages
  * is unnecessary, but harmless. If we wanted to avoid it, we could
- * add a flag to the WAL record to indicate which opration is being
+ * add a flag to the WAL record to indicate which operation is being
  * performed.
  */
 LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
@@ -4,7 +4,7 @@
  * CSV logging
  *
  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of Californi
+ * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
@@ -2269,7 +2269,7 @@ write_console(const char *line, int len)
 /*
  * Conversion on non-win32 platforms is not implemented yet. It requires
  * non-throw version of pg_do_encoding_conversion(), that converts
- * unconvertable characters to '?' without errors.
+ * unconvertible characters to '?' without errors.
  *
  * XXX: We have a no-throw version now. It doesn't convert to '?' though.
  */
@@ -4,7 +4,7 @@
  * JSON logging
  *
  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of Californi
+ * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
@@ -69,7 +69,7 @@ static TypeFuncClass get_type_func_class(Oid typid, Oid *base_typeid);
  * descriptor coming from expectedDesc, which is the tuple descriptor
  * expected by the caller. SRF_SINGLE_BLESS can be set to complete the
  * information associated to the tuple descriptor, which is necessary
- * in some cases where the tuple descriptor comes from a transient
+ * in some cases where the tuple descriptor comes from a transient
  * RECORD datatype.
  */
 void
@@ -27,7 +27,7 @@
  * context's 'freeblock' field. If the freeblock field is already occupied
  * by another free block we simply return the newly empty block to malloc.
  *
- * This approach to free blocks requires fewer malloc/free calls for truely
+ * This approach to free blocks requires fewer malloc/free calls for truly
  * first allocated, first free'd allocation patterns.
  *
  *-------------------------------------------------------------------------
@@ -1464,7 +1464,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
 GetCopyDataEnd(r, copybuf, cursor);

 /*
- * The server shouldn't send progres report messages too
+ * The server shouldn't send progress report messages too
  * often, so we force an update each time we receive one.
  */
 progress_report(state->tablespacenum, true, false);
@@ -3450,7 +3450,7 @@ discardUntilSync(CState *st)
 PQclear(res);
 }

-/* exit pipline */
+/* exit pipeline */
 if (PQexitPipelineMode(st->con) != 1)
 {
     pg_log_error("client %d aborted: failed to exit pipeline mode for rolling back the failed transaction",
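For readers unfamiliar with the libpq calls in this hunk, the sequence below is a minimal pipeline round trip — enter pipeline mode, queue queries, sync, drain results until the PGRES_PIPELINE_SYNC marker, then exit, which is the point where discardUntilSync's error check applies. The connection string is a placeholder:

```c
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* placeholder */

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	if (PQenterPipelineMode(conn) != 1)
	{
		fprintf(stderr, "could not enter pipeline mode\n");
		PQfinish(conn);
		return 1;
	}

	/* queue two queries without waiting for their results */
	PQsendQueryParams(conn, "SELECT 1", 0, NULL, NULL, NULL, NULL, 0);
	PQsendQueryParams(conn, "SELECT 2", 0, NULL, NULL, NULL, NULL, 0);
	PQpipelineSync(conn);

	/* drain results until the sync marker arrives */
	for (;;)
	{
		PGresult   *res = PQgetResult(conn);

		if (res == NULL)
			continue;			/* NULL separates one query's results from the next */
		if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
		{
			PQclear(res);
			break;
		}
		PQclear(res);
	}

	/* as in discardUntilSync(): exiting must succeed once the pipeline is drained */
	if (PQexitPipelineMode(conn) != 1)
		fprintf(stderr, "failed to exit pipeline mode: %s", PQerrorMessage(conn));

	PQfinish(conn);
	return 0;
}
```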
@@ -7261,7 +7261,7 @@ main(int argc, char **argv)

 /*
  * All connections should be already closed in threadRun(), so this
- * disconnect_all() will be a no-op, but clean up the connecions just to
+ * disconnect_all() will be a no-op, but clean up the connections just to
  * be sure. We don't need to measure the disconnection delays here.
  */
 disconnect_all(state, nclients);
@@ -653,7 +653,8 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
  *
  * Make sure there's always space for four more bytes in the
  * buffer, plus a NUL terminator. That way, an EOF marker is
- * never split across two fgets() calls, which simplies the logic.
+ * never split across two fgets() calls, which simplifies the
+ * logic.
  */
 if (buflen >= COPYBUFSIZ - 5 || (copydone && buflen > 0))
 {
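The buffering rule in this comment can be demonstrated in isolation: flushing while four bytes plus a NUL still fit guarantees the next fgets() can always take in a complete "\." end-of-data marker, so the marker check never has to handle a split across reads. A simplified sketch — stdout stands in for PQputCopyData, and psql's real handling of binary COPY and prompts is omitted:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define COPYBUFSIZ 8192

static void
copy_in_sketch(FILE *stream)
{
	char	buf[COPYBUFSIZ];
	int		buflen = 0;
	bool	at_line_start = true;

	while (fgets(buf + buflen, COPYBUFSIZ - buflen, stream) != NULL)
	{
		char   *chunk = buf + buflen;

		/* EOF marker is only recognized on a line of its own */
		if (at_line_start &&
			(strcmp(chunk, "\\.\n") == 0 || strcmp(chunk, "\\.") == 0))
			break;

		buflen += (int) strlen(chunk);
		at_line_start = (buf[buflen - 1] == '\n');

		/* flush early enough that 4 bytes plus a NUL always remain free */
		if (buflen >= COPYBUFSIZ - 5)
		{
			fwrite(buf, 1, buflen, stdout);		/* stand-in for PQputCopyData */
			buflen = 0;
		}
	}
	if (buflen > 0)
		fwrite(buf, 1, buflen, stdout);
}

int
main(void)
{
	copy_in_sketch(stdin);
	return 0;
}
```

Because we flush whenever fewer than five bytes remain, every fgets() call has room for the longest marker form ("\\.\n" plus terminator) in one read, which is exactly the simplification the fixed comment points out.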
@@ -3080,7 +3080,7 @@ describeOneTableDetails(const char *schemaname,
  * servers between v11 and v14, though these must still be shown to
  * the user. So we use another property that is true for such
  * inherited triggers to avoid them being hidden, which is their
- * dependendence on another trigger.
+ * dependence on another trigger.
  */
 if (pset.sversion >= 110000 && pset.sversion < 150000)
     appendPQExpBufferStr(&buf, "(NOT t.tgisinternal OR (t.tgisinternal AND t.tgenabled = 'D') \n"
@@ -2257,7 +2257,7 @@ psql_completion(const char *text, int start, int end)
 COMPLETE_WITH("COLUMN", "CONSTRAINT", "CHECK", "UNIQUE", "PRIMARY KEY",
               "EXCLUDE", "FOREIGN KEY");
 }
-/* ATER TABLE xxx ADD [COLUMN] yyy */
+/* ALTER TABLE xxx ADD [COLUMN] yyy */
 else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN", MatchAny) ||
          (Matches("ALTER", "TABLE", MatchAny, "ADD", MatchAny) &&
           !Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN|CONSTRAINT|CHECK|UNIQUE|PRIMARY|EXCLUDE|FOREIGN")))
@@ -24,7 +24,7 @@
  * function will have a shim set up by sort support automatically. However,
  * opclasses that support the optional additional abbreviated key capability
  * must always provide an authoritative comparator used to tie-break
- * inconclusive abbreviated comparisons and also used when aborting
+ * inconclusive abbreviated comparisons and also used when aborting
  * abbreviation. Furthermore, a converter and abort/costing function must be
  * provided.
  *
@@ -6,7 +6,7 @@
 # Perl script that tries to add PGDLLIMPORT markings to PostgreSQL
 # header files.
 #
-# This relies on a few idiosyncracies of the PostgreSQL cding style,
+# This relies on a few idiosyncracies of the PostgreSQL coding style,
 # such as the fact that we always use "extern" in function
 # declarations, and that we don't use // comments. It's not very
 # smart and may not catch all cases.