mirror of https://github.com/postgres/postgres.git synced 2025-06-10 09:21:54 +03:00

Fix typos and grammar in the code

The large majority of these have been introduced by recent commits done
in the v18 development cycle.

Author: Alexander Lakhin <exclusion@gmail.com>
Discussion: https://postgr.es/m/9a7763ab-5252-429d-a943-b28941e0e28b@gmail.com
Committed by Michael Paquier on 2025-04-19 19:17:42 +09:00
parent 114f7fa81c
commit 88e947136b
45 changed files with 68 additions and 69 deletions

View File

@@ -16,7 +16,7 @@
 #include "utils/relcache.h"
 #include "miscadmin.h"
-/* Typedefs for callback functions for amcheck_lock_relation */
+/* Typedefs for callback functions for amcheck_lock_relation_and_check */
 typedef void (*IndexCheckableCallback) (Relation index);
 typedef void (*IndexDoCheckCallback) (Relation rel,
 Relation heaprel,

View File

@@ -359,8 +359,8 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
 ptr->depth = stack->depth + 1;
 /*
- * Set rightmost parent key to invalid iterm pointer. Its
- * value is 'Infinity' and not explicitly stored.
+ * Set rightmost parent key to invalid item pointer. Its value
+ * is 'Infinity' and not explicitly stored.
  */
 if (rightlink == InvalidBlockNumber)
 ItemPointerSetInvalid(&ptr->parentkey);
@@ -587,7 +587,7 @@ gin_check_parent_keys_consistency(Relation rel,
 /*
  * Check if it is properly adjusted. If succeed,
- * procced to the next key.
+ * proceed to the next key.
  */
 if (ginCompareEntries(&state, attnum, current_key,
 current_key_category, parent_key,

View File

@@ -289,7 +289,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
  *
  * Returns NUMA node ID for each memory page used by the buffer. Buffers may
  * be smaller or larger than OS memory pages. For each buffer we return one
- * entry for each memory page used by the buffer (it fhe buffer is smaller,
+ * entry for each memory page used by the buffer (if the buffer is smaller,
  * it only uses a part of one memory page).
  *
  * We expect both sizes (for buffers and memory pages) to be a power-of-2, so
@@ -335,7 +335,7 @@ pg_buffercache_numa_pages(PG_FUNCTION_ARGS)
  * how the pages and buffers "align" in memory - the buffers may be
  * shifted in some way, using more memory pages than necessary.
  *
- * So we need to be careful about mappping buffers to memory pages. We
+ * So we need to be careful about mapping buffers to memory pages. We
  * calculate the maximum number of pages a buffer might use, so that
  * we allocate enough space for the entries. And then we count the
  * actual number of entries as we scan the buffers.
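As a side note, the arithmetic this comment describes can be sketched as follows. This is an illustration only, not the pg_buffercache implementation; the function name is hypothetical, and it assumes power-of-2 sizes and that a buffer smaller than an OS page never crosses a page boundary (as the comment above states).

#include <stddef.h>

/*
 * Hypothetical sketch: upper bound on the number of OS memory pages that a
 * single buffer can touch.  A buffer no larger than a page is assumed to stay
 * within one page; a larger buffer can straddle one extra page when it is not
 * page-aligned.
 */
static inline int
max_pages_per_buffer(size_t buffer_size, size_t os_page_size)
{
	if (buffer_size <= os_page_size)
		return 1;
	return (int) (buffer_size / os_page_size) + 1;
}

For example, with 8 kB buffers and 4 kB OS pages this reserves room for 3 entries per buffer, even though an aligned buffer only touches 2 pages.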

View File

@@ -123,7 +123,7 @@ $$);
 RTI 1 (relation, inherited, in-from-clause):
 Eref: vegetables (id, name, genus)
 Relation: vegetables
-Relation Kind: parititioned_table
+Relation Kind: partitioned_table
 Relation Lock Mode: AccessShareLock
 Permission Info Index: 1
 RTI 2 (group):
@@ -250,7 +250,7 @@ $$);
 <In-From-Clause>true</In-From-Clause> +
 <Eref>vegetables (id, name, genus)</Eref> +
 <Relation>vegetables</Relation> +
-<Relation-Kind>parititioned_table</Relation-Kind> +
+<Relation-Kind>partitioned_table</Relation-Kind> +
 <Relation-Lock-Mode>AccessShareLock</Relation-Lock-Mode> +
 <Permission-Info-Index>1</Permission-Info-Index> +
 <Security-Barrier>false</Security-Barrier> +
@@ -454,7 +454,7 @@ SELECT * FROM vegetables WHERE genus = 'daucus';
 RTI 1 (relation, inherited, in-from-clause):
 Eref: vegetables (id, name, genus)
 Relation: vegetables
-Relation Kind: parititioned_table
+Relation Kind: partitioned_table
 Relation Lock Mode: AccessShareLock
 Permission Info Index: 1
 RTI 2 (relation, in-from-clause):
@@ -478,7 +478,7 @@ INSERT INTO vegetables (name, genus) VALUES ('broccoflower', 'brassica');
 RTI 1 (relation):
 Eref: vegetables (id, name, genus)
 Relation: vegetables
-Relation Kind: parititioned_table
+Relation Kind: partitioned_table
 Relation Lock Mode: RowExclusiveLock
 Permission Info Index: 1
 RTI 2 (result):

View File

@@ -277,7 +277,7 @@ overexplain_per_plan_hook(PlannedStmt *plannedstmt,
  * Print out various details from the PlannedStmt that wouldn't otherwise
  * be displayed.
  *
- * We don't try to print everything here. Information that would be displyed
+ * We don't try to print everything here. Information that would be displayed
  * anyway doesn't need to be printed again here, and things with lots of
  * substructure probably should be printed via separate options, or not at all.
  */
@@ -517,10 +517,10 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
 relkind = "foreign_table";
 break;
 case RELKIND_PARTITIONED_TABLE:
-relkind = "parititioned_table";
+relkind = "partitioned_table";
 break;
 case RELKIND_PARTITIONED_INDEX:
-relkind = "parititioned_index";
+relkind = "partitioned_index";
 break;
 case '\0':
 relkind = NULL;
@@ -632,7 +632,7 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
 }
 /*
- * add_rte_to_flat_rtable will clear coltypes, coltypemods, and
+ * add_rte_to_flat_rtable will clear coltypes, coltypmods, and
  * colcollations, so skip those fields.
  *
  * If this is an ephemeral named relation, print out ENR-related
@@ -675,7 +675,7 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
  * Emit a text property describing the contents of an Alias.
  *
  * Column lists can be quite long here, so perhaps we should have an option
- * to limit the display length by # of columsn or # of characters, but for
+ * to limit the display length by # of columns or # of characters, but for
  * now, just display everything.
  */
 static void

View File

@@ -3,7 +3,7 @@
 # Test SCRAM authentication when opening a new connection with a foreign
 # server.
 #
-# The test is executed by testing the SCRAM authentifcation on a looplback
+# The test is executed by testing the SCRAM authentifcation on a loopback
 # connection on the same server and with different servers.
 use strict;

View File

@@ -1315,7 +1315,7 @@ PostgreSQL documentation
 </para>
 <para>
 The data section contains actual table data, large-object
-contents, statitistics for tables and materialized views and
+contents, statistics for tables and materialized views and
 sequence values.
 Post-data items include definitions of indexes, triggers, rules,
 statistics for indexes, and constraints other than validated check

View File

@@ -353,7 +353,7 @@ make check-world PG_TEST_EXTRA='kerberos ldap ssl load_balance libpq_encryption'
 <listitem>
 <para>
 Runs the test suite under <filename>src/test/modules/oauth_validator</filename>.
-This opens TCP/IP listen sockets for a test-server running HTTPS.
+This opens TCP/IP listen sockets for a test server running HTTPS.
 </para>
 </listitem>
 </varlistentry>

View File

@@ -167,7 +167,7 @@ typedef struct
 /*
  * The sortstate used only within a single worker for the first merge pass
- * happenning there. In principle it doesn't need to be part of the build
+ * happening there. In principle it doesn't need to be part of the build
  * state and we could pass it around directly, but it's more convenient
  * this way. And it's part of the build state, after all.
  */
@@ -1306,7 +1306,7 @@ GinBufferIsEmpty(GinBuffer *buffer)
  * Compare if the tuple matches the already accumulated data in the GIN
  * buffer. Compare scalar fields first, before the actual key.
  *
- * Returns true if the key matches, and the TID belonds to the buffer, or
+ * Returns true if the key matches, and the TID belongs to the buffer, or
  * false if the key does not match.
  */
 static bool
@@ -1497,7 +1497,7 @@ GinBufferStoreTuple(GinBuffer *buffer, GinTuple *tup)
 buffer->items = repalloc(buffer->items,
 (buffer->nitems + tup->nitems) * sizeof(ItemPointerData));
-new = ginMergeItemPointers(&buffer->items[buffer->nfrozen], /* first unfronzen */
+new = ginMergeItemPointers(&buffer->items[buffer->nfrozen], /* first unfrozen */
 (buffer->nitems - buffer->nfrozen), /* num of unfrozen */
 items, tup->nitems, &nnew);
@@ -1531,7 +1531,7 @@ GinBufferReset(GinBuffer *buffer)
 pfree(DatumGetPointer(buffer->key));
 /*
- * Not required, but makes it more likely to trigger NULL derefefence if
+ * Not required, but makes it more likely to trigger NULL dereference if
  * using the value incorrectly, etc.
  */
 buffer->key = (Datum) 0;
@@ -1603,7 +1603,7 @@ GinBufferCanAddKey(GinBuffer *buffer, GinTuple *tup)
  *
  * After waiting for all workers to finish, merge the per-worker results into
  * the complete index. The results from each worker are sorted by block number
- * (start of the page range). While combinig the per-worker results we merge
+ * (start of the page range). While combining the per-worker results we merge
  * summaries for the same page range, and also fill-in empty summaries for
  * ranges without any tuples.
  *

View File

@@ -1792,7 +1792,7 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
 truncatt = BTreeTupleGetNAtts(itup, rel);
 pstate.forcenonrequired = false;
-pstate.startikey = 0; /* _bt_set_startikey ignores HIKEY */
+pstate.startikey = 0; /* _bt_set_startikey ignores P_HIKEY */
 _bt_checkkeys(scan, &pstate, arrayKeys, itup, truncatt);
 }

View File

@@ -473,7 +473,7 @@ typedef struct XLogCtlData
 XLogRecPtr InitializedFrom;
 /*
- * Latest reserved for inititalization page in the cache (last byte
+ * Latest reserved for initialization page in the cache (last byte
  * position + 1).
  *
  * To change the identity of a buffer, you need to advance
@@ -2221,7 +2221,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
  * m must observe f[k] == false. Otherwise, it will later attempt
  * CAS(v, k, k + 1) with success.
  * 4. Therefore, corresponding read_barrier() (while j == k) on
- * process m happend before write_barrier() of process k. But then
+ * process m reached before write_barrier() of process k. But then
  * process k attempts CAS(v, k, k + 1) after process m successfully
  * incremented v to k, and that CAS operation must succeed.
  * That leads to a contradiction. So, there is no such k (k < n)
@@ -2253,7 +2253,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
 if (pg_atomic_read_u64(&XLogCtl->xlblocks[nextidx]) != NewPageEndPtr)
 {
 /*
- * Page at nextidx wasn't initialized yet, so we cann't move
+ * Page at nextidx wasn't initialized yet, so we can't move
  * InitializedUpto further. It will be moved by backend which
  * will initialize nextidx.
  */

View File

@@ -143,7 +143,7 @@ IsCatalogRelationOid(Oid relid)
  *
  * The relcache must not use these indexes. Inserting into any UNIQUE
  * index compares index keys while holding BUFFER_LOCK_EXCLUSIVE.
- * bttextcmp() can search the COLLID catcache. Depending on concurrent
+ * bttextcmp() can search the COLLOID catcache. Depending on concurrent
  * invalidation traffic, catcache can reach relcache builds. A backend
  * would self-deadlock on LWLocks if the relcache build read the
  * exclusive-locked buffer.

View File

@@ -11999,7 +11999,7 @@ DropForeignKeyConstraintTriggers(Relation trigrel, Oid conoid, Oid confrelid,
 if (OidIsValid(confrelid) && trgform->tgrelid != confrelid)
 continue;
-/* We should be droping trigger related to foreign key constraint */
+/* We should be dropping trigger related to foreign key constraint */
 Assert(trgform->tgfoid == F_RI_FKEY_CHECK_INS ||
 trgform->tgfoid == F_RI_FKEY_CHECK_UPD ||
 trgform->tgfoid == F_RI_FKEY_CASCADE_DEL ||

View File

@@ -1861,7 +1861,7 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
 MemoryContext oldContext;
 /*
- * CheckConstraintFetch let this pass with only a warning, but now we
+ * CheckNNConstraintFetch let this pass with only a warning, but now we
  * should fail rather than possibly failing to enforce an important
  * constraint.
  */

View File

@@ -1778,7 +1778,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
  * Updates the PartitionPruneState found at given part_prune_index in
  * EState.es_part_prune_states for use during "exec" pruning if required.
  * Also returns the set of subplans to initialize that would be stored at
- * part_prune_index in EState.es_part_prune_result by
+ * part_prune_index in EState.es_part_prune_results by
  * ExecDoInitialPruning(). Maps in PartitionPruneState are updated to
  * account for initial pruning possibly having eliminated some of the
  * subplans.
@@ -2109,7 +2109,7 @@ CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo,
  */
 partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
-/* Remember for InitExecPartitionPruneContext(). */
+/* Remember for InitExecPartitionPruneContexts(). */
 pprune->partrel = partrel;
 partkey = RelationGetPartitionKey(partrel);

View File

@@ -1311,7 +1311,7 @@ ExecInsert(ModifyTableContext *context,
 /*
  * Convert the OLD tuple to the new partition's format/slot, if
- * needed. Note that ExceDelete() already converted it to the
+ * needed. Note that ExecDelete() already converted it to the
  * root's partition's format/slot.
  */
 oldSlot = context->cpDeletedSlot;

View File

@@ -100,7 +100,7 @@ SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
  * ExecSeqScan(node)
  *
  * Scans the relation sequentially and returns the next qualifying
- * tuple. This variant is used when there is no es_eqp_active, no qual
+ * tuple. This variant is used when there is no es_epq_active, no qual
  * and no projection. Passing const-NULLs for these to ExecScanExtended
  * allows the compiler to eliminate the additional code that would
  * ordinarily be required for the evaluation of these.

View File

@@ -357,7 +357,7 @@ AppendJumble64(JumbleState *jstate, const unsigned char *value)
 /*
  * FlushPendingNulls
- * Incorporate the pending_null value into the jumble buffer.
+ * Incorporate the pending_nulls value into the jumble buffer.
  *
  * Note: Callers must ensure that there's at least 1 pending NULL.
  */

View File

@@ -2718,7 +2718,7 @@ HandleFatalError(QuitSignalReason reason, bool consider_sigabrt)
 /*
  * Choose the appropriate new state to react to the fatal error. Unless we
  * were already in the process of shutting down, we go through
- * PM_WAIT_BACKEND. For errors during the shutdown sequence, we directly
+ * PM_WAIT_BACKENDS. For errors during the shutdown sequence, we directly
  * switch to PM_WAIT_DEAD_END.
  */
 switch (pmState)
@@ -3001,7 +3001,7 @@ PostmasterStateMachine(void)
 /*
  * Stop any dead-end children and stop creating new ones.
  *
- * NB: Similar code exists in HandleFatalErrors(), when the
+ * NB: Similar code exists in HandleFatalError(), when the
  * error happens in pmState > PM_WAIT_BACKENDS.
  */
 UpdatePMState(PM_WAIT_DEAD_END);
@@ -3082,7 +3082,7 @@ PostmasterStateMachine(void)
 {
 /*
  * PM_WAIT_IO_WORKERS state ends when there's only checkpointer and
- * dead_end children left.
+ * dead-end children left.
  */
 if (io_worker_count == 0)
 {

View File

@@ -103,7 +103,7 @@ pgaio_io_set_handle_data_32(ioh, (uint32 *) buffer, 1);
  *
  * E.g. md.c needs to translate block numbers into offsets in segments.
  *
- * Once the IO handle has been handed off to smgstartreadv(), it may not
+ * Once the IO handle has been handed off to smgrstartreadv(), it may not
  * further be used, as the IO may immediately get executed below
  * smgrstartreadv() and the handle reused for another IO.
  *

View File

@@ -321,7 +321,7 @@ pgaio_worker_die(int code, Datum arg)
 }
 /*
- * Register the worker in shared memory, assign MyWorkerId and register a
+ * Register the worker in shared memory, assign MyIoWorkerId and register a
  * shutdown callback to release registration.
  */
 static void

View File

@@ -4970,7 +4970,7 @@ FlushRelationBuffers(Relation rel)
 ResourceOwnerEnlarge(CurrentResourceOwner);
 /*
- * Pin/upin mostly to make valgrind work, but it also seems
+ * Pin/unpin mostly to make valgrind work, but it also seems
  * like the right thing to do.
  */
 PinLocalBuffer(bufHdr, false);

View File

@@ -88,7 +88,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
  * To allow the caller to report statistics about checksum failures,
  * *checksum_failure_p can be passed in. Note that there may be checksum
  * failures even if this function returns true, due to
- * IGNORE_CHECKSUM_FAILURE.
+ * PIV_IGNORE_CHECKSUM_FAILURE.
  */
 bool
 PageIsVerified(PageData *page, BlockNumber blkno, int flags, bool *checksum_failure_p)

View File

@@ -323,8 +323,8 @@ pg_log_backend_memory_contexts(PG_FUNCTION_ARGS)
  * Signal a backend or an auxiliary process to send its memory contexts,
  * wait for the results and display them.
  *
- * By default, only superusers or users with PG_READ_ALL_STATS are allowed to
- * signal a process to return the memory contexts. This is because allowing
+ * By default, only superusers or users with ROLE_PG_READ_ALL_STATS are allowed
+ * to signal a process to return the memory contexts. This is because allowing
  * any users to issue this request at an unbounded rate would cause lots of
  * requests to be sent, which can lead to denial of service. Additional roles
  * can be permitted with GRANT.
@@ -495,7 +495,7 @@ pg_get_process_memory_contexts(PG_FUNCTION_ARGS)
  * statistics are available within the allowed time then display
  * previously published statistics if there are any. If no
  * previous statistics are available then return NULL. The timer
- * is defined in milliseconds since thats what the condition
+ * is defined in milliseconds since that's what the condition
  * variable sleep uses.
  */
 if (ConditionVariableTimedSleep(&memCxtState[procNumber].memcxt_cv,

View File

@@ -19,7 +19,7 @@
  * immediately.
  *
  * The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are
- * permanentaly set to "C", and then we use temporary locale_t
+ * permanently set to "C", and then we use temporary locale_t
  * objects when we need to look up locale data based on the GUCs
  * of the same name. Information is cached when the GUCs change.
  * The cached information is only used by the formatting functions

View File

@@ -1271,7 +1271,7 @@ UpdateCachedPlan(CachedPlanSource *plansource, int query_index,
 /*
  * XXX Should this also (re)set the properties of the CachedPlan that are
  * set in BuildCachedPlan() after creating the fresh plans such as
- * planRoleId, dependsOnRole, and save_xmin?
+ * planRoleId, dependsOnRole, and saved_xmin?
  */
 /*

View File

@@ -910,7 +910,7 @@ MemoryContextStatsDetail(MemoryContext context,
  *
  * Print stats for this context if possible, but in any case accumulate counts
  * into *totals (if not NULL). The callers should make sure that print_location
- * is set to PRINT_STATS_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
+ * is set to PRINT_STATS_TO_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
  */
 static void
 MemoryContextStatsInternal(MemoryContext context, int level,

View File

@@ -384,7 +384,7 @@ $node->command_fails_like(
 '--format' => 'custom',
 '-d' => 'dbpq', ],
 qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
-'When non-exist datbase is given with -d option in pg_restore with dump of pg_dumpall');
+'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
 $node->stop('fast');

View File

@@ -36,7 +36,7 @@ static void transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_
  *
  * // be sure to sync any remaining files in the queue
  * sync_queue_sync_all();
- * synq_queue_destroy();
+ * sync_queue_destroy();
  */
 #define SYNC_QUEUE_MAX_LEN (1024)

View File

@@ -72,7 +72,7 @@ sub filter_dump
 # adjust_child_columns is passed to adjust_regress_dumpfile() which actually
 # adjusts the dump output.
 #
-# The name of the file containting adjusted dump is returned.
+# The name of the file containing adjusted dump is returned.
 sub get_dump_for_comparison
 {
 my ($node, $db, $file_prefix, $adjust_child_columns) = @_;

View File

@@ -1459,7 +1459,7 @@ DescribeQuery(const char *query, double *elapsed_msec)
  *
  * If a synchronisation point is found, we can stop discarding results as
  * the pipeline will switch back to a clean state. If no synchronisation
- * point is available, we need to stop when ther are no more pending
+ * point is available, we need to stop when there are no more pending
  * results, otherwise, calling PQgetResult() would block.
  */
 static PGresult *

View File

@@ -197,7 +197,7 @@ $node->command_fails_like(
 'postgres',
 ],
 qr/cannot vacuum specific table\(s\) and exclude schema\(s\) at the same time/,
-'cannot use options --excludes-chema and ---table at the same time');
+'cannot use options --exclude-schema and ---table at the same time');
 $node->command_fails_like(
 [
 'vacuumdb',

View File

@@ -7,8 +7,8 @@
  * src/include/access/gin.h
  *--------------------------------------------------------------------------
  */
-#ifndef GIN_TUPLE_
-#define GIN_TUPLE_
+#ifndef GIN_TUPLE_H
+#define GIN_TUPLE_H
 #include "access/ginblock.h"
 #include "storage/itemptr.h"

View File

@@ -1592,7 +1592,7 @@ typedef struct
  * equivalent and closely-related orderings. (See optimizer/README for more
  * information.)
  *
- * Note: pk_strategy is either COMPARE_LT (for ASC) or COMPARE_GT (for DESC).
+ * Note: pk_cmptype is either COMPARE_LT (for ASC) or COMPARE_GT (for DESC).
  */
 typedef struct PathKey
 {

View File

@@ -31,7 +31,7 @@ extern void pgaio_error_cleanup(void);
 extern void AtEOXact_Aio(bool is_commit);
-/* aio_worker.c */
+/* method_worker.c */
 extern bool pgaio_workers_enabled(void);
 #endif /* AIO_SUBSYS_H */

View File

@@ -1205,7 +1205,7 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
 res = epoll_ctl(actx->mux, op, socket, &ev);
 if (res < 0 && errno == EEXIST)
 {
-/* We already had this socket in the pollset. */
+/* We already had this socket in the poll set. */
 op = EPOLL_CTL_MOD;
 res = epoll_ctl(actx->mux, op, socket, &ev);
 }

View File

@@ -50,7 +50,7 @@ struct pg_cancel
  * retransmits */
 /* Pre-constructed cancel request packet starts here */
-int32 cancel_pkt_len; /* in network-byte-order */
+int32 cancel_pkt_len; /* in network byte order */
 char cancel_req[FLEXIBLE_ARRAY_MEMBER]; /* CancelRequestPacket */
 };

View File

@@ -693,7 +693,7 @@ pqDropServerData(PGconn *conn)
 conn->oauth_want_retry = false;
 /*
- * Cancel connections need to retain their be_pid and be_key across
+ * Cancel connections need to retain their be_pid and be_cancel_key across
  * PQcancelReset invocations, otherwise they would not have access to the
  * secret token of the connection they are supposed to cancel.
  */

View File

@@ -1486,7 +1486,7 @@ pqGetNegotiateProtocolVersion3(PGconn *conn)
 return 0;
 eof:
-libpq_append_conn_error(conn, "received invalid protocol negotation message: message too short");
+libpq_append_conn_error(conn, "received invalid protocol negotiation message: message too short");
 failure:
 conn->asyncStatus = PGASYNC_READY;
 pqSaveErrorResult(conn);

View File

@@ -216,7 +216,7 @@ pg_localeconv_copy_members(struct lconv *dst,
  * implied by the LC_MONETARY or LC_NUMERIC locale name. On Windows, LC_CTYPE
  * has to match to get sane results.
  *
- * To get predicable results on all platforms, we'll call the underlying
+ * To get predictable results on all platforms, we'll call the underlying
  * routines with LC_ALL set to the appropriate locale for each set of members,
  * and merge the results. Three members of the resulting object are therefore
  * guaranteed to be encoded with LC_NUMERIC's codeset: "decimal_point",
@@ -224,7 +224,7 @@ pg_localeconv_copy_members(struct lconv *dst,
  * LC_MONETARY's codeset.
  *
  * Returns 0 on success. Returns non-zero on failure, and sets errno. On
- * success, the caller is responsible for calling pg_localeconf_free() on the
+ * success, the caller is responsible for calling pg_localeconv_free() on the
  * output struct to free the string members it contains.
  */
 int

View File

@@ -457,7 +457,7 @@ pg_popcount_masked_neon(const char *buf, int bytes, bits8 mask)
 popcnt += vaddvq_u64(vaddq_u64(accum3, accum4));
 /*
- * Process remining 8-byte blocks.
+ * Process remaining 8-byte blocks.
  */
 for (; bytes >= sizeof(uint64); bytes -= sizeof(uint64))
 {

View File

@@ -47,8 +47,7 @@ local all test oauth issuer="$issuer" scope="$scope"
 });
 $node->reload;
-my ($log_start, $log_end);
-$log_start = $node->wait_for_log(qr/reloading configuration files/);
+my $log_start = $node->wait_for_log(qr/reloading configuration files/);
 $ENV{PGOAUTHDEBUG} = "UNSAFE";

View File

@@ -560,7 +560,7 @@ INSERT INTO tmp_ok SELECT generate_series(1, 10000);
 qr/^t$/,
 qr/^$/);
-# Because local buffers don't use IO_IN_PROGRESS, a second StartLocalBufer
+# Because local buffers don't use IO_IN_PROGRESS, a second StartLocalBufferIO
 # succeeds as well. This test mostly serves as a documentation of that
 # fact. If we had actually started IO, it'd be different.
 psql_like(

View File

@@ -799,7 +799,7 @@ $logstart = -s $node_standby->logfile;
 reactive_slots_change_hfs_and_wait_for_xmins('no_conflict_', 'pruning_', 0,
 0);
-# Injection_point avoids seeing a xl_running_xacts. This is required because if
+# Injection point avoids seeing a xl_running_xacts. This is required because if
 # it is generated between the last two updates, then the catalog_xmin of the
 # active slot could be updated, and hence, the conflict won't occur. See
 # comments atop wait_until_vacuum_can_remove.

View File

@@ -80,7 +80,7 @@ $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION mysub1");
 # Test ALTER PUBLICATION RENAME command during the replication
 #
-# Test function for swaping name of publications
+# Test function for swapping name of publications
 sub test_swap
 {
 my ($table_name, $pubname, $appname) = @_;