Mirror of https://github.com/postgres/postgres.git
Fix a few duplicate words in comments
These are all new to v18.

Author: David Rowley <dgrowleyml@gmail.com>
Discussion: https://postgr.es/m/CAApHDvrMcr8XD107H3NV=WHgyBcu=sx5+7=WArr-n_cWUqdFXQ@mail.gmail.com
@@ -460,7 +460,7 @@ verify_heapam(PG_FUNCTION_ARGS)
 	else
 	{
 		/*
-		 * It would not be safe to naively use use batchmode, as
+		 * It would not be safe to naively use batchmode, as
 		 * heapcheck_read_stream_next_unskippable takes locks. It shouldn't be
 		 * too hard to convert though.
 		 */
@@ -450,8 +450,8 @@ pg_buffercache_numa_pages(PG_FUNCTION_ARGS)
 	 * locks, so the information of each buffer is self-consistent.
 	 *
 	 * This loop touches and stores addresses into os_page_ptrs[] as input
-	 * to one big big move_pages(2) inquiry system call. Basically we ask
-	 * for all memory pages for NBuffers.
+	 * to one big move_pages(2) inquiry system call. Basically we ask for
+	 * all memory pages for NBuffers.
 	 */
 	startptr = (char *) TYPEALIGN_DOWN(os_page_size, (char *) BufferGetBlock(1));
 	idx = 0;
@@ -2051,7 +2051,7 @@ _bt_preprocess_array_keys_final(IndexScanDesc scan, int *keyDataMap)
 * the scan's BTArrayKeyInfo array, which is guaranteed to be large enough to
 * fit every so->arrayKeys[] entry.
 *
-* Also sets *numSkipArrayKeys_out to the number of of skip arrays caller must
+* Also sets *numSkipArrayKeys_out to the number of skip arrays caller must
 * add to the scan keys it'll output. Caller must add this many skip arrays:
 * one array for each of the most significant attributes that lack a = input
 * key (IS NULL keys count as = input keys here). The specific attributes
@@ -2597,7 +2597,7 @@ _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)
 			continue;
 		}
 
-		/* Some = key (could be a a scalar = key, could be an array = key) */
+		/* Some = key (could be a scalar = key, could be an array = key) */
 		Assert(key->sk_strategy == BTEqualStrategyNumber);
 
 		if (!(key->sk_flags & SK_SEARCHARRAY))
@@ -1983,8 +1983,8 @@ ExecInitPartitionExecPruning(PlanState *planstate,
 	 * account for any that were removed due to initial pruning; refer to the
 	 * condition in InitExecPartitionPruneContexts() that is used to determine
 	 * whether to do this. If no exec pruning needs to be done, we would thus
-	 * leave the maps to be in an invalid invalid state, but that's ok since
-	 * that data won't be consulted again (cf initial Assert in
+	 * leave the maps to be in an invalid state, but that's ok since that data
+	 * won't be consulted again (cf initial Assert in
 	 * ExecFindMatchingSubPlans).
 	 */
 	if (prunestate->do_exec_prune)
@@ -551,7 +551,7 @@ PGLC_localeconv(void)
 				"could not get lconv for LC_MONETARY = \"%s\", LC_NUMERIC = \"%s\": %m",
 				locale_monetary, locale_numeric);
 
-	/* Must copy data now now so we can re-encode it. */
+	/* Must copy data now so we can re-encode it. */
 	extlconv = &tmp;
 	worklconv.decimal_point = strdup(extlconv->decimal_point);
 	worklconv.thousands_sep = strdup(extlconv->thousands_sep);
@@ -1478,7 +1478,7 @@ ProcessGetMemoryContextInterrupt(void)
 				 summary);
 
 	/*
-	 * Allocate memory in this process's DSA for storing statistics of the the
+	 * Allocate memory in this process's DSA for storing statistics of the
 	 * memory contexts upto max_stats, for contexts that don't fit within a
 	 * limit, a cumulative total is written as the last record in the DSA
 	 * segment.
@@ -1488,8 +1488,8 @@ ProcessGetMemoryContextInterrupt(void)
 	LWLockAcquire(&memCxtArea->lw_lock, LW_EXCLUSIVE);
 
 	/*
-	 * Create a DSA and send handle to the the client process after storing
-	 * the context statistics. If number of contexts exceed a predefined
+	 * Create a DSA and send handle to the client process after storing the
+	 * context statistics. If number of contexts exceed a predefined
 	 * limit(8MB), a cumulative total is stored for such contexts.
 	 */
 	if (memCxtArea->memstats_dsa_handle == DSA_HANDLE_INVALID)
@@ -424,8 +424,8 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
 			 * stage of pg_upgrade in swap mode, so we need to synchronize them
 			 * ourselves. We only do this for the catalog files because they were
 			 * created during pg_restore with fsync=off. We assume that the user
-			 * data files files were properly persisted to disk when the user last
-			 * shut it down.
+			 * data files were properly persisted to disk when the user last shut
+			 * it down.
 			 */
 			if (user_opts.do_sync)
 				sync_queue_push(dest);
@@ -159,7 +159,7 @@ struct PgAioTargetInfo
 {
 	/*
 	 * To support executing using worker processes, the file descriptor for an
-	 * IO may need to be be reopened in a different process.
+	 * IO may need to be reopened in a different process.
 	 */
 	void		(*reopen) (PgAioHandle *ioh);
 