mirror of https://github.com/postgres/postgres.git synced 2025-07-28 23:42:10 +03:00

Fix collection of typos and grammar mistakes in the tree, volume 2

This fixes some comments and documentation new as of Postgres 13, and is
a follow-up of the work done in dd0f37e.

Author: Justin Pryzby
Discussion: https://postgr.es/m/20200408165653.GF2228@telsasoft.com
This commit is contained in:
Michael Paquier
2020-04-14 14:45:43 +09:00
parent f762b2feba
commit 8128b0c152
11 changed files with 31 additions and 31 deletions

View File

@ -270,7 +270,7 @@ isCurrentGroup(IncrementalSortState *node, TupleTableSlot *pivot, TupleTableSlot
* verify they're all part of the same prefix key group before sorting them
* solely by unsorted suffix keys.
*
* While it's likely that all already fetch tuples are all part of a single
* While it's likely that all tuples already fetched are all part of a single
* prefix group, we also have to handle the possibility that there is at least
* one different prefix key group before the large prefix key group.
* ----------------------------------------------------------------
@ -381,7 +381,7 @@ switchToPresortedPrefixMode(PlanState *pstate)
* node->transfer_tuple slot, and, even though that slot
* points to memory inside the full sort tuplesort, we can't
* reset that tuplesort anyway until we've fully transferred
* out of its tuples, so this reference is safe. We do need to
* out its tuples, so this reference is safe. We do need to
* reset the group pivot tuple though since we've finished the
* current prefix key group.
*/
@ -603,7 +603,7 @@ ExecIncrementalSort(PlanState *pstate)
/*
* Initialize presorted column support structures for
* isCurrentGroup(). It's correct to do this along with the
* initial intialization for the full sort state (and not for the
* initial initialization for the full sort state (and not for the
* prefix sort state) since we always load the full sort state
* first.
*/
@ -723,7 +723,7 @@ ExecIncrementalSort(PlanState *pstate)
nTuples++;
/*
* If we've reach our minimum group size, then we need to
* If we've reached our minimum group size, then we need to
* store the most recent tuple as a pivot.
*/
if (nTuples == minGroupSize)
@ -752,7 +752,7 @@ ExecIncrementalSort(PlanState *pstate)
{
/*
* Since the tuple we fetched isn't part of the current
* prefix key group we don't want to sort it as part of
* prefix key group we don't want to sort it as part of
* the current batch. Instead we use the group_pivot slot
* to carry it over to the next batch (even though we
* won't actually treat it as a group pivot).
@ -792,12 +792,12 @@ ExecIncrementalSort(PlanState *pstate)
}
/*
* Unless we've alrady transitioned modes to reading from the full
* Unless we've already transitioned modes to reading from the full
* sort state, then we assume that having read at least
* DEFAULT_MAX_FULL_SORT_GROUP_SIZE tuples means it's likely we're
* processing a large group of tuples all having equal prefix keys
* (but haven't yet found the final tuple in that prefix key
* group), so we need to transition in to presorted prefix mode.
* group), so we need to transition into presorted prefix mode.
*/
if (nTuples > DEFAULT_MAX_FULL_SORT_GROUP_SIZE &&
node->execution_status != INCSORT_READFULLSORT)
@ -849,7 +849,7 @@ ExecIncrementalSort(PlanState *pstate)
/*
* We might have multiple prefix key groups in the full sort
* state, so the mode transition function needs to know the it
* state, so the mode transition function needs to know that it
* needs to move from the fullsort to presorted prefix sort.
*/
node->n_fullsort_remaining = nTuples;
@ -913,7 +913,7 @@ ExecIncrementalSort(PlanState *pstate)
/*
* If the tuple's prefix keys match our pivot tuple, we're not
* done yet and can load it into the prefix sort state. If not, we
* don't want to sort it as part of the current batch. Instead we
* don't want to sort it as part of the current batch. Instead we
* use the group_pivot slot to carry it over to the next batch
* (even though we won't actually treat it as a group pivot).
*/
@ -1121,14 +1121,14 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
PlanState *outerPlan = outerPlanState(node);
/*
* Incremental sort doesn't support efficient rescan even when paramters
* Incremental sort doesn't support efficient rescan even when parameters
* haven't changed (e.g., rewind) because unlike regular sort we don't
* store all tuples at once for the full sort.
*
* So even if EXEC_FLAG_REWIND is set we just reset all of our state and
* reexecute the sort along with the child node below us.
*
* In theory if we've only fill the full sort with one batch (and haven't
* In theory if we've only filled the full sort with one batch (and haven't
* reset it for a new batch yet) then we could efficiently rewind, but
* that seems a narrow enough case that it's not worth handling specially
* at this time.

View File

@ -575,7 +575,7 @@ logicalrep_partmap_init(void)
* Returned entry reuses most of the values of the root table's entry, save
* the attribute map, which can be different for the partition.
*
* Note there's no logialrep_partition_close, because the caller closes the
* Note there's no logicalrep_partition_close, because the caller closes the
* the component relation.
*/
LogicalRepRelMapEntry *

View File

@ -808,7 +808,7 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate,
*
* Setup, or reset, all state need for processing a new set of tuples with this
* sort state. Called both from tuplesort_begin_common (the first time sorting
* with this sort state) and tuplesort_reseti (for subsequent usages).
* with this sort state) and tuplesort_reset (for subsequent usages).
*/
static void
tuplesort_begin_batch(Tuplesortstate *state)

View File

@ -63,7 +63,7 @@ typedef struct SortCoordinateData *SortCoordinate;
* sometimes put it in shared memory.
*
* The parallel-sort infrastructure relies on having a zero TuplesortMethod
* indicate that a worker never did anything, so we assign zero to
* to indicate that a worker never did anything, so we assign zero to
* SORT_TYPE_STILL_IN_PROGRESS. The other values of this enum can be
* OR'ed together to represent a situation where different workers used
* different methods, so we need a separate bit for each one. Keep the