
Fix typos and grammar in comments and docs

Author: Justin Pryzby
Discussion: https://postgr.es/m/20210416070310.GG3315@telsasoft.com
Committer: Michael Paquier
Date: 2021-04-19 11:32:30 +09:00
parent c731f9187b
commit 7ef8b52cf0
26 changed files with 47 additions and 47 deletions


@@ -596,7 +596,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
  * and if we're violating them. In that case we can
  * terminate early, without invoking the support function.
  *
- * As there may be more keys, we can only detemine
+ * As there may be more keys, we can only determine
  * mismatch within this loop.
  */
 if (bdesc->bd_info[attno - 1]->oi_regular_nulls &&
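A standalone sketch, using hypothetical types and data rather than the PostgreSQL code, of the asymmetry the comment above describes: any single failing key proves a mismatch and lets the loop terminate early, but a match can only be concluded after every key has been checked.

#include <stdbool.h>
#include <stdio.h>

typedef struct { int lo, hi; } Range;   /* summary for one attribute */
typedef struct { int value; } ScanKey;  /* simplified scan key */

static bool
range_matches_keys(const Range *r, const ScanKey *keys, int nkeys)
{
    for (int i = 0; i < nkeys; i++)
    {
        /* One failing key proves a mismatch: terminate early. */
        if (keys[i].value < r->lo || keys[i].value > r->hi)
            return false;
    }
    /* Only after the loop can we conclude that all keys matched. */
    return true;
}

int
main(void)
{
    Range   r = {10, 20};
    ScanKey keys[] = {{12}, {25}};

    printf("match: %d\n", range_matches_keys(&r, keys, 2)); /* 0: key 25 fails */
    return 0;
}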
@@ -636,7 +636,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
 /*
  * Collation from the first key (has to be the same for
- * all keys for the same attribue).
+ * all keys for the same attribute).
  */
 collation = keys[attno - 1][0]->sk_collation;


@@ -409,7 +409,7 @@ typedef struct BloomOpaque
 {
 /*
  * XXX At this point we only need a single proc (to compute the hash), but
- * let's keep the array just like inclusion and minman opclasses, for
+ * let's keep the array just like inclusion and minmax opclasses, for
  * consistency. We may need additional procs in the future.
  */
 FmgrInfo extra_procinfos[BLOOM_MAX_PROCNUMS];


@@ -248,7 +248,7 @@ typedef struct DistanceValue
 } DistanceValue;
-/* Cache for support and strategy procesures. */
+/* Cache for support and strategy procedures. */
 static FmgrInfo *minmax_multi_get_procinfo(BrinDesc *bdesc, uint16 attno,
 uint16 procnum);
@@ -1311,7 +1311,7 @@ compare_distances(const void *a, const void *b)
 }
 /*
- * Given an array of expanded ranges, compute distance of the gaps betwen
+ * Given an array of expanded ranges, compute distance of the gaps between
  * the ranges - for ncranges there are (ncranges-1) gaps.
  *
  * We simply call the "distance" function to compute the (max-min) for pairs
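A minimal sketch, with hypothetical types rather than the actual minmax-multi code, of the arithmetic in the comment above: ncranges sorted ranges leave exactly (ncranges-1) gaps, each measured from one range's max to the next range's min.

#include <stdio.h>

typedef struct { double min, max; } ExpandedRange;

static void
compute_gap_distances(const ExpandedRange *ranges, int ncranges,
                      double *distances)
{
    /* ncranges sorted ranges leave exactly (ncranges - 1) gaps. */
    for (int i = 0; i < ncranges - 1; i++)
        distances[i] = ranges[i + 1].min - ranges[i].max;
}

int
main(void)
{
    ExpandedRange ranges[] = {{1, 5}, {9, 12}, {20, 25}};
    double        gaps[2];

    compute_gap_distances(ranges, 3, gaps);
    printf("gaps: %.0f %.0f\n", gaps[0], gaps[1]); /* 4 8 */
    return 0;
}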
@@ -1623,7 +1623,7 @@ ensure_free_space_in_buffer(BrinDesc *bdesc, Oid colloid,
  *
  * We don't simply check against range->maxvalues again. The deduplication
  * might have freed very little space (e.g. just one value), forcing us to
- * do depuplication very often. In that case it's better to do compaction
+ * do deduplication very often. In that case it's better to do compaction
  * and reduce more space.
  */
 if (2 * range->nranges + range->nvalues <= range->maxvalues * MINMAX_BUFFER_LOAD_FACTOR)
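A standalone restatement of the quoted test, with an illustrative constant and struct rather than the real definitions: each range stores two boundary values and each single point stores one, so 2*nranges + nvalues is the current buffer usage, and compaction only pays off once that usage has fallen below a fraction of the capacity.

#include <stdbool.h>
#include <stdio.h>

#define MINMAX_BUFFER_LOAD_FACTOR 0.5   /* illustrative value */

typedef struct
{
    int nranges;    /* ranges, two boundary values each */
    int nvalues;    /* single points, one value each */
    int maxvalues;  /* buffer capacity, counted in values */
} Ranges;

static bool
compaction_paid_off(const Ranges *range)
{
    return 2 * range->nranges + range->nvalues <=
        range->maxvalues * MINMAX_BUFFER_LOAD_FACTOR;
}

int
main(void)
{
    Ranges r = {10, 5, 64};

    /* usage 2*10 + 5 = 25 is below 64 * 0.5 = 32: enough space was freed */
    printf("%s\n", compaction_paid_off(&r) ? "enough" : "too little");
    return 0;
}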


@@ -115,7 +115,7 @@ typedef struct
 /*
  * In sorted build, we use a stack of these structs, one for each level,
- * to hold an in-memory buffer of the righmost page at the level. When the
+ * to hold an in-memory buffer of the rightmost page at the level. When the
  * page fills up, it is written out and a new page is allocated.
  */
 typedef struct GistSortedBuildPageState
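A simplified sketch of the pattern this struct supports, using hypothetical types and a toy page size rather than the GiST code: each level keeps one in-memory rightmost page; when it fills, the page is flushed, a downlink is pushed to the parent level, and a fresh page is started.

#include <stdio.h>

#define PAGE_CAPACITY 4         /* toy page size */

typedef struct PageState
{
    int               items[PAGE_CAPACITY];
    int               nitems;
    struct PageState *parent;   /* next level up, NULL at the top */
} PageState;

static void
page_add_item(PageState *level, int item)
{
    if (level->nitems == PAGE_CAPACITY)
    {
        /* Page is full: "write it out" and start a new rightmost page. */
        printf("flush page starting at item %d\n", level->items[0]);
        if (level->parent)
            page_add_item(level->parent, level->items[0]); /* downlink */
        level->nitems = 0;
    }
    level->items[level->nitems++] = item;
}

int
main(void)
{
    PageState parent = {{0}, 0, NULL};
    PageState leaf = {{0}, 0, &parent};

    for (int i = 0; i < 10; i++)    /* sorted input, as in sorted build */
        page_add_item(&leaf, i);
    return 0;
}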


@@ -633,7 +633,7 @@ systable_endscan(SysScanDesc sysscan)
  * Currently we do not support non-index-based scans here. (In principle
  * we could do a heapscan and sort, but the uses are in places that
  * probably don't need to still work with corrupted catalog indexes.)
- * For the moment, therefore, these functions are merely the thinest of
+ * For the moment, therefore, these functions are merely the thinnest of
  * wrappers around index_beginscan/index_getnext_slot. The main reason for
  * their existence is to centralize possible future support of lossy operators
  * in catalog scans.
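A generic sketch of the "thinnest of wrappers" idea, with entirely hypothetical names rather than the PostgreSQL API: the wrapper forwards to the underlying scan unchanged, but gives every caller one central place where a recheck for lossy operators could later be inserted.

#include <stdbool.h>
#include <stdio.h>

typedef struct { int key; } Tuple;

static Tuple sample = {42};
static bool  consumed = false;

/* Stand-in for the underlying index scan: one tuple, then NULL. */
static Tuple *
index_getnext_stub(void)
{
    if (consumed)
        return NULL;
    consumed = true;
    return &sample;
}

static Tuple *
systable_getnext_sketch(void)
{
    Tuple *tup = index_getnext_stub();

    /*
     * Pure pass-through today. If lossy operators were ever supported,
     * rechecking 'tup' would happen here, once, for all call sites.
     */
    return tup;
}

int
main(void)
{
    for (Tuple *tup = systable_getnext_sketch(); tup != NULL;
         tup = systable_getnext_sketch())
        printf("got tuple %d\n", tup->key);
    return 0;
}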


@@ -1398,7 +1398,7 @@ _bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid,
  * _bt_delitems_delete. These steps must take place before each function's
  * critical section begins.
  *
- * updatabable and nupdatable are inputs, though note that we will use
+ * updatable and nupdatable are inputs, though note that we will use
  * _bt_update_posting() to replace the original itup with a pointer to a final
  * version in palloc()'d memory. Caller should free the tuples when its done.
  *
@@ -1504,7 +1504,7 @@ _bt_delitems_cmp(const void *a, const void *b)
  * some extra index tuples that were practically free for tableam to check in
  * passing (when they actually turn out to be safe to delete). It probably
  * only makes sense for the tableam to go ahead with these extra checks when
- * it is block-orientated (otherwise the checks probably won't be practically
+ * it is block-oriented (otherwise the checks probably won't be practically
  * free, which we rely on). The tableam interface requires the tableam side
  * to handle the problem, though, so this is okay (we as an index AM are free
  * to make the simplifying assumption that all tableams must be block-based).