1
0
mirror of https://github.com/postgres/postgres.git synced 2025-08-30 06:01:21 +03:00

Fix inconsistencies and typos in the tree

This is numbered take 7, and addresses a set of issues with code
comments, variable names and unreferenced variables.

Author: Alexander Lakhin
Discussion: https://postgr.es/m/dff75442-2468-f74f-568c-6006e141062f@gmail.com
This commit is contained in:
Michael Paquier
2019-07-22 10:01:50 +09:00
parent e1a0f6a983
commit 23bccc823d
44 changed files with 49 additions and 65 deletions

View File

@@ -69,7 +69,7 @@
* currently executing.
*
* Fillfactor can be set because it applies only to subsequent changes made to
- * data blocks, as documented in heapio.c
+ * data blocks, as documented in hio.c
*
* n_distinct options can be set at ShareUpdateExclusiveLock because they
* are only used during ANALYZE, which uses a ShareUpdateExclusiveLock,

View File

@@ -92,7 +92,7 @@ typedef struct
/*
* The following fields represent the items in this segment. If 'items' is
- * not NULL, it contains a palloc'd array of the itemsin this segment. If
+ * not NULL, it contains a palloc'd array of the items in this segment. If
* 'seg' is not NULL, it contains the items in an already-compressed
* format. It can point to an on-disk page (!modified), or a palloc'd
* segment in memory. If both are set, they must represent the same items.

View File

@@ -663,7 +663,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
}
/*
- * Check the last returned tuple and add it to killitems if
+ * Check the last returned tuple and add it to killedItems if
* necessary
*/
if (scan->kill_prior_tuple

View File

@@ -120,7 +120,7 @@ gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
}
/*
- * make plain IndexTupleVector
+ * make plain IndexTuple vector
*/
IndexTupleData *

View File

@@ -793,7 +793,7 @@ _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)
* be confused into returning the same tuple more than once or some tuples
* not at all by the rearrangement we are performing here. To prevent
* any concurrent scan to cross the squeeze scan we use lock chaining
- * similar to hasbucketcleanup. Refer comments atop hashbucketcleanup.
+ * similar to hashbucketcleanup. Refer comments atop hashbucketcleanup.
*
* We need to retain a pin on the primary bucket to ensure that no concurrent
* split can start.

View File

@@ -509,7 +509,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
* total number of buckets which has to be allocated before using its
- * _hashm_spare element. However always force at least 2 bucket pages. The
+ * hashm_spares element. However always force at least 2 bucket pages. The
* upper limit is determined by considerations explained in
* _hash_expandtable().
*/

View File

@@ -102,7 +102,7 @@ static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 in
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
uint16 infomask, Relation rel, int *remaining);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
- static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
+ static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed,
bool *copy);

View File

@@ -256,7 +256,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
MarkBufferDirty(buffer);
/*
- * Emit a WAL HEAP_CLEAN record showing what we did
+ * Emit a WAL XLOG_HEAP2_CLEAN record showing what we did
*/
if (RelationNeedsWAL(relation))
{

View File

@@ -557,8 +557,8 @@ systable_endscan(SysScanDesc sysscan)
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
- * wrappers around index_beginscan/index_getnext. The main reason for their
- * existence is to centralize possible future support of lossy operators
+ * wrappers around index_beginscan/index_getnext_slot. The main reason for
+ * their existence is to centralize possible future support of lossy operators
* in catalog scans.
*/
SysScanDesc

View File

@@ -643,7 +643,7 @@ spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
continue;
/*
- * Use infinity distances if innerConsistent() failed to return
+ * Use infinity distances if innerConsistentFn() failed to return
* them or if is a NULL item (their distances are really unused).
*/
distances = out.distances ? out.distances[i] : so->infDistances;

View File

@@ -891,7 +891,7 @@ ExtendCLOG(TransactionId newestXact)
* Remove all CLOG segments before the one holding the passed transaction ID
*
* Before removing any CLOG data, we must flush XLOG to disk, to ensure
- * that any recently-emitted HEAP_FREEZE records have reached disk; otherwise
+ * that any recently-emitted FREEZE_PAGE records have reached disk; otherwise
* a crash and restart might leave us with some unfrozen tuples referencing
* removed CLOG data. We choose to emit a special TRUNCATE XLOG record too.
* Replaying the deletion from XLOG is not critical, since the files could

View File

@@ -9158,7 +9158,7 @@ CreateRestartPoint(int flags)
/*
* Update pg_control, using current time. Check that it still shows
- * IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
+ * DB_IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
* this is a quick hack to make sure nothing really bad happens if somehow
* we get here after the end-of-recovery checkpoint.
*/