Mirror of https://github.com/postgres/postgres.git

Fix typos in comments

The changes in this commit touch only comments and have no direct
user-visible impact; they fix references to incorrect function, variable,
and structure names.

Author: Alexander Lakhin
Discussion: https://postgr.es/m/e8c38840-596a-83d6-bd8d-cebc51111572@gmail.com
Committed by Michael Paquier on 2023-05-02 12:23:08 +09:00
parent 6fd8ae6888
commit 8961cb9a03
43 changed files with 59 additions and 60 deletions

src/backend/access/gist/gistxlog.c

@@ -580,8 +580,8 @@ gistXLogAssignLSN(void)
     int         dummy = 0;
 
     /*
-     * Records other than SWITCH_WAL must have content. We use an integer 0 to
-     * follow the restriction.
+     * Records other than XLOG_SWITCH must have content. We use an integer 0
+     * to follow the restriction.
     */
     XLogBeginInsert();
     XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);
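For context, this function is how GiST emits a WAL record that carries no real payload: since only XLOG_SWITCH records may be empty, a dummy integer is registered as content. A sketch of the full function under that reading (the XLogRegisterData/XLogInsert calls follow the standard WAL-insertion pattern; treat the exact record ID as an assumption):

    XLogRecPtr
    gistXLogAssignLSN(void)
    {
        int         dummy = 0;

        /*
         * Records other than XLOG_SWITCH must have content. We use an
         * integer 0 to follow the restriction.
         */
        XLogBeginInsert();
        XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);

        /* Register the dummy payload so the record is not empty. */
        XLogRegisterData((char *) &dummy, sizeof(dummy));

        return XLogInsert(RM_GIST_ID, XLOG_GIST_ASSIGN_LSN);
    }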

src/backend/access/heap/heapam.c

@@ -1223,7 +1223,7 @@ heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
     * Calculate the first block and the number of blocks we must scan. We
     * could be more aggressive here and perform some more validation to try
     * and further narrow the scope of blocks to scan by checking if the
-    * lowerItem has an offset above MaxOffsetNumber. In this case, we could
+    * lowestItem has an offset above MaxOffsetNumber. In this case, we could
     * advance startBlk by one. Likewise, if highestItem has an offset of 0
     * we could scan one fewer blocks. However, such an optimization does not
     * seem worth troubling over, currently.
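The block-range arithmetic the comment reasons about can be modeled standalone: a TID-range scan must visit every block from lowestItem's block through highestItem's block, regardless of the offsets within the two boundary blocks. A minimal compilable sketch (TidModel and its fields are invented stand-ins for ItemPointer, not the heapam.c definitions):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t BlockNumber;
    typedef uint16_t OffsetNumber;

    /* Simplified stand-in for an ItemPointer: a (block, offset) pair. */
    typedef struct
    {
        BlockNumber  block;
        OffsetNumber offset;
    } TidModel;

    int
    main(void)
    {
        TidModel    lowestItem = {.block = 10, .offset = 1};
        TidModel    highestItem = {.block = 42, .offset = 7};

        /* Visit every block that could hold a TID in the range. */
        BlockNumber startBlk = lowestItem.block;
        BlockNumber numBlks = highestItem.block - startBlk + 1;

        printf("scan %u blocks starting at block %u\n",
               (unsigned) numBlks, (unsigned) startBlk);
        return 0;
    }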

src/backend/access/heap/heapam_handler.c

@@ -816,7 +816,7 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
                 * If the last pages of the scan were empty, we would go to
                 * the next phase while heap_blks_scanned != heap_blks_total.
                 * Instead, to ensure that heap_blks_scanned is equivalent to
-                * total_heap_blks after the table scan phase, this parameter
+                * heap_blks_total after the table scan phase, this parameter
                 * is manually updated to the correct value when the table
                 * scan finishes.
                 */
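The "manual update" the comment describes plausibly reduces to a single progress-reporting call once the scan loop exits, so the scanned counter lands exactly on the total (a sketch; hscan and rs_nblocks are assumed names for the heap-scan descriptor and its total block count):

    /* Scan finished: force heap_blks_scanned to equal heap_blks_total. */
    pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
                                 hscan->rs_nblocks);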

src/backend/access/heap/pruneheap.c

@@ -198,8 +198,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
         /*
         * Now that we have buffer lock, get accurate information about the
         * page's free space, and recheck the heuristic about whether to
-        * prune. (We needn't recheck PageIsPrunable, since no one else could
-        * have pruned while we hold pin.)
+        * prune.
         */
         if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
         {
@@ -490,7 +489,7 @@ heap_page_prune(Relation relation, Buffer buffer,
  *
  * Due to its cost we also only want to call
  * TransactionIdLimitedForOldSnapshots() if necessary, i.e. we might not have
- * done so in heap_hot_prune_opt() if pd_prune_xid was old enough. But we
+ * done so in heap_page_prune_opt() if pd_prune_xid was old enough. But we
  * still want to be able to remove rows that are too new to be removed
  * according to prstate->vistest, but that can be removed based on
  * old_snapshot_threshold. So we call TransactionIdLimitedForOldSnapshots() on

src/backend/access/heap/vacuumlazy.c

@@ -2575,7 +2575,7 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
     END_CRIT_SECTION();
 
     /*
-     * Now that we have removed the LD_DEAD items from the page, once again
+     * Now that we have removed the LP_DEAD items from the page, once again
     * check if the page has become all-visible. The page is already marked
     * dirty, exclusively locked, and, if needed, a full page image has been
     * emitted.
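The recheck that follows this comment plausibly takes the shape below (a sketch of the pattern only; heap_page_is_all_visible is vacuumlazy.c's local helper for re-deriving the all-visible status, and the variable names here are illustrative):

    /* With the LP_DEAD items gone, the page may now be all-visible. */
    if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
                                 &all_frozen))
    {
        /* Set PD_ALL_VISIBLE on the page and update the visibility map. */
        PageSetAllVisible(page);
        visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
                          vmbuffer, visibility_cutoff_xid, flags);
    }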

src/backend/access/nbtree/nbtsplitloc.c

@@ -119,7 +119,7 @@ static inline IndexTuple _bt_split_firstright(FindSplitData *state,
  * righthand page (which is called firstrightoff), plus a boolean
  * indicating whether the new tuple goes on the left or right page. You
  * can think of the returned state as a point _between_ two adjacent data
- * items (laftleft and firstright data items) on an imaginary version of
+ * items (lastleft and firstright data items) on an imaginary version of
  * origpage that already includes newitem. The bool is necessary to
  * disambiguate the case where firstrightoff == newitemoff (i.e. it is
  * sometimes needed to determine if the firstright tuple for the split is
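The "point between two adjacent data items" can be captured in a tiny standalone model (the names mirror the comment, not the exact nbtsplitloc.c structs): everything before firstrightoff stays on the left page, everything from firstrightoff onward goes right, and the flag settles the ambiguous firstrightoff == newitemoff case.

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint16_t OffsetNumber;

    /*
     * A leaf-page split point.  When the new item's offset equals
     * firstrightoff, the offset alone cannot say which side the new item
     * lands on, hence the explicit flag.
     */
    typedef struct SplitPointModel
    {
        OffsetNumber firstrightoff; /* first data item on the right page */
        bool         newitemonleft; /* does newitem go on the left page? */
    } SplitPointModel;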

src/backend/access/transam/slru.c

@@ -809,7 +809,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
     }
 
     /*
-     * During a WriteAll, we may already have the desired file open.
+     * During a SimpleLruWriteAll, we may already have the desired file open.
     */
     if (fdata)
     {
@@ -864,7 +864,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
         else
         {
             /*
-             * In the unlikely event that we exceed MAX_FLUSH_BUFFERS,
+             * In the unlikely event that we exceed MAX_WRITEALL_BUFFERS,
             * fall back to treating it as a standalone write.
             */
             fdata = NULL;
@@ -1478,7 +1478,7 @@ SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset)
  *
  * This assumes every uint32 >= FirstNormalTransactionId is a valid key. It
  * assumes each value occupies a contiguous, fixed-size region of SLRU bytes.
- * (MultiXactMemberCtl separates flags from XIDs. AsyncCtl has
+ * (MultiXactMemberCtl separates flags from XIDs. NotifyCtl has
  * variable-length entries, no keys, and no random access. These unit tests
  * do not apply to them.)
  */
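The first two fixed comments describe the same mechanism: SimpleLruWriteAll passes a write-all state (fdata) so that up to MAX_WRITEALL_BUFFERS segment files stay open and are reused across pages, and a write that would exceed the cap degrades to a standalone write by clearing fdata locally. A standalone model of such a handle cache (SlruWriteAllModel, lookup_cached_fd, and the field names are invented for illustration):

    #define MAX_WRITEALL_BUFFERS 16     /* cap on cached open segments */

    typedef struct SlruWriteAllModel
    {
        int num_files;                      /* entries in use */
        int fd[MAX_WRITEALL_BUFFERS];       /* open file descriptors */
        int segno[MAX_WRITEALL_BUFFERS];    /* segment number per fd */
    } SlruWriteAllModel;

    /* Return a cached fd for segno, or -1 if the caller must open it. */
    static int
    lookup_cached_fd(SlruWriteAllModel *fdata, int segno)
    {
        for (int i = 0; i < fdata->num_files; i++)
        {
            if (fdata->segno[i] == segno)
                return fdata->fd[i];
        }
        return -1;
    }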

src/backend/access/transam/xlogreader.c

@@ -1476,7 +1476,7 @@ err:
 }
 
 /*
- * Helper function to ease writing of XLogRoutine->page_read callbacks.
+ * Helper function to ease writing of XLogReaderRoutine->page_read callbacks.
  * If this function is used, caller must supply a segment_open callback in
  * 'state', as that is used here.
  *
@@ -1513,7 +1513,7 @@ WALRead(XLogReaderState *state,
     /*
     * If the data we want is not in a segment we have open, close what we
     * have (if anything) and open the next one, using the caller's
-     * provided openSegment callback.
+     * provided segment_open callback.
     */
     if (state->seg.ws_file < 0 ||
         !XLByteInSeg(recptr, state->seg.ws_segno, state->segcxt.ws_segsize) ||
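The condition shown above plausibly continues into the close-and-reopen branch sketched here, restated in full (the standard pattern for such callbacks; recptr and tli are WALRead()'s arguments, and exact variable names are assumptions): close whatever segment is open, compute the segment containing recptr, and let the caller's segment_open callback open it.

    if (state->seg.ws_file < 0 ||
        !XLByteInSeg(recptr, state->seg.ws_segno, state->segcxt.ws_segsize) ||
        tli != state->seg.ws_tli)
    {
        XLogSegNo   nextSegNo;

        /* Close the previously open segment, if any. */
        if (state->seg.ws_file >= 0)
            state->routine.segment_close(state);

        /* Find the segment holding recptr and ask the caller to open it. */
        XLByteToSeg(recptr, nextSegNo, state->segcxt.ws_segsize);
        state->routine.segment_open(state, nextSegNo, &tli);

        /* Remember which segment and timeline are now open. */
        state->seg.ws_tli = tli;
        state->seg.ws_segno = nextSegNo;
    }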