Mirror of https://github.com/postgres/postgres.git

Initial pgindent and pgperltidy run for v14.

Also "make reformat-dat-files".

The only change worthy of note is that pgindent messed up the formatting
of launcher.c's struct LogicalRepWorkerId, which led me to notice that
that struct wasn't used at all anymore, so I just took it out.
commit def5b065ff
parent e6ccd1ce16
Author: Tom Lane
Date:   2021-05-12 13:14:10 -04:00

230 changed files with 2408 additions and 2125 deletions

src/backend/access/heap/heapam.c

@@ -432,11 +432,11 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
 * transactions on the primary might still be invisible to a read-only
 * transaction in the standby. We partly handle this problem by tracking
 * the minimum xmin of visible tuples as the cut-off XID while marking a
- * page all-visible on the primary and WAL log that along with the visibility
- * map SET operation. In hot standby, we wait for (or abort) all
- * transactions that can potentially may not see one or more tuples on the
- * page. That's how index-only scans work fine in hot standby. A crucial
- * difference between index-only scans and heap scans is that the
+ * page all-visible on the primary and WAL log that along with the
+ * visibility map SET operation. In hot standby, we wait for (or abort)
+ * all transactions that can potentially may not see one or more tuples on
+ * the page. That's how index-only scans work fine in hot standby. A
+ * crucial difference between index-only scans and heap scans is that the
 * index-only scan completely relies on the visibility map where as heap
 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
 * the page-level flag can be trusted in the same way, because it might
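
Aside: the distinction this comment draws can be illustrated with a small standalone C sketch (illustrative only, not PostgreSQL source; the map array and helper names are invented). An index-only scan never reads the heap page, so only the visibility map bit can tell it whether a heap fetch is still needed, while a heap scan already holds the page and can consult the page-level hint.

/* Illustrative sketch only -- not PostgreSQL source. */
#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 1024

typedef struct Page
{
	bool		pd_all_visible;	/* page-level hint, like PD_ALL_VISIBLE */
} Page;

static bool vm_all_visible[NBLOCKS];	/* stand-in for the visibility map */

/* Heap scan: the page is in hand, so its own hint bit can be consulted. */
static bool
heap_scan_skips_visibility_checks(const Page *page)
{
	return page->pd_all_visible;
}

/* Index-only scan: the heap page is never read, so only the VM bit decides
 * whether a heap fetch (and per-tuple visibility check) is still needed. */
static bool
index_only_scan_needs_heap_fetch(int blkno)
{
	return !vm_all_visible[blkno];
}

int
main(void)
{
	Page		p = {.pd_all_visible = true};

	vm_all_visible[7] = true;
	printf("heap scan skips checks: %d\n", heap_scan_skips_visibility_checks(&p));
	printf("index-only scan must visit heap for block 7: %d\n",
		   index_only_scan_needs_heap_fetch(7));
	return 0;
}
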
@@ -2095,11 +2095,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 /*
- * If we're inserting frozen entry into an empty page,
- * set visibility map bits and PageAllVisible() hint.
+ * If we're inserting frozen entry into an empty page, set visibility map
+ * bits and PageAllVisible() hint.
 *
- * If we're inserting frozen entry into already all_frozen page,
- * preserve this state.
+ * If we're inserting frozen entry into already all_frozen page, preserve
+ * this state.
 */
 if (options & HEAP_INSERT_FROZEN)
 {
@@ -2109,7 +2109,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 if (visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer))
 vmstatus = visibilitymap_get_status(relation,
-BufferGetBlockNumber(buffer), &vmbuffer);
+BufferGetBlockNumber(buffer), &vmbuffer);
 if ((starting_with_empty_page || vmstatus & VISIBILITYMAP_ALL_FROZEN))
 all_frozen_set = true;
@@ -2139,8 +2139,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 (options & HEAP_INSERT_SPECULATIVE) != 0);
 /*
- * If the page is all visible, need to clear that, unless we're only
- * going to add further frozen rows to it.
+ * If the page is all visible, need to clear that, unless we're only going
+ * to add further frozen rows to it.
 *
 * If we're only adding already frozen rows to a page that was empty or
 * marked as all visible, mark it as all-visible.
@@ -2258,11 +2258,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 END_CRIT_SECTION();
 /*
- * If we've frozen everything on the page, update the visibilitymap.
- * We're already holding pin on the vmbuffer.
+ * If we've frozen everything on the page, update the visibilitymap. We're
+ * already holding pin on the vmbuffer.
 *
- * No need to update the visibilitymap if it had all_frozen bit set
- * before this insertion.
+ * No need to update the visibilitymap if it had all_frozen bit set before
+ * this insertion.
 */
 if (all_frozen_set && ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0))
 {
@@ -2270,14 +2270,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
 /*
- * It's fine to use InvalidTransactionId here - this is only used
- * when HEAP_INSERT_FROZEN is specified, which intentionally
- * violates visibility rules.
+ * It's fine to use InvalidTransactionId here - this is only used when
+ * HEAP_INSERT_FROZEN is specified, which intentionally violates
+ * visibility rules.
 */
 visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
-InvalidXLogRecPtr, vmbuffer,
-InvalidTransactionId,
-VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+InvalidXLogRecPtr, vmbuffer,
+InvalidTransactionId,
+VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
 }
 UnlockReleaseBuffer(buffer);
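
Aside: a minimal standalone sketch of the two visibility-map bits being tested and set in this hunk. The two flag values mirror VISIBILITYMAP_ALL_VISIBLE and VISIBILITYMAP_ALL_FROZEN, but the flat byte-array map and the helper names below are simplifications invented for illustration (the real map is stored in normal pages with headers).

/* Illustrative sketch only -- not PostgreSQL source. */
#include <stdint.h>
#include <stdio.h>

#define ALL_VISIBLE 0x01		/* mirrors VISIBILITYMAP_ALL_VISIBLE */
#define ALL_FROZEN	0x02		/* mirrors VISIBILITYMAP_ALL_FROZEN */
#define BITS_PER_BLOCK 2		/* two status bits per heap block */

static uint8_t vm[8192];		/* simplified map: one flat chunk, no header */

static uint8_t
vm_get_status(uint32_t blkno)
{
	uint32_t	byteno = blkno / (8 / BITS_PER_BLOCK);
	uint32_t	shift = (blkno % (8 / BITS_PER_BLOCK)) * BITS_PER_BLOCK;

	return (vm[byteno] >> shift) & 0x03;
}

static void
vm_set_status(uint32_t blkno, uint8_t flags)
{
	uint32_t	byteno = blkno / (8 / BITS_PER_BLOCK);
	uint32_t	shift = (blkno % (8 / BITS_PER_BLOCK)) * BITS_PER_BLOCK;

	vm[byteno] |= (uint8_t) (flags << shift);
}

int
main(void)
{
	/* inserting only frozen tuples into an empty page: set both bits,
	 * as the heap_insert() hunk above does via visibilitymap_set() */
	vm_set_status(42, ALL_VISIBLE | ALL_FROZEN);

	/* no need to set anything again if ALL_FROZEN was already set */
	if ((vm_get_status(42) & ALL_FROZEN) == 0)
		printf("would update the visibility map here\n");
	else
		printf("block 42 already all-frozen, nothing to do\n");
	return 0;
}
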
@@ -2547,7 +2547,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 tupledata = scratchptr;
 /* check that the mutually exclusive flags are not both set */
-Assert (!(all_visible_cleared && all_frozen_set));
+Assert(!(all_visible_cleared && all_frozen_set));
 xlrec->flags = 0;
 if (all_visible_cleared)
@@ -3063,7 +3063,10 @@ l1:
 xl_heap_header xlhdr;
 XLogRecPtr recptr;
-/* For logical decode we need combo CIDs to properly decode the catalog */
+/*
+ * For logical decode we need combo CIDs to properly decode the
+ * catalog
+ */
 if (RelationIsAccessibleInLogicalDecoding(relation))
 log_heap_new_cid(relation, &tp);
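
Aside: a combo CID stands in for both cmin and cmax when the same transaction inserts and then deletes a tuple; the mapping from combo CID back to the (cmin, cmax) pair normally lives only in backend-local memory, which is why logical decoding needs log_heap_new_cid() to WAL-log it for catalog tuples. A hedged standalone sketch of the mapping idea (invented names and a fixed-size table; the real code also keeps a hash table for lookups):

/* Illustrative sketch only -- not PostgreSQL source. */
#include <stdio.h>

typedef unsigned int CommandId;

typedef struct ComboCidKey
{
	CommandId	cmin;
	CommandId	cmax;
} ComboCidKey;

/* backend-local table; the combo CID is simply the array index */
static ComboCidKey combocids[1024];
static int	usedCombocids = 0;

static CommandId
get_combo_cid(CommandId cmin, CommandId cmax)
{
	/* linear search for an existing pair (a real implementation would hash) */
	for (int i = 0; i < usedCombocids; i++)
		if (combocids[i].cmin == cmin && combocids[i].cmax == cmax)
			return (CommandId) i;

	combocids[usedCombocids].cmin = cmin;
	combocids[usedCombocids].cmax = cmax;
	return (CommandId) usedCombocids++;
}

int
main(void)
{
	/* a tuple inserted at command 3 and deleted at command 7 by the same
	 * transaction stores this single combo CID in its command id field */
	CommandId	combo = get_combo_cid(3, 7);

	printf("combo cid %u -> cmin %u, cmax %u\n",
		   combo, combocids[combo].cmin, combocids[combo].cmax);
	return 0;
}
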
@@ -7932,16 +7935,16 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
 * TIDs as each other. The goal is to ignore relatively small differences
 * in the total number of promising entries, so that the whole process can
 * give a little weight to heapam factors (like heap block locality)
- * instead. This isn't a trade-off, really -- we have nothing to lose.
- * It would be foolish to interpret small differences in npromisingtids
+ * instead. This isn't a trade-off, really -- we have nothing to lose. It
+ * would be foolish to interpret small differences in npromisingtids
 * values as anything more than noise.
 *
 * We tiebreak on nhtids when sorting block group subsets that have the
 * same npromisingtids, but this has the same issues as npromisingtids,
- * and so nhtids is subject to the same power-of-two bucketing scheme.
- * The only reason that we don't fix nhtids in the same way here too is
- * that we'll need accurate nhtids values after the sort. We handle
- * nhtids bucketization dynamically instead (in the sort comparator).
+ * and so nhtids is subject to the same power-of-two bucketing scheme. The
+ * only reason that we don't fix nhtids in the same way here too is that
+ * we'll need accurate nhtids values after the sort. We handle nhtids
+ * bucketization dynamically instead (in the sort comparator).
 *
 * See bottomup_nblocksfavorable() for a full explanation of when and how
 * heap locality/favorable blocks can significantly influence when and how
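
Aside: the power-of-two bucketing the comment refers to can be sketched in standalone C (illustration only; the helper name is invented). Counts that fall within the same power-of-two range compare as equal, so small differences in npromisingtids-like counters become invisible to the sort.

/* Illustrative sketch only -- not PostgreSQL source. */
#include <stdio.h>

/* Bucket a count by rounding it down to a power of two, so that e.g.
 * 17, 20 and 31 all land in bucket 16 and compare as equal. */
static unsigned int
power_of_two_bucket(unsigned int count)
{
	unsigned int bucket = 1;

	if (count == 0)
		return 0;
	while (bucket <= count / 2)
		bucket *= 2;
	return bucket;
}

int
main(void)
{
	unsigned int vals[] = {0, 1, 3, 17, 20, 31, 32};

	for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%u -> bucket %u\n", vals[i], power_of_two_bucket(vals[i]));
	return 0;
}
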
@@ -8944,8 +8947,8 @@ heap_xlog_insert(XLogReaderState *record)
 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
 /* check that the mutually exclusive flags are not both set */
-Assert (!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
-(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
+Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
+(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
 /*
 * The visibility map may need to be fixed even if the heap page is
@@ -9072,8 +9075,8 @@ heap_xlog_multi_insert(XLogReaderState *record)
 XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
 /* check that the mutually exclusive flags are not both set */
-Assert (!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
-(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
+Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
+(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
 /*
 * The visibility map may need to be fixed even if the heap page is

src/backend/access/heap/heapam_handler.c

@@ -1659,13 +1659,13 @@ heapam_index_build_range_scan(Relation heapRelation,
 offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
 /*
- * If a HOT tuple points to a root that we don't know
- * about, obtain root items afresh. If that still fails,
- * report it as corruption.
+ * If a HOT tuple points to a root that we don't know about,
+ * obtain root items afresh. If that still fails, report it as
+ * corruption.
 */
 if (root_offsets[offnum - 1] == InvalidOffsetNumber)
 {
-Page page = BufferGetPage(hscan->rs_cbuf);
+Page page = BufferGetPage(hscan->rs_cbuf);
 LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
 heap_get_root_tuples(page, root_offsets);
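
Aside: the retry-then-report pattern described in this comment, as a hedged standalone sketch (invented names, simplified error handling): look up the HOT root, rebuild the cached root-item map once if the lookup fails, and only treat a second miss as corruption.

/* Illustrative sketch only -- not PostgreSQL source. */
#include <stdio.h>
#include <stdlib.h>

#define INVALID_OFFSET 0
#define MAX_OFFSETS 32

static unsigned int root_offsets[MAX_OFFSETS];

/* stand-in for re-reading the page and rebuilding the root-item map */
static void
refresh_root_offsets(void)
{
	root_offsets[4] = 2;		/* pretend offset 5 chains back to root item 2 */
}

static unsigned int
get_root_offset(unsigned int offnum)
{
	if (root_offsets[offnum - 1] == INVALID_OFFSET)
	{
		/* unknown root: obtain root items afresh and retry once */
		refresh_root_offsets();

		if (root_offsets[offnum - 1] == INVALID_OFFSET)
		{
			fprintf(stderr, "failed to find parent tuple for heap-only tuple at offset %u\n",
					offnum);
			exit(1);
		}
	}
	return root_offsets[offnum - 1];
}

int
main(void)
{
	printf("root of offset 5 is %u\n", get_root_offset(5));
	return 0;
}
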
@@ -2482,8 +2482,8 @@ reform_and_rewrite_tuple(HeapTuple tuple,
 else if (!isnull[i] && TupleDescAttr(newTupDesc, i)->attlen == -1)
 {
 struct varlena *new_value;
-ToastCompressionId cmid;
-char cmethod;
+ToastCompressionId cmid;
+char cmethod;
 new_value = (struct varlena *) DatumGetPointer(values[i]);
 cmid = toast_get_compression_id(new_value);

src/backend/access/heap/heapam_visibility.c

@@ -1608,8 +1608,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
 /*
 * another transaction might have (tried to) delete this tuple or
- * cmin/cmax was stored in a combo CID. So we need to lookup the actual
- * values externally.
+ * cmin/cmax was stored in a combo CID. So we need to lookup the
+ * actual values externally.
 */
 resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
 htup, buffer,
@@ -1629,8 +1629,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
 * elog inside ResolveCminCmaxDuringDecoding.
 *
 * XXX For the streaming case, we can track the largest combo CID
- * assigned, and error out based on this (when unable to resolve
- * combo CID below that observed maximum value).
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
 */
 if (!resolved)
 return false;
@@ -1717,8 +1717,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
 * elog inside ResolveCminCmaxDuringDecoding.
 *
 * XXX For the streaming case, we can track the largest combo CID
- * assigned, and error out based on this (when unable to resolve
- * combo CID below that observed maximum value).
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
 */
 if (!resolved || cmax == InvalidCommandId)
 return true;
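
Aside: a hedged sketch of the idea in the XXX comment above (invented names, not an existing PostgreSQL API): remember the largest combo CID assigned so far, and treat a failed resolution as an error only when the combo CID being resolved is below that observed maximum.

/* Illustrative sketch of the idea in the XXX comment -- not PostgreSQL source. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int CommandId;

static CommandId largest_combocid_assigned = 0;

static void
note_combocid_assigned(CommandId combocid)
{
	if (combocid > largest_combocid_assigned)
		largest_combocid_assigned = combocid;
}

/* Called when a combo CID could not be resolved to cmin/cmax. */
static bool
unresolved_combocid_is_error(CommandId combocid)
{
	/*
	 * Below the observed maximum, the mapping should already have been made
	 * available, so a miss indicates a real problem; at or above it, the
	 * mapping may simply not have been streamed yet.
	 */
	return combocid < largest_combocid_assigned;
}

int
main(void)
{
	note_combocid_assigned(5);
	printf("unresolved cid 3 is error: %d\n", unresolved_combocid_is_error(3));
	printf("unresolved cid 9 is error: %d\n", unresolved_combocid_is_error(9));
	return 0;
}
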

src/backend/access/heap/hio.c

@@ -410,8 +410,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
 }
 /*
- * If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * If the FSM knows nothing of the rel, try the last page before we give
+ * up and extend. This avoids one-tuple-per-page syndrome during
 * bootstrapping or in a recently-started system.
 */
 if (targetBlock == InvalidBlockNumber)
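
Aside: the fallback this comment describes, as a hedged standalone sketch (invented names, not the actual FSM API): when the free space map has no information for the relation, try its last existing page before extending, so a freshly created table does not end up with one tuple per page.

/* Illustrative sketch only -- not PostgreSQL source. */
#include <stdio.h>

#define INVALID_BLOCK ((unsigned int) 0xFFFFFFFF)

/* stand-in for the FSM: returns INVALID_BLOCK when it knows nothing */
static unsigned int
fsm_get_block_with_free_space(unsigned int needed)
{
	(void) needed;
	return INVALID_BLOCK;		/* freshly created relation, empty FSM */
}

static unsigned int
choose_target_block(unsigned int nblocks, unsigned int needed)
{
	unsigned int target = fsm_get_block_with_free_space(needed);

	/*
	 * If the FSM knows nothing of the rel, try the last existing page before
	 * extending; otherwise every insert would allocate a new page.
	 */
	if (target == INVALID_BLOCK && nblocks > 0)
		target = nblocks - 1;

	return target;				/* still INVALID_BLOCK -> caller extends */
}

int
main(void)
{
	printf("target block: %u\n", choose_target_block(10, 200));
	return 0;
}
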

src/backend/access/heap/pruneheap.c

@@ -95,8 +95,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 /*
 * We can't write WAL in recovery mode, so there's no point trying to
- * clean the page. The primary will likely issue a cleaning WAL record soon
- * anyway, so this is no particular loss.
+ * clean the page. The primary will likely issue a cleaning WAL record
+ * soon anyway, so this is no particular loss.
 */
 if (RecoveryInProgress())
 return;

src/backend/access/heap/vacuumlazy.c

@@ -691,8 +691,8 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 *
 * Deliberately avoid telling the stats collector about LP_DEAD items that
 * remain in the table due to VACUUM bypassing index and heap vacuuming.
- * ANALYZE will consider the remaining LP_DEAD items to be dead tuples.
- * It seems like a good idea to err on the side of not vacuuming again too
+ * ANALYZE will consider the remaining LP_DEAD items to be dead tuples. It
+ * seems like a good idea to err on the side of not vacuuming again too
 * soon in cases where the failsafe prevented significant amounts of heap
 * vacuuming.
 */
@@ -2284,7 +2284,7 @@ static void
 lazy_vacuum_heap_rel(LVRelState *vacrel)
 {
 int tupindex;
-BlockNumber vacuumed_pages;
+BlockNumber vacuumed_pages;
 PGRUsage ru0;
 Buffer vmbuffer = InvalidBuffer;
 LVSavedErrInfo saved_err_info;