mirror of https://github.com/postgres/postgres.git

pgindent run prior to branching

Author: Andrew Dunstan
Date: 2018-06-30 12:25:49 -04:00
parent 2c64d20048
commit 1e9c858090
18 changed files with 62 additions and 62 deletions


@@ -247,9 +247,9 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	metapage = BufferGetPage(metabuffer);
 
 	/*
-	 * An insertion to the pending list could logically belong anywhere in
-	 * the tree, so it conflicts with all serializable scans. All scans
-	 * acquire a predicate lock on the metabuffer to represent that.
+	 * An insertion to the pending list could logically belong anywhere in the
+	 * tree, so it conflicts with all serializable scans. All scans acquire a
+	 * predicate lock on the metabuffer to represent that.
 	 */
 	CheckForSerializableConflictIn(index, NULL, metabuffer);


@@ -235,8 +235,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 
 			/*
-			 * Acquire predicate lock on the posting tree. We already hold
-			 * a lock on the entry page, but insertions to the posting tree
+			 * Acquire predicate lock on the posting tree. We already hold a
+			 * lock on the entry page, but insertions to the posting tree
 			 * don't check for conflicts on that level.
 			 */
 			PredicateLockPage(btree->index, rootPostingTree, snapshot);
@@ -1766,8 +1766,8 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	*ntids = 0;
 
 	/*
-	 * Acquire predicate lock on the metapage, to conflict with any
-	 * fastupdate insertions.
+	 * Acquire predicate lock on the metapage, to conflict with any fastupdate
+	 * insertions.
 	 */
 	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
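
Taken together with the ginfast.c hunk above, these comments describe a deliberately coarse serializable-locking scheme: every serializable GIN scan predicate-locks the metapage (PredicateLockPage), and every pending-list insertion checks for conflicts against that same page (CheckForSerializableConflictIn), so any insert/scan pair is detected no matter where in the pending list the new tuple lands. A minimal runnable model of that idea, with hypothetical names standing in for the backend's predicate-lock machinery:

/* Toy model (not PostgreSQL code): one coarse lock target shared by
 * readers and writers.  All names here are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define GIN_METAPAGE_BLKNO 0

typedef struct { int relid; int blkno; bool held; } PredLock;

static PredLock scan_lock;

/* Scan side: record the page this scan's result depends on. */
static void predicate_lock_page(int relid, int blkno)
{
	scan_lock = (PredLock) {relid, blkno, true};
}

/* Insert side: would this write invalidate a recorded lock? */
static bool check_conflict_in(int relid, int blkno)
{
	return scan_lock.held &&
		scan_lock.relid == relid &&
		scan_lock.blkno == blkno;
}

int main(void)
{
	predicate_lock_page(1, GIN_METAPAGE_BLKNO);	/* serializable scan */

	/* An insertion anywhere in the pending list checks the metapage,
	 * so it conflicts with every concurrent serializable scan. */
	printf("conflict detected: %s\n",
		   check_conflict_in(1, GIN_METAPAGE_BLKNO) ? "yes" : "no");
	return 0;
}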


@@ -820,10 +820,10 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 
 		/*
 		 * If table receives enough insertions and no cleanup was performed,
-		 * then index would appear have stale statistics. If scale factor
-		 * is set, we avoid that by performing cleanup if the number of
-		 * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
-		 * of original tuples count.
+		 * then index would appear have stale statistics. If scale factor is
+		 * set, we avoid that by performing cleanup if the number of inserted
+		 * tuples exceeds vacuum_cleanup_index_scale_factor fraction of
+		 * original tuples count.
 		 */
 		relopts = (StdRdOptions *) info->index->rd_options;
 		cleanup_scale_factor = (relopts &&
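
The reflowed comment pins down the trigger condition exactly: cleanup runs when the number of tuples inserted since the last cleanup exceeds vacuum_cleanup_index_scale_factor times the tuple count recorded at that cleanup. A hedged sketch of just that test (hypothetical helper; the real _bt_vacuum_needs_cleanup also reads the metapage and per-index reloptions, as the surrounding code shows):

#include <stdbool.h>
#include <stdio.h>

/* Stale-stats test described in the comment above: inserts since the
 * last cleanup, as a fraction of the tuple count seen back then. */
static bool
needs_cleanup(double inserted_since_cleanup,
			  double tuples_at_last_cleanup,
			  double cleanup_scale_factor)
{
	return inserted_since_cleanup >
		cleanup_scale_factor * tuples_at_last_cleanup;
}

int main(void)
{
	/* Scale factor 0.1 and 1000 tuples at the last cleanup: cleanup
	 * triggers once more than 100 tuples have been inserted. */
	printf("%d\n", needs_cleanup(150, 1000, 0.1));	/* 1 */
	printf("%d\n", needs_cleanup(50, 1000, 0.1));	/* 0 */
	return 0;
}
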
@@ -873,8 +873,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 								 &oldestBtpoXact);
 
 	/*
-	 * Update cleanup-related information in metapage. This information
-	 * is used only for cleanup but keeping them up to date can avoid
+	 * Update cleanup-related information in metapage. This information is
+	 * used only for cleanup but keeping them up to date can avoid
 	 * unnecessary cleanup even after bulkdelete.
 	 */
 	_bt_update_meta_cleanup_info(info->index, oldestBtpoXact,


@@ -2196,8 +2196,8 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
 		 * non-zero, or when there is no explicit representation and the
 		 * tuple is evidently not a pre-pg_upgrade tuple.
 		 *
-		 * Prior to v11, downlinks always had P_HIKEY as their offset.
-		 * Use that to decide if the tuple is a pre-v11 tuple.
+		 * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+		 * that to decide if the tuple is a pre-v11 tuple.
 		 */
 		return BTreeTupleGetNAtts(itup, rel) == 0 ||
 			((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
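
The test following the comment hinges on a flag bit: tuples that carry an explicit attribute count set INDEX_ALT_TID_MASK in t_info, while pre-v11 tuples never do (those are instead recognized by the P_HIKEY offset the comment mentions). A runnable model of just the bit test; the 0x2000 value is my assumption of the reserved bit, matching PG11's itup.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INDEX_ALT_TID_MASK 0x2000	/* assumed reserved bit, as in PG11 */

/* v11-style tuples advertise their attribute count via this flag;
 * pre-v11 tuples never have it set. */
static bool has_alt_tid(uint16_t t_info)
{
	return (t_info & INDEX_ALT_TID_MASK) != 0;
}

int main(void)
{
	printf("pre-v11 format: %d\n", !has_alt_tid(0x0010));
	printf("v11 format:     %d\n", !has_alt_tid(0x0010 | INDEX_ALT_TID_MASK));
	return 0;
}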


@@ -4512,7 +4512,7 @@ ReadControlFile(void)
 					 errmsg("could not read from control file: %m")));
 		else
 			ereport(PANIC,
-					(errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
+					(errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
 	}
 	pgstat_report_wait_end();
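
The ereport being re-indented here is the second of two failure branches for read(): a negative return is an OS error (reported via %m, i.e. errno), while a non-negative short count is a truncated read, reported with actual vs. expected sizes. A standalone sketch of the same pattern (hypothetical helper, not the backend code):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read exactly len bytes or say why we could not, mirroring the two
 * branches in ReadControlFile(): errno for a failed read, byte counts
 * for a truncated one. */
static int
read_exact(int fd, void *buf, size_t len)
{
	ssize_t		r = read(fd, buf, len);

	if (r < 0)
		fprintf(stderr, "could not read: %s\n", strerror(errno));
	else if ((size_t) r != len)
		fprintf(stderr, "read %zd bytes, expected %zu\n", r, len);
	else
		return 0;
	return -1;
}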


@@ -829,9 +829,9 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
 	}
 
 	/*
-	 * Check that the address on the page agrees with what we expected.
-	 * This check typically fails when an old WAL segment is recycled,
-	 * and hasn't yet been overwritten with new data yet.
+	 * Check that the address on the page agrees with what we expected. This
+	 * check typically fails when an old WAL segment is recycled, and hasn't
+	 * yet been overwritten with new data yet.
 	 */
 	if (hdr->xlp_pageaddr != recaddr)
 	{
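
The rewrapped comment is the whole story of this check: WAL pages are stamped with their own address (xlp_pageaddr) when written, and a recycled segment still carries stamps from its previous life, so a mismatch normally marks the end of valid WAL rather than corruption. A simplified model (hypothetical trimmed-down header; the real one is XLogPageHeaderData):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;

/* Trimmed-down page header: just the self-address stamp. */
typedef struct { XLogRecPtr xlp_pageaddr; } PageHdr;

/* Validation compares the stamp to where the reader believes the page
 * lives now; a recycled page still carries its old address. */
static bool page_addr_ok(const PageHdr *hdr, XLogRecPtr expected)
{
	return hdr->xlp_pageaddr == expected;
}

int main(void)
{
	PageHdr recycled = {.xlp_pageaddr = 0x1000000};	/* old location */

	printf("at new location: %d\n", page_addr_ok(&recycled, 0x3000000));	/* 0 */
	printf("at old location: %d\n", page_addr_ok(&recycled, 0x1000000));	/* 1 */
	return 0;
}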