
Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical.  We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop).  We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up.  Going
forward, that should make for fewer random-seeming changes to existing
code.

Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
Tom Lane
2023-05-19 17:24:48 -04:00
parent df6b19fbbc
commit 0245f8db36
402 changed files with 4756 additions and 4427 deletions
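
For illustration only, here is a rough before/after sketch of what the pg_bsd_indent 2.1.2 change does to a declaration whose initialization expression wraps, using the xlog_decode hunk below as the pattern (the assumption, implied by the commit message, is that the older indent left the continuation line flush with the declaration; the actual whitespace is not visible in the flattened hunks):

    /* older pg_bsd_indent: continuation line flush with the declaration */
    xl_parameter_change *xlrec =
    (xl_parameter_change *) XLogRecGetData(buf->record);

    /* pg_bsd_indent 2.1.2: continuation line indented one extra tab stop */
    xl_parameter_change *xlrec =
        (xl_parameter_change *) XLogRecGetData(buf->record);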


@@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password)
if (must_use_password)
{
- bool uses_password = false;
+ bool uses_password = false;
for (opt = opts; opt->keyword != NULL; ++opt)
{


@@ -155,7 +155,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
case XLOG_PARAMETER_CHANGE:
{
xl_parameter_change *xlrec =
- (xl_parameter_change *) XLogRecGetData(buf->record);
+ (xl_parameter_change *) XLogRecGetData(buf->record);
/*
* If wal_level on the primary is reduced to less than
@@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
* invalidated when this WAL record is replayed; and further,
* slot creation fails when wal_level is not sufficient; but
* all these operations are not synchronized, so a logical
- * slot may creep in while the wal_level is being
- * reduced. Hence this extra check.
+ * slot may creep in while the wal_level is being reduced.
+ * Hence this extra check.
*/
if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
{
@@ -752,7 +752,7 @@ DecodePrepare(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
SnapBuild *builder = ctx->snapshot_builder;
XLogRecPtr origin_lsn = parsed->origin_lsn;
TimestampTz prepare_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
int i;
TransactionId xid = parsed->twophase_xid;
@@ -828,7 +828,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
int i;
XLogRecPtr origin_lsn = InvalidXLogRecPtr;
TimestampTz abort_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
bool skip_xact;
if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)


@@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin,
MemoryContext old_context;
/*
- * On a standby, this check is also required while creating the
- * slot. Check the comments in the function.
+ * On a standby, this check is also required while creating the slot.
+ * Check the comments in the function.
*/
CheckLogicalDecodingRequirements();


@@ -833,7 +833,7 @@ replorigin_redo(XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec =
- (xl_replorigin_set *) XLogRecGetData(record);
+ (xl_replorigin_set *) XLogRecGetData(record);
replorigin_advance(xlrec->node_id,
xlrec->remote_lsn, record->EndRecPtr,


@@ -1408,7 +1408,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
{
dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
ReorderBufferChange *next_change =
- dlist_container(ReorderBufferChange, node, next);
+ dlist_container(ReorderBufferChange, node, next);
/* txn stays the same */
state->entries[off].lsn = next_change->lsn;
@@ -1439,8 +1439,8 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
{
/* successfully restored changes from disk */
ReorderBufferChange *next_change =
- dlist_head_element(ReorderBufferChange, node,
- &entry->txn->changes);
+ dlist_head_element(ReorderBufferChange, node,
+ &entry->txn->changes);
elog(DEBUG2, "restored %u/%u changes from disk",
(uint32) entry->txn->nentries_mem,
@@ -1582,7 +1582,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
dclist_delete_from(&rb->catchange_txns, &txn->catchange_node);
/* now remove reference from buffer */
- hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
+ hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
Assert(found);
/* remove entries spilled to disk */
@@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
ReorderBufferTXN *txn;
/*
- * Bail out if logical_replication_mode is buffered and we haven't exceeded
- * the memory limit.
+ * Bail out if logical_replication_mode is buffered and we haven't
+ * exceeded the memory limit.
*/
if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
rb->size < logical_decoding_work_mem * 1024L)
@@ -3841,7 +3841,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
{
char *data;
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
sz += inval_size;
@@ -4010,10 +4010,10 @@ ReorderBufferStreamTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
* After that we need to reuse the snapshot from the previous run.
*
* Unlike DecodeCommit which adds xids of all the subtransactions in
- * snapshot's xip array via SnapBuildCommitTxn, we can't do that here
- * but we do add them to subxip array instead via ReorderBufferCopySnap.
- * This allows the catalog changes made in subtransactions decoded till
- * now to be visible.
+ * snapshot's xip array via SnapBuildCommitTxn, we can't do that here but
+ * we do add them to subxip array instead via ReorderBufferCopySnap. This
+ * allows the catalog changes made in subtransactions decoded till now to
+ * be visible.
*/
if (txn->snapshot_now == NULL)
{
@@ -4206,7 +4206,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
dlist_foreach_modify(cleanup_iter, &txn->changes)
{
ReorderBufferChange *cleanup =
- dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
+ dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
dlist_delete(&cleanup->node);
ReorderBufferReturnChange(rb, cleanup, true);
@@ -4431,7 +4431,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
case REORDER_BUFFER_CHANGE_INVALIDATION:
{
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
change->data.inval.invalidations =
MemoryContextAlloc(rb->context, inval_size);
@@ -4936,7 +4936,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
dlist_foreach_modify(it, &ent->chunks)
{
ReorderBufferChange *change =
- dlist_container(ReorderBufferChange, node, it.cur);
+ dlist_container(ReorderBufferChange, node, it.cur);
dlist_delete(&change->node);
ReorderBufferReturnChange(rb, change, true);


@@ -574,7 +574,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
Assert(builder->building_full_snapshot);
/* don't allow older snapshots */
- InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
+ InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
if (HaveRegisteredOrActiveSnapshot())
elog(ERROR, "cannot build an initial slot snapshot when snapshots exist");
Assert(!HistoricSnapshotActive());
@@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
*/
/*
- * xl_running_xacts record is older than what we can use, we might not have
- * all necessary catalog rows anymore.
+ * xl_running_xacts record is older than what we can use, we might not
+ * have all necessary catalog rows anymore.
*/
if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
NormalTransactionIdPrecedes(running->oldestRunningXid,


@@ -563,7 +563,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
* the lock.
*/
int nsyncworkers =
- logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
+ logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
/* Now safe to release the LWLock */
LWLockRelease(LogicalRepWorkerLock);


@@ -2399,7 +2399,7 @@ apply_handle_insert(StringInfo s)
LogicalRepRelMapEntry *rel;
LogicalRepTupleData newtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
@@ -2547,7 +2547,7 @@ apply_handle_update(StringInfo s)
{
LogicalRepRelMapEntry *rel;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
LogicalRepTupleData oldtup;
@@ -2732,7 +2732,7 @@ apply_handle_delete(StringInfo s)
LogicalRepRelMapEntry *rel;
LogicalRepTupleData oldtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
@@ -3079,8 +3079,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
if (map)
{
TupleConversionMap *PartitionToRootMap =
- convert_tuples_by_name(RelationGetDescr(partrel),
- RelationGetDescr(parentrel));
+ convert_tuples_by_name(RelationGetDescr(partrel),
+ RelationGetDescr(parentrel));
remoteslot =
execute_attr_map_slot(PartitionToRootMap->attrMap,
@@ -3414,7 +3414,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush,
dlist_foreach_modify(iter, &lsn_mapping)
{
FlushPosition *pos =
- dlist_container(FlushPosition, node, iter.cur);
+ dlist_container(FlushPosition, node, iter.cur);
*write = pos->remote_end;
@@ -4702,11 +4702,11 @@ ApplyWorkerMain(Datum main_arg)
ereport(DEBUG1,
(errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s",
- MySubscription->name,
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
- "?")));
+ MySubscription->name,
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
+ "?")));
}
else
{
@@ -5080,10 +5080,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
- * If we are processing this transaction using a parallel apply worker then
- * either we send the changes to the parallel worker or if the worker is busy
- * then serialize the changes to the file which will later be processed by
- * the parallel worker.
+ * If we are processing this transaction using a parallel apply worker
+ * then either we send the changes to the parallel worker or if the worker
+ * is busy then serialize the changes to the file which will later be
+ * processed by the parallel worker.
*/
*winfo = pa_find_worker(xid);
@@ -5097,9 +5097,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
- * If there is no parallel worker involved to process this transaction then
- * we either directly apply the change or serialize it to a file which will
- * later be applied when the transaction finish message is processed.
+ * If there is no parallel worker involved to process this transaction
+ * then we either directly apply the change or serialize it to a file
+ * which will later be applied when the transaction finish message is
+ * processed.
*/
else if (in_streamed_transaction)
{


@@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications,
* are multiple lists (one for each operation) to which row filters will
* be appended.
*
- * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row
- * filter expression" so it takes precedence.
+ * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
+ * expression" so it takes precedence.
*/
foreach(lc, publications)
{


@@ -330,7 +330,7 @@ static void
SyncRepQueueInsert(int mode)
{
dlist_head *queue;
- dlist_iter iter;
+ dlist_iter iter;
Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
queue = &WalSndCtl->SyncRepQueue[mode];
@@ -879,7 +879,7 @@ SyncRepWakeQueue(bool all, int mode)
dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode])
{
- PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
+ PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
/*
* Assume the queue is ordered by LSN