
Final pgindent + perltidy run for v10.

Author: Tom Lane
Date:   2017-08-14 17:29:33 -04:00
parent  5b6289c1e0
commit  21d304dfed

46 changed files with 273 additions and 273 deletions
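Nearly every hunk below is mechanical: declaration names pulled to pgindent's alignment column, block comments re-wrapped to the project's line-length limit, `foreach (...)` normalized to `foreach(...)`, and blank lines inserted after declaration sections (consistent with BSD indent's blank-after-declarations behavior). As a rough illustration of the target layout — hypothetical code, not taken from this commit; `count_matches` and its variables are invented for the example:

/*
 * Hypothetical example (not from this commit) of the layout pgindent
 * enforces: declaration names aligned to a common column, a blank line
 * after the declarations, and block comments wrapped to the line limit.
 */
#include <stdio.h>

static int
count_matches(const int *values, int nvalues, int target)
{
    int         i;
    int         nmatches = 0;

    /*
     * pgindent re-wraps block comments like this one so that no line runs
     * past the limit; many hunks in this commit merely move words between
     * comment lines without changing the text at all.
     */
    for (i = 0; i < nvalues; i++)
    {
        if (values[i] == target)
            nmatches++;
    }
    return nmatches;
}

int
main(void)
{
    int         data[] = {1, 2, 2, 3};

    printf("%d\n", count_matches(data, 4, 2));
    return 0;
}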

src/backend/access/hash/hashpage.c

@@ -1320,10 +1320,10 @@ _hash_splitbucket(Relation rel,
     /*
      * If possible, clean up the old bucket. We might not be able to do this
      * if someone else has a pin on it, but if not then we can go ahead. This
-     * isn't absolutely necessary, but it reduces bloat; if we don't do it now,
-     * VACUUM will do it eventually, but maybe not until new overflow pages
-     * have been allocated. Note that there's no need to clean up the new
-     * bucket.
+     * isn't absolutely necessary, but it reduces bloat; if we don't do it
+     * now, VACUUM will do it eventually, but maybe not until new overflow
+     * pages have been allocated. Note that there's no need to clean up the
+     * new bucket.
      */
     if (IsBufferCleanupOK(bucket_obuf))
     {

src/backend/access/transam/slru.c

@@ -233,7 +233,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
     }

     /* Should fit to estimated shmem size */
-    Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
+    Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
 }
 else
     Assert(found);

src/backend/catalog/namespace.c

@@ -3802,14 +3802,14 @@ InitTempTableNamespace(void)
                 get_database_name(MyDatabaseId))));

     /*
-     * Do not allow a Hot Standby session to make temp tables. Aside
-     * from problems with modifying the system catalogs, there is a naming
+     * Do not allow a Hot Standby session to make temp tables. Aside from
+     * problems with modifying the system catalogs, there is a naming
      * conflict: pg_temp_N belongs to the session with BackendId N on the
-     * master, not to a hot standby session with the same BackendId. We should not
-     * be able to get here anyway due to XactReadOnly checks, but let's just
-     * make real sure. Note that this also backstops various operations that
-     * allow XactReadOnly transactions to modify temp tables; they'd need
-     * RecoveryInProgress checks if not for this.
+     * master, not to a hot standby session with the same BackendId. We
+     * should not be able to get here anyway due to XactReadOnly checks, but
+     * let's just make real sure. Note that this also backstops various
+     * operations that allow XactReadOnly transactions to modify temp tables;
+     * they'd need RecoveryInProgress checks if not for this.
      */
     if (RecoveryInProgress())
         ereport(ERROR,

src/backend/catalog/partition.c

@@ -728,9 +728,9 @@ check_new_partition_bound(char *relname, Relation parent,
                      errmsg("empty range bound specified for partition \"%s\"",
                             relname),
                      errdetail("Specified lower bound %s is greater than or equal to upper bound %s.",
-                               get_range_partbound_string(spec->lowerdatums),
-                               get_range_partbound_string(spec->upperdatums)),
-                     parser_errposition(pstate, spec->location)));
+                            get_range_partbound_string(spec->lowerdatums),
+                            get_range_partbound_string(spec->upperdatums)),
+                    parser_errposition(pstate, spec->location)));
         }

         if (partdesc->nparts > 0)

src/backend/commands/copy.c

@@ -1454,7 +1454,7 @@ BeginCopy(ParseState *pstate,
     */
    if (cstate->transition_capture != NULL)
    {
-        int i;
+        int         i;

        cstate->transition_tupconv_maps = (TupleConversionMap **)
            palloc0(sizeof(TupleConversionMap *) *
@@ -2651,6 +2651,7 @@ CopyFrom(CopyState cstate)
                 cstate->transition_capture->tcs_map = NULL;
             }
         }
+
         /*
          * We might need to convert from the parent rowtype to the
          * partition rowtype.

src/backend/commands/subscriptioncmds.c

@@ -919,9 +919,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
     LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
     subworkers = logicalrep_workers_find(subid, false);
     LWLockRelease(LogicalRepWorkerLock);
-    foreach (lc, subworkers)
+    foreach(lc, subworkers)
     {
         LogicalRepWorker *w = (LogicalRepWorker *) lfirst(lc);
+
         if (slotname)
             logicalrep_worker_stop(w->subid, w->relid);
         else
src/backend/commands/tablecmds.c

@@ -13509,8 +13509,8 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
     * having to construct this list again, so we request the strongest lock
     * on all partitions. We need the strongest lock, because we may decide
     * to scan them if we find out that the table being attached (or its leaf
-    * partitions) may contain rows that violate the partition constraint.
-    * If the table has a constraint that would prevent such rows, which by
+    * partitions) may contain rows that violate the partition constraint. If
+    * the table has a constraint that would prevent such rows, which by
     * definition is present in all the partitions, we need not scan the
     * table, nor its partitions. But we cannot risk a deadlock by taking a
     * weaker lock now and the stronger one only when needed.

src/backend/commands/trigger.c

@@ -2071,11 +2071,11 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
 {
     if (trigdesc != NULL)
     {
-        int i;
+        int         i;

         for (i = 0; i < trigdesc->numtriggers; ++i)
         {
-            Trigger *trigger = &trigdesc->triggers[i];
+            Trigger    *trigger = &trigdesc->triggers[i];

             if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
                 return trigger->tgname;
@@ -5253,12 +5253,12 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
      */
     if (row_trigger && transition_capture != NULL)
     {
-        HeapTuple original_insert_tuple = transition_capture->tcs_original_insert_tuple;
+        HeapTuple   original_insert_tuple = transition_capture->tcs_original_insert_tuple;
         TupleConversionMap *map = transition_capture->tcs_map;
-        bool delete_old_table = transition_capture->tcs_delete_old_table;
-        bool update_old_table = transition_capture->tcs_update_old_table;
-        bool update_new_table = transition_capture->tcs_update_new_table;
-        bool insert_new_table = transition_capture->tcs_insert_new_table;;
+        bool        delete_old_table = transition_capture->tcs_delete_old_table;
+        bool        update_old_table = transition_capture->tcs_update_old_table;
+        bool        update_new_table = transition_capture->tcs_update_new_table;
+        bool        insert_new_table = transition_capture->tcs_insert_new_table;;

         if ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
             (event == TRIGGER_EVENT_UPDATE && update_old_table))

src/backend/commands/vacuumlazy.c

@@ -529,11 +529,11 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
     * safely set for relfrozenxid or relminmxid.
     *
     * Before entering the main loop, establish the invariant that
-    * next_unskippable_block is the next block number >= blkno that we
-    * can't skip based on the visibility map, either all-visible for a
-    * regular scan or all-frozen for an aggressive scan. We set it to
-    * nblocks if there's no such block. We also set up the skipping_blocks
-    * flag correctly at this stage.
+    * next_unskippable_block is the next block number >= blkno that we can't
+    * skip based on the visibility map, either all-visible for a regular scan
+    * or all-frozen for an aggressive scan. We set it to nblocks if there's
+    * no such block. We also set up the skipping_blocks flag correctly at
+    * this stage.
     *
     * Note: The value returned by visibilitymap_get_status could be slightly
     * out-of-date, since we make this test before reading the corresponding

src/backend/executor/execProcnode.c

@@ -411,9 +411,9 @@ ExecProcNodeFirst(PlanState *node)
     /*
      * Perform stack depth check during the first execution of the node. We
      * only do so the first time round because it turns out to not be cheap on
-     * some common architectures (eg. x86). This relies on the assumption that
-     * ExecProcNode calls for a given plan node will always be made at roughly
-     * the same stack depth.
+     * some common architectures (eg. x86). This relies on the assumption
+     * that ExecProcNode calls for a given plan node will always be made at
+     * roughly the same stack depth.
      */
     check_stack_depth();

src/backend/executor/nodeModifyTable.c

@@ -1469,7 +1469,7 @@ static void
 ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
 {
     ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate);
-    int i;
+    int         i;

     /* Check for transition tables on the directly targeted relation. */
     mtstate->mt_transition_capture =
@@ -1483,7 +1483,7 @@ ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
     if (mtstate->mt_transition_capture != NULL)
     {
         ResultRelInfo *resultRelInfos;
-        int numResultRelInfos;
+        int         numResultRelInfos;

         /* Find the set of partitions so that we can find their TupleDescs. */
         if (mtstate->mt_partition_dispatch_info != NULL)
@@ -2254,8 +2254,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
         else if (relkind == RELKIND_FOREIGN_TABLE)
         {
             /*
-             * When there is a row-level trigger, there should be a
-             * wholerow attribute.
+             * When there is a row-level trigger, there should be
+             * a wholerow attribute.
              */
             j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
         }

src/backend/libpq/be-secure-openssl.c

@@ -730,9 +730,10 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
             n = -1;
             break;
         case SSL_ERROR_ZERO_RETURN:
+
             /*
-             * the SSL connnection was closed, leave it to the caller
-             * to ereport it
+             * the SSL connnection was closed, leave it to the caller to
+             * ereport it
              */
             errno = ECONNRESET;
             n = -1;

src/backend/optimizer/geqo/geqo_cx.c

@@ -46,7 +46,7 @@
  */
 int
 cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
-   int num_gene, City *city_table)
+   int num_gene, City * city_table)
 {
     int         i,
                 start_pos,

src/backend/optimizer/geqo/geqo_ox1.c

@@ -45,7 +45,7 @@
  */
 void
 ox1(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
-    City *city_table)
+    City * city_table)
 {
     int         left,
                 right,

src/backend/optimizer/geqo/geqo_ox2.c

@@ -44,7 +44,7 @@
  * position crossover
  */
 void
-ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene, City *city_table)
+ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene, City * city_table)
 {
     int         k,
                 j,

src/backend/optimizer/geqo/geqo_px.c

@@ -45,7 +45,7 @@
  */
 void
 px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
-   City *city_table)
+   City * city_table)
 {
     int         num_positions;
     int         i,

src/backend/optimizer/geqo/geqo_recombination.c

@@ -84,7 +84,7 @@ alloc_city_table(PlannerInfo *root, int num_gene)
  * deallocate memory of city table
  */
 void
-free_city_table(PlannerInfo *root, City *city_table)
+free_city_table(PlannerInfo *root, City * city_table)
 {
     pfree(city_table);
 }
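One oddity worth flagging in the five geqo hunks above: the run rewrites `City *city_table` as `City * city_table`. This is the characteristic symptom of BSD indent formatting code while `City` is missing from its typedef list — not knowing the token names a type, it spaces the `*` as if it were a binary operator. A minimal sketch of the distinction, using hypothetical code that is not part of this commit:

#include <stdlib.h>

typedef struct City
{
    int         tour;
} City;

int
main(void)
{
    /* With "City" in the typedef list, pgindent keeps the star attached: */
    City       *known = malloc(sizeof(City));

    /* Without it, indent treats '*' as an operator and spaces both sides: */
    City * unknown = malloc(sizeof(City));

    free(known);
    free(unknown);
    return 0;
}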

src/backend/parser/parse_utilcmd.c

@@ -2131,8 +2131,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation)
     /*
      * If creating a new table (but not a foreign table), we can safely skip
-     * validation of check constraints, and nonetheless mark them valid.
-     * (This will override any user-supplied NOT VALID flag.)
+     * validation of check constraints, and nonetheless mark them valid. (This
+     * will override any user-supplied NOT VALID flag.)
      */
     if (skipValidation)
     {

src/backend/replication/logical/launcher.c

@@ -75,8 +75,8 @@ LogicalRepCtxStruct *LogicalRepCtx;
 typedef struct LogicalRepWorkerId
 {
-    Oid subid;
-    Oid relid;
+    Oid         subid;
+    Oid         relid;
 } LogicalRepWorkerId;

 static List *on_commit_stop_workers = NIL;
@@ -552,7 +552,7 @@ void
 logicalrep_worker_stop_at_commit(Oid subid, Oid relid)
 {
     LogicalRepWorkerId *wid;
-    MemoryContext oldctx;
+    MemoryContext oldctx;

     /* Make sure we store the info in context that survives until commit. */
     oldctx = MemoryContextSwitchTo(TopTransactionContext);
@@ -824,11 +824,12 @@ AtEOXact_ApplyLauncher(bool isCommit)
 {
     if (isCommit)
     {
-        ListCell *lc;
+        ListCell   *lc;

-        foreach (lc, on_commit_stop_workers)
+        foreach(lc, on_commit_stop_workers)
         {
             LogicalRepWorkerId *wid = lfirst(lc);
+
             logicalrep_worker_stop(wid->subid, wid->relid);
         }

src/backend/replication/logical/origin.c

@@ -353,7 +353,7 @@ restart:
     {
         if (state->acquired_by != 0)
         {
-            ConditionVariable *cv;
+            ConditionVariable *cv;

             if (nowait)
                 ereport(ERROR,
@@ -977,7 +977,7 @@ replorigin_get_progress(RepOriginId node, bool flush)
 static void
 ReplicationOriginExitCleanup(int code, Datum arg)
 {
-    ConditionVariable *cv = NULL;
+    ConditionVariable *cv = NULL;

     LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
@@ -1097,7 +1097,7 @@ replorigin_session_setup(RepOriginId node)
 void
 replorigin_session_reset(void)
 {
-    ConditionVariable *cv;
+    ConditionVariable *cv;

     Assert(max_replication_slots != 0);

src/backend/replication/logical/snapbuild.c

@@ -1117,9 +1117,9 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
     * only ever look at those.
     *
     * NB: We only increase xmax when a catalog modifying transaction commits
-    * (see SnapBuildCommitTxn). Because of this, xmax can be lower than xmin,
-    * which looks odd but is correct and actually more efficient, since we hit
-    * fast paths in tqual.c.
+    * (see SnapBuildCommitTxn). Because of this, xmax can be lower than
+    * xmin, which looks odd but is correct and actually more efficient, since
+    * we hit fast paths in tqual.c.
     */
    builder->xmin = running->oldestRunningXid;

src/backend/replication/slot.c

@@ -351,8 +351,8 @@ retry:
         if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
         {
             /*
-             * This is the slot we want. We don't know yet if it's active,
-             * so get ready to sleep on it in case it is. (We may end up not
+             * This is the slot we want. We don't know yet if it's active, so
+             * get ready to sleep on it in case it is. (We may end up not
              * sleeping, but we don't want to do this while holding the
              * spinlock.)
              */
@@ -397,7 +397,7 @@ retry:
         goto retry;
     }
     else
-        ConditionVariableCancelSleep(); /* no sleep needed after all */
+        ConditionVariableCancelSleep(); /* no sleep needed after all */

     /* Let everybody know we've modified this slot */
     ConditionVariableBroadcast(&slot->active_cv);

src/backend/replication/syncrep.c

@@ -293,8 +293,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
     * WalSender has checked our LSN and has removed us from queue. Clean up
     * state and leave. It's OK to reset these shared memory fields without
     * holding SyncRepLock, because any walsenders will ignore us anyway when
-    * we're not on the queue. We need a read barrier to make sure we see
-    * the changes to the queue link (this might be unnecessary without
+    * we're not on the queue. We need a read barrier to make sure we see the
+    * changes to the queue link (this might be unnecessary without
     * assertions, but better safe than sorry).
     */
    pg_read_barrier();
@@ -715,7 +715,7 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync)
     for (i = 0; i < max_wal_senders; i++)
     {
         XLogRecPtr  flush;
-        WalSndState state;
+        WalSndState state;
         int         pid;

         walsnd = &WalSndCtl->walsnds[i];
@@ -794,7 +794,7 @@ SyncRepGetSyncStandbysPriority(bool *am_sync)
     for (i = 0; i < max_wal_senders; i++)
     {
         XLogRecPtr  flush;
-        WalSndState state;
+        WalSndState state;
         int         pid;

         walsnd = &WalSndCtl->walsnds[i];

src/backend/storage/ipc/procarray.c

@@ -1408,8 +1408,8 @@ GetOldestXmin(Relation rel, int flags)
     * being careful not to generate a "permanent" XID.
     *
     * vacuum_defer_cleanup_age provides some additional "slop" for the
-    * benefit of hot standby queries on standby servers. This is quick and
-    * dirty, and perhaps not all that useful unless the master has a
+    * benefit of hot standby queries on standby servers. This is quick
+    * and dirty, and perhaps not all that useful unless the master has a
     * predictable transaction rate, but it offers some protection when
     * there's no walsender connection. Note that we are assuming
     * vacuum_defer_cleanup_age isn't large enough to cause wraparound ---

src/backend/utils/adt/ruleutils.c

@@ -8723,8 +8723,8 @@ get_rule_expr(Node *node, deparse_context *context,
                        list_length(spec->upperdatums));

                 appendStringInfo(buf, "FOR VALUES FROM %s TO %s",
-                                 get_range_partbound_string(spec->lowerdatums),
-                                 get_range_partbound_string(spec->upperdatums));
+                             get_range_partbound_string(spec->lowerdatums),
+                             get_range_partbound_string(spec->upperdatums));
                 break;

             default: