
pgindent run for 9.6

Robert Haas
2016-06-09 18:02:36 -04:00
parent 9164deea2f
commit 4bc424b968
252 changed files with 2670 additions and 2558 deletions


@@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node)
return false;
/*
* Parallel-aware nodes return a subset of the tuples in each worker,
* and in general we can't expect to have enough bookkeeping state to
* know which ones we returned in this worker as opposed to some other
* worker.
* Parallel-aware nodes return a subset of the tuples in each worker, and
* in general we can't expect to have enough bookkeeping state to know
* which ones we returned in this worker as opposed to some other worker.
*/
if (node->parallel_aware)
return false;
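
The comment in this hunk carries the key reasoning: once any node in the plan tree is parallel-aware, each worker holds only a subset of the tuples and backward movement cannot be reconstructed, so the whole tree is disqualified. A minimal sketch of that veto-on-walk shape, using hypothetical stand-in types (PlanNode and supports_backward_scan are illustrative, not the real PostgreSQL executor API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for a plan node; the real Plan struct carries
 * far more state. */
typedef struct PlanNode
{
    bool parallel_aware;
    struct PlanNode *lefttree;
    struct PlanNode *righttree;
} PlanNode;

/* Backward scan is possible only if no node in the tree is
 * parallel-aware: a worker cannot rewind over tuples that other
 * workers produced. */
static bool
supports_backward_scan(PlanNode *node)
{
    if (node == NULL)
        return true;
    if (node->parallel_aware)
        return false;
    return supports_backward_scan(node->lefttree) &&
        supports_backward_scan(node->righttree);
}

int
main(void)
{
    PlanNode scan = {true, NULL, NULL};     /* parallel-aware scan */
    PlanNode top = {false, &scan, NULL};    /* non-parallel parent */

    printf("backward scan ok: %d\n", supports_backward_scan(&top));    /* prints 0 */
    return 0;
}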


@@ -725,7 +725,7 @@ retry:
{
TransactionId xwait;
ItemPointerData ctid_wait;
XLTW_Oper reason_wait;
XLTW_Oper reason_wait;
Datum existing_values[INDEX_MAX_KEYS];
bool existing_isnull[INDEX_MAX_KEYS];
char *error_new;


@@ -1851,25 +1851,25 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
wco->polname, wco->relname)));
errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy for table \"%s\"",
wco->relname)));
errmsg("new row violates row-level security policy for table \"%s\"",
wco->relname)));
break;
case WCO_RLS_CONFLICT_CHECK:
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
wco->polname, wco->relname)));
errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
wco->relname)));
errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
wco->relname)));
break;
default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
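
All four branches above follow one shape: name the violated policy when it is known, otherwise fall back to a table-only message. A compact stand-alone sketch of that pattern (report_rls_violation and plain fprintf are hypothetical stand-ins for the real ereport/errcode machinery):

#include <stdio.h>

/* When polname is non-NULL we can name the violated policy; otherwise
 * only the table is known, mirroring the branches in
 * ExecWithCheckOptions above. */
static void
report_rls_violation(const char *polname, const char *relname)
{
    if (polname != NULL)
        fprintf(stderr,
                "new row violates row-level security policy \"%s\" for table \"%s\"\n",
                polname, relname);
    else
        fprintf(stderr,
                "new row violates row-level security policy for table \"%s\"\n",
                relname);
}

int
main(void)
{
    report_rls_violation("p_tenant", "orders");
    report_rls_violation(NULL, "orders");
    return 0;
}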


@@ -83,7 +83,7 @@ struct SharedExecutorInstrumentation
typedef struct ExecParallelEstimateContext
{
ParallelContext *pcxt;
int nnodes;
int nnodes;
} ExecParallelEstimateContext;
/* Context object for ExecParallelInitializeDSM. */
@@ -91,7 +91,7 @@ typedef struct ExecParallelInitializeDSMContext
{
ParallelContext *pcxt;
SharedExecutorInstrumentation *instrumentation;
int nnodes;
int nnodes;
} ExecParallelInitializeDSMContext;
/* Helper functions that run in the parallel leader. */
@@ -99,11 +99,11 @@ static char *ExecSerializePlan(Plan *plan, EState *estate);
static bool ExecParallelEstimate(PlanState *node,
ExecParallelEstimateContext *e);
static bool ExecParallelInitializeDSM(PlanState *node,
ExecParallelInitializeDSMContext *d);
ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
bool reinitialize);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation);
SharedExecutorInstrumentation *instrumentation);
/* Helper functions that run in the parallel worker. */
static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
@@ -387,12 +387,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
/* Estimate space for tuple queues. */
shm_toc_estimate_chunk(&pcxt->estimator,
mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
* Give parallel-aware nodes a chance to add to the estimates, and get
* a count of how many PlanState nodes there are.
* Give parallel-aware nodes a chance to add to the estimates, and get a
* count of how many PlanState nodes there are.
*/
e.pcxt = pcxt;
e.nnodes = 0;
@@ -444,14 +444,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
/*
* If instrumentation options were supplied, allocate space for the
* data. It only gets partially initialized here; the rest happens
* during ExecParallelInitializeDSM.
* If instrumentation options were supplied, allocate space for the data.
* It only gets partially initialized here; the rest happens during
* ExecParallelInitializeDSM.
*/
if (estate->es_instrument)
{
Instrumentation *instrument;
int i;
int i;
instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
instrumentation->instrument_options = estate->es_instrument;
@@ -493,13 +493,13 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
*/
static bool
ExecParallelRetrieveInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation)
SharedExecutorInstrumentation *instrumentation)
{
Instrumentation *instrument;
int i;
int n;
int ibytes;
int plan_node_id = planstate->plan->plan_node_id;
int i;
int n;
int ibytes;
int plan_node_id = planstate->plan->plan_node_id;
/* Find the instrumentation for this node. */
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
@@ -532,7 +532,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
void
ExecParallelFinish(ParallelExecutorInfo *pei)
{
int i;
int i;
if (pei->finished)
return;
@@ -626,19 +626,19 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
*/
static bool
ExecParallelReportInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation)
SharedExecutorInstrumentation *instrumentation)
{
int i;
int plan_node_id = planstate->plan->plan_node_id;
int i;
int plan_node_id = planstate->plan->plan_node_id;
Instrumentation *instrument;
InstrEndLoop(planstate->instrument);
/*
* If we shuffled the plan_node_id values in ps_instrument into sorted
* order, we could use binary search here. This might matter someday
* if we're pushing down sufficiently large plan trees. For now, do it
* the slow, dumb way.
* order, we could use binary search here. This might matter someday if
* we're pushing down sufficiently large plan trees. For now, do it the
* slow, dumb way.
*/
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id)
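
The comment above names the alternative it forgoes: with the plan_node_id values kept sorted, the lookup could be a binary search instead of this linear scan. A hedged sketch of that idea (find_plan_slot and the bare int array are illustrative, not the real SharedExecutorInstrumentation layout):

#include <stdio.h>
#include <stdlib.h>

/* Comparator for bsearch over an ascending array of plan node ids. */
static int
cmp_plan_node_id(const void *a, const void *b)
{
    int key = *(const int *) a;
    int elem = *(const int *) b;

    return (key > elem) - (key < elem);
}

/* Return the slot index for plan_node_id, or -1 if absent.  With the
 * ids sorted this is O(log n) versus the O(n) loop in the hunk above. */
static int
find_plan_slot(const int *ids, int nids, int plan_node_id)
{
    const int *hit = bsearch(&plan_node_id, ids, (size_t) nids,
                             sizeof(int), cmp_plan_node_id);

    return hit ? (int) (hit - ids) : -1;
}

int
main(void)
{
    int ids[] = {1, 3, 4, 7, 9};    /* sorted plan_node_id values */

    printf("slot for 7: %d\n", find_plan_slot(ids, 5, 7));   /* 3 */
    printf("slot for 2: %d\n", find_plan_slot(ids, 5, 2));   /* -1 */
    return 0;
}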


@@ -497,8 +497,8 @@ init_execution_state(List *queryTree_list,
stmt = queryTree->utilityStmt;
else
stmt = (Node *) pg_plan_query(queryTree,
fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
NULL);
fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
NULL);
/* Precheck all commands for validity in a function */
if (IsA(stmt, TransactionStmt))


@@ -491,9 +491,9 @@ static void finalize_aggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void finalize_partialaggregate(AggState *aggstate,
AggStatePerAgg peragg,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
AggStatePerAgg peragg,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void prepare_projection_slot(AggState *aggstate,
TupleTableSlot *slot,
int currentSet);
@@ -981,17 +981,18 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
if (OidIsValid(pertrans->deserialfn_oid))
{
/*
* Don't call a strict deserialization function with NULL input.
* A strict deserialization function and a null value means we skip
* calling the combine function for this state. We assume that this
* would be a waste of time and effort anyway so just skip it.
* Don't call a strict deserialization function with NULL input. A
* strict deserialization function and a null value means we skip
* calling the combine function for this state. We assume that
* this would be a waste of time and effort anyway so just skip
* it.
*/
if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
continue;
else
{
FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
MemoryContext oldContext;
FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
MemoryContext oldContext;
dsinfo->arg[0] = slot->tts_values[0];
dsinfo->argnull[0] = slot->tts_isnull[0];
@@ -1423,14 +1424,14 @@ finalize_partialaggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull)
{
AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
MemoryContext oldContext;
AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
/*
* serialfn_oid will be set if we must serialize the input state
* before calling the combine function on the state.
* serialfn_oid will be set if we must serialize the input state before
* calling the combine function on the state.
*/
if (OidIsValid(pertrans->serialfn_oid))
{
@@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate,
else
{
FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
fcinfo->arg[0] = pergroupstate->transValue;
fcinfo->argnull[0] = pergroupstate->transValueIsNull;
@@ -1459,7 +1461,7 @@ finalize_partialaggregate(AggState *aggstate,
/* If result is pass-by-ref, make sure it is in the right context. */
if (!peragg->resulttypeByVal && !*resultIsNull &&
!MemoryContextContains(CurrentMemoryContext,
DatumGetPointer(*resultVal)))
DatumGetPointer(*resultVal)))
*resultVal = datumCopy(*resultVal,
peragg->resulttypeByVal,
peragg->resulttypeLen);
@@ -2627,21 +2629,21 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*
* 1. An aggregate function appears more than once in query:
*
* SELECT SUM(x) FROM ... HAVING SUM(x) > 0
* SELECT SUM(x) FROM ... HAVING SUM(x) > 0
*
* Since the aggregates are identical, we only need to calculate
* the value once. Both aggregates will share the same 'aggno'
* value.
* Since the aggregates are identical, we only need to calculate
* the value once. Both aggregates will share the same 'aggno'
* value.
*
* 2. Two different aggregate functions appear in the query, but the
* aggregates have the same transition function and initial value, and
* differ only in their final function:
* aggregates have the same transition function and initial value, and
* differ only in their final function:
*
* SELECT SUM(x), AVG(x) FROM ...
* SELECT SUM(x), AVG(x) FROM ...
*
* In this case we must create a new peragg for the varying aggregate,
* and need to call the final functions separately, but can share the
* same transition state.
* In this case we must create a new peragg for the varying aggregate,
* and need to call the final functions separately, but can share the
* same transition state.
*
* For either of these optimizations to be valid, the aggregate's
* arguments must be the same, including any modifiers such as ORDER BY,
@@ -2889,8 +2891,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
existing_transno = find_compatible_pertrans(aggstate, aggref,
transfn_oid, aggtranstype,
serialfn_oid, deserialfn_oid,
initValue, initValueIsNull,
serialfn_oid, deserialfn_oid,
initValue, initValueIsNull,
same_input_transnos);
if (existing_transno != -1)
{
@@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
/*
* The serialization and deserialization functions must match, if
* present, as we're unable to share the trans state for aggregates
* which will serialize or deserialize into different formats. Remember
* that these will be InvalidOid if they're not required for this agg
* node.
* which will serialize or deserialize into different formats.
* Remember that these will be InvalidOid if they're not required for
* this agg node.
*/
if (aggserialfn != pertrans->serialfn_oid ||
aggdeserialfn != pertrans->deserialfn_oid)
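
The strict-deserialization rule reflowed in the combine_aggregates() hunk above is a general convention: a strict function must never be called with a NULL argument, so the caller checks for NULL first and skips the call. A simplified stand-alone sketch (ToyFunction and apply are hypothetical; the real code goes through FunctionCallInfo):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct
{
    bool strict;                  /* never call with a NULL argument? */
    int (*fn) (const int *arg);   /* toy "deserialization" function */
} ToyFunction;

static int
double_it(const int *arg)
{
    return *arg * 2;
}

/* Mirror of the guard in combine_aggregates(): a strict function plus
 * a NULL input means we skip the call entirely. */
static void
apply(ToyFunction *f, const int *arg)
{
    if (f->strict && arg == NULL)
    {
        printf("skipped: strict function, NULL input\n");
        return;
    }
    printf("result: %d\n", f->fn(arg));
}

int
main(void)
{
    ToyFunction f = {true, double_it};
    int v = 21;

    apply(&f, &v);      /* result: 42 */
    apply(&f, NULL);    /* skipped */
    return 0;
}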


@@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode. outerPlan may also be NULL, in which case there
* is nothing to rescan at all.
* first ExecProcNode. outerPlan may also be NULL, in which case there is
* nothing to rescan at all.
*/
if (outerPlan != NULL && outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
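
The guard in this hunk encodes a small protocol: a child whose chgParam set is non-empty will rescan itself lazily on its next ExecProcNode call, so the parent must not rescan it eagerly. A toy sketch of that deferral (ToyNode and its fields are hypothetical simplifications of the real PlanState):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct
{
    bool has_chg_param;   /* stand-in for chgParam != NULL */
    int  eager_rescans;   /* how many eager rescans happened */
} ToyNode;

/* Mirror of the ExecReScanForeignScan guard: rescan the child eagerly
 * only when it exists and has no pending parameter change; a pending
 * change means the child rescans itself on first use. */
static void
rescan_parent(ToyNode *outer)
{
    if (outer != NULL && !outer->has_chg_param)
        outer->eager_rescans++;
}

int
main(void)
{
    ToyNode quiet = {false, 0};
    ToyNode pending = {true, 0};

    rescan_parent(&quiet);      /* rescanned eagerly */
    rescan_parent(&pending);    /* deferred to first ExecProcNode */
    rescan_parent(NULL);        /* nothing to rescan */
    printf("quiet=%d pending=%d\n", quiet.eager_rescans, pending.eager_rescans);
    return 0;
}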


@@ -138,8 +138,8 @@ ExecGather(GatherState *node)
/*
* Initialize the parallel context and workers on first execution. We do
* this on first execution rather than during node initialization, as it
* needs to allocate a large dynamic segment, so it is better to do so
* only if it is really needed.
* needs to allocate a large dynamic segment, so it is better to do so only
* if it is really needed.
*/
if (!node->initialized)
{
@@ -147,8 +147,8 @@ ExecGather(GatherState *node)
Gather *gather = (Gather *) node->ps.plan;
/*
* Sometimes we might have to run without parallelism; but if
* parallel mode is active then we can try to fire up some workers.
* Sometimes we might have to run without parallelism; but if parallel
* mode is active then we can try to fire up some workers.
*/
if (gather->num_workers > 0 && IsInParallelMode())
{
@@ -186,7 +186,7 @@ ExecGather(GatherState *node)
}
else
{
/* No workers? Then never mind. */
/* No workers? Then never mind. */
ExecShutdownGatherWorkers(node);
}
}
@@ -314,7 +314,7 @@ gather_getnext(GatherState *gatherstate)
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
int waitpos = gatherstate->nextreader;
int waitpos = gatherstate->nextreader;
for (;;)
{
@@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate)
tup = TupleQueueReaderNext(reader, true, &readerdone);
/*
* If this reader is done, remove it. If all readers are done,
* clean up remaining worker state.
* If this reader is done, remove it. If all readers are done, clean
* up remaining worker state.
*/
if (readerdone)
{
@@ -402,7 +402,7 @@ ExecShutdownGatherWorkers(GatherState *node)
/* Shut down tuple queue readers before shutting down workers. */
if (node->reader != NULL)
{
int i;
int i;
for (i = 0; i < node->nreaders; ++i)
DestroyTupleQueueReader(node->reader[i]);
@@ -452,10 +452,10 @@ void
ExecReScanGather(GatherState *node)
{
/*
* Re-initialize the parallel workers to perform a rescan of the relation.
* We want to shut down all the workers gracefully so that they can
* propagate any error or other information to the master backend before
* dying. The parallel context will be reused for the rescan.
* Re-initialize the parallel workers to perform a rescan of the relation. We
* want to shut down all the workers gracefully so that they can propagate
* any error or other information to the master backend before dying. The
* parallel context will be reused for the rescan.
*/
ExecShutdownGatherWorkers(node);
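
The reader-removal logic reflowed in the gather_readnext() hunk above follows a classic shape: round-robin over an array of producers and compact the array in place when one reports it is done. A minimal sketch under that assumption (ToyReader and next_value are hypothetical stand-ins for TupleQueueReader and TupleQueueReaderNext):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
    int remaining;      /* values this "worker" will still emit */
} ToyReader;

/* Produce the next value if any; set *done once the reader is drained. */
static bool
next_value(ToyReader *r, int *value, bool *done)
{
    if (r->remaining <= 0)
    {
        *done = true;
        return false;
    }
    *value = r->remaining--;
    *done = false;
    return true;
}

int
main(void)
{
    ToyReader readers[3] = {{1}, {2}, {3}};
    int nreaders = 3;
    int next = 0;

    while (nreaders > 0)
    {
        int value;
        bool done;

        if (next_value(&readers[next], &value, &done))
            printf("got %d from reader %d\n", value, next);

        if (done)
        {
            /* Remove the finished reader by shifting the tail down,
             * as gather_readnext does with its reader array. */
            memmove(&readers[next], &readers[next + 1],
                    sizeof(ToyReader) * (nreaders - next - 1));
            nreaders--;
        }
        else
            next++;

        if (nreaders > 0)
            next %= nreaders;   /* wrap around: round-robin */
    }
    return 0;
}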


@@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Note that it is possible that the target tuple has been modified in
* this session, after the above heap_lock_tuple. We choose to not error
* out in that case, in line with ExecUpdate's treatment of similar
* cases. This can happen if an UPDATE is triggered from within
* ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by
* selecting from a wCTE in the ON CONFLICT's SET.
* out in that case, in line with ExecUpdate's treatment of similar cases.
* This can happen if an UPDATE is triggered from within ExecQual(),
* ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
* wCTE in the ON CONFLICT's SET.
*/
/* Execute UPDATE with projection */
@@ -1595,7 +1595,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/* Initialize the usesFdwDirectModify flag */
resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
node->fdwDirectModifyPlans);
node->fdwDirectModifyPlans);
/*
* Verify result relation is a valid target for the current operation


@@ -65,8 +65,8 @@ SeqNext(SeqScanState *node)
if (scandesc == NULL)
{
/*
* We reach here if the scan is not parallel, or if we're executing
* serially a scan that was intended to be parallel.
* We reach here if the scan is not parallel, or if we're executing serially
* a scan that was intended to be parallel.
*/
scandesc = heap_beginscan(node->ss.ss_currentRelation,
estate->es_snapshot,
@@ -145,7 +145,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
((SeqScan *) node->ss.ps.plan)->scanrelid,
((SeqScan *) node->ss.ps.plan)->scanrelid,
eflags);
node->ss.ss_currentRelation = currentRelation;
@@ -277,8 +277,8 @@ ExecReScanSeqScan(SeqScanState *node)
scan = node->ss.ss_currentScanDesc;
if (scan != NULL)
heap_rescan(scan, /* scan desc */
NULL); /* new scan keys */
heap_rescan(scan, /* scan desc */
NULL); /* new scan keys */
ExecScanReScan((ScanState *) node);
}
@@ -316,7 +316,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
ParallelContext *pcxt)
{
EState *estate = node->ss.ps.state;
ParallelHeapScanDesc pscan;
ParallelHeapScanDesc pscan;
pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
heap_parallelscan_initialize(pscan,
@@ -336,7 +336,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
void
ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc)
{
ParallelHeapScanDesc pscan;
ParallelHeapScanDesc pscan;
pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
node->ss.ss_currentScanDesc =


@@ -2220,8 +2220,8 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
/* build expression trees using actual argument & result types */
build_aggregate_transfn_expr(inputTypes,
numArguments,
0, /* no ordered-set window functions yet */
false, /* no variadic window functions yet */
0, /* no ordered-set window functions yet */
false, /* no variadic window functions yet */
wfunc->wintype,
wfunc->inputcollid,
transfn_oid,


@@ -44,13 +44,13 @@ typedef enum
TQUEUE_REMAP_ARRAY, /* array */
TQUEUE_REMAP_RANGE, /* range */
TQUEUE_REMAP_RECORD /* composite type, named or anonymous */
} RemapClass;
} RemapClass;
typedef struct
{
int natts;
RemapClass mapping[FLEXIBLE_ARRAY_MEMBER];
} RemapInfo;
} RemapInfo;
typedef struct
{
@@ -61,13 +61,13 @@ typedef struct
char mode;
TupleDesc tupledesc;
RemapInfo *remapinfo;
} TQueueDestReceiver;
} TQueueDestReceiver;
typedef struct RecordTypemodMap
{
int remotetypmod;
int localtypmod;
} RecordTypemodMap;
} RecordTypemodMap;
struct TupleQueueReader
{
@@ -81,19 +81,19 @@ struct TupleQueueReader
#define TUPLE_QUEUE_MODE_CONTROL 'c'
#define TUPLE_QUEUE_MODE_DATA 'd'
static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype,
static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype,
Datum value);
static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc);
static void TupleQueueHandleControlMessage(TupleQueueReader *reader,
Size nbytes, char *data);
static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader,
Size nbytes, HeapTupleHeader data);
static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader,
TupleDesc tupledesc, RemapInfo * remapinfo,
TupleDesc tupledesc, RemapInfo *remapinfo,
HeapTuple tuple);
static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass,
Datum value);
@@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
* Invoke the appropriate walker function based on the given RemapClass.
*/
static void
tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value)
{
check_stack_depth();
@@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
* contained therein.
*/
static void
tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value)
{
HeapTupleHeader tup;
Oid typeid;
@@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
* contained therein.
*/
static void
tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value)
{
ArrayType *arr = DatumGetArrayTypeP(value);
Oid typeid = ARR_ELEMTYPE(arr);
@@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
* contained therein.
*/
static void
tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value)
{
RangeType *range = DatumGetRangeType(value);
Oid typeid = RangeTypeGetOid(range);
@@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
* already done so previously.
*/
static void
tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc)
{
StringInfoData buf;
@@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader,
*/
static HeapTuple
TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc,
RemapInfo * remapinfo, HeapTuple tuple)
RemapInfo *remapinfo, HeapTuple tuple)
{
Datum *values;
bool *isnull;