
pgindent run for 9.5

Bruce Momjian
2015-05-23 21:35:49 -04:00
parent 225892552b
commit 807b9e0dff
414 changed files with 5810 additions and 5308 deletions


@ -405,10 +405,10 @@ ExecSupportsMarkRestore(Path *pathnode)
* that does, we presently come here only for ResultPath nodes,
* which represent Result plans without a child plan. So there is
* nothing to recurse to and we can just say "false". (This means
* that Result's support for mark/restore is in fact dead code.
* We keep it since it's not much code, and someday the planner
* might be smart enough to use it. That would require making
* this function smarter too, of course.)
* that Result's support for mark/restore is in fact dead code. We
* keep it since it's not much code, and someday the planner might
* be smart enough to use it. That would require making this
* function smarter too, of course.)
*/
Assert(IsA(pathnode, ResultPath));
return false;


@ -78,9 +78,9 @@
* another in-progress tuple, it has two options:
*
* 1. back out the speculatively inserted tuple, then wait for the other
* transaction, and retry. Or,
* transaction, and retry. Or,
* 2. wait for the other transaction, with the speculatively inserted tuple
* still in place.
* still in place.
*
* If two backends insert at the same time, and both try to wait for each
* other, they will deadlock. So option 2 is not acceptable. Option 1
@ -428,7 +428,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
indexRelation, indexInfo,
tupleid, values, isnull,
estate, false,
waitMode, violationOK, NULL);
waitMode, violationOK, NULL);
}
if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
@ -538,7 +538,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("ON CONFLICT does not support deferred unique constraints/exclusion constraints as arbiters"),
errtableconstraint(heapRelation,
RelationGetRelationName(indexRelation))));
RelationGetRelationName(indexRelation))));
checkedIndex = true;
@ -578,7 +578,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
satisfiesConstraint =
check_exclusion_or_unique_constraint(heapRelation, indexRelation,
indexInfo, &invalidItemPtr,
values, isnull, estate, false,
values, isnull, estate, false,
CEOUC_WAIT, true,
conflictTid);
if (!satisfiesConstraint)
@ -814,9 +814,9 @@ retry:
errmsg("could not create exclusion constraint \"%s\"",
RelationGetRelationName(index)),
error_new && error_existing ?
errdetail("Key %s conflicts with key %s.",
error_new, error_existing) :
errdetail("Key conflicts exist."),
errdetail("Key %s conflicts with key %s.",
error_new, error_existing) :
errdetail("Key conflicts exist."),
errtableconstraint(heap,
RelationGetRelationName(index))));
else
@ -825,9 +825,9 @@ retry:
errmsg("conflicting key value violates exclusion constraint \"%s\"",
RelationGetRelationName(index)),
error_new && error_existing ?
errdetail("Key %s conflicts with existing key %s.",
error_new, error_existing) :
errdetail("Key conflicts with existing key."),
errdetail("Key %s conflicts with existing key %s.",
error_new, error_existing) :
errdetail("Key conflicts with existing key."),
errtableconstraint(heap,
RelationGetRelationName(index))));
}
@ -838,8 +838,8 @@ retry:
* Ordinarily, at this point the search should have found the originally
* inserted tuple (if any), unless we exited the loop early because of
* conflict. However, it is possible to define exclusion constraints for
* which that wouldn't be true --- for instance, if the operator is <>.
* So we no longer complain if found_self is still false.
* which that wouldn't be true --- for instance, if the operator is <>. So
* we no longer complain if found_self is still false.
*/
econtext->ecxt_scantuple = save_scantuple;


@ -153,16 +153,16 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
* If the transaction is read-only, we need to check if any writes are
* planned to non-temporary tables. EXPLAIN is considered read-only.
*
* Don't allow writes in parallel mode. Supporting UPDATE and DELETE would
* require (a) storing the combocid hash in shared memory, rather than
* synchronizing it just once at the start of parallelism, and (b) an
* Don't allow writes in parallel mode. Supporting UPDATE and DELETE
* would require (a) storing the combocid hash in shared memory, rather
* than synchronizing it just once at the start of parallelism, and (b) an
* alternative to heap_update()'s reliance on xmax for mutual exclusion.
* INSERT may have no such troubles, but we forbid it to simplify the
* checks.
*
* We have lower-level defenses in CommandCounterIncrement and elsewhere
* against performing unsafe operations in parallel mode, but this gives
* a more user-friendly error message.
* against performing unsafe operations in parallel mode, but this gives a
* more user-friendly error message.
*/
if ((XactReadOnly || IsInParallelMode()) &&
!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
@ -670,14 +670,14 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
*/
if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
userid,
rte->insertedCols,
ACL_INSERT))
rte->insertedCols,
ACL_INSERT))
return false;
if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
userid,
rte->updatedCols,
ACL_UPDATE))
rte->updatedCols,
ACL_UPDATE))
return false;
}
return true;
@ -695,10 +695,9 @@ ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
int col = -1;
/*
* When the query doesn't explicitly update any columns, allow the
* query if we have permission on any column of the rel. This is
* to handle SELECT FOR UPDATE as well as possible corner cases in
* UPDATE.
* When the query doesn't explicitly update any columns, allow the query
* if we have permission on any column of the rel. This is to handle
* SELECT FOR UPDATE as well as possible corner cases in UPDATE.
*/
if (bms_is_empty(modifiedCols))
{
@ -742,8 +741,8 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
ListCell *l;
/*
* Fail if write permissions are requested in parallel mode for
* table (temp or non-temp), otherwise fail for any non-temp table.
* Fail if write permissions are requested in parallel mode for table
* (temp or non-temp), otherwise fail for any non-temp table.
*/
foreach(l, plannedstmt->rtable)
{
@ -1665,9 +1664,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
Relation rel = resultRelInfo->ri_RelationDesc;
TupleDesc tupdesc = RelationGetDescr(rel);
TupleConstr *constr = tupdesc->constr;
Bitmapset *modifiedCols;
Bitmapset *insertedCols;
Bitmapset *updatedCols;
Bitmapset *modifiedCols;
Bitmapset *insertedCols;
Bitmapset *updatedCols;
Assert(constr);
@ -1722,7 +1721,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
RelationGetRelationName(rel), failed),
val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
errtableconstraint(rel, failed)));
}
}
@ -1773,11 +1772,11 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
/*
* WITH CHECK OPTION checks are intended to ensure that the new tuple
* is visible (in the case of a view) or that it passes the
* 'with-check' policy (in the case of row security).
* If the qual evaluates to NULL or FALSE, then the new tuple won't be
* included in the view or doesn't pass the 'with-check' policy for the
* table. We need ExecQual to return FALSE for NULL to handle the view
* case (the opposite of what we do above for CHECK constraints).
* 'with-check' policy (in the case of row security). If the qual
* evaluates to NULL or FALSE, then the new tuple won't be included in
* the view or doesn't pass the 'with-check' policy for the table. We
* need ExecQual to return FALSE for NULL to handle the view case (the
* opposite of what we do above for CHECK constraints).
*/
if (!ExecQual((List *) wcoExpr, econtext, false))
{
@ -1788,14 +1787,15 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
switch (wco->kind)
{
/*
* For WITH CHECK OPTIONs coming from views, we might be able to
* provide the details on the row, depending on the permissions
* on the relation (that is, if the user could view it directly
* anyway). For RLS violations, we don't include the data since
* we don't know if the user should be able to view the tuple as
* as that depends on the USING policy.
*/
/*
* For WITH CHECK OPTIONs coming from views, we might be
* able to provide the details on the row, depending on
* the permissions on the relation (that is, if the user
* could view it directly anyway). For RLS violations, we
* don't include the data since we don't know if the user
* should be able to view the tuple as as that depends on
* the USING policy.
*/
case WCO_VIEW_CHECK:
insertedCols = GetInsertedColumns(resultRelInfo, estate);
updatedCols = GetUpdatedColumns(resultRelInfo, estate);
@ -1808,8 +1808,8 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
errmsg("new row violates WITH CHECK OPTION for \"%s\"",
wco->relname),
errmsg("new row violates WITH CHECK OPTION for \"%s\"",
wco->relname),
val_desc ? errdetail("Failing row contains %s.",
val_desc) : 0));
break;
@ -1817,14 +1817,14 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
case WCO_RLS_UPDATE_CHECK:
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row level security policy for \"%s\"",
wco->relname)));
errmsg("new row violates row level security policy for \"%s\"",
wco->relname)));
break;
case WCO_RLS_CONFLICT_CHECK:
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row level security policy (USING expression) for \"%s\"",
wco->relname)));
errmsg("new row violates row level security policy (USING expression) for \"%s\"",
wco->relname)));
break;
default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
@ -1915,8 +1915,8 @@ ExecBuildSlotValueDescription(Oid reloid,
{
/*
* No table-level SELECT, so need to make sure they either have
* SELECT rights on the column or that they have provided the
* data for the column. If not, omit this column from the error
* SELECT rights on the column or that they have provided the data
* for the column. If not, omit this column from the error
* message.
*/
aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
@ -2258,14 +2258,14 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
break;
case LockWaitSkip:
if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
return NULL; /* skip instead of waiting */
return NULL; /* skip instead of waiting */
break;
case LockWaitError:
if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
RelationGetRelationName(relation))));
RelationGetRelationName(relation))));
break;
}
continue; /* loop back to repeat heap_fetch */
@ -2313,9 +2313,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
* doing so would require changing heap_update and
* heap_delete to not complain about updating "invisible"
* tuples, which seems pretty scary (heap_lock_tuple will
* not complain, but few callers expect HeapTupleInvisible,
* and we're not one of them). So for now, treat the tuple
* as deleted and do not process.
* not complain, but few callers expect
* HeapTupleInvisible, and we're not one of them). So for
* now, treat the tuple as deleted and do not process.
*/
ReleaseBuffer(buffer);
return NULL;
@ -2563,8 +2563,8 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
if (fdwroutine->RefetchForeignRow == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot lock rows in foreign table \"%s\"",
RelationGetRelationName(erm->relation))));
errmsg("cannot lock rows in foreign table \"%s\"",
RelationGetRelationName(erm->relation))));
copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
erm,
datum,


@ -182,8 +182,8 @@ static Datum ExecEvalArrayCoerceExpr(ArrayCoerceExprState *astate,
static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
/* ----------------------------------------------------------------
@ -3034,10 +3034,10 @@ ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
bool *isNull,
ExprDoneCond *isDone)
{
int result = 0;
int attnum = 0;
Bitmapset *grouped_cols = gstate->aggstate->grouped_cols;
ListCell *lc;
int result = 0;
int attnum = 0;
Bitmapset *grouped_cols = gstate->aggstate->grouped_cols;
ListCell *lc;
if (isDone)
*isDone = ExprSingleResult;
@ -4529,7 +4529,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
GroupingFuncExprState *grp_state = makeNode(GroupingFuncExprState);
Agg *agg = NULL;
if (!parent || !IsA(parent, AggState) || !IsA(parent->plan, Agg))
if (!parent || !IsA(parent, AggState) ||!IsA(parent->plan, Agg))
elog(ERROR, "parent of GROUPING is not Agg node");
grp_state->aggstate = (AggState *) parent;


@ -645,7 +645,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
* overall targetlist's econtext. GroupingFunc arguments are never
* evaluated at all.
*/
if (IsA(node, Aggref) || IsA(node, GroupingFunc))
if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
return false;
if (IsA(node, WindowFunc))
return false;


@ -337,11 +337,11 @@ typedef struct AggStatePerPhaseData
{
int numsets; /* number of grouping sets (or 0) */
int *gset_lengths; /* lengths of grouping sets */
Bitmapset **grouped_cols; /* column groupings for rollup */
Bitmapset **grouped_cols; /* column groupings for rollup */
FmgrInfo *eqfunctions; /* per-grouping-field equality fns */
Agg *aggnode; /* Agg node for phase data */
Sort *sortnode; /* Sort node for input ordering for phase */
} AggStatePerPhaseData;
} AggStatePerPhaseData;
/*
* To implement hashed aggregation, we need a hashtable that stores a
@ -380,12 +380,12 @@ static void finalize_aggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void prepare_projection_slot(AggState *aggstate,
TupleTableSlot *slot,
int currentSet);
TupleTableSlot *slot,
int currentSet);
static void finalize_aggregates(AggState *aggstate,
AggStatePerAgg peragg,
AggStatePerGroup pergroup,
int currentSet);
AggStatePerAgg peragg,
AggStatePerGroup pergroup,
int currentSet);
static TupleTableSlot *project_aggregates(AggState *aggstate);
static Bitmapset *find_unaggregated_cols(AggState *aggstate);
static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos);
@ -441,12 +441,12 @@ initialize_phase(AggState *aggstate, int newphase)
}
/*
* If this isn't the last phase, we need to sort appropriately for the next
* phase in sequence.
* If this isn't the last phase, we need to sort appropriately for the
* next phase in sequence.
*/
if (newphase < aggstate->numphases - 1)
{
Sort *sortnode = aggstate->phases[newphase+1].sortnode;
Sort *sortnode = aggstate->phases[newphase + 1].sortnode;
PlanState *outerNode = outerPlanState(aggstate);
TupleDesc tupDesc = ExecGetResultType(outerNode);
@ -540,9 +540,8 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
/*
* (Re)set transValue to the initial value.
*
* Note that when the initial value is pass-by-ref, we must copy
* it (into the aggcontext) since we will pfree the transValue
* later.
* Note that when the initial value is pass-by-ref, we must copy it (into
* the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
@ -551,7 +550,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
@ -560,11 +559,11 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
* If the initial value for the transition state doesn't exist in
* the pg_aggregate table then we will let the first non-NULL
* value returned from the outer procNode become the initial
* value. (This is useful for aggregates like max() and min().)
* The noTransValue flag signals that we still need to do this.
* If the initial value for the transition state doesn't exist in the
* pg_aggregate table then we will let the first non-NULL value returned
* from the outer procNode become the initial value. (This is useful for
* aggregates like max() and min().) The noTransValue flag signals that we
* still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
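
The noTransValue behaviour described in that comment can be illustrated with a small standalone sketch, assuming a max()-style integer aggregate whose declared initial value is null; the names and types below are illustrative, not the executor's own AggStatePerGroup machinery.

#include <stdbool.h>
#include <stdio.h>

/*
 * Minimal sketch: a max()-style aggregate with a null initial value.  The
 * first non-null input simply becomes the transition value; later inputs go
 * through the ordinary comparison.
 */
typedef struct
{
    int     transValue;
    bool    transValueIsNull;
    bool    noTransValue;   /* still waiting for the first non-null input */
} GroupStateSketch;

static void
advance_max(GroupStateSketch *state, int value, bool isnull)
{
    if (isnull)
        return;             /* strict transition function: skip null inputs */

    if (state->noTransValue)
    {
        /* first non-null input becomes the initial value, as for max()/min() */
        state->transValue = value;
        state->transValueIsNull = false;
        state->noTransValue = false;
        return;
    }

    if (value > state->transValue)
        state->transValue = value;
}

int
main(void)
{
    GroupStateSketch state = {0, true, true};
    int     inputs[] = {7, 3, 42, 5};

    advance_max(&state, 0, true);   /* a null input leaves the state untouched */
    for (int i = 0; i < 4; i++)
        advance_max(&state, inputs[i], false);
    printf("max = %d\n", state.transValue);     /* prints 42 */
    return 0;
}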
@ -586,8 +585,8 @@ initialize_aggregates(AggState *aggstate,
int numReset)
{
int aggno;
int numGroupingSets = Max(aggstate->phase->numsets, 1);
int setno = 0;
int numGroupingSets = Max(aggstate->phase->numsets, 1);
int setno = 0;
if (numReset < 1)
numReset = numGroupingSets;
@ -655,7 +654,7 @@ advance_transition_function(AggState *aggstate,
* do not need to pfree the old transValue, since it's NULL.
*/
oldContext = MemoryContextSwitchTo(
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
pergroupstate->transValue = datumCopy(fcinfo->arg[1],
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
@ -730,9 +729,9 @@ static void
advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
{
int aggno;
int setno = 0;
int numGroupingSets = Max(aggstate->phase->numsets, 1);
int numAggs = aggstate->numaggs;
int setno = 0;
int numGroupingSets = Max(aggstate->phase->numsets, 1);
int numAggs = aggstate->numaggs;
for (aggno = 0; aggno < numAggs; aggno++)
{
@ -1134,7 +1133,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
{
if (aggstate->phase->grouped_cols)
{
Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
aggstate->grouped_cols = grouped_cols;
@ -1156,7 +1155,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
foreach(lc, aggstate->all_grouped_cols)
{
int attnum = lfirst_int(lc);
int attnum = lfirst_int(lc);
if (!bms_is_member(attnum, grouped_cols))
slot->tts_isnull[attnum - 1] = true;
@ -1225,8 +1224,7 @@ project_aggregates(AggState *aggstate)
ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
/*
* Check the qual (HAVING clause); if the group does not match, ignore
* it.
* Check the qual (HAVING clause); if the group does not match, ignore it.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
@ -1286,7 +1284,7 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
*colnos = bms_add_member(*colnos, var->varattno);
return false;
}
if (IsA(node, Aggref) || IsA(node, GroupingFunc))
if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
{
/* do not descend into aggregate exprs */
return false;
@ -1319,7 +1317,7 @@ build_hash_table(AggState *aggstate)
aggstate->hashfunctions,
node->numGroups,
entrysize,
aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
tmpmem);
}
@ -1521,8 +1519,8 @@ agg_retrieve_direct(AggState *aggstate)
/*
* get state info from node
*
* econtext is the per-output-tuple expression context
* tmpcontext is the per-input-tuple expression context
* econtext is the per-output-tuple expression context tmpcontext is the
* per-input-tuple expression context
*/
econtext = aggstate->ss.ps.ps_ExprContext;
tmpcontext = aggstate->tmpcontext;
@ -1615,17 +1613,17 @@ agg_retrieve_direct(AggState *aggstate)
* If a subgroup for the current grouping set is present, project it.
*
* We have a new group if:
* - we're out of input but haven't projected all grouping sets
* (checked above)
* - we're out of input but haven't projected all grouping sets
* (checked above)
* OR
* - we already projected a row that wasn't from the last grouping
* set
* AND
* - the next grouping set has at least one grouping column (since
* empty grouping sets project only once input is exhausted)
* AND
* - the previous and pending rows differ on the grouping columns
* of the next grouping set
* - we already projected a row that wasn't from the last grouping
* set
* AND
* - the next grouping set has at least one grouping column (since
* empty grouping sets project only once input is exhausted)
* AND
* - the previous and pending rows differ on the grouping columns
* of the next grouping set
*/
if (aggstate->input_done ||
(node->aggstrategy == AGG_SORTED &&
@ -1729,7 +1727,8 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot,
InvalidBuffer,
true);
aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
aggstate->grp_firstTuple = NULL; /* don't keep two
* pointers */
/* set up for first advance_aggregates call */
tmpcontext->ecxt_outertuple = firstSlot;
@ -1774,7 +1773,7 @@ agg_retrieve_direct(AggState *aggstate)
node->numCols,
node->grpColIdx,
aggstate->phase->eqfunctions,
tmpcontext->ecxt_per_tuple_memory))
tmpcontext->ecxt_per_tuple_memory))
{
aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
break;
@ -1787,8 +1786,8 @@ agg_retrieve_direct(AggState *aggstate)
* Use the representative input tuple for any references to
* non-aggregated input columns in aggregate direct args, the node
* qual, and the tlist. (If we are not grouping, and there are no
* input rows at all, we will come here with an empty firstSlot ...
* but if not grouping, there can't be any references to
* input rows at all, we will come here with an empty firstSlot
* ... but if not grouping, there can't be any references to
* non-aggregated input columns, so no problem.)
*/
econtext->ecxt_outertuple = firstSlot;
@ -1803,8 +1802,8 @@ agg_retrieve_direct(AggState *aggstate)
finalize_aggregates(aggstate, peragg, pergroup, currentSet);
/*
* If there's no row to project right now, we must continue rather than
* returning a null since there might be more groups.
* If there's no row to project right now, we must continue rather
* than returning a null since there might be more groups.
*/
result = project_aggregates(aggstate);
if (result)
@ -1996,7 +1995,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
foreach(l, node->chain)
{
Agg *agg = lfirst(l);
Agg *agg = lfirst(l);
numGroupingSets = Max(numGroupingSets,
list_length(agg->groupingSets));
@ -2074,7 +2073,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
ExecAssignScanTypeFromOuterPlan(&aggstate->ss);
if (node->chain)
ExecSetSlotDescriptor(aggstate->sort_slot,
aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
/*
* Initialize result tuple type and projection info.
@ -2111,13 +2110,13 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
for (phase = 0; phase < numPhases; ++phase)
{
AggStatePerPhase phasedata = &aggstate->phases[phase];
Agg *aggnode;
Sort *sortnode;
int num_sets;
Agg *aggnode;
Sort *sortnode;
int num_sets;
if (phase > 0)
{
aggnode = list_nth(node->chain, phase-1);
aggnode = list_nth(node->chain, phase - 1);
sortnode = (Sort *) aggnode->plan.lefttree;
Assert(IsA(sortnode, Sort));
}
@ -2137,8 +2136,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
i = 0;
foreach(l, aggnode->groupingSets)
{
int current_length = list_length(lfirst(l));
Bitmapset *cols = NULL;
int current_length = list_length(lfirst(l));
Bitmapset *cols = NULL;
/* planner forces this to be correct */
for (j = 0; j < current_length; ++j)
@ -2288,8 +2287,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/* Begin filling in the peraggstate data */
peraggstate->aggrefstate = aggrefstate;
peraggstate->aggref = aggref;
peraggstate->sortstates =(Tuplesortstate**)
palloc0(sizeof(Tuplesortstate*) * numGroupingSets);
peraggstate->sortstates = (Tuplesortstate **)
palloc0(sizeof(Tuplesortstate *) * numGroupingSets);
for (currentsortno = 0; currentsortno < numGroupingSets; currentsortno++)
peraggstate->sortstates[currentsortno] = NULL;
@ -2643,11 +2642,11 @@ void
ExecReScanAgg(AggState *node)
{
ExprContext *econtext = node->ss.ps.ps_ExprContext;
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);
Agg *aggnode = (Agg *) node->ss.ps.plan;
int aggno;
int numGroupingSets = Max(node->maxsets, 1);
int setno;
int numGroupingSets = Max(node->maxsets, 1);
int setno;
node->agg_done = false;
@ -2732,7 +2731,7 @@ ExecReScanAgg(AggState *node)
* Reset the per-group state (in particular, mark transvalues null)
*/
MemSet(node->pergroup, 0,
sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
/* reset to phase 0 */
initialize_phase(node, 0);
@ -2775,8 +2774,9 @@ AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
{
if (aggcontext)
{
AggState *aggstate = ((AggState *) fcinfo->context);
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
AggState *aggstate = ((AggState *) fcinfo->context);
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
*aggcontext = cxt->ecxt_per_tuple_memory;
}
return AGG_CONTEXT_AGGREGATE;
@ -2862,7 +2862,7 @@ AggRegisterCallback(FunctionCallInfo fcinfo,
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
AggState *aggstate = (AggState *) fcinfo->context;
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
RegisterExprContextCallback(cxt, func, arg);


@ -449,7 +449,7 @@ ExecBitmapHeapScan(BitmapHeapScanState *node)
void
ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
{
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);
/* rescan to release any page pin */
heap_rescan(node->ss.ss_currentScanDesc, NULL);


@ -280,7 +280,7 @@ ExecEndGroup(GroupState *node)
void
ExecReScanGroup(GroupState *node)
{
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);
node->grp_done = FALSE;
node->ss.ps.ps_TupFromTlist = false;


@ -500,8 +500,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
/*
* If there's not enough space to store the projected number of tuples
* and the required bucket headers, we will need multiple batches.
* If there's not enough space to store the projected number of tuples and
* the required bucket headers, we will need multiple batches.
*/
if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
{
@ -512,8 +512,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
long bucket_size;
/*
* Estimate the number of buckets we'll want to have when work_mem
* is entirely full. Each bucket will contain a bucket pointer plus
* Estimate the number of buckets we'll want to have when work_mem is
* entirely full. Each bucket will contain a bucket pointer plus
* NTUP_PER_BUCKET tuples, whose projected size already includes
* overhead for the hash code, pointer to the next tuple, etc.
*/
@ -527,9 +527,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
* Buckets are simple pointers to hashjoin tuples, while tupsize
* includes the pointer, hash code, and MinimalTupleData. So buckets
* should never really exceed 25% of work_mem (even for
* NTUP_PER_BUCKET=1); except maybe * for work_mem values that are
* not 2^N bytes, where we might get more * because of doubling.
* So let's look for 50% here.
* NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
* 2^N bytes, where we might get more * because of doubling. So let's
* look for 50% here.
*/
Assert(bucket_bytes <= hash_table_bytes / 2);
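
The sizing arithmetic those comments describe can be sketched standalone, assuming NTUP_PER_BUCKET = 1, 8-byte bucket pointers, and a made-up memory budget; this shows only the shape of the calculation, not ExecChooseHashTableSize itself.

#include <stdio.h>

#define NTUP_PER_BUCKET 1

/* round up to the next power of 2, standing in for my_log2()-based sizing */
static long
next_pow2(long n)
{
    long    result = 1;

    while (result < n)
        result <<= 1;
    return result;
}

int
main(void)
{
    double  ntuples = 1000000.0;
    long    tupsize = 64;       /* projected tuple size, incl. overhead */
    long    hash_table_bytes = 4L * 1024 * 1024;    /* the "work_mem" budget */

    long    inner_rel_bytes = (long) (ntuples * tupsize);
    long    nbuckets = next_pow2((long) (ntuples / NTUP_PER_BUCKET));
    long    bucket_bytes = nbuckets * sizeof(void *);

    if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
    {
        /*
         * Tuples plus bucket headers won't fit in one batch, so size the
         * bucket array for a completely full memory budget: each bucket
         * costs one pointer plus NTUP_PER_BUCKET tuples.
         */
        long    bucket_size = sizeof(void *) + NTUP_PER_BUCKET * tupsize;

        nbuckets = next_pow2(hash_table_bytes / bucket_size);
        bucket_bytes = nbuckets * sizeof(void *);

        /* the bucket headers stay well under half of the budget */
        printf("multiple batches, nbuckets = %ld, bucket_bytes = %ld\n",
               nbuckets, bucket_bytes);
    }
    else
        printf("single batch, nbuckets = %ld\n", nbuckets);
    return 0;
}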
@ -655,7 +655,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
hashtable->buckets = repalloc(hashtable->buckets,
sizeof(HashJoinTuple) * hashtable->nbuckets);
sizeof(HashJoinTuple) * hashtable->nbuckets);
}
/*
@ -671,6 +671,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
while (oldchunks != NULL)
{
HashMemoryChunk nextchunk = oldchunks->next;
/* position within the buffer (up to oldchunks->used) */
size_t idx = 0;
@ -691,7 +692,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
{
/* keep tuple in memory - copy it into the new chunk */
HashJoinTuple copyTuple =
(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
memcpy(copyTuple, hashTuple, hashTupleSize);
/* and add it back to the appropriate bucket */
@ -749,15 +751,15 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
static void
ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
{
HashMemoryChunk chunk;
HashMemoryChunk chunk;
/* do nothing if not an increase (it's called increase for a reason) */
if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
return;
/*
* We already know the optimal number of buckets, so let's just
* compute the log2_nbuckets for it.
* We already know the optimal number of buckets, so let's just compute
* the log2_nbuckets for it.
*/
hashtable->nbuckets = hashtable->nbuckets_optimal;
hashtable->log2_nbuckets = my_log2(hashtable->nbuckets_optimal);
@ -771,14 +773,14 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
#endif
/*
* Just reallocate the proper number of buckets - we don't need to
* walk through them - we can walk the dense-allocated chunks
* (just like in ExecHashIncreaseNumBatches, but without all the
* copying into new chunks)
* Just reallocate the proper number of buckets - we don't need to walk
* through them - we can walk the dense-allocated chunks (just like in
* ExecHashIncreaseNumBatches, but without all the copying into new
* chunks)
*/
hashtable->buckets =
(HashJoinTuple *) repalloc(hashtable->buckets,
hashtable->nbuckets * sizeof(HashJoinTuple));
hashtable->nbuckets * sizeof(HashJoinTuple));
memset(hashtable->buckets, 0, sizeof(void *) * hashtable->nbuckets);
@ -786,12 +788,13 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next)
{
/* process all tuples stored in this chunk */
size_t idx = 0;
size_t idx = 0;
while (idx < chunk->used)
{
HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx);
int bucketno;
int batchno;
int bucketno;
int batchno;
ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
&bucketno, &batchno);
@ -869,10 +872,11 @@ ExecHashTableInsert(HashJoinTable hashtable,
/*
* Increase the (optimal) number of buckets if we just exceeded the
* NTUP_PER_BUCKET threshold, but only when there's still a single batch.
* NTUP_PER_BUCKET threshold, but only when there's still a single
* batch.
*/
if ((hashtable->nbatch == 1) &&
(hashtable->nbuckets_optimal <= INT_MAX/2) && /* overflow protection */
(hashtable->nbuckets_optimal <= INT_MAX / 2) && /* overflow protection */
(ntuples >= (hashtable->nbuckets_optimal * NTUP_PER_BUCKET)))
{
hashtable->nbuckets_optimal *= 2;
@ -1636,7 +1640,7 @@ dense_alloc(HashJoinTable hashtable, Size size)
{
/* allocate new chunk and put it at the beginning of the list */
newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
offsetof(HashMemoryChunkData, data) + size);
offsetof(HashMemoryChunkData, data) + size);
newChunk->maxlen = size;
newChunk->used = 0;
newChunk->ntuples = 0;
@ -1663,15 +1667,15 @@ dense_alloc(HashJoinTable hashtable, Size size)
}
/*
* See if we have enough space for it in the current chunk (if any).
* If not, allocate a fresh chunk.
* See if we have enough space for it in the current chunk (if any). If
* not, allocate a fresh chunk.
*/
if ((hashtable->chunks == NULL) ||
(hashtable->chunks->maxlen - hashtable->chunks->used) < size)
{
/* allocate new chunk and put it at the beginning of the list */
newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
newChunk->maxlen = HASH_CHUNK_SIZE;
newChunk->used = size;
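
The chunked allocation pattern described above — carve small allocations out of large chunks, and start a fresh chunk when the current one can't fit the request — can be sketched standalone; the names and the 32 kB chunk size below are illustrative, and oversized requests are deliberately not handled the way the real dense_alloc handles them.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE (32 * 1024)

typedef struct ChunkSketch
{
    struct ChunkSketch *next;
    size_t  used;
    size_t  maxlen;
    char    data[CHUNK_SIZE];
} ChunkSketch;

static ChunkSketch *chunks = NULL;

/*
 * Bump-allocate from the newest chunk; allocate a fresh chunk and put it at
 * the head of the list when the current one is too full.
 */
static void *
chunk_alloc(size_t size)
{
    if (chunks == NULL || chunks->maxlen - chunks->used < size)
    {
        ChunkSketch *newChunk = malloc(sizeof(ChunkSketch));

        if (newChunk == NULL)
            exit(1);
        newChunk->next = chunks;
        newChunk->used = 0;
        newChunk->maxlen = CHUNK_SIZE;
        chunks = newChunk;
    }

    void   *ptr = chunks->data + chunks->used;

    chunks->used += size;
    return ptr;
}

int
main(void)
{
    int     nchunks = 0;

    for (int i = 0; i < 10000; i++)
        memset(chunk_alloc(48), 0, 48);
    for (ChunkSketch *c = chunks; c != NULL; c = c->next)
        nchunks++;
    printf("10000 x 48-byte allocations used %d chunks\n", nchunks);
    return 0;
}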


@ -106,8 +106,8 @@ IndexOnlyNext(IndexOnlyScanState *node)
* away, because the tuple is still visible until the deleting
* transaction commits or the statement ends (if it's our
* transaction). In either case, the lock on the VM buffer will have
* been released (acting as a write barrier) after clearing the
* bit. And for us to have a snapshot that includes the deleting
* been released (acting as a write barrier) after clearing the bit.
* And for us to have a snapshot that includes the deleting
* transaction (making the tuple invisible), we must have acquired
* ProcArrayLock after that time, acting as a read barrier.
*


@ -288,9 +288,9 @@ next_indextuple:
* Can we return this tuple immediately, or does it need to be pushed
* to the reorder queue? If the ORDER BY expression values returned
* by the index were inaccurate, we can't return it yet, because the
* next tuple from the index might need to come before this one.
* Also, we can't return it yet if there are any smaller tuples in the
* queue already.
* next tuple from the index might need to come before this one. Also,
* we can't return it yet if there are any smaller tuples in the queue
* already.
*/
if (!was_exact || (topmost && cmp_orderbyvals(lastfetched_vals,
lastfetched_nulls,


@ -196,11 +196,12 @@ lnext:
* case, so as to avoid the "Halloween problem" of repeated
* update attempts. In the latter case it might be sensible
* to fetch the updated tuple instead, but doing so would
* require changing heap_update and heap_delete to not complain
* about updating "invisible" tuples, which seems pretty scary
* (heap_lock_tuple will not complain, but few callers expect
* HeapTupleInvisible, and we're not one of them). So for now,
* treat the tuple as deleted and do not process.
* require changing heap_update and heap_delete to not
* complain about updating "invisible" tuples, which seems
* pretty scary (heap_lock_tuple will not complain, but few
* callers expect HeapTupleInvisible, and we're not one of
* them). So for now, treat the tuple as deleted and do not
* process.
*/
goto lnext;


@ -317,7 +317,7 @@ ExecMaterialRestrPos(MaterialState *node)
void
ExecReScanMaterial(MaterialState *node)
{
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);


@ -139,10 +139,10 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
/*
* It isn't feasible to perform abbreviated key conversion, since
* tuples are pulled into mergestate's binary heap as needed. It would
* likely be counter-productive to convert tuples into an abbreviated
* representation as they're pulled up, so opt out of that additional
* optimization entirely.
* tuples are pulled into mergestate's binary heap as needed. It
* would likely be counter-productive to convert tuples into an
* abbreviated representation as they're pulled up, so opt out of that
* additional optimization entirely.
*/
sortKey->abbreviate = false;


@ -232,8 +232,8 @@ MJExamineQuals(List *mergeclauses,
/*
* sortsupport routine must know if abbreviation optimization is
* applicable in principle. It is never applicable for merge joins
* because there is no convenient opportunity to convert to alternative
* representation.
* because there is no convenient opportunity to convert to
* alternative representation.
*/
clause->ssup.abbreviate = false;


@ -180,7 +180,7 @@ ExecCheckHeapTupleVisible(EState *estate,
if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
errmsg("could not serialize access due to concurrent update")));
}
/*
@ -321,8 +321,8 @@ ExecInsert(ModifyTableState *mtstate,
/*
* Check any RLS INSERT WITH CHECK policies
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind
* we are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
@ -383,9 +383,9 @@ ExecInsert(ModifyTableState *mtstate,
else
{
/*
* In case of ON CONFLICT DO NOTHING, do nothing.
* However, verify that the tuple is visible to the
* executor's MVCC snapshot at higher isolation levels.
* In case of ON CONFLICT DO NOTHING, do nothing. However,
* verify that the tuple is visible to the executor's MVCC
* snapshot at higher isolation levels.
*/
Assert(onconflict == ONCONFLICT_NOTHING);
ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
@ -411,7 +411,7 @@ ExecInsert(ModifyTableState *mtstate,
/* insert index entries for tuple */
recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
estate, true, &specConflict,
estate, true, &specConflict,
arbiterIndexes);
/* adjust the tuple's state accordingly */
@ -475,17 +475,16 @@ ExecInsert(ModifyTableState *mtstate,
list_free(recheckIndexes);
/*
* Check any WITH CHECK OPTION constraints from parent views. We
* are required to do this after testing all constraints and
* uniqueness violations per the SQL spec, so we do it after actually
* inserting the record into the heap and all indexes.
* Check any WITH CHECK OPTION constraints from parent views. We are
* required to do this after testing all constraints and uniqueness
* violations per the SQL spec, so we do it after actually inserting the
* record into the heap and all indexes.
*
* ExecWithCheckOptions will elog(ERROR) if a violation is found, so
* the tuple will never be seen, if it violates the WITH CHECK
* OPTION.
* ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
* tuple will never be seen, if it violates the WITH CHECK OPTION.
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind we
* are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@ -860,8 +859,8 @@ ExecUpdate(ItemPointer tupleid,
* triggers then trigger.c will have done heap_lock_tuple to lock the
* correct tuple, so there's no need to do them again.)
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind
* we are looking for at this point.
*/
lreplace:;
if (resultRelInfo->ri_WithCheckOptions != NIL)
@ -990,13 +989,13 @@ lreplace:;
list_free(recheckIndexes);
/*
* Check any WITH CHECK OPTION constraints from parent views. We
* are required to do this after testing all constraints and
* uniqueness violations per the SQL spec, so we do it after actually
* updating the record in the heap and all indexes.
* Check any WITH CHECK OPTION constraints from parent views. We are
* required to do this after testing all constraints and uniqueness
* violations per the SQL spec, so we do it after actually updating the
* record in the heap and all indexes.
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind we
* are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@ -1143,9 +1142,9 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Make tuple and any needed join variables available to ExecQual and
* ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
* the target's existing tuple is installed in the scantuple. EXCLUDED has
* been made to reference INNER_VAR in setrefs.c, but there is no other
* redirection.
* the target's existing tuple is installed in the scantuple. EXCLUDED
* has been made to reference INNER_VAR in setrefs.c, but there is no
* other redirection.
*/
econtext->ecxt_scantuple = mtstate->mt_existing;
econtext->ecxt_innertuple = excludedSlot;
@ -1430,7 +1429,7 @@ ExecModifyTable(ModifyTableState *node)
{
case CMD_INSERT:
slot = ExecInsert(node, slot, planSlot,
node->mt_arbiterindexes, node->mt_onconflict,
node->mt_arbiterindexes, node->mt_onconflict,
estate, node->canSetTag);
break;
case CMD_UPDATE:


@ -27,7 +27,7 @@
#include "utils/tqual.h"
static void InitScanRelation(SampleScanState *node, EState *estate,
int eflags, TableSampleClause *tablesample);
int eflags, TableSampleClause *tablesample);
static TupleTableSlot *SampleNext(SampleScanState *node);
@ -45,9 +45,9 @@ static TupleTableSlot *SampleNext(SampleScanState *node);
static TupleTableSlot *
SampleNext(SampleScanState *node)
{
TupleTableSlot *slot;
TableSampleDesc *tsdesc;
HeapTuple tuple;
TupleTableSlot *slot;
TableSampleDesc *tsdesc;
HeapTuple tuple;
/*
* get information from the scan state
@ -60,7 +60,8 @@ SampleNext(SampleScanState *node)
if (tuple)
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
tsdesc->heapScan->rs_cbuf, /* buffer associated with this tuple */
tsdesc->heapScan->rs_cbuf, /* buffer associated
* with this tuple */
false); /* don't pfree this pointer */
else
ExecClearTuple(slot);
@ -112,7 +113,7 @@ InitScanRelation(SampleScanState *node, EState *estate, int eflags,
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
((SampleScan *) node->ss.ps.plan)->scanrelid,
((SampleScan *) node->ss.ps.plan)->scanrelid,
eflags);
node->ss.ss_currentRelation = currentRelation;


@ -290,7 +290,7 @@ ExecSortRestrPos(SortState *node)
void
ExecReScanSort(SortState *node)
{
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);
/*
* If we haven't sorted yet, just return. If outerplan's chgParam is not


@ -2057,7 +2057,7 @@ ExecEndWindowAgg(WindowAggState *node)
void
ExecReScanWindowAgg(WindowAggState *node)
{
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);
ExprContext *econtext = node->ss.ps.ps_ExprContext;
node->all_done = false;


@ -1344,11 +1344,11 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
}
/*
* If told to be read-only, or in parallel mode, verify that this query
* is in fact read-only. This can't be done earlier because we need to
* look at the finished, planned queries. (In particular, we don't want
* to do it between GetCachedPlan and PortalDefineQuery, because throwing
* an error between those steps would result in leaking our plancache
* If told to be read-only, or in parallel mode, verify that this query is
* in fact read-only. This can't be done earlier because we need to look
* at the finished, planned queries. (In particular, we don't want to do
* it between GetCachedPlan and PortalDefineQuery, because throwing an
* error between those steps would result in leaking our plancache
* refcount.)
*/
if (read_only || IsInParallelMode())
@ -1365,8 +1365,8 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a non-volatile function",
CreateCommandTag(pstmt))));
errmsg("%s is not allowed in a non-volatile function",
CreateCommandTag(pstmt))));
else
PreventCommandIfParallelMode(CreateCommandTag(pstmt));
}