Mirror of https://github.com/postgres/postgres.git

pgindent run for 9.6

Robert Haas
2016-06-09 18:02:36 -04:00
parent 9164deea2f
commit 4bc424b968
252 changed files with 2670 additions and 2558 deletions

View File

@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist)
set_base_rel_consider_startup(root);
/*
* Generate access paths for the base rels. set_base_rel_sizes also
* sets the consider_parallel flag for each baserel, if appropriate.
* Generate access paths for the base rels. set_base_rel_sizes also sets
* the consider_parallel flag for each baserel, if appropriate.
*/
set_base_rel_sizes(root);
set_base_rel_pathlists(root);
@ -228,7 +228,7 @@ set_base_rel_consider_startup(PlannerInfo *root)
/*
* set_base_rel_sizes
* Set the size estimates (rows and widths) for each base-relation entry.
* Also determine whether to consider parallel paths for base relations.
* Also determine whether to consider parallel paths for base relations.
*
* We do this in a separate pass over the base rels so that rowcount
* estimates are available for parameterized path generation, and also so
@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
switch (rte->rtekind)
{
case RTE_RELATION:
/*
* Currently, parallel workers can't access the leader's temporary
* tables. We could possibly relax this if we wrote all of its
@ -528,7 +529,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
*/
if (rte->tablesample != NULL)
{
Oid proparallel = func_parallel(rte->tablesample->tsmhandler);
Oid proparallel = func_parallel(rte->tablesample->tsmhandler);
if (proparallel != PROPARALLEL_SAFE)
return;
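
For context on the test above: func_parallel() looks up pg_proc.proparallel for a function OID, and the markings are 's' (PROPARALLEL_SAFE), 'r' (PROPARALLEL_RESTRICTED), and 'u' (PROPARALLEL_UNSAFE). A minimal standalone sketch of the gating decision, with invented names and the catalog lookup stubbed out (this is illustrative, not the commit's code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the server's pg_proc.proparallel markings. */
#define PROPARALLEL_SAFE       's'
#define PROPARALLEL_RESTRICTED 'r'
#define PROPARALLEL_UNSAFE     'u'

/* Mirrors "if (proparallel != PROPARALLEL_SAFE) return;" above: a
 * tablesample handler disqualifies the rel from parallelism unless it
 * is marked parallel-safe. */
static bool
tsm_allows_parallelism(char proparallel)
{
    return proparallel == PROPARALLEL_SAFE;
}

int
main(void)
{
    printf("%d\n", tsm_allows_parallelism(PROPARALLEL_SAFE));       /* 1 */
    printf("%d\n", tsm_allows_parallelism(PROPARALLEL_RESTRICTED)); /* 0 */
    return 0;
}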
@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break;
case RTE_SUBQUERY:
/*
* Subplans currently aren't passed to workers. Even if they
* were, the subplan might be using parallelism internally, and
* we can't support nested Gather nodes at present. Finally,
* we don't have a good way of knowing whether the subplan
* involves any parallel-restricted operations. It would be
* nice to relax this restriction some day, but it's going to
* take a fair amount of work.
* were, the subplan might be using parallelism internally, and we
* can't support nested Gather nodes at present. Finally, we
* don't have a good way of knowing whether the subplan involves
* any parallel-restricted operations. It would be nice to relax
* this restriction some day, but it's going to take a fair amount
* of work.
*/
return;
@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break;
case RTE_VALUES:
/*
* The data for a VALUES clause is stored in the plan tree itself,
* so scanning it in a worker is fine.
@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break;
case RTE_CTE:
/*
* CTE tuplestores aren't shared among parallel workers, so we
* force all CTE scans to happen in the leader. Also, populating
@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
}
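
Taken together, the RTE_* hunks above amount to a small decision table. The following standalone toy (the enum and function names are invented here, not the commit's code) restates it:

#include <stdbool.h>
#include <stdio.h>

/* Toy enum standing in for the server's RTEKind; only the cases the
 * hunks above discuss are modeled. */
typedef enum { REL, SUBQUERY, VALUES_LIST, CTE } ToyRTEKind;

/* Decision table distilled from the switch above. */
static bool
rel_may_consider_parallel(ToyRTEKind kind, bool is_temp_table)
{
    switch (kind)
    {
        case REL:
            return !is_temp_table; /* workers can't access leader temp tables */
        case SUBQUERY:
            return false;          /* subplans aren't passed to workers */
        case VALUES_LIST:
            return true;           /* data is embedded in the plan tree */
        case CTE:
            return false;          /* CTE tuplestores aren't shared */
    }
    return false;
}

int
main(void)
{
    printf("temp rel: %d\n", rel_may_consider_parallel(REL, true));          /* 0 */
    printf("VALUES:   %d\n", rel_may_consider_parallel(VALUES_LIST, false)); /* 1 */
    return 0;
}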
/*
* If there's anything in baserestrictinfo that's parallel-restricted,
* we give up on parallelizing access to this relation. We could consider
* If there's anything in baserestrictinfo that's parallel-restricted, we
* give up on parallelizing access to this relation. We could consider
* instead postponing application of the restricted quals until we're
* above all the parallelism in the plan tree, but it's not clear that
* this would be a win in very many cases, and it might be tricky to make
@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
return;
/*
* If the relation's outputs are not parallel-safe, we must give up.
* In the common case where the relation only outputs Vars, this check is
* If the relation's outputs are not parallel-safe, we must give up. In
* the common case where the relation only outputs Vars, this check is
* very cheap; otherwise, we have to do more work.
*/
if (rel->reltarget_has_non_vars &&
@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
int parallel_workers = 0;
/*
* Decide on the number of workers to request for this append path. For
* now, we just use the maximum value from among the members. It
* Decide on the number of workers to request for this append path.
* For now, we just use the maximum value from among the members. It
* might be useful to use a higher number if the Append node were
* smart enough to spread out the workers, but it currently isn't.
*/
@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* Run generate_gather_paths() for each just-processed joinrel. We
* could not do this earlier because both regular and partial paths
* can get added to a particular joinrel at multiple times within
* join_search_one_level. After that, we're done creating paths
* for the joinrel, so run set_cheapest().
* join_search_one_level. After that, we're done creating paths for
* the joinrel, so run set_cheapest().
*/
foreach(lc, root->join_rel_level[lev])
{

View File

@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
* We might not really need a Result node here. There are several ways
* that this can happen. For example, MergeAppend doesn't project, so we
* would have thought that we needed a projection to attach resjunk sort
* columns to its output ... but create_merge_append_plan might have
* added those same resjunk sort columns to both MergeAppend and its
* children. Alternatively, apply_projection_to_path might have created
* a projection path as the subpath of a Gather node even though the
* subpath was projection-capable. So, if the subpath is capable of
* projection or the desired tlist is the same expression-wise as the
* subplan's, just jam it in there. We'll have charged for a Result that
* doesn't actually appear in the plan, but that's better than having a
* Result we don't need.
* columns to its output ... but create_merge_append_plan might have added
* those same resjunk sort columns to both MergeAppend and its children.
* Alternatively, apply_projection_to_path might have created a projection
* path as the subpath of a Gather node even though the subpath was
* projection-capable. So, if the subpath is capable of projection or the
* desired tlist is the same expression-wise as the subplan's, just jam it
* in there. We'll have charged for a Result that doesn't actually appear
* in the plan, but that's better than having a Result we don't need.
*/
if (is_projection_capable_path(best_path->subpath) ||
tlist_same_exprs(tlist, subplan->targetlist))
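
The long comment above reduces to a two-input test; as a hedged restatement (the helper name is invented here):

#include <stdbool.h>

/* An extra Result node is only needed when the subplan cannot project
 * for itself AND the desired tlist differs expression-wise from the
 * subplan's own targetlist. */
bool
need_result_node(bool subpath_can_project, bool tlists_same_exprs)
{
    return !subpath_can_project && !tlists_same_exprs;
}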
@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/*
* If a join between foreign relations was pushed down, remember it. The
* push-down safety of the join depends upon the server and user mapping
* being the same. That can change between planning and execution time, in which
* case the plan should be invalidated.
* being the same. That can change between planning and execution time, in
* which case the plan should be invalidated.
*/
if (scan_relid == 0)
root->glob->hasForeignJoin = true;
@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/*
* Replace any outer-relation variables with nestloop params in the qual,
* fdw_exprs and fdw_recheck_quals expressions. We do this last so that
* the FDW doesn't have to be involved. (Note that parts of fdw_exprs
* or fdw_recheck_quals could have come from join clauses, so doing this
* the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
* fdw_recheck_quals could have come from join clauses, so doing this
* beforehand on the scan_clauses wouldn't work.) We assume
* fdw_scan_tlist contains no such variables.
*/
@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
* 0, but there can be no Var with relid 0 in the rel's targetlist or the
* restriction clauses, so we skip this in that case. Note that any such
* columns in base relations that were joined are assumed to be contained
* in fdw_scan_tlist.) This is a bit of a kluge and might go away someday,
* so we intentionally leave it out of the API presented to FDWs.
* in fdw_scan_tlist.) This is a bit of a kluge and might go away
* someday, so we intentionally leave it out of the API presented to FDWs.
*/
scan_plan->fsSystemCol = false;
if (scan_relid > 0)
@ -5899,7 +5898,7 @@ make_gather(List *qptlist,
plan->righttree = NULL;
node->num_workers = nworkers;
node->single_copy = single_copy;
node->invisible = false;
node->invisible = false;
return node;
}

View File

@ -108,10 +108,10 @@ static double get_number_of_groups(PlannerInfo *root,
List *rollup_lists,
List *rollup_groupclauses);
static void set_grouped_rel_consider_parallel(PlannerInfo *root,
RelOptInfo *grouped_rel,
PathTarget *target);
RelOptInfo *grouped_rel,
PathTarget *target);
static Size estimate_hashagg_tablesize(Path *path, AggClauseCosts *agg_costs,
double dNumGroups);
double dNumGroups);
static RelOptInfo *create_grouping_paths(PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
@ -141,7 +141,7 @@ static RelOptInfo *create_ordered_paths(PlannerInfo *root,
static PathTarget *make_group_input_target(PlannerInfo *root,
PathTarget *final_target);
static PathTarget *make_partialgroup_input_target(PlannerInfo *root,
PathTarget *final_target);
PathTarget *final_target);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static PathTarget *make_window_input_target(PlannerInfo *root,
@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
* findable from the PlannerInfo struct; anything else the FDW wants
* to know should be obtainable via "root".
*
* Note: CustomScan providers, as well as FDWs that don't want to
* use this hook, can use the create_upper_paths_hook; see below.
* Note: CustomScan providers, as well as FDWs that don't want to use
* this hook, can use the create_upper_paths_hook; see below.
*/
if (current_rel->fdwroutine &&
current_rel->fdwroutine->GetForeignUpperPaths)
@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel,
/*
* All that's left to check now is to make sure all aggregate functions
* support partial mode. If there are no aggregates then we can skip checking
* that.
* support partial mode. If there are no aggregates then we can skip
* checking that.
*/
if (!parse->hasAggs)
grouped_rel->consider_parallel = true;
@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root,
/*
* Determine whether it's possible to perform sort-based implementations
* of grouping. (Note that if groupClause is empty, grouping_is_sortable()
* is trivially true, and all the pathkeys_contained_in() tests will
* succeed too, so that we'll consider every surviving input path.)
* of grouping. (Note that if groupClause is empty,
* grouping_is_sortable() is trivially true, and all the
* pathkeys_contained_in() tests will succeed too, so that we'll consider
* every surviving input path.)
*/
can_sort = grouping_is_sortable(parse->groupClause);
@ -3408,7 +3409,7 @@ create_grouping_paths(PlannerInfo *root,
*/
if (grouped_rel->consider_parallel)
{
Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
/*
* Build target list for partial aggregate paths. We cannot reuse the
@ -3471,27 +3472,27 @@ create_grouping_paths(PlannerInfo *root,
if (parse->hasAggs)
add_partial_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
create_agg_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
else
add_partial_path(grouped_rel, (Path *)
create_group_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause,
NIL,
dNumPartialGroups));
create_group_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause,
NIL,
dNumPartialGroups));
}
}
}
@ -3513,18 +3514,18 @@ create_grouping_paths(PlannerInfo *root,
if (hashaggtablesize < work_mem * 1024L)
{
add_partial_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
cheapest_partial_path,
partial_grouping_target,
AGG_HASHED,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
create_agg_path(root,
grouped_rel,
cheapest_partial_path,
partial_grouping_target,
AGG_HASHED,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
}
}
}
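
A note on the work_mem * 1024L comparison used in these hunks: work_mem is a GUC expressed in kilobytes, so multiplying by 1024L converts the budget to bytes before comparing it against the estimated hash table size. A standalone sketch (names invented):

#include <stdbool.h>
#include <stdio.h>

/* work_mem is configured in kilobytes; the planner compares the
 * estimated hash table size (bytes) against work_mem * 1024L bytes. */
static bool
hashagg_fits_in_work_mem(double hashaggtablesize, long work_mem_kb)
{
    return hashaggtablesize < (double) (work_mem_kb * 1024L);
}

int
main(void)
{
    /* With work_mem = 4096 (4MB), a 3MB estimate fits, a 5MB one doesn't. */
    printf("%d\n", hashagg_fits_in_work_mem(3.0 * 1024 * 1024, 4096)); /* 1 */
    printf("%d\n", hashagg_fits_in_work_mem(5.0 * 1024 * 1024, 4096)); /* 0 */
    return 0;
}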
@ -3616,13 +3617,13 @@ create_grouping_paths(PlannerInfo *root,
/*
* Now generate a complete GroupAgg Path atop of the cheapest partial
* path. We need only bother with the cheapest path here, as the output
* of Gather is never sorted.
* path. We need only bother with the cheapest path here, as the
* output of Gather is never sorted.
*/
if (grouped_rel->partial_pathlist)
{
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
double total_groups = path->rows * path->parallel_workers;
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root,
grouped_rel,
@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root,
&total_groups);
/*
* Gather is always unsorted, so we'll need to sort, unless there's
* no GROUP BY clause, in which case there will only be a single
* group.
* Gather is always unsorted, so we'll need to sort, unless
* there's no GROUP BY clause, in which case there will only be a
* single group.
*/
if (parse->groupClause)
path = (Path *) create_sort_path(root,
@ -3645,27 +3646,27 @@ create_grouping_paths(PlannerInfo *root,
if (parse->hasAggs)
add_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
path,
target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
create_agg_path(root,
grouped_rel,
path,
target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
else
add_path(grouped_rel, (Path *)
create_group_path(root,
grouped_rel,
path,
target,
parse->groupClause,
(List *) parse->havingQual,
dNumGroups));
create_group_path(root,
grouped_rel,
path,
target,
parse->groupClause,
(List *) parse->havingQual,
dNumGroups));
}
}
@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root,
/*
* Provided that the estimated size of the hashtable does not exceed
* work_mem, we'll generate a HashAgg Path, although if we were unable
* to sort above, then we'd better generate a Path, so that we at least
* have one.
* to sort above, then we'd better generate a Path, so that we at
* least have one.
*/
if (hashaggtablesize < work_mem * 1024L ||
grouped_rel->pathlist == NIL)
{
/*
* We just need an Agg over the cheapest-total input path, since input
* order won't matter.
* We just need an Agg over the cheapest-total input path, since
* input order won't matter.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root, grouped_rel,
@ -3704,12 +3705,12 @@ create_grouping_paths(PlannerInfo *root,
/*
* Generate a HashAgg Path atop of the cheapest partial path. Once
* again, we'll only do this if it looks as though the hash table won't
* exceed work_mem.
* again, we'll only do this if it looks as though the hash table
* won't exceed work_mem.
*/
if (grouped_rel->partial_pathlist)
{
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
hashaggtablesize = estimate_hashagg_tablesize(path,
&agg_final_costs,
@ -3717,7 +3718,7 @@ create_grouping_paths(PlannerInfo *root,
if (hashaggtablesize < work_mem * 1024L)
{
double total_groups = path->rows * path->parallel_workers;
double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root,
grouped_rel,
@ -3727,18 +3728,18 @@ create_grouping_paths(PlannerInfo *root,
&total_groups);
add_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
path,
target,
AGG_HASHED,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
create_agg_path(root,
grouped_rel,
path,
target,
AGG_HASHED,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
}
}
}
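
The hunks in this file repeatedly build the same two-phase shape: a partial aggregate below a Gather, then a Sort (for grouped GroupAgg) and a final combining aggregate above it. The total_groups = path->rows * path->parallel_workers estimate reflects that each worker can emit one partial row per group it sees, so Gather's output is roughly one row per (group, worker) pair. A back-of-envelope sketch (standalone, not planner code):

#include <stdio.h>

/* Each worker emits up to one partial row per group, so Gather's output
 * estimate is rows-per-worker times worker count; the final aggregate
 * then collapses those back down to one row per group. */
static double
gather_output_rows(double rows_per_worker, int parallel_workers)
{
    return rows_per_worker * parallel_workers;
}

int
main(void)
{
    /* 10,000 groups per worker across 4 workers => ~40,000 rows fed
     * into the final (combining) aggregate. */
    printf("%.0f\n", gather_output_rows(10000, 4));
    return 0;
}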

View File

@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist,
continue;
if (aggref->aggvariadic != tlistaggref->aggvariadic)
continue;
/*
* it would be harmless to compare aggcombine and aggpartial, but
* it's also unnecessary

View File

@ -101,7 +101,7 @@ typedef struct
} has_parallel_hazard_arg;
static bool aggregates_allow_partial_walker(Node *node,
partial_agg_context *context);
partial_agg_context *context);
static bool contain_agg_clause_walker(Node *node, void *context);
static bool count_agg_clauses_walker(Node *node,
count_agg_clauses_context *context);
@ -112,9 +112,9 @@ static bool contain_mutable_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context);
static bool has_parallel_hazard_walker(Node *node,
has_parallel_hazard_arg *context);
has_parallel_hazard_arg *context);
static bool parallel_too_dangerous(char proparallel,
has_parallel_hazard_arg *context);
has_parallel_hazard_arg *context);
static bool typeid_is_temp(Oid typeid);
static bool contain_nonstrict_functions_walker(Node *node, void *context);
static bool contain_leaked_vars_walker(Node *node, void *context);
@ -446,7 +446,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
if (aggref->aggdistinct || aggref->aggorder)
{
context->allowedtype = PAT_DISABLED;
return true; /* abort search */
return true; /* abort search */
}
aggTuple = SearchSysCache1(AGGFNOID,
ObjectIdGetDatum(aggref->aggfnoid));
@ -463,7 +463,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
{
ReleaseSysCache(aggTuple);
context->allowedtype = PAT_DISABLED;
return true; /* abort search */
return true; /* abort search */
}
/*
@ -479,7 +479,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
context->allowedtype = PAT_INTERNAL_ONLY;
ReleaseSysCache(aggTuple);
return false; /* continue searching */
return false; /* continue searching */
}
return expression_tree_walker(node, aggregates_allow_partial_walker,
(void *) context);
@ -1354,7 +1354,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context)
bool
has_parallel_hazard(Node *node, bool allow_restricted)
{
has_parallel_hazard_arg context;
has_parallel_hazard_arg context;
context.allow_restricted = allow_restricted;
return has_parallel_hazard_walker(node, &context);
@ -1371,16 +1371,16 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
* recurse through Query objects so as to locate parallel-unsafe
* constructs anywhere in the tree.
*
* Later, we'll be called again for specific quals, possibly after
* some planning has been done, and we may encounter SubPlan, SubLink,
* or AlternativeSubLink nodes. Currently, there's no need to recurse
* through these; they can't be unsafe, since we've already cleared
* the entire query of unsafe operations, and they're definitely
* Later, we'll be called again for specific quals, possibly after some
* planning has been done, and we may encounter SubPlan, SubLink, or
* AlternativeSubLink nodes. Currently, there's no need to recurse
* through these; they can't be unsafe, since we've already cleared the
* entire query of unsafe operations, and they're definitely
* parallel-restricted.
*/
if (IsA(node, Query))
{
Query *query = (Query *) node;
Query *query = (Query *) node;
if (query->rowMarks != NULL)
return true;
@ -1390,12 +1390,12 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
has_parallel_hazard_walker,
context, 0);
}
else if (IsA(node, SubPlan) || IsA(node, SubLink) ||
IsA(node, AlternativeSubPlan) || IsA(node, Param))
else if (IsA(node, SubPlan) ||IsA(node, SubLink) ||
IsA(node, AlternativeSubPlan) ||IsA(node, Param))
{
/*
* Since we don't have the ability to push subplans down to workers
* at present, we treat subplan references as parallel-restricted.
* Since we don't have the ability to push subplans down to workers at
* present, we treat subplan references as parallel-restricted.
*/
if (!context->allow_restricted)
return true;
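
has_parallel_hazard_walker follows the usual expression_tree_walker contract: return true to abort the walk (a hazard was found), false to keep recursing, with a caller-owned context struct threaded through. A self-contained toy of that contract over a binary tree (all types and names here are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

typedef struct ToyNode
{
    char            proparallel;   /* 's', 'r', or 'u' */
    struct ToyNode *left;
    struct ToyNode *right;
} ToyNode;

typedef struct { bool allow_restricted; } hazard_ctx;

/* Returns true as soon as a hazard is found (the "abort search"
 * convention); false means the subtree is clean, keep walking. */
static bool
hazard_walker(const ToyNode *node, const hazard_ctx *ctx)
{
    if (node == NULL)
        return false;
    if (node->proparallel == 'u')                          /* unsafe */
        return true;
    if (node->proparallel == 'r' && !ctx->allow_restricted) /* restricted */
        return true;
    return hazard_walker(node->left, ctx) || hazard_walker(node->right, ctx);
}

int
main(void)
{
    ToyNode    leaf = {'r', NULL, NULL};
    ToyNode    root = {'s', &leaf, NULL};
    hazard_ctx ctx = {false};

    printf("hazard? %d\n", hazard_walker(&root, &ctx)); /* 1: restricted leaf */
    return 0;
}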
@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
if (IsA(node, RestrictInfo))
{
RestrictInfo *rinfo = (RestrictInfo *) node;
return has_parallel_hazard_walker((Node *) rinfo->clause, context);
}
/*
* It is an error for a parallel worker to touch a temporary table in any
* way, so we can't handle nodes whose type is the rowtype of such a table.
* way, so we can't handle nodes whose type is the rowtype of such a
* table.
*/
if (!context->allow_restricted)
{
@ -1534,7 +1536,8 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
foreach(opid, rcexpr->opnos)
{
Oid opfuncid = get_opcode(lfirst_oid(opid));
Oid opfuncid = get_opcode(lfirst_oid(opid));
if (parallel_too_dangerous(func_parallel(opfuncid), context))
return true;
}
@ -1558,7 +1561,7 @@ parallel_too_dangerous(char proparallel, has_parallel_hazard_arg *context)
static bool
typeid_is_temp(Oid typeid)
{
Oid relid = get_typ_typrelid(typeid);
Oid relid = get_typ_typrelid(typeid);
if (!OidIsValid(relid))
return false;
@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context)
/*
* WHERE CURRENT OF doesn't contain function calls. Moreover, it
* is important that this can be pushed down into a
* security_barrier view, since the planner must always generate
* a TID scan when CURRENT OF is present -- c.f. cost_tidscan.
* security_barrier view, since the planner must always generate a
* TID scan when CURRENT OF is present -- c.f. cost_tidscan.
*/
return false;

View File

@ -709,7 +709,7 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
AttrNumber natt;
Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */
Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */
int nplain = 0; /* # plain attrs observed */
int nplain = 0; /* # plain attrs observed */
/*
* If inference specification element lacks collation/opclass, then no

View File

@ -107,7 +107,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind)
rel->consider_startup = (root->tuple_fraction > 0);
rel->consider_param_startup = false; /* might get changed later */
rel->consider_parallel = false; /* might get changed later */
rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
rel->reltarget = create_empty_pathtarget();
rel->reltarget_has_non_vars = false;
rel->pathlist = NIL;

View File

@ -776,11 +776,11 @@ apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target)
void
apply_partialaggref_adjustment(PathTarget *target)
{
ListCell *lc;
ListCell *lc;
foreach(lc, target->exprs)
{
Aggref *aggref = (Aggref *) lfirst(lc);
Aggref *aggref = (Aggref *) lfirst(lc);
if (IsA(aggref, Aggref))
{