Initial pgindent and pgperltidy run for v13.

Includes some manual cleanup of places that pgindent messed up,
most of which weren't per project style anyway.

Notably, it seems some people didn't absorb the style rules of
commit c9d297751, because there were a bunch of new occurrences
of function calls with a newline just after the left paren, all
with faulty expectations about how the rest of the call would get
indented.
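
As a concrete illustration of the style rule being enforced (taken from the
cost_agg hunk in the diff below, with indentation abbreviated): a call written
with a newline immediately after the left paren, such as

    hashentrysize = hash_agg_entry_size(
        aggcosts->numAggs, input_width, aggcosts->transitionSpace);

is rewritten so that the first argument stays on the same line as the open
paren and the continuation lines align under it:

    hashentrysize = hash_agg_entry_size(aggcosts->numAggs, input_width,
                                        aggcosts->transitionSpace);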
@@ -2751,13 +2751,14 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
  List *useful_pathkeys_list = NIL;

  /*
- * Considering query_pathkeys is always worth it, because it might allow us
- * to avoid a total sort when we have a partially presorted path available.
+ * Considering query_pathkeys is always worth it, because it might allow
+ * us to avoid a total sort when we have a partially presorted path
+ * available.
  */
  if (root->query_pathkeys)
  {
  ListCell *lc;
- int npathkeys = 0; /* useful pathkeys */
+ int npathkeys = 0; /* useful pathkeys */

  foreach(lc, root->query_pathkeys)
  {
@@ -2765,15 +2766,15 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
  EquivalenceClass *pathkey_ec = pathkey->pk_eclass;

  /*
- * We can only build an Incremental Sort for pathkeys which contain
- * an EC member in the current relation, so ignore any suffix of the
- * list as soon as we find a pathkey without an EC member the
- * relation.
+ * We can only build an Incremental Sort for pathkeys which
+ * contain an EC member in the current relation, so ignore any
+ * suffix of the list as soon as we find a pathkey without an EC
+ * member the relation.
  *
- * By still returning the prefix of the pathkeys list that does meet
- * criteria of EC membership in the current relation, we enable not
- * just an incremental sort on the entirety of query_pathkeys but
- * also incremental sort below a JOIN.
+ * By still returning the prefix of the pathkeys list that does
+ * meet criteria of EC membership in the current relation, we
+ * enable not just an incremental sort on the entirety of
+ * query_pathkeys but also incremental sort below a JOIN.
  */
  if (!find_em_expr_for_rel(pathkey_ec, rel))
  break;
@@ -2782,9 +2783,9 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
  }

  /*
- * The whole query_pathkeys list matches, so append it directly, to allow
- * comparing pathkeys easily by comparing list pointer. If we have to truncate
- * the pathkeys, we gotta do a copy though.
+ * The whole query_pathkeys list matches, so append it directly, to
+ * allow comparing pathkeys easily by comparing list pointer. If we
+ * have to truncate the pathkeys, we gotta do a copy though.
  */
  if (npathkeys == list_length(root->query_pathkeys))
  useful_pathkeys_list = lappend(useful_pathkeys_list,
@@ -2851,14 +2852,15 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r

  /*
  * If the path has no ordering at all, then we can't use either
- * incremental sort or rely on implict sorting with a gather merge.
+ * incremental sort or rely on implict sorting with a gather
+ * merge.
  */
  if (subpath->pathkeys == NIL)
  continue;

  is_sorted = pathkeys_count_contained_in(useful_pathkeys,
- subpath->pathkeys,
- &presorted_keys);
+ subpath->pathkeys,
+ &presorted_keys);

  /*
  * We don't need to consider the case where a subpath is already
@@ -2915,8 +2917,9 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
  Path *tmp;

  /*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
  */
  Assert(list_length(useful_pathkeys) != 1);

@@ -1821,19 +1821,19 @@ cost_incremental_sort(Path *path,
  /*
  * Extract presorted keys as list of expressions.
  *
- * We need to be careful about Vars containing "varno 0" which might
- * have been introduced by generate_append_tlist, which would confuse
+ * We need to be careful about Vars containing "varno 0" which might have
+ * been introduced by generate_append_tlist, which would confuse
  * estimate_num_groups (in fact it'd fail for such expressions). See
  * recurse_set_operations which has to deal with the same issue.
  *
- * Unlike recurse_set_operations we can't access the original target
- * list here, and even if we could it's not very clear how useful would
- * that be for a set operation combining multiple tables. So we simply
- * detect if there are any expressions with "varno 0" and use the
- * default DEFAULT_NUM_DISTINCT in that case.
+ * Unlike recurse_set_operations we can't access the original target list
+ * here, and even if we could it's not very clear how useful would that be
+ * for a set operation combining multiple tables. So we simply detect if
+ * there are any expressions with "varno 0" and use the default
+ * DEFAULT_NUM_DISTINCT in that case.
  *
- * We might also use either 1.0 (a single group) or input_tuples (each
- * row being a separate group), pretty much the worst and best case for
+ * We might also use either 1.0 (a single group) or input_tuples (each row
+ * being a separate group), pretty much the worst and best case for
  * incremental sort. But those are extreme cases and using something in
  * between seems reasonable. Furthermore, generate_append_tlist is used
  * for set operations, which are likely to produce mostly unique output
@@ -2403,40 +2403,40 @@ cost_agg(Path *path, PlannerInfo *root,
  /*
  * Add the disk costs of hash aggregation that spills to disk.
  *
- * Groups that go into the hash table stay in memory until finalized,
- * so spilling and reprocessing tuples doesn't incur additional
- * invocations of transCost or finalCost. Furthermore, the computed
- * hash value is stored with the spilled tuples, so we don't incur
- * extra invocations of the hash function.
+ * Groups that go into the hash table stay in memory until finalized, so
+ * spilling and reprocessing tuples doesn't incur additional invocations
+ * of transCost or finalCost. Furthermore, the computed hash value is
+ * stored with the spilled tuples, so we don't incur extra invocations of
+ * the hash function.
  *
- * Hash Agg begins returning tuples after the first batch is
- * complete. Accrue writes (spilled tuples) to startup_cost and to
- * total_cost; accrue reads only to total_cost.
+ * Hash Agg begins returning tuples after the first batch is complete.
+ * Accrue writes (spilled tuples) to startup_cost and to total_cost;
+ * accrue reads only to total_cost.
  */
  if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
  {
- double pages;
- double pages_written = 0.0;
- double pages_read = 0.0;
- double hashentrysize;
- double nbatches;
- Size mem_limit;
- uint64 ngroups_limit;
- int num_partitions;
- int depth;
+ double pages;
+ double pages_written = 0.0;
+ double pages_read = 0.0;
+ double hashentrysize;
+ double nbatches;
+ Size mem_limit;
+ uint64 ngroups_limit;
+ int num_partitions;
+ int depth;

  /*
  * Estimate number of batches based on the computed limits. If less
  * than or equal to one, all groups are expected to fit in memory;
  * otherwise we expect to spill.
  */
- hashentrysize = hash_agg_entry_size(
- aggcosts->numAggs, input_width, aggcosts->transitionSpace);
+ hashentrysize = hash_agg_entry_size(aggcosts->numAggs, input_width,
+ aggcosts->transitionSpace);
  hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
  &ngroups_limit, &num_partitions);

- nbatches = Max( (numGroups * hashentrysize) / mem_limit,
- numGroups / ngroups_limit );
+ nbatches = Max((numGroups * hashentrysize) / mem_limit,
+ numGroups / ngroups_limit);

  nbatches = Max(ceil(nbatches), 1.0);
  num_partitions = Max(num_partitions, 2);
@@ -2446,7 +2446,7 @@ cost_agg(Path *path, PlannerInfo *root,
  * recursion; but for the purposes of this calculation assume it stays
  * constant.
  */
- depth = ceil( log(nbatches) / log(num_partitions) );
+ depth = ceil(log(nbatches) / log(num_partitions));

  /*
  * Estimate number of pages read and written. For each level of
@@ -1378,8 +1378,8 @@ try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
  Assert(joinrel->consider_partitionwise_join);

  /*
- * We can not perform partitionwise join if either of the joining relations
- * is not partitioned.
+ * We can not perform partitionwise join if either of the joining
+ * relations is not partitioned.
  */
  if (!IS_PARTITIONED_REL(rel1) || !IS_PARTITIONED_REL(rel2))
  return;
@@ -1622,8 +1622,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
  * partition bounds as inputs, and the partitions with the same
  * cardinal positions form the pairs.
  *
- * Note: even in cases where one or both inputs have merged bounds,
- * it would be possible for both the bounds to be exactly the same, but
+ * Note: even in cases where one or both inputs have merged bounds, it
+ * would be possible for both the bounds to be exactly the same, but
  * it seems unlikely to be worth the cycles to check.
  */
  if (!rel1->partbounds_merged &&
@@ -1670,8 +1670,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
  /*
  * If the join rel's partbounds_merged flag is true, it means inputs
  * are not guaranteed to have the same partition bounds, therefore we
- * can't assume that the partitions at the same cardinal positions form
- * the pairs; let get_matching_part_pairs() generate the pairs.
+ * can't assume that the partitions at the same cardinal positions
+ * form the pairs; let get_matching_part_pairs() generate the pairs.
  * Otherwise, nothing to do since we can assume that.
  */
  if (joinrel->partbounds_merged)
@@ -1695,7 +1695,7 @@ get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel,
  {
  bool rel1_is_simple = IS_SIMPLE_REL(rel1);
  bool rel2_is_simple = IS_SIMPLE_REL(rel2);
- int cnt_parts;
+ int cnt_parts;

  *parts1 = NIL;
  *parts2 = NIL;
@@ -1735,9 +1735,10 @@ get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel,
  * Get a child rel for rel1 with the relids. Note that we should have
  * the child rel even if rel1 is a join rel, because in that case the
  * partitions specified in the relids would have matching/overlapping
- * boundaries, so the specified partitions should be considered as ones
- * to be joined when planning partitionwise joins of rel1, meaning that
- * the child rel would have been built by the time we get here.
+ * boundaries, so the specified partitions should be considered as
+ * ones to be joined when planning partitionwise joins of rel1,
+ * meaning that the child rel would have been built by the time we get
+ * here.
  */
  if (rel1_is_simple)
  {
@@ -1857,7 +1857,7 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
  return 0; /* unordered path */

  (void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys,
- &n_common_pathkeys);
+ &n_common_pathkeys);

  return n_common_pathkeys;
  }
@@ -4866,8 +4866,7 @@ create_distinct_paths(PlannerInfo *root,
  allow_hash = false; /* policy-based decision not to hash */
  else
  {
- Size hashentrysize = hash_agg_entry_size(
- 0, cheapest_input_path->pathtarget->width, 0);
+ Size hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0);

  allow_hash = enable_hashagg_disk ||
  (hashentrysize * numDistinctRows <= work_mem * 1024L);
@@ -4972,7 +4971,7 @@ create_ordered_paths(PlannerInfo *root,
  int presorted_keys;

  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
- input_path->pathkeys, &presorted_keys);
+ input_path->pathkeys, &presorted_keys);

  if (is_sorted)
  {
@@ -4986,9 +4985,9 @@ create_ordered_paths(PlannerInfo *root,
  else
  {
  /*
- * Try adding an explicit sort, but only to the cheapest total path
- * since a full sort should generally add the same cost to all
- * paths.
+ * Try adding an explicit sort, but only to the cheapest total
+ * path since a full sort should generally add the same cost to
+ * all paths.
  */
  if (input_path == cheapest_input_path)
  {
@@ -5010,11 +5009,11 @@ create_ordered_paths(PlannerInfo *root,
  }

  /*
- * If incremental sort is enabled, then try it as well. Unlike with
- * regular sorts, we can't just look at the cheapest path, because
- * the cost of incremental sort depends on how well presorted the
- * path is. Additionally incremental sort may enable a cheaper
- * startup path to win out despite higher total cost.
+ * If incremental sort is enabled, then try it as well. Unlike
+ * with regular sorts, we can't just look at the cheapest path,
+ * because the cost of incremental sort depends on how well
+ * presorted the path is. Additionally incremental sort may enable
+ * a cheaper startup path to win out despite higher total cost.
  */
  if (!enable_incrementalsort)
  continue;
@@ -5110,15 +5109,15 @@ create_ordered_paths(PlannerInfo *root,
  double total_groups;

  /*
- * We don't care if this is the cheapest partial path - we can't
- * simply skip it, because it may be partially sorted in which
- * case we want to consider adding incremental sort (instead of
- * full sort, which is what happens above).
+ * We don't care if this is the cheapest partial path - we
+ * can't simply skip it, because it may be partially sorted in
+ * which case we want to consider adding incremental sort
+ * (instead of full sort, which is what happens above).
  */

  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
- input_path->pathkeys,
- &presorted_keys);
+ input_path->pathkeys,
+ &presorted_keys);

  /* No point in adding incremental sort on fully sorted paths. */
  if (is_sorted)
@@ -6510,8 +6509,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
  int presorted_keys;

  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);

  if (path == cheapest_path || is_sorted)
  {
@@ -6607,8 +6606,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
  else if (parse->hasAggs)
  {
  /*
- * We have aggregation, possibly with plain GROUP BY. Make
- * an AggPath.
+ * We have aggregation, possibly with plain GROUP BY. Make an
+ * AggPath.
  */
  add_path(grouped_rel, (Path *)
  create_agg_path(root,
@@ -6625,8 +6624,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
  else if (parse->groupClause)
  {
  /*
- * We have GROUP BY without aggregation or grouping sets.
- * Make a GroupPath.
+ * We have GROUP BY without aggregation or grouping sets. Make
+ * a GroupPath.
  */
  add_path(grouped_rel, (Path *)
  create_group_path(root,
@@ -6657,8 +6656,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
  int presorted_keys;

  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);

  /*
  * Insert a Sort node, if required. But there's no point in
@@ -6712,8 +6711,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
  continue;

  /*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
  */
  Assert(list_length(root->group_pathkeys) != 1);

@@ -7032,8 +7032,8 @@ create_partial_grouping_paths(PlannerInfo *root,
  int presorted_keys;

  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);

  /* Ignore already sorted paths */
  if (is_sorted)
@@ -7086,8 +7086,8 @@ create_partial_grouping_paths(PlannerInfo *root,
  int presorted_keys;

  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);

  if (path == cheapest_partial_path || is_sorted)
  {
@@ -7301,8 +7301,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
  * Consider incremental sort on all partial paths, if enabled.
  *
  * We can also skip the entire loop when we only have a single-item
- * group_pathkeys because then we can't possibly have a presorted
- * prefix of the list without having the list be fully sorted.
+ * group_pathkeys because then we can't possibly have a presorted prefix
+ * of the list without having the list be fully sorted.
  */
  if (!enable_incrementalsort || list_length(root->group_pathkeys) == 1)
  return;
@@ -7316,8 +7316,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
  double total_groups;

  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);

  if (is_sorted)
  continue;
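
The spill-cost arithmetic touched by the cost_agg hunk above can be
sanity-checked with a small standalone sketch. This is not PostgreSQL code:
the input values (numGroups, hashentrysize, mem_limit, ngroups_limit,
num_partitions) are made-up assumptions, chosen only to show how nbatches and
the recursion depth fall out of the formulas that the hunk reindents.

#include <math.h>
#include <stdio.h>

#define Max(a,b) ((a) > (b) ? (a) : (b))

int
main(void)
{
	double		numGroups = 1.5e6;	/* assumed number of groups */
	double		hashentrysize = 64;	/* assumed bytes per hash entry */
	double		mem_limit = 4.0 * 1024 * 1024;	/* assumed 4MB of hash memory */
	double		ngroups_limit = 65536;	/* assumed in-memory group limit */
	int			num_partitions = 4; /* assumed number of spill partitions */
	double		nbatches;
	int			depth;

	/* batches forced either by memory size or by the group-count limit */
	nbatches = Max((numGroups * hashentrysize) / mem_limit,
				   numGroups / ngroups_limit);
	nbatches = Max(ceil(nbatches), 1.0);
	num_partitions = Max(num_partitions, 2);

	/* each level of recursive spilling divides work across num_partitions */
	depth = (int) ceil(log(nbatches) / log(num_partitions));

	printf("nbatches = %.0f, depth = %d\n", nbatches, depth);
	return 0;
}

Compiled with "cc sketch.c -lm", this prints nbatches = 23, depth = 3 for the
assumed inputs.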