mirror of https://github.com/postgres/postgres.git

Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.
I manually fixed a couple of comments that pgindent uglified.
Tom Lane
2022-05-12 15:17:30 -04:00
parent 93909599cd
commit 23e7b38bfe
287 changed files with 5193 additions and 3549 deletions


@@ -1777,17 +1777,18 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
* When building a fractional path, determine a cheapest fractional
* path for each child relation too. Looking at startup and total
* costs is not enough, because the cheapest fractional path may be
* dominated by two separate paths (one for startup, one for total).
* When building a fractional path, determine a cheapest
* fractional path for each child relation too. Looking at startup
* and total costs is not enough, because the cheapest fractional
* path may be dominated by two separate paths (one for startup,
* one for total).
*
* When needed (building fractional path), determine the cheapest
* fractional path too.
*/
if (root->tuple_fraction > 0)
{
double path_fraction = (1.0 / root->tuple_fraction);
double path_fraction = (1.0 / root->tuple_fraction);
cheapest_fractional =
get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
@@ -1796,8 +1797,8 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
path_fraction);
/*
* If we found no path with matching pathkeys, use the cheapest
* total path instead.
* If we found no path with matching pathkeys, use the
* cheapest total path instead.
*
* XXX We might consider partially sorted paths too (with an
* incremental sort on top). But we'd have to build all the

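The hunk above is about picking a cheapest fractional path per child relation. The claim that "the cheapest fractional path may be dominated by two separate paths (one for startup, one for total)" is easiest to see with numbers. Below is a minimal standalone sketch, assuming the usual PostgreSQL convention that fetching a fraction f of a path's output costs startup + f * (total - startup); the path names and costs are made up for illustration.

    #include <stdio.h>

    typedef struct { const char *name; double startup; double total; } DemoPath;

    /* cost to fetch a fraction f of the path's rows (assumed cost convention) */
    static double fractional_cost(const DemoPath *p, double f)
    {
        return p->startup + f * (p->total - p->startup);
    }

    int main(void)
    {
        DemoPath paths[] = {
            {"cheapest-startup", 0.0, 2000.0},
            {"cheapest-total", 400.0, 500.0},
            {"middle-ground", 50.0, 700.0},
        };
        double f = 0.1;                 /* analogue of root->tuple_fraction */
        const DemoPath *best = &paths[0];

        for (int i = 0; i < 3; i++)
        {
            printf("%-16s fractional cost = %.1f\n",
                   paths[i].name, fractional_cost(&paths[i], f));
            if (fractional_cost(&paths[i], f) < fractional_cost(best, f))
                best = &paths[i];
        }
        printf("winner for f=%.2f: %s\n", f, best->name);
        return 0;
    }

Here the "middle-ground" path wins at f = 0.1 even though it is neither the cheapest-startup nor the cheapest-total path, which is why the code asks get_cheapest_fractional_path_for_pathkeys for a dedicated winner per child instead of reusing either of the other two.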

@@ -1794,7 +1794,7 @@ is_fake_var(Expr *expr)
static double
get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
{
double width = -1.0; /* fake value */
double width = -1.0; /* fake value */
if (IsA(expr, RelabelType))
expr = (Expr *) ((RelabelType *) expr)->arg;
@@ -1802,17 +1802,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
/* Try to find actual stat in corresponding relation */
if (IsA(expr, Var))
{
Var *var = (Var *) expr;
Var *var = (Var *) expr;
if (var->varno > 0 && var->varno < root->simple_rel_array_size)
{
RelOptInfo *rel = root->simple_rel_array[var->varno];
RelOptInfo *rel = root->simple_rel_array[var->varno];
if (rel != NULL &&
var->varattno >= rel->min_attr &&
var->varattno <= rel->max_attr)
{
int ndx = var->varattno - rel->min_attr;
int ndx = var->varattno - rel->min_attr;
if (rel->attr_widths[ndx] > 0)
width = rel->attr_widths[ndx];
@@ -1823,7 +1823,7 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
/* Didn't find any actual stats, try using type width instead. */
if (width < 0.0)
{
Node *node = (Node*) expr;
Node *node = (Node *) expr;
width = get_typavgwidth(exprType(node), exprTypmod(node));
}
@@ -1832,17 +1832,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
* Values are passed as Datum type, so comparisons can't be cheaper than
* comparing a Datum value.
*
* FIXME I find this reasoning questionable. We may pass int2, and comparing
* it is probably a bit cheaper than comparing a bigint.
* FIXME I find this reasoning questionable. We may pass int2, and
* comparing it is probably a bit cheaper than comparing a bigint.
*/
if (width <= sizeof(Datum))
return 1.0;
/*
* We consider the cost of a comparison not to be directly proportional to
* width of the argument, because widths of the arguments could be slightly
* different (we only know the average width for the whole column). So we
* use log16(width) as an estimate.
* width of the argument, because widths of the arguments could be
* slightly different (we only know the average width for the whole
* column). So we use log16(width) as an estimate.
*/
return 1.0 + 0.125 * LOG2(width / sizeof(Datum));
}
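The reflowed comment above describes the heuristic in get_width_cost_multiplier: anything up to sizeof(Datum) costs 1.0, and wider values grow only with log16 of the width, i.e. 1.0 + 0.125 * LOG2(width / sizeof(Datum)). A standalone arithmetic sketch of how gently that multiplier grows, assuming an 8-byte Datum (64-bit build):

    #include <math.h>
    #include <stdio.h>

    #define DATUM_SIZE 8.0              /* assumed sizeof(Datum) on 64-bit builds */

    /* mirrors the formula described in the comment above */
    static double width_cost_multiplier(double width)
    {
        if (width <= DATUM_SIZE)
            return 1.0;
        return 1.0 + 0.125 * log2(width / DATUM_SIZE);
    }

    int main(void)
    {
        double widths[] = {4, 8, 16, 32, 128, 1024};

        for (int i = 0; i < 6; i++)
            printf("width %6.0f -> multiplier %.3f\n",
                   widths[i], width_cost_multiplier(widths[i]));
        return 0;
    }

This prints 1.0 for 4 and 8 bytes, 1.25 for 32 bytes and 1.875 for 1 kB, so even a kilobyte-wide average value is charged less than twice an 8-byte one, matching the reasoning that the widths are only column averages.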
@@ -1902,23 +1902,23 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
bool heapSort)
{
Cost per_tuple_cost = 0.0;
ListCell *lc;
List *pathkeyExprs = NIL;
ListCell *lc;
List *pathkeyExprs = NIL;
double tuplesPerPrevGroup = tuples;
double totalFuncCost = 1.0;
bool has_fake_var = false;
int i = 0;
Oid prev_datatype = InvalidOid;
List *cache_varinfos = NIL;
List *cache_varinfos = NIL;
/* fallback if pathkeys is unknown */
if (list_length(pathkeys) == 0)
{
/*
* If we'll use a bounded heap-sort keeping just K tuples in memory, for
* a total number of tuple comparisons of N log2 K; but the constant
* factor is a bit higher than for quicksort. Tweak it so that the cost
* curve is continuous at the crossover point.
* If we'll use a bounded heap-sort keeping just K tuples in memory,
* for a total number of tuple comparisons of N log2 K; but the
* constant factor is a bit higher than for quicksort. Tweak it so
* that the cost curve is continuous at the crossover point.
*/
output_tuples = (heapSort) ? 2.0 * output_tuples : tuples;
per_tuple_cost += 2.0 * cpu_operator_cost * LOG2(output_tuples);
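The comment above covers the no-pathkeys fallback: a bounded heap-sort keeping K tuples needs roughly N * log2(K) comparisons, and doubling output_tuples bumps the constant so the cost curve stays continuous at the quicksort/heapsort crossover. A rough standalone sketch of just that branch, assuming the default cpu_operator_cost of 0.0025:

    #include <math.h>
    #include <stdio.h>

    #define CPU_OPERATOR_COST 0.0025    /* default GUC value, assumed */

    /* per-tuple comparison cost when the pathkeys are unknown */
    static double fallback_per_tuple_cost(double tuples, double output_tuples,
                                          int heap_sort)
    {
        /* bounded heap-sort keeps K = output_tuples rows; a full sort sees all */
        double n = heap_sort ? 2.0 * output_tuples : tuples;

        return 2.0 * CPU_OPERATOR_COST * log2(n);
    }

    int main(void)
    {
        double tuples = 1e6;

        printf("full sort:    %.6f per tuple\n",
               fallback_per_tuple_cost(tuples, tuples, 0));
        printf("top-100 heap: %.6f per tuple\n",
               fallback_per_tuple_cost(tuples, 100.0, 1));
        return 0;
    }

Multiplying the two per-tuple figures by the one million input tuples gives the familiar N log2 N versus N log2 2K totals.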
@@ -1930,17 +1930,17 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
}
/*
* Computing total cost of sorting takes into account:
* - per column comparison function cost
* - we try to compute needed number of comparison per column
* Computing total cost of sorting takes into account the per-column
* comparison function cost. We try to compute the needed number of
* comparisons per column.
*/
foreach(lc, pathkeys)
{
PathKey *pathkey = (PathKey*) lfirst(lc);
EquivalenceMember *em;
double nGroups,
correctedNGroups;
Cost funcCost = 1.0;
PathKey *pathkey = (PathKey *) lfirst(lc);
EquivalenceMember *em;
double nGroups,
correctedNGroups;
Cost funcCost = 1.0;
/*
* We believe that equivalence members aren't very different, so, to
@@ -1985,10 +1985,10 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
pathkeyExprs = lappend(pathkeyExprs, em->em_expr);
/*
* We need to calculate the number of comparisons for this column, which
* requires knowing the group size. So we estimate the number of groups
* by calling estimate_num_groups_incremental(), which estimates the
* group size for "new" pathkeys.
* We need to calculate the number of comparisons for this column,
* which requires knowing the group size. So we estimate the number of
* groups by calling estimate_num_groups_incremental(), which
* estimates the group size for "new" pathkeys.
*
* Note: estimate_num_groups_incremental does not handle fake Vars, so
* use a default estimate otherwise.
@@ -1999,26 +1999,30 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
&cache_varinfos,
list_length(pathkeyExprs) - 1);
else if (tuples > 4.0)
/*
* Use geometric mean as estimation if there are no stats.
*
* We don't use DEFAULT_NUM_DISTINCT here, because thats used for
* a single column, but here were dealing with multiple columns.
* We don't use DEFAULT_NUM_DISTINCT here, because that's used for
* a single column, but here we're dealing with multiple columns.
*/
nGroups = ceil(2.0 + sqrt(tuples) * (i + 1) / list_length(pathkeys));
else
nGroups = tuples;
/*
* Presorted keys are not considered in the cost above, but we still do
* have to compare them in the qsort comparator. So make sure to factor
* in the cost in that case.
* Presorted keys are not considered in the cost above, but we still
* do have to compare them in the qsort comparator. So make sure to
* factor in the cost in that case.
*/
if (i >= nPresortedKeys)
{
if (heapSort)
{
/* have to keep at least one group, and a multiple of group size */
/*
* have to keep at least one group, and a multiple of group
* size
*/
correctedNGroups = ceil(output_tuples / tuplesPerPrevGroup);
}
else
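When there are no usable statistics, the hunk above falls back to nGroups = ceil(2.0 + sqrt(tuples) * (i + 1) / list_length(pathkeys)), so later sort keys are credited with more distinct groups than earlier ones. A small sketch of how that fallback behaves; the numbers are purely illustrative:

    #include <math.h>
    #include <stdio.h>

    /* no-stats fallback from the hunk above; i is the 0-based key position */
    static double fallback_ngroups(double tuples, int i, int n_pathkeys)
    {
        if (tuples <= 4.0)
            return tuples;
        return ceil(2.0 + sqrt(tuples) * (i + 1) / n_pathkeys);
    }

    int main(void)
    {
        double tuples = 1e6;
        int n_pathkeys = 3;

        for (int i = 0; i < n_pathkeys; i++)
            printf("key %d: ~%.0f groups\n",
                   i, fallback_ngroups(tuples, i, n_pathkeys));
        return 0;
    }

With three keys and a million rows the fallback assumes roughly 336, 669 and 1002 distinct groups for the first, second and third key, rather than the per-column DEFAULT_NUM_DISTINCT constant the comment warns against reusing here.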
@@ -2033,19 +2037,20 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
i++;
/*
* Uniform distributions with all groups being of the same size are the
* best case, with nice smooth behavior. Real-world distributions tend
* not to be uniform, though, and we dont have any reliable easy-to-use
* information. As a basic defense against skewed distributions, we use
* a 1.5 factor to make the expected group a bit larger, but we need to
* be careful not to make the group larger than in the preceding step.
* Uniform distributions with all groups being of the same size are
* the best case, with nice smooth behavior. Real-world distributions
* tend not to be uniform, though, and we don't have any reliable
* easy-to-use information. As a basic defense against skewed
* distributions, we use a 1.5 factor to make the expected group a bit
* larger, but we need to be careful not to make the group larger than
* in the preceding step.
*/
tuplesPerPrevGroup = Min(tuplesPerPrevGroup,
ceil(1.5 * tuplesPerPrevGroup / nGroups));
/*
* Once we get single-row group, it means tuples in the group are unique
* and we can skip all remaining columns.
* Once we get single-row group, it means tuples in the group are
* unique and we can skip all remaining columns.
*/
if (tuplesPerPrevGroup <= 1.0)
break;
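The two comments above describe how the per-column group size shrinks: each new sort key divides the previous group size by its number of groups, inflated by a 1.5 skew factor but never allowed to grow, and the loop stops once groups reach a single row. A minimal sketch of that recurrence, using made-up group counts:

    #include <math.h>
    #include <stdio.h>

    #define Min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        double tuples_per_prev_group = 1e6;     /* start: the whole input */
        double ngroups_per_key[] = {50.0, 400.0, 250000.0};

        for (int i = 0; i < 3; i++)
        {
            /* 1.5 defends against skew; Min() keeps groups from growing */
            tuples_per_prev_group =
                Min(tuples_per_prev_group,
                    ceil(1.5 * tuples_per_prev_group / ngroups_per_key[i]));
            printf("after key %d: group size ~%.0f\n",
                   i + 1, tuples_per_prev_group);

            if (tuples_per_prev_group <= 1.0)
                break;                          /* groups are now unique rows */
        }
        return 0;
    }

Starting from a million rows, the group size drops to 30000, then 113, then 1, at which point the remaining columns can be skipped.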
@@ -2057,15 +2062,15 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
per_tuple_cost *= cpu_operator_cost;
/*
* Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles E.
* Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort estimation
* formula has additional term proportional to number of tuples (See Chapter
* 8.2 and Theorem 4.1). That affects cases with a low number of tuples,
* approximately less than 1e4. We could implement it as an additional
* multiplier under the logarithm, but we use a bit more complex formula
* which takes into account the number of unique tuples and its not clear
* how to combine the multiplier with the number of groups. Estimate it as
* 10 in cpu_operator_cost unit.
* Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles
* E. Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort
* estimation formula has additional term proportional to number of tuples
* (see Chapter 8.2 and Theorem 4.1). That affects cases with a low number
* of tuples, approximately less than 1e4. We could implement it as an
* additional multiplier under the logarithm, but we use a bit more
* complex formula which takes into account the number of unique tuples
* and it's not clear how to combine the multiplier with the number of
* groups. Estimate it as 10 cpu_operator_cost units.
*/
per_tuple_cost += 10 * cpu_operator_cost;
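The comment above adds a flat 10 * cpu_operator_cost per tuple to stand in for the linear term of the quicksort analysis in Cormen et al., on the grounds that it mainly matters for small inputs. A toy illustration of how that flat term's share of a single-key per-tuple cost shrinks as the input grows (default cpu_operator_cost of 0.0025 assumed; not the full estimate):

    #include <math.h>
    #include <stdio.h>

    #define CPU_OPERATOR_COST 0.0025    /* default GUC value, assumed */

    int main(void)
    {
        double sizes[] = {1e2, 1e3, 1e4, 1e6};

        for (int i = 0; i < 4; i++)
        {
            /* toy per-tuple cost: one comparison per quicksort level ... */
            double log_part = 2.0 * CPU_OPERATOR_COST * log2(sizes[i]);
            /* ... plus the flat linear-term allowance */
            double flat_part = 10 * CPU_OPERATOR_COST;

            printf("N=%8.0f  log part %.4f  flat part %.4f  (%.0f%% of total)\n",
                   sizes[i], log_part, flat_part,
                   100.0 * flat_part / (log_part + flat_part));
        }
        return 0;
    }

In the real estimate the logarithmic part also grows with the number of keys and the group sizes, so the flat term fades faster than in this single-key toy.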
@@ -2082,7 +2087,7 @@ cost_sort_estimate(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
double tuples)
{
return compute_cpu_sort_cost(root, pathkeys, nPresortedKeys,
0, tuples, tuples, false);
0, tuples, tuples, false);
}
/*


@@ -685,9 +685,9 @@ get_eclass_for_sort_expr(PlannerInfo *root,
/*
* Match!
*
* Copy the sortref if it wasn't set yet. That may happen if the
* ec was constructed from WHERE clause, i.e. it doesn't have a
* target reference at all.
* Copy the sortref if it wasn't set yet. That may happen if
* the ec was constructed from WHERE clause, i.e. it doesn't
* have a target reference at all.
*/
if (cur_ec->ec_sortref == 0 && sortref > 0)
cur_ec->ec_sortref = sortref;


@@ -1258,7 +1258,7 @@ sort_inner_and_outer(PlannerInfo *root,
foreach(l, all_pathkeys)
{
PathKey *front_pathkey = (PathKey *) lfirst(l);
PathKey *front_pathkey = (PathKey *) lfirst(l);
List *cur_mergeclauses;
List *outerkeys;
List *innerkeys;


@@ -32,7 +32,7 @@
#include "utils/selfuncs.h"
/* Consider reordering of GROUP BY keys? */
bool enable_group_by_reordering = true;
bool enable_group_by_reordering = true;
static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys);
static bool matches_boolean_partition_clause(RestrictInfo *rinfo,
@@ -352,7 +352,7 @@ int
group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
List **group_clauses)
{
List *new_group_pathkeys= NIL,
List *new_group_pathkeys = NIL,
*new_group_clauses = NIL;
ListCell *lc;
int n;
@@ -365,16 +365,16 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
* there's a matching GROUP BY key. If we find one, we append it to the
* list, and do the same for the clauses.
*
* Once we find the first pathkey without a matching GROUP BY key, the rest
* of the pathkeys are useless and can't be used to evaluate the grouping,
* so we abort the loop and ignore the remaining pathkeys.
* Once we find the first pathkey without a matching GROUP BY key, the
* rest of the pathkeys are useless and can't be used to evaluate the
* grouping, so we abort the loop and ignore the remaining pathkeys.
*
* XXX Pathkeys are built in a way to allow simply comparing pointers.
*/
foreach(lc, pathkeys)
{
PathKey *pathkey = (PathKey *) lfirst(lc);
SortGroupClause *sgc;
PathKey *pathkey = (PathKey *) lfirst(lc);
SortGroupClause *sgc;
/* abort on first mismatch */
if (!list_member_ptr(*group_pathkeys, pathkey))
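The reflowed comment above describes the core of group_keys_reorder_by_pathkeys: walk the supplied pathkeys in order, pull each matching GROUP BY key to the front, and stop at the first pathkey without a match, since anything after it cannot help the grouping. A minimal standalone sketch of that prefix-matching idea, using integer ids instead of PathKey pointers and an in-place swap instead of rebuilding the lists (purely illustrative):

    #include <stdio.h>

    /* returns how many leading path keys found a matching group key */
    static int reorder_by_pathkeys(const int *pathkeys, int n_pathkeys,
                                   int *group_keys, int n_group_keys)
    {
        int matched = 0;

        for (int i = 0; i < n_pathkeys && matched < n_group_keys; i++)
        {
            int found = -1;

            for (int j = matched; j < n_group_keys; j++)
                if (group_keys[j] == pathkeys[i])
                {
                    found = j;
                    break;
                }

            if (found < 0)
                break;          /* first mismatch: remaining pathkeys are useless */

            /* move the matching group key to the front */
            int tmp = group_keys[matched];
            group_keys[matched] = group_keys[found];
            group_keys[found] = tmp;
            matched++;
        }
        return matched;
    }

    int main(void)
    {
        int pathkeys[] = {3, 1, 7};      /* input ordering, e.g. ORDER BY c, a, x */
        int group_keys[] = {1, 2, 3};    /* GROUP BY a, b, c */
        int n = reorder_by_pathkeys(pathkeys, 3, group_keys, 3);

        printf("matched %d leading keys; new GROUP BY order:", n);
        for (int i = 0; i < 3; i++)
            printf(" %d", group_keys[i]);
        printf("\n");
        return 0;
    }

The example matches two leading keys and reorders the grouping to (c, a, b), so the first two sort levels come for free from the input ordering.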
@@ -403,13 +403,14 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
/*
* Used to generate all permutations of a pathkey list.
*/
typedef struct PathkeyMutatorState {
typedef struct PathkeyMutatorState
{
List *elemsList;
ListCell **elemCells;
void **elems;
int *positions;
int mutatorNColumns;
int count;
int mutatorNColumns;
int count;
} PathkeyMutatorState;
@@ -428,9 +429,9 @@ typedef struct PathkeyMutatorState {
static void
PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
{
int i;
int i;
int n = end - start;
ListCell *lc;
ListCell *lc;
memset(state, 0, sizeof(*state));
@@ -438,8 +439,8 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
state->elemsList = list_copy(elems);
state->elems = palloc(sizeof(void*) * n);
state->elemCells = palloc(sizeof(ListCell*) * n);
state->elems = palloc(sizeof(void *) * n);
state->elemCells = palloc(sizeof(ListCell *) * n);
state->positions = palloc(sizeof(int) * n);
i = 0;
@@ -459,10 +460,10 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
static void
PathkeyMutatorSwap(int *a, int i, int j)
{
int s = a[i];
int s = a[i];
a[i] = a[j];
a[j] = s;
a[i] = a[j];
a[j] = s;
}
/*
@@ -471,7 +472,10 @@ PathkeyMutatorSwap(int *a, int i, int j)
static bool
PathkeyMutatorNextSet(int *a, int n)
{
int j, k, l, r;
int j,
k,
l,
r;
j = n - 2;
@@ -507,7 +511,7 @@ PathkeyMutatorNextSet(int *a, int n)
static List *
PathkeyMutatorNext(PathkeyMutatorState *state)
{
int i;
int i;
state->count++;
@@ -528,9 +532,9 @@ PathkeyMutatorNext(PathkeyMutatorState *state)
}
/* update the list cells to point to the right elements */
for(i = 0; i < state->mutatorNColumns; i++)
for (i = 0; i < state->mutatorNColumns; i++)
lfirst(state->elemCells[i]) =
(void *) state->elems[ state->positions[i] - 1 ];
(void *) state->elems[state->positions[i] - 1];
return state->elemsList;
}
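PathkeyMutatorNextSet above advances an array of 1-based positions, and its j/k/l/r bookkeeping reads like the classic lexicographic next-permutation algorithm, whose output PathkeyMutatorNext then uses to repoint the copied list cells. A self-contained sketch of that algorithm on a plain int array, under that reading and not taken from the PostgreSQL sources:

    #include <stdio.h>

    static void swap_ints(int *a, int i, int j)
    {
        int s = a[i];

        a[i] = a[j];
        a[j] = s;
    }

    /* advance a[] to the next lexicographic permutation; returns 0 when exhausted */
    static int next_permutation(int *a, int n)
    {
        int j = n - 2;

        while (j >= 0 && a[j] >= a[j + 1])      /* longest non-increasing suffix */
            j--;
        if (j < 0)
            return 0;

        int k = n - 1;

        while (a[k] <= a[j])                    /* rightmost element larger than a[j] */
            k--;
        swap_ints(a, j, k);

        for (int l = j + 1, r = n - 1; l < r; l++, r--)     /* reverse the suffix */
            swap_ints(a, l, r);
        return 1;
    }

    int main(void)
    {
        int a[] = {1, 2, 3};

        do
        {
            printf("%d %d %d\n", a[0], a[1], a[2]);
        } while (next_permutation(a, 3));
        return 0;
    }

Starting from 1 2 3 this prints all six orderings in lexicographic order, which is exactly the enumeration the mutator needs over the chosen pathkey positions.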
@@ -541,7 +545,7 @@ PathkeyMutatorNext(PathkeyMutatorState *state)
typedef struct PathkeySortCost
{
Cost cost;
PathKey *pathkey;
PathKey *pathkey;
} PathkeySortCost;
static int
@@ -581,41 +585,42 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
List **group_pathkeys, List **group_clauses,
int n_preordered)
{
List *new_group_pathkeys = NIL,
*new_group_clauses = NIL,
*var_group_pathkeys;
List *new_group_pathkeys = NIL,
*new_group_clauses = NIL,
*var_group_pathkeys;
ListCell *cell;
PathkeyMutatorState mstate;
double cheapest_sort_cost = -1.0;
ListCell *cell;
PathkeyMutatorState mstate;
double cheapest_sort_cost = -1.0;
int nFreeKeys;
int nToPermute;
int nFreeKeys;
int nToPermute;
/* If there are less than 2 unsorted pathkeys, we're done. */
if (list_length(*group_pathkeys) - n_preordered < 2)
return false;
/*
* We could exhaustively cost all possible orderings of the pathkeys, but for
* a large number of pathkeys it might be prohibitively expensive. So we try
* to apply simple cheap heuristics first - we sort the pathkeys by sort cost
* (as if the pathkey was sorted independently) and then check only the four
* cheapest pathkeys. The remaining pathkeys are kept ordered by cost.
* We could exhaustively cost all possible orderings of the pathkeys, but
* for a large number of pathkeys it might be prohibitively expensive. So
* we try to apply simple cheap heuristics first - we sort the pathkeys by
* sort cost (as if the pathkey was sorted independently) and then check
* only the four cheapest pathkeys. The remaining pathkeys are kept
* ordered by cost.
*
* XXX This is a very simple heuristics, but likely to work fine for most
* cases (because the number of GROUP BY clauses tends to be lower than 4).
* But it ignores how the number of distinct values in each pathkey affects
* the following steps. It might be better to use "more expensive" pathkey
* first if it has many distinct values, because it then limits the number
* of comparisons for the remaining pathkeys. But evaluating that is likely
* quite the expensive.
* cases (because the number of GROUP BY clauses tends to be lower than
* 4). But it ignores how the number of distinct values in each pathkey
* affects the following steps. It might be better to use "more expensive"
* pathkey first if it has many distinct values, because it then limits
* the number of comparisons for the remaining pathkeys. But evaluating
* that is likely quite the expensive.
*/
nFreeKeys = list_length(*group_pathkeys) - n_preordered;
nToPermute = 4;
if (nFreeKeys > nToPermute)
{
int i;
int i;
PathkeySortCost *costs = palloc(sizeof(PathkeySortCost) * nFreeKeys);
/* skip the pre-ordered pathkeys */
@@ -624,7 +629,7 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
/* estimate cost for sorting individual pathkeys */
for (i = 0; cell != NULL; i++, (cell = lnext(*group_pathkeys, cell)))
{
List *to_cost = list_make1(lfirst(cell));
List *to_cost = list_make1(lfirst(cell));
Assert(i < nFreeKeys);
@@ -658,28 +663,29 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
Assert(list_length(new_group_pathkeys) == list_length(*group_pathkeys));
/*
* Generate pathkey lists with permutations of the first nToPermute pathkeys.
* Generate pathkey lists with permutations of the first nToPermute
* pathkeys.
*
* XXX We simply calculate sort cost for each individual pathkey list, but
* there's room for two dynamic programming optimizations here. Firstly, we
* may pass the current "best" cost to cost_sort_estimate so that it can
* "abort" if the estimated pathkeys list exceeds it. Secondly, it could pass
* the return information about the position when it exceeded the cost, and
* we could skip all permutations with the same prefix.
* there's room for two dynamic programming optimizations here. Firstly,
* we may pass the current "best" cost to cost_sort_estimate so that it
* can "abort" if the estimated pathkeys list exceeds it. Secondly, it
* could pass the return information about the position when it exceeded
* the cost, and we could skip all permutations with the same prefix.
*
* Imagine we've already found ordering with cost C1, and we're evaluating
* another ordering - cost_sort_estimate() calculates cost by adding the
* pathkeys one by one (more or less), and the cost only grows. If at any
* point it exceeds C1, it can't possibly be "better" so we can discard it.
* But we also know that we can discard all ordering with the same prefix,
* because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) then the
* same thing will happen for any ordering with this prefix.
* point it exceeds C1, it can't possibly be "better" so we can discard
* it. But we also know that we can discard all ordering with the same
* prefix, because if we're estimating (a,b,c,d) and we exceed C1 at (a,b)
* then the same thing will happen for any ordering with this prefix.
*/
PathkeyMutatorInit(&mstate, new_group_pathkeys, n_preordered, n_preordered + nToPermute);
while((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
while ((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
{
Cost cost;
Cost cost;
cost = cost_sort_estimate(root, var_group_pathkeys, n_preordered, nrows);
@@ -694,11 +700,11 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
/* Reorder the group clauses according to the reordered pathkeys. */
foreach(cell, new_group_pathkeys)
{
PathKey *pathkey = (PathKey *) lfirst(cell);
PathKey *pathkey = (PathKey *) lfirst(cell);
new_group_clauses = lappend(new_group_clauses,
get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
*group_clauses));
get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
*group_clauses));
}
/* Just append the rest GROUP BY clauses */
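Taken together, the hunks above describe the strategy of get_cheapest_group_keys_order: leave the n_preordered keys alone, order the remaining keys by their individually estimated sort cost, then exhaustively permute only the four cheapest and keep whichever full ordering costs least. A compressed sketch of that control flow; toy_cost is a made-up stand-in for cost_sort_estimate and the key "costs" are fake:

    #include <stdio.h>
    #include <stdlib.h>

    #define NKEYS 6
    #define NTOPERMUTE 4

    /* stand-in for cost_sort_estimate(); not the planner's cost model */
    static double toy_cost(const int order[], int nkeys)
    {
        double cost = 0.0, weight = 1.0;

        for (int i = 0; i < nkeys; i++)
        {
            cost += weight * order[i];  /* pretend the key id is its per-key cost */
            weight *= 0.5;              /* later keys compare fewer tuples */
        }
        return cost;
    }

    static void swap_keys(int *a, int i, int j)
    {
        int t = a[i];

        a[i] = a[j];
        a[j] = t;
    }

    static int cmp_int(const void *a, const void *b)
    {
        return *(const int *) a - *(const int *) b;
    }

    /* try every permutation of order[lo..hi], remember the cheapest full ordering */
    static void permute(int *order, int lo, int hi, double *best_cost, int *best)
    {
        if (lo == hi)
        {
            double c = toy_cost(order, NKEYS);

            if (c < *best_cost)
            {
                *best_cost = c;
                for (int i = 0; i < NKEYS; i++)
                    best[i] = order[i];
            }
            return;
        }
        for (int i = lo; i <= hi; i++)
        {
            swap_keys(order, lo, i);
            permute(order, lo + 1, hi, best_cost, best);
            swap_keys(order, lo, i);
        }
    }

    int main(void)
    {
        int n_preordered = 1;                   /* leading key fixed by the input path */
        int keys[NKEYS] = {5, 9, 2, 7, 4, 8};   /* fake per-key costs */
        int best[NKEYS];
        double best_cost = 1e300;

        /* heuristic step 1: order the free keys by individual sort cost */
        qsort(keys + n_preordered, NKEYS - n_preordered, sizeof(int), cmp_int);

        /* heuristic step 2: exhaustively permute only the NTOPERMUTE cheapest */
        permute(keys, n_preordered, n_preordered + NTOPERMUTE - 1, &best_cost, best);

        printf("best toy cost %.2f, key order:", best_cost);
        for (int i = 0; i < NKEYS; i++)
            printf(" %d", best[i]);
        printf("\n");
        return 0;
    }

Only 4! = 24 full orderings are costed regardless of how many free keys there are, which is the point of the heuristic the comment defends.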
@@ -745,8 +751,8 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
PathKeyInfo *info;
int n_preordered = 0;
List *pathkeys = group_pathkeys;
List *clauses = group_clauses;
List *pathkeys = group_pathkeys;
List *clauses = group_clauses;
/* always return at least the original pathkeys/clauses */
info = makeNode(PathKeyInfo);
@@ -756,9 +762,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
infos = lappend(infos, info);
/*
* Should we try generating alternative orderings of the group keys? If not,
* we produce only the order specified in the query, i.e. the optimization
* is effectively disabled.
* Should we try generating alternative orderings of the group keys? If
* not, we produce only the order specified in the query, i.e. the
* optimization is effectively disabled.
*/
if (!enable_group_by_reordering)
return infos;
@@ -782,8 +788,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
}
/*
* If the path is sorted in some way, try reordering the group keys to match
* as much of the ordering as possible - we get this sort for free (mostly).
* If the path is sorted in some way, try reordering the group keys to
* match as much of the ordering as possible - we get this sort for free
* (mostly).
*
* We must not do this when there are no grouping sets, because those use
* more complex logic to decide the ordering.
@@ -2400,8 +2407,8 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
static int
pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys)
{
ListCell *key;
int n = 0;
ListCell *key;
int n = 0;
/* no special ordering requested for grouping */
if (root->group_pathkeys == NIL)
@@ -2414,7 +2421,7 @@ pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys)
/* walk the pathkeys and search for matching group key */
foreach(key, pathkeys)
{
PathKey *pathkey = (PathKey *) lfirst(key);
PathKey *pathkey = (PathKey *) lfirst(key);
/* no matching group key, we're done */
if (!list_member_ptr(root->group_pathkeys, pathkey))


@@ -1162,8 +1162,8 @@ mark_async_capable_plan(Plan *plan, Path *path)
case T_ProjectionPath:
/*
* If the generated plan node includes a Result node for
* the projection, we can't execute it asynchronously.
* If the generated plan node includes a Result node for the
* projection, we can't execute it asynchronously.
*/
if (IsA(plan, Result))
return false;


@@ -6250,7 +6250,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
foreach (lc2, pathkey_orderings)
foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6283,8 +6283,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->hasAggs)
{
/*
* We have aggregation, possibly with plain GROUP BY. Make
* an AggPath.
* We have aggregation, possibly with plain GROUP BY.
* Make an AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
@@ -6301,8 +6301,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (group_clauses)
{
/*
* We have GROUP BY without aggregation or grouping sets.
* Make a GroupPath.
* We have GROUP BY without aggregation or grouping
* sets. Make a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
@@ -6321,8 +6321,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
/*
* Now we may consider incremental sort on this path, but only
* when the path is not already sorted and when incremental sort
* is enabled.
* when the path is not already sorted and when incremental
* sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
@@ -6335,8 +6335,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
continue;
/*
* We should have already excluded pathkeys of length 1 because
* then presorted_keys > 0 would imply is_sorted was true.
* We should have already excluded pathkeys of length 1
* because then presorted_keys > 0 would imply is_sorted was
* true.
*/
Assert(list_length(root->group_pathkeys) != 1);
@@ -6357,8 +6358,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->hasAggs)
{
/*
* We have aggregation, possibly with plain GROUP BY. Make an
* AggPath.
* We have aggregation, possibly with plain GROUP BY. Make
* an AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
@@ -6375,8 +6376,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->groupClause)
{
/*
* We have GROUP BY without aggregation or grouping sets. Make
* a GroupPath.
* We have GROUP BY without aggregation or grouping sets.
* Make a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
@@ -6421,7 +6422,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
foreach (lc2, pathkey_orderings)
foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6435,8 +6436,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
&presorted_keys);
/*
* Insert a Sort node, if required. But there's no point in
* sorting anything but the cheapest path.
* Insert a Sort node, if required. But there's no point
* in sorting anything but the cheapest path.
*/
if (!is_sorted)
{
@@ -6471,24 +6472,30 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
dNumGroups));
/*
* Now we may consider incremental sort on this path, but only
* when the path is not already sorted and when incremental
* sort is enabled.
* Now we may consider incremental sort on this path, but
* only when the path is not already sorted and when
* incremental sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
/* Restore the input path (we might have added Sort on top). */
/*
* Restore the input path (we might have added Sort on
* top).
*/
path = path_original;
/* no shared prefix, not point in building incremental sort */
/*
* no shared prefix, not point in building incremental
* sort
*/
if (presorted_keys == 0)
continue;
/*
* We should have already excluded pathkeys of length 1
* because then presorted_keys > 0 would imply is_sorted was
* true.
* because then presorted_keys > 0 would imply is_sorted
* was true.
*/
Assert(list_length(root->group_pathkeys) != 1);
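The comments in the hunks above all gate the same decision: an incremental sort is only worth building when the path is not already fully sorted, incremental sort is enabled, and there is at least one presorted leading key, with the single-key case already excluded because presorted_keys > 0 would have implied is_sorted. A minimal sketch of that gate as a pure predicate, purely a restatement for illustration and not the planner function itself:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* should we bother building an incremental sort on top of this path? */
    static bool
    consider_incremental_sort(bool is_sorted, bool enable_incremental_sort,
                              int presorted_keys, int n_group_pathkeys)
    {
        if (is_sorted || !enable_incremental_sort)
            return false;           /* already sorted, or feature disabled */

        if (presorted_keys == 0)
            return false;           /* no shared prefix: a plain sort is enough */

        /* length-1 pathkey lists were excluded: presorted_keys > 0 would mean sorted */
        assert(n_group_pathkeys != 1);

        return true;
    }

    int main(void)
    {
        printf("%d\n", consider_incremental_sort(false, true, 2, 3));   /* 1 */
        printf("%d\n", consider_incremental_sort(false, true, 0, 3));   /* 0 */
        printf("%d\n", consider_incremental_sort(true, true, 2, 3));    /* 0 */
        return 0;
    }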
@@ -6741,7 +6748,7 @@ create_partial_grouping_paths(PlannerInfo *root,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
foreach (lc2, pathkey_orderings)
foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6874,7 +6881,7 @@ create_partial_grouping_paths(PlannerInfo *root,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
foreach (lc2, pathkey_orderings)
foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6924,8 +6931,8 @@ create_partial_grouping_paths(PlannerInfo *root,
/*
* Now we may consider incremental sort on this path, but only
* when the path is not already sorted and when incremental sort
* is enabled.
* when the path is not already sorted and when incremental
* sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
@@ -6938,8 +6945,9 @@ create_partial_grouping_paths(PlannerInfo *root,
continue;
/*
* We should have already excluded pathkeys of length 1 because
* then presorted_keys > 0 would imply is_sorted was true.
* We should have already excluded pathkeys of length 1
* because then presorted_keys > 0 would imply is_sorted was
* true.
*/
Assert(list_length(root->group_pathkeys) != 1);


@@ -391,7 +391,7 @@ contain_mutable_functions_walker(Node *node, void *context)
const JsonConstructorExpr *ctor = (JsonConstructorExpr *) node;
ListCell *lc;
bool is_jsonb =
ctor->returning->format->format_type == JS_FORMAT_JSONB;
ctor->returning->format->format_type == JS_FORMAT_JSONB;
/* Check argument_type => json[b] conversions */
foreach(lc, ctor->args)
@@ -899,7 +899,7 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context)
/* JsonExpr is parallel-unsafe if subtransactions can be used. */
else if (IsA(node, JsonExpr))
{
JsonExpr *jsexpr = (JsonExpr *) node;
JsonExpr *jsexpr = (JsonExpr *) node;
if (ExecEvalJsonNeedsSubTransaction(jsexpr, NULL))
{
@@ -3581,7 +3581,7 @@ eval_const_expressions_mutator(Node *node,
context->case_val = raw;
formatted = eval_const_expressions_mutator((Node *) jve->formatted_expr,
context);
context);
context->case_val = save_case_val;
@@ -5315,7 +5315,7 @@ pull_paramids_walker(Node *node, Bitmapset **context)
return false;
if (IsA(node, Param))
{
Param *param = (Param *)node;
Param *param = (Param *) node;
*context = bms_add_member(*context, param->paramid);
return false;


@@ -968,102 +968,102 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
{
table_relation_estimate_size(rel, attr_widths, pages, tuples,
allvisfrac);
table_relation_estimate_size(rel, attr_widths, pages, tuples,
allvisfrac);
}
else if (rel->rd_rel->relkind == RELKIND_INDEX)
{
/*
* XXX: It'd probably be good to move this into a callback, individual
* index types e.g. know if they have a metapage.
*/
/* it has storage, ok to call the smgr */
curpages = RelationGetNumberOfBlocks(rel);
/* report estimated # pages */
*pages = curpages;
/* quick exit if rel is clearly empty */
if (curpages == 0)
{
*tuples = 0;
*allvisfrac = 0;
return;
}
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;
relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
/*
* Discount the metapage while estimating the number of tuples. This
* is a kluge because it assumes more than it ought to about index
* structure. Currently it's OK for btree, hash, and GIN indexes but
* suspect for GiST indexes.
*/
if (relpages > 0)
{
curpages--;
relpages--;
}
/* estimate number of tuples from previous tuple density */
if (reltuples >= 0 && relpages > 0)
density = reltuples / (double) relpages;
else
{
/*
* XXX: It'd probably be good to move this into a callback,
* individual index types e.g. know if they have a metapage.
* If we have no data because the relation was never vacuumed,
* estimate tuple width from attribute datatypes. We assume here
* that the pages are completely full, which is OK for tables
* (since they've presumably not been VACUUMed yet) but is
* probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the parent
* table's size.
*
* Note: this code intentionally disregards alignment
* considerations, because (a) that would be gilding the lily
* considering how crude the estimate is, and (b) it creates
* platform dependencies in the default plans which are kind of a
* headache for regression testing.
*
* XXX: Should this logic be more index specific?
*/
int32 tuple_width;
/* it has storage, ok to call the smgr */
curpages = RelationGetNumberOfBlocks(rel);
tuple_width = get_rel_data_width(rel, attr_widths);
tuple_width += MAXALIGN(SizeofHeapTupleHeader);
tuple_width += sizeof(ItemIdData);
/* note: integer division is intentional here */
density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
}
*tuples = rint(density * (double) curpages);
/* report estimated # pages */
*pages = curpages;
/* quick exit if rel is clearly empty */
if (curpages == 0)
{
*tuples = 0;
*allvisfrac = 0;
return;
}
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;
relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
/*
* Discount the metapage while estimating the number of tuples.
* This is a kluge because it assumes more than it ought to about
* index structure. Currently it's OK for btree, hash, and GIN
* indexes but suspect for GiST indexes.
*/
if (relpages > 0)
{
curpages--;
relpages--;
}
/* estimate number of tuples from previous tuple density */
if (reltuples >= 0 && relpages > 0)
density = reltuples / (double) relpages;
else
{
/*
* If we have no data because the relation was never vacuumed,
* estimate tuple width from attribute datatypes. We assume
* here that the pages are completely full, which is OK for
* tables (since they've presumably not been VACUUMed yet) but
* is probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the
* parent table's size.
*
* Note: this code intentionally disregards alignment
* considerations, because (a) that would be gilding the lily
* considering how crude the estimate is, and (b) it creates
* platform dependencies in the default plans which are kind
* of a headache for regression testing.
*
* XXX: Should this logic be more index specific?
*/
int32 tuple_width;
tuple_width = get_rel_data_width(rel, attr_widths);
tuple_width += MAXALIGN(SizeofHeapTupleHeader);
tuple_width += sizeof(ItemIdData);
/* note: integer division is intentional here */
density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
}
*tuples = rint(density * (double) curpages);
/*
* We use relallvisible as-is, rather than scaling it up like we
* do for the pages and tuples counts, on the theory that any
* pages added since the last VACUUM are most likely not marked
* all-visible. But costsize.c wants it converted to a fraction.
*/
if (relallvisible == 0 || curpages <= 0)
*allvisfrac = 0;
else if ((double) relallvisible >= curpages)
*allvisfrac = 1;
else
*allvisfrac = (double) relallvisible / curpages;
/*
* We use relallvisible as-is, rather than scaling it up like we do
* for the pages and tuples counts, on the theory that any pages added
* since the last VACUUM are most likely not marked all-visible. But
* costsize.c wants it converted to a fraction.
*/
if (relallvisible == 0 || curpages <= 0)
*allvisfrac = 0;
else if ((double) relallvisible >= curpages)
*allvisfrac = 1;
else
*allvisfrac = (double) relallvisible / curpages;
}
else
{
/*
* Just use whatever's in pg_class. This covers foreign tables,
* sequences, and also relkinds without storage (shouldn't get
* here?); see initializations in AddNewRelationTuple(). Note
* that FDW must cope if reltuples is -1!
*/
*pages = rel->rd_rel->relpages;
*tuples = rel->rd_rel->reltuples;
*allvisfrac = 0;
/*
* Just use whatever's in pg_class. This covers foreign tables,
* sequences, and also relkinds without storage (shouldn't get here?);
* see initializations in AddNewRelationTuple(). Note that FDW must
* cope if reltuples is -1!
*/
*pages = rel->rd_rel->relpages;
*tuples = rel->rd_rel->reltuples;
*allvisfrac = 0;
}
}
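The reindented index branch above estimates row counts from tuple density: discount the metapage, then use reltuples/relpages if the relation has been vacuumed, otherwise fall back to a width-based density of (BLCKSZ - SizeOfPageHeaderData) / tuple_width; the all-visible fraction is simply relallvisible / curpages clamped to [0, 1]. A standalone arithmetic sketch with representative constants (8 kB pages and a 24-byte page header assumed; the pg_class numbers and tuple width are made up):

    #include <math.h>
    #include <stdio.h>

    #define BLCKSZ 8192
    #define SIZE_OF_PAGE_HEADER 24      /* assumed SizeOfPageHeaderData */

    int main(void)
    {
        double curpages = 1000;         /* current size from the storage manager */
        double relpages = 800;          /* from pg_class, possibly stale */
        double reltuples = 160000;      /* -1 if never vacuumed/analyzed */
        double relallvisible = 500;
        double density;
        double allvisfrac;

        /* discount the metapage (OK for btree, hash, GIN) */
        if (relpages > 0)
        {
            curpages--;
            relpages--;
        }

        if (reltuples >= 0 && relpages > 0)
            density = reltuples / relpages;         /* previous tuple density */
        else
        {
            int tuple_width = 44;                   /* made-up average width */

            /* assume completely full pages; integer division is intentional */
            density = (BLCKSZ - SIZE_OF_PAGE_HEADER) / tuple_width;
        }

        if (relallvisible == 0 || curpages <= 0)
            allvisfrac = 0;
        else if (relallvisible >= curpages)
            allvisfrac = 1;
        else
            allvisfrac = relallvisible / curpages;

        printf("estimated tuples = %.0f, allvisfrac = %.3f\n",
               rint(density * curpages), allvisfrac);
        return 0;
    }

With these inputs the density is about 200 tuples per page, giving roughly 200,050 estimated tuples across the 999 non-meta pages and an all-visible fraction of about 0.50.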