Rework HashAgg GUCs.
Eliminate enable_groupingsets_hash_disk, which was primarily useful for
testing grouping sets that use HashAgg and spill. Instead, hack the
table stats to convince the planner to choose hashed aggregation for
grouping sets that will spill to disk. Suggested by Melanie Plageman.

Rename enable_hashagg_disk to hashagg_avoid_disk_plan, and invert the
meaning of on/off. The new name indicates more strongly that it only
affects the planner. Also, the word "avoid" is less definite, which
should avoid surprises when HashAgg still needs to use the disk. Change
suggested by Justin Pryzby, though I chose a different GUC name.

Discussion: https://postgr.es/m/CAAKRu_aisiENMsPM2gC4oUY1hHG3yrCwY-fXUg22C6_MJUwQdA%40mail.gmail.com
Discussion: https://postgr.es/m/20200610021544.GA14879@telsasoft.com
Backpatch-through: 13
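To make the inverted on/off semantics concrete, here is a minimal standalone C sketch of the test that recurs at every call site in the diff below. It is not PostgreSQL source: hashagg_path_allowed and the hard-coded values are illustrative stand-ins. A hashed path is allowed either when the GUC is off, or when the estimated hash table fits in work_mem, which PostgreSQL stores in kilobytes and therefore multiplies by 1024 to compare against a byte count.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the real GUC variables. */
static bool hashagg_avoid_disk_plan = false;    /* new GUC; default off */
static int  work_mem = 4096;                    /* in KB, as in PostgreSQL */

/*
 * Mirror of the check used in the diff: allow a hashed path unless we
 * are avoiding disk plans AND the estimated table would not fit.
 */
static bool
hashagg_path_allowed(double hashentrysize, double ngroups)
{
    return !hashagg_avoid_disk_plan ||
        hashentrysize * ngroups <= work_mem * 1024L;
}

int
main(void)
{
    printf("fits, GUC off: %d\n", hashagg_path_allowed(100.0, 1000.0));
    hashagg_avoid_disk_plan = true;
    printf("fits, GUC on: %d\n", hashagg_path_allowed(100.0, 1000.0));  /* ~100 KB: allowed */
    printf("spills, GUC on: %d\n", hashagg_path_allowed(100.0, 1e8));   /* ~10 GB: rejected */
    return 0;
}

Note that turning the GUC on only removes the plan from consideration; as the commit message says, the executor may still spill if the planner's estimate was too low.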
@@ -4256,12 +4256,11 @@ consider_groupingsets_paths(PlannerInfo *root,
                                               dNumGroups - exclude_groups);
 
         /*
-         * If we have sortable columns to work with (gd->rollups is non-empty)
-         * and enable_groupingsets_hash_disk is disabled, don't generate
-         * hash-based paths that will exceed work_mem.
+         * gd->rollups is empty if we have only unsortable columns to work
+         * with.  Override work_mem in that case; otherwise, we'll rely on the
+         * sorted-input case to generate usable mixed paths.
         */
-        if (!enable_groupingsets_hash_disk &&
-            hashsize > work_mem * 1024L && gd->rollups)
+        if (hashsize > work_mem * 1024L && gd->rollups)
            return;             /* nope, won't fit */
 
        /*
@@ -4868,7 +4867,7 @@ create_distinct_paths(PlannerInfo *root,
        {
            Size        hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0);
 
-           allow_hash = enable_hashagg_disk ||
+           allow_hash = !hashagg_avoid_disk_plan ||
                (hashentrysize * numDistinctRows <= work_mem * 1024L);
        }
 
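The shape of the memory estimate in the create_distinct_paths() hunk above may be worth spelling out. The following is a simplified standalone model, not the real hash_agg_entry_size(): it assumes each hash entry costs roughly the tuple width plus a fixed per-entry overhead (the 64-byte constant is an assumption, not taken from PostgreSQL), multiplies by the distinct-row estimate, and compares against work_mem converted from kilobytes to bytes.

#include <stdbool.h>
#include <stdio.h>

#define ENTRY_OVERHEAD  64      /* assumed per-entry overhead; illustrative */

/* Simplified stand-in for: hashentrysize * numDistinctRows <= work_mem * 1024L */
static bool
distinct_hash_fits(int tuple_width, double ndistinct, int work_mem_kb)
{
    double      entrysize = tuple_width + ENTRY_OVERHEAD;

    return entrysize * ndistinct <= work_mem_kb * 1024L;
}

int
main(void)
{
    /* 32-byte rows, 10k distinct values, default work_mem of 4MB: fits. */
    printf("%s\n", distinct_hash_fits(32, 1e4, 4096) ? "hash" : "avoid hash");
    /* 1 million distinct values: ~92 MB estimated, exceeds 4MB. */
    printf("%s\n", distinct_hash_fits(32, 1e6, 4096) ? "hash" : "avoid hash");
    return 0;
}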
@@ -6773,7 +6772,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
             * were unable to sort above, then we'd better generate a Path, so
             * that we at least have one.
             */
-           if (enable_hashagg_disk ||
+           if (!hashagg_avoid_disk_plan ||
                hashaggtablesize < work_mem * 1024L ||
                grouped_rel->pathlist == NIL)
            {
@@ -6807,7 +6806,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                              agg_final_costs,
                                              dNumGroups);
 
-           if (enable_hashagg_disk ||
+           if (!hashagg_avoid_disk_plan ||
                hashaggtablesize < work_mem * 1024L)
                add_path(grouped_rel, (Path *)
                         create_agg_path(root,
@@ -7188,7 +7187,7 @@ create_partial_grouping_paths(PlannerInfo *root,
         * Tentatively produce a partial HashAgg Path, depending on if it
         * looks as if the hash table will fit in work_mem.
         */
-       if ((enable_hashagg_disk || hashaggtablesize < work_mem * 1024L) &&
+       if ((!hashagg_avoid_disk_plan || hashaggtablesize < work_mem * 1024L) &&
            cheapest_total_path != NULL)
        {
            add_path(partially_grouped_rel, (Path *)
@@ -7215,7 +7214,8 @@ create_partial_grouping_paths(PlannerInfo *root,
                                              dNumPartialPartialGroups);
 
        /* Do the same for partial paths. */
-       if ((enable_hashagg_disk || hashaggtablesize < work_mem * 1024L) &&
+       if ((!hashagg_avoid_disk_plan ||
+            hashaggtablesize < work_mem * 1024L) &&
            cheapest_partial_path != NULL)
        {
            add_partial_path(partially_grouped_rel, (Path *)