
Consider explicit incremental sort for mergejoins

For a mergejoin, if the given outer path or inner path is not already
well enough ordered, we need to do an explicit sort.  Currently, we
only consider explicit full sort and do not account for incremental
sort.
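
To see why ordering matters here, consider a toy single-pass merge
join (an illustrative sketch, not PostgreSQL code; merge_join and its
inputs are hypothetical).  A single forward pass finds all matches
only if both inputs are sorted on the join key, which is why the
planner must insert an explicit sort when a path is not ordered well
enough:

    #include <stdio.h>

    /*
     * Toy single-pass merge join over two inputs sorted on the join key.
     * For simplicity this assumes no duplicate keys; duplicates are what
     * force a real mergejoin to mark/restore its inner input.  If either
     * array were unsorted, this pass would silently miss matches.
     */
    static void
    merge_join(const int *outer, int nouter, const int *inner, int ninner)
    {
        int     i = 0;
        int     j = 0;

        while (i < nouter && j < ninner)
        {
            if (outer[i] < inner[j])
                i++;            /* advance the side with the smaller key */
            else if (outer[i] > inner[j])
                j++;
            else
            {
                printf("match on key %d\n", outer[i]);
                i++;
                j++;
            }
        }
    }

    int
    main(void)
    {
        int     outer[] = {1, 3, 5, 7};
        int     inner[] = {2, 3, 5, 8};

        merge_join(outer, 4, inner, 4);
        return 0;
    }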

In this patch, for the outer path of a mergejoin, we choose to use
explicit incremental sort if it is enabled and there are presorted
keys.  For the inner path, though, we cannot use incremental sort
because it does not support mark/restore at present.
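
That decision can be sketched roughly as follows (a simplified model
with hypothetical string keys; the real code compares PathKey lists
via pathkeys_count_contained_in and consults the
enable_incremental_sort GUC):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Simplified model of the outer-path choice: count how many leading
     * required sort keys the path already provides, and prefer incremental
     * sort when that presorted prefix is non-empty.
     */
    static bool
    choose_incremental_sort(const char *const *required, int nrequired,
                            const char *const *pathkeys, int npathkeys,
                            bool enable_incremental_sort,
                            int *presorted_keys)
    {
        int     n = 0;

        while (n < nrequired && n < npathkeys &&
               strcmp(required[n], pathkeys[n]) == 0)
            n++;

        *presorted_keys = n;

        /* fully sorted inputs never reach this point in the planner */
        return enable_incremental_sort && n > 0;
    }

    int
    main(void)
    {
        const char *const required[] = {"a", "b"};
        const char *const pathkeys[] = {"a"};
        int     presorted;

        if (choose_incremental_sort(required, 2, pathkeys, 1, true, &presorted))
            printf("incremental sort on %d presorted key(s)\n", presorted);
        else
            printf("full sort\n");
        return 0;
    }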

The rationale is based on the assumption that incremental sort is
always faster than full sort when there are presorted keys, a premise
that has been applied in various parts of the code.  In addition, the
current cost model tends to favor incremental sort as being cheaper
than full sort in the presence of presorted keys, making it reasonable
not to consider full sort in such cases.
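
As a back-of-the-envelope illustration of that premise (a
comparison-count model, not the actual
cost_sort/cost_incremental_sort formulas): a full sort of N rows
costs roughly N*log2(N) comparisons, while an incremental sort over G
presorted groups sorts N/G rows at a time, for roughly N*log2(N/G),
which is smaller whenever G > 1:

    #include <math.h>
    #include <stdio.h>

    /*
     * Simplified comparison-count model showing why incremental sort is
     * expected to win whenever presorted keys split the input into more
     * than one group.
     */
    int
    main(void)
    {
        double  N = 1e6;        /* input rows */

        for (double G = 1; G <= 10000; G *= 100)
            printf("groups=%6.0f  full=%12.0f  incremental=%12.0f\n",
                   G, N * log2(N), N * log2(N / G));
        return 0;
    }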

One might object: what happens if a mergejoin with an incremental
sort on its outer path is itself selected as the inner path of
another mergejoin?  This should not be a problem, because mergejoin
itself does not support mark/restore either, and we will add a
Material node on top of it anyway in this case (see
final_cost_mergejoin).
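
For intuition on mark/restore (a toy sketch, not PostgreSQL code; the
Materialized struct is hypothetical): when the outer side has
duplicate join keys, the inner side must be rewound to the start of
the current key group and rescanned, which a buffering Material node
makes possible and a pure stream, such as incremental sort's output,
does not:

    #include <stdio.h>

    /*
     * Toy illustration of mark/restore: with duplicate keys on the outer
     * side, mergejoin rescans the inner tuples of the current key group
     * once per duplicate.  A buffered (materialized) input makes that a
     * simple position reset.
     */
    typedef struct
    {
        const int  *tuples;     /* buffered input */
        int         ntuples;
        int         pos;        /* current read position */
        int         mark;       /* saved position */
    } Materialized;

    static int
    next_tuple(Materialized *m)
    {
        return (m->pos < m->ntuples) ? m->tuples[m->pos++] : -1;
    }

    int
    main(void)
    {
        int         inner[] = {3, 3, 4};
        Materialized m = {inner, 3, 0, 0};
        int         k;

        m.mark = m.pos;         /* mark the start of key group "3" */
        for (k = 0; k < 2; k++) /* one pass per duplicate outer tuple */
        {
            int     t;

            m.pos = m.mark;     /* restore: rewind to the marked position */
            while ((t = next_tuple(&m)) == 3)
                printf("pass %d: matched inner %d\n", k + 1, t);
        }
        return 0;
    }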

There is one ensuing plan change in the regression tests, and we have
to modify that test case to ensure that it continues to test what it
is intended to.

No backpatch as this could result in plan changes.

Author: Richard Guo
Reviewed-by: David Rowley, Tomas Vondra
Discussion: https://postgr.es/m/CAMbWs49x425QrX7h=Ux05WEnt8GS757H-jOP3_xsX5t1FoUsZw@mail.gmail.com
Richard Guo
2024-10-09 17:14:42 +09:00
parent c4528fdfa9
commit 828e94c9d2
6 changed files with 185 additions and 42 deletions

src/backend/optimizer/path/costsize.c

@@ -3532,7 +3532,8 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
  * join quals here, except for obtaining the scan selectivity estimate which
  * is really essential (but fortunately, use of caching keeps the cost of
  * getting that down to something reasonable).
- * We also assume that cost_sort is cheap enough to use here.
+ * We also assume that cost_sort/cost_incremental_sort is cheap enough to use
+ * here.
  *
  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
  * other data to be used by final_cost_mergejoin
@@ -3569,7 +3570,8 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 				outerendsel,
 				innerstartsel,
 				innerendsel;
-	Path		sort_path;		/* dummy for result of cost_sort */
+	Path		sort_path;		/* dummy for result of
+								 * cost_sort/cost_incremental_sort */
 
 	/* Protect some assumptions below that rowcounts aren't zero */
 	if (outer_path_rows <= 0)
@@ -3682,16 +3684,54 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 
 	if (outersortkeys)			/* do we need to sort outer? */
 	{
-		cost_sort(&sort_path,
-				  root,
-				  outersortkeys,
-				  outer_path->disabled_nodes,
-				  outer_path->total_cost,
-				  outer_path_rows,
-				  outer_path->pathtarget->width,
-				  0.0,
-				  work_mem,
-				  -1.0);
+		bool		use_incremental_sort = false;
+		int			presorted_keys;
+
+		/*
+		 * We choose to use incremental sort if it is enabled and there are
+		 * presorted keys; otherwise we use full sort.
+		 */
+		if (enable_incremental_sort)
+		{
+			bool		is_sorted PG_USED_FOR_ASSERTS_ONLY;
+
+			is_sorted = pathkeys_count_contained_in(outersortkeys,
+													outer_path->pathkeys,
+													&presorted_keys);
+			Assert(!is_sorted);
+
+			if (presorted_keys > 0)
+				use_incremental_sort = true;
+		}
+
+		if (!use_incremental_sort)
+		{
+			cost_sort(&sort_path,
+					  root,
+					  outersortkeys,
+					  outer_path->disabled_nodes,
+					  outer_path->total_cost,
+					  outer_path_rows,
+					  outer_path->pathtarget->width,
+					  0.0,
+					  work_mem,
+					  -1.0);
+		}
+		else
+		{
+			cost_incremental_sort(&sort_path,
+								  root,
+								  outersortkeys,
+								  presorted_keys,
+								  outer_path->disabled_nodes,
+								  outer_path->startup_cost,
+								  outer_path->total_cost,
+								  outer_path_rows,
+								  outer_path->pathtarget->width,
+								  0.0,
+								  work_mem,
+								  -1.0);
+		}
 		disabled_nodes += sort_path.disabled_nodes;
 		startup_cost += sort_path.startup_cost;
 		startup_cost += (sort_path.total_cost - sort_path.startup_cost)
@@ -3711,6 +3751,11 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 
 	if (innersortkeys)			/* do we need to sort inner? */
 	{
+		/*
+		 * We do not consider incremental sort for inner path, because
+		 * incremental sort does not support mark/restore.
+		 */
 		cost_sort(&sort_path,
 				  root,
 				  innersortkeys,

src/backend/optimizer/plan/createplan.c

@@ -179,6 +179,8 @@ static void copy_generic_path_info(Plan *dest, Path *src);
 static void copy_plan_costsize(Plan *dest, Plan *src);
 static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
 									 double limit_tuples);
+static void label_incrementalsort_with_costsize(PlannerInfo *root, IncrementalSort *plan,
+												List *pathkeys, double limit_tuples);
 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
 static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
 								   TableSampleClause *tsc);
@@ -4523,12 +4525,51 @@ create_mergejoin_plan(PlannerInfo *root,
 	if (best_path->outersortkeys)
 	{
 		Relids		outer_relids = outer_path->parent->relids;
-		Sort	   *sort = make_sort_from_pathkeys(outer_plan,
-												   best_path->outersortkeys,
-												   outer_relids);
+		Plan	   *sort_plan;
+		bool		use_incremental_sort = false;
+		int			presorted_keys;
 
-		label_sort_with_costsize(root, sort, -1.0);
-		outer_plan = (Plan *) sort;
+		/*
+		 * We choose to use incremental sort if it is enabled and there are
+		 * presorted keys; otherwise we use full sort.
+		 */
+		if (enable_incremental_sort)
+		{
+			bool		is_sorted PG_USED_FOR_ASSERTS_ONLY;
+
+			is_sorted = pathkeys_count_contained_in(best_path->outersortkeys,
+													outer_path->pathkeys,
+													&presorted_keys);
+			Assert(!is_sorted);
+
+			if (presorted_keys > 0)
+				use_incremental_sort = true;
+		}
+
+		if (!use_incremental_sort)
+		{
+			sort_plan = (Plan *)
+				make_sort_from_pathkeys(outer_plan,
+										best_path->outersortkeys,
+										outer_relids);
+
+			label_sort_with_costsize(root, (Sort *) sort_plan, -1.0);
+		}
+		else
+		{
+			sort_plan = (Plan *)
+				make_incrementalsort_from_pathkeys(outer_plan,
+												   best_path->outersortkeys,
+												   outer_relids,
+												   presorted_keys);
+
+			label_incrementalsort_with_costsize(root,
+												(IncrementalSort *) sort_plan,
+												best_path->outersortkeys,
+												-1.0);
+		}
+
+		outer_plan = sort_plan;
 		outerpathkeys = best_path->outersortkeys;
 	}
 	else
@@ -4536,6 +4577,11 @@ create_mergejoin_plan(PlannerInfo *root,
 
 	if (best_path->innersortkeys)
 	{
+		/*
+		 * We do not consider incremental sort for inner path, because
+		 * incremental sort does not support mark/restore.
+		 */
 		Relids		inner_relids = inner_path->parent->relids;
 		Sort	   *sort = make_sort_from_pathkeys(inner_plan,
 												   best_path->innersortkeys,
@@ -5447,10 +5493,6 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
 	Plan	   *lefttree = plan->plan.lefttree;
 	Path		sort_path;		/* dummy for result of cost_sort */
 
-	/*
-	 * This function shouldn't have to deal with IncrementalSort plans because
-	 * they are only created from corresponding Path nodes.
-	 */
 	Assert(IsA(plan, Sort));
 
 	cost_sort(&sort_path, root, NIL,
@@ -5469,6 +5511,37 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
 	plan->plan.parallel_safe = lefttree->parallel_safe;
 }
 
+/*
+ * Same as label_sort_with_costsize, but labels the IncrementalSort node
+ * instead.
+ */
+static void
+label_incrementalsort_with_costsize(PlannerInfo *root, IncrementalSort *plan,
+									List *pathkeys, double limit_tuples)
+{
+	Plan	   *lefttree = plan->sort.plan.lefttree;
+	Path		sort_path;		/* dummy for result of cost_incremental_sort */
+
+	Assert(IsA(plan, IncrementalSort));
+
+	cost_incremental_sort(&sort_path, root, pathkeys,
+						  plan->nPresortedCols,
+						  plan->sort.plan.disabled_nodes,
+						  lefttree->startup_cost,
+						  lefttree->total_cost,
+						  lefttree->plan_rows,
+						  lefttree->plan_width,
+						  0.0,
+						  work_mem,
+						  limit_tuples);
+	plan->sort.plan.startup_cost = sort_path.startup_cost;
+	plan->sort.plan.total_cost = sort_path.total_cost;
+	plan->sort.plan.plan_rows = lefttree->plan_rows;
+	plan->sort.plan.plan_width = lefttree->plan_width;
+	plan->sort.plan.parallel_aware = false;
+	plan->sort.plan.parallel_safe = lefttree->parallel_safe;
+}
+
 /*
  * bitmap_subplan_mark_shared
  *	 Set isshared flag in bitmap subplan so that it will be created in