mirror of
https://github.com/postgres/postgres.git
synced 2025-10-27 00:12:01 +03:00
Use Append rather than MergeAppend for scanning ordered partitions.
Use Append rather than MergeAppend for scanning ordered partitions. If we need ordered output from a scan of a partitioned table, but the ordering matches the partition ordering, then we don't need to use a MergeAppend to combine the pre-ordered per-partition scan results: a plain Append will produce the same results. This both saves useless comparison work inside the MergeAppend proper, and allows us to start returning tuples after starting up just the first child node, not all of them. However, all is not peaches and cream, because if some of the child nodes have high startup costs then there will be big discontinuities in the tuples-returned-versus-elapsed-time curve. The planner's cost model cannot handle that (yet, anyway). If we model the Append's startup cost as being just the first child's startup cost, we may drastically underestimate the cost of fetching slightly more tuples than are available from the first child. Since we've had bad experiences with over-optimistic choices of "fast start" plans for ORDER BY LIMIT queries, that seems scary. As a klugy workaround, set the startup cost estimate for an ordered Append to be the sum of its children's startup costs (as MergeAppend would). This doesn't really describe reality, but it's less likely to cause a bad plan choice than an underestimated startup cost would. In practice, the cases where we really care about this optimization will have child plans that are IndexScans with zero startup cost, so that the overly conservative estimate is still just zero. David Rowley, reviewed by Julien Rouhaud and Antonin Houska Discussion: https://postgr.es/m/CAKJS1f-hAqhPLRk_RaSFTgYxd=Tz5hA7kQ2h4-DhJufQk8TGuw@mail.gmail.com
This commit is contained in:
@@ -1878,27 +1878,83 @@ cost_append(AppendPath *apath)
|
||||
|
||||
apath->path.startup_cost = 0;
|
||||
apath->path.total_cost = 0;
|
||||
apath->path.rows = 0;
|
||||
|
||||
if (apath->subpaths == NIL)
|
||||
return;
|
||||
|
||||
if (!apath->path.parallel_aware)
|
||||
{
|
||||
Path *subpath = (Path *) linitial(apath->subpaths);
|
||||
List *pathkeys = apath->path.pathkeys;
|
||||
|
||||
/*
|
||||
* Startup cost of non-parallel-aware Append is the startup cost of
|
||||
* first subpath.
|
||||
*/
|
||||
apath->path.startup_cost = subpath->startup_cost;
|
||||
|
||||
/* Compute rows and costs as sums of subplan rows and costs. */
|
||||
foreach(l, apath->subpaths)
|
||||
if (pathkeys == NIL)
|
||||
{
|
||||
Path *subpath = (Path *) lfirst(l);
|
||||
Path *subpath = (Path *) linitial(apath->subpaths);
|
||||
|
||||
apath->path.rows += subpath->rows;
|
||||
apath->path.total_cost += subpath->total_cost;
|
||||
/*
|
||||
* For an unordered, non-parallel-aware Append we take the startup
|
||||
* cost as the startup cost of the first subpath.
|
||||
*/
|
||||
apath->path.startup_cost = subpath->startup_cost;
|
||||
|
||||
/* Compute rows and costs as sums of subplan rows and costs. */
|
||||
foreach(l, apath->subpaths)
|
||||
{
|
||||
Path *subpath = (Path *) lfirst(l);
|
||||
|
||||
apath->path.rows += subpath->rows;
|
||||
apath->path.total_cost += subpath->total_cost;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/*
|
||||
* For an ordered, non-parallel-aware Append we take the startup
|
||||
* cost as the sum of the subpath startup costs. This ensures
|
||||
* that we don't underestimate the startup cost when a query's
|
||||
* LIMIT is such that several of the children have to be run to
|
||||
* satisfy it. This might be overkill --- another plausible hack
|
||||
* would be to take the Append's startup cost as the maximum of
|
||||
* the child startup costs. But we don't want to risk believing
|
||||
* that an ORDER BY LIMIT query can be satisfied at small cost
|
||||
* when the first child has small startup cost but later ones
|
||||
* don't. (If we had the ability to deal with nonlinear cost
|
||||
* interpolation for partial retrievals, we would not need to be
|
||||
* so conservative about this.)
|
||||
*
|
||||
* This case is also different from the above in that we have to
|
||||
* account for possibly injecting sorts into subpaths that aren't
|
||||
* natively ordered.
|
||||
*/
|
||||
foreach(l, apath->subpaths)
|
||||
{
|
||||
Path *subpath = (Path *) lfirst(l);
|
||||
Path sort_path; /* dummy for result of cost_sort */
|
||||
|
||||
if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
|
||||
{
|
||||
/*
|
||||
* We'll need to insert a Sort node, so include costs for
|
||||
* that. We can use the parent's LIMIT if any, since we
|
||||
* certainly won't pull more than that many tuples from
|
||||
* any child.
|
||||
*/
|
||||
cost_sort(&sort_path,
|
||||
NULL, /* doesn't currently need root */
|
||||
pathkeys,
|
||||
subpath->total_cost,
|
||||
subpath->rows,
|
||||
subpath->pathtarget->width,
|
||||
0.0,
|
||||
work_mem,
|
||||
apath->limit_tuples);
|
||||
subpath = &sort_path;
|
||||
}
|
||||
|
||||
apath->path.rows += subpath->rows;
|
||||
apath->path.startup_cost += subpath->startup_cost;
|
||||
apath->path.total_cost += subpath->total_cost;
|
||||
}
|
||||
}
|
||||
}
|
||||
else /* parallel-aware */
|
||||
@@ -1906,6 +1962,9 @@ cost_append(AppendPath *apath)
|
||||
int i = 0;
|
||||
double parallel_divisor = get_parallel_divisor(&apath->path);
|
||||
|
||||
/* Parallel-aware Append never produces ordered output. */
|
||||
Assert(apath->path.pathkeys == NIL);
|
||||
|
||||
/* Calculate startup cost. */
|
||||
foreach(l, apath->subpaths)
|
||||
{
|
||||
|
||||
Reference in New Issue
Block a user