Mirror of https://github.com/postgres/postgres.git (synced 2025-11-21 00:42:43 +03:00)
Fix up planner infrastructure to support LATERAL properly.
This patch takes care of a number of problems having to do with failure to choose valid join orders and incorrect handling of lateral references pulled up from subqueries. Notable changes:

* Add a LateralJoinInfo data structure similar to SpecialJoinInfo, to represent join ordering constraints created by lateral references. (I first considered extending the SpecialJoinInfo structure, but the semantics are different enough that a separate data structure seems better.) Extend join_is_legal() and related functions to prevent trying to form unworkable joins, and to ensure that we will consider joins that satisfy lateral references even if the joins would be clauseless.

* Fill in the infrastructure needed for the last few types of relation scan paths to support parameterization. We'd have wanted this eventually anyway, but it is necessary now because a relation that gets pulled up out of a UNION ALL subquery may acquire a reltargetlist containing lateral references, meaning that its paths *have* to be parameterized whether or not we have any code that can push join quals down into the scan.

* Compute data about lateral references early in query_planner(), and save in RelOptInfo nodes, to avoid repetitive calculations later.

* Assorted corner-case bug fixes.

There are probably still some bugs left, but this is a lot closer to being real than it was before.
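The first bullet above describes the new LateralJoinInfo node, but its definition is not part of the hunks shown below, which only cover costing changes. As a rough illustration of the kind of ordering constraint it represents, here is a minimal sketch: the struct and function names are hypothetical stand-ins, not the actual PostgreSQL definitions, and the real join_is_legal() logic is considerably more involved.

#include "postgres.h"
#include "nodes/bitmapset.h"
#include "nodes/pg_list.h"

/* Hypothetical stand-in for the LateralJoinInfo idea described above. */
typedef struct LateralRefSketch
{
    Index       referencing_relid;   /* rel whose scan uses a lateral value */
    Bitmapset  *referenced_relids;   /* rels that supply that value */
} LateralRefSketch;

/*
 * One necessary condition a lateral reference imposes on join order:
 * the referencing rel must receive the referenced rels' values from the
 * outer side of a nestloop, so it can never appear on the outer side of
 * a join whose inner side is supposed to supply those values.
 */
static bool
lateral_order_ok_sketch(Bitmapset *outer_relids, Bitmapset *inner_relids,
                        List *lateral_refs)
{
    ListCell   *lc;

    foreach(lc, lateral_refs)
    {
        LateralRefSketch *ref = (LateralRefSketch *) lfirst(lc);

        if (bms_is_member(ref->referencing_relid, outer_relids) &&
            bms_overlap(ref->referenced_relids, inner_relids))
            return false;
    }
    return true;
}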
@@ -855,14 +855,19 @@ cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
 /*
  * cost_tidscan
  *    Determines and returns the cost of scanning a relation using TIDs.
  *
  * 'baserel' is the relation to be scanned
  * 'tidquals' is the list of TID-checkable quals
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
  */
 void
 cost_tidscan(Path *path, PlannerInfo *root,
-             RelOptInfo *baserel, List *tidquals)
+             RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
 {
     Cost        startup_cost = 0;
     Cost        run_cost = 0;
     bool        isCurrentOf = false;
+    QualCost    qpqual_cost;
     Cost        cpu_per_tuple;
     QualCost    tid_qual_cost;
     int         ntuples;
@@ -873,8 +878,11 @@ cost_tidscan(Path *path, PlannerInfo *root,
     Assert(baserel->relid > 0);
     Assert(baserel->rtekind == RTE_RELATION);
 
-    /* For now, tidscans are never parameterized */
-    path->rows = baserel->rows;
+    /* Mark the path with the correct row estimate */
+    if (param_info)
+        path->rows = param_info->ppi_rows;
+    else
+        path->rows = baserel->rows;
 
     /* Count how many tuples we expect to retrieve */
     ntuples = 0;
@@ -931,10 +939,12 @@ cost_tidscan(Path *path, PlannerInfo *root,
     /* disk costs --- assume each tuple on a different page */
     run_cost += spc_random_page_cost * ntuples;
 
-    /* CPU costs */
-    startup_cost += baserel->baserestrictcost.startup +
-        tid_qual_cost.per_tuple;
-    cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
+    /* Add scanning CPU costs */
+    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+    /* XXX currently we assume TID quals are a subset of qpquals */
+    startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
+    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
         tid_qual_cost.per_tuple;
     run_cost += cpu_per_tuple * ntuples;
 
@@ -1097,25 +1107,32 @@ cost_valuesscan(Path *path, PlannerInfo *root,
  * and should NOT be counted here.
  */
 void
-cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
+cost_ctescan(Path *path, PlannerInfo *root,
+             RelOptInfo *baserel, ParamPathInfo *param_info)
 {
     Cost        startup_cost = 0;
     Cost        run_cost = 0;
+    QualCost    qpqual_cost;
     Cost        cpu_per_tuple;
 
     /* Should only be applied to base relations that are CTEs */
     Assert(baserel->relid > 0);
     Assert(baserel->rtekind == RTE_CTE);
 
-    /* ctescans are never parameterized */
-    path->rows = baserel->rows;
+    /* Mark the path with the correct row estimate */
+    if (param_info)
+        path->rows = param_info->ppi_rows;
+    else
+        path->rows = baserel->rows;
 
     /* Charge one CPU tuple cost per row for tuplestore manipulation */
     cpu_per_tuple = cpu_tuple_cost;
 
     /* Add scanning CPU costs */
-    startup_cost += baserel->baserestrictcost.startup;
-    cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
+    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+    startup_cost += qpqual_cost.startup;
+    cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
     run_cost += cpu_per_tuple * baserel->tuples;
 
     path->startup_cost = startup_cost;
@@ -3904,13 +3921,20 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
     {
         Node       *node = (Node *) lfirst(lc);
 
-        if (IsA(node, Var))
+        /*
+         * Ordinarily, a Var in a rel's reltargetlist must belong to that rel;
+         * but there are corner cases involving LATERAL references in
+         * appendrel members where that isn't so (see set_append_rel_size()).
+         * If the Var has the wrong varno, fall through to the generic case
+         * (it doesn't seem worth the trouble to be any smarter).
+         */
+        if (IsA(node, Var) &&
+            ((Var *) node)->varno == rel->relid)
         {
             Var        *var = (Var *) node;
             int         ndx;
             int32       item_width;
 
-            Assert(var->varno == rel->relid);
             Assert(var->varattno >= rel->min_attr);
             Assert(var->varattno <= rel->max_attr);
 
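For context on how the new param_info arguments get filled in: callers elsewhere in the planner (not shown in this excerpt) look up a ParamPathInfo for the set of outer rels the scan depends on and pass it through. A minimal sketch, assuming the existing get_baserel_parampathinfo() helper; the wrapper function itself is hypothetical and only stands in for the real path-construction code.

#include "postgres.h"
#include "optimizer/cost.h"       /* cost_tidscan() */
#include "optimizer/pathnode.h"   /* get_baserel_parampathinfo() */

/*
 * Hypothetical wrapper: cost a TID scan of 'baserel' that depends on values
 * from 'required_outer' (e.g. because of a lateral reference or a pushed-down
 * join qual), using the new cost_tidscan() signature from the diff above.
 */
static void
cost_tidscan_with_param_sketch(PlannerInfo *root, RelOptInfo *baserel,
                               List *tidquals, Relids required_outer,
                               Path *path)
{
    ParamPathInfo *param_info;

    /* NULL when required_outer is empty, i.e. an unparameterized scan */
    param_info = get_baserel_parampathinfo(root, baserel, required_outer);

    /* Row estimate and qual costs now come from param_info when present */
    cost_tidscan(path, root, baserel, tidquals, param_info);
}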