This removes "Add Result Cache executor node".  It seems that something
weird is going on with the tracking of cache hits and misses as
highlighted by many buildfarm animals.  It's not yet clear what the
problem is as other parts of the plan indicate that the cache did work
correctly, it's just the hits and misses that were being reported as 0.

This is especially a bad time to have the buildfarm so broken, so
reverting before too many more animals go red.

Discussion: https://postgr.es/m/CAApHDvq_hydhfovm4=izgWs+C5HqEeRScjMbOgbpC-jRAeK3Yw@mail.gmail.com
David Rowley
2021-04-01 13:33:23 +13:00
parent b6002a796d
commit 28b3e3905c
45 changed files with 196 additions and 2833 deletions

src/backend/optimizer/path/allpaths.c

@@ -4032,10 +4032,6 @@ print_path(PlannerInfo *root, Path *path, int indent)
ptype = "Material";
subpath = ((MaterialPath *) path)->subpath;
break;
case T_ResultCache:
ptype = "ResultCache";
subpath = ((ResultCachePath *) path)->subpath;
break;
case T_UniquePath:
ptype = "Unique";
subpath = ((UniquePath *) path)->subpath;

src/backend/optimizer/path/costsize.c

@@ -79,7 +79,6 @@
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "executor/nodeHash.h"
#include "executor/nodeResultCache.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
@@ -140,7 +139,6 @@ bool enable_incremental_sort = true;
bool enable_hashagg = true;
bool enable_nestloop = true;
bool enable_material = true;
bool enable_resultcache = true;
bool enable_mergejoin = true;
bool enable_hashjoin = true;
bool enable_gathermerge = true;
@@ -2404,147 +2402,6 @@ cost_material(Path *path,
path->total_cost = startup_cost + run_cost;
}
/*
* cost_resultcache_rescan
* Determines the estimated cost of rescanning a ResultCache node.
*
* In order to estimate this, we must gain knowledge of how often we expect to
* be called and how many distinct sets of parameters we are likely to be
* called with. If we expect a good cache hit ratio, then we can set our
* costs to account for that hit ratio, plus a little bit of cost for the
* caching itself. Caching will not work out well if we expect to be called
* with too many distinct parameter values. The worst-case here is that we
* never see any parameter value twice, in which case we'd never get a cache
* hit and caching would be a complete waste of effort.
*/
static void
cost_resultcache_rescan(PlannerInfo *root, ResultCachePath *rcpath,
Cost *rescan_startup_cost, Cost *rescan_total_cost)
{
EstimationInfo estinfo;
Cost input_startup_cost = rcpath->subpath->startup_cost;
Cost input_total_cost = rcpath->subpath->total_cost;
double tuples = rcpath->subpath->rows;
double calls = rcpath->calls;
int width = rcpath->subpath->pathtarget->width;
double hash_mem_bytes;
double est_entry_bytes;
double est_cache_entries;
double ndistinct;
double evict_ratio;
double hit_ratio;
Cost startup_cost;
Cost total_cost;
/* available cache space */
hash_mem_bytes = get_hash_mem() * 1024L;
/*
* Set the number of bytes each cache entry should consume in the cache.
* To provide us with better estimations on how many cache entries we can
* store at once, we make a call to the executor here to ask it what
* memory overheads there are for a single cache entry.
*
* XXX we also store the cache key, but that's not accounted for here.
*/
est_entry_bytes = relation_byte_size(tuples, width) +
ExecEstimateCacheEntryOverheadBytes(tuples);
/* estimate on the upper limit of cache entries we can hold at once */
est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
/* estimate on the distinct number of parameter values */
ndistinct = estimate_num_groups(root, rcpath->param_exprs, calls, NULL,
&estinfo);
/*
* When the estimation fell back on using a default value, it's a bit too
* risky to assume that it's ok to use a Result Cache. The use of a
* default could cause us to use a Result Cache when it's really
* inappropriate to do so. If we see that this has been done, then we'll
* assume that every call will have unique parameters, which will almost
* certainly mean a ResultCachePath will never survive add_path().
*/
if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
ndistinct = calls;
/*
* Since we've already estimated the maximum number of entries we can
* store at once and know the estimated number of distinct values we'll be
* called with, we'll take this opportunity to set the path's est_entries.
* This will ultimately determine the hash table size that the executor
* will use. If we leave this at zero, the executor will just choose the
* size itself. Really this is not the right place to do this, but it's
* convenient since everything is already calculated.
*/
rcpath->est_entries = Min(Min(ndistinct, est_cache_entries),
PG_UINT32_MAX);
/*
* When the number of distinct parameter values is above the amount we can
* store in the cache, then we'll have to evict some entries from the
* cache. This is not free. Here we estimate how often we'll incur the
* cost of that eviction.
*/
evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
/*
* In order to estimate how costly a single scan will be, we need to
* attempt to estimate what the cache hit ratio will be. To do that we
* must look at how many scans are estimated in total for this node and
* how many of those scans we expect to get a cache hit.
*/
hit_ratio = 1.0 / ndistinct * Min(est_cache_entries, ndistinct) -
(ndistinct / calls);
/* Ensure we don't go negative */
hit_ratio = Max(hit_ratio, 0.0);
/*
* Set the total_cost accounting for the expected cache hit ratio. We
* also add on a cpu_operator_cost to account for a cache lookup. This
* will happen regardless of whether it's a cache hit or not.
*/
total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
/* Now adjust the total cost to account for cache evictions */
/* Charge a cpu_tuple_cost for evicting the actual cache entry */
total_cost += cpu_tuple_cost * evict_ratio;
/*
* Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
* The per-tuple eviction is really just a pfree, so charging a whole
* cpu_operator_cost seems a little excessive.
*/
total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
/*
* Now adjust for storing things in the cache, since that's not free
* either. Everything must go in the cache. We don't proportion this
* over any ratio, just apply it once for the scan. We charge a
* cpu_tuple_cost for the creation of the cache entry and also a
* cpu_operator_cost for each tuple we expect to cache.
*/
total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
/*
* Getting the first row must also be proportioned according to the
* expected cache hit ratio.
*/
startup_cost = input_startup_cost * (1.0 - hit_ratio);
/*
* Additionally we charge a cpu_tuple_cost to account for cache lookups,
* which we'll do regardless of whether it was a cache hit or not.
*/
startup_cost += cpu_tuple_cost;
*rescan_startup_cost = startup_cost;
*rescan_total_cost = total_cost;
}
/*
* cost_agg
* Determines and returns the cost of performing an Agg plan node,
@@ -4285,11 +4142,6 @@ cost_rescan(PlannerInfo *root, Path *path,
*rescan_total_cost = run_cost;
}
break;
case T_ResultCache:
/* All the hard work is done by cost_resultcache_rescan */
cost_resultcache_rescan(root, (ResultCachePath *) path,
rescan_startup_cost, rescan_total_cost);
break;
default:
*rescan_startup_cost = path->startup_cost;
*rescan_total_cost = path->total_cost;
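
To make the cost model above concrete, here is a minimal standalone C sketch (illustration only, not PostgreSQL source) that reproduces the hit-ratio, eviction-ratio, and rescan-cost arithmetic of cost_resultcache_rescan for one set of hypothetical inputs. The cpu_operator_cost and cpu_tuple_cost values are PostgreSQL's defaults; every other number is invented for the example, and est_cache_entries stands in for the floor(hash_mem_bytes / est_entry_bytes) computation shown above.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical planner estimates */
	double calls = 1000.0;           /* expected rescans of the node */
	double ndistinct = 100.0;        /* distinct parameter sets expected */
	double est_cache_entries = 80.0; /* entries that fit in hash_mem */
	double input_total_cost = 25.0;  /* cost of one full inner scan */
	double tuples = 10.0;            /* rows returned per inner scan */

	/* PostgreSQL's default cost parameters */
	double cpu_operator_cost = 0.0025;
	double cpu_tuple_cost = 0.01;

	/* fraction of scans that must evict an existing cache entry */
	double evict_ratio = 1.0 - fmin(est_cache_entries, ndistinct) / ndistinct;

	/* expected fraction of rescans answered from the cache */
	double hit_ratio = 1.0 / ndistinct * fmin(est_cache_entries, ndistinct) -
		(ndistinct / calls);

	hit_ratio = fmax(hit_ratio, 0.0);

	/* misses pay the full input cost; every rescan pays one hash lookup */
	double total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;

	total_cost += cpu_tuple_cost * evict_ratio;                    /* evict the entry */
	total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples; /* free its tuples */
	total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;     /* fill the cache */

	/* prints: evict_ratio=0.200 hit_ratio=0.700 rescan_total_cost=7.5400 */
	printf("evict_ratio=%.3f hit_ratio=%.3f rescan_total_cost=%.4f\n",
		   evict_ratio, hit_ratio, total_cost);
	return 0;
}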

src/backend/optimizer/path/joinpath.c

@@ -18,13 +18,10 @@
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/planmain.h"
#include "utils/typcache.h"
/* Hook for plugins to get control in add_paths_to_joinrel() */
set_join_pathlist_hook_type set_join_pathlist_hook = NULL;
@@ -55,9 +52,6 @@ static void try_partial_mergejoin_path(PlannerInfo *root,
static void sort_inner_and_outer(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
JoinType jointype, JoinPathExtraData *extra);
static inline bool clause_sides_match_join(RestrictInfo *rinfo,
RelOptInfo *outerrel,
RelOptInfo *innerrel);
static void match_unsorted_outer(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
JoinType jointype, JoinPathExtraData *extra);
@@ -169,11 +163,6 @@ add_paths_to_joinrel(PlannerInfo *root,
{
case JOIN_SEMI:
case JOIN_ANTI:
/*
* XXX it may be worth proving this to allow a ResultCache to be
* considered for Nested Loop Semi/Anti Joins.
*/
extra.inner_unique = false; /* well, unproven */
break;
case JOIN_UNIQUE_INNER:
@@ -365,180 +354,6 @@ allow_star_schema_join(PlannerInfo *root,
bms_nonempty_difference(inner_paramrels, outerrelids));
}
/*
* paraminfo_get_equal_hashops
* Determine if param_info and innerrel's lateral_vars can be hashed.
* Returns true if the hashing is possible, otherwise return false.
*
* Additionally we also collect the outer exprs and the hash operators for
* each parameter to innerrel. These are set in 'param_exprs' and 'operators'
* when we return true.
*/
static bool
paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List **param_exprs, List **operators)
{
ListCell *lc;
*param_exprs = NIL;
*operators = NIL;
if (param_info != NULL)
{
List *clauses = param_info->ppi_clauses;
foreach(lc, clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
OpExpr *opexpr;
Node *expr;
/* can't use result cache without a valid hash equals operator */
if (!OidIsValid(rinfo->hasheqoperator) ||
!clause_sides_match_join(rinfo, outerrel, innerrel))
{
list_free(*operators);
list_free(*param_exprs);
return false;
}
/*
* We already checked that this is an OpExpr with 2 args when
* setting hasheqoperator.
*/
opexpr = (OpExpr *) rinfo->clause;
if (rinfo->outer_is_left)
expr = (Node *) linitial(opexpr->args);
else
expr = (Node *) lsecond(opexpr->args);
*operators = lappend_oid(*operators, rinfo->hasheqoperator);
*param_exprs = lappend(*param_exprs, expr);
}
}
/* Now add any lateral vars to the cache key too */
foreach(lc, innerrel->lateral_vars)
{
Node *expr = (Node *) lfirst(lc);
TypeCacheEntry *typentry;
/* Reject if there are any volatile functions */
if (contain_volatile_functions(expr))
{
list_free(*operators);
list_free(*param_exprs);
return false;
}
typentry = lookup_type_cache(exprType(expr),
TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
/* can't use result cache without a valid hash equals operator */
if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
{
list_free(*operators);
list_free(*param_exprs);
return false;
}
*operators = lappend_oid(*operators, typentry->eq_opr);
*param_exprs = lappend(*param_exprs, expr);
}
/* We're okay to use result cache */
return true;
}
/*
* get_resultcache_path
* If possible, make and return a Result Cache path atop of 'inner_path'.
* Otherwise return NULL.
*/
static Path *
get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
RelOptInfo *outerrel, Path *inner_path,
Path *outer_path, JoinType jointype,
JoinPathExtraData *extra)
{
List *param_exprs;
List *hash_operators;
ListCell *lc;
/* Obviously not if it's disabled */
if (!enable_resultcache)
return NULL;
/*
* We can safely not bother with all this unless we expect to perform more
* than one inner scan. The first scan is always going to be a cache
* miss. This would likely fail later anyway based on costs, so this is
* really just to save some wasted effort.
*/
if (outer_path->parent->rows < 2)
return NULL;
/*
* We can only have a result cache when there's some kind of cache key,
* either parameterized path clauses or lateral Vars.  With no cache key,
* a Materialize node would likely be more useful.
*/
if ((inner_path->param_info == NULL ||
inner_path->param_info->ppi_clauses == NIL) &&
innerrel->lateral_vars == NIL)
return NULL;
/*
* Currently we don't do this for SEMI and ANTI joins unless they're
* marked as inner_unique. This is because nested loop SEMI/ANTI joins
* don't scan the inner node to completion, which will mean result cache
* cannot mark the cache entry as complete.
*
* XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
* = true. Should we? See add_paths_to_joinrel()
*/
if (!extra->inner_unique && (jointype == JOIN_SEMI ||
jointype == JOIN_ANTI))
return NULL;
/*
* We can't use a result cache if there are volatile functions in the
* inner rel's target list or restrict list. A cache hit could reduce the
* number of calls to these functions.
*/
if (contain_volatile_functions((Node *) innerrel->reltarget))
return NULL;
foreach(lc, innerrel->baserestrictinfo)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
if (contain_volatile_functions((Node *) rinfo))
return NULL;
}
/* Check if we have hash ops for each parameter to the path */
if (paraminfo_get_equal_hashops(root,
inner_path->param_info,
outerrel,
innerrel,
&param_exprs,
&hash_operators))
{
return (Path *) create_resultcache_path(root,
innerrel,
inner_path,
param_exprs,
hash_operators,
extra->inner_unique,
outer_path->parent->rows);
}
return NULL;
}
/*
* try_nestloop_path
* Consider a nestloop join path; if it appears useful, push it into
@@ -1656,7 +1471,6 @@ match_unsorted_outer(PlannerInfo *root,
foreach(lc2, innerrel->cheapest_parameterized_paths)
{
Path *innerpath = (Path *) lfirst(lc2);
Path *rcpath;
try_nestloop_path(root,
joinrel,
@@ -1665,22 +1479,6 @@ match_unsorted_outer(PlannerInfo *root,
merge_pathkeys,
jointype,
extra);
/*
* Try generating a result cache path and see if that makes the
* nested loop any cheaper.
*/
rcpath = get_resultcache_path(root, innerrel, outerrel,
innerpath, outerpath, jointype,
extra);
if (rcpath != NULL)
try_nestloop_path(root,
joinrel,
outerpath,
rcpath,
merge_pathkeys,
jointype,
extra);
}
/* Also consider materialized form of the cheapest inner path */
@@ -1835,7 +1633,6 @@ consider_parallel_nestloop(PlannerInfo *root,
foreach(lc2, innerrel->cheapest_parameterized_paths)
{
Path *innerpath = (Path *) lfirst(lc2);
Path *rcpath;
/* Can't join to an inner path that is not parallel-safe */
if (!innerpath->parallel_safe)
@@ -1860,17 +1657,6 @@ consider_parallel_nestloop(PlannerInfo *root,
try_partial_nestloop_path(root, joinrel, outerpath, innerpath,
pathkeys, jointype, extra);
/*
* Try generating a result cache path and see if that makes the
* nested loop any cheaper.
*/
rcpath = get_resultcache_path(root, innerrel, outerrel,
innerpath, outerpath, jointype,
extra);
if (rcpath != NULL)
try_partial_nestloop_path(root, joinrel, outerpath, rcpath,
pathkeys, jointype, extra);
}
}
}
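
For intuition about what these paths buy, the following toy C sketch (illustration only: integer keys, one cached value per entry, modulo hashing, and a fixed CACHE_SIZE standing in for est_entries are all assumptions, not how the executor's nodeResultCache.c works) shows the kind of parameter-keyed cache a Result Cache node maintains over the inner side of a nested loop, including the hit and miss counters whose broken reporting prompted this revert.

#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 8			/* stands in for est_entries */

typedef struct CacheEntry
{
	int64_t key;
	int64_t value;
	int used;
} CacheEntry;

static CacheEntry cache[CACHE_SIZE];
static uint64_t hits;
static uint64_t misses;

/* the expensive parameterized inner scan we are trying not to repeat */
static int64_t
inner_scan(int64_t param)
{
	return param * param;
}

static int64_t
cached_scan(int64_t param)
{
	CacheEntry *e = &cache[(uint64_t) param % CACHE_SIZE];

	if (e->used && e->key == param)
	{
		hits++;					/* cache hit: skip the inner scan */
		return e->value;
	}
	misses++;					/* miss; may overwrite (evict) a colliding key */
	e->key = param;
	e->value = inner_scan(param);
	e->used = 1;
	return e->value;
}

int main(void)
{
	/* repeated parameter values, as the outer side of a nested loop yields */
	int64_t params[] = {1, 2, 1, 3, 2, 1};

	for (int i = 0; i < 6; i++)
		cached_scan(params[i]);

	/* prints: Hits: 3  Misses: 3 */
	printf("Hits: %llu  Misses: %llu\n",
		   (unsigned long long) hits, (unsigned long long) misses);
	return 0;
}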

src/backend/optimizer/plan/createplan.c

@@ -91,9 +91,6 @@ static Result *create_group_result_plan(PlannerInfo *root,
static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
int flags);
static ResultCache *create_resultcache_plan(PlannerInfo *root,
ResultCachePath *best_path,
int flags);
static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
int flags);
static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
@@ -280,11 +277,6 @@ static Sort *make_sort_from_groupcols(List *groupcls,
AttrNumber *grpColIdx,
Plan *lefttree);
static Material *make_material(Plan *lefttree);
static ResultCache *make_resultcache(Plan *lefttree, Oid *hashoperators,
Oid *collations,
List *param_exprs,
bool singlerow,
uint32 est_entries);
static WindowAgg *make_windowagg(List *tlist, Index winref,
int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
@@ -461,11 +453,6 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
(MaterialPath *) best_path,
flags);
break;
case T_ResultCache:
plan = (Plan *) create_resultcache_plan(root,
(ResultCachePath *) best_path,
flags);
break;
case T_Unique:
if (IsA(best_path, UpperUniquePath))
{
@@ -1579,56 +1566,6 @@ create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
return plan;
}
/*
* create_resultcache_plan
* Create a ResultCache plan for 'best_path' and (recursively) plans
* for its subpaths.
*
* Returns a Plan node.
*/
static ResultCache *
create_resultcache_plan(PlannerInfo *root, ResultCachePath *best_path, int flags)
{
ResultCache *plan;
Plan *subplan;
Oid *operators;
Oid *collations;
List *param_exprs = NIL;
ListCell *lc;
ListCell *lc2;
int nkeys;
int i;
subplan = create_plan_recurse(root, best_path->subpath,
flags | CP_SMALL_TLIST);
param_exprs = (List *) replace_nestloop_params(root, (Node *)
best_path->param_exprs);
nkeys = list_length(param_exprs);
Assert(nkeys > 0);
operators = palloc(nkeys * sizeof(Oid));
collations = palloc(nkeys * sizeof(Oid));
i = 0;
forboth(lc, param_exprs, lc2, best_path->hash_operators)
{
Expr *param_expr = (Expr *) lfirst(lc);
Oid opno = lfirst_oid(lc2);
operators[i] = opno;
collations[i] = exprCollation((Node *) param_expr);
i++;
}
plan = make_resultcache(subplan, operators, collations, param_exprs,
best_path->singlerow, best_path->est_entries);
copy_generic_path_info(&plan->plan, (Path *) best_path);
return plan;
}
/*
* create_unique_plan
* Create a Unique plan for 'best_path' and (recursively) plans
@@ -6515,28 +6452,6 @@ materialize_finished_plan(Plan *subplan)
return matplan;
}
static ResultCache *
make_resultcache(Plan *lefttree, Oid *hashoperators, Oid *collations,
List *param_exprs, bool singlerow, uint32 est_entries)
{
ResultCache *node = makeNode(ResultCache);
Plan *plan = &node->plan;
plan->targetlist = lefttree->targetlist;
plan->qual = NIL;
plan->lefttree = lefttree;
plan->righttree = NULL;
node->numKeys = list_length(param_exprs);
node->hashOperators = hashoperators;
node->collations = collations;
node->param_exprs = param_exprs;
node->singlerow = singlerow;
node->est_entries = est_entries;
return node;
}
Agg *
make_agg(List *tlist, List *qual,
AggStrategy aggstrategy, AggSplit aggsplit,
@@ -7123,7 +7038,6 @@ is_projection_capable_path(Path *path)
{
case T_Hash:
case T_Material:
case T_ResultCache:
case T_Sort:
case T_IncrementalSort:
case T_Unique:
@@ -7169,7 +7083,6 @@ is_projection_capable_plan(Plan *plan)
{
case T_Hash:
case T_Material:
case T_ResultCache:
case T_Sort:
case T_Unique:
case T_SetOp:

src/backend/optimizer/plan/initsplan.c

@@ -33,7 +33,6 @@
#include "parser/analyze.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"
#include "utils/typcache.h"
/* These parameters are set by GUC */
int from_collapse_limit;
@@ -78,7 +77,6 @@ static bool check_equivalence_delay(PlannerInfo *root,
static bool check_redundant_nullability_qual(PlannerInfo *root, Node *clause);
static void check_mergejoinable(RestrictInfo *restrictinfo);
static void check_hashjoinable(RestrictInfo *restrictinfo);
static void check_resultcacheable(RestrictInfo *restrictinfo);
/*****************************************************************************
@@ -2210,13 +2208,6 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
*/
check_hashjoinable(restrictinfo);
/*
* Likewise, check if the clause is suitable to be used with a
* Result Cache node to cache inner tuples during a parameterized
* nested loop.
*/
check_resultcacheable(restrictinfo);
/*
* Add clause to the join lists of all the relevant relations.
*/
@@ -2459,7 +2450,6 @@ build_implied_join_equality(PlannerInfo *root,
/* Set mergejoinability/hashjoinability flags */
check_mergejoinable(restrictinfo);
check_hashjoinable(restrictinfo);
check_resultcacheable(restrictinfo);
return restrictinfo;
}
@@ -2707,34 +2697,3 @@ check_hashjoinable(RestrictInfo *restrictinfo)
!contain_volatile_functions((Node *) restrictinfo))
restrictinfo->hashjoinoperator = opno;
}
/*
* check_resultcacheable
* If the restrictinfo's clause is suitable to be used for a Result Cache
* node, set the hasheqoperator to the hash equality operator that will be
* needed during caching.
*/
static void
check_resultcacheable(RestrictInfo *restrictinfo)
{
TypeCacheEntry *typentry;
Expr *clause = restrictinfo->clause;
Node *leftarg;
if (restrictinfo->pseudoconstant)
return;
if (!is_opclause(clause))
return;
if (list_length(((OpExpr *) clause)->args) != 2)
return;
leftarg = linitial(((OpExpr *) clause)->args);
typentry = lookup_type_cache(exprType(leftarg), TYPECACHE_HASH_PROC |
TYPECACHE_EQ_OPR);
if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
return;
restrictinfo->hasheqoperator = typentry->eq_opr;
}
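
check_resultcacheable boils down to one question per clause: does the key's type provide both a hash function and an equality operator? Below is a toy C stand-in for that gate; the TypeInfo table and Oid values are fabricated for illustration, whereas PostgreSQL answers the question through lookup_type_cache() as shown above.

#include <stdio.h>

typedef unsigned int Oid;

#define InvalidOid		((Oid) 0)
#define OidIsValid(oid) ((oid) != InvalidOid)

/* fabricated stand-in for a type-cache entry */
typedef struct TypeInfo
{
	const char *name;
	Oid hash_proc;				/* hash function, or InvalidOid */
	Oid eq_opr;					/* equality operator, or InvalidOid */
} TypeInfo;

/* returns the equality operator to cache with, or InvalidOid */
static Oid
hash_eq_operator(const TypeInfo *type)
{
	if (!OidIsValid(type->hash_proc) || !OidIsValid(type->eq_opr))
		return InvalidOid;		/* not cacheable: hasheqoperator stays unset */
	return type->eq_opr;
}

int main(void)
{
	TypeInfo int4 = {"int4", 425, 96};	/* hashable and comparable */
	TypeInfo point = {"point", InvalidOid, InvalidOid};	/* neither */

	printf("%s -> %u\n", int4.name, hash_eq_operator(&int4));	/* int4 -> 96 */
	printf("%s -> %u\n", point.name, hash_eq_operator(&point));	/* point -> 0 */
	return 0;
}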

src/backend/optimizer/plan/setrefs.c

@@ -752,15 +752,6 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
set_hash_references(root, plan, rtoffset);
break;
case T_ResultCache:
{
ResultCache *rcplan = (ResultCache *) plan;
rcplan->param_exprs = fix_scan_list(root, rcplan->param_exprs,
rtoffset,
NUM_EXEC_TLIST(plan));
break;
}
case T_Material:
case T_Sort:
case T_IncrementalSort:

src/backend/optimizer/plan/subselect.c

@@ -2745,11 +2745,6 @@ finalize_plan(PlannerInfo *root, Plan *plan,
/* rescan_param does *not* get added to scan_params */
break;
case T_ResultCache:
finalize_primnode((Node *) ((ResultCache *) plan)->param_exprs,
&context);
break;
case T_ProjectSet:
case T_Hash:
case T_Material:

src/backend/optimizer/util/pathnode.c

@@ -1576,56 +1576,6 @@ create_material_path(RelOptInfo *rel, Path *subpath)
return pathnode;
}
/*
* create_resultcache_path
* Creates a path corresponding to a ResultCache plan, returning the
* pathnode.
*/
ResultCachePath *
create_resultcache_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
List *param_exprs, List *hash_operators,
bool singlerow, double calls)
{
ResultCachePath *pathnode = makeNode(ResultCachePath);
Assert(subpath->parent == rel);
pathnode->path.pathtype = T_ResultCache;
pathnode->path.parent = rel;
pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = subpath->param_info;
pathnode->path.parallel_aware = false;
pathnode->path.parallel_safe = rel->consider_parallel &&
subpath->parallel_safe;
pathnode->path.parallel_workers = subpath->parallel_workers;
pathnode->path.pathkeys = subpath->pathkeys;
pathnode->subpath = subpath;
pathnode->hash_operators = hash_operators;
pathnode->param_exprs = param_exprs;
pathnode->singlerow = singlerow;
pathnode->calls = calls;
/*
* For now we set est_entries to 0. cost_resultcache_rescan() does all
* the hard work to determine how many cache entries there are likely to
* be, so it seems best to leave it up to that function to fill this field
* in. If left at 0, the executor will make a guess at a good value.
*/
pathnode->est_entries = 0;
/*
* Add a small additional charge for caching the first entry. All the
* harder calculations for rescans are performed in
* cost_resultcache_rescan().
*/
pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
pathnode->path.rows = subpath->rows;
return pathnode;
}
/*
* create_unique_path
* Creates a path representing elimination of distinct rows from the
@@ -3919,17 +3869,6 @@ reparameterize_path(PlannerInfo *root, Path *path,
apath->path.parallel_aware,
-1);
}
case T_ResultCache:
{
ResultCachePath *rcpath = (ResultCachePath *) path;
return (Path *) create_resultcache_path(root, rel,
rcpath->subpath,
rcpath->param_exprs,
rcpath->hash_operators,
rcpath->singlerow,
rcpath->calls);
}
default:
break;
}
@@ -4148,16 +4087,6 @@ do { \
}
break;
case T_ResultCachePath:
{
ResultCachePath *rcpath;
FLAT_COPY_PATH(rcpath, path, ResultCachePath);
REPARAMETERIZE_CHILD_PATH(rcpath->subpath);
new_path = (Path *) rcpath;
}
break;
case T_GatherPath:
{
GatherPath *gpath;

src/backend/optimizer/util/restrictinfo.c

@@ -217,8 +217,6 @@ make_restrictinfo_internal(PlannerInfo *root,
restrictinfo->left_mcvfreq = -1;
restrictinfo->right_mcvfreq = -1;
restrictinfo->hasheqoperator = InvalidOid;
return restrictinfo;
}
@@ -368,7 +366,6 @@ commute_restrictinfo(RestrictInfo *rinfo, Oid comm_op)
result->right_bucketsize = rinfo->left_bucketsize;
result->left_mcvfreq = rinfo->right_mcvfreq;
result->right_mcvfreq = rinfo->left_mcvfreq;
result->hasheqoperator = InvalidOid;
return result;
}