Mirror of https://github.com/postgres/postgres.git
Change the name of the Result Cache node to Memoize
"Result Cache" was never a great name for this node, but nobody managed to come up with another name that anyone liked enough. That was until David Johnston mentioned "Node Memoization", which Tom Lane revised to just "Memoize". People seem to like "Memoize", so let's do the rename. Reviewed-by: Justin Pryzby Discussion: https://postgr.es/m/20210708165145.GG1176@momjian.us Backpatch-through: 14, where Result Cache was introduced
@@ -382,7 +382,7 @@ RelOptInfo - a relation or joined relations
 MergeAppendPath - merge multiple subpaths, preserving their common sort order
 GroupResultPath - childless Result plan node (used for degenerate grouping)
 MaterialPath - a Material plan node
-ResultCachePath - a result cache plan node for caching tuples from sub-paths
+MemoizePath - a Memoize plan node for caching tuples from sub-paths
 UniquePath - remove duplicate rows (either by hashing or sorting)
 GatherPath - collect the results of parallel workers
 GatherMergePath - collect parallel results, preserving their common sort order
@@ -4031,9 +4031,9 @@ print_path(PlannerInfo *root, Path *path, int indent)
             ptype = "Material";
             subpath = ((MaterialPath *) path)->subpath;
             break;
-        case T_ResultCachePath:
-            ptype = "ResultCache";
-            subpath = ((ResultCachePath *) path)->subpath;
+        case T_MemoizePath:
+            ptype = "Memoize";
+            subpath = ((MemoizePath *) path)->subpath;
             break;
         case T_UniquePath:
             ptype = "Unique";
@@ -79,7 +79,7 @@
 #include "executor/executor.h"
 #include "executor/nodeAgg.h"
 #include "executor/nodeHash.h"
-#include "executor/nodeResultCache.h"
+#include "executor/nodeMemoize.h"
 #include "miscadmin.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
@@ -140,7 +140,7 @@ bool enable_incremental_sort = true;
 bool enable_hashagg = true;
 bool enable_nestloop = true;
 bool enable_material = true;
-bool enable_resultcache = true;
+bool enable_memoize = true;
 bool enable_mergejoin = true;
 bool enable_hashjoin = true;
 bool enable_gathermerge = true;
@@ -2405,8 +2405,8 @@ cost_material(Path *path,
 }
 
 /*
- * cost_resultcache_rescan
- *    Determines the estimated cost of rescanning a ResultCache node.
+ * cost_memoize_rescan
+ *    Determines the estimated cost of rescanning a Memoize node.
  *
  * In order to estimate this, we must gain knowledge of how often we expect to
  * be called and how many distinct sets of parameters we are likely to be
@@ -2418,15 +2418,15 @@ cost_material(Path *path,
 * hit and caching would be a complete waste of effort.
 */
 static void
-cost_resultcache_rescan(PlannerInfo *root, ResultCachePath *rcpath,
-                        Cost *rescan_startup_cost, Cost *rescan_total_cost)
+cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
+                    Cost *rescan_startup_cost, Cost *rescan_total_cost)
 {
     EstimationInfo estinfo;
-    Cost input_startup_cost = rcpath->subpath->startup_cost;
-    Cost input_total_cost = rcpath->subpath->total_cost;
-    double tuples = rcpath->subpath->rows;
-    double calls = rcpath->calls;
-    int width = rcpath->subpath->pathtarget->width;
+    Cost input_startup_cost = mpath->subpath->startup_cost;
+    Cost input_total_cost = mpath->subpath->total_cost;
+    double tuples = mpath->subpath->rows;
+    double calls = mpath->calls;
+    int width = mpath->subpath->pathtarget->width;
 
     double hash_mem_bytes;
     double est_entry_bytes;
@@ -2455,16 +2455,16 @@ cost_resultcache_rescan(PlannerInfo *root, ResultCachePath *rcpath,
     est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
 
     /* estimate on the distinct number of parameter values */
-    ndistinct = estimate_num_groups(root, rcpath->param_exprs, calls, NULL,
+    ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
                                     &estinfo);
 
     /*
     * When the estimation fell back on using a default value, it's a bit too
-    * risky to assume that it's ok to use a Result Cache. The use of a
-    * default could cause us to use a Result Cache when it's really
+    * risky to assume that it's ok to use a Memoize node. The use of a
+    * default could cause us to use a Memoize node when it's really
     * inappropriate to do so. If we see that this has been done, then we'll
     * assume that every call will have unique parameters, which will almost
-    * certainly mean a ResultCachePath will never survive add_path().
+    * certainly mean a MemoizePath will never survive add_path().
     */
     if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
         ndistinct = calls;
@@ -2478,8 +2478,8 @@ cost_resultcache_rescan(PlannerInfo *root, ResultCachePath *rcpath,
     * size itself. Really this is not the right place to do this, but it's
     * convenient since everything is already calculated.
     */
-    rcpath->est_entries = Min(Min(ndistinct, est_cache_entries),
-                              PG_UINT32_MAX);
+    mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
+                             PG_UINT32_MAX);
 
     /*
     * When the number of distinct parameter values is above the amount we can
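The two hunks above carry the core of the new cost_memoize_rescan() estimation: size a cache entry, see how many entries fit in hash_mem, estimate the number of distinct parameter values, and clamp. The following standalone C sketch (not PostgreSQL source; the function, its arguments, and the numbers in main() are invented for illustration) condenses those steps:

    /*
     * Sketch of how est_entries is derived in the patch above: clamp the
     * estimated number of distinct parameter values against how many cache
     * entries fit in hash_mem, falling back to "every call is unique" when
     * the ndistinct estimate came from a default.
     */
    #include <math.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    sketch_est_entries(double hash_mem_bytes, double est_entry_bytes,
                       double ndistinct, bool used_default_estimate,
                       double calls)
    {
        /* how many cache entries fit into hash_mem */
        double est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
        double entries;

        /* a defaulted ndistinct is too risky; assume every call is unique */
        if (used_default_estimate)
            ndistinct = calls;

        /* est_entries = Min(Min(ndistinct, est_cache_entries), PG_UINT32_MAX) */
        entries = ndistinct < est_cache_entries ? ndistinct : est_cache_entries;
        return (uint32_t) (entries < (double) UINT32_MAX ? entries : UINT32_MAX);
    }

    int
    main(void)
    {
        /* e.g. 4MB of hash_mem, ~1kB per entry, 500 distinct keys, 10000 calls */
        printf("est_entries = %u\n",
               sketch_est_entries(4.0 * 1024 * 1024, 1024.0, 500.0, false, 10000.0));
        return 0;
    }
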
@@ -4285,10 +4285,10 @@ cost_rescan(PlannerInfo *root, Path *path,
                 *rescan_total_cost = run_cost;
             }
             break;
-        case T_ResultCache:
-            /* All the hard work is done by cost_resultcache_rescan */
-            cost_resultcache_rescan(root, (ResultCachePath *) path,
-                                    rescan_startup_cost, rescan_total_cost);
+        case T_Memoize:
+            /* All the hard work is done by cost_memoize_rescan */
+            cost_memoize_rescan(root, (MemoizePath *) path,
+                                rescan_startup_cost, rescan_total_cost);
             break;
         default:
             *rescan_startup_cost = path->startup_cost;
@@ -171,7 +171,7 @@ add_paths_to_joinrel(PlannerInfo *root,
         case JOIN_ANTI:
 
             /*
-             * XXX it may be worth proving this to allow a ResultCache to be
+             * XXX it may be worth proving this to allow a Memoize to be
             * considered for Nested Loop Semi/Anti Joins.
             */
             extra.inner_unique = false; /* well, unproven */
@@ -395,7 +395,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
         OpExpr *opexpr;
         Node *expr;
 
-        /* can't use result cache without a valid hash equals operator */
+        /* can't use a memoize node without a valid hash equals operator */
         if (!OidIsValid(rinfo->hasheqoperator) ||
             !clause_sides_match_join(rinfo, outerrel, innerrel))
         {
@@ -436,7 +436,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
         typentry = lookup_type_cache(exprType(expr),
                                      TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
 
-        /* can't use result cache without a valid hash equals operator */
+        /* can't use a memoize node without a valid hash equals operator */
         if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
         {
             list_free(*operators);
@@ -448,27 +448,27 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
         *param_exprs = lappend(*param_exprs, expr);
     }
 
-    /* We're okay to use result cache */
+    /* We're okay to use memoize */
     return true;
 }
 
 /*
- * get_resultcache_path
- *    If possible, make and return a Result Cache path atop of 'inner_path'.
+ * get_memoize_path
+ *    If possible, make and return a Memoize path atop of 'inner_path'.
 *    Otherwise return NULL.
 */
 static Path *
-get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
-                     RelOptInfo *outerrel, Path *inner_path,
-                     Path *outer_path, JoinType jointype,
-                     JoinPathExtraData *extra)
+get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
+                 RelOptInfo *outerrel, Path *inner_path,
+                 Path *outer_path, JoinType jointype,
+                 JoinPathExtraData *extra)
 {
     List *param_exprs;
     List *hash_operators;
     ListCell *lc;
 
     /* Obviously not if it's disabled */
-    if (!enable_resultcache)
+    if (!enable_memoize)
         return NULL;
 
     /*
@@ -481,7 +481,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
         return NULL;
 
     /*
-    * We can only have a result cache when there's some kind of cache key,
+    * We can only have a memoize node when there's some kind of cache key,
     * either parameterized path clauses or lateral Vars. No cache key sounds
     * more like something a Materialize node might be more useful for.
     */
@@ -493,8 +493,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
     /*
     * Currently we don't do this for SEMI and ANTI joins unless they're
     * marked as inner_unique. This is because nested loop SEMI/ANTI joins
-    * don't scan the inner node to completion, which will mean result cache
-    * cannot mark the cache entry as complete.
+    * don't scan the inner node to completion, which will mean memoize cannot
+    * mark the cache entry as complete.
     *
     * XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
     * = true. Should we? See add_paths_to_joinrel()
@@ -504,8 +504,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
         return NULL;
 
     /*
-    * Result Cache normally marks cache entries as complete when it runs out
-    * of tuples to read from its subplan. However, with unique joins, Nested
+    * Memoize normally marks cache entries as complete when it runs out of
+    * tuples to read from its subplan. However, with unique joins, Nested
     * Loop will skip to the next outer tuple after finding the first matching
     * inner tuple. This means that we may not read the inner side of the
     * join to completion which leaves no opportunity to mark the cache entry
@@ -516,11 +516,11 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
     * condition, we can't be sure which part of it causes the join to be
     * unique. This means there are no guarantees that only 1 tuple will be
     * read. We cannot mark the cache entry as complete after reading the
-    * first tuple without that guarantee. This means the scope of Result
-    * Cache's usefulness is limited to only outer rows that have no join
+    * first tuple without that guarantee. This means the scope of Memoize
+    * node's usefulness is limited to only outer rows that have no join
     * partner as this is the only case where Nested Loop would exhaust the
     * inner scan of a unique join. Since the scope is limited to that, we
-    * just don't bother making a result cache path in this case.
+    * just don't bother making a memoize path in this case.
     *
     * Lateral vars needn't be considered here as they're not considered when
     * determining if the join is unique.
@@ -536,7 +536,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
         return NULL;
 
     /*
-    * We can't use a result cache if there are volatile functions in the
+    * We can't use a memoize node if there are volatile functions in the
     * inner rel's target list or restrict list. A cache hit could reduce the
     * number of calls to these functions.
     */
@@ -559,13 +559,13 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
                                     &param_exprs,
                                     &hash_operators))
     {
-        return (Path *) create_resultcache_path(root,
-                                                innerrel,
-                                                inner_path,
-                                                param_exprs,
-                                                hash_operators,
-                                                extra->inner_unique,
-                                                outer_path->parent->rows);
+        return (Path *) create_memoize_path(root,
+                                            innerrel,
+                                            inner_path,
+                                            param_exprs,
+                                            hash_operators,
+                                            extra->inner_unique,
+                                            outer_path->parent->rows);
     }
 
     return NULL;
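Taken together, the get_memoize_path() hunks above amount to a short list of preconditions before a MemoizePath is built. This standalone C checklist is only an illustration (the struct, its fields, and main() are invented; the unique-join restriction discussed in the comments above is reduced to a single flag here):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct MemoizeCandidate
    {
        bool enable_memoize;        /* GUC must be on */
        bool has_cache_key;         /* param clauses or lateral Vars exist */
        bool semi_or_anti_join;
        bool inner_unique;
        bool inner_has_volatile;    /* volatile funcs in tlist/restrict list */
        bool params_all_hashable;   /* hash + equality ops for every key */
    } MemoizeCandidate;

    static bool
    memoize_path_possible(const MemoizeCandidate *c)
    {
        if (!c->enable_memoize)
            return false;           /* disabled by the enable_memoize GUC */
        if (!c->has_cache_key)
            return false;           /* no key: Material is a better fit */
        if (c->semi_or_anti_join && !c->inner_unique)
            return false;           /* inner side may not be read to completion */
        if (c->inner_has_volatile)
            return false;           /* a cache hit would skip volatile calls */
        if (!c->params_all_hashable)
            return false;           /* need hash equality ops to probe the cache */
        return true;
    }

    int
    main(void)
    {
        MemoizeCandidate c = {true, true, false, false, false, true};

        printf("memoize considered: %s\n", memoize_path_possible(&c) ? "yes" : "no");
        return 0;
    }
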
@@ -1688,7 +1688,7 @@ match_unsorted_outer(PlannerInfo *root,
         foreach(lc2, innerrel->cheapest_parameterized_paths)
         {
             Path *innerpath = (Path *) lfirst(lc2);
-            Path *rcpath;
+            Path *mpath;
 
             try_nestloop_path(root,
                               joinrel,
@@ -1699,17 +1699,17 @@ match_unsorted_outer(PlannerInfo *root,
                               extra);
 
             /*
-             * Try generating a result cache path and see if that makes
-             * the nested loop any cheaper.
+             * Try generating a memoize path and see if that makes the
+             * nested loop any cheaper.
             */
-            rcpath = get_resultcache_path(root, innerrel, outerrel,
-                                          innerpath, outerpath, jointype,
-                                          extra);
-            if (rcpath != NULL)
+            mpath = get_memoize_path(root, innerrel, outerrel,
+                                     innerpath, outerpath, jointype,
+                                     extra);
+            if (mpath != NULL)
                 try_nestloop_path(root,
                                   joinrel,
                                   outerpath,
-                                  rcpath,
+                                  mpath,
                                   merge_pathkeys,
                                   jointype,
                                   extra);
@@ -1867,7 +1867,7 @@ consider_parallel_nestloop(PlannerInfo *root,
         foreach(lc2, innerrel->cheapest_parameterized_paths)
         {
             Path *innerpath = (Path *) lfirst(lc2);
-            Path *rcpath;
+            Path *mpath;
 
             /* Can't join to an inner path that is not parallel-safe */
             if (!innerpath->parallel_safe)
@@ -1894,14 +1894,14 @@ consider_parallel_nestloop(PlannerInfo *root,
                                       pathkeys, jointype, extra);
 
             /*
-             * Try generating a result cache path and see if that makes the
-             * nested loop any cheaper.
+             * Try generating a memoize path and see if that makes the nested
+             * loop any cheaper.
             */
-            rcpath = get_resultcache_path(root, innerrel, outerrel,
-                                          innerpath, outerpath, jointype,
-                                          extra);
-            if (rcpath != NULL)
-                try_partial_nestloop_path(root, joinrel, outerpath, rcpath,
+            mpath = get_memoize_path(root, innerrel, outerrel,
+                                     innerpath, outerpath, jointype,
+                                     extra);
+            if (mpath != NULL)
+                try_partial_nestloop_path(root, joinrel, outerpath, mpath,
                                           pathkeys, jointype, extra);
         }
     }
@@ -92,9 +92,8 @@ static Result *create_group_result_plan(PlannerInfo *root,
 static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
                                       int flags);
-static ResultCache *create_resultcache_plan(PlannerInfo *root,
-                                            ResultCachePath *best_path,
-                                            int flags);
+static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
+                                    int flags);
 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
                                 int flags);
 static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
@@ -278,11 +277,9 @@ static Sort *make_sort_from_groupcols(List *groupcls,
                                       AttrNumber *grpColIdx,
                                       Plan *lefttree);
 static Material *make_material(Plan *lefttree);
-static ResultCache *make_resultcache(Plan *lefttree, Oid *hashoperators,
-                                     Oid *collations,
-                                     List *param_exprs,
-                                     bool singlerow,
-                                     uint32 est_entries);
+static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
+                             Oid *collations, List *param_exprs,
+                             bool singlerow, uint32 est_entries);
 static WindowAgg *make_windowagg(List *tlist, Index winref,
                                  int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
                                  int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
@@ -459,10 +456,10 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
                                              (MaterialPath *) best_path,
                                              flags);
             break;
-        case T_ResultCache:
-            plan = (Plan *) create_resultcache_plan(root,
-                                                    (ResultCachePath *) best_path,
-                                                    flags);
+        case T_Memoize:
+            plan = (Plan *) create_memoize_plan(root,
+                                                (MemoizePath *) best_path,
+                                                flags);
             break;
         case T_Unique:
             if (IsA(best_path, UpperUniquePath))
@@ -1578,16 +1575,16 @@ create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
 }
 
 /*
- * create_resultcache_plan
- *    Create a ResultCache plan for 'best_path' and (recursively) plans
- *    for its subpaths.
+ * create_memoize_plan
+ *    Create a Memoize plan for 'best_path' and (recursively) plans for its
+ *    subpaths.
 *
 * Returns a Plan node.
 */
-static ResultCache *
-create_resultcache_plan(PlannerInfo *root, ResultCachePath *best_path, int flags)
+static Memoize *
+create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
 {
-    ResultCache *plan;
+    Memoize *plan;
     Plan *subplan;
     Oid *operators;
     Oid *collations;
@@ -1619,8 +1616,8 @@ create_resultcache_plan(PlannerInfo *root, ResultCachePath *best_path, int flags
         i++;
     }
 
-    plan = make_resultcache(subplan, operators, collations, param_exprs,
-                            best_path->singlerow, best_path->est_entries);
+    plan = make_memoize(subplan, operators, collations, param_exprs,
+                        best_path->singlerow, best_path->est_entries);
 
     copy_generic_path_info(&plan->plan, (Path *) best_path);
 
@@ -6417,11 +6414,11 @@ materialize_finished_plan(Plan *subplan)
     return matplan;
 }
 
-static ResultCache *
-make_resultcache(Plan *lefttree, Oid *hashoperators, Oid *collations,
-                 List *param_exprs, bool singlerow, uint32 est_entries)
+static Memoize *
+make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
+             List *param_exprs, bool singlerow, uint32 est_entries)
 {
-    ResultCache *node = makeNode(ResultCache);
+    Memoize *node = makeNode(Memoize);
     Plan *plan = &node->plan;
 
     plan->targetlist = lefttree->targetlist;
@@ -7035,7 +7032,7 @@ is_projection_capable_path(Path *path)
     {
         case T_Hash:
         case T_Material:
-        case T_ResultCache:
+        case T_Memoize:
         case T_Sort:
         case T_IncrementalSort:
         case T_Unique:
@@ -7085,7 +7082,7 @@ is_projection_capable_plan(Plan *plan)
     {
         case T_Hash:
         case T_Material:
-        case T_ResultCache:
+        case T_Memoize:
         case T_Sort:
         case T_Unique:
         case T_SetOp:
@@ -78,7 +78,7 @@ static bool check_equivalence_delay(PlannerInfo *root,
 static bool check_redundant_nullability_qual(PlannerInfo *root, Node *clause);
 static void check_mergejoinable(RestrictInfo *restrictinfo);
 static void check_hashjoinable(RestrictInfo *restrictinfo);
-static void check_resultcacheable(RestrictInfo *restrictinfo);
+static void check_memoizable(RestrictInfo *restrictinfo);
 
 
 /*****************************************************************************
@@ -2212,10 +2212,10 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
 
         /*
         * Likewise, check if the clause is suitable to be used with a
-        * Result Cache node to cache inner tuples during a parameterized
+        * Memoize node to cache inner tuples during a parameterized
         * nested loop.
         */
-        check_resultcacheable(restrictinfo);
+        check_memoizable(restrictinfo);
 
         /*
         * Add clause to the join lists of all the relevant relations.
@@ -2459,7 +2459,7 @@ build_implied_join_equality(PlannerInfo *root,
     /* Set mergejoinability/hashjoinability flags */
     check_mergejoinable(restrictinfo);
     check_hashjoinable(restrictinfo);
-    check_resultcacheable(restrictinfo);
+    check_memoizable(restrictinfo);
 
     return restrictinfo;
 }
@@ -2709,13 +2709,13 @@ check_hashjoinable(RestrictInfo *restrictinfo)
 }
 
 /*
- * check_resultcacheable
- *    If the restrictinfo's clause is suitable to be used for a Result Cache
- *    node, set the hasheqoperator to the hash equality operator that will be
- *    needed during caching.
+ * check_memoizable
+ *    If the restrictinfo's clause is suitable to be used for a Memoize node,
+ *    set the hasheqoperator to the hash equality operator that will be needed
+ *    during caching.
 */
 static void
-check_resultcacheable(RestrictInfo *restrictinfo)
+check_memoizable(RestrictInfo *restrictinfo)
 {
     TypeCacheEntry *typentry;
     Expr *clause = restrictinfo->clause;
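The requirement that check_memoizable() records is the same one paraminfo_get_equal_hashops() tests earlier in the patch: the parameter's type must have both a hash function and an equality operator in the type cache. A minimal self-contained sketch (Oid, OidIsValid and TypeHashInfo are stand-ins defined locally for the example; this is not the PostgreSQL implementation):

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int Oid;
    #define InvalidOid ((Oid) 0)
    #define OidIsValid(oid) ((oid) != InvalidOid)

    typedef struct TypeHashInfo
    {
        Oid hash_proc;              /* hash function for the type, or InvalidOid */
        Oid eq_opr;                 /* equality operator, or InvalidOid */
    } TypeHashInfo;

    /* mirrors: !OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr) */
    static bool
    type_is_memoizable(const TypeHashInfo *t)
    {
        return OidIsValid(t->hash_proc) && OidIsValid(t->eq_opr);
    }

    int
    main(void)
    {
        TypeHashInfo with_hash = {450, 91};     /* made-up OIDs */
        TypeHashInfo no_hash = {InvalidOid, 91};

        printf("%d %d\n", type_is_memoizable(&with_hash), type_is_memoizable(&no_hash));
        return 0;
    }
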
@@ -752,19 +752,19 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
             set_hash_references(root, plan, rtoffset);
             break;
 
-        case T_ResultCache:
+        case T_Memoize:
             {
-                ResultCache *rcplan = (ResultCache *) plan;
+                Memoize *mplan = (Memoize *) plan;
 
                 /*
-                 * Result Cache does not evaluate its targetlist. It just
-                 * uses the same targetlist from its outer subnode.
+                 * Memoize does not evaluate its targetlist. It just uses the
+                 * same targetlist from its outer subnode.
                 */
                 set_dummy_tlist_references(plan, rtoffset);
 
-                rcplan->param_exprs = fix_scan_list(root, rcplan->param_exprs,
-                                                    rtoffset,
-                                                    NUM_EXEC_TLIST(plan));
+                mplan->param_exprs = fix_scan_list(root, mplan->param_exprs,
+                                                   rtoffset,
+                                                   NUM_EXEC_TLIST(plan));
                 break;
             }
 
@@ -2745,8 +2745,8 @@ finalize_plan(PlannerInfo *root, Plan *plan,
             /* rescan_param does *not* get added to scan_params */
             break;
 
-        case T_ResultCache:
-            finalize_primnode((Node *) ((ResultCache *) plan)->param_exprs,
+        case T_Memoize:
+            finalize_primnode((Node *) ((Memoize *) plan)->param_exprs,
                               &context);
             break;
 
@@ -1577,20 +1577,19 @@ create_material_path(RelOptInfo *rel, Path *subpath)
 }
 
 /*
- * create_resultcache_path
- *    Creates a path corresponding to a ResultCache plan, returning the
- *    pathnode.
+ * create_memoize_path
+ *    Creates a path corresponding to a Memoize plan, returning the pathnode.
 */
-ResultCachePath *
-create_resultcache_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
-                        List *param_exprs, List *hash_operators,
-                        bool singlerow, double calls)
+MemoizePath *
+create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+                    List *param_exprs, List *hash_operators,
+                    bool singlerow, double calls)
 {
-    ResultCachePath *pathnode = makeNode(ResultCachePath);
+    MemoizePath *pathnode = makeNode(MemoizePath);
 
     Assert(subpath->parent == rel);
 
-    pathnode->path.pathtype = T_ResultCache;
+    pathnode->path.pathtype = T_Memoize;
     pathnode->path.parent = rel;
     pathnode->path.pathtarget = rel->reltarget;
     pathnode->path.param_info = subpath->param_info;
@@ -1607,17 +1606,16 @@ create_resultcache_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
     pathnode->calls = calls;
 
     /*
-    * For now we set est_entries to 0. cost_resultcache_rescan() does all
-    * the hard work to determine how many cache entries there are likely to
-    * be, so it seems best to leave it up to that function to fill this field
-    * in. If left at 0, the executor will make a guess at a good value.
+    * For now we set est_entries to 0. cost_memoize_rescan() does all the
+    * hard work to determine how many cache entries there are likely to be,
+    * so it seems best to leave it up to that function to fill this field in.
+    * If left at 0, the executor will make a guess at a good value.
     */
     pathnode->est_entries = 0;
 
     /*
     * Add a small additional charge for caching the first entry. All the
-    * harder calculations for rescans are performed in
-    * cost_resultcache_rescan().
+    * harder calculations for rescans are performed in cost_memoize_rescan().
     */
     pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
     pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
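The initial costing above only adds cpu_tuple_cost (0.01 by default in PostgreSQL) on top of the subpath's costs; the interesting rescan savings are computed later by cost_memoize_rescan(). A toy calculation with made-up subpath numbers:

    #include <stdio.h>

    int
    main(void)
    {
        const double cpu_tuple_cost = 0.01;   /* PostgreSQL default */
        double subpath_startup = 0.29;        /* hypothetical inner path costs */
        double subpath_total = 8.44;

        /* pathnode->path.startup_cost / total_cost as set in the hunk above */
        printf("memoize startup_cost = %.2f\n", subpath_startup + cpu_tuple_cost);
        printf("memoize total_cost   = %.2f\n", subpath_total + cpu_tuple_cost);
        return 0;
    }
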
@@ -3936,16 +3934,16 @@ reparameterize_path(PlannerInfo *root, Path *path,
                                          apath->path.parallel_aware,
                                          -1);
             }
-        case T_ResultCache:
+        case T_Memoize:
             {
-                ResultCachePath *rcpath = (ResultCachePath *) path;
+                MemoizePath *mpath = (MemoizePath *) path;
 
-                return (Path *) create_resultcache_path(root, rel,
-                                                        rcpath->subpath,
-                                                        rcpath->param_exprs,
-                                                        rcpath->hash_operators,
-                                                        rcpath->singlerow,
-                                                        rcpath->calls);
+                return (Path *) create_memoize_path(root, rel,
+                                                    mpath->subpath,
+                                                    mpath->param_exprs,
+                                                    mpath->hash_operators,
+                                                    mpath->singlerow,
+                                                    mpath->calls);
             }
         default:
             break;
@@ -4165,13 +4163,13 @@ do { \
             }
             break;
 
-        case T_ResultCachePath:
+        case T_MemoizePath:
             {
-                ResultCachePath *rcpath;
+                MemoizePath *mpath;
 
-                FLAT_COPY_PATH(rcpath, path, ResultCachePath);
-                REPARAMETERIZE_CHILD_PATH(rcpath->subpath);
-                new_path = (Path *) rcpath;
+                FLAT_COPY_PATH(mpath, path, MemoizePath);
+                REPARAMETERIZE_CHILD_PATH(mpath->subpath);
+                new_path = (Path *) mpath;
             }
             break;
 