Change the name of the Result Cache node to Memoize

"Result Cache" was never a great name for this node, but nobody managed
to come up with another name that anyone liked enough.  That was until
David Johnston mentioned "Node Memoization", which Tom Lane revised to
just "Memoize".  People seem to like "Memoize", so let's do the rename.

Reviewed-by: Justin Pryzby
Discussion: https://postgr.es/m/20210708165145.GG1176@momjian.us
Backpatch-through: 14, where Result Cache was introduced
Author: David Rowley
Date:   2021-07-14 12:45:00 +12:00
Parent: 6201fa3c16
Commit: 47ca483644

44 changed files with 596 additions and 607 deletions
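The rename is user-visible as well as internal: in the hunks below the planner GUC variable enable_resultcache becomes enable_memoize, and EXPLAIN now reports a Memoize node where it previously showed Result Cache. A minimal sketch of the effect (the tables, index name, and exact plan shape are hypothetical; whether the planner actually chooses a Memoize node depends on row estimates, indexes, and costing):

    -- Hypothetical schema: orders(customer_id) joined to customers(id),
    -- with an index on customers.id.
    SET enable_memoize = on;   -- formerly enable_resultcache

    EXPLAIN (COSTS OFF)
    SELECT *
    FROM orders o
    JOIN customers c ON c.id = o.customer_id;

    -- A qualifying plan now reads, for example:
    --   Nested Loop
    --     ->  Seq Scan on orders o
    --     ->  Memoize                    (previously shown as "Result Cache")
    --           Cache Key: o.customer_id
    --           ->  Index Scan using customers_pkey on customers c
    --                 Index Cond: (id = o.customer_id)

Only the names change; the caching behaviour of the node itself is untouched by this commit.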

src/backend/optimizer/path/joinpath.c

@@ -171,7 +171,7 @@ add_paths_to_joinrel(PlannerInfo *root,
         case JOIN_ANTI:
 
             /*
-             * XXX it may be worth proving this to allow a ResultCache to be
+             * XXX it may be worth proving this to allow a Memoize to be
              * considered for Nested Loop Semi/Anti Joins.
              */
             extra.inner_unique = false; /* well, unproven */
@@ -395,7 +395,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
         OpExpr     *opexpr;
         Node       *expr;
 
-        /* can't use result cache without a valid hash equals operator */
+        /* can't use a memoize node without a valid hash equals operator */
         if (!OidIsValid(rinfo->hasheqoperator) ||
             !clause_sides_match_join(rinfo, outerrel, innerrel))
         {
@@ -436,7 +436,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
         typentry = lookup_type_cache(exprType(expr),
                                      TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
 
-        /* can't use result cache without a valid hash equals operator */
+        /* can't use a memoize node without a valid hash equals operator */
         if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
         {
             list_free(*operators);
@@ -448,27 +448,27 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
         *param_exprs = lappend(*param_exprs, expr);
     }
 
-    /* We're okay to use result cache */
+    /* We're okay to use memoize */
     return true;
 }
 
 /*
- * get_resultcache_path
- *        If possible, make and return a Result Cache path atop of 'inner_path'.
+ * get_memoize_path
+ *        If possible, make and return a Memoize path atop of 'inner_path'.
  *        Otherwise return NULL.
  */
 static Path *
-get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
-                     RelOptInfo *outerrel, Path *inner_path,
-                     Path *outer_path, JoinType jointype,
-                     JoinPathExtraData *extra)
+get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
+                 RelOptInfo *outerrel, Path *inner_path,
+                 Path *outer_path, JoinType jointype,
+                 JoinPathExtraData *extra)
 {
     List       *param_exprs;
     List       *hash_operators;
     ListCell   *lc;
 
     /* Obviously not if it's disabled */
-    if (!enable_resultcache)
+    if (!enable_memoize)
         return NULL;
 
     /*
@@ -481,7 +481,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
         return NULL;
 
     /*
-     * We can only have a result cache when there's some kind of cache key,
+     * We can only have a memoize node when there's some kind of cache key,
      * either parameterized path clauses or lateral Vars. No cache key sounds
      * more like something a Materialize node might be more useful for.
      */
@@ -493,8 +493,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
     /*
      * Currently we don't do this for SEMI and ANTI joins unless they're
      * marked as inner_unique. This is because nested loop SEMI/ANTI joins
-     * don't scan the inner node to completion, which will mean result cache
-     * cannot mark the cache entry as complete.
+     * don't scan the inner node to completion, which will mean memoize cannot
+     * mark the cache entry as complete.
      *
      * XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
      * = true. Should we? See add_paths_to_joinrel()
@@ -504,8 +504,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
         return NULL;
 
     /*
-     * Result Cache normally marks cache entries as complete when it runs out
-     * of tuples to read from its subplan. However, with unique joins, Nested
+     * Memoize normally marks cache entries as complete when it runs out of
+     * tuples to read from its subplan. However, with unique joins, Nested
      * Loop will skip to the next outer tuple after finding the first matching
      * inner tuple. This means that we may not read the inner side of the
      * join to completion which leaves no opportunity to mark the cache entry
@@ -516,11 +516,11 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
      * condition, we can't be sure which part of it causes the join to be
      * unique. This means there are no guarantees that only 1 tuple will be
      * read. We cannot mark the cache entry as complete after reading the
-     * first tuple without that guarantee. This means the scope of Result
-     * Cache's usefulness is limited to only outer rows that have no join
+     * first tuple without that guarantee. This means the scope of Memoize
+     * node's usefulness is limited to only outer rows that have no join
      * partner as this is the only case where Nested Loop would exhaust the
      * inner scan of a unique join. Since the scope is limited to that, we
-     * just don't bother making a result cache path in this case.
+     * just don't bother making a memoize path in this case.
      *
      * Lateral vars needn't be considered here as they're not considered when
      * determining if the join is unique.
@@ -536,7 +536,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
         return NULL;
 
     /*
-     * We can't use a result cache if there are volatile functions in the
+     * We can't use a memoize node if there are volatile functions in the
      * inner rel's target list or restrict list. A cache hit could reduce the
      * number of calls to these functions.
      */
@@ -559,13 +559,13 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
                                     &param_exprs,
                                     &hash_operators))
     {
-        return (Path *) create_resultcache_path(root,
-                                                innerrel,
-                                                inner_path,
-                                                param_exprs,
-                                                hash_operators,
-                                                extra->inner_unique,
-                                                outer_path->parent->rows);
+        return (Path *) create_memoize_path(root,
+                                            innerrel,
+                                            inner_path,
+                                            param_exprs,
+                                            hash_operators,
+                                            extra->inner_unique,
+                                            outer_path->parent->rows);
     }
 
     return NULL;
@@ -1688,7 +1688,7 @@ match_unsorted_outer(PlannerInfo *root,
             foreach(lc2, innerrel->cheapest_parameterized_paths)
             {
                 Path       *innerpath = (Path *) lfirst(lc2);
-                Path       *rcpath;
+                Path       *mpath;
 
                 try_nestloop_path(root,
                                   joinrel,
@@ -1699,17 +1699,17 @@ match_unsorted_outer(PlannerInfo *root,
                                   extra);
 
                 /*
-                 * Try generating a result cache path and see if that makes
-                 * the nested loop any cheaper.
+                 * Try generating a memoize path and see if that makes the
+                 * nested loop any cheaper.
                  */
-                rcpath = get_resultcache_path(root, innerrel, outerrel,
-                                              innerpath, outerpath, jointype,
-                                              extra);
-                if (rcpath != NULL)
+                mpath = get_memoize_path(root, innerrel, outerrel,
+                                         innerpath, outerpath, jointype,
+                                         extra);
+                if (mpath != NULL)
                     try_nestloop_path(root,
                                       joinrel,
                                       outerpath,
-                                      rcpath,
+                                      mpath,
                                       merge_pathkeys,
                                       jointype,
                                       extra);
@@ -1867,7 +1867,7 @@ consider_parallel_nestloop(PlannerInfo *root,
         foreach(lc2, innerrel->cheapest_parameterized_paths)
         {
             Path       *innerpath = (Path *) lfirst(lc2);
-            Path       *rcpath;
+            Path       *mpath;
 
             /* Can't join to an inner path that is not parallel-safe */
             if (!innerpath->parallel_safe)
@@ -1894,14 +1894,14 @@ consider_parallel_nestloop(PlannerInfo *root,
                                       pathkeys, jointype, extra);
 
             /*
-             * Try generating a result cache path and see if that makes the
-             * nested loop any cheaper.
+             * Try generating a memoize path and see if that makes the nested
+             * loop any cheaper.
              */
-            rcpath = get_resultcache_path(root, innerrel, outerrel,
-                                          innerpath, outerpath, jointype,
-                                          extra);
-            if (rcpath != NULL)
-                try_partial_nestloop_path(root, joinrel, outerpath, rcpath,
+            mpath = get_memoize_path(root, innerrel, outerrel,
+                                     innerpath, outerpath, jointype,
+                                     extra);
+            if (mpath != NULL)
+                try_partial_nestloop_path(root, joinrel, outerpath, mpath,
                                           pathkeys, jointype, extra);
         }
     }
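The comments in get_memoize_path above describe when the planner skips memoization: when there is no cache key (no parameterized clauses or lateral Vars), when a SEMI/ANTI join is not proven inner_unique, when a unique join could stop short of exhausting the inner scan, and when the inner rel's target list or restrict list contains volatile functions. A hedged sketch of that last restriction, again with hypothetical table names:

    -- A cache hit would change how many times the volatile function runs,
    -- so get_memoize_path returns NULL and the nested loop simply re-scans
    -- the parameterized inner path for every outer row.
    EXPLAIN (COSTS OFF)
    SELECT *
    FROM orders o
    JOIN customers c
      ON c.id = o.customer_id
     AND c.score > random();   -- volatile qual on the inner rel: no Memoize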