
Change the name of the Result Cache node to Memoize

"Result Cache" was never a great name for this node, but nobody managed
to come up with another name that anyone liked enough.  That was until
David Johnston mentioned "Node Memoization", which Tom Lane revised to
just "Memoize".  People seem to like "Memoize", so let's do the rename.

Reviewed-by: Justin Pryzby
Discussion: https://postgr.es/m/20210708165145.GG1176@momjian.us
Backpatch-through: 14, where Result Cache was introduced
Author: David Rowley
Date:   2021-07-14 12:45:00 +12:00
Parent: 6201fa3c16
Commit: 47ca483644
44 changed files with 596 additions and 607 deletions
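In user-visible terms the rename has two surfaces: EXPLAIN output now labels the node "Memoize", and the planner GUC becomes enable_memoize. A minimal sketch, assuming PostgreSQL 14+ and the regression-test table tenk1; the plan actually chosen depends on statistics and cost settings:

EXPLAIN (COSTS OFF)
SELECT count(*)
FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty;
-- A qualifying nested-loop plan now shows a "Memoize" node (formerly
-- "Result Cache") with a "Cache Key:" line naming the parameter the
-- cache is keyed on, here t2.twenty.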


@@ -2584,7 +2584,7 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
-- Make sure that generation of HashAggregate for uniqification purposes
-- does not lead to array overflow due to unexpected duplicate hash keys
-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select 1 from tenk1
where (hundred, thousand) in (select twothousand, twothousand from onek);
@@ -2600,7 +2600,7 @@ explain (costs off)
-> Seq Scan on onek
(8 rows)
-reset enable_resultcache;
+reset enable_memoize;
--
-- Hash Aggregation Spill tests
--


@@ -2536,7 +2536,7 @@ reset enable_nestloop;
--
set work_mem to '64kB';
set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select count(*) from tenk1 a, tenk1 b
where a.hundred = b.thousand and (b.fivethous % 10) < 10;
@@ -2560,7 +2560,7 @@ select count(*) from tenk1 a, tenk1 b
reset work_mem;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
--
-- regression test for 8.2 bug with improper re-ordering of left joins
--
@@ -3684,7 +3684,7 @@ where t1.unique1 = 1;
Recheck Cond: (t1.hundred = hundred)
-> Bitmap Index Scan on tenk1_hundred
Index Cond: (hundred = t1.hundred)
--> Result Cache
+-> Memoize
Cache Key: t2.thousand
-> Index Scan using tenk1_unique2 on tenk1 t3
Index Cond: (unique2 = t2.thousand)
@@ -3706,7 +3706,7 @@ where t1.unique1 = 1;
Recheck Cond: (t1.hundred = hundred)
-> Bitmap Index Scan on tenk1_hundred
Index Cond: (hundred = t1.hundred)
--> Result Cache
+-> Memoize
Cache Key: t2.thousand
-> Index Scan using tenk1_unique2 on tenk1 t3
Index Cond: (unique2 = t2.thousand)
@@ -4235,7 +4235,7 @@ where t1.f1 = ss.f1;
-> Seq Scan on public.int8_tbl i8
Output: i8.q1, i8.q2
Filter: (i8.q2 = 123)
--> Result Cache
+-> Memoize
Output: (i8.q1), t2.f1
Cache Key: i8.q1
-> Limit
@@ -4279,14 +4279,14 @@ where t1.f1 = ss2.f1;
-> Seq Scan on public.int8_tbl i8
Output: i8.q1, i8.q2
Filter: (i8.q2 = 123)
--> Result Cache
+-> Memoize
Output: (i8.q1), t2.f1
Cache Key: i8.q1
-> Limit
Output: (i8.q1), t2.f1
-> Seq Scan on public.text_tbl t2
Output: i8.q1, t2.f1
--> Result Cache
+-> Memoize
Output: ((i8.q1)), (t2.f1)
Cache Key: (i8.q1), t2.f1
-> Limit
@@ -4339,7 +4339,7 @@ where tt1.f1 = ss1.c0;
-> Seq Scan on public.text_tbl tt4
Output: tt4.f1
Filter: (tt4.f1 = 'foo'::text)
--> Result Cache
+-> Memoize
Output: ss1.c0
Cache Key: tt4.f1
-> Subquery Scan on ss1
@@ -5028,7 +5028,7 @@ explain (costs off)
Aggregate
-> Nested Loop
-> Seq Scan on tenk1 a
--> Result Cache
+-> Memoize
Cache Key: a.two
-> Function Scan on generate_series g
(6 rows)
@@ -5040,7 +5040,7 @@ explain (costs off)
Aggregate
-> Nested Loop
-> Seq Scan on tenk1 a
--> Result Cache
+-> Memoize
Cache Key: a.two
-> Function Scan on generate_series g
(6 rows)
@@ -5053,7 +5053,7 @@ explain (costs off)
Aggregate
-> Nested Loop
-> Seq Scan on tenk1 a
--> Result Cache
+-> Memoize
Cache Key: a.two
-> Function Scan on generate_series g
(6 rows)
@@ -5115,7 +5115,7 @@ explain (costs off)
-> Nested Loop
-> Index Only Scan using tenk1_unique1 on tenk1 a
-> Values Scan on "*VALUES*"
--> Result Cache
+-> Memoize
Cache Key: "*VALUES*".column1
-> Index Only Scan using tenk1_unique2 on tenk1 b
Index Cond: (unique2 = "*VALUES*".column1)


@@ -1,9 +1,9 @@
--- Perform tests on the Result Cache node.
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- Perform tests on the Memoize node.
+-- The cache hits/misses/evictions from the Memoize node can vary between
-- machines. Let's just replace the number with an 'N'. In order to allow us
-- to perform validation when the measure was zero, we replace a zero value
-- with "Zero". All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
@@ -28,21 +28,21 @@ begin
end loop;
end;
$$;
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
SET enable_hashjoin TO off;
SET enable_bitmapscan TO off;
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);
-explain_resultcache
+explain_memoize
-------------------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=N)
-> Nested Loop (actual rows=1000 loops=N)
-> Seq Scan on tenk1 t2 (actual rows=1000 loops=N)
Filter: (unique1 < 1000)
Rows Removed by Filter: 9000
--> Result Cache (actual rows=1 loops=N)
+-> Memoize (actual rows=1 loops=N)
Cache Key: t2.twenty
Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
-> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
@@ -60,18 +60,18 @@ WHERE t2.unique1 < 1000;
(1 row)
-- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);
-explain_resultcache
+explain_memoize
-------------------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=N)
-> Nested Loop (actual rows=1000 loops=N)
-> Seq Scan on tenk1 t1 (actual rows=1000 loops=N)
Filter: (unique1 < 1000)
Rows Removed by Filter: 9000
--> Result Cache (actual rows=1 loops=N)
+-> Memoize (actual rows=1 loops=N)
Cache Key: t1.twenty
Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
-> Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
@@ -94,18 +94,18 @@ SET enable_mergejoin TO off;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;', true);
-explain_resultcache
+explain_memoize
-------------------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=N)
-> Nested Loop (actual rows=1200 loops=N)
-> Seq Scan on tenk1 t2 (actual rows=1200 loops=N)
Filter: (unique1 < 1200)
Rows Removed by Filter: 8800
--> Result Cache (actual rows=1 loops=N)
+-> Memoize (actual rows=1 loops=N)
Cache Key: t2.thousand
Hits: N Misses: N Evictions: N Overflows: 0 Memory Usage: NkB
-> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
@@ -117,7 +117,7 @@ RESET enable_mergejoin;
RESET work_mem;
RESET enable_bitmapscan;
RESET enable_hashjoin;
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;
@@ -138,7 +138,7 @@ WHERE t1.unique1 < 1000;
Recheck Cond: (unique1 < 1000)
-> Bitmap Index Scan on tenk1_unique1
Index Cond: (unique1 < 1000)
--> Result Cache
+-> Memoize
Cache Key: t1.twenty
-> Index Only Scan using tenk1_unique1 on tenk1 t2
Index Cond: (unique1 = t1.twenty)
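The hunks above elide the body of explain_memoize. As an illustration of the masking technique the header comments describe (an exact zero stays visible as "Zero" so it can still be validated, other machine-dependent counters become "N"), a hedged plpgsql sketch under a hypothetical name; this is not the committed implementation:

-- Hypothetical helper, for illustration only; not the committed
-- explain_memoize body, which these hunks elide.
create function explain_mask_counts(query text) returns setof text
language plpgsql as
$$
declare
    ln text;
begin
    for ln in
        execute format('explain (analyze, costs off, summary off, timing off) %s',
                       query)
    loop
        -- Zero stays validatable as "Zero"; any other count becomes "N".
        ln := regexp_replace(ln, '(Hits|Misses|Evictions|Overflows): 0\M', '\1: Zero', 'g');
        ln := regexp_replace(ln, '(Hits|Misses|Evictions|Overflows): \d+', '\1: N', 'g');
        -- Memory consumption and loop counts vary across machines and workers.
        ln := regexp_replace(ln, 'Memory Usage: \d+kB', 'Memory Usage: NkB');
        ln := regexp_replace(ln, 'loops=\d+', 'loops=N', 'g');
        return next ln;
    end loop;
end;
$$;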


@@ -2085,7 +2085,7 @@ create index ab_a3_b2_a_idx on ab_a3_b2 (a);
create index ab_a3_b3_a_idx on ab_a3_b3 (a);
set enable_hashjoin = 0;
set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
explain_parallel_append
--------------------------------------------------------------------------------------------------------
@@ -2254,7 +2254,7 @@ select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on
reset enable_hashjoin;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
reset parallel_setup_cost;
reset parallel_tuple_cost;
reset min_parallel_table_scan_size;


@@ -1097,7 +1097,7 @@ where o.ten = 1;
-> Nested Loop
-> Seq Scan on onek o
Filter: (ten = 1)
--> Result Cache
+-> Memoize
Cache Key: o.four
-> CTE Scan on x
CTE x


@@ -104,6 +104,7 @@ select name, setting from pg_settings where name like 'enable%';
enable_indexonlyscan | on
enable_indexscan | on
enable_material | on
+enable_memoize | on
enable_mergejoin | on
enable_nestloop | on
enable_parallel_append | on
@@ -111,7 +112,6 @@ select name, setting from pg_settings where name like 'enable%';
enable_partition_pruning | on
enable_partitionwise_aggregate | off
enable_partitionwise_join | off
-enable_resultcache | on
enable_seqscan | on
enable_sort | on
enable_tidscan | on
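The renamed GUC behaves like the other planner enable_* settings and can be toggled per session; a minimal sketch (the old enable_resultcache spelling is gone after this commit):

SET enable_memoize = off;  -- planner stops considering Memoize nodes
RESET enable_memoize;      -- back to the default shown above (on)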


@@ -120,7 +120,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
# ----------
# Another group of parallel tests
# ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression resultcache
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize
# event triggers cannot run concurrently with any test that runs DDL
# oidjoins is read-only, though, and should run late for best coverage


@@ -1098,11 +1098,11 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
-- Make sure that generation of HashAggregate for uniqification purposes
-- does not lead to array overflow due to unexpected duplicate hash keys
-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select 1 from tenk1
where (hundred, thousand) in (select twothousand, twothousand from onek);
-reset enable_resultcache;
+reset enable_memoize;
--
-- Hash Aggregation Spill tests


@@ -550,7 +550,7 @@ reset enable_nestloop;
set work_mem to '64kB';
set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
explain (costs off)
select count(*) from tenk1 a, tenk1 b
@@ -560,7 +560,7 @@ select count(*) from tenk1 a, tenk1 b
reset work_mem;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
--
-- regression test for 8.2 bug with improper re-ordering of left joins


@@ -1,10 +1,10 @@
--- Perform tests on the Result Cache node.
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- Perform tests on the Memoize node.
+-- The cache hits/misses/evictions from the Memoize node can vary between
-- machines. Let's just replace the number with an 'N'. In order to allow us
-- to perform validation when the measure was zero, we replace a zero value
-- with "Zero". All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
@@ -30,11 +30,11 @@ begin
end;
$$;
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
SET enable_hashjoin TO off;
SET enable_bitmapscan TO off;
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);
@@ -45,7 +45,7 @@ INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;
-- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);
@@ -61,7 +61,7 @@ SET enable_mergejoin TO off;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;', true);
@@ -70,7 +70,7 @@ RESET work_mem;
RESET enable_bitmapscan;
RESET enable_hashjoin;
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;


@@ -515,7 +515,7 @@ create index ab_a3_b3_a_idx on ab_a3_b3 (a);
set enable_hashjoin = 0;
set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
@@ -534,7 +534,7 @@ select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on
reset enable_hashjoin;
reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
reset parallel_setup_cost;
reset parallel_tuple_cost;
reset min_parallel_table_scan_size;