Mirror of https://github.com/postgres/postgres.git
Here we adjust the EXPLAIN ANALYZE output for Result Cache so that we don't show any Result Cache stats for parallel workers that don't contribute anything to a Result Cache plan node. I originally had ideas that workers who don't help could still have their Result Cache stats displayed, so that I could write some parallel Result Cache regression tests that show the EXPLAIN ANALYZE output. However, I realized a little too late that such tests simply could not be made to run stably on the buildfarm.

With that knowledge, before 9eacee2e6 went in, I had removed all of the tests that showed the EXPLAIN ANALYZE output of a parallel Result Cache plan. However, I forgot to put back the code that adjusts the EXPLAIN output to hide the Result Cache stats for parallel workers that were not fast enough to help out before query execution was over. All other nodes behave this way, and so should Result Cache.

Additionally, with this change it now seems safe enough to remove the SET force_parallel_mode = off that I had added to the regression tests.

Also, perform some cleanup in the partition_prune tests. I had adjusted the explain_parallel_append() function to sanitize the Result Cache EXPLAIN ANALYZE output, but since I didn't actually include any parallel Result Cache tests that show their EXPLAIN ANALYZE output, that code does nothing and can be removed.

In passing, move the setting of memPeakKb into the scope where it's used.

Reported-by: Amit Khandekar
Author: David Rowley, Amit Khandekar
Discussion: https://postgr.es/m/CAJ3gD9d8SkfY95GpM1zmsOtX2-Ogx5q-WLsf8f0ykEb0hCRK3w@mail.gmail.com
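The rule the commit describes is simple: a parallel worker that never touched the cache gets no per-worker stats line at all. The real change lives in the server's C EXPLAIN code; purely as an illustration, the same rule can be sketched in PL/pgSQL over arrays of per-worker counters (the function name and array-based interface here are made up for this sketch, not anything from the patch):

-- Illustration only: hide per-worker Result Cache stats for workers
-- that never touched the cache, as EXPLAIN does for other node types.
-- The real logic is in the server's C EXPLAIN code.
create function illustrate_worker_stats(hits bigint[], misses bigint[])
returns setof text
language plpgsql as
$$
declare
    i int;
begin
    for i in 1 .. coalesce(array_length(hits, 1), 0) loop
        -- A worker with no hits and no misses never reached the node
        -- before execution finished, so emit nothing for it.
        continue when hits[i] = 0 and misses[i] = 0;
        return next format('Worker %s:  Hits: %s  Misses: %s',
                           i - 1, hits[i], misses[i]);
    end loop;
end;
$$;

-- Example: the second worker did no work, so only "Worker 0" is reported.
-- SELECT * FROM illustrate_worker_stats(ARRAY[90, 0]::bigint[],
--                                       ARRAY[10, 0]::bigint[]);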
94 lines, 3.3 KiB, PL/PgSQL
-- Perform tests on the Result Cache node.

-- The cache hits/misses/evictions from the Result Cache node can vary between
-- machines. Let's just replace the number with an 'N'. In order to allow us
-- to perform validation when the measure was zero, we replace a zero value
-- with "Zero". All other numbers are replaced with 'N'.
create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
    ln text;
begin
    for ln in
        execute format('explain (analyze, costs off, summary off, timing off) %s',
            query)
    loop
        if hide_hitmiss = true then
            ln := regexp_replace(ln, 'Hits: 0', 'Hits: Zero');
            ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N');
            ln := regexp_replace(ln, 'Misses: 0', 'Misses: Zero');
            ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N');
        end if;
        ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
        ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
        ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
        ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
        ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
        return next ln;
    end loop;
end;
$$;

-- Ensure we get a result cache on the inner side of the nested loop
SET enable_hashjoin TO off;
SET enable_bitmapscan TO off;

SELECT explain_resultcache('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);

-- And check we get the expected results.
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;

-- Try with LATERAL joins
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);

-- And check we get the expected results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;

-- Reduce work_mem so that we see some cache evictions
SET work_mem TO '64kB';
SET enable_mergejoin TO off;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
SELECT explain_resultcache('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;', true);
RESET enable_mergejoin;
RESET work_mem;
RESET enable_bitmapscan;
RESET enable_hashjoin;

-- Test parallel plans with Result Cache.
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;
SET max_parallel_workers_per_gather TO 2;

-- Ensure we get a parallel plan.
EXPLAIN (COSTS OFF)
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;

-- And ensure the parallel plan gives us the correct results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;

RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;