
Fix rare test failure in nbtree_half_dead_pages

If auto-analyze kicks in at just the right moment, it can hold a
snapshot and prevent the VACUUM command in the test from removing the
deleted tuples. The test needs the tuples to be removed, otherwise no
half-dead page is generated. To fix, introduce a helper procedure to
wait for the removable cutoff to advance, like the one used in the
syscache-update-pruned test for similar purposes.

Thanks to Alexander Lakhin for reproducing and analyzing the test
failure, and Tom Lane for the report.

Discussion: https://www.postgresql.org/message-id/307198.1767408023@sss.pgh.pa.us
Author: Heikki Linnakangas
Date: 2026-01-16 14:09:22 +02:00
parent 84705b3727
commit 1c64d2fcbe
2 changed files with 47 additions and 0 deletions
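For illustration, the race is easy to reproduce by hand: any backend holding a snapshot taken before the DELETE keeps the deleted tuples "recently dead" rather than removable, so VACUUM cannot produce the half-dead page the test relies on. A minimal sketch with two psql sessions (the table name is hypothetical, not part of the commit):

-- Session 1: hold a snapshot, as a concurrent auto-analyze would
BEGIN ISOLATION LEVEL REPEATABLE READ;
SELECT count(*) FROM scratch;  -- acquires the snapshot

-- Session 2: the deleted tuples cannot be removed yet
DELETE FROM scratch WHERE id > 100000;
VACUUM VERBOSE scratch;  -- reports tuples "dead but not yet removable"

-- Session 1: release the snapshot; a later VACUUM can now prune
COMMIT;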

src/test/modules/injection_points/expected/nbtree_half_dead_pages.out

@@ -13,6 +13,28 @@ set client_min_messages TO 'warning';
 create extension if not exists injection_points;
 create extension if not exists amcheck;
 reset client_min_messages;
+-- Wait until all recently-dead tuples on a table become fully dead
+-- and removable by vacuum. (We don't run any concurrent transactions
+-- in the test itself, but auto-analyze can kick in at any time and
+-- hold a transaction open, holding back the vacuum horizon.)
+CREATE PROCEDURE wait_prunable() LANGUAGE plpgsql AS $$
+DECLARE
+  barrier xid8;
+  cutoff xid8;
+BEGIN
+  barrier := pg_current_xact_id();
+  -- Pass a shared catalog rather than the table we'll
+  -- prune, to prevent the cutoff from moving
+  -- backwards. See comments at removable_cutoff()
+  LOOP
+    ROLLBACK; -- release MyProc->xmin, which could be the oldest
+    cutoff := removable_cutoff('pg_database');
+    EXIT WHEN cutoff >= barrier;
+    RAISE LOG 'removable cutoff %; waiting for %', cutoff, barrier;
+    PERFORM pg_sleep(.1);
+  END LOOP;
+END
+$$;
 -- Make all injection points local to this process, for concurrency.
 SELECT injection_points_set_local();
 injection_points_set_local
@@ -34,6 +56,7 @@ insert into nbtree_half_dead_pages SELECT g from generate_series(1, 150000) g;
 create index nbtree_half_dead_pages_id_idx on nbtree_half_dead_pages using btree (id);
 delete from nbtree_half_dead_pages where id > 100000 and id < 120000;
 -- Run VACUUM and interrupt it so that it leaves behind a half-dead page
+call wait_prunable();
 SELECT injection_points_attach('nbtree-leave-page-half-dead', 'error');
 injection_points_attach
 -------------------------
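wait_prunable() is a procedure rather than a function because only procedures may perform transaction control: the ROLLBACK at the top of each iteration ends the poller's own transaction and releases its snapshot (MyProc->xmin), which could otherwise be the very thing holding the cutoff back. This works only when the procedure is invoked outside an explicit transaction block, as the test's bare CALL does; a sketch of the constraint (standard PostgreSQL behavior, not part of this commit):

CALL wait_prunable();   -- OK: each ROLLBACK ends a transaction
BEGIN;
CALL wait_prunable();   -- ERROR: invalid transaction termination
ROLLBACK;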

src/test/modules/injection_points/sql/nbtree_half_dead_pages.sql

@@ -15,6 +15,29 @@ create extension if not exists injection_points;
 create extension if not exists amcheck;
 reset client_min_messages;
+-- Wait until all recently-dead tuples on a table become fully dead
+-- and removable by vacuum. (We don't run any concurrent transactions
+-- in the test itself, but auto-analyze can kick in at any time and
+-- hold a transaction open, holding back the vacuum horizon.)
+CREATE PROCEDURE wait_prunable() LANGUAGE plpgsql AS $$
+DECLARE
+  barrier xid8;
+  cutoff xid8;
+BEGIN
+  barrier := pg_current_xact_id();
+  -- Pass a shared catalog rather than the table we'll
+  -- prune, to prevent the cutoff from moving
+  -- backwards. See comments at removable_cutoff()
+  LOOP
+    ROLLBACK; -- release MyProc->xmin, which could be the oldest
+    cutoff := removable_cutoff('pg_database');
+    EXIT WHEN cutoff >= barrier;
+    RAISE LOG 'removable cutoff %; waiting for %', cutoff, barrier;
+    PERFORM pg_sleep(.1);
+  END LOOP;
+END
+$$;
 -- Make all injection points local to this process, for concurrency.
 SELECT injection_points_set_local();
@@ -33,6 +56,7 @@ create index nbtree_half_dead_pages_id_idx on nbtree_half_dead_pages using btree
 delete from nbtree_half_dead_pages where id > 100000 and id < 120000;
 -- Run VACUUM and interrupt it so that it leaves behind a half-dead page
+call wait_prunable();
 SELECT injection_points_attach('nbtree-leave-page-half-dead', 'error');
 vacuum nbtree_half_dead_pages;
 SELECT injection_points_detach('nbtree-leave-page-half-dead');
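The test loads amcheck above, and a half-dead page is not corruption: amcheck's btree checks treat half-dead pages as a legitimate leftover of interrupted page deletion. After the interrupted VACUUM, the index should therefore still pass verification; a hand-run check along these lines (a sketch, not necessarily the exact call the test makes):

SELECT bt_index_check('nbtree_half_dead_pages_id_idx');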