mirror of
https://github.com/postgres/postgres.git
synced 2025-10-19 15:49:24 +03:00
Improve TAP tests by replacing ok() with better Test::More functions
The TAP tests whose ok() calls are changed in this commit were relying on Perl operators rather than the equivalents available in Test::More. For example, rather than the following: ok($data =~ qr/expr/m, "expr matching"); ok($data !~ qr/expr/m, "expr not matching"); the new test code uses this equivalent: like($data, qr/expr/m, "expr matching"); unlike($data, qr/expr/m, "expr not matching"); A huge benefit of the new formulation is that, when a failure happens, the actual values being checked are reported, making debugging easier whether the test runs happen in the buildfarm, in the CI, or locally. This change leads to more test code overall, as perltidy likes to make the code pretty the way it is in this commit. Author: Sadhuprasad Patro <b.sadhu@gmail.com> Discussion: https://postgr.es/m/CAFF0-CHhwNx_Cv2uy7tKjODUbeOgPrJpW4Rpf1jqB16_1bU2sg@mail.gmail.com
This commit is contained in:
@@ -159,7 +159,9 @@ $node->safe_psql(
|
||||
'postgres', q(
|
||||
SELECT bt_index_check('bttest_unique_idx1', true, true);
|
||||
));
|
||||
ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx1"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/index uniqueness is violated for index "bttest_unique_idx1"/,
|
||||
'detected uniqueness violation for index "bttest_unique_idx1"');
|
||||
|
||||
#
|
||||
@@ -177,7 +179,9 @@ ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx1"/,
|
||||
'postgres', q(
|
||||
SELECT bt_index_check('bttest_unique_idx2', true, true);
|
||||
));
|
||||
ok( $stderr =~ /item order invariant violated for index "bttest_unique_idx2"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/item order invariant violated for index "bttest_unique_idx2"/,
|
||||
'detected item order invariant violation for index "bttest_unique_idx2"');
|
||||
|
||||
$node->safe_psql(
|
||||
@@ -191,7 +195,9 @@ $node->safe_psql(
|
||||
'postgres', q(
|
||||
SELECT bt_index_check('bttest_unique_idx2', true, true);
|
||||
));
|
||||
ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx2"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/index uniqueness is violated for index "bttest_unique_idx2"/,
|
||||
'detected uniqueness violation for index "bttest_unique_idx2"');
|
||||
|
||||
#
|
||||
@@ -208,7 +214,9 @@ ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx2"/,
|
||||
'postgres', q(
|
||||
SELECT bt_index_check('bttest_unique_idx3', true, true);
|
||||
));
|
||||
ok( $stderr =~ /item order invariant violated for index "bttest_unique_idx3"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/item order invariant violated for index "bttest_unique_idx3"/,
|
||||
'detected item order invariant violation for index "bttest_unique_idx3"');
|
||||
|
||||
# For unique index deduplication is possible only for same values, but
|
||||
@@ -237,7 +245,9 @@ $node->safe_psql(
|
||||
'postgres', q(
|
||||
SELECT bt_index_check('bttest_unique_idx3', true, true);
|
||||
));
|
||||
ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx3"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/index uniqueness is violated for index "bttest_unique_idx3"/,
|
||||
'detected uniqueness violation for index "bttest_unique_idx3"');
|
||||
|
||||
$node->stop;
|
||||
|
@@ -40,7 +40,7 @@ my $npages = $node->safe_psql(
|
||||
"SELECT relpages FROM pg_class
|
||||
WHERE relname = 'corruption_test';"
|
||||
);
|
||||
ok($npages >= 10, 'table has at least 10 pages');
|
||||
cmp_ok($npages, '>=', 10, 'table has at least 10 pages');
|
||||
|
||||
my $file = $node->safe_psql("postgres",
|
||||
"SELECT pg_relation_filepath('corruption_test');");
|
||||
|
@@ -308,9 +308,9 @@ command_ok(
|
||||
'multiple --set options with different case');
|
||||
|
||||
my $conf = slurp_file("$tempdir/dataY/postgresql.conf");
|
||||
ok($conf !~ qr/^WORK_MEM = /m, "WORK_MEM should not be configured");
|
||||
ok($conf !~ qr/^Work_Mem = /m, "Work_Mem should not be configured");
|
||||
ok($conf =~ qr/^work_mem = 512/m, "work_mem should be in config");
|
||||
unlike($conf, qr/^WORK_MEM = /m, "WORK_MEM should not be configured");
|
||||
unlike($conf, qr/^Work_Mem = /m, "Work_Mem should not be configured");
|
||||
like($conf, qr/^work_mem = 512/m, "work_mem should be in config");
|
||||
|
||||
# Test the no-data-checksums flag
|
||||
my $datadir_nochecksums = "$tempdir/data_no_checksums";
|
||||
|
@@ -535,7 +535,7 @@ my $sysid_p = $node_p->safe_psql('postgres',
|
||||
'SELECT system_identifier FROM pg_control_system()');
|
||||
my $sysid_s = $node_s->safe_psql('postgres',
|
||||
'SELECT system_identifier FROM pg_control_system()');
|
||||
ok($sysid_p != $sysid_s, 'system identifier was changed');
|
||||
isnt($sysid_p, $sysid_s, 'system identifier was changed');
|
||||
|
||||
# clean up
|
||||
$node_p->teardown_node;
|
||||
|
@@ -144,12 +144,12 @@ sub check_data_file
|
||||
{
|
||||
# Get the file's stat information of each segment
|
||||
my $nlink_count = get_hard_link_count($segment);
|
||||
ok($nlink_count == 2, "File '$segment' has 2 hard links");
|
||||
is($nlink_count, 2, "File '$segment' has 2 hard links");
|
||||
}
|
||||
|
||||
# Get the file's stat information of the last segment
|
||||
my $nlink_count = get_hard_link_count($last_segment);
|
||||
ok($nlink_count == $last_segment_nlinks,
|
||||
is($nlink_count, $last_segment_nlinks,
|
||||
"File '$last_segment' has $last_segment_nlinks hard link(s)");
|
||||
}
|
||||
|
||||
|
@@ -5193,7 +5193,8 @@ foreach my $run (sort keys %pgdump_runs)
|
||||
if (($tests{$test}->{like}->{$test_key} || $tests{$test}->{all_runs})
|
||||
&& !defined($tests{$test}->{unlike}->{$test_key}))
|
||||
{
|
||||
if (!ok($output_file =~ $tests{$test}->{regexp},
|
||||
if (!like(
|
||||
$output_file, $tests{$test}->{regexp},
|
||||
"$run: should dump $test"))
|
||||
{
|
||||
diag("Review $run results in $tempdir");
|
||||
@@ -5201,7 +5202,8 @@ foreach my $run (sort keys %pgdump_runs)
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!ok($output_file !~ $tests{$test}->{regexp},
|
||||
if (!unlike(
|
||||
$output_file, $tests{$test}->{regexp},
|
||||
"$run: should not dump $test"))
|
||||
{
|
||||
diag("Review $run results in $tempdir");
|
||||
|
@@ -100,10 +100,12 @@ command_ok(
|
||||
|
||||
my $dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "table one dumped");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "table two dumped");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "table three dumped");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
|
||||
like($dump, qr/^CREATE TABLE public\.table_one/m, "table one dumped");
|
||||
like($dump, qr/^CREATE TABLE public\.table_two/m, "table two dumped");
|
||||
like($dump, qr/^CREATE TABLE public\.table_three/m, "table three dumped");
|
||||
like(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.table_three_one/m,
|
||||
"table three one dumped");
|
||||
|
||||
# Test various combinations of whitespace, comments and correct filters
|
||||
@@ -130,14 +132,21 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "dumped table one");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
|
||||
ok($dump !~ qr/^CREATE TABLE public\.table_three/m, "table three not dumped");
|
||||
ok($dump !~ qr/^CREATE TABLE public\.table_three_one/m,
|
||||
like($dump, qr/^CREATE TABLE public\.table_one/m, "dumped table one");
|
||||
like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.table_three/m,
|
||||
"table three not dumped");
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.table_three_one/m,
|
||||
"table three_one not dumped");
|
||||
ok( $dump !~ qr/^COPY public\.table_one/m,
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^COPY public\.table_one/m,
|
||||
"content of table one is not included");
|
||||
ok($dump =~ qr/^COPY public\.table_two/m, "content of table two is included");
|
||||
like($dump, qr/^COPY public\.table_two/m, "content of table two is included");
|
||||
|
||||
# Test dumping tables specified by qualified names
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
@@ -159,9 +168,9 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "dumped table one");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
|
||||
like($dump, qr/^CREATE TABLE public\.table_one/m, "dumped table one");
|
||||
like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
|
||||
like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
|
||||
|
||||
# Test dumping all tables except one
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
@@ -181,10 +190,12 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
|
||||
unlike($dump, qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
|
||||
like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
|
||||
like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
|
||||
like(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.table_three_one/m,
|
||||
"dumped table three_one");
|
||||
|
||||
# Test dumping tables with a wildcard pattern
|
||||
@@ -205,10 +216,12 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
|
||||
ok($dump !~ qr/^CREATE TABLE public\.table_two/m, "table two not dumped");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
|
||||
unlike($dump, qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
|
||||
unlike($dump, qr/^CREATE TABLE public\.table_two/m, "table two not dumped");
|
||||
like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
|
||||
like(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.table_three_one/m,
|
||||
"dumped table three_one");
|
||||
|
||||
# Test dumping table with multiline quoted tablename
|
||||
@@ -230,7 +243,9 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public.\"strange aaa/m,
|
||||
like(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public.\"strange aaa/m,
|
||||
"dump table with new line in name");
|
||||
|
||||
# Test excluding multiline quoted tablename from dump
|
||||
@@ -251,7 +266,9 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^CREATE TABLE public.\"strange aaa/m,
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public.\"strange aaa/m,
|
||||
"dump table with new line in name");
|
||||
|
||||
# Test excluding an entire schema
|
||||
@@ -272,7 +289,7 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^CREATE TABLE/m, "no table dumped");
|
||||
unlike($dump, qr/^CREATE TABLE/m, "no table dumped");
|
||||
|
||||
# Test including and excluding an entire schema by multiple filterfiles
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
@@ -298,7 +315,7 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^CREATE TABLE/m, "no table dumped");
|
||||
unlike($dump, qr/^CREATE TABLE/m, "no table dumped");
|
||||
|
||||
# Test dumping a table with a single leading newline on a row
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
@@ -321,7 +338,9 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
|
||||
like(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
|
||||
"dump table with multiline strange name");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
@@ -341,7 +360,9 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
|
||||
like(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
|
||||
"dump table with multiline strange name");
|
||||
|
||||
#########################################
|
||||
@@ -380,7 +401,7 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE SERVER dummyserver/m, "dump foreign server");
|
||||
like($dump, qr/^CREATE SERVER dummyserver/m, "dump foreign server");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
or die "unable to open filterfile for writing";
|
||||
@@ -497,7 +518,7 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "no table dumped");
|
||||
like($dump, qr/^CREATE TABLE public\.table_one/m, "no table dumped");
|
||||
|
||||
# Now append a pattern to the filter file which doesn't resolve
|
||||
open $inputfile, '>>', "$tempdir/inputfile.txt"
|
||||
@@ -537,8 +558,8 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^\\connect postgres/m, "database postgres is not dumped");
|
||||
ok($dump =~ qr/^\\connect template1/m, "database template1 is dumped");
|
||||
unlike($dump, qr/^\\connect postgres/m, "database postgres is not dumped");
|
||||
like($dump, qr/^\\connect template1/m, "database template1 is dumped");
|
||||
|
||||
# Make sure this option dont break the existing limitation of using
|
||||
# --globals-only with exclusions
|
||||
@@ -632,8 +653,10 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "wanted table restored");
|
||||
ok($dump !~ qr/^CREATE TABLE public\.table_one/m,
|
||||
like($dump, qr/^CREATE TABLE public\.table_two/m, "wanted table restored");
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.table_one/m,
|
||||
"unwanted table is not restored");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
@@ -727,8 +750,10 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE FUNCTION public\.foo1/m, "wanted function restored");
|
||||
ok( $dump !~ qr/^CREATE TABLE public\.foo2/m,
|
||||
like($dump, qr/^CREATE FUNCTION public\.foo1/m, "wanted function restored");
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.foo2/m,
|
||||
"unwanted function is not restored");
|
||||
|
||||
# this should be white space tolerant (against the -P argument)
|
||||
@@ -751,7 +776,7 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE FUNCTION public\.foo3/m, "wanted function restored");
|
||||
like($dump, qr/^CREATE FUNCTION public\.foo3/m, "wanted function restored");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
or die "unable to open filterfile for writing";
|
||||
@@ -775,10 +800,10 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE INDEX t1_idx1/m, "wanted index restored");
|
||||
ok($dump !~ qr/^CREATE INDEX t2_idx2/m, "unwanted index are not restored");
|
||||
ok($dump =~ qr/^CREATE TRIGGER trg1/m, "wanted trigger restored");
|
||||
ok($dump !~ qr/^CREATE TRIGGER trg2/m, "unwanted trigger is not restored");
|
||||
like($dump, qr/^CREATE INDEX t1_idx1/m, "wanted index restored");
|
||||
unlike($dump, qr/^CREATE INDEX t2_idx2/m, "unwanted index are not restored");
|
||||
like($dump, qr/^CREATE TRIGGER trg1/m, "wanted trigger restored");
|
||||
unlike($dump, qr/^CREATE TRIGGER trg2/m, "unwanted trigger is not restored");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
or die "unable to open filterfile for writing";
|
||||
@@ -798,10 +823,12 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE s1\.t1/m, "wanted table from schema restored");
|
||||
ok( $dump =~ qr/^CREATE SEQUENCE s1\.s1/m,
|
||||
like($dump, qr/^CREATE TABLE s1\.t1/m, "wanted table from schema restored");
|
||||
like(
|
||||
$dump,
|
||||
qr/^CREATE SEQUENCE s1\.s1/m,
|
||||
"wanted sequence from schema restored");
|
||||
ok($dump !~ qr/^CREATE TABLE s2\t2/m, "unwanted table is not restored");
|
||||
unlike($dump, qr/^CREATE TABLE s2\t2/m, "unwanted table is not restored");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
or die "unable to open filterfile for writing";
|
||||
@@ -821,12 +848,16 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^CREATE TABLE s1\.t1/m,
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE TABLE s1\.t1/m,
|
||||
"unwanted table from schema is not restored");
|
||||
ok($dump !~ qr/^CREATE SEQUENCE s1\.s1/m,
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE SEQUENCE s1\.s1/m,
|
||||
"unwanted sequence from schema is not restored");
|
||||
ok($dump =~ qr/^CREATE TABLE s2\.t2/m, "wanted table restored");
|
||||
ok($dump =~ qr/^CREATE TABLE public\.t1/m, "wanted table restored");
|
||||
like($dump, qr/^CREATE TABLE s2\.t2/m, "wanted table restored");
|
||||
like($dump, qr/^CREATE TABLE public\.t1/m, "wanted table restored");
|
||||
|
||||
#########################################
|
||||
# test of supported syntax
|
||||
@@ -849,7 +880,7 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public\.bootab/m, "dumped children table");
|
||||
like($dump, qr/^CREATE TABLE public\.bootab/m, "dumped children table");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
or die "unable to open filterfile for writing";
|
||||
@@ -869,7 +900,9 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump !~ qr/^CREATE TABLE public\.bootab/m,
|
||||
unlike(
|
||||
$dump,
|
||||
qr/^CREATE TABLE public\.bootab/m,
|
||||
"exclude dumped children table");
|
||||
|
||||
open $inputfile, '>', "$tempdir/inputfile.txt"
|
||||
@@ -890,8 +923,8 @@ command_ok(
|
||||
|
||||
$dump = slurp_file($plainfile);
|
||||
|
||||
ok($dump =~ qr/^CREATE TABLE public\.bootab/m, "dumped children table");
|
||||
ok($dump !~ qr/^COPY public\.bootab/m, "exclude dumped children table");
|
||||
like($dump, qr/^CREATE TABLE public\.bootab/m, "dumped children table");
|
||||
unlike($dump, qr/^COPY public\.bootab/m, "exclude dumped children table");
|
||||
|
||||
#########################################
|
||||
# Test extension
|
||||
|
@@ -753,15 +753,23 @@ my ($ret, $out, $err) = $node->psql('postgres',
|
||||
'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val'
|
||||
);
|
||||
|
||||
ok($ret == 0, "psql seeded_random count ok");
|
||||
ok($err eq '', "psql seeded_random count stderr is empty");
|
||||
ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/,
|
||||
is($ret, 0, "psql seeded_random count ok");
|
||||
is($err, '', "psql seeded_random count stderr is empty");
|
||||
like(
|
||||
$out,
|
||||
qr/\b$seed\|uniform\|1\d\d\d\|2/,
|
||||
"psql seeded_random count uniform");
|
||||
ok( $out =~ /\b$seed\|exponential\|2\d\d\d\|2/,
|
||||
like(
|
||||
$out,
|
||||
qr/\b$seed\|exponential\|2\d\d\d\|2/,
|
||||
"psql seeded_random count exponential");
|
||||
ok( $out =~ /\b$seed\|gaussian\|3\d\d\d\|2/,
|
||||
like(
|
||||
$out,
|
||||
qr/\b$seed\|gaussian\|3\d\d\d\|2/,
|
||||
"psql seeded_random count gaussian");
|
||||
ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/,
|
||||
like(
|
||||
$out,
|
||||
qr/\b$seed\|zipfian\|4\d\d\d\|2/,
|
||||
"psql seeded_random count zipfian");
|
||||
|
||||
$node->safe_psql('postgres', 'DROP TABLE seeded_random;');
|
||||
@@ -1521,8 +1529,9 @@ sub check_pgbench_logs
|
||||
|
||||
# $prefix is simple enough, thus does not need escaping
|
||||
my @logs = list_files($dir, qr{^$prefix\..*$});
|
||||
ok(@logs == $nb, "number of log files");
|
||||
ok(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs) == $nb, "file name format");
|
||||
is(scalar(@logs), $nb, "number of log files");
|
||||
is(scalar(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs)),
|
||||
$nb, "file name format");
|
||||
|
||||
my $log_number = 0;
|
||||
for my $log (sort @logs)
|
||||
@@ -1532,10 +1541,12 @@ sub check_pgbench_logs
|
||||
|
||||
my @contents = split(/\n/, $contents_raw);
|
||||
my $clen = @contents;
|
||||
ok( $min <= $clen && $clen <= $max,
|
||||
"transaction count for $log ($clen)");
|
||||
cmp_ok($clen, '>=', $min,
|
||||
"transaction count for $log ($clen) is above min");
|
||||
cmp_ok($clen, '<=', $max,
|
||||
"transaction count for $log ($clen) is below max");
|
||||
my $clen_match = grep(/$re/, @contents);
|
||||
ok($clen_match == $clen, "transaction format for $prefix");
|
||||
is($clen_match, $clen, "transaction format for $prefix");
|
||||
|
||||
# Show more information if some logs don't match
|
||||
# to help with debugging.
|
||||
|
@@ -141,7 +141,7 @@ my ($ret, $out, $err) = $node->psql('postgres',
|
||||
|
||||
is($ret, 2, 'server crash: psql exit code');
|
||||
like($out, qr/before/, 'server crash: output before crash');
|
||||
ok($out !~ qr/AFTER/, 'server crash: no output after crash');
|
||||
unlike($out, qr/AFTER/, 'server crash: no output after crash');
|
||||
is( $err,
|
||||
'psql:<stdin>:2: FATAL: terminating connection due to administrator command
|
||||
psql:<stdin>:2: server closed the connection unexpectedly
|
||||
|
@@ -351,9 +351,9 @@ $node->issues_sql_like(
|
||||
'create database with owner role_foobar');
|
||||
($ret, $stdout, $stderr) =
|
||||
$node->psql('foobar2', 'DROP OWNED BY role_foobar;', on_error_die => 1,);
|
||||
ok($ret == 0, "DROP OWNED BY role_foobar");
|
||||
is($ret, 0, "DROP OWNED BY role_foobar");
|
||||
($ret, $stdout, $stderr) =
|
||||
$node->psql('foobar2', 'DROP DATABASE foobar8;', on_error_die => 1,);
|
||||
ok($ret == 0, "DROP DATABASE foobar8");
|
||||
is($ret, 0, "DROP DATABASE foobar8");
|
||||
|
||||
done_testing();
|
||||
|
@@ -61,10 +61,13 @@ my $node3_occurrences = () =
|
||||
my $total_occurrences =
|
||||
$node1_occurrences + $node2_occurrences + $node3_occurrences;
|
||||
|
||||
ok($node1_occurrences > 1, "received at least one connection on node1");
|
||||
ok($node2_occurrences > 1, "received at least one connection on node2");
|
||||
ok($node3_occurrences > 1, "received at least one connection on node3");
|
||||
ok($total_occurrences == 50, "received 50 connections across all nodes");
|
||||
cmp_ok($node1_occurrences, '>', 1,
|
||||
"received at least one connection on node1");
|
||||
cmp_ok($node2_occurrences, '>', 1,
|
||||
"received at least one connection on node2");
|
||||
cmp_ok($node3_occurrences, '>', 1,
|
||||
"received at least one connection on node3");
|
||||
is($total_occurrences, 50, "received 50 connections across all nodes");
|
||||
|
||||
$node1->stop();
|
||||
$node2->stop();
|
||||
|
@@ -111,10 +111,13 @@ my $node3_occurrences = () =
|
||||
my $total_occurrences =
|
||||
$node1_occurrences + $node2_occurrences + $node3_occurrences;
|
||||
|
||||
ok($node1_occurrences > 1, "received at least one connection on node1");
|
||||
ok($node2_occurrences > 1, "received at least one connection on node2");
|
||||
ok($node3_occurrences > 1, "received at least one connection on node3");
|
||||
ok($total_occurrences == 50, "received 50 connections across all nodes");
|
||||
cmp_ok($node1_occurrences, '>', 1,
|
||||
"received at least one connection on node1");
|
||||
cmp_ok($node2_occurrences, '>', 1,
|
||||
"received at least one connection on node2");
|
||||
cmp_ok($node3_occurrences, '>', 1,
|
||||
"received at least one connection on node3");
|
||||
is($total_occurrences, 50, "received 50 connections across all nodes");
|
||||
|
||||
$node1->stop();
|
||||
$node2->stop();
|
||||
|
@@ -67,8 +67,9 @@ sub change_number_of_io_workers
|
||||
|
||||
if ($expect_failure)
|
||||
{
|
||||
ok( $stderr =~
|
||||
/$worker_count is outside the valid range for parameter "io_workers"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/$worker_count is outside the valid range for parameter "io_workers"/,
|
||||
"updating number of io_workers to $worker_count failed, as expected"
|
||||
);
|
||||
|
||||
|
@@ -58,8 +58,9 @@ run_sql_command(
|
||||
# normal run will verify table data
|
||||
$output = run_sql_command('alter table atacc1 alter test_a set not null;');
|
||||
ok(!is_table_verified($output), 'with constraint will not scan table');
|
||||
ok( $output =~
|
||||
m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
|
||||
like(
|
||||
$output,
|
||||
qr/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
|
||||
'test_a proved by constraints');
|
||||
|
||||
run_sql_command('alter table atacc1 alter test_a drop not null;');
|
||||
@@ -70,9 +71,9 @@ $output = run_sql_command(
|
||||
);
|
||||
ok(is_table_verified($output), 'table was scanned');
|
||||
# we may miss debug message for test_a constraint because we need verify table due test_b
|
||||
ok( !( $output =~
|
||||
m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/
|
||||
),
|
||||
unlike(
|
||||
$output,
|
||||
qr/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
|
||||
'test_b not proved by wrong constraints');
|
||||
run_sql_command(
|
||||
'alter table atacc1 alter test_a drop not null, alter test_b drop not null;'
|
||||
@@ -86,11 +87,13 @@ $output = run_sql_command(
|
||||
'alter table atacc1 alter test_b set not null, alter test_a set not null;'
|
||||
);
|
||||
ok(!is_table_verified($output), 'table was not scanned for both columns');
|
||||
ok( $output =~
|
||||
m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
|
||||
like(
|
||||
$output,
|
||||
qr/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
|
||||
'test_a proved by constraints');
|
||||
ok( $output =~
|
||||
m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
|
||||
like(
|
||||
$output,
|
||||
qr/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
|
||||
'test_b proved by constraints');
|
||||
run_sql_command('drop table atacc1;');
|
||||
|
||||
@@ -119,8 +122,9 @@ $output = run_sql_command(
|
||||
'ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);'
|
||||
);
|
||||
ok(!is_table_verified($output), 'table part_3_4 not scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "part_3_4" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "part_3_4" is implied by existing constraints/,
|
||||
'part_3_4 verified by existing constraints');
|
||||
|
||||
# test attach default partition
|
||||
@@ -131,16 +135,18 @@ run_sql_command(
|
||||
$output = run_sql_command(
|
||||
'ALTER TABLE list_parted2 ATTACH PARTITION list_parted2_def default;');
|
||||
ok(!is_table_verified($output), 'table list_parted2_def not scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "list_parted2_def" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "list_parted2_def" is implied by existing constraints/,
|
||||
'list_parted2_def verified by existing constraints');
|
||||
|
||||
$output = run_sql_command(
|
||||
'CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66);'
|
||||
);
|
||||
ok(!is_table_verified($output), 'table list_parted2_def not scanned');
|
||||
ok( $output =~
|
||||
m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
|
||||
'updated partition constraint for default partition list_parted2_def');
|
||||
|
||||
# test attach another partitioned table
|
||||
@@ -153,11 +159,14 @@ run_sql_command(
|
||||
);
|
||||
$output = run_sql_command(
|
||||
'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
|
||||
ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
|
||||
ok($output =~ m/verifying table "list_parted2_def"/,
|
||||
unlike($output, qr/verifying table "part_5"/, 'table part_5 not scanned');
|
||||
like(
|
||||
$output,
|
||||
qr/verifying table "list_parted2_def"/,
|
||||
'list_parted2_def scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "part_5" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "part_5" is implied by existing constraints/,
|
||||
'part_5 verified by existing constraints');
|
||||
|
||||
run_sql_command(
|
||||
@@ -171,11 +180,14 @@ run_sql_command(
|
||||
);
|
||||
$output = run_sql_command(
|
||||
'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
|
||||
ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
|
||||
ok($output =~ m/verifying table "list_parted2_def"/,
|
||||
unlike($output, qr/verifying table "part_5"/, 'table part_5 not scanned');
|
||||
like(
|
||||
$output,
|
||||
qr/verifying table "list_parted2_def"/,
|
||||
'list_parted2_def scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "part_5" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "part_5" is implied by existing constraints/,
|
||||
'part_5 verified by existing constraints');
|
||||
|
||||
# Check the case where attnos of the partitioning columns in the table being
|
||||
@@ -190,11 +202,14 @@ run_sql_command(
|
||||
ALTER TABLE part_6 DROP c;');
|
||||
$output = run_sql_command(
|
||||
'ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6);');
|
||||
ok(!($output =~ m/verifying table "part_6"/), 'table part_6 not scanned');
|
||||
ok($output =~ m/verifying table "list_parted2_def"/,
|
||||
unlike($output, qr/verifying table "part_6"/, 'table part_6 not scanned');
|
||||
like(
|
||||
$output,
|
||||
qr/verifying table "list_parted2_def"/,
|
||||
'list_parted2_def scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "part_6" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "part_6" is implied by existing constraints/,
|
||||
'part_6 verified by existing constraints');
|
||||
|
||||
# Similar to above, but the table being attached is a partitioned table
|
||||
@@ -219,17 +234,20 @@ $output = run_sql_command(
|
||||
'ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN (\'a\', null);'
|
||||
);
|
||||
ok(!is_table_verified($output), 'table not scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "part_7_a_null" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "part_7_a_null" is implied by existing constraints/,
|
||||
'part_7_a_null verified by existing constraints');
|
||||
$output = run_sql_command(
|
||||
'ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);');
|
||||
ok(!is_table_verified($output), 'tables not scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "part_7" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "part_7" is implied by existing constraints/,
|
||||
'part_7 verified by existing constraints');
|
||||
ok( $output =~
|
||||
m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
|
||||
'updated partition constraint for default partition list_parted2_def');
|
||||
|
||||
run_sql_command(
|
||||
@@ -245,9 +263,9 @@ $output = run_sql_command(
|
||||
'ALTER TABLE range_parted ATTACH PARTITION range_part1 FOR VALUES FROM (1, 1) TO (1, 10);'
|
||||
);
|
||||
ok(is_table_verified($output), 'table range_part1 scanned');
|
||||
ok( !( $output =~
|
||||
m/partition constraint for table "range_part1" is implied by existing constraints/
|
||||
),
|
||||
unlike(
|
||||
$output,
|
||||
qr/partition constraint for table "range_part1" is implied by existing constraints/,
|
||||
'range_part1 not verified by existing constraints');
|
||||
|
||||
run_sql_command(
|
||||
@@ -259,8 +277,9 @@ $output = run_sql_command(
|
||||
'ALTER TABLE range_parted ATTACH PARTITION range_part2 FOR VALUES FROM (1, 10) TO (1, 20);'
|
||||
);
|
||||
ok(!is_table_verified($output), 'table range_part2 not scanned');
|
||||
ok( $output =~
|
||||
m/partition constraint for table "range_part2" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/partition constraint for table "range_part2" is implied by existing constraints/,
|
||||
'range_part2 verified by existing constraints');
|
||||
|
||||
# If a partitioned table being created or an existing table being attached
|
||||
@@ -278,19 +297,22 @@ run_sql_command(
|
||||
$output = run_sql_command(
|
||||
'ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1);');
|
||||
ok(is_table_verified($output), 'quuux1 table scanned');
|
||||
ok( !( $output =~
|
||||
m/partition constraint for table "quuux1" is implied by existing constraints/
|
||||
),
|
||||
unlike(
|
||||
$output,
|
||||
qr/partition constraint for table "quuux1" is implied by existing constraints/,
|
||||
'quuux1 verified by existing constraints');
|
||||
|
||||
run_sql_command('CREATE TABLE quuux2 (a int, b text);');
|
||||
$output = run_sql_command(
|
||||
'ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2);');
|
||||
ok(!($output =~ m/verifying table "quuux_default1"/),
|
||||
unlike(
|
||||
$output,
|
||||
qr/verifying table "quuux_default1"/,
|
||||
'quuux_default1 not scanned');
|
||||
ok($output =~ m/verifying table "quuux2"/, 'quuux2 scanned');
|
||||
ok( $output =~
|
||||
m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
|
||||
like($output, qr/verifying table "quuux2"/, 'quuux2 scanned');
|
||||
like(
|
||||
$output,
|
||||
qr/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
|
||||
'updated partition constraint for default partition quuux_default1');
|
||||
run_sql_command('DROP TABLE quuux1, quuux2;');
|
||||
|
||||
@@ -298,15 +320,16 @@ run_sql_command('DROP TABLE quuux1, quuux2;');
|
||||
$output = run_sql_command(
|
||||
'CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1);');
|
||||
ok(!is_table_verified($output), 'tables not scanned');
|
||||
ok( !( $output =~
|
||||
m/partition constraint for table "quuux1" is implied by existing constraints/
|
||||
),
|
||||
unlike(
|
||||
$output,
|
||||
qr/partition constraint for table "quuux1" is implied by existing constraints/,
|
||||
'quuux1 verified by existing constraints');
|
||||
$output = run_sql_command(
|
||||
'CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2);');
|
||||
ok(!is_table_verified($output), 'tables not scanned');
|
||||
ok( $output =~
|
||||
m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
|
||||
like(
|
||||
$output,
|
||||
qr/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
|
||||
'updated partition constraint for default partition quuux_default1');
|
||||
run_sql_command('DROP TABLE quuux;');
|
||||
|
||||
|
@@ -29,69 +29,69 @@ my $result;
|
||||
# Create a tablespace with an absolute path
|
||||
$result = $node->psql('postgres',
|
||||
"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
|
||||
ok($result == 0, 'create tablespace with absolute path');
|
||||
is($result, 0, 'create tablespace with absolute path');
|
||||
|
||||
# Can't create a tablespace where there is one already
|
||||
$result = $node->psql('postgres',
|
||||
"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
|
||||
ok($result != 0, 'clobber tablespace with absolute path');
|
||||
isnt($result, 0, 'clobber tablespace with absolute path');
|
||||
|
||||
# Create table in it
|
||||
$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
|
||||
ok($result == 0, 'create table in tablespace with absolute path');
|
||||
is($result, 0, 'create table in tablespace with absolute path');
|
||||
|
||||
# Can't drop a tablespace that still has a table in it
|
||||
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
|
||||
ok($result != 0, 'drop tablespace with absolute path');
|
||||
isnt($result, 0, 'drop tablespace with absolute path');
|
||||
|
||||
# Drop the table
|
||||
$result = $node->psql('postgres', "DROP TABLE t");
|
||||
ok($result == 0, 'drop table in tablespace with absolute path');
|
||||
is($result, 0, 'drop table in tablespace with absolute path');
|
||||
|
||||
# Drop the tablespace
|
||||
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
|
||||
ok($result == 0, 'drop tablespace with absolute path');
|
||||
is($result, 0, 'drop tablespace with absolute path');
|
||||
|
||||
# Create two absolute tablespaces and two in-place tablespaces, so we can
|
||||
# testing various kinds of tablespace moves.
|
||||
$result = $node->psql('postgres',
|
||||
"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
|
||||
ok($result == 0, 'create tablespace 1 with absolute path');
|
||||
is($result, 0, 'create tablespace 1 with absolute path');
|
||||
$result = $node->psql('postgres',
|
||||
"CREATE TABLESPACE regress_ts2 LOCATION '$TS2_LOCATION'");
|
||||
ok($result == 0, 'create tablespace 2 with absolute path');
|
||||
is($result, 0, 'create tablespace 2 with absolute path');
|
||||
$result = $node->psql('postgres',
|
||||
"SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''"
|
||||
);
|
||||
ok($result == 0, 'create tablespace 3 with in-place directory');
|
||||
is($result, 0, 'create tablespace 3 with in-place directory');
|
||||
$result = $node->psql('postgres',
|
||||
"SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''"
|
||||
);
|
||||
ok($result == 0, 'create tablespace 4 with in-place directory');
|
||||
is($result, 0, 'create tablespace 4 with in-place directory');
|
||||
|
||||
# Create a table and test moving between absolute and in-place tablespaces
|
||||
$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
|
||||
ok($result == 0, 'create table in tablespace 1');
|
||||
is($result, 0, 'create table in tablespace 1');
|
||||
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts2");
|
||||
ok($result == 0, 'move table abs->abs');
|
||||
is($result, 0, 'move table abs->abs');
|
||||
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts3");
|
||||
ok($result == 0, 'move table abs->in-place');
|
||||
is($result, 0, 'move table abs->in-place');
|
||||
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts4");
|
||||
ok($result == 0, 'move table in-place->in-place');
|
||||
is($result, 0, 'move table in-place->in-place');
|
||||
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts1");
|
||||
ok($result == 0, 'move table in-place->abs');
|
||||
is($result, 0, 'move table in-place->abs');
|
||||
|
||||
# Drop everything
|
||||
$result = $node->psql('postgres', "DROP TABLE t");
|
||||
ok($result == 0, 'create table in tablespace 1');
|
||||
is($result, 0, 'create table in tablespace 1');
|
||||
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
|
||||
ok($result == 0, 'drop tablespace 1');
|
||||
is($result, 0, 'drop tablespace 1');
|
||||
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts2");
|
||||
ok($result == 0, 'drop tablespace 2');
|
||||
is($result, 0, 'drop tablespace 2');
|
||||
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts3");
|
||||
ok($result == 0, 'drop tablespace 3');
|
||||
is($result, 0, 'drop tablespace 3');
|
||||
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts4");
|
||||
ok($result == 0, 'drop tablespace 4');
|
||||
is($result, 0, 'drop tablespace 4');
|
||||
|
||||
$node->stop;
|
||||
|
||||
|
@@ -981,7 +981,8 @@ foreach my $run (sort keys %pgdump_runs)
|
||||
if ($tests{$test}->{like}->{$test_key}
|
||||
&& !defined($tests{$test}->{unlike}->{$test_key}))
|
||||
{
|
||||
if (!ok($output_file =~ $tests{$test}->{regexp},
|
||||
if (!like(
|
||||
$output_file, $tests{$test}->{regexp},
|
||||
"$run: should dump $test"))
|
||||
{
|
||||
diag("Review $run results in $tempdir");
|
||||
@@ -989,7 +990,8 @@ foreach my $run (sort keys %pgdump_runs)
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!ok($output_file !~ $tests{$test}->{regexp},
|
||||
if (!unlike(
|
||||
$output_file, $tests{$test}->{regexp},
|
||||
"$run: should not dump $test"))
|
||||
{
|
||||
diag("Review $run results in $tempdir");
|
||||
|
@@ -90,7 +90,7 @@ for my $i (1 .. 15)
|
||||
last;
|
||||
}
|
||||
}
|
||||
ok($warn_limit == 1, "warn-limit reached");
|
||||
is($warn_limit, 1, "warn-limit reached");
|
||||
|
||||
# We can still INSERT, despite the warnings.
|
||||
$node->safe_psql('postgres',
|
||||
|
@@ -265,26 +265,26 @@ my ($ret, $stdout, $stderr) = $node_primary->psql(
|
||||
'postgres', 'SHOW ALL;',
|
||||
on_error_die => 1,
|
||||
extra_params => [ '--dbname' => $connstr_rep ]);
|
||||
ok($ret == 0, "SHOW ALL with replication role and physical replication");
|
||||
is($ret, 0, "SHOW ALL with replication role and physical replication");
|
||||
($ret, $stdout, $stderr) = $node_primary->psql(
|
||||
'postgres', 'SHOW ALL;',
|
||||
on_error_die => 1,
|
||||
extra_params => [ '--dbname' => $connstr_db ]);
|
||||
ok($ret == 0, "SHOW ALL with replication role and logical replication");
|
||||
is($ret, 0, "SHOW ALL with replication role and logical replication");
|
||||
|
||||
# Test SHOW with a user-settable parameter
|
||||
($ret, $stdout, $stderr) = $node_primary->psql(
|
||||
'postgres', 'SHOW work_mem;',
|
||||
on_error_die => 1,
|
||||
extra_params => [ '--dbname' => $connstr_rep ]);
|
||||
ok( $ret == 0,
|
||||
is($ret, 0,
|
||||
"SHOW with user-settable parameter, replication role and physical replication"
|
||||
);
|
||||
($ret, $stdout, $stderr) = $node_primary->psql(
|
||||
'postgres', 'SHOW work_mem;',
|
||||
on_error_die => 1,
|
||||
extra_params => [ '--dbname' => $connstr_db ]);
|
||||
ok( $ret == 0,
|
||||
is($ret, 0,
|
||||
"SHOW with user-settable parameter, replication role and logical replication"
|
||||
);
|
||||
|
||||
@@ -293,14 +293,14 @@ ok( $ret == 0,
|
||||
'postgres', 'SHOW primary_conninfo;',
|
||||
on_error_die => 1,
|
||||
extra_params => [ '--dbname' => $connstr_rep ]);
|
||||
ok( $ret == 0,
|
||||
is($ret, 0,
|
||||
"SHOW with superuser-settable parameter, replication role and physical replication"
|
||||
);
|
||||
($ret, $stdout, $stderr) = $node_primary->psql(
|
||||
'postgres', 'SHOW primary_conninfo;',
|
||||
on_error_die => 1,
|
||||
extra_params => [ '--dbname' => $connstr_db ]);
|
||||
ok( $ret == 0,
|
||||
is($ret, 0,
|
||||
"SHOW with superuser-settable parameter, replication role and logical replication"
|
||||
);
|
||||
|
||||
@@ -312,7 +312,7 @@ my $slotname = 'test_read_replication_slot_physical';
|
||||
'postgres',
|
||||
'READ_REPLICATION_SLOT non_existent_slot;',
|
||||
extra_params => [ '--dbname' => $connstr_rep ]);
|
||||
ok($ret == 0, "READ_REPLICATION_SLOT exit code 0 on success");
|
||||
is($ret, 0, "READ_REPLICATION_SLOT exit code 0 on success");
|
||||
like($stdout, qr/^\|\|$/,
|
||||
"READ_REPLICATION_SLOT returns NULL values if slot does not exist");
|
||||
|
||||
@@ -325,7 +325,7 @@ $node_primary->psql(
|
||||
'postgres',
|
||||
"READ_REPLICATION_SLOT $slotname;",
|
||||
extra_params => [ '--dbname' => $connstr_rep ]);
|
||||
ok($ret == 0, "READ_REPLICATION_SLOT success with existing slot");
|
||||
is($ret, 0, "READ_REPLICATION_SLOT success with existing slot");
|
||||
like($stdout, qr/^physical\|[^|]*\|1$/,
|
||||
"READ_REPLICATION_SLOT returns tuple with slot information");
|
||||
|
||||
@@ -577,7 +577,7 @@ my $phys_restart_lsn_post = $node_primary->safe_psql('postgres',
|
||||
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
|
||||
);
|
||||
chomp($phys_restart_lsn_post);
|
||||
ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0,
|
||||
is($phys_restart_lsn_pre, $phys_restart_lsn_post,
|
||||
"physical slot advance persists across restarts");
|
||||
|
||||
# Check if the previous segment gets correctly recycled after the
|
||||
|
@@ -155,7 +155,9 @@ my $res = run_log(
|
||||
ok(!$res, 'invalid recovery startup fails');
|
||||
|
||||
my $logfile = slurp_file($node_standby->logfile());
|
||||
ok($logfile =~ qr/multiple recovery targets specified/,
|
||||
like(
|
||||
$logfile,
|
||||
qr/multiple recovery targets specified/,
|
||||
'multiple conflicting settings');
|
||||
|
||||
# Check behavior when recovery ends before target is reached
|
||||
@@ -183,7 +185,8 @@ foreach my $i (0 .. 10 * $PostgreSQL::Test::Utils::timeout_default)
|
||||
usleep(100_000);
|
||||
}
|
||||
$logfile = slurp_file($node_standby->logfile());
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/FATAL: .* recovery ended before configured recovery target was reached/,
|
||||
'recovery end before target reached is a fatal error');
|
||||
|
||||
|
@@ -53,10 +53,8 @@ $node_standby->poll_query_until('postgres',
|
||||
|
||||
# This test is successful if and only if the LSN has been applied with at least
|
||||
# the configured apply delay.
|
||||
ok(time() - $primary_insert_time >= $delay,
|
||||
"standby applies WAL only after replication delay");
|
||||
|
||||
|
||||
cmp_ok(time() - $primary_insert_time,
|
||||
'>=', $delay, "standby applies WAL only after replication delay");
|
||||
# Check that recovery can be paused or resumed expectedly.
|
||||
my $node_standby2 = PostgreSQL::Test::Cluster->new('standby2');
|
||||
$node_standby2->init_from_backup($node_primary, $backup_name,
|
||||
|
@@ -35,8 +35,9 @@ my ($result, $stdout, $stderr) = $node_primary->psql(
|
||||
'template1',
|
||||
qq[START_REPLICATION SLOT test_slot LOGICAL 0/0],
|
||||
replication => 'database');
|
||||
ok( $stderr =~
|
||||
m/replication slot "test_slot" was not created in this database/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/replication slot "test_slot" was not created in this database/,
|
||||
"Logical decoding correctly fails to start");
|
||||
|
||||
($result, $stdout, $stderr) = $node_primary->psql(
|
||||
@@ -54,7 +55,9 @@ like(
|
||||
'template1',
|
||||
qq[START_REPLICATION SLOT s1 LOGICAL 0/1],
|
||||
replication => 'true');
|
||||
ok($stderr =~ /ERROR: logical decoding requires a database connection/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: logical decoding requires a database connection/,
|
||||
"Logical decoding fails on non-database connection");
|
||||
|
||||
$node_primary->safe_psql('postgres',
|
||||
@@ -201,7 +204,7 @@ my $logical_restart_lsn_post = $node_primary->safe_psql('postgres',
|
||||
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"
|
||||
);
|
||||
chomp($logical_restart_lsn_post);
|
||||
ok(($logical_restart_lsn_pre cmp $logical_restart_lsn_post) == 0,
|
||||
is($logical_restart_lsn_pre, $logical_restart_lsn_post,
|
||||
"logical slot advance persists across restarts");
|
||||
|
||||
my $stats_test_slot1 = 'test_slot';
|
||||
|
@@ -245,7 +245,9 @@ my $log_location = -s $standby2->logfile;
|
||||
|
||||
$standby2->stop;
|
||||
my $logfile = slurp_file($standby2->logfile, $log_location);
|
||||
ok( $logfile =~ qr/archiver process shutting down/,
|
||||
like(
|
||||
$logfile,
|
||||
qr/archiver process shutting down/,
|
||||
'check shutdown callback of shell archive module');
|
||||
|
||||
# Test that we can enter and leave backup mode without crashes
|
||||
|
@@ -91,7 +91,8 @@ sub test_recovery_wal_level_minimal
|
||||
|
||||
# Confirm that the archive recovery fails with an expected error
|
||||
my $logfile = slurp_file($recovery_node->logfile());
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
|
||||
"$node_text ends with an error because it finds WAL generated with \"wal_level=minimal\""
|
||||
);
|
||||
|
@@ -394,7 +394,8 @@ foreach my $i (0 .. 10 * $PostgreSQL::Test::Utils::timeout_default)
|
||||
|
||||
# Confirm that the server startup fails with an expected error
|
||||
my $logfile = slurp_file($node_standby->logfile());
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/FATAL: .* logical replication slot ".*" exists on the standby, but "hot_standby" = "off"/,
|
||||
"the standby ends with an error during startup because hot_standby was disabled"
|
||||
);
|
||||
@@ -487,8 +488,9 @@ $node_primary->wait_for_replay_catchup($node_standby);
|
||||
($result, $stdout, $stderr) = $node_standby->psql('otherdb',
|
||||
"SELECT lsn FROM pg_logical_slot_peek_changes('behaves_ok_activeslot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
|
||||
);
|
||||
ok( $stderr =~
|
||||
m/replication slot "behaves_ok_activeslot" was not created in this database/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/replication slot "behaves_ok_activeslot" was not created in this database/,
|
||||
"replaying logical slot from another database fails");
|
||||
|
||||
##################################################
|
||||
@@ -620,8 +622,9 @@ check_pg_recvlogical_stderr($handle,
|
||||
'postgres',
|
||||
qq[select pg_copy_logical_replication_slot('vacuum_full_inactiveslot', 'vacuum_full_inactiveslot_copy');],
|
||||
replication => 'database');
|
||||
ok( $stderr =~
|
||||
/ERROR: cannot copy invalidated replication slot "vacuum_full_inactiveslot"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: cannot copy invalidated replication slot "vacuum_full_inactiveslot"/,
|
||||
"invalidated slot cannot be copied");
|
||||
|
||||
# Set hot_standby_feedback to on
|
||||
|
@@ -100,8 +100,9 @@ $subscriber1->safe_psql('postgres',
|
||||
# Disable failover for enabled subscription
|
||||
my ($result, $stdout, $stderr) = $subscriber1->psql('postgres',
|
||||
"ALTER SUBSCRIPTION regress_mysub1 SET (failover = false)");
|
||||
ok( $stderr =~
|
||||
/ERROR: cannot set option "failover" for enabled subscription/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: cannot set option "failover" for enabled subscription/,
|
||||
"altering failover is not allowed for enabled subscription");
|
||||
|
||||
##################################################
|
||||
@@ -110,8 +111,9 @@ ok( $stderr =~
|
||||
|
||||
($result, $stdout, $stderr) =
|
||||
$publisher->psql('postgres', "SELECT pg_sync_replication_slots();");
|
||||
ok( $stderr =~
|
||||
/ERROR: replication slots can only be synchronized to a standby server/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: replication slots can only be synchronized to a standby server/,
|
||||
"cannot sync slots on a non-standby server");
|
||||
|
||||
##################################################
|
||||
@@ -313,8 +315,9 @@ $standby1->reload;
|
||||
# Attempting to perform logical decoding on a synced slot should result in an error
|
||||
($result, $stdout, $stderr) = $standby1->psql('postgres',
|
||||
"select * from pg_logical_slot_get_changes('lsub1_slot', NULL, NULL);");
|
||||
ok( $stderr =~
|
||||
/ERROR: cannot use replication slot "lsub1_slot" for logical decoding/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: cannot use replication slot "lsub1_slot" for logical decoding/,
|
||||
"logical decoding is not allowed on synced slot");
|
||||
|
||||
# Attempting to alter a synced slot should result in an error
|
||||
@@ -322,13 +325,17 @@ ok( $stderr =~
|
||||
'postgres',
|
||||
qq[ALTER_REPLICATION_SLOT lsub1_slot (failover);],
|
||||
replication => 'database');
|
||||
ok($stderr =~ /ERROR: cannot alter replication slot "lsub1_slot"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: cannot alter replication slot "lsub1_slot"/,
|
||||
"synced slot on standby cannot be altered");
|
||||
|
||||
# Attempting to drop a synced slot should result in an error
|
||||
($result, $stdout, $stderr) = $standby1->psql('postgres',
|
||||
"SELECT pg_drop_replication_slot('lsub1_slot');");
|
||||
ok($stderr =~ /ERROR: cannot drop replication slot "lsub1_slot"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: cannot drop replication slot "lsub1_slot"/,
|
||||
"synced slot on standby cannot be dropped");
|
||||
|
||||
##################################################
|
||||
@@ -341,8 +348,9 @@ $standby1->reload;
|
||||
|
||||
($result, $stdout, $stderr) =
|
||||
$standby1->psql('postgres', "SELECT pg_sync_replication_slots();");
|
||||
ok( $stderr =~
|
||||
/ERROR: replication slot synchronization requires "dbname" to be specified in "primary_conninfo"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: replication slot synchronization requires "dbname" to be specified in "primary_conninfo"/,
|
||||
"cannot sync slots if dbname is not specified in primary_conninfo");
|
||||
|
||||
# Add the dbname back to the primary_conninfo for further tests
|
||||
@@ -379,8 +387,9 @@ $cascading_standby->start;
|
||||
|
||||
($result, $stdout, $stderr) =
|
||||
$cascading_standby->psql('postgres', "SELECT pg_sync_replication_slots();");
|
||||
ok( $stderr =~
|
||||
/ERROR: cannot synchronize replication slots from a standby server/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: cannot synchronize replication slots from a standby server/,
|
||||
"cannot sync slots to a cascading standby server");
|
||||
|
||||
$cascading_standby->stop;
|
||||
|
@@ -94,7 +94,9 @@ my ($result, $stdout, $stderr);
|
||||
'postgres', qq[
|
||||
SELECT pg_replication_slot_advance('logical_slot', '0/1');
|
||||
]);
|
||||
ok( $stderr =~ /can no longer access replication slot "logical_slot"/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/can no longer access replication slot "logical_slot"/,
|
||||
"detected error upon trying to acquire invalidated slot on node")
|
||||
or die
|
||||
"could not detect error upon trying to acquire invalidated slot \"logical_slot\" on node";
|
||||
|
@@ -364,13 +364,16 @@ $node_publisher->safe_psql('postgres', "DELETE FROM tab_full_pk WHERE a = 2");
|
||||
$node_publisher->wait_for_catchup('tap_sub');
|
||||
|
||||
my $logfile = slurp_file($node_subscriber->logfile, $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab_full_pk": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(1, quux\); replica identity \(a\)=\(1\)/m,
|
||||
'update target row is missing');
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab_full": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(26\); replica identity full \(25\)/m,
|
||||
'update target row is missing');
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab_full_pk": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(2\)/m,
|
||||
'delete target row is missing');
|
||||
|
||||
@@ -515,7 +518,9 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_notrep VALUES (11)");
|
||||
$node_publisher->wait_for_catchup('tap_sub');
|
||||
|
||||
$logfile = slurp_file($node_publisher->logfile, $log_location);
|
||||
ok($logfile =~ qr/skipped replication of an empty transaction with XID/,
|
||||
like(
|
||||
$logfile,
|
||||
qr/skipped replication of an empty transaction with XID/,
|
||||
'empty transaction is skipped');
|
||||
|
||||
$result =
|
||||
@@ -588,8 +593,9 @@ CREATE TABLE skip_wal();
|
||||
CREATE PUBLICATION tap_pub2 FOR TABLE skip_wal;
|
||||
ROLLBACK;
|
||||
});
|
||||
ok( $reterr =~
|
||||
m/WARNING: "wal_level" is insufficient to publish logical changes/,
|
||||
like(
|
||||
$reterr,
|
||||
qr/WARNING: "wal_level" is insufficient to publish logical changes/,
|
||||
'CREATE PUBLICATION while "wal_level=minimal"');
|
||||
|
||||
done_testing();
|
||||
|
@@ -45,8 +45,9 @@ pass "subscription disable and drop in same transaction did not hang";
|
||||
my ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
|
||||
"CREATE SUBSCRIPTION mysub1 CONNECTION '$publisher_connstr' PUBLICATION mypub, non_existent_pub"
|
||||
);
|
||||
ok( $stderr =~
|
||||
m/WARNING: publication "non_existent_pub" does not exist on the publisher/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/WARNING: publication "non_existent_pub" does not exist on the publisher/,
|
||||
"Create subscription throws warning for non-existent publication");
|
||||
|
||||
# Wait for initial table sync to finish.
|
||||
@@ -56,16 +57,18 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, 'mysub1');
|
||||
($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
|
||||
"ALTER SUBSCRIPTION mysub1 ADD PUBLICATION non_existent_pub1, non_existent_pub2"
|
||||
);
|
||||
ok( $stderr =~
|
||||
m/WARNING: publications "non_existent_pub1", "non_existent_pub2" do not exist on the publisher/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/WARNING: publications "non_existent_pub1", "non_existent_pub2" do not exist on the publisher/,
|
||||
"Alter subscription add publication throws warning for non-existent publications"
|
||||
);
|
||||
|
||||
# Specifying non-existent publication along with set publication.
|
||||
($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
|
||||
"ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
|
||||
ok( $stderr =~
|
||||
m/WARNING: publication "non_existent_pub" does not exist on the publisher/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/WARNING: publication "non_existent_pub" does not exist on the publisher/,
|
||||
"Alter subscription set publication throws warning for non-existent publication"
|
||||
);
|
||||
|
||||
|
@@ -367,16 +367,20 @@ $node_publisher->wait_for_catchup('sub1');
|
||||
$node_publisher->wait_for_catchup('sub2');
|
||||
|
||||
my $logfile = slurp_file($node_subscriber1->logfile(), $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab1_2_2": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(null, 4, quux\); replica identity \(a\)=\(4\)/,
|
||||
'update target row is missing in tab1_2_2');
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab1_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
|
||||
'delete target row is missing in tab1_1');
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab1_2_2": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(4\)/,
|
||||
'delete target row is missing in tab1_2_2');
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab1_def": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(10\)/,
|
||||
'delete target row is missing in tab1_def');
|
||||
|
||||
@@ -780,10 +784,12 @@ $node_publisher->wait_for_catchup('sub_viaroot');
|
||||
$node_publisher->wait_for_catchup('sub2');
|
||||
|
||||
$logfile = slurp_file($node_subscriber1->logfile(), $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab2_1": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(pub_tab2, quux, 5\); replica identity \(a\)=\(5\)/,
|
||||
'update target row is missing in tab2_1');
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab2_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
|
||||
'delete target row is missing in tab2_1');
|
||||
|
||||
@@ -801,7 +807,8 @@ $node_publisher->safe_psql('postgres',
|
||||
$node_publisher->wait_for_catchup('sub_viaroot');
|
||||
|
||||
$logfile = slurp_file($node_subscriber1->logfile(), $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab2_1": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified locally in transaction [0-9]+ at .*\n.*Existing local row \(yyy, null, 3\); remote row \(pub_tab2, quux, 3\); replica identity \(a\)=\(3\)/,
|
||||
'updating a row that was modified by a different origin');
|
||||
|
||||
|
@@ -399,8 +399,9 @@ SKIP:
|
||||
isnt($ret, 0,
|
||||
"non zero exit for subscription whose owner is a non-superuser must specify password parameter of the connection string"
|
||||
);
|
||||
ok( $stderr =~
|
||||
m/DETAIL: Non-superusers must provide a password in the connection string./,
|
||||
like(
|
||||
$stderr,
|
||||
qr/DETAIL: Non-superusers must provide a password in the connection string./,
|
||||
'subscription whose owner is a non-superuser must specify password parameter of the connection string'
|
||||
);
|
||||
|
||||
|
@@ -1272,7 +1272,8 @@ my ($cmdret, $stdout, $stderr) = $node_subscriber->psql(
|
||||
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_1, pub_mix_2;
|
||||
));
|
||||
|
||||
ok( $stderr =~
|
||||
like(
|
||||
$stderr,
|
||||
qr/cannot use different column lists for table "public.test_mix_1" in different publications/,
|
||||
'different column lists detected');
|
||||
|
||||
|
@@ -224,8 +224,9 @@ ok( $node_B->poll_query_until(
|
||||
# Alter retain_dead_tuples for enabled subscription
|
||||
my ($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
|
||||
"ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true)");
|
||||
ok( $stderr =~
|
||||
/ERROR: cannot set option \"retain_dead_tuples\" for enabled subscription/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/ERROR: cannot set option \"retain_dead_tuples\" for enabled subscription/,
|
||||
"altering retain_dead_tuples is not allowed for enabled subscription");
|
||||
|
||||
# Disable the subscription
|
||||
@@ -239,8 +240,9 @@ $node_A->poll_query_until('postgres',
|
||||
# Enable retain_dead_tuples for disabled subscription
|
||||
($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
|
||||
"ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true);");
|
||||
ok( $stderr =~
|
||||
/NOTICE: deleted rows to detect conflicts would not be removed until the subscription is enabled/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/NOTICE: deleted rows to detect conflicts would not be removed until the subscription is enabled/,
|
||||
"altering retain_dead_tuples is allowed for disabled subscription");
|
||||
|
||||
# Re-enable the subscription
|
||||
@@ -262,9 +264,11 @@ ok( $node_A->poll_query_until(
|
||||
|
||||
($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
|
||||
"ALTER SUBSCRIPTION $subname_AB SET (origin = any);");
|
||||
ok( $stderr =~
|
||||
/WARNING: subscription "tap_sub_a_b" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins/,
|
||||
"warn of the possibility of receiving changes from origins other than the publisher");
|
||||
like(
|
||||
$stderr,
|
||||
qr/WARNING: subscription "tap_sub_a_b" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins/,
|
||||
"warn of the possibility of receiving changes from origins other than the publisher"
|
||||
);
|
||||
|
||||
# Reset the origin to none
|
||||
$node_A->psql('postgres',
|
||||
@@ -302,7 +306,8 @@ $node_A->safe_psql('postgres', "DELETE FROM tab WHERE a = 1;");
|
||||
'postgres', qq(VACUUM (verbose) public.tab;)
|
||||
);
|
||||
|
||||
ok( $stderr =~
|
||||
like(
|
||||
$stderr,
|
||||
qr/1 are dead but not yet removable/,
|
||||
'the deleted column is non-removable');
|
||||
|
||||
@@ -311,7 +316,8 @@ $node_A->wait_for_catchup($subname_BA);
|
||||
|
||||
# Check the conflict detected on Node B
|
||||
my $logfile = slurp_file($node_B->logfile(), $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.*
|
||||
.*DETAIL:.* Deleting the row that was modified locally in transaction [0-9]+ at .*
|
||||
.*Existing local row \(1, 3\); replica identity \(a\)=\(1\)/,
|
||||
@@ -324,7 +330,8 @@ $node_A->safe_psql(
|
||||
$node_B->wait_for_catchup($subname_AB);
|
||||
|
||||
$logfile = slurp_file($node_A->logfile(), $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab": conflict=update_deleted.*
|
||||
.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
|
||||
.*Remote row \(1, 3\); replica identity \(a\)=\(1\)/,
|
||||
@@ -371,7 +378,8 @@ $node_A->safe_psql(
|
||||
$node_B->wait_for_catchup($subname_AB);
|
||||
|
||||
$logfile = slurp_file($node_A->logfile(), $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab": conflict=update_deleted.*
|
||||
.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
|
||||
.*Remote row \(2, 4\); replica identity full \(2, 2\)/,
|
||||
@@ -502,7 +510,9 @@ if ($injection_points_supported != 0)
|
||||
($cmdret, $stdout, $stderr) =
|
||||
$node_A->psql('postgres', qq(VACUUM (verbose) public.tab;));
|
||||
|
||||
ok($stderr =~ qr/1 are dead but not yet removable/,
|
||||
like(
|
||||
$stderr,
|
||||
qr/1 are dead but not yet removable/,
|
||||
'the deleted column is non-removable');
|
||||
|
||||
$log_location = -s $node_A->logfile;
|
||||
@@ -527,7 +537,8 @@ if ($injection_points_supported != 0)
|
||||
$node_B->wait_for_catchup($subname_AB);
|
||||
|
||||
$logfile = slurp_file($node_A->logfile(), $log_location);
|
||||
ok( $logfile =~
|
||||
like(
|
||||
$logfile,
|
||||
qr/conflict detected on relation "public.tab": conflict=update_deleted.*
|
||||
.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
|
||||
.*Remote row \(1, 2\); replica identity full \(1, 1\)/,
|
||||
|
Reference in New Issue
Block a user