Mirror of https://github.com/postgres/postgres.git, synced 2025-07-03 20:02:46 +03:00.
Run pgperltidy
This is required before the creation of a new branch. pgindent is clean, as is reformat-dat-files. The perltidy version is v20230309, as documented in pgindent's README.
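For reference, a run like this is typically done from the top of a source tree with commands along these lines; the exact script paths and make target are assumptions based on where pgindent's README documents these tools, not something recorded in the commit itself:

    # reformat all Perl files in the tree (requires perltidy v20230309 on PATH)
    src/tools/pgindent/pgperltidy

    # confirm that C indentation and the catalog .dat files are already clean
    src/tools/pgindent/pgindent .
    make -C src/include/catalog reformat-dat-files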
@@ -54,20 +54,17 @@ sub invalid_entry_order_leaf_page_test

 $node->stop;

-my $blkno = 1; # root
+my $blkno = 1; # root

 # produce wrong order by replacing aaaaa with ccccc
-string_replace_block(
-$relpath,
-'aaaaa',
-'ccccc',
-$blkno
-);
+string_replace_block($relpath, 'aaaaa', 'ccccc', $blkno);

 $node->start;

-my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+my ($result, $stdout, $stderr) =
+$node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+my $expected =
+"index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
 like($stderr, qr/$expected/);
 }

@@ -96,20 +93,17 @@ sub invalid_entry_order_inner_page_test

 $node->stop;

-my $blkno = 1; # root
+my $blkno = 1; # root

 # we have rrrrrrrrr... and tttttttttt... as keys in the root, so produce wrong order by replacing rrrrrrrrrr....
-string_replace_block(
-$relpath,
-'rrrrrrrrrr',
-'zzzzzzzzzz',
-$blkno
-);
+string_replace_block($relpath, 'rrrrrrrrrr', 'zzzzzzzzzz', $blkno);

 $node->start;

-my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+my ($result, $stdout, $stderr) =
+$node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+my $expected =
+"index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
 like($stderr, qr/$expected/);
 }

@@ -129,7 +123,7 @@ sub invalid_entry_columns_order_test

 $node->stop;

-my $blkno = 1; # root
+my $blkno = 1; # root

 # mess column numbers
 # root items order before: (1,aaa), (2,bbb)
@@ -139,26 +133,18 @@ sub invalid_entry_columns_order_test

 my $find = qr/($attrno_1)(.)(aaa)/s;
 my $replace = $attrno_2 . '$2$3';
-string_replace_block(
-$relpath,
-$find,
-$replace,
-$blkno
-);
+string_replace_block($relpath, $find, $replace, $blkno);

 $find = qr/($attrno_2)(.)(bbb)/s;
 $replace = $attrno_1 . '$2$3';
-string_replace_block(
-$relpath,
-$find,
-$replace,
-$blkno
-);
+string_replace_block($relpath, $find, $replace, $blkno);

 $node->start;

-my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+my ($result, $stdout, $stderr) =
+$node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+my $expected =
+"index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
 like($stderr, qr/$expected/);
 }

@@ -183,20 +169,17 @@ sub inconsistent_with_parent_key__parent_key_corrupted_test

 $node->stop;

-my $blkno = 1; # root
+my $blkno = 1; # root

 # we have nnnnnnnnnn... as parent key in the root, so replace it with something smaller then child's keys
-string_replace_block(
-$relpath,
-'nnnnnnnnnn',
-'aaaaaaaaaa',
-$blkno
-);
+string_replace_block($relpath, 'nnnnnnnnnn', 'aaaaaaaaaa', $blkno);

 $node->start;

-my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3";
+my ($result, $stdout, $stderr) =
+$node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+my $expected =
+"index \"$indexname\" has inconsistent records on page 3 offset 3";
 like($stderr, qr/$expected/);
 }

@@ -221,20 +204,17 @@ sub inconsistent_with_parent_key__child_key_corrupted_test

 $node->stop;

-my $blkno = 3; # leaf
+my $blkno = 3; # leaf

 # we have nnnnnnnnnn... as parent key in the root, so replace child key with something bigger
-string_replace_block(
-$relpath,
-'nnnnnnnnnn',
-'pppppppppp',
-$blkno
-);
+string_replace_block($relpath, 'nnnnnnnnnn', 'pppppppppp', $blkno);

 $node->start;

-my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3";
+my ($result, $stdout, $stderr) =
+$node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+my $expected =
+"index \"$indexname\" has inconsistent records on page 3 offset 3";
 like($stderr, qr/$expected/);
 }

@@ -254,24 +234,21 @@ sub inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test

 $node->stop;

-my $blkno = 2; # posting tree root
+my $blkno = 2; # posting tree root

 # we have a posting tree for 'aaaaa' key with the root at 2nd block
 # and two leaf pages 3 and 4. replace 4th page's high key with (1,1)
 # so that there are tid's in leaf page that are larger then the new high key.
 my $find = pack('S*', 0, 4, 0) . '....';
 my $replace = pack('S*', 0, 4, 0, 1, 1);
-string_replace_block(
-$relpath,
-$find,
-$replace,
-$blkno
-);
+string_replace_block($relpath, $find, $replace, $blkno);

 $node->start;

-my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-my $expected = "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4";
+my ($result, $stdout, $stderr) =
+$node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+my $expected =
+"index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4";
 like($stderr, qr/$expected/);
 }

@@ -76,7 +76,8 @@ command_like(
 'checksums are enabled in control file');

 command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
-command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], '--no-sync-data-files');
+command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ],
+'--no-sync-data-files');
 command_fails([ 'initdb', $datadir ], 'existing data directory');

 if ($supports_syncfs)
@@ -147,7 +147,8 @@ $node->command_ok(
 'slot with failover created');

 my $result = $node->safe_psql('postgres',
-"SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'");
+"SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'"
+);
 is($result, 't', "failover is enabled for the new slot");

 done_testing();
@@ -56,7 +56,7 @@ $primary->command_ok(
 '--pgdata' => $backup1path,
 '--no-sync',
 '--checkpoint' => 'fast',
-'--wal-method' => 'none'
+'--wal-method' => 'none'
 ],
 "full backup");

@@ -74,7 +74,7 @@ $primary->command_ok(
 '--pgdata' => $backup2path,
 '--no-sync',
 '--checkpoint' => 'fast',
-'--wal-method' => 'none',
+'--wal-method' => 'none',
 '--incremental' => $backup1path . '/backup_manifest'
 ],
 "incremental backup");
@@ -112,45 +112,45 @@ done_testing();
 # of the given data file.
 sub check_data_file
 {
-my ($data_file, $last_segment_nlinks) = @_;
+my ($data_file, $last_segment_nlinks) = @_;

-my @data_file_segments = ($data_file);
+my @data_file_segments = ($data_file);

-# Start checking for additional segments
-my $segment_number = 1;
+# Start checking for additional segments
+my $segment_number = 1;

-while (1)
-{
-my $next_segment = $data_file . '.' . $segment_number;
+while (1)
+{
+my $next_segment = $data_file . '.' . $segment_number;

-# If the file exists and is a regular file, add it to the list
-if (-f $next_segment)
-{
-push @data_file_segments, $next_segment;
-$segment_number++;
-}
-# Stop the loop if the file doesn't exist
-else
-{
-last;
-}
-}
+# If the file exists and is a regular file, add it to the list
+if (-f $next_segment)
+{
+push @data_file_segments, $next_segment;
+$segment_number++;
+}
+# Stop the loop if the file doesn't exist
+else
+{
+last;
+}
+}

-# All segments of the given data file should contain 2 hard links, except
-# for the last one, which should match the given number of links.
-my $last_segment = pop @data_file_segments;
+# All segments of the given data file should contain 2 hard links, except
+# for the last one, which should match the given number of links.
+my $last_segment = pop @data_file_segments;

-for my $segment (@data_file_segments)
-{
-# Get the file's stat information of each segment
-my $nlink_count = get_hard_link_count($segment);
-ok($nlink_count == 2, "File '$segment' has 2 hard links");
-}
+for my $segment (@data_file_segments)
+{
+# Get the file's stat information of each segment
+my $nlink_count = get_hard_link_count($segment);
+ok($nlink_count == 2, "File '$segment' has 2 hard links");
+}

-# Get the file's stat information of the last segment
-my $nlink_count = get_hard_link_count($last_segment);
-ok($nlink_count == $last_segment_nlinks,
-"File '$last_segment' has $last_segment_nlinks hard link(s)");
+# Get the file's stat information of the last segment
+my $nlink_count = get_hard_link_count($last_segment);
+ok($nlink_count == $last_segment_nlinks,
+"File '$last_segment' has $last_segment_nlinks hard link(s)");
 }


@@ -159,11 +159,11 @@ sub check_data_file
 # that file.
 sub get_hard_link_count
 {
-my ($file) = @_;
+my ($file) = @_;

-# Get file stats
-my @stats = stat($file);
-my $nlink = $stats[3]; # Number of hard links
+# Get file stats
+my @stats = stat($file);
+my $nlink = $stats[3]; # Number of hard links

-return $nlink;
+return $nlink;
 }
@@ -240,17 +240,20 @@ command_fails_like(
 command_fails_like(
 [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
 qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
-'pg_restore: option --exclude-database cannot be used together with -g/--globals-only');
+'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'
+);

 command_fails_like(
 [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ],
 qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/,
-'When option --exclude-database is used in pg_restore with dump of pg_dump');
+'When option --exclude-database is used in pg_restore with dump of pg_dump'
+);

 command_fails_like(
 [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ],
 qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/,
-'When option --globals-only is not used in pg_restore with dump of pg_dump');
+'When option --globals-only is not used in pg_restore with dump of pg_dump'
+);

 # also fails for -r and -t, but it seems pointless to add more tests for those.
 command_fails_like(
@@ -368,7 +368,7 @@ my %pgdump_runs = (
 '--data-only',
 '--superuser' => 'test_superuser',
 '--disable-triggers',
-'--verbose', # no-op, just make sure it works
+'--verbose', # no-op, just make sure it works
 'postgres',
 ],
 },
@@ -810,8 +810,7 @@ my %pgdump_runs = (
 dump_cmd => [
 'pg_dump', '--no-sync',
 "--file=$tempdir/no_schema.sql", '--no-schema',
-'--with-statistics',
-'postgres',
+'--with-statistics', 'postgres',
 ],
 },);

@@ -294,17 +294,17 @@ my %pgdumpall_runs = (
 '--format' => 'directory',
 '--globals-only',
 '--file' => "$tempdir/dump_globals_only",
-],
-restore_cmd => [
-'pg_restore', '-C', '--globals-only',
-'--format' => 'directory',
-'--file' => "$tempdir/dump_globals_only.sql",
-"$tempdir/dump_globals_only",
-],
-like => qr/
+],
+restore_cmd => [
+'pg_restore', '-C', '--globals-only',
+'--format' => 'directory',
+'--file' => "$tempdir/dump_globals_only.sql",
+"$tempdir/dump_globals_only",
+],
+like => qr/
 ^\s*\QCREATE ROLE dumpall;\E\s*\n
 /xm
-}, );
+},);

 # First execute the setup_sql
 foreach my $run (sort keys %pgdumpall_runs)
@@ -339,7 +339,8 @@ foreach my $run (sort keys %pgdumpall_runs)
 # pg_restore --file output file.
 my $output_file = slurp_file("$tempdir/${run}.sql");

-if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike}))
+if ( !($pgdumpall_runs{$run}->{like})
+&& !($pgdumpall_runs{$run}->{unlike}))
 {
 die "missing \"like\" or \"unlike\" in test \"$run\"";
 }
@@ -361,30 +362,38 @@ foreach my $run (sort keys %pgdumpall_runs)
 # Some negative test case with dump of pg_dumpall and restore using pg_restore
 # test case 1: when -C is not used in pg_restore with dump of pg_dumpall
 $node->command_fails_like(
-[ 'pg_restore',
-"$tempdir/format_custom",
-'--format' => 'custom',
-'--file' => "$tempdir/error_test.sql", ],
-qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/,
-'When -C is not used in pg_restore with dump of pg_dumpall');
+[
+'pg_restore',
+"$tempdir/format_custom",
+'--format' => 'custom',
+'--file' => "$tempdir/error_test.sql",
+],
+qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/,
+'When -C is not used in pg_restore with dump of pg_dumpall');

 # test case 2: When --list option is used with dump of pg_dumpall
 $node->command_fails_like(
-[ 'pg_restore',
+[
+'pg_restore',
 "$tempdir/format_custom", '-C',
-'--format' => 'custom', '--list',
-'--file' => "$tempdir/error_test.sql", ],
+'--format' => 'custom',
+'--list',
+'--file' => "$tempdir/error_test.sql",
+],
 qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/,
 'When --list is used in pg_restore with dump of pg_dumpall');

 # test case 3: When non-exist database is given with -d option
 $node->command_fails_like(
-[ 'pg_restore',
+[
+'pg_restore',
 "$tempdir/format_custom", '-C',
 '--format' => 'custom',
-'-d' => 'dbpq', ],
+'-d' => 'dbpq',
+],
 qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
-'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
+'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'
+);

 $node->stop('fast');

@@ -285,7 +285,7 @@ sub run_pg_rewind
 # Check that pg_rewind with dbname and --write-recovery-conf
 # wrote the dbname in the generated primary_conninfo value.
 like(slurp_file("$primary_pgdata/postgresql.auto.conf"),
-qr/dbname=postgres/m, 'recovery conf file sets dbname');
+qr/dbname=postgres/m, 'recovery conf file sets dbname');

 # Check that standby.signal is here as recovery configuration
 # was requested.
@@ -53,7 +53,8 @@ $old_sub->safe_psql('postgres',

 $old_sub->stop;

-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 0");
+$new_sub->append_conf('postgresql.conf',
+"max_active_replication_origins = 0");

 # pg_upgrade will fail because the new cluster has insufficient
 # max_active_replication_origins.
@@ -80,7 +81,8 @@ command_checks_all(
 );

 # Reset max_active_replication_origins
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 10");
+$new_sub->append_conf('postgresql.conf',
+"max_active_replication_origins = 10");

 # Cleanup
 $publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1");
@@ -13,7 +13,8 @@ sub test_mode
 {
 my ($mode) = @_;

-my $old = PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
+my $old =
+PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
 my $new = PostgreSQL::Test::Cluster->new('new');

 # --swap can't be used to upgrade from versions older than 10, so just skip
@@ -40,9 +41,11 @@ sub test_mode
 # Create a small variety of simple test objects on the old cluster. We'll
 # check that these reach the new version after upgrading.
 $old->start;
-$old->safe_psql('postgres', "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
+$old->safe_psql('postgres',
+"CREATE TABLE test1 AS SELECT generate_series(1, 100)");
 $old->safe_psql('postgres', "CREATE DATABASE testdb1");
-$old->safe_psql('testdb1', "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
+$old->safe_psql('testdb1',
+"CREATE TABLE test2 AS SELECT generate_series(200, 300)");
 $old->safe_psql('testdb1', "VACUUM FULL test2");
 $old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432");

@@ -51,10 +54,15 @@ sub test_mode
 if (defined($ENV{oldinstall}))
 {
 my $tblspc = PostgreSQL::Test::Utils::tempdir_short();
-$old->safe_psql('postgres', "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
-$old->safe_psql('postgres', "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
-$old->safe_psql('postgres', "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)");
-$old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
+$old->safe_psql('postgres',
+"CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
+$old->safe_psql('postgres',
+"CREATE DATABASE testdb2 TABLESPACE test_tblspc");
+$old->safe_psql('postgres',
+"CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)"
+);
+$old->safe_psql('testdb2',
+"CREATE TABLE test4 AS SELECT generate_series(400, 502)");
 }
 $old->stop;

@@ -90,9 +98,11 @@ sub test_mode
 # tablespace.
 if (defined($ENV{oldinstall}))
 {
-$result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
+$result =
+$new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
 is($result, '102', "test3 data after pg_upgrade $mode");
-$result = $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
+$result =
+$new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
 is($result, '103', "test4 data after pg_upgrade $mode");
 }
 $new->stop;
@@ -238,62 +238,105 @@ $node->command_fails_like(
 'cannot use option --all and a dbname as argument at the same time');

 $node->safe_psql('postgres',
-'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;');
+'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;'
+);
 $node->issues_sql_like(
-[ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-only',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with missing stats');
 $node->issues_sql_unlike(
-[ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-only',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with no missing stats');

 $node->safe_psql('postgres',
-'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));');
+'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));'
+);
 $node->issues_sql_like(
-[ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-in-stages',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with missing index expression stats');
 $node->issues_sql_unlike(
-[ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-in-stages',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with no missing index expression stats');

 $node->safe_psql('postgres',
-'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;');
+'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;'
+);
 $node->issues_sql_like(
-[ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-only',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with missing extended stats');
 $node->issues_sql_unlike(
-[ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-only',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with no missing extended stats');

 $node->safe_psql('postgres',
 "CREATE TABLE regression_vacuumdb_child (a INT) INHERITS (regression_vacuumdb_test);\n"
-. "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
-. "ANALYZE regression_vacuumdb_child;\n");
+. "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
+. "ANALYZE regression_vacuumdb_child;\n");
 $node->issues_sql_like(
-[ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-in-stages',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with missing inherited stats');
 $node->issues_sql_unlike(
-[ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+[
+'vacuumdb', '--analyze-in-stages',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_test', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with no missing inherited stats');

 $node->safe_psql('postgres',
 "CREATE TABLE regression_vacuumdb_parted (a INT) PARTITION BY LIST (a);\n"
-. "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
-. "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
-. "ANALYZE regression_vacuumdb_part1;\n");
+. "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
+. "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
+. "ANALYZE regression_vacuumdb_part1;\n");
 $node->issues_sql_like(
-[ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+[
+'vacuumdb', '--analyze-only',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_parted', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with missing partition stats');
 $node->issues_sql_unlike(
-[ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+[
+'vacuumdb', '--analyze-only',
+'--missing-stats-only', '-t',
+'regression_vacuumdb_parted', 'postgres'
+],
 qr/statement:\ ANALYZE/sx,
 '--missing-stats-only with no missing partition stats');

@@ -33,7 +33,8 @@
 descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
 collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6',
 colllocale => 'C.UTF-8', collversion => '1' },
-{ oid => '9535', descr => 'sorts by Unicode code point; Unicode character semantics',
+{ oid => '9535',
+descr => 'sorts by Unicode code point; Unicode character semantics',
 collname => 'pg_unicode_fast', collprovider => 'b', collencoding => '6',
 colllocale => 'PG_UNICODE_FAST', collversion => '1' },

@@ -1190,14 +1190,14 @@
 proname => 'bytea', proleakproof => 't', prorettype => 'bytea',
 proargtypes => 'int8', prosrc => 'int8_bytea' },
 { oid => '8580', descr => 'convert bytea to int2',
-proname => 'int2', prorettype => 'int2',
-proargtypes => 'bytea', prosrc => 'bytea_int2' },
+proname => 'int2', prorettype => 'int2', proargtypes => 'bytea',
+prosrc => 'bytea_int2' },
 { oid => '8581', descr => 'convert bytea to int4',
-proname => 'int4', prorettype => 'int4',
-proargtypes => 'bytea', prosrc => 'bytea_int4' },
+proname => 'int4', prorettype => 'int4', proargtypes => 'bytea',
+prosrc => 'bytea_int4' },
 { oid => '8582', descr => 'convert bytea to int8',
-proname => 'int8', prorettype => 'int8',
-proargtypes => 'bytea', prosrc => 'bytea_int8' },
+proname => 'int8', prorettype => 'int8', proargtypes => 'bytea',
+prosrc => 'bytea_int8' },

 { oid => '449', descr => 'hash',
 proname => 'hashint2', prorettype => 'int4', proargtypes => 'int2',
@@ -3597,7 +3597,8 @@
 { oid => '8702', descr => 'gamma function',
 proname => 'gamma', prorettype => 'float8', proargtypes => 'float8',
 prosrc => 'dgamma' },
-{ oid => '8703', descr => 'natural logarithm of absolute value of gamma function',
+{ oid => '8703',
+descr => 'natural logarithm of absolute value of gamma function',
 proname => 'lgamma', prorettype => 'float8', proargtypes => 'float8',
 prosrc => 'dlgamma' },

@@ -9360,8 +9361,8 @@
 proname => 'to_json', provolatile => 's', prorettype => 'json',
 proargtypes => 'anyelement', prosrc => 'to_json' },
 { oid => '3261', descr => 'remove object fields with null values from json',
-proname => 'json_strip_nulls', prorettype => 'json', proargtypes => 'json bool',
-prosrc => 'json_strip_nulls' },
+proname => 'json_strip_nulls', prorettype => 'json',
+proargtypes => 'json bool', prosrc => 'json_strip_nulls' },

 { oid => '3947',
 proname => 'json_object_field', prorettype => 'json',
@@ -9483,17 +9484,19 @@
 proname => 'uuid_hash_extended', prorettype => 'int8',
 proargtypes => 'uuid int8', prosrc => 'uuid_hash_extended' },
 { oid => '3432', descr => 'generate random UUID',
-proname => 'gen_random_uuid', provolatile => 'v',
-prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' },
+proname => 'gen_random_uuid', provolatile => 'v', prorettype => 'uuid',
+proargtypes => '', prosrc => 'gen_random_uuid' },
 { oid => '9895', descr => 'generate UUID version 4',
-proname => 'uuidv4', provolatile => 'v',
-prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' },
+proname => 'uuidv4', provolatile => 'v', prorettype => 'uuid',
+proargtypes => '', prosrc => 'gen_random_uuid' },
 { oid => '9896', descr => 'generate UUID version 7',
-proname => 'uuidv7', provolatile => 'v',
-prorettype => 'uuid', proargtypes => '', prosrc => 'uuidv7' },
-{ oid => '9897', descr => 'generate UUID version 7 with a timestamp shifted by specified interval',
-proname => 'uuidv7', provolatile => 'v', proargnames => '{shift}',
-prorettype => 'uuid', proargtypes => 'interval', prosrc => 'uuidv7_interval' },
+proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid',
+proargtypes => '', prosrc => 'uuidv7' },
+{ oid => '9897',
+descr => 'generate UUID version 7 with a timestamp shifted by specified interval',
+proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid',
+proargtypes => 'interval', proargnames => '{shift}',
+prosrc => 'uuidv7_interval' },
 { oid => '6342', descr => 'extract timestamp from UUID',
 proname => 'uuid_extract_timestamp', proleakproof => 't',
 prorettype => 'timestamptz', proargtypes => 'uuid',
@@ -10299,8 +10302,8 @@
 prorettype => 'jsonb', proargtypes => '',
 prosrc => 'jsonb_build_object_noargs' },
 { oid => '3262', descr => 'remove object fields with null values from jsonb',
-proname => 'jsonb_strip_nulls', prorettype => 'jsonb', proargtypes => 'jsonb bool',
-prosrc => 'jsonb_strip_nulls' },
+proname => 'jsonb_strip_nulls', prorettype => 'jsonb',
+proargtypes => 'jsonb bool', prosrc => 'jsonb_strip_nulls' },

 { oid => '3478',
 proname => 'jsonb_object_field', prorettype => 'jsonb',
@@ -12508,34 +12511,22 @@
 proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}',
 prosrc => 'pg_get_wal_summarizer_state' },
 # Statistics Import
-{ oid => '8459',
-descr => 'restore statistics on relation',
-proname => 'pg_restore_relation_stats', provolatile => 'v', proisstrict => 'f',
-provariadic => 'any',
-proparallel => 'u', prorettype => 'bool',
-proargtypes => 'any',
-proargnames => '{kwargs}',
-proargmodes => '{v}',
-prosrc => 'pg_restore_relation_stats' },
-{ oid => '9160',
-descr => 'clear statistics on relation',
-proname => 'pg_clear_relation_stats', provolatile => 'v', proisstrict => 'f',
-proparallel => 'u', prorettype => 'void',
-proargtypes => 'text text',
-proargnames => '{schemaname,relname}',
-prosrc => 'pg_clear_relation_stats' },
-{ oid => '8461',
-descr => 'restore statistics on attribute',
-proname => 'pg_restore_attribute_stats', provolatile => 'v', proisstrict => 'f',
-provariadic => 'any',
-proparallel => 'u', prorettype => 'bool',
-proargtypes => 'any',
-proargnames => '{kwargs}',
-proargmodes => '{v}',
-prosrc => 'pg_restore_attribute_stats' },
-{ oid => '9162',
-descr => 'clear statistics on attribute',
-proname => 'pg_clear_attribute_stats', proisstrict => 'f', provolatile => 'v',
+{ oid => '8459', descr => 'restore statistics on relation',
+proname => 'pg_restore_relation_stats', provariadic => 'any',
+proisstrict => 'f', provolatile => 'v', proparallel => 'u',
+prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}',
+proargnames => '{kwargs}', prosrc => 'pg_restore_relation_stats' },
+{ oid => '9160', descr => 'clear statistics on relation',
+proname => 'pg_clear_relation_stats', proisstrict => 'f', provolatile => 'v',
+proparallel => 'u', prorettype => 'void', proargtypes => 'text text',
+proargnames => '{schemaname,relname}', prosrc => 'pg_clear_relation_stats' },
+{ oid => '8461', descr => 'restore statistics on attribute',
+proname => 'pg_restore_attribute_stats', provariadic => 'any',
+proisstrict => 'f', provolatile => 'v', proparallel => 'u',
+prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}',
+proargnames => '{kwargs}', prosrc => 'pg_restore_attribute_stats' },
+{ oid => '9162', descr => 'clear statistics on attribute',
+proname => 'pg_clear_attribute_stats', proisstrict => 'f', provolatile => 'v',
 proparallel => 'u', prorettype => 'void',
 proargtypes => 'text text text bool',
 proargnames => '{schemaname,relname,attname,inherited}',
@@ -12544,13 +12535,13 @@
 # GiST stratnum implementations
 { oid => '8047', descr => 'GiST support',
 proname => 'gist_translate_cmptype_common', prorettype => 'int2',
-proargtypes => 'int4',
-prosrc => 'gist_translate_cmptype_common' },
+proargtypes => 'int4', prosrc => 'gist_translate_cmptype_common' },

 # AIO related functions
 { oid => '9200', descr => 'information about in-progress asynchronous IOs',
 proname => 'pg_get_aios', prorows => '100', proretset => 't',
-provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '',
+provolatile => 'v', proparallel => 'r', prorettype => 'record',
+proargtypes => '',
 proallargtypes => '{int4,int4,int8,text,text,int8,int8,text,int2,int4,text,text,bool,bool,bool}',
 proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}',
 proargnames => '{pid,io_id,io_generation,state,operation,off,length,target,handle_data_len,raw_result,result,target_desc,f_sync,f_localmem,f_buffered}',
@@ -79,39 +79,40 @@ $node->start;
 # other tests are added to this file in the future
 $node->safe_psql('postgres', "CREATE DATABASE test_log_connections");

-my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;));
+my $log_connections =
+$node->safe_psql('test_log_connections', q(SHOW log_connections;));
 is($log_connections, 'on', qq(check log connections has expected value 'on'));

-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+'test_log_connections',
 qq(log_connections 'on' works as expected for backwards compatibility),
 log_like => [
 qr/connection received/,
 qr/connection authenticated/,
 qr/connection authorized: user=\S+ database=test_log_connections/,
 ],
-log_unlike => [
-qr/connection ready/,
-],);
+log_unlike => [ qr/connection ready/, ],);

-$node->safe_psql('test_log_connections',
+$node->safe_psql(
+'test_log_connections',
 q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations;
 SELECT pg_reload_conf();]);

-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+'test_log_connections',
 q(log_connections with subset of specified options logs only those aspects),
 log_like => [
 qr/connection received/,
 qr/connection authorized: user=\S+ database=test_log_connections/,
 qr/connection ready/,
 ],
-log_unlike => [
-qr/connection authenticated/,
-],);
+log_unlike => [ qr/connection authenticated/, ],);

 $node->safe_psql('test_log_connections',
 qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();));

-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+'test_log_connections',
 qq(log_connections 'all' logs all available connection aspects),
 log_like => [
 qr/connection received/,
@@ -53,7 +53,8 @@ for my $testname (@tests)
 $node->command_ok(
 [
 'libpq_pipeline', @extraargs,
-$testname, $node->connstr('postgres') . " max_protocol_version=latest"
+$testname,
+$node->connstr('postgres') . " max_protocol_version=latest"
 ],
 "libpq_pipeline $testname");

@@ -76,7 +77,8 @@ for my $testname (@tests)
 # test separately that it still works the old protocol version too.
 $node->command_ok(
 [
-'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0"
+'libpq_pipeline', 'cancel',
+$node->connstr('postgres') . " max_protocol_version=3.0"
 ],
 "libpq_pipeline cancel with protocol 3.0");

@@ -1123,7 +1123,8 @@ COMMIT;
 {
 # Create a corruption and then read the block without waiting for
 # completion.
-$psql_a->query(qq(
+$psql_a->query(
+qq(
 SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true);
 SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true)
 ));
@@ -1133,7 +1134,8 @@ SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>tru
 $psql_b,
 "$persistency: test completing read by other session doesn't generate warning",
 qq(SELECT count(*) > 0 FROM tbl_zero;),
-qr/^t$/, qr/^$/);
+qr/^t$/,
+qr/^$/);
 }

 # Clean up
@@ -1355,18 +1357,24 @@ SELECT modify_rel_block('tbl_cs_fail', 6, corrupt_checksum=>true);
 ));

 $psql->query_safe($invalidate_sql);
-psql_like($io_method, $psql,
+psql_like(
+$io_method,
+$psql,
 "reading block w/ wrong checksum with ignore_checksum_failure=off fails",
-$count_sql, qr/^$/, qr/ERROR: invalid page in block/);
+$count_sql,
+qr/^$/,
+qr/ERROR: invalid page in block/);

 $psql->query_safe("SET ignore_checksum_failure=on");

 $psql->query_safe($invalidate_sql);
-psql_like($io_method, $psql,
-"reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
-$count_sql,
-qr/^$expect$/,
-qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
+psql_like(
+$io_method,
+$psql,
+"reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
+$count_sql,
+qr/^$expect$/,
+qr/WARNING: ignoring (checksum failure|\d checksum failures)/);


 # Verify that ignore_checksum_failure=off works in multi-block reads
@@ -1432,19 +1440,22 @@ SELECT read_rel_block_ll('tbl_cs_fail', 1, nblocks=>5, zero_on_error=>true);),
 # file.

 $node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/,
-$log_location);
+$log_location);
 ok(1, "$io_method: found information about checksum failure in block 2");

-$node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
-$log_location);
+$node->wait_for_log(
+qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
+$log_location);
 ok(1, "$io_method: found information about invalid page in block 3");

-$node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
-$log_location);
+$node->wait_for_log(
+qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
+$log_location);
 ok(1, "$io_method: found information about checksum failure in block 4");

-$node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
-$log_location);
+$node->wait_for_log(
+qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
+$log_location);
 ok(1, "$io_method: found information about checksum failure in block 5");


@@ -1462,8 +1473,7 @@ SELECT modify_rel_block('tbl_cs_fail', 3, corrupt_checksum=>true, corrupt_header
 qq(
 SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);),
 qr/^$/,
-qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/
-);
+qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/);

 psql_like(
 $io_method,
@@ -68,7 +68,8 @@ sub connect_fails_wait
 my $log_location = -s $node->logfile;

 $node->connect_fails($connstr, $test_name, %params);
-$node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
+$node->wait_for_log(
+qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
 $log_location);
 ok(1, "$test_name: client backend process exited");
 }
@@ -941,8 +941,7 @@ is( $standby1->safe_psql(
 'synced slot retained on the new primary');

 # Commit the prepared transaction
-$standby1->safe_psql('postgres',
-"COMMIT PREPARED 'test_twophase_slotsync';");
+$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';");
 $standby1->wait_for_catchup('regress_mysub1');

 # Confirm that the prepared transaction is replicated to the subscriber
@@ -47,7 +47,7 @@ my $psql_primaryA =
 $node_primary->background_psql($test_db, on_error_stop => 1);

 # Long-running Primary Session B
-my $psql_primaryB =
+my $psql_primaryB =
 $node_primary->background_psql($test_db, on_error_stop => 1);

 # Our test relies on two rounds of index vacuuming for reasons elaborated
|
||||
# insert and delete enough rows that we force at least one round of index
|
||||
# vacuuming before getting to a dead tuple which was killed after the standby
|
||||
# is disconnected.
|
||||
$node_primary->safe_psql($test_db, qq[
|
||||
$node_primary->safe_psql(
|
||||
$test_db, qq[
|
||||
CREATE TABLE ${table1}(col1 int)
|
||||
WITH (autovacuum_enabled=false, fillfactor=10);
|
||||
INSERT INTO $table1 VALUES(7);
|
||||
@ -98,21 +99,24 @@ my $primary_lsn = $node_primary->lsn('flush');
|
||||
$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);
|
||||
|
||||
# Test that the WAL receiver is up and running.
|
||||
$node_replica->poll_query_until($test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
|
||||
$node_replica->poll_query_until(
|
||||
$test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
|
||||
|
||||
# Set primary_conninfo to something invalid on the replica and reload the
|
||||
# config. Once the config is reloaded, the startup process will force the WAL
|
||||
# receiver to restart and it will be unable to reconnect because of the
|
||||
# invalid connection information.
|
||||
$node_replica->safe_psql($test_db, qq[
|
||||
$node_replica->safe_psql(
|
||||
$test_db, qq[
|
||||
ALTER SYSTEM SET primary_conninfo = '';
|
||||
SELECT pg_reload_conf();
|
||||
]);
|
||||
|
||||
# Wait until the WAL receiver has shut down and been unable to start up again.
|
||||
$node_replica->poll_query_until($test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
|
||||
$node_replica->poll_query_until(
|
||||
$test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
|
||||
|
||||
# Now insert and update a tuple which will be visible to the vacuum on the
|
||||
# primary but which will have xmax newer than the oldest xmin on the standby
|
||||
@ -123,7 +127,7 @@ my $res = $psql_primaryA->query_safe(
|
||||
UPDATE $table1 SET col1 = 100 WHERE col1 = 99;
|
||||
SELECT 'after_update';
|
||||
]
|
||||
);
|
||||
);
|
||||
|
||||
# Make sure the UPDATE finished
|
||||
like($res, qr/^after_update$/m, "UPDATE occurred on primary session A");
|
||||
@ -148,7 +152,7 @@ $res = $psql_primaryB->query_safe(
|
||||
DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7;
|
||||
FETCH $primary_cursor1;
|
||||
]
|
||||
);
|
||||
);
|
||||
|
||||
is($res, 7, qq[Cursor query returned $res. Expected value 7.]);
|
||||
|
||||
@ -183,7 +187,8 @@ $psql_primaryA->{run}->pump_nb();
|
||||
# just waiting on the lock to start vacuuming. We don't want the standby to
|
||||
# re-establish a connection to the primary and push the horizon back until
|
||||
# we've saved initial values in GlobalVisState and calculated OldestXmin.
|
||||
$node_primary->poll_query_until($test_db,
|
||||
$node_primary->poll_query_until(
|
||||
$test_db,
|
||||
qq[
|
||||
SELECT count(*) >= 1 FROM pg_stat_activity
|
||||
WHERE pid = $vacuum_pid
|
||||
@ -192,8 +197,9 @@ $node_primary->poll_query_until($test_db,
|
||||
't');
|
||||
|
||||
# Ensure the WAL receiver is still not active on the replica.
|
||||
$node_replica->poll_query_until($test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
|
||||
$node_replica->poll_query_until(
|
||||
$test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
|
||||
|
||||
# Allow the WAL receiver connection to re-establish.
|
||||
$node_replica->safe_psql(
|
||||
@ -203,15 +209,17 @@ $node_replica->safe_psql(
|
||||
]);
|
||||
|
||||
# Ensure the new WAL receiver has connected.
|
||||
$node_replica->poll_query_until($test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
|
||||
$node_replica->poll_query_until(
|
||||
$test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
|
||||
|
||||
# Once the WAL sender is shown on the primary, the replica should have
|
||||
# connected with the primary and pushed the horizon backward. Primary Session
|
||||
# A won't see that until the VACUUM FREEZE proceeds and does its first round
|
||||
# of index vacuuming.
|
||||
$node_primary->poll_query_until($test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_replication);] , 't');
|
||||
$node_primary->poll_query_until(
|
||||
$test_db, qq[
|
||||
SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't');
|
||||
|
||||
# Move the cursor forward to the next 7. We inserted the 7 much later, so
|
||||
# advancing the cursor should allow vacuum to proceed vacuuming most pages of
|
||||
@ -225,20 +233,21 @@ is($res, 7,
|
||||
|
||||
# Prevent the test from incorrectly passing by confirming that we did indeed
|
||||
# do a pass of index vacuuming.
|
||||
$node_primary->poll_query_until($test_db, qq[
|
||||
$node_primary->poll_query_until(
|
||||
$test_db, qq[
|
||||
SELECT index_vacuum_count > 0
|
||||
FROM pg_stat_progress_vacuum
|
||||
WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass;
|
||||
] , 't');
|
||||
], 't');
|
||||
|
||||
# Commit the transaction with the open cursor so that the VACUUM can finish.
|
||||
$psql_primaryB->query_until(
|
||||
qr/^commit$/m,
|
||||
qq[
|
||||
qr/^commit$/m,
|
||||
qq[
|
||||
COMMIT;
|
||||
\\echo commit
|
||||
]
|
||||
);
|
||||
);
|
||||
|
||||
# VACUUM proceeds with pruning and does a visibility check on each tuple. In
|
||||
# older versions of Postgres, pruning found our final dead tuple
|
||||
@@ -252,7 +261,8 @@ $psql_primaryB->query_until(

 # With the fix, VACUUM should finish successfully, incrementing the table
 # vacuum_count.
-$node_primary->poll_query_until($test_db,
+$node_primary->poll_query_until(
+$test_db,
 qq[
 SELECT vacuum_count > 0
 FROM pg_stat_all_tables WHERE relname = '${table1}';
@@ -318,7 +318,8 @@ sub switch_server_cert
 $node->append_conf('sslconfig.conf', "ssl=on");
 $node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params));
 # use lists of ECDH curves and cipher suites for syntax testing
-$node->append_conf('sslconfig.conf', 'ssl_groups=X25519:prime256v1:secp521r1');
+$node->append_conf('sslconfig.conf',
+'ssl_groups=X25519:prime256v1:secp521r1');
 $node->append_conf('sslconfig.conf',
 'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256');

@@ -70,7 +70,8 @@ ok( $stderr =~
 );

 # Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+'postgres', qq[
 DROP PUBLICATION mypub;
 SELECT pg_drop_replication_slot('mysub');
 ]);
@@ -86,32 +87,38 @@ sub test_swap
 my ($table_name, $pubname, $appname) = @_;

 # Confirms tuples can be replicated
-$node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (1);");
+$node_publisher->safe_psql('postgres',
+"INSERT INTO $table_name VALUES (1);");
 $node_publisher->wait_for_catchup($appname);
 my $result =
-$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
-is($result, qq(1), 'check replication worked well before renaming a publication');
+$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
+is($result, qq(1),
+'check replication worked well before renaming a publication');

 # Swap the name of publications; $pubname <-> pub_empty
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+'postgres', qq[
 ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
 ALTER PUBLICATION pub_empty RENAME TO $pubname;
 ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
 ]);

 # Insert the data again
-$node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (2);");
+$node_publisher->safe_psql('postgres',
+"INSERT INTO $table_name VALUES (2);");
 $node_publisher->wait_for_catchup($appname);

 # Confirms the second tuple won't be replicated because $pubname does not
 # contains relations anymore.
 $result =
-$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name ORDER BY a");
+$node_subscriber->safe_psql('postgres',
+"SELECT a FROM $table_name ORDER BY a");
 is($result, qq(1),
 'check the tuple inserted after the RENAME was not replicated');

 # Restore the name of publications because it can be called several times
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+'postgres', qq[
 ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
 ALTER PUBLICATION pub_empty RENAME TO $pubname;
 ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
@@ -124,7 +131,8 @@ $node_publisher->safe_psql('postgres', $ddl);
 $node_subscriber->safe_psql('postgres', $ddl);

 # Create publications and a subscription
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+'postgres', qq[
 CREATE PUBLICATION pub_empty;
 CREATE PUBLICATION pub_for_tab FOR TABLE test1;
 CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES;
@@ -139,19 +147,20 @@ test_swap('test1', 'pub_for_tab', 'tap_sub');

 # Switches a publication which includes all tables
 $node_subscriber->safe_psql('postgres',
-"ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;"
-);
+"ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;");
 $node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');

 # Confirms RENAME command works well for ALL TABLES publication
 test_swap('test2', 'pub_for_all_tables', 'tap_sub');

 # Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+'postgres', qq[
 DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables;
 DROP TABLE test1, test2;
 ]);
-$node_subscriber->safe_psql('postgres', qq[
+$node_subscriber->safe_psql(
+'postgres', qq[
 DROP SUBSCRIPTION tap_sub;
 DROP TABLE test1, test2;
 ]);
@@ -51,8 +51,7 @@ $node_subscriber1->safe_psql('postgres',
 );
 # make a BRIN index to test aminsertcleanup logic in subscriber
 $node_subscriber1->safe_psql('postgres',
-"CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)"
-);
+"CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)");
 $node_subscriber1->safe_psql('postgres',
 "CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
 );
@@ -108,11 +108,12 @@ $node_publisher->poll_query_until('postgres',

 my $offset = -s $node_publisher->logfile;

-$node_publisher->safe_psql('postgres',"INSERT INTO tab_3 values(1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)");

 # Verify that a warning is logged.
 $node_publisher->wait_for_log(
-qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/, $offset);
+qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/,
+$offset);

 $node_publisher->safe_psql('postgres',
 "CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3");
@@ -128,10 +129,11 @@ $node_publisher->wait_for_catchup('tap_sub');

 # Verify that the insert operation gets replicated to subscriber after
 # publication is created.
-$result = $node_subscriber->safe_psql('postgres',
-"SELECT * FROM tab_3");
-is($result, qq(1
-2), 'check that the incremental data is replicated after the publication is created');
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3");
+is( $result, qq(1
+2),
+'check that the incremental data is replicated after the publication is created'
+);

 # shutdown
 $node_subscriber->stop('fast');
@@ -26,7 +26,8 @@ $node_publisher->safe_psql('postgres',
 "CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");

 $node_publisher->safe_psql('postgres',
-"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"
+);

 # Create same table on subscriber
 $node_subscriber->safe_psql('postgres',