
Run pgperltidy

This is required before the creation of a new branch.  pgindent is
clean, as is reformat-dat-files.

perltidy version is v20230309, as documented in pgindent's README.
Joe Conway
2025-06-29 21:14:21 -04:00
parent 66e9df9f6e
commit 0ebd242555
24 changed files with 362 additions and 290 deletions
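
For anyone repeating this pre-branch step, here is a minimal sketch, in Perl, of the tidy run the message describes. The perltidy version check and the wrapper path (src/tools/pgindent/pgperltidy, run from the top of a checkout) are assumptions drawn from the commit message and the pgindent README, not something this commit adds.

#!/usr/bin/perl
# Sketch only: re-run the tidy pass that produced this diff, refusing to
# proceed under any perltidy other than the pinned release.
use strict;
use warnings;

# The commit message pins perltidy v20230309; other releases may lay out
# the code differently and produce spurious diffs.
my $out = `perltidy --version 2>&1` // '';
my ($ver) = $out =~ /v(\d{8})/;
die "perltidy not found in PATH\n" unless defined $ver;
die "need perltidy v20230309, found v$ver\n" unless $ver eq '20230309';

# Wrapper script location in the PostgreSQL tree (assumed; run from the
# top of the source checkout).
system('src/tools/pgindent/pgperltidy') == 0
    or die "pgperltidy failed: exit status $?\n";
print "Perl files retidied; review with 'git diff' before committing.\n";

Pinning the exact formatter release matters for the same reason it does with pgindent: a different perltidy version would re-wrap lines across the whole tree and bury real changes in noise.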

==== changed file ====

@@ -54,20 +54,17 @@ sub invalid_entry_order_leaf_page_test
     $node->stop;

     my $blkno = 1;    # root

     # produce wrong order by replacing aaaaa with ccccc
-    string_replace_block(
-        $relpath,
-        'aaaaa',
-        'ccccc',
-        $blkno
-    );
+    string_replace_block($relpath, 'aaaaa', 'ccccc', $blkno);
     $node->start;

-    my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-    my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+    my ($result, $stdout, $stderr) =
+      $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+    my $expected =
+      "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";

     like($stderr, qr/$expected/);
 }

@@ -96,20 +93,17 @@ sub invalid_entry_order_inner_page_test
     $node->stop;

     my $blkno = 1;    # root

     # we have rrrrrrrrr... and tttttttttt... as keys in the root, so produce wrong order by replacing rrrrrrrrrr....
-    string_replace_block(
-        $relpath,
-        'rrrrrrrrrr',
-        'zzzzzzzzzz',
-        $blkno
-    );
+    string_replace_block($relpath, 'rrrrrrrrrr', 'zzzzzzzzzz', $blkno);
     $node->start;

-    my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-    my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+    my ($result, $stdout, $stderr) =
+      $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+    my $expected =
+      "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";

     like($stderr, qr/$expected/);
 }

@@ -129,7 +123,7 @@ sub invalid_entry_columns_order_test
     $node->stop;

     my $blkno = 1;    # root

     # mess column numbers
     # root items order before: (1,aaa), (2,bbb)

@@ -139,26 +133,18 @@ sub invalid_entry_columns_order_test
     my $find = qr/($attrno_1)(.)(aaa)/s;
     my $replace = $attrno_2 . '$2$3';
-    string_replace_block(
-        $relpath,
-        $find,
-        $replace,
-        $blkno
-    );
+    string_replace_block($relpath, $find, $replace, $blkno);
     $find = qr/($attrno_2)(.)(bbb)/s;
     $replace = $attrno_1 . '$2$3';
-    string_replace_block(
-        $relpath,
-        $find,
-        $replace,
-        $blkno
-    );
+    string_replace_block($relpath, $find, $replace, $blkno);
     $node->start;

-    my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-    my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+    my ($result, $stdout, $stderr) =
+      $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+    my $expected =
+      "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";

     like($stderr, qr/$expected/);
 }

@@ -183,20 +169,17 @@ sub inconsistent_with_parent_key__parent_key_corrupted_test
     $node->stop;

     my $blkno = 1;    # root

     # we have nnnnnnnnnn... as parent key in the root, so replace it with something smaller then child's keys
-    string_replace_block(
-        $relpath,
-        'nnnnnnnnnn',
-        'aaaaaaaaaa',
-        $blkno
-    );
+    string_replace_block($relpath, 'nnnnnnnnnn', 'aaaaaaaaaa', $blkno);
     $node->start;

-    my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-    my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3";
+    my ($result, $stdout, $stderr) =
+      $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+    my $expected =
+      "index \"$indexname\" has inconsistent records on page 3 offset 3";

     like($stderr, qr/$expected/);
 }

@@ -221,20 +204,17 @@ sub inconsistent_with_parent_key__child_key_corrupted_test
     $node->stop;

     my $blkno = 3;    # leaf

     # we have nnnnnnnnnn... as parent key in the root, so replace child key with something bigger
-    string_replace_block(
-        $relpath,
-        'nnnnnnnnnn',
-        'pppppppppp',
-        $blkno
-    );
+    string_replace_block($relpath, 'nnnnnnnnnn', 'pppppppppp', $blkno);
     $node->start;

-    my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-    my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3";
+    my ($result, $stdout, $stderr) =
+      $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+    my $expected =
+      "index \"$indexname\" has inconsistent records on page 3 offset 3";

     like($stderr, qr/$expected/);
 }

@@ -254,24 +234,21 @@ sub inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test
     $node->stop;

     my $blkno = 2;    # posting tree root

     # we have a posting tree for 'aaaaa' key with the root at 2nd block
     # and two leaf pages 3 and 4. replace 4th page's high key with (1,1)
     # so that there are tid's in leaf page that are larger then the new high key.
     my $find = pack('S*', 0, 4, 0) . '....';
     my $replace = pack('S*', 0, 4, 0, 1, 1);
-    string_replace_block(
-        $relpath,
-        $find,
-        $replace,
-        $blkno
-    );
+    string_replace_block($relpath, $find, $replace, $blkno);
     $node->start;

-    my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
-    my $expected = "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4";
+    my ($result, $stdout, $stderr) =
+      $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+    my $expected =
+      "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4";

     like($stderr, qr/$expected/);
 }

==== changed file ====

@@ -76,7 +76,8 @@ command_like(
     'checksums are enabled in control file');

 command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
-command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], '--no-sync-data-files');
+command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ],
+    '--no-sync-data-files');
 command_fails([ 'initdb', $datadir ], 'existing data directory');

 if ($supports_syncfs)

==== changed file ====

@@ -147,7 +147,8 @@ $node->command_ok(
     'slot with failover created');

 my $result = $node->safe_psql('postgres',
-    "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'");
+    "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'"
+);
 is($result, 't', "failover is enabled for the new slot");

 done_testing();

==== changed file ====

@@ -56,7 +56,7 @@ $primary->command_ok(
         '--pgdata' => $backup1path,
         '--no-sync',
         '--checkpoint' => 'fast',
         '--wal-method' => 'none'
     ],
     "full backup");

@@ -74,7 +74,7 @@ $primary->command_ok(
         '--pgdata' => $backup2path,
         '--no-sync',
         '--checkpoint' => 'fast',
         '--wal-method' => 'none',
         '--incremental' => $backup1path . '/backup_manifest'
     ],
     "incremental backup");

@@ -112,45 +112,45 @@ done_testing();
 # of the given data file.
 sub check_data_file
 {
     my ($data_file, $last_segment_nlinks) = @_;

     my @data_file_segments = ($data_file);

     # Start checking for additional segments
     my $segment_number = 1;

     while (1)
     {
         my $next_segment = $data_file . '.' . $segment_number;

         # If the file exists and is a regular file, add it to the list
         if (-f $next_segment)
         {
             push @data_file_segments, $next_segment;
             $segment_number++;
         }
         # Stop the loop if the file doesn't exist
         else
         {
             last;
         }
     }

     # All segments of the given data file should contain 2 hard links, except
     # for the last one, which should match the given number of links.
     my $last_segment = pop @data_file_segments;

     for my $segment (@data_file_segments)
     {
         # Get the file's stat information of each segment
         my $nlink_count = get_hard_link_count($segment);
         ok($nlink_count == 2, "File '$segment' has 2 hard links");
     }

     # Get the file's stat information of the last segment
     my $nlink_count = get_hard_link_count($last_segment);
     ok($nlink_count == $last_segment_nlinks,
         "File '$last_segment' has $last_segment_nlinks hard link(s)");
 }

@@ -159,11 +159,11 @@ sub check_data_file
 # that file.
 sub get_hard_link_count
 {
     my ($file) = @_;

     # Get file stats
     my @stats = stat($file);
     my $nlink = $stats[3];    # Number of hard links

     return $nlink;
 }

==== changed file ====

@@ -240,17 +240,20 @@ command_fails_like(
 command_fails_like(
     [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
     qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
-    'pg_restore: option --exclude-database cannot be used together with -g/--globals-only');
+    'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'
+);

 command_fails_like(
     [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ],
     qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/,
-    'When option --exclude-database is used in pg_restore with dump of pg_dump');
+    'When option --exclude-database is used in pg_restore with dump of pg_dump'
+);

 command_fails_like(
     [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ],
     qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/,
-    'When option --globals-only is not used in pg_restore with dump of pg_dump');
+    'When option --globals-only is not used in pg_restore with dump of pg_dump'
+);

 # also fails for -r and -t, but it seems pointless to add more tests for those.
 command_fails_like(

==== changed file ====

@@ -368,7 +368,7 @@ my %pgdump_runs = (
             '--data-only',
             '--superuser' => 'test_superuser',
             '--disable-triggers',
             '--verbose',    # no-op, just make sure it works
             'postgres',
         ],
     },

@@ -810,8 +810,7 @@ my %pgdump_runs = (
         dump_cmd => [
             'pg_dump', '--no-sync',
             "--file=$tempdir/no_schema.sql", '--no-schema',
-            '--with-statistics',
-            'postgres',
+            '--with-statistics', 'postgres',
         ],
     },);

==== changed file ====

@@ -294,17 +294,17 @@ my %pgdumpall_runs = (
             '--format' => 'directory',
             '--globals-only',
             '--file' => "$tempdir/dump_globals_only",
         ],
         restore_cmd => [
             'pg_restore', '-C', '--globals-only',
             '--format' => 'directory',
             '--file' => "$tempdir/dump_globals_only.sql",
             "$tempdir/dump_globals_only",
         ],
         like => qr/
             ^\s*\QCREATE ROLE dumpall;\E\s*\n
             /xm
-    }, );
+    },);

@@ -339,7 +339,8 @@ foreach my $run (sort keys %pgdumpall_runs)
     # pg_restore --file output file.
     my $output_file = slurp_file("$tempdir/${run}.sql");

-    if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike}))
+    if (   !($pgdumpall_runs{$run}->{like})
+        && !($pgdumpall_runs{$run}->{unlike}))
     {
         die "missing \"like\" or \"unlike\" in test \"$run\"";
     }

@@ -361,30 +362,38 @@ foreach my $run (sort keys %pgdumpall_runs)
 # Some negative test case with dump of pg_dumpall and restore using pg_restore
 # test case 1: when -C is not used in pg_restore with dump of pg_dumpall
 $node->command_fails_like(
-    [ 'pg_restore',
-        "$tempdir/format_custom",
-        '--format' => 'custom',
-        '--file' => "$tempdir/error_test.sql", ],
+    [
+        'pg_restore',
+        "$tempdir/format_custom",
+        '--format' => 'custom',
+        '--file' => "$tempdir/error_test.sql",
+    ],
     qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/,
     'When -C is not used in pg_restore with dump of pg_dumpall');

 # test case 2: When --list option is used with dump of pg_dumpall
 $node->command_fails_like(
-    [ 'pg_restore',
-        "$tempdir/format_custom", '-C',
-        '--format' => 'custom', '--list',
-        '--file' => "$tempdir/error_test.sql", ],
+    [
+        'pg_restore',
+        "$tempdir/format_custom", '-C',
+        '--format' => 'custom',
+        '--list',
+        '--file' => "$tempdir/error_test.sql",
+    ],
     qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/,
     'When --list is used in pg_restore with dump of pg_dumpall');

 # test case 3: When non-exist database is given with -d option
 $node->command_fails_like(
-    [ 'pg_restore',
-        "$tempdir/format_custom", '-C',
-        '--format' => 'custom',
-        '-d' => 'dbpq', ],
+    [
+        'pg_restore',
+        "$tempdir/format_custom", '-C',
+        '--format' => 'custom',
+        '-d' => 'dbpq',
+    ],
     qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
-    'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
+    'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'
+);

 $node->stop('fast');

==== changed file ====

@@ -285,7 +285,7 @@ sub run_pg_rewind
         # Check that pg_rewind with dbname and --write-recovery-conf
         # wrote the dbname in the generated primary_conninfo value.
         like(slurp_file("$primary_pgdata/postgresql.auto.conf"),
             qr/dbname=postgres/m, 'recovery conf file sets dbname');

         # Check that standby.signal is here as recovery configuration
         # was requested.

==== changed file ====

@@ -53,7 +53,8 @@ $old_sub->safe_psql('postgres',
 $old_sub->stop;

-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 0");
+$new_sub->append_conf('postgresql.conf',
+    "max_active_replication_origins = 0");

 # pg_upgrade will fail because the new cluster has insufficient
 # max_active_replication_origins.

@@ -80,7 +81,8 @@ command_checks_all(
 );

 # Reset max_active_replication_origins
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 10");
+$new_sub->append_conf('postgresql.conf',
+    "max_active_replication_origins = 10");

 # Cleanup
 $publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1");

==== changed file ====

@@ -13,7 +13,8 @@ sub test_mode
 {
     my ($mode) = @_;

-    my $old = PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
+    my $old =
+      PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
     my $new = PostgreSQL::Test::Cluster->new('new');

     # --swap can't be used to upgrade from versions older than 10, so just skip

@@ -40,9 +41,11 @@ sub test_mode
     # Create a small variety of simple test objects on the old cluster.  We'll
     # check that these reach the new version after upgrading.
     $old->start;
-    $old->safe_psql('postgres', "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
+    $old->safe_psql('postgres',
+        "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
     $old->safe_psql('postgres', "CREATE DATABASE testdb1");
-    $old->safe_psql('testdb1', "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
+    $old->safe_psql('testdb1',
+        "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
     $old->safe_psql('testdb1', "VACUUM FULL test2");
     $old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432");

@@ -51,10 +54,15 @@ sub test_mode
     if (defined($ENV{oldinstall}))
     {
         my $tblspc = PostgreSQL::Test::Utils::tempdir_short();
-        $old->safe_psql('postgres', "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
-        $old->safe_psql('postgres', "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
-        $old->safe_psql('postgres', "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)");
-        $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
+        $old->safe_psql('postgres',
+            "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
+        $old->safe_psql('postgres',
+            "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
+        $old->safe_psql('postgres',
+            "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)"
+        );
+        $old->safe_psql('testdb2',
+            "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
     }

     $old->stop;

@@ -90,9 +98,11 @@ sub test_mode
     # tablespace.
     if (defined($ENV{oldinstall}))
     {
-        $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
+        $result =
+          $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
         is($result, '102', "test3 data after pg_upgrade $mode");
-        $result = $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
+        $result =
+          $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
         is($result, '103', "test4 data after pg_upgrade $mode");
     }

     $new->stop;

==== changed file ====

@@ -238,62 +238,105 @@ $node->command_fails_like(
     'cannot use option --all and a dbname as argument at the same time');

 $node->safe_psql('postgres',
-    'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;');
+    'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;'
+);
 $node->issues_sql_like(
-    [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-only',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with missing stats');
 $node->issues_sql_unlike(
-    [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-only',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with no missing stats');

 $node->safe_psql('postgres',
-    'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));');
+    'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));'
+);
 $node->issues_sql_like(
-    [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-in-stages',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with missing index expression stats');
 $node->issues_sql_unlike(
-    [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-in-stages',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with no missing index expression stats');

 $node->safe_psql('postgres',
-    'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;');
+    'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;'
+);
 $node->issues_sql_like(
-    [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-only',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with missing extended stats');
 $node->issues_sql_unlike(
-    [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-only',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with no missing extended stats');

 $node->safe_psql('postgres',
     "CREATE TABLE regression_vacuumdb_child (a INT) INHERITS (regression_vacuumdb_test);\n"
     . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
     . "ANALYZE regression_vacuumdb_child;\n");
 $node->issues_sql_like(
-    [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-in-stages',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with missing inherited stats');
 $node->issues_sql_unlike(
-    [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-in-stages',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_test', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with no missing inherited stats');

 $node->safe_psql('postgres',
     "CREATE TABLE regression_vacuumdb_parted (a INT) PARTITION BY LIST (a);\n"
     . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
     . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
     . "ANALYZE regression_vacuumdb_part1;\n");
 $node->issues_sql_like(
-    [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-only',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_parted', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with missing partition stats');
 $node->issues_sql_unlike(
-    [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+    [
+        'vacuumdb', '--analyze-only',
+        '--missing-stats-only', '-t',
+        'regression_vacuumdb_parted', 'postgres'
+    ],
     qr/statement:\ ANALYZE/sx,
     '--missing-stats-only with no missing partition stats');

==== changed file ====

@@ -33,7 +33,8 @@
   descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
   collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6',
   colllocale => 'C.UTF-8', collversion => '1' },
-{ oid => '9535', descr => 'sorts by Unicode code point; Unicode character semantics',
+{ oid => '9535',
+  descr => 'sorts by Unicode code point; Unicode character semantics',
   collname => 'pg_unicode_fast', collprovider => 'b', collencoding => '6',
   colllocale => 'PG_UNICODE_FAST', collversion => '1' },

==== changed file ====

@@ -1190,14 +1190,14 @@
   proname => 'bytea', proleakproof => 't', prorettype => 'bytea',
   proargtypes => 'int8', prosrc => 'int8_bytea' },
 { oid => '8580', descr => 'convert bytea to int2',
-  proname => 'int2', prorettype => 'int2',
-  proargtypes => 'bytea', prosrc => 'bytea_int2' },
+  proname => 'int2', prorettype => 'int2', proargtypes => 'bytea',
+  prosrc => 'bytea_int2' },
 { oid => '8581', descr => 'convert bytea to int4',
-  proname => 'int4', prorettype => 'int4',
-  proargtypes => 'bytea', prosrc => 'bytea_int4' },
+  proname => 'int4', prorettype => 'int4', proargtypes => 'bytea',
+  prosrc => 'bytea_int4' },
 { oid => '8582', descr => 'convert bytea to int8',
-  proname => 'int8', prorettype => 'int8',
-  proargtypes => 'bytea', prosrc => 'bytea_int8' },
+  proname => 'int8', prorettype => 'int8', proargtypes => 'bytea',
+  prosrc => 'bytea_int8' },
 { oid => '449', descr => 'hash',
   proname => 'hashint2', prorettype => 'int4', proargtypes => 'int2',

@@ -3597,7 +3597,8 @@
 { oid => '8702', descr => 'gamma function',
   proname => 'gamma', prorettype => 'float8', proargtypes => 'float8',
   prosrc => 'dgamma' },
-{ oid => '8703', descr => 'natural logarithm of absolute value of gamma function',
+{ oid => '8703',
+  descr => 'natural logarithm of absolute value of gamma function',
   proname => 'lgamma', prorettype => 'float8', proargtypes => 'float8',
   prosrc => 'dlgamma' },

@@ -9360,8 +9361,8 @@
   proname => 'to_json', provolatile => 's', prorettype => 'json',
   proargtypes => 'anyelement', prosrc => 'to_json' },
 { oid => '3261', descr => 'remove object fields with null values from json',
-  proname => 'json_strip_nulls', prorettype => 'json', proargtypes => 'json bool',
-  prosrc => 'json_strip_nulls' },
+  proname => 'json_strip_nulls', prorettype => 'json',
+  proargtypes => 'json bool', prosrc => 'json_strip_nulls' },
 { oid => '3947',
   proname => 'json_object_field', prorettype => 'json',

@@ -9483,17 +9484,19 @@
   proname => 'uuid_hash_extended', prorettype => 'int8',
   proargtypes => 'uuid int8', prosrc => 'uuid_hash_extended' },
 { oid => '3432', descr => 'generate random UUID',
-  proname => 'gen_random_uuid', provolatile => 'v',
-  prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' },
+  proname => 'gen_random_uuid', provolatile => 'v', prorettype => 'uuid',
+  proargtypes => '', prosrc => 'gen_random_uuid' },
 { oid => '9895', descr => 'generate UUID version 4',
-  proname => 'uuidv4', provolatile => 'v',
-  prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' },
+  proname => 'uuidv4', provolatile => 'v', prorettype => 'uuid',
+  proargtypes => '', prosrc => 'gen_random_uuid' },
 { oid => '9896', descr => 'generate UUID version 7',
-  proname => 'uuidv7', provolatile => 'v',
-  prorettype => 'uuid', proargtypes => '', prosrc => 'uuidv7' },
-{ oid => '9897', descr => 'generate UUID version 7 with a timestamp shifted by specified interval',
-  proname => 'uuidv7', provolatile => 'v', proargnames => '{shift}',
-  prorettype => 'uuid', proargtypes => 'interval', prosrc => 'uuidv7_interval' },
+  proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid',
+  proargtypes => '', prosrc => 'uuidv7' },
+{ oid => '9897',
+  descr => 'generate UUID version 7 with a timestamp shifted by specified interval',
+  proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid',
+  proargtypes => 'interval', proargnames => '{shift}',
+  prosrc => 'uuidv7_interval' },
 { oid => '6342', descr => 'extract timestamp from UUID',
   proname => 'uuid_extract_timestamp', proleakproof => 't',
   prorettype => 'timestamptz', proargtypes => 'uuid',

@@ -10299,8 +10302,8 @@
   prorettype => 'jsonb', proargtypes => '',
   prosrc => 'jsonb_build_object_noargs' },
 { oid => '3262', descr => 'remove object fields with null values from jsonb',
-  proname => 'jsonb_strip_nulls', prorettype => 'jsonb', proargtypes => 'jsonb bool',
-  prosrc => 'jsonb_strip_nulls' },
+  proname => 'jsonb_strip_nulls', prorettype => 'jsonb',
+  proargtypes => 'jsonb bool', prosrc => 'jsonb_strip_nulls' },
 { oid => '3478',
   proname => 'jsonb_object_field', prorettype => 'jsonb',

@@ -12508,34 +12511,22 @@
   proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}',
   prosrc => 'pg_get_wal_summarizer_state' },

 # Statistics Import
-{ oid => '8459',
-  descr => 'restore statistics on relation',
-  proname => 'pg_restore_relation_stats', provolatile => 'v', proisstrict => 'f',
-  provariadic => 'any',
-  proparallel => 'u', prorettype => 'bool',
-  proargtypes => 'any',
-  proargnames => '{kwargs}',
-  proargmodes => '{v}',
-  prosrc => 'pg_restore_relation_stats' },
-{ oid => '9160',
-  descr => 'clear statistics on relation',
-  proname => 'pg_clear_relation_stats', provolatile => 'v', proisstrict => 'f',
-  proparallel => 'u', prorettype => 'void',
-  proargtypes => 'text text',
-  proargnames => '{schemaname,relname}',
-  prosrc => 'pg_clear_relation_stats' },
-{ oid => '8461',
-  descr => 'restore statistics on attribute',
-  proname => 'pg_restore_attribute_stats', provolatile => 'v', proisstrict => 'f',
-  provariadic => 'any',
-  proparallel => 'u', prorettype => 'bool',
-  proargtypes => 'any',
-  proargnames => '{kwargs}',
-  proargmodes => '{v}',
-  prosrc => 'pg_restore_attribute_stats' },
-{ oid => '9162',
-  descr => 'clear statistics on attribute',
-  proname => 'pg_clear_attribute_stats', provolatile => 'v', proisstrict => 'f',
+{ oid => '8459', descr => 'restore statistics on relation',
+  proname => 'pg_restore_relation_stats', provariadic => 'any',
+  proisstrict => 'f', provolatile => 'v', proparallel => 'u',
+  prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}',
+  proargnames => '{kwargs}', prosrc => 'pg_restore_relation_stats' },
+{ oid => '9160', descr => 'clear statistics on relation',
+  proname => 'pg_clear_relation_stats', proisstrict => 'f', provolatile => 'v',
+  proparallel => 'u', prorettype => 'void', proargtypes => 'text text',
+  proargnames => '{schemaname,relname}', prosrc => 'pg_clear_relation_stats' },
+{ oid => '8461', descr => 'restore statistics on attribute',
+  proname => 'pg_restore_attribute_stats', provariadic => 'any',
+  proisstrict => 'f', provolatile => 'v', proparallel => 'u',
+  prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}',
+  proargnames => '{kwargs}', prosrc => 'pg_restore_attribute_stats' },
+{ oid => '9162', descr => 'clear statistics on attribute',
+  proname => 'pg_clear_attribute_stats', proisstrict => 'f', provolatile => 'v',
   proparallel => 'u', prorettype => 'void',
   proargtypes => 'text text text bool',
   proargnames => '{schemaname,relname,attname,inherited}',

@@ -12544,13 +12535,13 @@
 # GiST stratnum implementations
 { oid => '8047', descr => 'GiST support',
   proname => 'gist_translate_cmptype_common', prorettype => 'int2',
-  proargtypes => 'int4',
-  prosrc => 'gist_translate_cmptype_common' },
+  proargtypes => 'int4', prosrc => 'gist_translate_cmptype_common' },

 # AIO related functions
 { oid => '9200', descr => 'information about in-progress asynchronous IOs',
   proname => 'pg_get_aios', prorows => '100', proretset => 't',
-  provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '',
+  provolatile => 'v', proparallel => 'r', prorettype => 'record',
+  proargtypes => '',
   proallargtypes => '{int4,int4,int8,text,text,int8,int8,text,int2,int4,text,text,bool,bool,bool}',
   proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}',
   proargnames => '{pid,io_id,io_generation,state,operation,off,length,target,handle_data_len,raw_result,result,target_desc,f_sync,f_localmem,f_buffered}',

==== changed file ====

@@ -79,39 +79,40 @@ $node->start;
 # other tests are added to this file in the future
 $node->safe_psql('postgres', "CREATE DATABASE test_log_connections");

-my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;));
+my $log_connections =
+  $node->safe_psql('test_log_connections', q(SHOW log_connections;));
 is($log_connections, 'on', qq(check log connections has expected value 'on'));

-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+    'test_log_connections',
     qq(log_connections 'on' works as expected for backwards compatibility),
     log_like => [
         qr/connection received/,
         qr/connection authenticated/,
         qr/connection authorized: user=\S+ database=test_log_connections/,
     ],
-    log_unlike => [
-        qr/connection ready/,
-    ],);
+    log_unlike => [ qr/connection ready/, ],);

-$node->safe_psql('test_log_connections',
+$node->safe_psql(
+    'test_log_connections',
     q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations;
     SELECT pg_reload_conf();]);

-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+    'test_log_connections',
     q(log_connections with subset of specified options logs only those aspects),
     log_like => [
         qr/connection received/,
         qr/connection authorized: user=\S+ database=test_log_connections/,
         qr/connection ready/,
     ],
-    log_unlike => [
-        qr/connection authenticated/,
-    ],);
+    log_unlike => [ qr/connection authenticated/, ],);

 $node->safe_psql('test_log_connections',
     qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();));

-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+    'test_log_connections',
     qq(log_connections 'all' logs all available connection aspects),
     log_like => [
         qr/connection received/,

==== changed file ====

@@ -53,7 +53,8 @@ for my $testname (@tests)
     $node->command_ok(
         [
             'libpq_pipeline', @extraargs,
-            $testname, $node->connstr('postgres') . " max_protocol_version=latest"
+            $testname,
+            $node->connstr('postgres') . " max_protocol_version=latest"
         ],
         "libpq_pipeline $testname");

@@ -76,7 +77,8 @@ for my $testname (@tests)
 # test separately that it still works the old protocol version too.
 $node->command_ok(
     [
-        'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0"
+        'libpq_pipeline', 'cancel',
+        $node->connstr('postgres') . " max_protocol_version=3.0"
     ],
     "libpq_pipeline cancel with protocol 3.0");

==== changed file ====

@@ -1123,7 +1123,8 @@ COMMIT;
 {
     # Create a corruption and then read the block without waiting for
     # completion.
-    $psql_a->query(qq(
+    $psql_a->query(
+        qq(
 SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true);
 SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true)
 ));

@@ -1133,7 +1134,8 @@ SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>tru
         $psql_b,
         "$persistency: test completing read by other session doesn't generate warning",
         qq(SELECT count(*) > 0 FROM tbl_zero;),
-        qr/^t$/, qr/^$/);
+        qr/^t$/,
+        qr/^$/);
 }

 # Clean up

@@ -1355,18 +1357,24 @@ SELECT modify_rel_block('tbl_cs_fail', 6, corrupt_checksum=>true);
 ));

     $psql->query_safe($invalidate_sql);

-    psql_like($io_method, $psql,
+    psql_like(
+        $io_method,
+        $psql,
         "reading block w/ wrong checksum with ignore_checksum_failure=off fails",
-        $count_sql, qr/^$/, qr/ERROR: invalid page in block/);
+        $count_sql,
+        qr/^$/,
+        qr/ERROR: invalid page in block/);

     $psql->query_safe("SET ignore_checksum_failure=on");

     $psql->query_safe($invalidate_sql);

-    psql_like($io_method, $psql,
-        "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
-        $count_sql,
-        qr/^$expect$/,
-        qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
+    psql_like(
+        $io_method,
+        $psql,
+        "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
+        $count_sql,
+        qr/^$expect$/,
+        qr/WARNING: ignoring (checksum failure|\d checksum failures)/);

 # Verify that ignore_checksum_failure=off works in multi-block reads

@@ -1432,19 +1440,22 @@ SELECT read_rel_block_ll('tbl_cs_fail', 1, nblocks=>5, zero_on_error=>true);),
 # file.
     $node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/,
         $log_location);
     ok(1, "$io_method: found information about checksum failure in block 2");

-    $node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
-        $log_location);
+    $node->wait_for_log(
+        qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
+        $log_location);
     ok(1, "$io_method: found information about invalid page in block 3");

-    $node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
-        $log_location);
+    $node->wait_for_log(
+        qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
+        $log_location);
     ok(1, "$io_method: found information about checksum failure in block 4");

-    $node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
-        $log_location);
+    $node->wait_for_log(
+        qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
+        $log_location);
     ok(1, "$io_method: found information about checksum failure in block 5");

@@ -1462,8 +1473,7 @@ SELECT modify_rel_block('tbl_cs_fail', 3, corrupt_checksum=>true, corrupt_header
         qq(
 SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);),
         qr/^$/,
-        qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/
-    );
+        qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/);

     psql_like(
         $io_method,

==== changed file ====

@@ -68,7 +68,8 @@ sub connect_fails_wait
     my $log_location = -s $node->logfile;

     $node->connect_fails($connstr, $test_name, %params);
-    $node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
+    $node->wait_for_log(
+        qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
         $log_location);
     ok(1, "$test_name: client backend process exited");
 }

==== changed file ====

@@ -941,8 +941,7 @@ is( $standby1->safe_psql(
     'synced slot retained on the new primary');

 # Commit the prepared transaction
-$standby1->safe_psql('postgres',
-    "COMMIT PREPARED 'test_twophase_slotsync';");
+$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';");
 $standby1->wait_for_catchup('regress_mysub1');

 # Confirm that the prepared transaction is replicated to the subscriber

==== changed file ====

@@ -47,7 +47,7 @@ my $psql_primaryA =
   $node_primary->background_psql($test_db, on_error_stop => 1);

 # Long-running Primary Session B
 my $psql_primaryB =
   $node_primary->background_psql($test_db, on_error_stop => 1);

 # Our test relies on two rounds of index vacuuming for reasons elaborated

@@ -81,7 +81,8 @@ my $nrows = 2000;
 # insert and delete enough rows that we force at least one round of index
 # vacuuming before getting to a dead tuple which was killed after the standby
 # is disconnected.
-$node_primary->safe_psql($test_db, qq[
+$node_primary->safe_psql(
+    $test_db, qq[
     CREATE TABLE ${table1}(col1 int)
         WITH (autovacuum_enabled=false, fillfactor=10);
     INSERT INTO $table1 VALUES(7);

@@ -98,21 +99,24 @@ my $primary_lsn = $node_primary->lsn('flush');
 $node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);

 # Test that the WAL receiver is up and running.
-$node_replica->poll_query_until($test_db, qq[
-    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
+$node_replica->poll_query_until(
+    $test_db, qq[
+    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');

 # Set primary_conninfo to something invalid on the replica and reload the
 # config. Once the config is reloaded, the startup process will force the WAL
 # receiver to restart and it will be unable to reconnect because of the
 # invalid connection information.
-$node_replica->safe_psql($test_db, qq[
+$node_replica->safe_psql(
+    $test_db, qq[
     ALTER SYSTEM SET primary_conninfo = '';
     SELECT pg_reload_conf();
 ]);

 # Wait until the WAL receiver has shut down and been unable to start up again.
-$node_replica->poll_query_until($test_db, qq[
-    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
+$node_replica->poll_query_until(
+    $test_db, qq[
+    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');

@@ -123,7 +127,7 @@ my $res = $psql_primaryA->query_safe(
     UPDATE $table1 SET col1 = 100 WHERE col1 = 99;
     SELECT 'after_update';
     ]
 );

 # Make sure the UPDATE finished
 like($res, qr/^after_update$/m, "UPDATE occurred on primary session A");

@@ -148,7 +152,7 @@ $res = $psql_primaryB->query_safe(
     DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7;
     FETCH $primary_cursor1;
     ]
 );

 is($res, 7, qq[Cursor query returned $res. Expected value 7.]);

@@ -183,7 +187,8 @@ $psql_primaryA->{run}->pump_nb();
 # just waiting on the lock to start vacuuming. We don't want the standby to
 # re-establish a connection to the primary and push the horizon back until
 # we've saved initial values in GlobalVisState and calculated OldestXmin.
-$node_primary->poll_query_until($test_db,
+$node_primary->poll_query_until(
+    $test_db,
     qq[
     SELECT count(*) >= 1 FROM pg_stat_activity
     WHERE pid = $vacuum_pid

@@ -192,8 +197,9 @@ $node_primary->poll_query_until($test_db,
     't');

 # Ensure the WAL receiver is still not active on the replica.
-$node_replica->poll_query_until($test_db, qq[
-    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
+$node_replica->poll_query_until(
+    $test_db, qq[
+    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');

 # Allow the WAL receiver connection to re-establish.
 $node_replica->safe_psql(

@@ -203,15 +209,17 @@ $node_replica->safe_psql(
 ]);

 # Ensure the new WAL receiver has connected.
-$node_replica->poll_query_until($test_db, qq[
-    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
+$node_replica->poll_query_until(
+    $test_db, qq[
+    SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');

 # Once the WAL sender is shown on the primary, the replica should have
 # connected with the primary and pushed the horizon backward. Primary Session
 # A won't see that until the VACUUM FREEZE proceeds and does its first round
 # of index vacuuming.
-$node_primary->poll_query_until($test_db, qq[
-    SELECT EXISTS (SELECT * FROM pg_stat_replication);] , 't');
+$node_primary->poll_query_until(
+    $test_db, qq[
+    SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't');

 # Move the cursor forward to the next 7. We inserted the 7 much later, so
 # advancing the cursor should allow vacuum to proceed vacuuming most pages of

@@ -225,20 +233,21 @@ is($res, 7,
 # Prevent the test from incorrectly passing by confirming that we did indeed
 # do a pass of index vacuuming.
-$node_primary->poll_query_until($test_db, qq[
+$node_primary->poll_query_until(
+    $test_db, qq[
     SELECT index_vacuum_count > 0
     FROM pg_stat_progress_vacuum
     WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass;
-    ] , 't');
+    ], 't');

 # Commit the transaction with the open cursor so that the VACUUM can finish.
 $psql_primaryB->query_until(
     qr/^commit$/m,
     qq[
     COMMIT;
     \\echo commit
     ]
 );

 # VACUUM proceeds with pruning and does a visibility check on each tuple. In
 # older versions of Postgres, pruning found our final dead tuple

@@ -252,7 +261,8 @@ $psql_primaryB->query_until(
 # With the fix, VACUUM should finish successfully, incrementing the table
 # vacuum_count.
-$node_primary->poll_query_until($test_db,
+$node_primary->poll_query_until(
+    $test_db,
     qq[
     SELECT vacuum_count > 0
     FROM pg_stat_all_tables WHERE relname = '${table1}';

View File


@@ -318,7 +318,8 @@ sub switch_server_cert
$node->append_conf('sslconfig.conf', "ssl=on");
$node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params));

# use lists of ECDH curves and cipher suites for syntax testing
$node->append_conf('sslconfig.conf',
	'ssl_groups=X25519:prime256v1:secp521r1');
$node->append_conf('sslconfig.conf',
	'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256');
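
# Hypothetical follow-up check, not in the commit: once the server is running
# with this configuration, the accepted list values are visible through SHOW.
is($node->safe_psql('postgres', 'SHOW ssl_groups'),
	'X25519:prime256v1:secp521r1',
	'ssl_groups list was accepted');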

@@ -70,7 +70,8 @@ ok( $stderr =~
);

# Cleanup
$node_publisher->safe_psql(
	'postgres', qq[
	DROP PUBLICATION mypub;
	SELECT pg_drop_replication_slot('mysub');
]);

@@ -86,32 +87,38 @@ sub test_swap
my ($table_name, $pubname, $appname) = @_;

# Confirms tuples can be replicated
$node_publisher->safe_psql('postgres',
	"INSERT INTO $table_name VALUES (1);");
$node_publisher->wait_for_catchup($appname);
my $result =
	$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
is($result, qq(1),
	'check replication worked well before renaming a publication');

# Swap the name of publications; $pubname <-> pub_empty
$node_publisher->safe_psql(
	'postgres', qq[
	ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
	ALTER PUBLICATION pub_empty RENAME TO $pubname;
	ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
]);
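
# The rename dance above is the classic three-step swap through a temporary
# name; generically (hypothetical object names, for illustration only):
#   ALTER PUBLICATION a   RENAME TO tmp;
#   ALTER PUBLICATION b   RENAME TO a;
#   ALTER PUBLICATION tmp RENAME TO b;
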
# Insert the data again
$node_publisher->safe_psql('postgres',
	"INSERT INTO $table_name VALUES (2);");
$node_publisher->wait_for_catchup($appname);

# Confirms the second tuple won't be replicated because $pubname does not
# contain relations anymore.
$result =
	$node_subscriber->safe_psql('postgres',
		"SELECT a FROM $table_name ORDER BY a");
is($result, qq(1),
	'check the tuple inserted after the RENAME was not replicated');

# Restore the publication names because this routine can be called several
# times
$node_publisher->safe_psql(
	'postgres', qq[
	ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
	ALTER PUBLICATION pub_empty RENAME TO $pubname;
	ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;

@@ -124,7 +131,8 @@ $node_publisher->safe_psql('postgres', $ddl);
$node_subscriber->safe_psql('postgres', $ddl);

# Create publications and a subscription
$node_publisher->safe_psql(
	'postgres', qq[
	CREATE PUBLICATION pub_empty;
	CREATE PUBLICATION pub_for_tab FOR TABLE test1;
	CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES;

@@ -139,19 +147,20 @@ test_swap('test1', 'pub_for_tab', 'tap_sub');
# Switch to a publication which includes all tables
$node_subscriber->safe_psql('postgres',
	"ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;");
$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');

# Confirms the RENAME command works well for an ALL TABLES publication
test_swap('test2', 'pub_for_all_tables', 'tap_sub');

# Cleanup
$node_publisher->safe_psql(
	'postgres', qq[
	DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables;
	DROP TABLE test1, test2;
]);
$node_subscriber->safe_psql(
	'postgres', qq[
	DROP SUBSCRIPTION tap_sub;
	DROP TABLE test1, test2;
]);

@@ -51,8 +51,7 @@ $node_subscriber1->safe_psql('postgres',
);

# make a BRIN index to test aminsertcleanup logic in the subscriber
$node_subscriber1->safe_psql('postgres',
	"CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)");
$node_subscriber1->safe_psql('postgres',
	"CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
);

@@ -108,11 +108,12 @@ $node_publisher->poll_query_until('postgres',
my $offset = -s $node_publisher->logfile;

$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)");

# Verify that a warning is logged.
$node_publisher->wait_for_log(
	qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/,
	$offset);

$node_publisher->safe_psql('postgres',
	"CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3");

@@ -128,10 +129,11 @@ $node_publisher->wait_for_catchup('tap_sub');
# Verify that the insert operation gets replicated to the subscriber after
# the publication is created.
$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3");
is( $result, qq(1
2),
	'check that the incremental data is replicated after the publication is created'
);
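
# Worth noting (general TAP-test behavior): safe_psql returns the result rows
# newline-joined, so the two-row result above compares equal to qq(1\n2).
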
# shutdown
$node_subscriber->stop('fast');

@@ -26,7 +26,8 @@ $node_publisher->safe_psql('postgres',
	"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
$node_publisher->safe_psql('postgres',
	"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"
);

# Create the same table on the subscriber
$node_subscriber->safe_psql('postgres',