Mirror of https://github.com/postgres/postgres.git
Improve grammar of options for command arrays in TAP tests
This commit rewrites a good chunk of the command arrays in TAP tests with a grammar based on the following rules:

- Fat commas are used between option names and their values, making it clear to both humans and perltidy that values and names are bound together. This is particularly useful for the readability of multi-line command arrays, and there are plenty of them in the TAP tests. Most of the test code is updated to use this style. Some commands used parentheses to show the link, or attached values and options in a single string. These are updated to use fat commas instead.

- Option names are switched to use their long names, making them more self-documented. Based on a suggestion by Andrew Dunstan.

- Add some trailing commas after the last item in multi-line arrays, which is a common Perl style.

Not all the places are taken care of, but this covers a very good chunk of them.

Author: Dagfinn Ilmari Mannsåker
Reviewed-by: Michael Paquier, Peter Smith, Euler Taveira
Discussion: https://postgr.es/m/87jzc46d8u.fsf@wibble.ilmari.org
Parent: 4a0e7314f1
Commit: ce1b0f9da0
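As background for the style the commit adopts, the sketch below is an illustrative example only (it is not taken from the patch itself; the paths and option values are made up). It shows the same command array in the old short-option spelling and in the fat-comma spelling; to Perl a fat comma is just a comma that quotes a bareword on its left, so both arrays flatten to the identical argument list when handed to IPC::Run or system().

    use strict;
    use warnings;

    # Old style: short options, with names and values as separate list elements.
    my @cmd_old = ('pg_basebackup', '-D', '/tmp/backup', '-X', 'fetch');

    # New style: long option names, fat commas binding each value to its option,
    # and a trailing comma after the last item of the multi-line array.
    my @cmd_new = (
        'pg_basebackup',
        '--pgdata' => '/tmp/backup',
        '--wal-method' => 'fetch',
    );

    # Both spellings produce the same flat argument list.
    print join(' ', @cmd_old), "\n";
    print join(' ', @cmd_new), "\n";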
@@ -25,7 +25,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
-	'auth_extra' => [ '--create-role', 'backupuser' ]);
+	'auth_extra' => [ '--create-role' => 'backupuser' ]);
@@ -37,15 +37,19 @@ $node->safe_psql('postgres', 'CREATE ROLE trustworthy');
-my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
+my @pg_basebackup_defs =
+  ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast');
-my @pg_basebackup_cmd = (@pg_basebackup_defs, '-U', 'backupuser', '-Xfetch');
+my @pg_basebackup_cmd = (
+	@pg_basebackup_defs,
+	'--username' => 'backupuser',
+	'--wal-method' => 'fetch');
-	[ @pg_basebackup_cmd, '--target', 'shell' ],
+	[ @pg_basebackup_cmd, '--target' => 'shell' ],
@@ -64,13 +68,13 @@ $node->reload();
-	[ @pg_basebackup_cmd, '--target', 'shell' ],
+	[ @pg_basebackup_cmd, '--target' => 'shell' ],
-	[ @pg_basebackup_cmd, '--target', 'shell:foo' ],
+	[ @pg_basebackup_cmd, '--target' => 'shell:foo' ],
@@ -87,19 +91,19 @@ $node->reload();
-	[ @pg_basebackup_cmd, '--target', 'shell' ],
+	[ @pg_basebackup_cmd, '--target' => 'shell' ],
-	[ @pg_basebackup_cmd, '--target', 'shell' ],
+	[ @pg_basebackup_cmd, '--target' => 'shell' ],
-$node->command_ok([ @pg_basebackup_cmd, '--target', 'shell:bar' ],
+$node->command_ok([ @pg_basebackup_cmd, '--target' => 'shell:bar' ],
@@ -133,9 +137,11 @@ sub verify_backup
-		'pg_verifybackup', '-n',
-		'-m', "${backup_dir}/${prefix}backup_manifest",
-		'-e', $extract_path
+		'pg_verifybackup',
+		'--no-parse-wal',
+		'--manifest-path' => "${backup_dir}/${prefix}backup_manifest",
+		'--exit-on-error',
+		$extract_path
@@ -22,21 +22,19 @@ program_help_ok('initdb');
-command_fails([ 'initdb', '-S', "$tempdir/nonexistent" ],
+command_fails([ 'initdb', '--sync-only', "$tempdir/nonexistent" ],
-command_fails(
-	[ 'initdb', '-X', $xlogdir, $datadir ],
+command_fails([ 'initdb', '--waldir' => $xlogdir, $datadir ],
-	[ 'initdb', '-X', 'pgxlog', $datadir ],
+	[ 'initdb', '--waldir' => 'pgxlog', $datadir ],
-command_fails(
-	[ 'initdb', '-U', 'pg_test', $datadir ],
+command_fails([ 'initdb', '--username' => 'pg_test', $datadir ],
@@ -49,12 +47,15 @@ mkdir $datadir;
-# while we are here, also exercise -T and -c options
+# while we are here, also exercise --text-search-config and --set options
-		'initdb', '-N', '-T', 'german', '-c',
-		'default_text_search_config=german',
-		'-X', $xlogdir, $datadir
+		'initdb',
+		'--no-sync',
+		'--text-search-config' => 'german',
+		'--set' => 'default_text_search_config=german',
+		'--waldir' => $xlogdir,
+		$datadir
@@ -75,17 +76,19 @@ command_like(
-command_ok([ 'initdb', '-S', $datadir ], 'sync only');
+command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
-	command_ok([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ],
+	command_ok(
+		[ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ],
-	command_fails([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ],
+	command_fails(
+		[ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ],
@@ -126,7 +129,7 @@ if ($ENV{with_icu} eq 'yes')
-		'-A', 'trust',
+		'-A' => 'trust',
@@ -246,7 +249,8 @@ command_fails(
-command_fails([ 'initdb', '--no-sync', '--set', 'foo=bar', "$tempdir/dataX" ],
+command_fails(
+	[ 'initdb', '--no-sync', '--set' => 'foo=bar', "$tempdir/dataX" ],
@@ -279,7 +283,7 @@ command_like(
-	[ 'pg_checksums', '-D', $datadir_nochecksums ],
+	[ 'pg_checksums', '--pgdata' => $datadir_nochecksums ],
@@ -30,7 +30,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'qqq', '-d', 'postgres' ],
+	[ 'pg_amcheck', '--database' => 'qqq', '--database' => 'postgres' ],
@@ -38,7 +38,12 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '--no-strict-names', '-d', 'qqq', '-d', 'postgres' ],
+	[
+		'pg_amcheck',
+		'--no-strict-names',
+		'--database' => 'qqq',
+		'--database' => 'postgres'
+	],
@@ -49,7 +54,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'post', '-d', 'postgres' ],
+	[ 'pg_amcheck', '--database' => 'post', '--database' => 'postgres' ],
@@ -61,7 +66,11 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'postgresql', '-d', 'postgres' ],
+	[
+		'pg_amcheck',
+		'--database' => 'postgresql',
+		'--database' => 'postgres'
+	],
@@ -74,7 +83,8 @@ $node->command_checks_all(
-$node->command_checks_all([ 'pg_amcheck', '-U', 'no_such_user', 'postgres' ],
+$node->command_checks_all(
+	[ 'pg_amcheck', '--username' => 'no_such_user', 'postgres' ],
@@ -96,7 +106,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'template1', '-d', 'postgres' ],
+	[ 'pg_amcheck', '--database' => 'template1', '--database' => 'postgres' ],
@@ -121,7 +131,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'postgres', '-t', '..' ],
+	[ 'pg_amcheck', '--database' => 'postgres', '--table' => '..' ],
@@ -131,7 +141,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'postgres', '-t', '.foo.bar' ],
+	[ 'pg_amcheck', '--database' => 'postgres', '--table' => '.foo.bar' ],
@@ -141,7 +151,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'postgres', '-t', '.' ],
+	[ 'pg_amcheck', '--database' => 'postgres', '--table' => '.' ],
@@ -149,7 +159,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-d', 'localhost.postgres' ],
+	[ 'pg_amcheck', '--database' => 'localhost.postgres' ],
@@ -159,7 +169,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-s', 'localhost.postgres.pg_catalog' ],
+	[ 'pg_amcheck', '--schema' => 'localhost.postgres.pg_catalog' ],
@@ -169,7 +179,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '-t', 'localhost.postgres.pg_catalog.pg_class' ],
+	[ 'pg_amcheck', '--table' => 'localhost.postgres.pg_catalog.pg_class' ],
@@ -183,7 +193,7 @@ $node->command_checks_all(
-		'-t', 'this.is.a.really.long.dotted.string'
+		'--table' => 'this.is.a.really.long.dotted.string'
@@ -193,8 +203,8 @@ $node->command_checks_all(
-		'pg_amcheck', '--no-strict-names', '-s',
-		'postgres.long.dotted.string'
+		'pg_amcheck', '--no-strict-names',
+		'--schema' => 'postgres.long.dotted.string'
@@ -204,8 +214,8 @@ $node->command_checks_all(
-		'pg_amcheck', '--no-strict-names', '-d',
-		'postgres.long.dotted.string'
+		'pg_amcheck', '--no-strict-names',
+		'--database' => 'postgres.long.dotted.string'
@@ -216,7 +226,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '--no-strict-names', '-T', 'a.b.c.d' ],
+	[ 'pg_amcheck', '--no-strict-names', '--exclude-table' => 'a.b.c.d' ],
@@ -225,7 +235,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '--no-strict-names', '-S', 'a.b.c' ],
+	[ 'pg_amcheck', '--no-strict-names', '--exclude-schema' => 'a.b.c' ],
@@ -234,7 +244,7 @@ $node->command_checks_all(
-	[ 'pg_amcheck', '--no-strict-names', '-D', 'a.b' ],
+	[ 'pg_amcheck', '--no-strict-names', '--exclude-database' => 'a.b' ],
@@ -252,20 +262,20 @@ $node->command_checks_all(
-		'-t', 'no_such_table',
-		'-t', 'no*such*table',
-		'-i', 'no_such_index',
-		'-i', 'no*such*index',
-		'-r', 'no_such_relation',
-		'-r', 'no*such*relation',
-		'-d', 'no_such_database',
-		'-d', 'no*such*database',
-		'-r', 'none.none',
-		'-r', 'none.none.none',
-		'-r', 'postgres.none.none',
-		'-r', 'postgres.pg_catalog.none',
-		'-r', 'postgres.none.pg_class',
-		'-t', 'postgres.pg_catalog.pg_class', # This exists
+		'--table' => 'no_such_table',
+		'--table' => 'no*such*table',
+		'--index' => 'no_such_index',
+		'--index' => 'no*such*index',
+		'--relation' => 'no_such_relation',
+		'--relation' => 'no*such*relation',
+		'--database' => 'no_such_database',
+		'--database' => 'no*such*database',
+		'--relation' => 'none.none',
+		'--relation' => 'none.none.none',
+		'--relation' => 'postgres.none.none',
+		'--relation' => 'postgres.pg_catalog.none',
+		'--relation' => 'postgres.none.pg_class',
+		'--table' => 'postgres.pg_catalog.pg_class', # This exists
@@ -302,7 +312,7 @@ $node->safe_psql(
-	[ 'pg_amcheck', '-d', 'regression_invalid' ],
+	[ 'pg_amcheck', '--database' => 'regression_invalid' ],
@@ -312,7 +322,9 @@ $node->command_checks_all(
-		'pg_amcheck', '-d', 'postgres', '-t', 'regression_invalid.public.foo',
+		'pg_amcheck',
+		'--database' => 'postgres',
+		'--table' => 'regression_invalid.public.foo',
@@ -334,14 +346,15 @@ $node->safe_psql('postgres', q(CREATE DATABASE another_db));
-		'pg_amcheck', '-d',
-		'postgres', '--no-strict-names',
-		'-t', 'template1.public.foo',
-		'-t', 'another_db.public.foo',
-		'-t', 'no_such_database.public.foo',
-		'-i', 'template1.public.foo_idx',
-		'-i', 'another_db.public.foo_idx',
-		'-i', 'no_such_database.public.foo_idx',
+		'pg_amcheck',
+		'--database' => 'postgres',
+		'--no-strict-names',
+		'--table' => 'template1.public.foo',
+		'--table' => 'another_db.public.foo',
+		'--table' => 'no_such_database.public.foo',
+		'--index' => 'template1.public.foo_idx',
+		'--index' => 'another_db.public.foo_idx',
+		'--index' => 'no_such_database.public.foo_idx',
@@ -364,9 +377,13 @@ $node->command_checks_all(
-		'pg_amcheck', '--all', '--no-strict-names', '-S',
-		'public', '-S', 'pg_catalog', '-S',
-		'pg_toast', '-S', 'information_schema',
+		'pg_amcheck',
+		'--all',
+		'--no-strict-names',
+		'--exclude-schema' => 'public',
+		'--exclude-schema' => 'pg_catalog',
+		'--exclude-schema' => 'pg_toast',
+		'--exclude-schema' => 'information_schema',
@@ -379,10 +396,15 @@ $node->command_checks_all(
-		'pg_amcheck', '--all', '--no-strict-names', '-s',
-		'public', '-s', 'pg_catalog', '-s',
-		'pg_toast', '-s', 'information_schema', '-t',
-		'pg_catalog.pg_class', '-S*'
+		'pg_amcheck',
+		'--all',
+		'--no-strict-names',
+		'--schema' => 'public',
+		'--schema' => 'pg_catalog',
+		'--schema' => 'pg_toast',
+		'--schema' => 'information_schema',
+		'--table' => 'pg_catalog.pg_class',
+		'--exclude-schema' => '*'
@@ -319,7 +319,7 @@ plan_to_remove_relation_file('db2', 's1.t1_btree');
-my @cmd = ('pg_amcheck', '-p', $port);
+my @cmd = ('pg_amcheck', '--port' => $port);
@@ -332,8 +332,17 @@ my $index_missing_relation_fork_re =
-$node->command_checks_all([ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
-	0, [$no_output_re], [$no_output_re], 'pg_amcheck prior to corruption');
+$node->command_checks_all(
+	[
+		@cmd,
+		'--database' => 'db1',
+		'--database' => 'db2',
+		'--database' => 'db3'
+	],
+	0,
+	[$no_output_re],
+	[$no_output_re],
+	'pg_amcheck prior to corruption');
@@ -356,7 +365,12 @@ $node->command_checks_all(
-	[ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
+	[
+		@cmd,
+		'--database' => 'db1',
+		'--database' => 'db2',
+		'--database' => 'db3'
+	],
@@ -376,7 +390,7 @@ $node->command_checks_all(
-	[ @cmd, '--all', '-s', 's1', '-i', 't1_btree' ],
+	[ @cmd, '--all', '--schema' => 's1', '--index' => 't1_btree' ],
@@ -385,7 +399,12 @@ $node->command_checks_all(
-	[ @cmd, '-d', 'db1', '-s', 's1', '-i', 't2_btree' ],
+	[
+		@cmd,
+		'--database' => 'db1',
+		'--schema' => 's1',
+		'--index' => 't2_btree'
+	],
@@ -396,22 +415,24 @@ $node->command_checks_all(
-	[ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
+	[ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ],
-	[ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db2' ],
-	2, [$missing_file_re], [$no_output_re],
+	[ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db2' ],
+	2,
+	[$missing_file_re],
+	[$no_output_re],
-	[ @cmd, '-s', 's3', 'db1' ],
+	[ @cmd, '--schema' => 's3', 'db1' ],
@@ -423,13 +444,16 @@ $node->command_checks_all(
-$node->command_checks_all([ @cmd, '-s', 's4', 'db1' ],
+$node->command_checks_all([ @cmd, '--schema' => 's4', 'db1' ],
-		@cmd, '--no-dependent-toast', '--exclude-toast-pointers', '-s', 's4',
+		@cmd,
+		'--no-dependent-toast',
+		'--exclude-toast-pointers',
+		'--schema' => 's4',
@@ -438,7 +462,7 @@ $node->command_checks_all(
-$node->command_checks_all([ @cmd, '-s', 's5', 'db1' ],
+$node->command_checks_all([ @cmd, '--schema' => 's5', 'db1' ],
@@ -446,7 +470,13 @@ $node->command_checks_all([ @cmd, '-s', 's5', 'db1' ],
-	[ @cmd, '-s', 's1', '-I', 't1_btree', '-I', 't2_btree', 'db1' ],
+	[
+		@cmd,
+		'--schema' => 's1',
+		'--exclude-index' => 't1_btree',
+		'--exclude-index' => 't2_btree',
+		'db1'
+	],
@@ -458,7 +488,7 @@ $node->command_checks_all(
-	[ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
+	[ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ],
@@ -469,7 +499,13 @@ $node->command_checks_all(
-	[ @cmd, '-s', 's2', '-T', 't1', '-T', 't2', 'db1' ],
+	[
+		@cmd,
+		'--schema' => 's2',
+		'--exclude-table' => 't1',
+		'--exclude-table' => 't2',
+		'db1'
+	],
@@ -480,17 +516,23 @@ $node->command_checks_all(
-	[ @cmd, '-s', 's5', '--startblock', 'junk', 'db1' ],
+	[ @cmd, '--schema' => 's5', '--startblock' => 'junk', 'db1' ],
-	[ @cmd, '-s', 's5', '--endblock', '1234junk', 'db1' ],
+	[ @cmd, '--schema' => 's5', '--endblock' => '1234junk', 'db1' ],
-	[ @cmd, '-s', 's5', '--startblock', '5', '--endblock', '4', 'db1' ],
+	[
+		@cmd,
+		'--schema' => 's5',
+		'--startblock' => '5',
+		'--endblock' => '4',
+		'db1'
+	],
@@ -499,7 +541,12 @@ command_fails_like(
-	[ @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check', 'db1' ],
+	[
+		@cmd,
+		'--schema' => 's1',
+		'--index' => 't1_btree',
+		'--parent-check', 'db1'
+	],
@@ -507,7 +554,10 @@ $node->command_checks_all(
-		@cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed',
+		@cmd,
+		'--schema' => 's1',
+		'--index' => 't1_btree',
+		'--heapallindexed',
@@ -516,13 +566,24 @@ $node->command_checks_all(
-	[ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3', '-S', 's*' ],
-	0, [$no_output_re], [$no_output_re],
+	[
+		@cmd,
+		'--database' => 'db1',
+		'--database' => 'db2',
+		'--database' => 'db3',
+		'--exclude-schema' => 's*'
+	],
+	0,
+	[$no_output_re],
+	[$no_output_re],
-		@cmd, '-s', 's1', '-i', 't1_btree', '--parent-check',
+		@cmd,
+		'--schema' => 's1',
+		'--index' => 't1_btree',
+		'--parent-check',
@@ -532,7 +593,10 @@ $node->command_checks_all(
-		@cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed',
+		@cmd,
+		'--schema' => 's1',
+		'--index' => 't1_btree',
+		'--heapallindexed',
@@ -542,8 +606,12 @@ $node->command_checks_all(
-		@cmd, '--checkunique', '-d', 'db1', '-d', 'db2',
-		'-d', 'db3', '-S', 's*'
+		@cmd,
+		'--checkunique',
+		'--database' => 'db1',
+		'--database' => 'db2',
+		'--database' => 'db3',
+		'--exclude-schema' => 's*'
|
|||||||
|
|
||||||
# Check that pg_amcheck runs against the uncorrupted table without error.
|
# Check that pg_amcheck runs against the uncorrupted table without error.
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ 'pg_amcheck', '-p', $port, 'postgres' ],
|
[ 'pg_amcheck', '--port' => $port, 'postgres' ],
|
||||||
'pg_amcheck test table, prior to corruption');
|
'pg_amcheck test table, prior to corruption');
|
||||||
|
|
||||||
# Check that pg_amcheck runs against the uncorrupted table and index without error.
|
# Check that pg_amcheck runs against the uncorrupted table and index without error.
|
||||||
$node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ],
|
$node->command_ok(
|
||||||
|
[ 'pg_amcheck', '--port' => $port, 'postgres' ],
|
||||||
'pg_amcheck test table and index, prior to corruption');
|
'pg_amcheck test table and index, prior to corruption');
|
||||||
|
|
||||||
$node->stop;
|
$node->stop;
|
||||||
@ -754,7 +755,7 @@ $node->start;
|
|||||||
# Run pg_amcheck against the corrupt table with epoch=0, comparing actual
|
# Run pg_amcheck against the corrupt table with epoch=0, comparing actual
|
||||||
# corruption messages against the expected messages
|
# corruption messages against the expected messages
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ],
|
[ 'pg_amcheck', '--no-dependent-indexes', '--port' => $port, 'postgres' ],
|
||||||
2, [@expected], [], 'Expected corruption message output');
|
2, [@expected], [], 'Expected corruption message output');
|
||||||
$node->safe_psql(
|
$node->safe_psql(
|
||||||
'postgres', qq(
|
'postgres', qq(
|
||||||
|
@ -52,7 +52,7 @@ $node->safe_psql(
|
|||||||
));
|
));
|
||||||
|
|
||||||
# We have not yet broken the index, so we should get no corruption
|
# We have not yet broken the index, so we should get no corruption
|
||||||
$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ],
|
$node->command_like([ 'pg_amcheck', '--port' => $node->port, 'postgres' ],
|
||||||
qr/^$/,
|
qr/^$/,
|
||||||
'pg_amcheck all schemas, tables and indexes reports no corruption');
|
'pg_amcheck all schemas, tables and indexes reports no corruption');
|
||||||
|
|
||||||
@ -69,7 +69,7 @@ $node->safe_psql(
|
|||||||
|
|
||||||
# Index corruption should now be reported
|
# Index corruption should now be reported
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ 'pg_amcheck', '-p', $node->port, 'postgres' ],
|
[ 'pg_amcheck', '--port' => $node->port, 'postgres' ],
|
||||||
2,
|
2,
|
||||||
[qr/item order invariant violated for index "fickleidx"/],
|
[qr/item order invariant violated for index "fickleidx"/],
|
||||||
[],
|
[],
|
||||||
@ -90,7 +90,7 @@ $node->safe_psql(
|
|||||||
|
|
||||||
# We should get no corruptions
|
# We should get no corruptions
|
||||||
$node->command_like(
|
$node->command_like(
|
||||||
[ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ],
|
[ 'pg_amcheck', '--checkunique', '--port' => $node->port, 'postgres' ],
|
||||||
qr/^$/,
|
qr/^$/,
|
||||||
'pg_amcheck all schemas, tables and indexes reports no corruption');
|
'pg_amcheck all schemas, tables and indexes reports no corruption');
|
||||||
|
|
||||||
@ -116,7 +116,7 @@ $node->safe_psql(
|
|||||||
|
|
||||||
# Unique index corruption should now be reported
|
# Unique index corruption should now be reported
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ],
|
[ 'pg_amcheck', '--checkunique', '--port' => $node->port, 'postgres' ],
|
||||||
2,
|
2,
|
||||||
[qr/index uniqueness is violated for index "bttest_unique_idx"/],
|
[qr/index uniqueness is violated for index "bttest_unique_idx"/],
|
||||||
[],
|
[],
|
||||||
|
@ -31,7 +31,7 @@ umask(0077);
|
|||||||
# Initialize node without replication settings
|
# Initialize node without replication settings
|
||||||
$node->init(
|
$node->init(
|
||||||
extra => ['--data-checksums'],
|
extra => ['--data-checksums'],
|
||||||
auth_extra => [ '--create-role', 'backupuser' ]);
|
auth_extra => [ '--create-role' => 'backupuser' ]);
|
||||||
$node->start;
|
$node->start;
|
||||||
my $pgdata = $node->data_dir;
|
my $pgdata = $node->data_dir;
|
||||||
|
|
||||||
@ -40,11 +40,19 @@ $node->command_fails(['pg_basebackup'],
|
|||||||
|
|
||||||
# Sanity checks for options
|
# Sanity checks for options
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none:1' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => "$tempdir/backup",
|
||||||
|
'--compress' => 'none:1'
|
||||||
|
],
|
||||||
qr/\Qcompression algorithm "none" does not accept a compression level/,
|
qr/\Qcompression algorithm "none" does not accept a compression level/,
|
||||||
'failure if method "none" specified with compression level');
|
'failure if method "none" specified with compression level');
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none+' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => "$tempdir/backup",
|
||||||
|
'--compress' => 'none+'
|
||||||
|
],
|
||||||
qr/\Qunrecognized compression algorithm: "none+"/,
|
qr/\Qunrecognized compression algorithm: "none+"/,
|
||||||
'failure on incorrect separator to define compression level');
|
'failure on incorrect separator to define compression level');
|
||||||
|
|
||||||
@ -60,7 +68,7 @@ $node->set_replication_conf();
|
|||||||
$node->reload;
|
$node->reload;
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup" ],
|
[ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup" ],
|
||||||
'pg_basebackup fails because of WAL configuration');
|
'pg_basebackup fails because of WAL configuration');
|
||||||
|
|
||||||
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
|
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
|
||||||
@ -71,7 +79,8 @@ mkdir("$tempdir/backup")
|
|||||||
or BAIL_OUT("unable to create $tempdir/backup");
|
or BAIL_OUT("unable to create $tempdir/backup");
|
||||||
append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data");
|
append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data");
|
||||||
|
|
||||||
$node->command_fails([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-n' ],
|
$node->command_fails(
|
||||||
|
[ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup", '-n' ],
|
||||||
'failing run with no-clean option');
|
'failing run with no-clean option');
|
||||||
|
|
||||||
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
|
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
|
||||||
@ -153,17 +162,17 @@ SKIP:
|
|||||||
my $sfail = quotemeta($server_fails . $cft->[1]);
|
my $sfail = quotemeta($server_fails . $cft->[1]);
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D',
|
'pg_basebackup',
|
||||||
"$tempdir/backup", '--compress',
|
'--pgdata' => "$tempdir/backup",
|
||||||
$cft->[0]
|
'--compress' => $cft->[0],
|
||||||
],
|
],
|
||||||
qr/$cfail/,
|
qr/$cfail/,
|
||||||
'client ' . $cft->[2]);
|
'client ' . $cft->[2]);
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D',
|
'pg_basebackup',
|
||||||
"$tempdir/backup", '--compress',
|
'--pgdata' => "$tempdir/backup",
|
||||||
'server-' . $cft->[0]
|
'--compress' => 'server-' . $cft->[0],
|
||||||
],
|
],
|
||||||
qr/$sfail/,
|
qr/$sfail/,
|
||||||
'server ' . $cft->[2]);
|
'server ' . $cft->[2]);
|
||||||
@ -219,7 +228,11 @@ foreach my $filename (@tempRelationFiles)
|
|||||||
|
|
||||||
# Run base backup.
|
# Run base backup.
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backup",
|
||||||
|
'--wal-method' => 'none'
|
||||||
|
],
|
||||||
'pg_basebackup runs');
|
'pg_basebackup runs');
|
||||||
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
|
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
|
||||||
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
|
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
|
||||||
@ -289,9 +302,10 @@ unlink("$pgdata/backup_label")
|
|||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backup2", '--no-manifest',
|
'--pgdata' => "$tempdir/backup2",
|
||||||
'--waldir', "$tempdir/xlog2"
|
'--no-manifest',
|
||||||
|
'--waldir' => "$tempdir/xlog2"
|
||||||
],
|
],
|
||||||
'separate xlog directory');
|
'separate xlog directory');
|
||||||
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
|
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
|
||||||
@ -300,32 +314,64 @@ ok(-d "$tempdir/xlog2/", 'xlog directory was created');
|
|||||||
rmtree("$tempdir/backup2");
|
rmtree("$tempdir/backup2");
|
||||||
rmtree("$tempdir/xlog2");
|
rmtree("$tempdir/xlog2");
|
||||||
|
|
||||||
$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup", '-Ft' ],
|
$node->command_ok(
|
||||||
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/tarbackup",
|
||||||
|
'--format' => 'tar'
|
||||||
|
],
|
||||||
'tar format');
|
'tar format');
|
||||||
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
|
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
|
||||||
rmtree("$tempdir/tarbackup");
|
rmtree("$tempdir/tarbackup");
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T=/foo" ],
|
[
|
||||||
'-T with empty old directory fails');
|
@pg_basebackup_defs,
|
||||||
$node->command_fails(
|
'--pgdata' => "$tempdir/backup_foo",
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=" ],
|
'--format' => 'plain',
|
||||||
'-T with empty new directory fails');
|
'--tablespace-mapping' => '=/foo'
|
||||||
|
],
|
||||||
|
'--tablespace-mapping with empty old directory fails');
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp',
|
@pg_basebackup_defs,
|
||||||
"-T/foo=/bar=/baz"
|
'--pgdata' => "$tempdir/backup_foo",
|
||||||
|
'--format' => 'plain',
|
||||||
|
'--tablespace-mapping' => '/foo='
|
||||||
],
|
],
|
||||||
'-T with multiple = fails');
|
'--tablespace-mapping with empty new directory fails');
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo=/bar" ],
|
[
|
||||||
'-T with old directory not absolute fails');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backup_foo",
|
||||||
|
'--format' => 'plain',
|
||||||
|
'--tablespace-mapping' => '/foo=/bar=/baz'
|
||||||
|
],
|
||||||
|
'--tablespace-mapping with multiple = fails');
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=bar" ],
|
[
|
||||||
'-T with new directory not absolute fails');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backup_foo",
|
||||||
|
'--format' => 'plain',
|
||||||
|
'--tablespace-mapping' => 'foo=/bar'
|
||||||
|
],
|
||||||
|
'--tablespace-mapping with old directory not absolute fails');
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo" ],
|
[
|
||||||
'-T with invalid format fails');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backup_foo",
|
||||||
|
'--format' => 'plain',
|
||||||
|
'--tablespace-mapping' => '/foo=bar'
|
||||||
|
],
|
||||||
|
'--tablespace-mapping with new directory not absolute fails');
|
||||||
|
$node->command_fails(
|
||||||
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backup_foo",
|
||||||
|
'--format' => 'plain',
|
||||||
|
'--tablespace-mapping' => 'foo'
|
||||||
|
],
|
||||||
|
'--tablespace-mapping with invalid format fails');
|
||||||
|
|
||||||
my $superlongname = "superlongname_" . ("x" x 100);
|
my $superlongname = "superlongname_" . ("x" x 100);
|
||||||
# Tar format doesn't support filenames longer than 100 bytes.
|
# Tar format doesn't support filenames longer than 100 bytes.
|
||||||
@ -340,7 +386,11 @@ SKIP:
|
|||||||
or die "unable to create file $superlongpath";
|
or die "unable to create file $superlongpath";
|
||||||
close $file;
|
close $file;
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l1", '-Ft' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/tarbackup_l1",
|
||||||
|
'--format' => 'tar'
|
||||||
|
],
|
||||||
'pg_basebackup tar with long name fails');
|
'pg_basebackup tar with long name fails');
|
||||||
unlink "$superlongpath";
|
unlink "$superlongpath";
|
||||||
}
|
}
|
||||||
@ -384,7 +434,7 @@ $node->safe_psql('postgres',
|
|||||||
$node->safe_psql('postgres',
|
$node->safe_psql('postgres',
|
||||||
"CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
|
"CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
|
||||||
. "INSERT INTO test1 VALUES (1234);");
|
. "INSERT INTO test1 VALUES (1234);");
|
||||||
$node->backup('tarbackup2', backup_options => ['-Ft']);
|
$node->backup('tarbackup2', backup_options => [ '--format' => 'tar' ]);
|
||||||
# empty test1, just so that it's different from the to-be-restored data
|
# empty test1, just so that it's different from the to-be-restored data
|
||||||
$node->safe_psql('postgres', "TRUNCATE TABLE test1;");
|
$node->safe_psql('postgres', "TRUNCATE TABLE test1;");
|
||||||
|
|
||||||
@ -451,14 +501,19 @@ foreach my $filename (@tempRelationFiles)
|
|||||||
}
|
}
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backup1",
|
||||||
|
'--format' => 'plain'
|
||||||
|
],
|
||||||
'plain format with tablespaces fails without tablespace mapping');
|
'plain format with tablespaces fails without tablespace mapping');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backup1", '-Fp',
|
'--pgdata' => "$tempdir/backup1",
|
||||||
"-T$realTsDir=$tempdir/tbackup/tblspc1",
|
'--format' => 'plain',
|
||||||
|
'--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tblspc1",
|
||||||
],
|
],
|
||||||
'plain format with tablespaces succeeds with tablespace mapping');
|
'plain format with tablespaces succeeds with tablespace mapping');
|
||||||
ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');
|
ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');
|
||||||
@ -526,9 +581,10 @@ $node->safe_psql('postgres',
|
|||||||
$realTsDir =~ s/=/\\=/;
|
$realTsDir =~ s/=/\\=/;
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backup3", '-Fp',
|
'--pgdata' => "$tempdir/backup3",
|
||||||
"-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
|
'--format' => 'plain',
|
||||||
|
'--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tbl\\=spc2",
|
||||||
],
|
],
|
||||||
'mapping tablespace with = sign in path');
|
'mapping tablespace with = sign in path');
|
||||||
ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
|
ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
|
||||||
@ -540,13 +596,22 @@ $realTsDir = "$real_sys_tempdir/$superlongname";
|
|||||||
$node->safe_psql('postgres',
|
$node->safe_psql('postgres',
|
||||||
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
|
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/tarbackup_l3",
|
||||||
|
'--format' => 'tar'
|
||||||
|
],
|
||||||
'pg_basebackup tar with long symlink target');
|
'pg_basebackup tar with long symlink target');
|
||||||
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
|
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
|
||||||
rmtree("$tempdir/tarbackup_l3");
|
rmtree("$tempdir/tarbackup_l3");
|
||||||
|
|
||||||
$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
|
$node->command_ok(
|
||||||
'pg_basebackup -R runs');
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backupR",
|
||||||
|
'--write-recovery-conf'
|
||||||
|
],
|
||||||
|
'pg_basebackup --write-recovery-conf runs');
|
||||||
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
|
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
|
||||||
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
|
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
|
||||||
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
|
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
|
||||||
@ -558,76 +623,105 @@ like(
|
|||||||
qr/^primary_conninfo = '.*port=$port.*'\n/m,
|
qr/^primary_conninfo = '.*port=$port.*'\n/m,
|
||||||
'postgresql.auto.conf sets primary_conninfo');
|
'postgresql.auto.conf sets primary_conninfo');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok([ @pg_basebackup_defs, '--pgdata' => "$tempdir/backupxd" ],
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backupxd" ],
|
|
||||||
'pg_basebackup runs in default xlog mode');
|
'pg_basebackup runs in default xlog mode');
|
||||||
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
|
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
|
||||||
'WAL files copied');
|
'WAL files copied');
|
||||||
rmtree("$tempdir/backupxd");
|
rmtree("$tempdir/backupxd");
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
|
[
|
||||||
'pg_basebackup -X fetch runs');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backupxf",
|
||||||
|
'--wal-method' => 'fetch'
|
||||||
|
],
|
||||||
|
'pg_basebackup --wal-method fetch runs');
|
||||||
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")),
|
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")),
|
||||||
'WAL files copied');
|
'WAL files copied');
|
||||||
rmtree("$tempdir/backupxf");
|
rmtree("$tempdir/backupxf");
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backupxs", '-X', 'stream' ],
|
[
|
||||||
'pg_basebackup -X stream runs');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backupxs",
|
||||||
|
'--wal-method' => 'stream'
|
||||||
|
],
|
||||||
|
'pg_basebackup --wal-method stream runs');
|
||||||
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
|
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
|
||||||
'WAL files copied');
|
'WAL files copied');
|
||||||
rmtree("$tempdir/backupxs");
|
rmtree("$tempdir/backupxs");
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
|
@pg_basebackup_defs,
|
||||||
'-Ft'
|
'--pgdata' => "$tempdir/backupxst",
|
||||||
|
'--wal-method' => 'stream',
|
||||||
|
'--format' => 'tar'
|
||||||
],
|
],
|
||||||
'pg_basebackup -X stream runs in tar mode');
|
'pg_basebackup --wal-method stream runs in tar mode');
|
||||||
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
|
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
|
||||||
rmtree("$tempdir/backupxst");
|
rmtree("$tempdir/backupxst");
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backupnoslot", '-X',
|
'--pgdata' => "$tempdir/backupnoslot",
|
||||||
'stream', '--no-slot'
|
'--wal-method' => 'stream',
|
||||||
|
'--no-slot'
|
||||||
],
|
],
|
||||||
'pg_basebackup -X stream runs with --no-slot');
|
'pg_basebackup --wal-method stream runs with --no-slot');
|
||||||
rmtree("$tempdir/backupnoslot");
|
rmtree("$tempdir/backupnoslot");
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
|
[
|
||||||
'pg_basebackup -X fetch runs');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backupxf",
|
||||||
|
'--wal-method' => 'fetch'
|
||||||
|
],
|
||||||
|
'pg_basebackup --wal-method fetch runs');
|
||||||
|
|
||||||
 $node->command_fails_like(
-	[ @pg_basebackup_defs, '--target', 'blackhole' ],
+	[ @pg_basebackup_defs, '--target' => 'blackhole' ],
 	qr/WAL cannot be streamed when a backup target is specified/,
-	'backup target requires -X');
+	'backup target requires --wal-method');
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'stream' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--target' => 'blackhole',
|
||||||
|
'--wal-method' => 'stream'
|
||||||
|
],
|
||||||
qr/WAL cannot be streamed when a backup target is specified/,
|
qr/WAL cannot be streamed when a backup target is specified/,
|
||||||
'backup target requires -X other than -X stream');
|
'backup target requires --wal-method other than --wal-method stream');
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ @pg_basebackup_defs, '--target', 'bogus', '-X', 'none' ],
|
[ @pg_basebackup_defs, '--target' => 'bogus', '--wal-method' => 'none' ],
|
||||||
qr/unrecognized target/,
|
qr/unrecognized target/,
|
||||||
'backup target unrecognized');
|
'backup target unrecognized');
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '--target', 'blackhole', '-X',
|
@pg_basebackup_defs,
|
||||||
'none', '-D', "$tempdir/blackhole"
|
'--target' => 'blackhole',
|
||||||
|
'--wal-method' => 'none',
|
||||||
|
'--pgdata' => "$tempdir/blackhole"
|
||||||
],
|
],
|
||||||
qr/cannot specify both output directory and backup target/,
|
qr/cannot specify both output directory and backup target/,
|
||||||
'backup target and output directory');
|
'backup target and output directory');
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-Ft' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--target' => 'blackhole',
|
||||||
|
'--wal-method' => 'none',
|
||||||
|
'--format' => 'tar'
|
||||||
|
],
|
||||||
qr/cannot specify both format and backup target/,
|
qr/cannot specify both format and backup target/,
|
||||||
'backup target and output directory');
|
'backup target and output directory');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--target' => 'blackhole',
|
||||||
|
'--wal-method' => 'none'
|
||||||
|
],
|
||||||
'backup target blackhole');
|
'backup target blackhole');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '--target',
|
@pg_basebackup_defs,
|
||||||
"server:$tempdir/backuponserver", '-X',
|
'--target' => "server:$tempdir/backuponserver",
|
||||||
'none'
|
'--wal-method' => 'none'
|
||||||
],
|
],
|
||||||
'backup target server');
|
'backup target server');
|
||||||
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
|
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
|
||||||
@ -638,9 +732,10 @@ $node->command_ok(
|
|||||||
'create backup user');
|
'create backup user');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-U', 'backupuser', '--target',
|
@pg_basebackup_defs,
|
||||||
"server:$tempdir/backuponserver",
|
'--username' => 'backupuser',
|
||||||
'-X', 'none'
|
'--target' => "server:$tempdir/backuponserver",
|
||||||
|
'--wal-method' => 'none'
|
||||||
],
|
],
|
||||||
'backup target server');
|
'backup target server');
|
||||||
ok( -f "$tempdir/backuponserver/base.tar",
|
ok( -f "$tempdir/backuponserver/base.tar",
|
||||||
@ -649,66 +744,82 @@ rmtree("$tempdir/backuponserver");
|
|||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backupxs_sl_fail", '-X',
|
'--pgdata' => "$tempdir/backupxs_sl_fail",
|
||||||
'stream', '-S',
|
'--wal-method' => 'stream',
|
||||||
'slot0'
|
'--slot' => 'slot0'
|
||||||
],
|
],
|
||||||
'pg_basebackup fails with nonexistent replication slot');
|
'pg_basebackup fails with nonexistent replication slot');
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
|
[
|
||||||
'pg_basebackup -C fails without slot name');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backupxs_slot",
|
||||||
|
'--create-slot'
|
||||||
|
],
|
||||||
|
'pg_basebackup --create-slot fails without slot name');
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backupxs_slot", '-C',
|
'--pgdata' => "$tempdir/backupxs_slot",
|
||||||
'-S', 'slot0',
|
'--create-slot',
|
||||||
|
'--slot' => 'slot0',
|
||||||
'--no-slot'
|
'--no-slot'
|
||||||
],
|
],
|
||||||
'pg_basebackup fails with -C -S --no-slot');
|
'pg_basebackup fails with --create-slot --slot --no-slot');
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '--target', 'blackhole', '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/blackhole"
|
'--target' => 'blackhole',
|
||||||
|
'--pgdata' => "$tempdir/blackhole"
|
||||||
],
|
],
|
||||||
qr/cannot specify both output directory and backup target/,
|
qr/cannot specify both output directory and backup target/,
|
||||||
'backup target and output directory');
|
'backup target and output directory');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backuptr/co", '-X', 'none' ],
|
[
|
||||||
'pg_basebackup -X fetch runs');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backuptr/co",
|
||||||
|
'--wal-method' => 'none'
|
||||||
|
],
|
||||||
|
'pg_basebackup --wal-method fetch runs');
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backupxs_sl_fail", '-X',
|
'--pgdata' => "$tempdir/backupxs_sl_fail",
|
||||||
'stream', '-S',
|
'--wal-method' => 'stream',
|
||||||
'slot0'
|
'--slot' => 'slot0'
|
||||||
],
|
],
|
||||||
'pg_basebackup fails with nonexistent replication slot');
|
'pg_basebackup fails with nonexistent replication slot');
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
|
[
|
||||||
'pg_basebackup -C fails without slot name');
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => "$tempdir/backupxs_slot",
|
||||||
|
'--create-slot'
|
||||||
|
],
|
||||||
|
'pg_basebackup --create-slot fails without slot name');
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backupxs_slot", '-C',
|
'--pgdata' => "$tempdir/backupxs_slot",
|
||||||
'-S', 'slot0',
|
'--create-slot',
|
||||||
|
'--slot' => 'slot0',
|
||||||
'--no-slot'
|
'--no-slot'
|
||||||
],
|
],
|
||||||
'pg_basebackup fails with -C -S --no-slot');
|
'pg_basebackup fails with --create-slot --slot --no-slot');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backupxs_slot", '-C',
|
'--pgdata' => "$tempdir/backupxs_slot",
|
||||||
'-S', 'slot0'
|
'--create-slot',
|
||||||
|
'--slot' => 'slot0'
|
||||||
],
|
],
|
||||||
'pg_basebackup -C runs');
|
'pg_basebackup --create-slot runs');
|
||||||
rmtree("$tempdir/backupxs_slot");
|
rmtree("$tempdir/backupxs_slot");
|
||||||
|
|
||||||
is( $node->safe_psql(
|
is( $node->safe_psql(
|
||||||
@ -727,11 +838,13 @@ isnt(
|
|||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backupxs_slot1", '-C',
|
'--pgdata' => "$tempdir/backupxs_slot1",
|
||||||
'-S', 'slot0'
|
'--create-slot',
|
||||||
|
'--slot' => 'slot0'
|
||||||
],
|
],
|
||||||
'pg_basebackup fails with -C -S and a previously existing slot');
|
'pg_basebackup fails with --create-slot --slot and a previously existing slot'
|
||||||
|
);
|
||||||
|
|
||||||
$node->safe_psql('postgres',
|
$node->safe_psql('postgres',
|
||||||
q{SELECT * FROM pg_create_physical_replication_slot('slot1')});
|
q{SELECT * FROM pg_create_physical_replication_slot('slot1')});
|
||||||
@ -741,16 +854,20 @@ my $lsn = $node->safe_psql('postgres',
|
|||||||
is($lsn, '', 'restart LSN of new slot is null');
|
is($lsn, '', 'restart LSN of new slot is null');
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
|
@pg_basebackup_defs,
|
||||||
'slot1', '-X', 'none'
|
'--pgdata' => "$tempdir/fail",
|
||||||
|
'--slot' => 'slot1',
|
||||||
|
'--wal-method' => 'none'
|
||||||
],
|
],
|
||||||
'pg_basebackup with replication slot fails without WAL streaming');
|
'pg_basebackup with replication slot fails without WAL streaming');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
|
@pg_basebackup_defs,
|
||||||
'stream', '-S', 'slot1'
|
'--pgdata' => "$tempdir/backupxs_sl",
|
||||||
|
'--wal-method' => 'stream',
|
||||||
|
'--slot' => 'slot1'
|
||||||
],
|
],
|
||||||
'pg_basebackup -X stream with replication slot runs');
|
'pg_basebackup --wal-method stream with replication slot runs');
|
||||||
$lsn = $node->safe_psql('postgres',
|
$lsn = $node->safe_psql('postgres',
|
||||||
q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
|
q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
|
||||||
);
|
);
|
||||||
@ -759,10 +876,13 @@ rmtree("$tempdir/backupxs_sl");
|
|||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
|
@pg_basebackup_defs,
|
||||||
'stream', '-S', 'slot1', '-R',
|
'--pgdata' => "$tempdir/backupxs_sl_R",
|
||||||
|
'--wal-method' => 'stream',
|
||||||
|
'--slot' => 'slot1',
|
||||||
|
'--write-recovery-conf',
|
||||||
],
|
],
|
||||||
'pg_basebackup with replication slot and -R runs');
|
'pg_basebackup with replication slot and --write-recovery-conf runs');
|
||||||
like(
|
like(
|
||||||
slurp_file("$tempdir/backupxs_sl_R/postgresql.auto.conf"),
|
slurp_file("$tempdir/backupxs_sl_R/postgresql.auto.conf"),
|
||||||
qr/^primary_slot_name = 'slot1'\n/m,
|
qr/^primary_slot_name = 'slot1'\n/m,
|
||||||
@ -774,10 +894,13 @@ rmtree("$tempdir/backupxs_sl_R");
|
|||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D', "$tempdir/backup_dbname_R", '-X',
|
@pg_basebackup_defs,
|
||||||
'stream', '-d', "dbname=db1", '-R',
|
'--pgdata' => "$tempdir/backup_dbname_R",
|
||||||
|
'--wal-method' => 'stream',
|
||||||
|
'--dbname' => "dbname=db1",
|
||||||
|
'--write-recovery-conf',
|
||||||
],
|
],
|
||||||
'pg_basebackup with dbname and -R runs');
|
'pg_basebackup with dbname and --write-recovery-conf runs');
|
||||||
like(slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
|
like(slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
|
||||||
qr/dbname=db1/m, 'recovery conf file sets dbname');
|
qr/dbname=db1/m, 'recovery conf file sets dbname');
|
||||||
|
|
||||||
@ -800,7 +923,7 @@ $node->corrupt_page_checksum($file_corrupt1, 0);
|
|||||||
$node->start;
|
$node->start;
|
||||||
|
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt" ],
|
[ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt" ],
|
||||||
1,
|
1,
|
||||||
[qr{^$}],
|
[qr{^$}],
|
||||||
[qr/^WARNING.*checksum verification failed/s],
|
[qr/^WARNING.*checksum verification failed/s],
|
||||||
@ -816,7 +939,7 @@ for my $i (1 .. 5)
|
|||||||
$node->start;
|
$node->start;
|
||||||
|
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt2" ],
|
[ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt2" ],
|
||||||
1,
|
1,
|
||||||
[qr{^$}],
|
[qr{^$}],
|
||||||
[qr/^WARNING.*further.*failures.*will.not.be.reported/s],
|
[qr/^WARNING.*further.*failures.*will.not.be.reported/s],
|
||||||
@ -829,7 +952,7 @@ $node->corrupt_page_checksum($file_corrupt2, 0);
|
|||||||
$node->start;
|
$node->start;
|
||||||
|
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt3" ],
|
[ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt3" ],
|
||||||
1,
|
1,
|
||||||
[qr{^$}],
|
[qr{^$}],
|
||||||
[qr/^WARNING.*7 total checksum verification failures/s],
|
[qr/^WARNING.*7 total checksum verification failures/s],
|
||||||
@ -839,8 +962,9 @@ rmtree("$tempdir/backup_corrupt3");
|
|||||||
# do not verify checksums, should return ok
|
# do not verify checksums, should return ok
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backup_corrupt4", '--no-verify-checksums',
|
'--pgdata' => "$tempdir/backup_corrupt4",
|
||||||
|
'--no-verify-checksums',
|
||||||
],
|
],
|
||||||
'pg_basebackup with -k does not report checksum mismatch');
|
'pg_basebackup with -k does not report checksum mismatch');
|
||||||
rmtree("$tempdir/backup_corrupt4");
|
rmtree("$tempdir/backup_corrupt4");
|
||||||
@ -858,25 +982,26 @@ SKIP:
|
|||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backup_gzip", '--compress',
|
'--pgdata' => "$tempdir/backup_gzip",
|
||||||
'1', '--format',
|
'--compress' => '1',
|
||||||
't'
|
'--format' => 't'
|
||||||
],
|
],
|
||||||
'pg_basebackup with --compress');
|
'pg_basebackup with --compress');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backup_gzip2", '--gzip',
|
'--pgdata' => "$tempdir/backup_gzip2",
|
||||||
'--format', 't'
|
'--gzip',
|
||||||
|
'--format' => 't'
|
||||||
],
|
],
|
||||||
'pg_basebackup with --gzip');
|
'pg_basebackup with --gzip');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir/backup_gzip3", '--compress',
|
'--pgdata' => "$tempdir/backup_gzip3",
|
||||||
'gzip:1', '--format',
|
'--compress' => 'gzip:1',
|
||||||
't'
|
'--format' => 't'
|
||||||
],
|
],
|
||||||
'pg_basebackup with --compress=gzip:1');
|
'pg_basebackup with --compress=gzip:1');
|
||||||
|
|
||||||
@@ -921,16 +1046,13 @@ my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
 my $sigchld_bb = IPC::Run::start(
 	[
 		@pg_basebackup_defs, '--wal-method=stream',
-		'-D', "$tempdir/sigchld",
-		'--max-rate=32', '-d',
-		$node->connstr('postgres')
+		'--pgdata' => "$tempdir/sigchld",
+		'--max-rate' => '32',
+		'--dbname' => $node->connstr('postgres')
 	],
-	'<',
-	\$sigchld_bb_stdin,
-	'>',
-	\$sigchld_bb_stdout,
-	'2>',
-	\$sigchld_bb_stderr,
+	'<' => \$sigchld_bb_stdin,
+	'>' => \$sigchld_bb_stdout,
+	'2>' => \$sigchld_bb_stderr,
 	$sigchld_bb_timeout);

is( $node->poll_query_until(
|
is( $node->poll_query_until(
|
||||||
@ -977,9 +1099,9 @@ $node2->start;
|
|||||||
|
|
||||||
$node2->command_fails_like(
|
$node2->command_fails_like(
|
||||||
[
|
[
|
||||||
@pg_basebackup_defs, '-D',
|
@pg_basebackup_defs,
|
||||||
"$tempdir" . '/diff_sysid', '--incremental',
|
'--pgdata' => "$tempdir/diff_sysid",
|
||||||
"$backupdir" . '/backup_manifest'
|
'--incremental' => "$backupdir/backup_manifest",
|
||||||
],
|
],
|
||||||
qr/system identifier in backup manifest is .*, but database system identifier is/,
|
qr/system identifier in backup manifest is .*, but database system identifier is/,
|
||||||
"pg_basebackup fails with different database system manifest");
|
"pg_basebackup fails with different database system manifest");
|
||||||
@@ -12,7 +12,8 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir;
 # to keep test times reasonable. Using @pg_basebackup_defs as the first
 # element of the array passed to IPC::Run interpolate the array (as it is
 # not a reference to an array)...
-my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
+my @pg_basebackup_defs =
+	('pg_basebackup', '--no-sync', '--checkpoint' => 'fast');

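Because @pg_basebackup_defs is passed as a bare array rather than an array reference, Perl flattens it into the surrounding list before IPC::Run ever sees it, and a fat comma behaves like a plain comma here (its operands are already quoted strings), so the new grammar changes only how the list reads, not what gets executed. A small sketch of that flattening, with hypothetical values:

my @defs = ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast');
my @cmd  = (@defs, '--pgdata' => '/tmp/backup', '--format' => 'tar');
# @cmd is one flat list:
# ('pg_basebackup', '--no-sync', '--checkpoint', 'fast',
#  '--pgdata', '/tmp/backup', '--format', 'tar')
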
# Set up an instance.
|
# Set up an instance.
|
||||||
my $node = PostgreSQL::Test::Cluster->new('main');
|
my $node = PostgreSQL::Test::Cluster->new('main');
|
||||||
@ -28,7 +29,12 @@ EOM
|
|||||||
# Back it up.
|
# Back it up.
|
||||||
my $backupdir = $tempdir . '/backup';
|
my $backupdir = $tempdir . '/backup';
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ @pg_basebackup_defs, '-D', $backupdir, '-Ft', '-X', 'none' ],
|
[
|
||||||
|
@pg_basebackup_defs,
|
||||||
|
'--pgdata' => $backupdir,
|
||||||
|
'--format' => 'tar',
|
||||||
|
'--wal-method' => 'none'
|
||||||
|
],
|
||||||
'pg_basebackup runs');
|
'pg_basebackup runs');
|
||||||
|
|
||||||
# Make sure we got base.tar and one tablespace.
|
# Make sure we got base.tar and one tablespace.
|
||||||
|
@ -25,28 +25,43 @@ mkdir($stream_dir);
|
|||||||
$primary->command_fails(['pg_receivewal'],
|
$primary->command_fails(['pg_receivewal'],
|
||||||
'pg_receivewal needs target directory specified');
|
'pg_receivewal needs target directory specified');
|
||||||
$primary->command_fails(
|
$primary->command_fails(
|
||||||
[ 'pg_receivewal', '-D', $stream_dir, '--create-slot', '--drop-slot' ],
|
[
|
||||||
|
'pg_receivewal',
|
||||||
|
'--directory' => $stream_dir,
|
||||||
|
'--create-slot',
|
||||||
|
'--drop-slot',
|
||||||
|
],
|
||||||
'failure if both --create-slot and --drop-slot specified');
|
'failure if both --create-slot and --drop-slot specified');
|
||||||
$primary->command_fails(
|
$primary->command_fails(
|
||||||
[ 'pg_receivewal', '-D', $stream_dir, '--create-slot' ],
|
[ 'pg_receivewal', '--directory' => $stream_dir, '--create-slot' ],
|
||||||
'failure if --create-slot specified without --slot');
|
'failure if --create-slot specified without --slot');
|
||||||
$primary->command_fails(
|
$primary->command_fails(
|
||||||
[ 'pg_receivewal', '-D', $stream_dir, '--synchronous', '--no-sync' ],
|
[
|
||||||
|
'pg_receivewal',
|
||||||
|
'--directory' => $stream_dir,
|
||||||
|
'--synchronous',
|
||||||
|
'--no-sync',
|
||||||
|
],
|
||||||
'failure if --synchronous specified with --no-sync');
|
'failure if --synchronous specified with --no-sync');
|
||||||
$primary->command_fails_like(
|
$primary->command_fails_like(
|
||||||
[ 'pg_receivewal', '-D', $stream_dir, '--compress', 'none:1', ],
|
[
|
||||||
|
'pg_receivewal',
|
||||||
|
'--directory' => $stream_dir,
|
||||||
|
'--compress' => 'none:1',
|
||||||
|
],
|
||||||
qr/\Qpg_receivewal: error: invalid compression specification: compression algorithm "none" does not accept a compression level/,
|
qr/\Qpg_receivewal: error: invalid compression specification: compression algorithm "none" does not accept a compression level/,
|
||||||
'failure if --compress none:N (where N > 0)');
|
'failure if --compress none:N (where N > 0)');
|
||||||
|
|
||||||
# Slot creation and drop
|
# Slot creation and drop
|
||||||
my $slot_name = 'test';
|
my $slot_name = 'test';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[ 'pg_receivewal', '--slot', $slot_name, '--create-slot' ],
|
[ 'pg_receivewal', '--slot' => $slot_name, '--create-slot' ],
|
||||||
'creating a replication slot');
|
'creating a replication slot');
|
||||||
my $slot = $primary->slot($slot_name);
|
my $slot = $primary->slot($slot_name);
|
||||||
is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
|
is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
|
||||||
is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
|
is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
|
||||||
$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
|
$primary->command_ok(
|
||||||
|
[ 'pg_receivewal', '--slot' => $slot_name, '--drop-slot' ],
|
||||||
'dropping a replication slot');
|
'dropping a replication slot');
|
||||||
is($primary->slot($slot_name)->{'slot_type'},
|
is($primary->slot($slot_name)->{'slot_type'},
|
||||||
'', 'replication slot was removed');
|
'', 'replication slot was removed');
|
||||||
@ -66,8 +81,12 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (1);');
|
|||||||
# compression involved.
|
# compression involved.
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_receivewal', '-D', $stream_dir, '--verbose',
|
'pg_receivewal',
|
||||||
'--endpos', $nextlsn, '--synchronous', '--no-loop'
|
'--directory' => $stream_dir,
|
||||||
|
'--verbose',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
|
'--synchronous',
|
||||||
|
'--no-loop',
|
||||||
],
|
],
|
||||||
'streaming some WAL with --synchronous');
|
'streaming some WAL with --synchronous');
|
||||||
|
|
||||||
@ -92,8 +111,11 @@ SKIP:
|
|||||||
|
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_receivewal', '-D', $stream_dir, '--verbose',
|
'pg_receivewal',
|
||||||
'--endpos', $nextlsn, '--compress', 'gzip:1',
|
'--directory' => $stream_dir,
|
||||||
|
'--verbose',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
|
'--compress' => 'gzip:1',
|
||||||
'--no-loop'
|
'--no-loop'
|
||||||
],
|
],
|
||||||
"streaming some WAL using ZLIB compression");
|
"streaming some WAL using ZLIB compression");
|
||||||
@ -145,9 +167,12 @@ SKIP:
|
|||||||
# Stream up to the given position.
|
# Stream up to the given position.
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_receivewal', '-D', $stream_dir, '--verbose',
|
'pg_receivewal',
|
||||||
'--endpos', $nextlsn, '--no-loop', '--compress',
|
'--directory' => $stream_dir,
|
||||||
'lz4'
|
'--verbose',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
|
'--no-loop',
|
||||||
|
'--compress' => 'lz4'
|
||||||
],
|
],
|
||||||
'streaming some WAL using --compress=lz4');
|
'streaming some WAL using --compress=lz4');
|
||||||
|
|
||||||
@ -191,8 +216,11 @@ chomp($nextlsn);
|
|||||||
$primary->psql('postgres', 'INSERT INTO test_table VALUES (4);');
|
$primary->psql('postgres', 'INSERT INTO test_table VALUES (4);');
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_receivewal', '-D', $stream_dir, '--verbose',
|
'pg_receivewal',
|
||||||
'--endpos', $nextlsn, '--no-loop'
|
'--directory' => $stream_dir,
|
||||||
|
'--verbose',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
|
'--no-loop'
|
||||||
],
|
],
|
||||||
"streaming some WAL");
|
"streaming some WAL");
|
||||||
|
|
||||||
@ -247,17 +275,25 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (6);');
|
|||||||
# Check case where the slot does not exist.
|
# Check case where the slot does not exist.
|
||||||
$primary->command_fails_like(
|
$primary->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_receivewal', '-D', $slot_dir, '--slot',
|
'pg_receivewal',
|
||||||
'nonexistentslot', '-n', '--no-sync', '--verbose',
|
'--directory' => $slot_dir,
|
||||||
'--endpos', $nextlsn
|
'--slot' => 'nonexistentslot',
|
||||||
|
'--no-loop',
|
||||||
|
'--no-sync',
|
||||||
|
'--verbose',
|
||||||
|
'--endpos' => $nextlsn
|
||||||
],
|
],
|
||||||
qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/,
|
qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/,
|
||||||
'pg_receivewal fails with non-existing slot');
|
'pg_receivewal fails with non-existing slot');
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_receivewal', '-D', $slot_dir, '--slot',
|
'pg_receivewal',
|
||||||
$slot_name, '-n', '--no-sync', '--verbose',
|
'--directory' => $slot_dir,
|
||||||
'--endpos', $nextlsn
|
'--slot' => $slot_name,
|
||||||
|
'--no-loop',
|
||||||
|
'--no-sync',
|
||||||
|
'--verbose',
|
||||||
|
'--endpos' => $nextlsn
|
||||||
],
|
],
|
||||||
"WAL streamed from the slot's restart_lsn");
|
"WAL streamed from the slot's restart_lsn");
|
||||||
ok(-e "$slot_dir/$walfile_streamed",
|
ok(-e "$slot_dir/$walfile_streamed",
|
||||||
@ -311,9 +347,13 @@ mkdir($timeline_dir);
|
|||||||
|
|
||||||
$standby->command_ok(
|
$standby->command_ok(
|
||||||
[
|
[
|
||||||
'pg_receivewal', '-D', $timeline_dir, '--verbose',
|
'pg_receivewal',
|
||||||
'--endpos', $nextlsn, '--slot', $archive_slot,
|
'--directory' => $timeline_dir,
|
||||||
'--no-sync', '-n'
|
'--verbose',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
|
'--slot' => $archive_slot,
|
||||||
|
'--no-sync',
|
||||||
|
'--no-loop'
|
||||||
],
|
],
|
||||||
"Stream some wal after promoting, resuming from the slot's position");
|
"Stream some wal after promoting, resuming from the slot's position");
|
||||||
ok(-e "$timeline_dir/$walfile_before_promotion",
|
ok(-e "$timeline_dir/$walfile_before_promotion",
|
||||||
|
@ -28,23 +28,27 @@ $node->dump_info;
|
|||||||
$node->start;
|
$node->start;
|
||||||
|
|
||||||
$node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name');
|
$node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name');
|
||||||
$node->command_fails([ 'pg_recvlogical', '-S', 'test' ],
|
$node->command_fails(
|
||||||
|
[ 'pg_recvlogical', '--slot' => 'test' ],
|
||||||
'pg_recvlogical needs a database');
|
'pg_recvlogical needs a database');
|
||||||
$node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
|
$node->command_fails(
|
||||||
|
[ 'pg_recvlogical', '--slot' => 'test', '--dbname' => 'postgres' ],
|
||||||
'pg_recvlogical needs an action');
|
'pg_recvlogical needs an action');
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
'pg_recvlogical', '-S',
|
'pg_recvlogical',
|
||||||
'test', '-d',
|
'--slot' => 'test',
|
||||||
$node->connstr('postgres'), '--start'
|
'--dbname' => $node->connstr('postgres'),
|
||||||
|
'--start',
|
||||||
],
|
],
|
||||||
'no destination file');
|
'no destination file');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_recvlogical', '-S',
|
'pg_recvlogical',
|
||||||
'test', '-d',
|
'--slot' => 'test',
|
||||||
$node->connstr('postgres'), '--create-slot'
|
'--dbname' => $node->connstr('postgres'),
|
||||||
|
'--create-slot',
|
||||||
],
|
],
|
||||||
'slot created');
|
'slot created');
|
||||||
|
|
||||||
@ -60,26 +64,33 @@ chomp($nextlsn);
|
|||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
|
'pg_recvlogical',
|
||||||
'--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'
|
'--slot' => 'test',
|
||||||
|
'--dbname' => $node->connstr('postgres'),
|
||||||
|
'--start',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
|
'--no-loop',
|
||||||
|
'--file' => '-',
|
||||||
],
|
],
|
||||||
'replayed a transaction');
|
'replayed a transaction');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_recvlogical', '-S',
|
'pg_recvlogical',
|
||||||
'test', '-d',
|
'--slot' => 'test',
|
||||||
$node->connstr('postgres'), '--drop-slot'
|
'--dbname' => $node->connstr('postgres'),
|
||||||
|
'--drop-slot'
|
||||||
],
|
],
|
||||||
'slot dropped');
|
'slot dropped');
|
||||||
|
|
||||||
#test with two-phase option enabled
|
#test with two-phase option enabled
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_recvlogical', '-S',
|
'pg_recvlogical',
|
||||||
'test', '-d',
|
'--slot' => 'test',
|
||||||
$node->connstr('postgres'), '--create-slot',
|
'--dbname' => $node->connstr('postgres'),
|
||||||
'--two-phase'
|
'--create-slot',
|
||||||
|
'--two-phase',
|
||||||
],
|
],
|
||||||
'slot with two-phase created');
|
'slot with two-phase created');
|
||||||
|
|
||||||
@ -94,19 +105,25 @@ chomp($nextlsn);
|
|||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
'pg_recvlogical', '-S',
|
'pg_recvlogical',
|
||||||
'test', '-d',
|
'--slot' => 'test',
|
||||||
$node->connstr('postgres'), '--start',
|
'--dbname' => $node->connstr('postgres'),
|
||||||
'--endpos', "$nextlsn",
|
'--start',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
'--two-phase', '--no-loop',
|
'--two-phase', '--no-loop',
|
||||||
'-f', '-'
|
'--file' => '-',
|
||||||
],
|
],
|
||||||
'incorrect usage');
|
'incorrect usage');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
|
'pg_recvlogical',
|
||||||
'--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'
|
'--slot' => 'test',
|
||||||
|
'--dbname' => $node->connstr('postgres'),
|
||||||
|
'--start',
|
||||||
|
'--endpos' => $nextlsn,
|
||||||
|
'--no-loop',
|
||||||
|
'--file' => '-',
|
||||||
],
|
],
|
||||||
'replayed a two-phase transaction');
|
'replayed a two-phase transaction');
|
||||||
|
|
||||||
|
@ -46,69 +46,75 @@ sub generate_db
|
|||||||
command_fails(['pg_createsubscriber'],
|
command_fails(['pg_createsubscriber'],
|
||||||
'no subscriber data directory specified');
|
'no subscriber data directory specified');
|
||||||
command_fails(
|
command_fails(
|
||||||
[ 'pg_createsubscriber', '--pgdata', $datadir ],
|
[ 'pg_createsubscriber', '--pgdata' => $datadir ],
|
||||||
'no publisher connection string specified');
|
'no publisher connection string specified');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--pgdata', $datadir,
|
'--verbose',
|
||||||
'--publisher-server', 'port=5432'
|
'--pgdata' => $datadir,
|
||||||
|
'--publisher-server' => 'port=5432',
|
||||||
],
|
],
|
||||||
'no database name specified');
|
'no database name specified');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--pgdata', $datadir,
|
'--verbose',
|
||||||
'--publisher-server', 'port=5432',
|
'--pgdata' => $datadir,
|
||||||
'--database', 'pg1',
|
'--publisher-server' => 'port=5432',
|
||||||
'--database', 'pg1'
|
'--database' => 'pg1',
|
||||||
|
'--database' => 'pg1',
|
||||||
],
|
],
|
||||||
'duplicate database name');
|
'duplicate database name');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--pgdata', $datadir,
|
'--verbose',
|
||||||
'--publisher-server', 'port=5432',
|
'--pgdata' => $datadir,
|
||||||
'--publication', 'foo1',
|
'--publisher-server' => 'port=5432',
|
||||||
'--publication', 'foo1',
|
'--publication' => 'foo1',
|
||||||
'--database', 'pg1',
|
'--publication' => 'foo1',
|
||||||
'--database', 'pg2'
|
'--database' => 'pg1',
|
||||||
|
'--database' => 'pg2',
|
||||||
],
|
],
|
||||||
'duplicate publication name');
|
'duplicate publication name');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--pgdata', $datadir,
|
'--verbose',
|
||||||
'--publisher-server', 'port=5432',
|
'--pgdata' => $datadir,
|
||||||
'--publication', 'foo1',
|
'--publisher-server' => 'port=5432',
|
||||||
'--database', 'pg1',
|
'--publication' => 'foo1',
|
||||||
'--database', 'pg2'
|
'--database' => 'pg1',
|
||||||
|
'--database' => 'pg2',
|
||||||
],
|
],
|
||||||
'wrong number of publication names');
|
'wrong number of publication names');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--pgdata', $datadir,
|
'--verbose',
|
||||||
'--publisher-server', 'port=5432',
|
'--pgdata' => $datadir,
|
||||||
'--publication', 'foo1',
|
'--publisher-server' => 'port=5432',
|
||||||
'--publication', 'foo2',
|
'--publication' => 'foo1',
|
||||||
'--subscription', 'bar1',
|
'--publication' => 'foo2',
|
||||||
'--database', 'pg1',
|
'--subscription' => 'bar1',
|
||||||
'--database', 'pg2'
|
'--database' => 'pg1',
|
||||||
|
'--database' => 'pg2',
|
||||||
],
|
],
|
||||||
'wrong number of subscription names');
|
'wrong number of subscription names');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--pgdata', $datadir,
|
'--verbose',
|
||||||
'--publisher-server', 'port=5432',
|
'--pgdata' => $datadir,
|
||||||
'--publication', 'foo1',
|
'--publisher-server' => 'port=5432',
|
||||||
'--publication', 'foo2',
|
'--publication' => 'foo1',
|
||||||
'--subscription', 'bar1',
|
'--publication' => 'foo2',
|
||||||
'--subscription', 'bar2',
|
'--subscription' => 'bar1',
|
||||||
'--replication-slot', 'baz1',
|
'--subscription' => 'bar2',
|
||||||
'--database', 'pg1',
|
'--replication-slot' => 'baz1',
|
||||||
'--database', 'pg2'
|
'--database' => 'pg1',
|
||||||
|
'--database' => 'pg2',
|
||||||
],
|
],
|
||||||
'wrong number of replication slot names');
|
'wrong number of replication slot names');
|
||||||
|
|
||||||
@ -168,41 +174,44 @@ $node_t->stop;
|
|||||||
# Run pg_createsubscriber on a promoted server
|
# Run pg_createsubscriber on a promoted server
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--dry-run', '--pgdata',
|
'--verbose',
|
||||||
$node_t->data_dir, '--publisher-server',
|
'--dry-run',
|
||||||
$node_p->connstr($db1), '--socketdir',
|
'--pgdata' => $node_t->data_dir,
|
||||||
$node_t->host, '--subscriber-port',
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
$node_t->port, '--database',
|
'--socketdir' => $node_t->host,
|
||||||
$db1, '--database',
|
'--subscriber-port' => $node_t->port,
|
||||||
$db2
|
'--database' => $db1,
|
||||||
|
'--database' => $db2,
|
||||||
],
|
],
|
||||||
'target server is not in recovery');
|
'target server is not in recovery');
|
||||||
|
|
||||||
# Run pg_createsubscriber when standby is running
|
# Run pg_createsubscriber when standby is running
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--dry-run', '--pgdata',
|
'--verbose',
|
||||||
$node_s->data_dir, '--publisher-server',
|
'--dry-run',
|
||||||
$node_p->connstr($db1), '--socketdir',
|
'--pgdata' => $node_s->data_dir,
|
||||||
$node_s->host, '--subscriber-port',
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
$node_s->port, '--database',
|
'--socketdir' => $node_s->host,
|
||||||
$db1, '--database',
|
'--subscriber-port' => $node_s->port,
|
||||||
$db2
|
'--database' => $db1,
|
||||||
|
'--database' => $db2,
|
||||||
],
|
],
|
||||||
'standby is up and running');
|
'standby is up and running');
|
||||||
|
|
||||||
# Run pg_createsubscriber on about-to-fail node F
|
# Run pg_createsubscriber on about-to-fail node F
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--pgdata', $node_f->data_dir,
|
'--verbose',
|
||||||
'--publisher-server', $node_p->connstr($db1),
|
'--pgdata' => $node_f->data_dir,
|
||||||
'--socketdir', $node_f->host,
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
'--subscriber-port', $node_f->port,
|
'--socketdir' => $node_f->host,
|
||||||
'--database', $db1,
|
'--subscriber-port' => $node_f->port,
|
||||||
'--database', $db2
|
'--database' => $db1,
|
||||||
|
'--database' => $db2
|
||||||
],
|
],
|
||||||
'subscriber data directory is not a copy of the source database cluster');
|
'subscriber data directory is not a copy of the source database cluster');
|
||||||
|
|
||||||
@ -216,14 +225,15 @@ $node_c->set_standby_mode();
|
|||||||
# Run pg_createsubscriber on node C (P -> S -> C)
|
# Run pg_createsubscriber on node C (P -> S -> C)
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--dry-run', '--pgdata',
|
'--verbose',
|
||||||
$node_c->data_dir, '--publisher-server',
|
'--dry-run',
|
||||||
$node_s->connstr($db1), '--socketdir',
|
'--pgdata' => $node_c->data_dir,
|
||||||
$node_c->host, '--subscriber-port',
|
'--publisher-server' => $node_s->connstr($db1),
|
||||||
$node_c->port, '--database',
|
'--socketdir' => $node_c->host,
|
||||||
$db1, '--database',
|
'--subscriber-port' => $node_c->port,
|
||||||
$db2
|
'--database' => $db1,
|
||||||
|
'--database' => $db2,
|
||||||
],
|
],
|
||||||
'primary server is in recovery');
|
'primary server is in recovery');
|
||||||
|
|
||||||
@ -239,14 +249,16 @@ $node_p->restart;
|
|||||||
$node_s->stop;
|
$node_s->stop;
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--dry-run', '--pgdata',
|
'--verbose',
|
||||||
$node_s->data_dir, '--publisher-server',
|
'--dry-run',
|
||||||
$node_p->connstr($db1), '--socketdir',
|
'--pgdata' => $node_s->data_dir,
|
||||||
$node_s->host, '--subscriber-port',
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
$node_s->port, '--database',
|
'--socketdir' => $node_s->host,
|
||||||
$db1, '--database',
|
'--subscriber-port' => $node_s->port,
|
||||||
$db2
|
'--database' => $db1,
|
||||||
|
'--database' => $db2,
|
||||||
|
|
||||||
],
|
],
|
||||||
'primary contains unmet conditions on node P');
|
'primary contains unmet conditions on node P');
|
||||||
# Restore default settings here but only apply it after testing standby. Some
|
# Restore default settings here but only apply it after testing standby. Some
|
||||||
@ -268,14 +280,15 @@ max_worker_processes = 2
|
|||||||
});
|
});
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--dry-run', '--pgdata',
|
'--verbose',
|
||||||
$node_s->data_dir, '--publisher-server',
|
'--dry-run',
|
||||||
$node_p->connstr($db1), '--socketdir',
|
'--pgdata' => $node_s->data_dir,
|
||||||
$node_s->host, '--subscriber-port',
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
$node_s->port, '--database',
|
'--socketdir' => $node_s->host,
|
||||||
$db1, '--database',
|
'--subscriber-port' => $node_s->port,
|
||||||
$db2
|
'--database' => $db1,
|
||||||
|
'--database' => $db2,
|
||||||
],
|
],
|
||||||
'standby contains unmet conditions on node S');
|
'standby contains unmet conditions on node S');
|
||||||
$node_s->append_conf(
|
$node_s->append_conf(
|
||||||
@ -321,19 +334,20 @@ $node_s->stop;
|
|||||||
# dry run mode on node S
|
# dry run mode on node S
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default",
|
'--verbose',
|
||||||
'--dry-run', '--pgdata',
|
'--dry-run',
|
||||||
$node_s->data_dir, '--publisher-server',
|
'--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default,
|
||||||
$node_p->connstr($db1), '--socketdir',
|
'--pgdata' => $node_s->data_dir,
|
||||||
$node_s->host, '--subscriber-port',
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
$node_s->port, '--publication',
|
'--socketdir' => $node_s->host,
|
||||||
'pub1', '--publication',
|
'--subscriber-port' => $node_s->port,
|
||||||
'pub2', '--subscription',
|
'--publication' => 'pub1',
|
||||||
'sub1', '--subscription',
|
'--publication' => 'pub2',
|
||||||
'sub2', '--database',
|
'--subscription' => 'sub1',
|
||||||
$db1, '--database',
|
'--subscription' => 'sub2',
|
||||||
$db2
|
'--database' => $db1,
|
||||||
|
'--database' => $db2,
|
||||||
],
|
],
|
||||||
'run pg_createsubscriber --dry-run on node S');
|
'run pg_createsubscriber --dry-run on node S');
|
||||||
|
|
||||||
@ -346,32 +360,34 @@ $node_s->stop;
|
|||||||
# pg_createsubscriber can run without --databases option
|
# pg_createsubscriber can run without --databases option
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--dry-run', '--pgdata',
|
'--verbose',
|
||||||
$node_s->data_dir, '--publisher-server',
|
'--dry-run',
|
||||||
$node_p->connstr($db1), '--socketdir',
|
'--pgdata' => $node_s->data_dir,
|
||||||
$node_s->host, '--subscriber-port',
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
$node_s->port, '--replication-slot',
|
'--socketdir' => $node_s->host,
|
||||||
'replslot1'
|
'--subscriber-port' => $node_s->port,
|
||||||
|
'--replication-slot' => 'replslot1',
|
||||||
],
|
],
|
||||||
'run pg_createsubscriber without --databases');
|
'run pg_createsubscriber without --databases');
|
||||||
|
|
||||||
-# Run pg_createsubscriber on node S
+# Run pg_createsubscriber on node S. --verbose is used twice
+# to show more information.
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_createsubscriber', '--verbose',
|
'pg_createsubscriber',
|
||||||
'--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default",
|
'--verbose', '--verbose',
|
||||||
'--verbose', '--pgdata',
|
'--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default,
|
||||||
$node_s->data_dir, '--publisher-server',
|
'--pgdata' => $node_s->data_dir,
|
||||||
$node_p->connstr($db1), '--socketdir',
|
'--publisher-server' => $node_p->connstr($db1),
|
||||||
$node_s->host, '--subscriber-port',
|
'--socketdir' => $node_s->host,
|
||||||
$node_s->port, '--publication',
|
'--subscriber-port' => $node_s->port,
|
||||||
'pub1', '--publication',
|
'--publication' => 'pub1',
|
||||||
'Pub2', '--replication-slot',
|
'--publication' => 'pub2',
|
||||||
'replslot1', '--replication-slot',
|
'--replication-slot' => 'replslot1',
|
||||||
'replslot2', '--database',
|
'--replication-slot' => 'replslot2',
|
||||||
$db1, '--database',
|
'--database' => $db1,
|
||||||
$db2
|
'--database' => $db2,
|
||||||
],
|
],
|
||||||
'run pg_createsubscriber on node S');
|
'run pg_createsubscriber on node S');
|
||||||
|
|
||||||
|
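The doubled --verbose in the call above follows the usual repeatable-flag idiom: each occurrence raises the verbosity one step. pg_createsubscriber itself is a C program; the Perl fragment below is only a hypothetical illustration of how such a counted flag is commonly parsed:

use Getopt::Long;
my $verbose = 0;
GetOptions('verbose+' => \$verbose) or die "invalid options\n";
# with --verbose --verbose on the command line, $verbose is 2
print "verbosity level: $verbose\n";
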
@ -44,9 +44,10 @@ sub check_relation_corruption
|
|||||||
# corrupted yet.
|
# corrupted yet.
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_checksums', '--check',
|
'pg_checksums',
|
||||||
'-D', $pgdata,
|
'--check',
|
||||||
'--filenode', $relfilenode_corrupted
|
'--pgdata' => $pgdata,
|
||||||
|
'--filenode' => $relfilenode_corrupted,
|
||||||
],
|
],
|
||||||
"succeeds for single relfilenode on tablespace $tablespace with offline cluster"
|
"succeeds for single relfilenode on tablespace $tablespace with offline cluster"
|
||||||
);
|
);
|
||||||
@ -57,9 +58,10 @@ sub check_relation_corruption
|
|||||||
# Checksum checks on single relfilenode fail
|
# Checksum checks on single relfilenode fail
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[
|
[
|
||||||
'pg_checksums', '--check',
|
'pg_checksums',
|
||||||
'-D', $pgdata,
|
'--check',
|
||||||
'--filenode', $relfilenode_corrupted
|
'--pgdata' => $pgdata,
|
||||||
|
'--filenode' => $relfilenode_corrupted,
|
||||||
],
|
],
|
||||||
1,
|
1,
|
||||||
[qr/Bad checksums:.*1/],
|
[qr/Bad checksums:.*1/],
|
||||||
@ -69,7 +71,7 @@ sub check_relation_corruption
|
|||||||
|
|
||||||
# Global checksum checks fail as well
|
# Global checksum checks fail as well
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ 'pg_checksums', '--check', '-D', $pgdata ],
|
[ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
|
||||||
1,
|
1,
|
||||||
[qr/Bad checksums:.*1/],
|
[qr/Bad checksums:.*1/],
|
||||||
[qr/checksum verification failed/],
|
[qr/checksum verification failed/],
|
||||||
@ -79,7 +81,8 @@ sub check_relation_corruption
|
|||||||
$node->start;
|
$node->start;
|
||||||
$node->safe_psql('postgres', "DROP TABLE $table;");
|
$node->safe_psql('postgres', "DROP TABLE $table;");
|
||||||
$node->stop;
|
$node->stop;
|
||||||
$node->command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
|
$node->command_ok(
|
||||||
|
[ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
|
||||||
"succeeds again after table drop on tablespace $tablespace");
|
"succeeds again after table drop on tablespace $tablespace");
|
||||||
|
|
||||||
$node->start;
|
$node->start;
|
||||||
@ -122,11 +125,12 @@ append_to_file "$pgdata/global/.DS_Store", "foo"
|
|||||||
unless ($Config{osname} eq 'darwin');
|
unless ($Config{osname} eq 'darwin');
|
||||||
|
|
||||||
 # Enable checksums.
-command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
 	"checksums successfully enabled in cluster");

 # Successive attempt to enable checksums fails.
-command_fails([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_fails(
+	[ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
 	"enabling checksums fails if already enabled");

# Control file should know that checksums are enabled.
|
# Control file should know that checksums are enabled.
|
||||||
@ -137,12 +141,12 @@ command_like(
|
|||||||
|
|
||||||
# Disable checksums again. Flush result here as that should be cheap.
|
# Disable checksums again. Flush result here as that should be cheap.
|
||||||
command_ok(
|
command_ok(
|
||||||
[ 'pg_checksums', '--disable', '-D', $pgdata ],
|
[ 'pg_checksums', '--disable', '--pgdata' => $pgdata ],
|
||||||
"checksums successfully disabled in cluster");
|
"checksums successfully disabled in cluster");
|
||||||
|
|
||||||
# Successive attempt to disable checksums fails.
|
# Successive attempt to disable checksums fails.
|
||||||
command_fails(
|
command_fails(
|
||||||
[ 'pg_checksums', '--disable', '--no-sync', '-D', $pgdata ],
|
[ 'pg_checksums', '--disable', '--no-sync', '--pgdata' => $pgdata ],
|
||||||
"disabling checksums fails if already disabled");
|
"disabling checksums fails if already disabled");
|
||||||
|
|
||||||
# Control file should know that checksums are disabled.
|
# Control file should know that checksums are disabled.
|
||||||
@ -152,7 +156,7 @@ command_like(
|
|||||||
'checksums disabled in control file');
|
'checksums disabled in control file');
|
||||||
|
|
||||||
# Enable checksums again for follow-up tests.
|
# Enable checksums again for follow-up tests.
|
||||||
command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
|
command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
|
||||||
"checksums successfully enabled in cluster");
|
"checksums successfully enabled in cluster");
|
||||||
|
|
||||||
# Control file should know that checksums are enabled.
|
# Control file should know that checksums are enabled.
|
||||||
@ -162,21 +166,31 @@ command_like(
|
|||||||
'checksums enabled in control file');
|
'checksums enabled in control file');
|
||||||
|
|
||||||
# Checksums pass on a newly-created cluster
|
# Checksums pass on a newly-created cluster
|
||||||
command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
|
command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
|
||||||
"succeeds with offline cluster");
|
"succeeds with offline cluster");
|
||||||
|
|
||||||
# Checksums are verified if no other arguments are specified
|
# Checksums are verified if no other arguments are specified
|
||||||
command_ok(
|
command_ok(
|
||||||
[ 'pg_checksums', '-D', $pgdata ],
|
[ 'pg_checksums', '--pgdata' => $pgdata ],
|
||||||
"verifies checksums as default action");
|
"verifies checksums as default action");
|
||||||
|
|
||||||
# Specific relation files cannot be requested when action is --disable
|
# Specific relation files cannot be requested when action is --disable
|
||||||
# or --enable.
|
# or --enable.
|
||||||
command_fails(
|
command_fails(
|
||||||
[ 'pg_checksums', '--disable', '--filenode', '1234', '-D', $pgdata ],
|
[
|
||||||
|
'pg_checksums',
|
||||||
|
'--disable',
|
||||||
|
'--filenode' => '1234',
|
||||||
|
'--pgdata' => $pgdata
|
||||||
|
],
|
||||||
"fails when relfilenodes are requested and action is --disable");
|
"fails when relfilenodes are requested and action is --disable");
|
||||||
command_fails(
|
command_fails(
|
||||||
[ 'pg_checksums', '--enable', '--filenode', '1234', '-D', $pgdata ],
|
[
|
||||||
|
'pg_checksums',
|
||||||
|
'--enable',
|
||||||
|
'--filenode' => '1234',
|
||||||
|
'--pgdata' => $pgdata
|
||||||
|
],
|
||||||
"fails when relfilenodes are requested and action is --enable");
|
"fails when relfilenodes are requested and action is --enable");
|
||||||
|
|
||||||
# Test postgres -C for an offline cluster.
|
# Test postgres -C for an offline cluster.
|
||||||
@ -187,8 +201,10 @@ command_fails(
|
|||||||
# account on Windows.
|
# account on Windows.
|
||||||
command_checks_all(
|
command_checks_all(
|
||||||
[
|
[
|
||||||
'pg_ctl', 'start', '-D', $pgdata, '-s', '-o',
|
'pg_ctl', 'start',
|
||||||
'-C data_checksums -c log_min_messages=fatal'
|
'--silent',
|
||||||
|
'--pgdata' => $pgdata,
|
||||||
|
'-o' => '-C data_checksums -c log_min_messages=fatal',
|
||||||
],
|
],
|
||||||
1,
|
1,
|
||||||
[qr/^on$/],
|
[qr/^on$/],
|
||||||
@ -197,7 +213,7 @@ command_checks_all(
|
|||||||
|
|
||||||
# Checks cannot happen with an online cluster
|
# Checks cannot happen with an online cluster
|
||||||
$node->start;
|
$node->start;
|
||||||
command_fails([ 'pg_checksums', '--check', '-D', $pgdata ],
|
command_fails([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
|
||||||
"fails with online cluster");
|
"fails with online cluster");
|
||||||
|
|
||||||
# Check corruption of table on default tablespace.
|
# Check corruption of table on default tablespace.
|
||||||
@ -224,7 +240,7 @@ sub fail_corrupt
|
|||||||
append_to_file $file_name, "foo";
|
append_to_file $file_name, "foo";
|
||||||
|
|
||||||
$node->command_checks_all(
|
$node->command_checks_all(
|
||||||
[ 'pg_checksums', '--check', '-D', $pgdata ],
|
[ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
|
||||||
1,
|
1,
|
||||||
[qr/^$/],
|
[qr/^$/],
|
||||||
[qr/could not read block 0 in file.*$file\":/],
|
[qr/could not read block 0 in file.*$file\":/],
|
||||||
@ -242,7 +258,7 @@ $node->stop;
|
|||||||
# when verifying checksums.
|
# when verifying checksums.
|
||||||
mkdir "$tablespace_dir/PG_99_999999991/";
|
mkdir "$tablespace_dir/PG_99_999999991/";
|
||||||
append_to_file "$tablespace_dir/PG_99_999999991/foo", "123";
|
append_to_file "$tablespace_dir/PG_99_999999991/foo", "123";
|
||||||
command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
|
command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
|
||||||
"succeeds with foreign tablespace");
|
"succeeds with foreign tablespace");
|
||||||
|
|
||||||
# Authorized relation files filled with corrupted data cause the
|
# Authorized relation files filled with corrupted data cause the
|
||||||
|
@ -58,9 +58,11 @@ my $tsbackup1path = $tempdir . '/ts1backup';
|
|||||||
mkdir($tsbackup1path) || die "mkdir $tsbackup1path: $!";
|
mkdir($tsbackup1path) || die "mkdir $tsbackup1path: $!";
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D',
|
'pg_basebackup',
|
||||||
$backup1path, '--no-sync',
|
'--no-sync',
|
||||||
'-cfast', "-T${tsprimary}=${tsbackup1path}"
|
'--pgdata' => $backup1path,
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--tablespace-mapping' => "${tsprimary}=${tsbackup1path}"
|
||||||
],
|
],
|
||||||
"full backup");
|
"full backup");
|
||||||
|
|
||||||
@ -89,10 +91,12 @@ my $tsbackup2path = $tempdir . '/tsbackup2';
|
|||||||
mkdir($tsbackup2path) || die "mkdir $tsbackup2path: $!";
|
mkdir($tsbackup2path) || die "mkdir $tsbackup2path: $!";
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D',
|
'pg_basebackup',
|
||||||
$backup2path, '--no-sync',
|
'--no-sync',
|
||||||
'-cfast', "-T${tsprimary}=${tsbackup2path}",
|
'--pgdata' => $backup2path,
|
||||||
'--incremental', $backup1path . '/backup_manifest'
|
'--checkpoint' => 'fast',
|
||||||
|
'--tablespace-mapping' => "${tsprimary}=${tsbackup2path}",
|
||||||
|
'--incremental' => $backup1path . '/backup_manifest'
|
||||||
],
|
],
|
||||||
"incremental backup");
|
"incremental backup");
|
||||||
|
|
||||||
@ -169,18 +173,20 @@ my $dump1 = $backupdir . '/pitr1.dump';
|
|||||||
my $dump2 = $backupdir . '/pitr2.dump';
|
my $dump2 = $backupdir . '/pitr2.dump';
|
||||||
$pitr1->command_ok(
|
$pitr1->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-f',
|
'pg_dumpall',
|
||||||
$dump1, '--no-sync',
|
'--no-sync',
|
||||||
'--no-unlogged-table-data', '-d',
|
'--no-unlogged-table-data',
|
||||||
$pitr1->connstr('postgres'),
|
'--file' => $dump1,
|
||||||
|
'--dbname' => $pitr1->connstr('postgres'),
|
||||||
],
|
],
|
||||||
'dump from PITR 1');
|
'dump from PITR 1');
|
||||||
$pitr2->command_ok(
|
$pitr2->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-f',
|
'pg_dumpall',
|
||||||
$dump2, '--no-sync',
|
'--no-sync',
|
||||||
'--no-unlogged-table-data', '-d',
|
'--no-unlogged-table-data',
|
||||||
$pitr2->connstr('postgres'),
|
'--file' => $dump2,
|
||||||
|
'--dbname' => $pitr2->connstr('postgres'),
|
||||||
],
|
],
|
||||||
'dump from PITR 2');
|
'dump from PITR 2');
|
||||||
|
|
||||||
|
@ -30,7 +30,12 @@ EOM
|
|||||||
# Take a full backup.
|
# Take a full backup.
|
||||||
my $backup1path = $node1->backup_dir . '/backup1';
|
my $backup1path = $node1->backup_dir . '/backup1';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup1path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast'
|
||||||
|
],
|
||||||
"full backup from node1");
|
"full backup from node1");
|
||||||
|
|
||||||
# Insert a second row on the original node.
|
# Insert a second row on the original node.
|
||||||
@ -42,8 +47,11 @@ EOM
|
|||||||
my $backup2path = $node1->backup_dir . '/backup2';
|
my $backup2path = $node1->backup_dir . '/backup2';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup1path . '/backup_manifest'
|
'--pgdata' => $backup2path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup1path . '/backup_manifest'
|
||||||
],
|
],
|
||||||
"incremental backup from node1");
|
"incremental backup from node1");
|
||||||
|
|
||||||
@ -65,8 +73,11 @@ EOM
|
|||||||
my $backup3path = $node1->backup_dir . '/backup3';
|
my $backup3path = $node1->backup_dir . '/backup3';
|
||||||
$node2->command_ok(
|
$node2->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup2path . '/backup_manifest'
|
'--pgdata' => $backup3path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup2path . '/backup_manifest'
|
||||||
],
|
],
|
||||||
"incremental backup from node2");
|
"incremental backup from node2");
|
||||||
|
|
||||||
|
@ -25,7 +25,12 @@ $node->start;
|
|||||||
# Take a full backup.
|
# Take a full backup.
|
||||||
my $original_backup_path = $node->backup_dir . '/original';
|
my $original_backup_path = $node->backup_dir . '/original';
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $original_backup_path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $original_backup_path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
],
|
||||||
"full backup");
|
"full backup");
|
||||||
|
|
||||||
# Verify the full backup.
|
# Verify the full backup.
|
||||||
@ -39,9 +44,11 @@ sub combine_and_test_one_backup
|
|||||||
my $revised_backup_path = $node->backup_dir . '/' . $backup_name;
|
my $revised_backup_path = $node->backup_dir . '/' . $backup_name;
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $original_backup_path,
|
'pg_combinebackup',
|
||||||
'-o', $revised_backup_path,
|
$original_backup_path,
|
||||||
'--no-sync', @extra_options
|
'--output' => $revised_backup_path,
|
||||||
|
'--no-sync',
|
||||||
|
@extra_options,
|
||||||
],
|
],
|
||||||
"pg_combinebackup with @extra_options");
|
"pg_combinebackup with @extra_options");
|
||||||
if (defined $failure_pattern)
|
if (defined $failure_pattern)
|
||||||
|
@ -43,15 +43,23 @@ $node2->start;
|
|||||||
# Take a full backup from node1.
|
# Take a full backup from node1.
|
||||||
my $backup1path = $node1->backup_dir . '/backup1';
|
my $backup1path = $node1->backup_dir . '/backup1';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup1path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
],
|
||||||
"full backup from node1");
|
"full backup from node1");
|
||||||
|
|
||||||
# Now take an incremental backup.
|
# Now take an incremental backup.
|
||||||
my $backup2path = $node1->backup_dir . '/backup2';
|
my $backup2path = $node1->backup_dir . '/backup2';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup1path . '/backup_manifest'
|
'--pgdata' => $backup2path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup1path . '/backup_manifest',
|
||||||
],
|
],
|
||||||
"incremental backup from node1");
|
"incremental backup from node1");
|
||||||
|
|
||||||
@ -59,23 +67,34 @@ $node1->command_ok(
|
|||||||
my $backup3path = $node1->backup_dir . '/backup3';
|
my $backup3path = $node1->backup_dir . '/backup3';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup2path . '/backup_manifest'
|
'--pgdata' => $backup3path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup2path . '/backup_manifest',
|
||||||
],
|
],
|
||||||
"another incremental backup from node1");
|
"another incremental backup from node1");
|
||||||
|
|
||||||
# Take a full backup from node2.
|
# Take a full backup from node2.
|
||||||
my $backupother1path = $node1->backup_dir . '/backupother1';
|
my $backupother1path = $node1->backup_dir . '/backupother1';
|
||||||
$node2->command_ok(
|
$node2->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backupother1path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backupother1path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
],
|
||||||
"full backup from node2");
|
"full backup from node2");
|
||||||
|
|
||||||
# Take an incremental backup from node2.
|
# Take an incremental backup from node2.
|
||||||
my $backupother2path = $node1->backup_dir . '/backupother2';
|
my $backupother2path = $node1->backup_dir . '/backupother2';
|
||||||
$node2->command_ok(
|
$node2->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backupother2path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backupother1path . '/backup_manifest'
|
'--pgdata' => $backupother2path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backupother1path . '/backup_manifest',
|
||||||
],
|
],
|
||||||
"incremental backup from node2");
|
"incremental backup from node2");
|
||||||
|
|
||||||
@ -85,8 +104,9 @@ my $resultpath = $node1->backup_dir . '/result';
|
|||||||
# Can't combine 2 full backups.
|
# Can't combine 2 full backups.
|
||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup1path, $backup1path, '-o',
|
'pg_combinebackup', $backup1path, $backup1path,
|
||||||
$resultpath, $mode
|
'--output' => $resultpath,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
qr/is a full backup, but only the first backup should be a full backup/,
|
qr/is a full backup, but only the first backup should be a full backup/,
|
||||||
"can't combine full backups");
|
"can't combine full backups");
|
||||||
@ -94,8 +114,9 @@ $node1->command_fails_like(
|
|||||||
# Can't combine 2 incremental backups.
|
# Can't combine 2 incremental backups.
|
||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup2path, $backup2path, '-o',
|
'pg_combinebackup', $backup2path, $backup2path,
|
||||||
$resultpath, $mode
|
'--output' => $resultpath,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
qr/is an incremental backup, but the first backup should be a full backup/,
|
qr/is an incremental backup, but the first backup should be a full backup/,
|
||||||
"can't combine full backups");
|
"can't combine full backups");
|
||||||
@ -103,8 +124,9 @@ $node1->command_fails_like(
|
|||||||
# Can't combine full backup with an incremental backup from a different system.
|
# Can't combine full backup with an incremental backup from a different system.
|
||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup1path, $backupother2path, '-o',
|
'pg_combinebackup', $backup1path, $backupother2path,
|
||||||
$resultpath, $mode
|
'--output' => $resultpath,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
qr/expected system identifier.*but found/,
|
qr/expected system identifier.*but found/,
|
||||||
"can't combine backups from different nodes");
|
"can't combine backups from different nodes");
|
||||||
@ -117,7 +139,8 @@ copy("$backupother2path/backup_manifest", "$backup2path/backup_manifest")
|
|||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
|
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
|
||||||
'-o', $resultpath, $mode
|
'--output' => $resultpath,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
qr/ manifest system identifier is .*, but control file has /,
|
qr/ manifest system identifier is .*, but control file has /,
|
||||||
"can't combine backups with different manifest system identifier ");
|
"can't combine backups with different manifest system identifier ");
|
||||||
@ -128,8 +151,9 @@ move("$backup2path/backup_manifest.orig", "$backup2path/backup_manifest")
|
|||||||
# Can't omit a required backup.
|
# Can't omit a required backup.
|
||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup1path, $backup3path, '-o',
|
'pg_combinebackup', $backup1path, $backup3path,
|
||||||
$resultpath, $mode
|
'--output' => $resultpath,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
qr/starts at LSN.*but expected/,
|
qr/starts at LSN.*but expected/,
|
||||||
"can't omit a required backup");
|
"can't omit a required backup");
|
||||||
@ -138,7 +162,8 @@ $node1->command_fails_like(
|
|||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup1path, $backup3path, $backup2path,
|
'pg_combinebackup', $backup1path, $backup3path, $backup2path,
|
||||||
'-o', $resultpath, $mode
|
'--output' => $resultpath,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
qr/starts at LSN.*but expected/,
|
qr/starts at LSN.*but expected/,
|
||||||
"can't combine backups in the wrong order");
|
"can't combine backups in the wrong order");
|
||||||
@ -147,7 +172,8 @@ $node1->command_fails_like(
|
|||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
|
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
|
||||||
'-o', $resultpath, $mode
|
'--output' => $resultpath,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
"can combine 3 matching backups");
|
"can combine 3 matching backups");
|
||||||
rmtree($resultpath);
|
rmtree($resultpath);
|
||||||
@ -156,17 +182,18 @@ rmtree($resultpath);
|
|||||||
my $synthetic12path = $node1->backup_dir . '/synthetic12';
|
my $synthetic12path = $node1->backup_dir . '/synthetic12';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $backup1path, $backup2path, '-o',
|
'pg_combinebackup', $backup1path, $backup2path,
|
||||||
$synthetic12path, $mode
|
'--output' => $synthetic12path,
|
||||||
|
$mode,
|
||||||
],
|
],
|
||||||
"can combine 2 matching backups");
|
"can combine 2 matching backups");
|
||||||
|
|
||||||
# Can combine result of previous step with second incremental.
|
# Can combine result of previous step with second incremental.
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $synthetic12path,
|
'pg_combinebackup', $synthetic12path, $backup3path,
|
||||||
$backup3path, '-o',
|
'--output' => $resultpath,
|
||||||
$resultpath, $mode
|
$mode,
|
||||||
],
|
],
|
||||||
"can combine synthetic backup with later incremental");
|
"can combine synthetic backup with later incremental");
|
||||||
rmtree($resultpath);
|
rmtree($resultpath);
|
||||||
@ -174,9 +201,9 @@ rmtree($resultpath);
|
|||||||
# Can't combine result of 1+2 with 2.
|
# Can't combine result of 1+2 with 2.
|
||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_combinebackup', $synthetic12path,
|
'pg_combinebackup', $synthetic12path, $backup2path,
|
||||||
$backup2path, '-o',
|
'--output' => $resultpath,
|
||||||
$resultpath, $mode
|
$mode,
|
||||||
],
|
],
|
||||||
qr/starts at LSN.*but expected/,
|
qr/starts at LSN.*but expected/,
|
||||||
"can't combine synthetic backup with included incremental");
|
"can't combine synthetic backup with included incremental");
|
||||||
|
@ -29,7 +29,12 @@ EOM
|
|||||||
# Take a full backup.
|
# Take a full backup.
|
||||||
my $backup1path = $primary->backup_dir . '/backup1';
|
my $backup1path = $primary->backup_dir . '/backup1';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup1path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast'
|
||||||
|
],
|
||||||
"full backup");
|
"full backup");
|
||||||
|
|
||||||
# Now make some database changes.
|
# Now make some database changes.
|
||||||
@ -42,8 +47,11 @@ EOM
|
|||||||
my $backup2path = $primary->backup_dir . '/backup2';
|
my $backup2path = $primary->backup_dir . '/backup2';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup1path . '/backup_manifest'
|
'--pgdata' => $backup2path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup1path . '/backup_manifest'
|
||||||
],
|
],
|
||||||
"incremental backup");
|
"incremental backup");
|
||||||
|
|
||||||
|
@ -34,7 +34,12 @@ EOM
|
|||||||
# Take a full backup.
|
# Take a full backup.
|
||||||
my $backup1path = $node1->backup_dir . '/backup1';
|
my $backup1path = $node1->backup_dir . '/backup1';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup1path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast'
|
||||||
|
],
|
||||||
"full backup");
|
"full backup");
|
||||||
|
|
||||||
# Switch to wal_level=minimal, which also requires max_wal_senders=0 and
|
# Switch to wal_level=minimal, which also requires max_wal_senders=0 and
|
||||||
@ -63,8 +68,11 @@ $node1->restart;
|
|||||||
my $backup2path = $node1->backup_dir . '/backup2';
|
my $backup2path = $node1->backup_dir . '/backup2';
|
||||||
$node1->command_fails_like(
|
$node1->command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup1path . '/backup_manifest'
|
'--pgdata' => $backup2path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup1path . '/backup_manifest'
|
||||||
],
|
],
|
||||||
qr/WAL summaries are required on timeline 1 from.*are incomplete/,
|
qr/WAL summaries are required on timeline 1 from.*are incomplete/,
|
||||||
"incremental backup fails");
|
"incremental backup fails");
|
||||||
|
@ -31,7 +31,12 @@ EOM
|
|||||||
# Take a full backup.
|
# Take a full backup.
|
||||||
my $backup1path = $node1->backup_dir . '/backup1';
|
my $backup1path = $node1->backup_dir . '/backup1';
|
||||||
$node1->command_ok(
|
$node1->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup1path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
],
|
||||||
"full backup from node1");
|
"full backup from node1");
|
||||||
|
|
||||||
# Checkpoint and record LSN after.
|
# Checkpoint and record LSN after.
|
||||||
@ -70,8 +75,11 @@ EOM
|
|||||||
my $backup2path = $node1->backup_dir . '/backup2';
|
my $backup2path = $node1->backup_dir . '/backup2';
|
||||||
$node2->command_ok(
|
$node2->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup1path . '/backup_manifest'
|
'--pgdata' => $backup2path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup1path . '/backup_manifest',
|
||||||
],
|
],
|
||||||
"incremental backup from node2");
|
"incremental backup from node2");
|
||||||
|
|
||||||
|
@ -21,15 +21,23 @@ $primary->start;
|
|||||||
# Take a full backup.
|
# Take a full backup.
|
||||||
my $backup1path = $primary->backup_dir . '/backup1';
|
my $backup1path = $primary->backup_dir . '/backup1';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup1path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast'
|
||||||
|
],
|
||||||
"full backup");
|
"full backup");
|
||||||
|
|
||||||
# Take an incremental backup.
|
# Take an incremental backup.
|
||||||
my $backup2path = $primary->backup_dir . '/backup2';
|
my $backup2path = $primary->backup_dir . '/backup2';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'--incremental', $backup1path . '/backup_manifest'
|
'--pgdata' => $backup2path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--incremental' => $backup1path . '/backup_manifest'
|
||||||
],
|
],
|
||||||
"incremental backup");
|
"incremental backup");
|
||||||
|
|
||||||
@ -57,7 +65,10 @@ for my $iname (@filelist)
|
|||||||
# pg_combinebackup should fail.
|
# pg_combinebackup should fail.
|
||||||
my $outpath = $primary->backup_dir . '/out';
|
my $outpath = $primary->backup_dir . '/out';
|
||||||
$primary->command_fails_like(
|
$primary->command_fails_like(
|
||||||
[ 'pg_combinebackup', $backup1path, $backup2path, '-o', $outpath, ],
|
[
|
||||||
|
'pg_combinebackup', $backup1path,
|
||||||
|
$backup2path, '--output' => $outpath,
|
||||||
|
],
|
||||||
qr/full backup contains unexpected incremental file/,
|
qr/full backup contains unexpected incremental file/,
|
||||||
"pg_combinebackup fails");
|
"pg_combinebackup fails");
|
||||||
|
|
||||||
|
@ -15,10 +15,15 @@ program_help_ok('pg_ctl');
|
|||||||
program_version_ok('pg_ctl');
|
program_version_ok('pg_ctl');
|
||||||
program_options_handling_ok('pg_ctl');
|
program_options_handling_ok('pg_ctl');
|
||||||
|
|
||||||
command_exit_is([ 'pg_ctl', 'start', '-D', "$tempdir/nonexistent" ],
|
command_exit_is([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/nonexistent" ],
|
||||||
1, 'pg_ctl start with nonexistent directory');
|
1, 'pg_ctl start with nonexistent directory');
|
||||||
|
|
||||||
command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data", '-o', '-N' ],
|
command_ok(
|
||||||
|
[
|
||||||
|
'pg_ctl', 'initdb',
|
||||||
|
'--pgdata' => "$tempdir/data",
|
||||||
|
'--options' => '--no-sync'
|
||||||
|
],
|
||||||
'pg_ctl initdb');
|
'pg_ctl initdb');
|
||||||
command_ok([ $ENV{PG_REGRESS}, '--config-auth', "$tempdir/data" ],
|
command_ok([ $ENV{PG_REGRESS}, '--config-auth', "$tempdir/data" ],
|
||||||
'configure authentication');
|
'configure authentication');
|
||||||
@ -41,8 +46,9 @@ else
|
|||||||
}
|
}
|
||||||
close $conf;
|
close $conf;
|
||||||
my $ctlcmd = [
|
my $ctlcmd = [
|
||||||
'pg_ctl', 'start', '-D', "$tempdir/data", '-l',
|
'pg_ctl', 'start',
|
||||||
"$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log"
|
'--pgdata' => "$tempdir/data",
|
||||||
|
'--log' => "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log"
|
||||||
];
|
];
|
||||||
command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start');
|
command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start');
|
||||||
|
|
||||||
@ -51,17 +57,23 @@ command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start');
|
|||||||
# postmaster they start. Waiting more than the 2 seconds slop time allowed
|
# postmaster they start. Waiting more than the 2 seconds slop time allowed
|
||||||
# by wait_for_postmaster() prevents that mistake.
|
# by wait_for_postmaster() prevents that mistake.
|
||||||
sleep 3 if ($windows_os);
|
sleep 3 if ($windows_os);
|
||||||
command_fails([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
|
command_fails([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/data" ],
|
||||||
'second pg_ctl start fails');
|
'second pg_ctl start fails');
|
||||||
command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], 'pg_ctl stop');
|
command_ok([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ],
|
||||||
command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
|
'pg_ctl stop');
|
||||||
|
command_fails([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ],
|
||||||
'second pg_ctl stop fails');
|
'second pg_ctl stop fails');
|
||||||
|
|
||||||
# Log file for default permission test. The permissions won't be checked on
|
# Log file for default permission test. The permissions won't be checked on
|
||||||
# Windows but we still want to do the restart test.
|
# Windows but we still want to do the restart test.
|
||||||
my $logFileName = "$tempdir/data/perm-test-600.log";
|
my $logFileName = "$tempdir/data/perm-test-600.log";
|
||||||
|
|
||||||
command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ],
|
command_ok(
|
||||||
|
[
|
||||||
|
'pg_ctl', 'restart',
|
||||||
|
'--pgdata' => "$tempdir/data",
|
||||||
|
'--log' => $logFileName
|
||||||
|
],
|
||||||
'pg_ctl restart with server not running');
|
'pg_ctl restart with server not running');
|
||||||
|
|
||||||
# Permissions on log file should be default
|
# Permissions on log file should be default
|
||||||
@ -82,23 +94,27 @@ SKIP:
|
|||||||
skip "group access not supported on Windows", 3
|
skip "group access not supported on Windows", 3
|
||||||
if ($windows_os || $Config::Config{osname} eq 'cygwin');
|
if ($windows_os || $Config::Config{osname} eq 'cygwin');
|
||||||
|
|
||||||
system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data";
|
system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data";
|
||||||
|
|
||||||
# Change the data dir mode so log file will be created with group read
|
# Change the data dir mode so log file will be created with group read
|
||||||
# privileges on the next start
|
# privileges on the next start
|
||||||
chmod_recursive("$tempdir/data", 0750, 0640);
|
chmod_recursive("$tempdir/data", 0750, 0640);
|
||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[ 'pg_ctl', 'start', '-D', "$tempdir/data", '-l', $logFileName ],
|
[
|
||||||
|
'pg_ctl', 'start',
|
||||||
|
'--pgdata' => "$tempdir/data",
|
||||||
|
'--log' => $logFileName
|
||||||
|
],
|
||||||
'start server to check group permissions');
|
'start server to check group permissions');
|
||||||
|
|
||||||
ok(-f $logFileName);
|
ok(-f $logFileName);
|
||||||
ok(check_mode_recursive("$tempdir/data", 0750, 0640));
|
ok(check_mode_recursive("$tempdir/data", 0750, 0640));
|
||||||
}
|
}
|
||||||
|
|
||||||
command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
|
command_ok([ 'pg_ctl', 'restart', '--pgdata' => "$tempdir/data" ],
|
||||||
'pg_ctl restart with server running');
|
'pg_ctl restart with server running');
|
||||||
|
|
||||||
system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data";
|
system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data";
|
||||||
|
|
||||||
done_testing();
|
done_testing();
|
||||||
|
@ -10,20 +10,23 @@ use Test::More;
|
|||||||
|
|
||||||
my $tempdir = PostgreSQL::Test::Utils::tempdir;
|
my $tempdir = PostgreSQL::Test::Utils::tempdir;
|
||||||
|
|
||||||
command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
|
command_exit_is([ 'pg_ctl', 'status', '--pgdata' => "$tempdir/nonexistent" ],
|
||||||
4, 'pg_ctl status with nonexistent directory');
|
4, 'pg_ctl status with nonexistent directory');
|
||||||
|
|
||||||
my $node = PostgreSQL::Test::Cluster->new('main');
|
my $node = PostgreSQL::Test::Cluster->new('main');
|
||||||
$node->init;
|
$node->init;
|
||||||
|
|
||||||
command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ],
|
command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ],
|
||||||
3, 'pg_ctl status with server not running');
|
3, 'pg_ctl status with server not running');
|
||||||
|
|
||||||
system_or_bail 'pg_ctl', '-l', "$tempdir/logfile", '-D',
|
system_or_bail(
|
||||||
$node->data_dir, '-w', 'start';
|
'pg_ctl',
|
||||||
command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ],
|
'--log' => "$tempdir/logfile",
|
||||||
|
'--pgdata' => $node->data_dir,
|
||||||
|
'--wait', 'start');
|
||||||
|
command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ],
|
||||||
0, 'pg_ctl status with server running');
|
0, 'pg_ctl status with server running');
|
||||||
|
|
||||||
system_or_bail 'pg_ctl', 'stop', '-D', $node->data_dir;
|
system_or_bail 'pg_ctl', 'stop', '--pgdata' => $node->data_dir;
|
||||||
|
|
||||||
done_testing();
|
done_testing();
|
||||||
|
@ -11,7 +11,7 @@ use Test::More;
|
|||||||
my $tempdir = PostgreSQL::Test::Utils::tempdir;
|
my $tempdir = PostgreSQL::Test::Utils::tempdir;
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
|
[ 'pg_ctl', '--pgdata' => "$tempdir/nonexistent", 'promote' ],
|
||||||
qr/directory .* does not exist/,
|
qr/directory .* does not exist/,
|
||||||
'pg_ctl promote with nonexistent directory');
|
'pg_ctl promote with nonexistent directory');
|
||||||
|
|
||||||
@ -19,14 +19,14 @@ my $node_primary = PostgreSQL::Test::Cluster->new('primary');
|
|||||||
$node_primary->init(allows_streaming => 1);
|
$node_primary->init(allows_streaming => 1);
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
|
[ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ],
|
||||||
qr/PID file .* does not exist/,
|
qr/PID file .* does not exist/,
|
||||||
'pg_ctl promote of not running instance fails');
|
'pg_ctl promote of not running instance fails');
|
||||||
|
|
||||||
$node_primary->start;
|
$node_primary->start;
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
|
[ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ],
|
||||||
qr/not in standby mode/,
|
qr/not in standby mode/,
|
||||||
'pg_ctl promote of primary instance fails');
|
'pg_ctl promote of primary instance fails');
|
||||||
|
|
||||||
@ -39,8 +39,13 @@ $node_standby->start;
|
|||||||
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
|
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
|
||||||
't', 'standby is in recovery');
|
't', 'standby is in recovery');
|
||||||
|
|
||||||
command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ],
|
command_ok(
|
||||||
'pg_ctl -W promote of standby runs');
|
[
|
||||||
|
'pg_ctl',
|
||||||
|
'--pgdata' => $node_standby->data_dir,
|
||||||
|
'--no-wait', 'promote'
|
||||||
|
],
|
||||||
|
'pg_ctl --no-wait promote of standby runs');
|
||||||
|
|
||||||
ok( $node_standby->poll_query_until(
|
ok( $node_standby->poll_query_until(
|
||||||
'postgres', 'SELECT NOT pg_is_in_recovery()'),
|
'postgres', 'SELECT NOT pg_is_in_recovery()'),
|
||||||
@ -55,7 +60,7 @@ $node_standby->start;
|
|||||||
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
|
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
|
||||||
't', 'standby is in recovery');
|
't', 'standby is in recovery');
|
||||||
|
|
||||||
command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, 'promote' ],
|
command_ok([ 'pg_ctl', '--pgdata' => $node_standby->data_dir, 'promote' ],
|
||||||
'pg_ctl promote of standby runs');
|
'pg_ctl promote of standby runs');
|
||||||
|
|
||||||
# no wait here
|
# no wait here
|
||||||
|
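Another editorial sketch, not from the patch: whichever spelling a test uses, each element of the command array becomes its own argv entry when the command is run, so '--pgdata' => $dir passes two arguments exactly as '-D', $dir did. The snippet assumes IPC::Run is available (it is a prerequisite of the TAP suite) and uses the running perl itself as a stand-in command; the directory is illustrative only.

use strict;
use warnings;
use IPC::Run qw(run);

my $dir = '/tmp/data';

# $^X is the perl interpreter running this script; '--' stops perl's own
# option parsing so the remaining elements land in @ARGV untouched.
my @cmd = ($^X, '-e', 'print join "|", @ARGV', '--', '--pgdata' => $dir);

run(\@cmd, '>', \my $out) or die "command failed: $?";
print "$out\n";    # prints: --pgdata|/tmp/data
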
@ -61,18 +61,19 @@ my $supports_zstd = check_pg_config("#define USE_ZSTD 1");
|
|||||||
my %pgdump_runs = (
|
my %pgdump_runs = (
|
||||||
binary_upgrade => {
|
binary_upgrade => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--format' => 'custom',
|
||||||
'--format=custom',
|
'--file' => "$tempdir/binary_upgrade.dump",
|
||||||
"--file=$tempdir/binary_upgrade.dump",
|
'--no-password',
|
||||||
'-w',
|
|
||||||
'--schema-only',
|
'--schema-only',
|
||||||
'--binary-upgrade',
|
'--binary-upgrade',
|
||||||
'-d', 'postgres', # alternative way to specify database
|
'--dbname' => 'postgres', # alternative way to specify database
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore', '-Fc', '--verbose',
|
'pg_restore',
|
||||||
"--file=$tempdir/binary_upgrade.sql",
|
'--format' => 'custom',
|
||||||
|
'--verbose',
|
||||||
|
'--file' => "$tempdir/binary_upgrade.sql",
|
||||||
"$tempdir/binary_upgrade.dump",
|
"$tempdir/binary_upgrade.dump",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@ -82,18 +83,21 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'gzip',
|
compile_option => 'gzip',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--format=custom',
|
'pg_dump',
|
||||||
'--compress=1', "--file=$tempdir/compression_gzip_custom.dump",
|
'--format' => 'custom',
|
||||||
|
'--compress' => '1',
|
||||||
|
'--file' => "$tempdir/compression_gzip_custom.dump",
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore',
|
'pg_restore',
|
||||||
"--file=$tempdir/compression_gzip_custom.sql",
|
'--file' => "$tempdir/compression_gzip_custom.sql",
|
||||||
"$tempdir/compression_gzip_custom.dump",
|
"$tempdir/compression_gzip_custom.dump",
|
||||||
],
|
],
|
||||||
command_like => {
|
command_like => {
|
||||||
command => [
|
command => [
|
||||||
'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump",
|
'pg_restore', '--list',
|
||||||
|
"$tempdir/compression_gzip_custom.dump",
|
||||||
],
|
],
|
||||||
expected => qr/Compression: gzip/,
|
expected => qr/Compression: gzip/,
|
||||||
name => 'data content is gzip-compressed'
|
name => 'data content is gzip-compressed'
|
||||||
@ -105,9 +109,12 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'gzip',
|
compile_option => 'gzip',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--jobs=2',
|
'pg_dump',
|
||||||
'--format=directory', '--compress=gzip:1',
|
'--jobs' => '2',
|
||||||
"--file=$tempdir/compression_gzip_dir", 'postgres',
|
'--format' => 'directory',
|
||||||
|
'--compress' => 'gzip:1',
|
||||||
|
'--file' => "$tempdir/compression_gzip_dir",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
# Give coverage for manually compressed blobs.toc files during
|
# Give coverage for manually compressed blobs.toc files during
|
||||||
# restore.
|
# restore.
|
||||||
@ -121,8 +128,9 @@ my %pgdump_runs = (
|
|||||||
"$tempdir/compression_gzip_dir/*.dat.gz",
|
"$tempdir/compression_gzip_dir/*.dat.gz",
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore', '--jobs=2',
|
'pg_restore',
|
||||||
"--file=$tempdir/compression_gzip_dir.sql",
|
'--jobs' => '2',
|
||||||
|
'--file' => "$tempdir/compression_gzip_dir.sql",
|
||||||
"$tempdir/compression_gzip_dir",
|
"$tempdir/compression_gzip_dir",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@ -131,8 +139,11 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'gzip',
|
compile_option => 'gzip',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--format=plain', '-Z1',
|
'pg_dump',
|
||||||
"--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres',
|
'--format' => 'plain',
|
||||||
|
'--compress' => '1',
|
||||||
|
'--file' => "$tempdir/compression_gzip_plain.sql.gz",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
# Decompress the generated file to run through the tests.
|
# Decompress the generated file to run through the tests.
|
||||||
compress_cmd => {
|
compress_cmd => {
|
||||||
@ -146,18 +157,22 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'lz4',
|
compile_option => 'lz4',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--format=custom',
|
'pg_dump',
|
||||||
'--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump",
|
'--format' => 'custom',
|
||||||
|
'--compress' => 'lz4',
|
||||||
|
'--file' => "$tempdir/compression_lz4_custom.dump",
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore',
|
'pg_restore',
|
||||||
"--file=$tempdir/compression_lz4_custom.sql",
|
'--file' => "$tempdir/compression_lz4_custom.sql",
|
||||||
"$tempdir/compression_lz4_custom.dump",
|
"$tempdir/compression_lz4_custom.dump",
|
||||||
],
|
],
|
||||||
command_like => {
|
command_like => {
|
||||||
command =>
|
command => [
|
||||||
[ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
|
'pg_restore', '--list',
|
||||||
|
"$tempdir/compression_lz4_custom.dump",
|
||||||
|
],
|
||||||
expected => qr/Compression: lz4/,
|
expected => qr/Compression: lz4/,
|
||||||
name => 'data content is lz4 compressed'
|
name => 'data content is lz4 compressed'
|
||||||
},
|
},
|
||||||
@ -168,9 +183,12 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'lz4',
|
compile_option => 'lz4',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--jobs=2',
|
'pg_dump',
|
||||||
'--format=directory', '--compress=lz4:1',
|
'--jobs' => '2',
|
||||||
"--file=$tempdir/compression_lz4_dir", 'postgres',
|
'--format' => 'directory',
|
||||||
|
'--compress' => 'lz4:1',
|
||||||
|
'--file' => "$tempdir/compression_lz4_dir",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
# Verify that data files were compressed
|
# Verify that data files were compressed
|
||||||
glob_patterns => [
|
glob_patterns => [
|
||||||
@ -178,8 +196,9 @@ my %pgdump_runs = (
|
|||||||
"$tempdir/compression_lz4_dir/*.dat.lz4",
|
"$tempdir/compression_lz4_dir/*.dat.lz4",
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore', '--jobs=2',
|
'pg_restore',
|
||||||
"--file=$tempdir/compression_lz4_dir.sql",
|
'--jobs' => '2',
|
||||||
|
'--file' => "$tempdir/compression_lz4_dir.sql",
|
||||||
"$tempdir/compression_lz4_dir",
|
"$tempdir/compression_lz4_dir",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@ -188,8 +207,11 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'lz4',
|
compile_option => 'lz4',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--format=plain', '--compress=lz4',
|
'pg_dump',
|
||||||
"--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres',
|
'--format' => 'plain',
|
||||||
|
'--compress' => 'lz4',
|
||||||
|
'--file' => "$tempdir/compression_lz4_plain.sql.lz4",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
# Decompress the generated file to run through the tests.
|
# Decompress the generated file to run through the tests.
|
||||||
compress_cmd => {
|
compress_cmd => {
|
||||||
@ -206,18 +228,21 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'zstd',
|
compile_option => 'zstd',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--format=custom',
|
'pg_dump',
|
||||||
'--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump",
|
'--format' => 'custom',
|
||||||
|
'--compress' => 'zstd',
|
||||||
|
'--file' => "$tempdir/compression_zstd_custom.dump",
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore',
|
'pg_restore',
|
||||||
"--file=$tempdir/compression_zstd_custom.sql",
|
'--file' => "$tempdir/compression_zstd_custom.sql",
|
||||||
"$tempdir/compression_zstd_custom.dump",
|
"$tempdir/compression_zstd_custom.dump",
|
||||||
],
|
],
|
||||||
command_like => {
|
command_like => {
|
||||||
command => [
|
command => [
|
||||||
'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
|
'pg_restore', '--list',
|
||||||
|
"$tempdir/compression_zstd_custom.dump",
|
||||||
],
|
],
|
||||||
expected => qr/Compression: zstd/,
|
expected => qr/Compression: zstd/,
|
||||||
name => 'data content is zstd compressed'
|
name => 'data content is zstd compressed'
|
||||||
@ -228,9 +253,12 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'zstd',
|
compile_option => 'zstd',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--jobs=2',
|
'pg_dump',
|
||||||
'--format=directory', '--compress=zstd:1',
|
'--jobs' => '2',
|
||||||
"--file=$tempdir/compression_zstd_dir", 'postgres',
|
'--format' => 'directory',
|
||||||
|
'--compress' => 'zstd:1',
|
||||||
|
'--file' => "$tempdir/compression_zstd_dir",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
# Give coverage for manually compressed blobs.toc files during
|
# Give coverage for manually compressed blobs.toc files during
|
||||||
# restore.
|
# restore.
|
||||||
@ -247,8 +275,9 @@ my %pgdump_runs = (
|
|||||||
"$tempdir/compression_zstd_dir/*.dat.zst",
|
"$tempdir/compression_zstd_dir/*.dat.zst",
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore', '--jobs=2',
|
'pg_restore',
|
||||||
"--file=$tempdir/compression_zstd_dir.sql",
|
'--jobs' => '2',
|
||||||
|
'--file' => "$tempdir/compression_zstd_dir.sql",
|
||||||
"$tempdir/compression_zstd_dir",
|
"$tempdir/compression_zstd_dir",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@ -258,8 +287,11 @@ my %pgdump_runs = (
|
|||||||
test_key => 'compression',
|
test_key => 'compression',
|
||||||
compile_option => 'zstd',
|
compile_option => 'zstd',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--format=plain', '--compress=zstd:long',
|
'pg_dump',
|
||||||
"--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres',
|
'--format' => 'plain',
|
||||||
|
'--compress' => 'zstd:long',
|
||||||
|
'--file' => "$tempdir/compression_zstd_plain.sql.zst",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
# Decompress the generated file to run through the tests.
|
# Decompress the generated file to run through the tests.
|
||||||
compress_cmd => {
|
compress_cmd => {
|
||||||
@ -274,81 +306,80 @@ my %pgdump_runs = (
|
|||||||
|
|
||||||
clean => {
|
clean => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/clean.sql",
|
||||||
"--file=$tempdir/clean.sql",
|
'--clean',
|
||||||
'-c',
|
'--dbname' => 'postgres', # alternative way to specify database
|
||||||
'-d', 'postgres', # alternative way to specify database
|
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
clean_if_exists => {
|
clean_if_exists => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/clean_if_exists.sql",
|
||||||
"--file=$tempdir/clean_if_exists.sql",
|
'--clean',
|
||||||
'-c',
|
|
||||||
'--if-exists',
|
'--if-exists',
|
||||||
'--encoding=UTF8', # no-op, just tests that option is accepted
|
'--encoding' => 'UTF8', # no-op, just for testing
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
column_inserts => {
|
column_inserts => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/column_inserts.sql", '-a',
|
'--file' => "$tempdir/column_inserts.sql",
|
||||||
|
'--data-only',
|
||||||
'--column-inserts', 'postgres',
|
'--column-inserts', 'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
createdb => {
|
createdb => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/createdb.sql",
|
||||||
"--file=$tempdir/createdb.sql",
|
'--create',
|
||||||
'-C',
|
'--no-reconnect', # no-op, just for testing
|
||||||
'-R', # no-op, just for testing
|
'--verbose',
|
||||||
'-v',
|
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
data_only => {
|
data_only => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/data_only.sql",
|
||||||
"--file=$tempdir/data_only.sql",
|
'--data-only',
|
||||||
'-a',
|
'--superuser' => 'test_superuser',
|
||||||
'--superuser=test_superuser',
|
|
||||||
'--disable-triggers',
|
'--disable-triggers',
|
||||||
'-v', # no-op, just make sure it works
|
'--verbose', # no-op, just make sure it works
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
defaults => {
|
defaults => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
'-f', "$tempdir/defaults.sql",
|
'--file' => "$tempdir/defaults.sql",
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
defaults_no_public => {
|
defaults_no_public => {
|
||||||
database => 'regress_pg_dump_test',
|
database => 'regress_pg_dump_test',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync', '-f', "$tempdir/defaults_no_public.sql",
|
'pg_dump', '--no-sync',
|
||||||
|
'--file' => "$tempdir/defaults_no_public.sql",
|
||||||
'regress_pg_dump_test',
|
'regress_pg_dump_test',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
defaults_no_public_clean => {
|
defaults_no_public_clean => {
|
||||||
database => 'regress_pg_dump_test',
|
database => 'regress_pg_dump_test',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync', '-c', '-f',
|
'pg_dump', '--no-sync',
|
||||||
"$tempdir/defaults_no_public_clean.sql",
|
'--clean',
|
||||||
|
'--file' => "$tempdir/defaults_no_public_clean.sql",
|
||||||
'regress_pg_dump_test',
|
'regress_pg_dump_test',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
defaults_public_owner => {
|
defaults_public_owner => {
|
||||||
database => 'regress_public_owner',
|
database => 'regress_public_owner',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync', '-f',
|
'pg_dump', '--no-sync',
|
||||||
"$tempdir/defaults_public_owner.sql",
|
'--file' => "$tempdir/defaults_public_owner.sql",
|
||||||
'regress_public_owner',
|
'regress_public_owner',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@ -360,17 +391,22 @@ my %pgdump_runs = (
|
|||||||
defaults_custom_format => {
|
defaults_custom_format => {
|
||||||
test_key => 'defaults',
|
test_key => 'defaults',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '-Fc',
|
'pg_dump',
|
||||||
"--file=$tempdir/defaults_custom_format.dump", 'postgres',
|
'--format' => 'custom',
|
||||||
|
'--file' => "$tempdir/defaults_custom_format.dump",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore', '-Fc',
|
'pg_restore',
|
||||||
"--file=$tempdir/defaults_custom_format.sql",
|
'--format' => 'custom',
|
||||||
|
'--file' => "$tempdir/defaults_custom_format.sql",
|
||||||
"$tempdir/defaults_custom_format.dump",
|
"$tempdir/defaults_custom_format.dump",
|
||||||
],
|
],
|
||||||
command_like => {
|
command_like => {
|
||||||
command =>
|
command => [
|
||||||
[ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
|
'pg_restore', '--list',
|
||||||
|
"$tempdir/defaults_custom_format.dump",
|
||||||
|
],
|
||||||
expected => $supports_gzip
|
expected => $supports_gzip
|
||||||
? qr/Compression: gzip/
|
? qr/Compression: gzip/
|
||||||
: qr/Compression: none/,
|
: qr/Compression: none/,
|
||||||
@ -385,17 +421,20 @@ my %pgdump_runs = (
|
|||||||
defaults_dir_format => {
|
defaults_dir_format => {
|
||||||
test_key => 'defaults',
|
test_key => 'defaults',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '-Fd',
|
'pg_dump',
|
||||||
"--file=$tempdir/defaults_dir_format", 'postgres',
|
'--format' => 'directory',
|
||||||
|
'--file' => "$tempdir/defaults_dir_format",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore', '-Fd',
|
'pg_restore',
|
||||||
"--file=$tempdir/defaults_dir_format.sql",
|
'--format' => 'directory',
|
||||||
|
'--file' => "$tempdir/defaults_dir_format.sql",
|
||||||
"$tempdir/defaults_dir_format",
|
"$tempdir/defaults_dir_format",
|
||||||
],
|
],
|
||||||
command_like => {
|
command_like => {
|
||||||
command =>
|
command =>
|
||||||
[ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
|
[ 'pg_restore', '--list', "$tempdir/defaults_dir_format", ],
|
||||||
expected => $supports_gzip ? qr/Compression: gzip/
|
expected => $supports_gzip ? qr/Compression: gzip/
|
||||||
: qr/Compression: none/,
|
: qr/Compression: none/,
|
||||||
name => 'data content is gzip-compressed by default',
|
name => 'data content is gzip-compressed by default',
|
||||||
@ -412,12 +451,15 @@ my %pgdump_runs = (
|
|||||||
defaults_parallel => {
|
defaults_parallel => {
|
||||||
test_key => 'defaults',
|
test_key => 'defaults',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '-Fd', '-j2', "--file=$tempdir/defaults_parallel",
|
'pg_dump',
|
||||||
|
'--format' => 'directory',
|
||||||
|
'--jobs' => 2,
|
||||||
|
'--file' => "$tempdir/defaults_parallel",
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore',
|
'pg_restore',
|
||||||
"--file=$tempdir/defaults_parallel.sql",
|
'--file' => "$tempdir/defaults_parallel.sql",
|
||||||
"$tempdir/defaults_parallel",
|
"$tempdir/defaults_parallel",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@ -426,55 +468,56 @@ my %pgdump_runs = (
|
|||||||
defaults_tar_format => {
|
defaults_tar_format => {
|
||||||
test_key => 'defaults',
|
test_key => 'defaults',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '-Ft',
|
'pg_dump',
|
||||||
"--file=$tempdir/defaults_tar_format.tar", 'postgres',
|
'--format' => 'tar',
|
||||||
|
'--file' => "$tempdir/defaults_tar_format.tar",
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore',
|
'pg_restore',
|
||||||
'--format=tar',
|
'--format' => 'tar',
|
||||||
"--file=$tempdir/defaults_tar_format.sql",
|
'--file' => "$tempdir/defaults_tar_format.sql",
|
||||||
"$tempdir/defaults_tar_format.tar",
|
"$tempdir/defaults_tar_format.tar",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
exclude_dump_test_schema => {
|
exclude_dump_test_schema => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/exclude_dump_test_schema.sql",
|
'--file' => "$tempdir/exclude_dump_test_schema.sql",
|
||||||
'--exclude-schema=dump_test', 'postgres',
|
'--exclude-schema' => 'dump_test',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
exclude_test_table => {
|
exclude_test_table => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/exclude_test_table.sql",
|
'--file' => "$tempdir/exclude_test_table.sql",
|
||||||
'--exclude-table=dump_test.test_table', 'postgres',
|
'--exclude-table' => 'dump_test.test_table',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
exclude_measurement => {
|
exclude_measurement => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/exclude_measurement.sql",
|
||||||
"--file=$tempdir/exclude_measurement.sql",
|
'--exclude-table-and-children' => 'dump_test.measurement',
|
||||||
'--exclude-table-and-children=dump_test.measurement',
|
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
exclude_measurement_data => {
|
exclude_measurement_data => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/exclude_measurement_data.sql",
|
||||||
"--file=$tempdir/exclude_measurement_data.sql",
|
'--exclude-table-data-and-children' => 'dump_test.measurement',
|
||||||
'--exclude-table-data-and-children=dump_test.measurement',
|
|
||||||
'--no-unlogged-table-data',
|
'--no-unlogged-table-data',
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
exclude_test_table_data => {
|
exclude_test_table_data => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/exclude_test_table_data.sql",
|
||||||
"--file=$tempdir/exclude_test_table_data.sql",
|
'--exclude-table-data' => 'dump_test.test_table',
|
||||||
'--exclude-table-data=dump_test.test_table',
|
|
||||||
'--no-unlogged-table-data',
|
'--no-unlogged-table-data',
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
@ -482,168 +525,190 @@ my %pgdump_runs = (
|
|||||||
inserts => {
|
inserts => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/inserts.sql", '-a',
|
'--file' => "$tempdir/inserts.sql",
|
||||||
|
'--data-only',
|
||||||
'--inserts', 'postgres',
|
'--inserts', 'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
pg_dumpall_globals => {
|
pg_dumpall_globals => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_globals.sql",
|
'pg_dumpall',
|
||||||
'-g', '--no-sync',
|
'--verbose',
|
||||||
|
'--file' => "$tempdir/pg_dumpall_globals.sql",
|
||||||
|
'--globals-only',
|
||||||
|
'--no-sync',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
pg_dumpall_globals_clean => {
|
pg_dumpall_globals_clean => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dumpall', "--file=$tempdir/pg_dumpall_globals_clean.sql",
|
'pg_dumpall',
|
||||||
'-g', '-c', '--no-sync',
|
'--file' => "$tempdir/pg_dumpall_globals_clean.sql",
|
||||||
|
'--globals-only',
|
||||||
|
'--clean',
|
||||||
|
'--no-sync',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
pg_dumpall_dbprivs => {
|
pg_dumpall_dbprivs => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dumpall', '--no-sync',
|
'pg_dumpall', '--no-sync',
|
||||||
"--file=$tempdir/pg_dumpall_dbprivs.sql",
|
'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
pg_dumpall_exclude => {
|
pg_dumpall_exclude => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_exclude.sql",
|
'pg_dumpall',
|
||||||
'--exclude-database', '*dump_test*', '--no-sync',
|
'--verbose',
|
||||||
|
'--file' => "$tempdir/pg_dumpall_exclude.sql",
|
||||||
|
'--exclude-database' => '*dump_test*',
|
||||||
|
'--no-sync',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
no_toast_compression => {
|
no_toast_compression => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/no_toast_compression.sql",
|
'--file' => "$tempdir/no_toast_compression.sql",
|
||||||
'--no-toast-compression', 'postgres',
|
'--no-toast-compression',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
no_large_objects => {
|
no_large_objects => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
|
'pg_dump', '--no-sync',
|
||||||
'-B', 'postgres',
|
'--file' => "$tempdir/no_large_objects.sql",
|
||||||
|
'--no-large-objects',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
no_privs => {
|
no_privs => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/no_privs.sql", '-x',
|
'--file' => "$tempdir/no_privs.sql",
|
||||||
|
'--no-privileges',
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
no_owner => {
|
no_owner => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/no_owner.sql", '-O',
|
'--file' => "$tempdir/no_owner.sql",
|
||||||
|
'--no-owner',
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
no_table_access_method => {
|
no_table_access_method => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/no_table_access_method.sql",
|
'--file' => "$tempdir/no_table_access_method.sql",
|
||||||
'--no-table-access-method', 'postgres',
|
'--no-table-access-method',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
only_dump_test_schema => {
|
only_dump_test_schema => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--no-sync',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/only_dump_test_schema.sql",
|
'--file' => "$tempdir/only_dump_test_schema.sql",
|
||||||
'--schema=dump_test', 'postgres',
|
'--schema' => 'dump_test',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
only_dump_test_table => {
|
only_dump_test_table => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/only_dump_test_table.sql",
|
||||||
"--file=$tempdir/only_dump_test_table.sql",
|
'--table' => 'dump_test.test_table',
|
||||||
'--table=dump_test.test_table',
|
'--lock-wait-timeout' =>
|
||||||
'--lock-wait-timeout='
|
(1000 * $PostgreSQL::Test::Utils::timeout_default),
|
||||||
. (1000 * $PostgreSQL::Test::Utils::timeout_default),
|
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
only_dump_measurement => {
|
only_dump_measurement => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/only_dump_measurement.sql",
|
||||||
"--file=$tempdir/only_dump_measurement.sql",
|
'--table-and-children' => 'dump_test.measurement',
|
||||||
'--table-and-children=dump_test.measurement',
|
'--lock-wait-timeout' =>
|
||||||
'--lock-wait-timeout='
|
(1000 * $PostgreSQL::Test::Utils::timeout_default),
|
||||||
. (1000 * $PostgreSQL::Test::Utils::timeout_default),
|
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
role => {
|
role => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/role.sql",
|
||||||
"--file=$tempdir/role.sql",
|
'--role' => 'regress_dump_test_role',
|
||||||
'--role=regress_dump_test_role',
|
'--schema' => 'dump_test_second_schema',
|
||||||
'--schema=dump_test_second_schema',
|
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
role_parallel => {
|
role_parallel => {
|
||||||
test_key => 'role',
|
test_key => 'role',
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--format' => 'directory',
|
||||||
'--format=directory',
|
'--jobs' => '2',
|
||||||
'--jobs=2',
|
'--file' => "$tempdir/role_parallel",
|
||||||
"--file=$tempdir/role_parallel",
|
'--role' => 'regress_dump_test_role',
|
||||||
'--role=regress_dump_test_role',
|
'--schema' => 'dump_test_second_schema',
|
||||||
'--schema=dump_test_second_schema',
|
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
restore_cmd => [
|
restore_cmd => [
|
||||||
'pg_restore', "--file=$tempdir/role_parallel.sql",
|
'pg_restore',
|
||||||
|
'--file' => "$tempdir/role_parallel.sql",
|
||||||
"$tempdir/role_parallel",
|
"$tempdir/role_parallel",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
rows_per_insert => {
|
rows_per_insert => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump',
|
'pg_dump', '--no-sync',
|
||||||
'--no-sync',
|
'--file' => "$tempdir/rows_per_insert.sql",
|
||||||
"--file=$tempdir/rows_per_insert.sql",
|
'--data-only',
|
||||||
'-a',
|
'--rows-per-insert' => '4',
|
||||||
'--rows-per-insert=4',
|
'--table' => 'dump_test.test_table',
|
||||||
'--table=dump_test.test_table',
|
'--table' => 'dump_test.test_fourth_table',
|
||||||
'--table=dump_test.test_fourth_table',
|
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
schema_only => {
|
schema_only => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', '--format=plain',
|
'pg_dump', '--no-sync',
|
||||||
"--file=$tempdir/schema_only.sql", '--no-sync',
|
'--format' => 'plain',
|
||||||
'-s', 'postgres',
|
'--file' => "$tempdir/schema_only.sql",
|
||||||
|
'--schema-only',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
section_pre_data => {
|
section_pre_data => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', "--file=$tempdir/section_pre_data.sql",
|
'pg_dump', '--no-sync',
|
||||||
'--section=pre-data', '--no-sync',
|
'--file' => "$tempdir/section_pre_data.sql",
|
||||||
|
'--section' => 'pre-data',
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
section_data => {
|
section_data => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', "--file=$tempdir/section_data.sql",
|
'pg_dump', '--no-sync',
|
||||||
'--section=data', '--no-sync',
|
'--file' => "$tempdir/section_data.sql",
|
||||||
|
'--section' => 'data',
|
||||||
'postgres',
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
section_post_data => {
|
section_post_data => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', "--file=$tempdir/section_post_data.sql",
|
'pg_dump', '--no-sync',
|
||||||
'--section=post-data', '--no-sync', 'postgres',
|
'--file' => "$tempdir/section_post_data.sql",
|
||||||
|
'--section' => 'post-data',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
test_schema_plus_large_objects => {
|
test_schema_plus_large_objects => {
|
||||||
dump_cmd => [
|
dump_cmd => [
|
||||||
'pg_dump', "--file=$tempdir/test_schema_plus_large_objects.sql",
|
'pg_dump', '--no-sync',
|
||||||
|
'--file' => "$tempdir/test_schema_plus_large_objects.sql",
|
||||||
'--schema=dump_test', '-b', '-B', '--no-sync', 'postgres',
|
'--schema' => 'dump_test',
|
||||||
|
'--large-objects',
|
||||||
|
'--no-large-objects',
|
||||||
|
'postgres',
|
||||||
],
|
],
|
||||||
},);
|
},);
|
||||||
|
|
||||||
@@ -4732,7 +4797,7 @@ foreach my $db (sort keys %create_sql)
|
|||||||
# Test connecting to a non-existent database
|
# Test connecting to a non-existent database
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_dump', '-p', "$port", 'qqq' ],
|
[ 'pg_dump', '--port' => $port, 'qqq' ],
|
||||||
qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/,
|
qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/,
|
||||||
'connecting to a non-existent database');
|
'connecting to a non-existent database');
|
||||||
|
|
||||||
@@ -4740,7 +4805,7 @@ command_fails_like(
|
|||||||
# Test connecting to an invalid database
|
# Test connecting to an invalid database
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '-d', 'regression_invalid' ],
|
[ 'pg_dump', '--dbname' => 'regression_invalid' ],
|
||||||
qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/,
|
qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/,
|
||||||
'connecting to an invalid database');
|
'connecting to an invalid database');
|
||||||
|
|
||||||
@@ -4748,7 +4813,7 @@ $node->command_fails_like(
|
|||||||
# Test connecting with an unprivileged user
|
# Test connecting with an unprivileged user
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_dump', '-p', "$port", '--role=regress_dump_test_role' ],
|
[ 'pg_dump', '--port' => $port, '--role' => 'regress_dump_test_role' ],
|
||||||
qr/\Qpg_dump: error: query failed: ERROR: permission denied for\E/,
|
qr/\Qpg_dump: error: query failed: ERROR: permission denied for\E/,
|
||||||
'connecting with an unprivileged user');
|
'connecting with an unprivileged user');
|
||||||
|
|
||||||
@@ -4756,22 +4821,32 @@ command_fails_like(
|
|||||||
# Test dumping a non-existent schema, table, and patterns with --strict-names
|
# Test dumping a non-existent schema, table, and patterns with --strict-names
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_dump', '-p', "$port", '-n', 'nonexistent' ],
|
[ 'pg_dump', '--port' => $port, '--schema' => 'nonexistent' ],
|
||||||
qr/\Qpg_dump: error: no matching schemas were found\E/,
|
qr/\Qpg_dump: error: no matching schemas were found\E/,
|
||||||
'dumping a non-existent schema');
|
'dumping a non-existent schema');
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_dump', '-p', "$port", '-t', 'nonexistent' ],
|
[ 'pg_dump', '--port' => $port, '--table' => 'nonexistent' ],
|
||||||
qr/\Qpg_dump: error: no matching tables were found\E/,
|
qr/\Qpg_dump: error: no matching tables were found\E/,
|
||||||
'dumping a non-existent table');
|
'dumping a non-existent table');
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_dump', '-p', "$port", '--strict-names', '-n', 'nonexistent*' ],
|
[
|
||||||
|
'pg_dump',
|
||||||
|
'--port' => $port,
|
||||||
|
'--strict-names',
|
||||||
|
'--schema' => 'nonexistent*'
|
||||||
|
],
|
||||||
qr/\Qpg_dump: error: no matching schemas were found for pattern\E/,
|
qr/\Qpg_dump: error: no matching schemas were found for pattern\E/,
|
||||||
'no matching schemas');
|
'no matching schemas');
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_dump', '-p', "$port", '--strict-names', '-t', 'nonexistent*' ],
|
[
|
||||||
|
'pg_dump',
|
||||||
|
'--port' => $port,
|
||||||
|
'--strict-names',
|
||||||
|
'--table' => 'nonexistent*'
|
||||||
|
],
|
||||||
qr/\Qpg_dump: error: no matching tables were found for pattern\E/,
|
qr/\Qpg_dump: error: no matching tables were found for pattern\E/,
|
||||||
'no matching tables');
|
'no matching tables');
|
||||||
|
|
||||||
@@ -4779,26 +4854,31 @@ command_fails_like(
|
|||||||
# Test invalid multipart database names
|
# Test invalid multipart database names
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dumpall', '--exclude-database', '.' ],
|
[ 'pg_dumpall', '--exclude-database' => '.' ],
|
||||||
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
|
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
|
||||||
'pg_dumpall: option --exclude-database rejects multipart pattern "."');
|
'pg_dumpall: option --exclude-database rejects multipart pattern "."');
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ],
|
[ 'pg_dumpall', '--exclude-database' => 'myhost.mydb' ],
|
||||||
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
|
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
|
||||||
'pg_dumpall: option --exclude-database rejects multipart database names');
|
'pg_dumpall: option --exclude-database rejects multipart database names');
|
||||||
|
|
||||||
##############################################################
|
##############################################################
|
||||||
# Test dumping pg_catalog (for research -- cannot be reloaded)
|
# Test dumping pg_catalog (for research -- cannot be reloaded)
|
||||||
|
|
||||||
$node->command_ok([ 'pg_dump', '-p', "$port", '-n', 'pg_catalog' ],
|
$node->command_ok(
|
||||||
|
[ 'pg_dump', '--port' => $port, '--schema' => 'pg_catalog' ],
|
||||||
'pg_dump: option -n pg_catalog');
|
'pg_dump: option -n pg_catalog');
|
||||||
|
|
||||||
#########################################
|
#########################################
|
||||||
# Test valid database exclusion patterns
|
# Test valid database exclusion patterns
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ 'pg_dumpall', '-p', "$port", '--exclude-database', '"myhost.mydb"' ],
|
[
|
||||||
|
'pg_dumpall',
|
||||||
|
'--port' => $port,
|
||||||
|
'--exclude-database' => '"myhost.mydb"'
|
||||||
|
],
|
||||||
'pg_dumpall: option --exclude-database handles database names with embedded dots'
|
'pg_dumpall: option --exclude-database handles database names with embedded dots'
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -4806,28 +4886,28 @@ $node->command_ok(
|
|||||||
# Test invalid multipart schema names
|
# Test invalid multipart schema names
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '--schema', 'myhost.mydb.myschema' ],
|
[ 'pg_dump', '--schema' => 'myhost.mydb.myschema' ],
|
||||||
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
|
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
|
||||||
'pg_dump: option --schema rejects three-part schema names');
|
'pg_dump: option --schema rejects three-part schema names');
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '--schema', 'otherdb.myschema' ],
|
[ 'pg_dump', '--schema' => 'otherdb.myschema' ],
|
||||||
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
|
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
|
||||||
'pg_dump: option --schema rejects cross-database multipart schema names');
|
'pg_dump: option --schema rejects cross-database multipart schema names');
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '--schema', '.' ],
|
[ 'pg_dump', '--schema' => '.' ],
|
||||||
qr/pg_dump: error: cross-database references are not implemented: \./,
|
qr/pg_dump: error: cross-database references are not implemented: \./,
|
||||||
'pg_dump: option --schema rejects degenerate two-part schema name: "."');
|
'pg_dump: option --schema rejects degenerate two-part schema name: "."');
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '--schema', '"some.other.db".myschema' ],
|
[ 'pg_dump', '--schema' => '"some.other.db".myschema' ],
|
||||||
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.myschema/,
|
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.myschema/,
|
||||||
'pg_dump: option --schema rejects cross-database multipart schema names with embedded dots'
|
'pg_dump: option --schema rejects cross-database multipart schema names with embedded dots'
|
||||||
);
|
);
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '--schema', '..' ],
|
[ 'pg_dump', '--schema' => '..' ],
|
||||||
qr/pg_dump: error: improper qualified name \(too many dotted names\): \.\./,
|
qr/pg_dump: error: improper qualified name \(too many dotted names\): \.\./,
|
||||||
'pg_dump: option --schema rejects degenerate three-part schema name: ".."'
|
'pg_dump: option --schema rejects degenerate three-part schema name: ".."'
|
||||||
);
|
);
|
||||||
@@ -4836,19 +4916,20 @@ $node->command_fails_like(
|
|||||||
# Test invalid multipart relation names
|
# Test invalid multipart relation names
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ],
|
[ 'pg_dump', '--table' => 'myhost.mydb.myschema.mytable' ],
|
||||||
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
|
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
|
||||||
'pg_dump: option --table rejects four-part table names');
|
'pg_dump: option --table rejects four-part table names');
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ],
|
[ 'pg_dump', '--table' => 'otherdb.pg_catalog.pg_class' ],
|
||||||
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
|
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
|
||||||
'pg_dump: option --table rejects cross-database three part table names');
|
'pg_dump: option --table rejects cross-database three part table names');
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', "$port", '--table',
|
'pg_dump',
|
||||||
'"some.other.db".pg_catalog.pg_class'
|
'--port' => $port,
|
||||||
|
'--table' => '"some.other.db".pg_catalog.pg_class'
|
||||||
],
|
],
|
||||||
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
|
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
|
||||||
'pg_dump: option --table rejects cross-database three part table names with embedded dots'
|
'pg_dump: option --table rejects cross-database three part table names with embedded dots'
|
||||||
|
@@ -28,12 +28,23 @@ $node->safe_psql('postgres', "CREATE FOREIGN TABLE t0 (a int) SERVER s0");
|
|||||||
$node->safe_psql('postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1");
|
$node->safe_psql('postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1");
|
||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ],
|
[
|
||||||
|
"pg_dump",
|
||||||
|
'--port' => $port,
|
||||||
|
'--include-foreign-data' => 's0',
|
||||||
|
'postgres'
|
||||||
|
],
|
||||||
qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: detail: Query was: .*t0/,
|
qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: detail: Query was: .*t0/,
|
||||||
"correctly fails to dump a foreign table from a dummy FDW");
|
"correctly fails to dump a foreign table from a dummy FDW");
|
||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ],
|
[
|
||||||
|
"pg_dump",
|
||||||
|
'--port' => $port,
|
||||||
|
'--data-only',
|
||||||
|
'--include-foreign-data' => 's2',
|
||||||
|
'postgres'
|
||||||
|
],
|
||||||
"dump foreign server with no tables");
|
"dump foreign server with no tables");
|
||||||
|
|
||||||
done_testing();
|
done_testing();
|
||||||
|
@@ -48,33 +48,42 @@ insert into tht select (x%10)::text::digit, x from generate_series(1,1000) x;
|
|||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-Fd', '--no-sync', '-j2', '-f', "$backupdir/dump1",
|
'pg_dump',
|
||||||
$node->connstr($dbname1)
|
'--format' => 'directory',
|
||||||
|
'--no-sync',
|
||||||
|
'--jobs' => 2,
|
||||||
|
'--file' => "$backupdir/dump1",
|
||||||
|
$node->connstr($dbname1),
|
||||||
],
|
],
|
||||||
'parallel dump');
|
'parallel dump');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-v',
|
'pg_restore', '--verbose',
|
||||||
'-d', $node->connstr($dbname2),
|
'--dbname' => $node->connstr($dbname2),
|
||||||
'-j3', "$backupdir/dump1"
|
'--jobs' => 3,
|
||||||
|
"$backupdir/dump1",
|
||||||
],
|
],
|
||||||
'parallel restore');
|
'parallel restore');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-Fd',
|
'pg_dump',
|
||||||
'--no-sync', '-j2',
|
'--format' => 'directory',
|
||||||
'-f', "$backupdir/dump2",
|
'--no-sync',
|
||||||
'--inserts', $node->connstr($dbname1)
|
'--jobs' => 2,
|
||||||
|
'--file' => "$backupdir/dump2",
|
||||||
|
'--inserts',
|
||||||
|
$node->connstr($dbname1),
|
||||||
],
|
],
|
||||||
'parallel dump as inserts');
|
'parallel dump as inserts');
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-v',
|
'pg_restore', '--verbose',
|
||||||
'-d', $node->connstr($dbname3),
|
'--dbname' => $node->connstr($dbname3),
|
||||||
'-j3', "$backupdir/dump2"
|
'--jobs' => 3,
|
||||||
|
"$backupdir/dump2",
|
||||||
],
|
],
|
||||||
'parallel restore as inserts');
|
'parallel restore as inserts');
|
||||||
|
|
||||||
|
@@ -90,8 +90,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"filter file without patterns");
|
"filter file without patterns");
|
||||||
|
|
||||||
@@ -117,8 +120,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump tables with filter patterns as well as comments and whitespace");
|
"dump tables with filter patterns as well as comments and whitespace");
|
||||||
|
|
||||||
@@ -143,8 +149,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"filter file without patterns");
|
"filter file without patterns");
|
||||||
|
|
||||||
@@ -162,8 +171,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump tables with exclusion of a single table");
|
"dump tables with exclusion of a single table");
|
||||||
|
|
||||||
@@ -183,8 +195,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump tables with wildcard in pattern");
|
"dump tables with wildcard in pattern");
|
||||||
|
|
||||||
@@ -205,8 +220,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump tables with multiline names requiring quoting");
|
"dump tables with multiline names requiring quoting");
|
||||||
|
|
||||||
@@ -223,8 +241,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump tables with filter");
|
"dump tables with filter");
|
||||||
|
|
||||||
@@ -241,8 +262,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"exclude the public schema");
|
"exclude the public schema");
|
||||||
|
|
||||||
@@ -263,9 +287,12 @@ close $alt_inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
"--filter=$tempdir/inputfile2.txt", 'postgres'
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'--filter' => "$tempdir/inputfile2.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"exclude the public schema with multiple filters");
|
"exclude the public schema with multiple filters");
|
||||||
|
|
||||||
@@ -284,8 +311,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump tables with filter");
|
"dump tables with filter");
|
||||||
|
|
||||||
@@ -301,8 +331,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump tables with filter");
|
"dump tables with filter");
|
||||||
|
|
||||||
@@ -321,8 +354,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
qr/pg_dump: error: no matching foreign servers were found for pattern/,
|
qr/pg_dump: error: no matching foreign servers were found for pattern/,
|
||||||
"dump nonexisting foreign server");
|
"dump nonexisting foreign server");
|
||||||
@@ -334,8 +370,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump foreign_data with filter");
|
"dump foreign_data with filter");
|
||||||
|
|
||||||
@@ -350,8 +389,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
qr/exclude filter for "foreign data" is not allowed/,
|
qr/exclude filter for "foreign data" is not allowed/,
|
||||||
"erroneously exclude foreign server");
|
"erroneously exclude foreign server");
|
||||||
@@ -367,8 +409,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
qr/invalid filter command/,
|
qr/invalid filter command/,
|
||||||
"invalid syntax: incorrect filter command");
|
"invalid syntax: incorrect filter command");
|
||||||
@@ -381,8 +426,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
qr/unsupported filter object type: "xxx"/,
|
qr/unsupported filter object type: "xxx"/,
|
||||||
"invalid syntax: invalid object type specified, should be table, schema, foreign_data or data"
|
"invalid syntax: invalid object type specified, should be table, schema, foreign_data or data"
|
||||||
@@ -396,8 +444,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
qr/missing object name/,
|
qr/missing object name/,
|
||||||
"invalid syntax: missing object identifier pattern");
|
"invalid syntax: missing object identifier pattern");
|
||||||
@@ -410,8 +461,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
qr/no matching tables were found/,
|
qr/no matching tables were found/,
|
||||||
"invalid syntax: extra content after object identifier pattern");
|
"invalid syntax: extra content after object identifier pattern");
|
||||||
@@ -427,8 +481,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
'--strict-names', 'postgres'
|
'--strict-names', 'postgres'
|
||||||
],
|
],
|
||||||
"strict names with matching pattern");
|
"strict names with matching pattern");
|
||||||
@@ -445,8 +501,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
'--strict-names', 'postgres'
|
'--strict-names', 'postgres'
|
||||||
],
|
],
|
||||||
qr/no matching tables were found/,
|
qr/no matching tables were found/,
|
||||||
@@ -464,8 +522,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-p', $port, '-f', $plainfile,
|
'pg_dumpall',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
"dump tables with exclusion of a database");
|
"dump tables with exclusion of a database");
|
||||||
|
|
||||||
@@ -478,8 +538,10 @@ ok($dump =~ qr/^\\connect template1/m, "database template1 is dumped");
|
|||||||
# --globals-only with exclusions
|
# --globals-only with exclusions
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-p', $port, '-f', $plainfile,
|
'pg_dumpall',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
'--globals-only'
|
'--globals-only'
|
||||||
],
|
],
|
||||||
qr/\Qpg_dumpall: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
|
qr/\Qpg_dumpall: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
|
||||||
@@ -494,8 +556,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-p', $port, '-f', $plainfile,
|
'pg_dumpall',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
qr/invalid filter command/,
|
qr/invalid filter command/,
|
||||||
"invalid syntax: incorrect filter command");
|
"invalid syntax: incorrect filter command");
|
||||||
@@ -508,8 +572,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-p', $port, '-f', $plainfile,
|
'pg_dumpall',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
qr/unsupported filter object type: "xxx"/,
|
qr/unsupported filter object type: "xxx"/,
|
||||||
"invalid syntax: exclusion of non-existing object type");
|
"invalid syntax: exclusion of non-existing object type");
|
||||||
@@ -521,8 +587,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-p', $port, '-f', $plainfile,
|
'pg_dumpall',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
qr/pg_dumpall: error: invalid format in filter/,
|
qr/pg_dumpall: error: invalid format in filter/,
|
||||||
"invalid syntax: exclusion of unsupported object type");
|
"invalid syntax: exclusion of unsupported object type");
|
||||||
@@ -532,8 +600,11 @@ command_fails_like(
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', "$tempdir/filter_test.dump",
|
'pg_dump',
|
||||||
"-Fc", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => "$tempdir/filter_test.dump",
|
||||||
|
'--format' => 'custom',
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"dump all tables");
|
"dump all tables");
|
||||||
|
|
||||||
@@ -544,9 +615,12 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
"-Fc", "$tempdir/filter_test.dump"
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'--format' => 'custom',
|
||||||
|
"$tempdir/filter_test.dump"
|
||||||
],
|
],
|
||||||
"restore tables with filter");
|
"restore tables with filter");
|
||||||
|
|
||||||
@@ -563,8 +637,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
qr/include filter for "table data" is not allowed/,
|
qr/include filter for "table data" is not allowed/,
|
||||||
"invalid syntax: inclusion of unallowed object");
|
"invalid syntax: inclusion of unallowed object");
|
||||||
@@ -576,8 +652,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
qr/include filter for "extension" is not allowed/,
|
qr/include filter for "extension" is not allowed/,
|
||||||
"invalid syntax: inclusion of unallowed object");
|
"invalid syntax: inclusion of unallowed object");
|
||||||
@@ -589,8 +667,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
qr/exclude filter for "extension" is not allowed/,
|
qr/exclude filter for "extension" is not allowed/,
|
||||||
"invalid syntax: exclusion of unallowed object");
|
"invalid syntax: exclusion of unallowed object");
|
||||||
@@ -602,8 +682,10 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt"
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt"
|
||||||
],
|
],
|
||||||
qr/exclude filter for "table data" is not allowed/,
|
qr/exclude filter for "table data" is not allowed/,
|
||||||
"invalid syntax: exclusion of unallowed object");
|
"invalid syntax: exclusion of unallowed object");
|
||||||
@@ -613,8 +695,11 @@ command_fails_like(
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', "$tempdir/filter_test.dump",
|
'pg_dump',
|
||||||
"-Fc", 'sourcedb'
|
'--port' => $port,
|
||||||
|
'--file' => "$tempdir/filter_test.dump",
|
||||||
|
'--format' => 'custom',
|
||||||
|
'sourcedb'
|
||||||
],
|
],
|
||||||
"dump all objects from sourcedb");
|
"dump all objects from sourcedb");
|
||||||
|
|
||||||
@@ -625,9 +710,12 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
"-Fc", "$tempdir/filter_test.dump"
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'--format' => 'custom',
|
||||||
|
"$tempdir/filter_test.dump"
|
||||||
],
|
],
|
||||||
"restore function with filter");
|
"restore function with filter");
|
||||||
|
|
||||||
@@ -646,9 +734,12 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
"-Fc", "$tempdir/filter_test.dump"
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'--format' => 'custom',
|
||||||
|
"$tempdir/filter_test.dump"
|
||||||
],
|
],
|
||||||
"restore function with filter");
|
"restore function with filter");
|
||||||
|
|
||||||
@@ -667,9 +758,12 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
"-Fc", "$tempdir/filter_test.dump"
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'--format' => 'custom',
|
||||||
|
"$tempdir/filter_test.dump"
|
||||||
],
|
],
|
||||||
"restore function with filter");
|
"restore function with filter");
|
||||||
|
|
||||||
@@ -687,9 +781,12 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
"-Fc", "$tempdir/filter_test.dump"
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'--format' => 'custom',
|
||||||
|
"$tempdir/filter_test.dump"
|
||||||
],
|
],
|
||||||
"restore function with filter");
|
"restore function with filter");
|
||||||
|
|
||||||
@@ -707,9 +804,12 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-p', $port, '-f', $plainfile,
|
'pg_restore',
|
||||||
"--filter=$tempdir/inputfile.txt",
|
'--port' => $port,
|
||||||
"-Fc", "$tempdir/filter_test.dump"
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'--format' => 'custom',
|
||||||
|
"$tempdir/filter_test.dump"
|
||||||
],
|
],
|
||||||
"restore function with filter");
|
"restore function with filter");
|
||||||
|
|
||||||
@@ -733,8 +833,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"filter file without patterns");
|
"filter file without patterns");
|
||||||
|
|
||||||
@@ -750,8 +853,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"filter file without patterns");
|
"filter file without patterns");
|
||||||
|
|
||||||
@@ -768,8 +874,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
"filter file without patterns");
|
"filter file without patterns");
|
||||||
|
|
||||||
@@ -788,8 +897,11 @@ close $inputfile;
|
|||||||
|
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_dump', '-p', $port, '-f', $plainfile,
|
'pg_dump',
|
||||||
"--filter=$tempdir/inputfile.txt", 'postgres'
|
'--port' => $port,
|
||||||
|
'--file' => $plainfile,
|
||||||
|
'--filter' => "$tempdir/inputfile.txt",
|
||||||
|
'postgres'
|
||||||
],
|
],
|
||||||
qr/pg_dump: error: no matching extensions were found/,
|
qr/pg_dump: error: no matching extensions were found/,
|
||||||
"dump nonexisting extension");
|
"dump nonexisting extension");
|
||||||
|
@@ -51,16 +51,20 @@ my $src_bootstrap_super = 'regress_postgres';
|
|||||||
my $dst_bootstrap_super = 'boot';
|
my $dst_bootstrap_super = 'boot';
|
||||||
|
|
||||||
my $node = PostgreSQL::Test::Cluster->new('main');
|
my $node = PostgreSQL::Test::Cluster->new('main');
|
||||||
$node->init(extra =>
|
$node->init(
|
||||||
[ '-U', $src_bootstrap_super, '--locale=C', '--encoding=LATIN1' ]);
|
extra => [
|
||||||
|
'--username' => $src_bootstrap_super,
|
||||||
|
'--locale' => 'C',
|
||||||
|
'--encoding' => 'LATIN1',
|
||||||
|
]);
|
||||||
|
|
||||||
# prep pg_hba.conf and pg_ident.conf
|
# prep pg_hba.conf and pg_ident.conf
|
||||||
$node->run_log(
|
$node->run_log(
|
||||||
[
|
[
|
||||||
$ENV{PG_REGRESS}, '--config-auth',
|
$ENV{PG_REGRESS},
|
||||||
$node->data_dir, '--user',
|
'--config-auth' => $node->data_dir,
|
||||||
$src_bootstrap_super, '--create-role',
|
'--user' => $src_bootstrap_super,
|
||||||
"$username1,$username2,$username3,$username4"
|
'--create-role' => "$username1,$username2,$username3,$username4",
|
||||||
]);
|
]);
|
||||||
$node->start;
|
$node->start;
|
||||||
|
|
||||||
@@ -69,106 +73,158 @@ my $discard = "$backupdir/discard.sql";
|
|||||||
my $plain = "$backupdir/plain.sql";
|
my $plain = "$backupdir/plain.sql";
|
||||||
my $dirfmt = "$backupdir/dirfmt";
|
my $dirfmt = "$backupdir/dirfmt";
|
||||||
|
|
||||||
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]);
|
|
||||||
$node->run_log(
|
$node->run_log(
|
||||||
[ 'createuser', '-U', $src_bootstrap_super, '-s', $username1 ]);
|
[ 'createdb', '--username' => $src_bootstrap_super, $dbname1 ]);
|
||||||
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname2 ]);
|
|
||||||
$node->run_log(
|
$node->run_log(
|
||||||
[ 'createuser', '-U', $src_bootstrap_super, '-s', $username2 ]);
|
[
|
||||||
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname3 ]);
|
'createuser',
|
||||||
|
'--username' => $src_bootstrap_super,
|
||||||
|
'--superuser',
|
||||||
|
$username1,
|
||||||
|
]);
|
||||||
$node->run_log(
|
$node->run_log(
|
||||||
[ 'createuser', '-U', $src_bootstrap_super, '-s', $username3 ]);
|
[ 'createdb', '--username' => $src_bootstrap_super, $dbname2 ]);
|
||||||
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname4 ]);
|
|
||||||
$node->run_log(
|
$node->run_log(
|
||||||
[ 'createuser', '-U', $src_bootstrap_super, '-s', $username4 ]);
|
[
|
||||||
|
'createuser',
|
||||||
|
'--username' => $src_bootstrap_super,
|
||||||
|
'--superuser',
|
||||||
|
$username2,
|
||||||
|
]);
|
||||||
|
$node->run_log(
|
||||||
|
[ 'createdb', '--username' => $src_bootstrap_super, $dbname3 ]);
|
||||||
|
$node->run_log(
|
||||||
|
[
|
||||||
|
'createuser',
|
||||||
|
'--username' => $src_bootstrap_super,
|
||||||
|
'--superuser',
|
||||||
|
$username3,
|
||||||
|
]);
|
||||||
|
$node->run_log(
|
||||||
|
[ 'createdb', '--username' => $src_bootstrap_super, $dbname4 ]);
|
||||||
|
$node->run_log(
|
||||||
|
[
|
||||||
|
'createuser',
|
||||||
|
'--username' => $src_bootstrap_super,
|
||||||
|
'--superuser',
|
||||||
|
$username4,
|
||||||
|
]);
|
||||||
|
|
||||||
|
|
||||||
# For these tests, pg_dumpall -r is used because it produces a short
|
# For these tests, pg_dumpall --roles-only is used because it produces
|
||||||
# dump.
|
# a short dump.
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-r', '-f', $discard, '--dbname',
|
'pg_dumpall', '--roles-only',
|
||||||
$node->connstr($dbname1),
|
'--file' => $discard,
|
||||||
'-U', $username4
|
'--dbname' => $node->connstr($dbname1),
|
||||||
|
'--username' => $username4,
|
||||||
],
|
],
|
||||||
'pg_dumpall with long ASCII name 1');
|
'pg_dumpall with long ASCII name 1');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname',
|
'pg_dumpall', '--no-sync', '--roles-only',
|
||||||
$node->connstr($dbname2),
|
'--file' => $discard,
|
||||||
'-U', $username3
|
'--dbname' => $node->connstr($dbname2),
|
||||||
|
'--username' => $username3,
|
||||||
],
|
],
|
||||||
'pg_dumpall with long ASCII name 2');
|
'pg_dumpall with long ASCII name 2');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname',
|
'pg_dumpall', '--no-sync', '--roles-only',
|
||||||
$node->connstr($dbname3),
|
'--file' => $discard,
|
||||||
'-U', $username2
|
'--dbname' => $node->connstr($dbname3),
|
||||||
|
'--username' => $username2,
|
||||||
],
|
],
|
||||||
'pg_dumpall with long ASCII name 3');
|
'pg_dumpall with long ASCII name 3');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname',
|
'pg_dumpall', '--no-sync', '--roles-only',
|
||||||
$node->connstr($dbname4),
|
'--file' => $discard,
|
||||||
'-U', $username1
|
'--dbname' => $node->connstr($dbname4),
|
||||||
|
'--username' => $username1,
|
||||||
],
|
],
|
||||||
'pg_dumpall with long ASCII name 4');
|
'pg_dumpall with long ASCII name 4');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dumpall', '-U',
|
'pg_dumpall', '--no-sync', '--roles-only',
|
||||||
$src_bootstrap_super, '--no-sync',
|
'--username' => $src_bootstrap_super,
|
||||||
'-r', '-l',
|
'--dbname' => 'dbname=template1',
|
||||||
'dbname=template1'
|
|
||||||
],
|
],
|
||||||
'pg_dumpall -l accepts connection string');
|
'pg_dumpall --dbname accepts connection string');
|
||||||
|
|
||||||
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, "foo\n\rbar" ]);
|
$node->run_log(
|
||||||
|
[ 'createdb', '--username' => $src_bootstrap_super, "foo\n\rbar" ]);
|
||||||
|
|
||||||
# not sufficient to use -r here
|
# not sufficient to use --roles-only here
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[ 'pg_dumpall', '-U', $src_bootstrap_super, '--no-sync', '-f', $discard ],
|
[
|
||||||
|
'pg_dumpall', '--no-sync',
|
||||||
|
'--username' => $src_bootstrap_super,
|
||||||
|
'--file' => $discard,
|
||||||
|
],
|
||||||
'pg_dumpall with \n\r in database name');
|
'pg_dumpall with \n\r in database name');
|
||||||
$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, "foo\n\rbar" ]);
|
$node->run_log(
|
||||||
|
[ 'dropdb', '--username' => $src_bootstrap_super, "foo\n\rbar" ]);
|
||||||
|
|
||||||
|
|
||||||
# make a table, so the parallel worker has something to dump
|
# make a table, so the parallel worker has something to dump
|
||||||
$node->safe_psql(
|
$node->safe_psql(
|
||||||
$dbname1,
|
$dbname1,
|
||||||
'CREATE TABLE t0()',
|
'CREATE TABLE t0()',
|
||||||
extra_params => [ '-U', $src_bootstrap_super ]);
|
extra_params => [ '--username' => $src_bootstrap_super ]);
|
||||||
|
|
||||||
# XXX no printed message when this fails, just SIGPIPE termination
|
# XXX no printed message when this fails, just SIGPIPE termination
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt, '-U', $username1,
|
'pg_dump',
|
||||||
$node->connstr($dbname1)
|
'--format' => 'directory',
|
||||||
|
'--no-sync',
|
||||||
|
'--jobs' => 2,
|
||||||
|
'--file' => $dirfmt,
|
||||||
|
'--username' => $username1,
|
||||||
|
$node->connstr($dbname1),
|
||||||
],
|
],
|
||||||
'parallel dump');
|
'parallel dump');
|
||||||
|
|
||||||
# recreate $dbname1 for restore test
|
# recreate $dbname1 for restore test
|
||||||
$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]);
|
$node->run_log([ 'dropdb', '--username' => $src_bootstrap_super, $dbname1 ]);
|
||||||
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]);
|
$node->run_log(
|
||||||
|
[ 'createdb', '--username' => $src_bootstrap_super, $dbname1 ]);
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-v', '-d', 'template1',
|
'pg_restore',
|
||||||
'-j2', '-U', $username1, $dirfmt
|
'--verbose',
|
||||||
|
'--dbname' => 'template1',
|
||||||
|
'--jobs' => 2,
|
||||||
|
'--username' => $username1,
|
||||||
|
$dirfmt,
|
||||||
],
|
],
|
||||||
'parallel restore');
|
'parallel restore');
|
||||||
|
|
||||||
$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]);
|
$node->run_log([ 'dropdb', '--username' => $src_bootstrap_super, $dbname1 ]);
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_restore', '-C', '-v', '-d',
|
'pg_restore',
|
||||||
'template1', '-j2', '-U', $username1,
|
'--create',
|
||||||
$dirfmt
|
'--verbose',
|
||||||
|
'--dbname' => 'template1',
|
||||||
|
'--jobs' => 2,
|
||||||
|
'--username' => $username1,
|
||||||
|
$dirfmt,
|
||||||
],
|
],
|
||||||
'parallel restore with create');
|
'parallel restore with create');
|
||||||
|
|
||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[ 'pg_dumpall', '--no-sync', '-f', $plain, '-U', $username1 ],
|
[
|
||||||
|
'pg_dumpall',
|
||||||
|
'--no-sync',
|
||||||
|
'--file' => $plain,
|
||||||
|
'--username' => $username1,
|
||||||
|
],
|
||||||
'take full dump');
|
'take full dump');
|
||||||
system_log('cat', $plain);
|
system_log('cat', $plain);
|
||||||
my ($stderr, $result);
|
my ($stderr, $result);
|
||||||
@@ -183,20 +239,29 @@ $restore_super =~ s/"//g
|
|||||||
|
|
||||||
my $envar_node = PostgreSQL::Test::Cluster->new('destination_envar');
|
my $envar_node = PostgreSQL::Test::Cluster->new('destination_envar');
|
||||||
$envar_node->init(
|
$envar_node->init(
|
||||||
extra =>
|
extra => [
|
||||||
[ '-U', $dst_bootstrap_super, '--locale=C', '--encoding=LATIN1' ],
|
'--username' => $dst_bootstrap_super,
|
||||||
|
'--locale' => 'C',
|
||||||
|
'--encoding' => 'LATIN1',
|
||||||
|
],
|
||||||
auth_extra =>
|
auth_extra =>
|
||||||
[ '--user', $dst_bootstrap_super, '--create-role', $restore_super ]);
|
[ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ],
|
||||||
|
);
|
||||||
$envar_node->start;
|
$envar_node->start;
|
||||||
|
|
||||||
# make superuser for restore
|
# make superuser for restore
|
||||||
$envar_node->run_log(
|
$envar_node->run_log(
|
||||||
[ 'createuser', '-U', $dst_bootstrap_super, '-s', $restore_super ]);
|
[
|
||||||
|
'createuser',
|
||||||
|
'--username' => $dst_bootstrap_super,
|
||||||
|
'--superuser', $restore_super,
|
||||||
|
]);
|
||||||
|
|
||||||
{
|
{
|
||||||
local $ENV{PGPORT} = $envar_node->port;
|
local $ENV{PGPORT} = $envar_node->port;
|
||||||
local $ENV{PGUSER} = $restore_super;
|
local $ENV{PGUSER} = $restore_super;
|
||||||
$result = run_log([ 'psql', '-X', '-f', $plain ], '2>', \$stderr);
|
$result = run_log([ 'psql', '--no-psqlrc', '--file' => $plain ],
|
||||||
|
'2>' => \$stderr);
|
||||||
}
|
}
|
||||||
ok($result,
|
ok($result,
|
||||||
'restore full dump using environment variables for connection parameters'
|
'restore full dump using environment variables for connection parameters'
|
||||||
@@ -210,21 +275,32 @@ is($stderr, '', 'no dump errors');
|
|||||||
|
|
||||||
my $cmdline_node = PostgreSQL::Test::Cluster->new('destination_cmdline');
|
my $cmdline_node = PostgreSQL::Test::Cluster->new('destination_cmdline');
|
||||||
$cmdline_node->init(
|
$cmdline_node->init(
|
||||||
extra =>
|
extra => [
|
||||||
[ '-U', $dst_bootstrap_super, '--locale=C', '--encoding=LATIN1' ],
|
'--username' => $dst_bootstrap_super,
|
||||||
|
'--locale' => 'C',
|
||||||
|
'--encoding' => 'LATIN1',
|
||||||
|
],
|
||||||
auth_extra =>
|
auth_extra =>
|
||||||
[ '--user', $dst_bootstrap_super, '--create-role', $restore_super ]);
|
[ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ],
|
||||||
|
);
|
||||||
$cmdline_node->start;
|
$cmdline_node->start;
|
||||||
$cmdline_node->run_log(
|
$cmdline_node->run_log(
|
||||||
[ 'createuser', '-U', $dst_bootstrap_super, '-s', $restore_super ]);
|
[
|
||||||
|
'createuser',
|
||||||
|
'--username' => $dst_bootstrap_super,
|
||||||
|
'--superuser',
|
||||||
|
$restore_super,
|
||||||
|
]);
|
||||||
{
|
{
|
||||||
$result = run_log(
|
$result = run_log(
|
||||||
[
|
[
|
||||||
'psql', '-p', $cmdline_node->port, '-U',
|
'psql',
|
||||||
$restore_super, '-X', '-f', $plain
|
'--port' => $cmdline_node->port,
|
||||||
|
'--username' => $restore_super,
|
||||||
|
'--no-psqlrc',
|
||||||
|
'--file' => $plain,
|
||||||
],
|
],
|
||||||
'2>',
|
'2>' => \$stderr);
|
||||||
\$stderr);
|
|
||||||
}
|
}
|
||||||
ok($result,
|
ok($result,
|
||||||
'restore full dump with command-line options for connection parameters');
|
'restore full dump with command-line options for connection parameters');
|
||||||
|
@@ -30,7 +30,8 @@ SKIP:
|
|||||||
'check PGDATA permissions');
|
'check PGDATA permissions');
|
||||||
}
|
}
|
||||||
|
|
||||||
command_ok([ 'pg_resetwal', '-D', $node->data_dir ], 'pg_resetwal runs');
|
command_ok([ 'pg_resetwal', '--pgdata' => $node->data_dir ],
|
||||||
|
'pg_resetwal runs');
|
||||||
$node->start;
|
$node->start;
|
||||||
is($node->safe_psql("postgres", "SELECT 1;"),
|
is($node->safe_psql("postgres", "SELECT 1;"),
|
||||||
1, 'server running and working after reset');
|
1, 'server running and working after reset');
|
||||||
@@ -46,7 +47,7 @@ command_fails_like(
|
|||||||
qr/database server was not shut down cleanly/,
|
qr/database server was not shut down cleanly/,
|
||||||
'does not run after immediate shutdown');
|
'does not run after immediate shutdown');
|
||||||
command_ok(
|
command_ok(
|
||||||
[ 'pg_resetwal', '-f', $node->data_dir ],
|
[ 'pg_resetwal', '--force', $node->data_dir ],
|
||||||
'runs after immediate shutdown with force');
|
'runs after immediate shutdown with force');
|
||||||
$node->start;
|
$node->start;
|
||||||
is($node->safe_psql("postgres", "SELECT 1;"),
|
is($node->safe_psql("postgres", "SELECT 1;"),
|
||||||
@@ -80,111 +81,111 @@ command_fails_like(
|
|||||||
# error cases
|
# error cases
|
||||||
# -c
|
# -c
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-c', 'foo', $node->data_dir ],
|
[ 'pg_resetwal', '-c' => 'foo', $node->data_dir ],
|
||||||
qr/error: invalid argument for option -c/,
|
qr/error: invalid argument for option -c/,
|
||||||
'fails with incorrect -c option');
|
'fails with incorrect -c option');
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-c', '10,bar', $node->data_dir ],
|
[ 'pg_resetwal', '-c' => '10,bar', $node->data_dir ],
|
||||||
qr/error: invalid argument for option -c/,
|
qr/error: invalid argument for option -c/,
|
||||||
'fails with incorrect -c option part 2');
|
'fails with incorrect -c option part 2');
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-c', '1,10', $node->data_dir ],
|
[ 'pg_resetwal', '-c' => '1,10', $node->data_dir ],
|
||||||
qr/greater than/,
|
qr/greater than/,
|
||||||
'fails with -c value 1 part 1');
|
'fails with -c ids value 1 part 1');
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-c', '10,1', $node->data_dir ],
|
[ 'pg_resetwal', '-c' => '10,1', $node->data_dir ],
|
||||||
qr/greater than/,
|
qr/greater than/,
|
||||||
'fails with -c value 1 part 2');
|
'fails with -c value 1 part 2');
|
||||||
# -e
|
# -e
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-e', 'foo', $node->data_dir ],
|
[ 'pg_resetwal', '-e' => 'foo', $node->data_dir ],
|
||||||
qr/error: invalid argument for option -e/,
|
qr/error: invalid argument for option -e/,
|
||||||
'fails with incorrect -e option');
|
'fails with incorrect -e option');
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-e', '-1', $node->data_dir ],
|
[ 'pg_resetwal', '-e' => '-1', $node->data_dir ],
|
||||||
qr/must not be -1/,
|
qr/must not be -1/,
|
||||||
'fails with -e value -1');
|
'fails with -e value -1');
|
||||||
# -l
|
# -l
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-l', 'foo', $node->data_dir ],
|
[ 'pg_resetwal', '-l' => 'foo', $node->data_dir ],
|
||||||
qr/error: invalid argument for option -l/,
|
qr/error: invalid argument for option -l/,
|
||||||
'fails with incorrect -l option');
|
'fails with incorrect -l option');
|
||||||
# -m
|
# -m
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_resetwal', '-m', 'foo', $node->data_dir ],
|
+[ 'pg_resetwal', '-m' => 'foo', $node->data_dir ],
 qr/error: invalid argument for option -m/,
 'fails with incorrect -m option');
 command_fails_like(
-[ 'pg_resetwal', '-m', '10,bar', $node->data_dir ],
+[ 'pg_resetwal', '-m' => '10,bar', $node->data_dir ],
 qr/error: invalid argument for option -m/,
 'fails with incorrect -m option part 2');
 command_fails_like(
-[ 'pg_resetwal', '-m', '0,10', $node->data_dir ],
+[ 'pg_resetwal', '-m' => '0,10', $node->data_dir ],
 qr/must not be 0/,
 'fails with -m value 0 part 1');
 command_fails_like(
-[ 'pg_resetwal', '-m', '10,0', $node->data_dir ],
+[ 'pg_resetwal', '-m' => '10,0', $node->data_dir ],
 qr/must not be 0/,
 'fails with -m value 0 part 2');
 # -o
 command_fails_like(
-[ 'pg_resetwal', '-o', 'foo', $node->data_dir ],
+[ 'pg_resetwal', '-o' => 'foo', $node->data_dir ],
 qr/error: invalid argument for option -o/,
 'fails with incorrect -o option');
 command_fails_like(
-[ 'pg_resetwal', '-o', '0', $node->data_dir ],
+[ 'pg_resetwal', '-o' => '0', $node->data_dir ],
 qr/must not be 0/,
 'fails with -o value 0');
 # -O
 command_fails_like(
-[ 'pg_resetwal', '-O', 'foo', $node->data_dir ],
+[ 'pg_resetwal', '-O' => 'foo', $node->data_dir ],
 qr/error: invalid argument for option -O/,
 'fails with incorrect -O option');
 command_fails_like(
-[ 'pg_resetwal', '-O', '-1', $node->data_dir ],
+[ 'pg_resetwal', '-O' => '-1', $node->data_dir ],
 qr/must not be -1/,
 'fails with -O value -1');
 # --wal-segsize
 command_fails_like(
-[ 'pg_resetwal', '--wal-segsize', 'foo', $node->data_dir ],
+[ 'pg_resetwal', '--wal-segsize' => 'foo', $node->data_dir ],
 qr/error: invalid value/,
 'fails with incorrect --wal-segsize option');
 command_fails_like(
-[ 'pg_resetwal', '--wal-segsize', '13', $node->data_dir ],
+[ 'pg_resetwal', '--wal-segsize' => '13', $node->data_dir ],
 qr/must be a power/,
 'fails with invalid --wal-segsize value');
 # -u
 command_fails_like(
-[ 'pg_resetwal', '-u', 'foo', $node->data_dir ],
+[ 'pg_resetwal', '-u' => 'foo', $node->data_dir ],
 qr/error: invalid argument for option -u/,
 'fails with incorrect -u option');
 command_fails_like(
-[ 'pg_resetwal', '-u', '1', $node->data_dir ],
+[ 'pg_resetwal', '-u' => '1', $node->data_dir ],
 qr/must be greater than/,
 'fails with -u value too small');
 # -x
 command_fails_like(
-[ 'pg_resetwal', '-x', 'foo', $node->data_dir ],
+[ 'pg_resetwal', '-x' => 'foo', $node->data_dir ],
 qr/error: invalid argument for option -x/,
 'fails with incorrect -x option');
 command_fails_like(
-[ 'pg_resetwal', '-x', '1', $node->data_dir ],
+[ 'pg_resetwal', '-x' => '1', $node->data_dir ],
 qr/must be greater than/,
 'fails with -x value too small');
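The fat comma used throughout these rewritten arrays is an ordinary list separator in Perl, so the commands are unchanged at runtime; a minimal sketch (the option and value here are chosen only for illustration):

# Both arrays hold exactly the same four elements; '=>' merely makes the
# option/value pairing visible to readers and to perltidy.
my @with_comma     = ('pg_resetwal', '--multixact-ids', '10,20', $node->data_dir);
my @with_fat_comma = ('pg_resetwal', '--multixact-ids' => '10,20', $node->data_dir);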
 
 # run with control override options
 
-my $out = (run_command([ 'pg_resetwal', '-n', $node->data_dir ]))[0];
+my $out = (run_command([ 'pg_resetwal', '--dry-run', $node->data_dir ]))[0];
 $out =~ /^Database block size: *(\d+)$/m or die;
 my $blcksz = $1;
 
-my @cmd = ('pg_resetwal', '-D', $node->data_dir);
+my @cmd = ('pg_resetwal', '--pgdata' => $node->data_dir);
 
 # some not-so-critical hardcoded values
-push @cmd, '-e', 1;
-push @cmd, '-l', '00000001000000320000004B';
-push @cmd, '-o', 100_000;
-push @cmd, '--wal-segsize', 1;
+push @cmd, '--epoch' => 1;
+push @cmd, '--next-wal-file' => '00000001000000320000004B';
+push @cmd, '--next-oid' => 100_000;
+push @cmd, '--wal-segsize' => 1;
 
 # these use the guidance from the documentation
 
@@ -202,31 +203,33 @@ my (@files, $mult);
 # XXX: Should there be a multiplier, similar to the other options?
 # -c argument is "old,new"
 push @cmd,
-'-c',
+'--commit-timestamp-ids' =>
 sprintf("%d,%d", hex($files[0]) == 0 ? 3 : hex($files[0]), hex($files[-1]));
 
 @files = get_slru_files('pg_multixact/offsets');
 $mult = 32 * $blcksz / 4;
-# -m argument is "new,old"
-push @cmd, '-m',
-sprintf("%d,%d",
+# --multixact-ids argument is "new,old"
+push @cmd,
+'--multixact-ids' => sprintf("%d,%d",
 (hex($files[-1]) + 1) * $mult,
 hex($files[0]) == 0 ? 1 : hex($files[0] * $mult));
 
 @files = get_slru_files('pg_multixact/members');
 $mult = 32 * int($blcksz / 20) * 4;
-push @cmd, '-O', (hex($files[-1]) + 1) * $mult;
+push @cmd, '--multixact-offset' => (hex($files[-1]) + 1) * $mult;
 
 @files = get_slru_files('pg_xact');
 $mult = 32 * $blcksz * 4;
 push @cmd,
-'-u', (hex($files[0]) == 0 ? 3 : hex($files[0]) * $mult),
-'-x', ((hex($files[-1]) + 1) * $mult);
+'--oldest-transaction-id' =>
+(hex($files[0]) == 0 ? 3 : hex($files[0]) * $mult),
+'--next-transaction-id' => ((hex($files[-1]) + 1) * $mult);
 
-command_ok([ @cmd, '-n' ], 'runs with control override options, dry run');
+command_ok([ @cmd, '--dry-run' ],
+'runs with control override options, dry run');
 command_ok(\@cmd, 'runs with control override options');
 command_like(
-[ 'pg_resetwal', '-n', $node->data_dir ],
+[ 'pg_resetwal', '--dry-run', $node->data_dir ],
 qr/^Latest checkpoint's NextOID: *100000$/m,
 'spot check that control changes were applied');
 
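The multipliers above map SLRU segment file names to ID values; a minimal sketch of that arithmetic for pg_xact, assuming the default 8 kB block size and an example segment name of '0004':

# Each SLRU segment holds 32 pages, and pg_xact packs 4 transaction statuses
# per byte, so one segment covers 32 * BLCKSZ * 4 transaction IDs.
my $blcksz = 8192;
my $xids_per_segment = 32 * $blcksz * 4;
# First transaction ID past the newest segment file:
my $next_xid = (hex('0004') + 1) * $xids_per_segment;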
|
@ -31,7 +31,7 @@ print $fh pack("x[$size]");
|
|||||||
close $fh;
|
close $fh;
|
||||||
|
|
||||||
command_checks_all(
|
command_checks_all(
|
||||||
[ 'pg_resetwal', '-n', $node->data_dir ],
|
[ 'pg_resetwal', '--dry-run', $node->data_dir ],
|
||||||
0,
|
0,
|
||||||
[qr/pg_control version number/],
|
[qr/pg_control version number/],
|
||||||
[
|
[
|
||||||
@ -47,7 +47,7 @@ print $fh $data, pack("x[" . ($size - 16) . "]");
|
|||||||
close $fh;
|
close $fh;
|
||||||
|
|
||||||
command_checks_all(
|
command_checks_all(
|
||||||
[ 'pg_resetwal', '-n', $node->data_dir ],
|
[ 'pg_resetwal', '--dry-run', $node->data_dir ],
|
||||||
0,
|
0,
|
||||||
[qr/pg_control version number/],
|
[qr/pg_control version number/],
|
||||||
[
|
[
|
||||||
|
@ -106,8 +106,8 @@ sub run_test
|
|||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind', '--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--source-pgdata' => $standby_pgdata,
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
'--no-sync'
|
'--no-sync'
|
||||||
],
|
],
|
||||||
'pg_rewind with running target');
|
'pg_rewind with running target');
|
||||||
@ -118,8 +118,8 @@ sub run_test
|
|||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind', '--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--source-pgdata' => $standby_pgdata,
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
'--no-sync', '--no-ensure-shutdown'
|
'--no-sync', '--no-ensure-shutdown'
|
||||||
],
|
],
|
||||||
'pg_rewind --no-ensure-shutdown with running target');
|
'pg_rewind --no-ensure-shutdown with running target');
|
||||||
@ -131,8 +131,8 @@ sub run_test
|
|||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind', '--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--source-pgdata' => $standby_pgdata,
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
'--no-sync', '--no-ensure-shutdown'
|
'--no-sync', '--no-ensure-shutdown'
|
||||||
],
|
],
|
||||||
'pg_rewind with unexpected running source');
|
'pg_rewind with unexpected running source');
|
||||||
@ -145,8 +145,8 @@ sub run_test
|
|||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind', '--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--source-pgdata' => $standby_pgdata,
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
'--no-sync', '--dry-run'
|
'--no-sync', '--dry-run'
|
||||||
],
|
],
|
||||||
'pg_rewind --dry-run');
|
'pg_rewind --dry-run');
|
||||||
|
@ -17,27 +17,30 @@ my $primary_pgdata = PostgreSQL::Test::Utils::tempdir;
|
|||||||
my $standby_pgdata = PostgreSQL::Test::Utils::tempdir;
|
my $standby_pgdata = PostgreSQL::Test::Utils::tempdir;
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind',
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
|
'--source-pgdata' => $standby_pgdata,
|
||||||
'extra_arg1'
|
'extra_arg1'
|
||||||
],
|
],
|
||||||
'too many arguments');
|
'too many arguments');
|
||||||
command_fails([ 'pg_rewind', '--target-pgdata', $primary_pgdata ],
|
command_fails([ 'pg_rewind', '--target-pgdata' => $primary_pgdata ],
|
||||||
'no source specified');
|
'no source specified');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind',
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
'--source-server', 'incorrect_source'
|
'--source-pgdata' => $standby_pgdata,
|
||||||
|
'--source-server' => 'incorrect_source'
|
||||||
],
|
],
|
||||||
'both remote and local sources specified');
|
'both remote and local sources specified');
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind',
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
|
'--source-pgdata' => $standby_pgdata,
|
||||||
'--write-recovery-conf'
|
'--write-recovery-conf'
|
||||||
],
|
],
|
||||||
'no local source with --write-recovery-conf');
|
'no local source with --write-recovery-conf');
|
||||||
|
@ -124,10 +124,12 @@ copy(
|
|||||||
# recovery configuration automatically.
|
# recovery configuration automatically.
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_rewind', "--debug",
|
'pg_rewind',
|
||||||
"--source-server", $node_b->connstr('postgres'),
|
'--debug',
|
||||||
"--target-pgdata=$node_c_pgdata", "--no-sync",
|
'--source-server' => $node_b->connstr('postgres'),
|
||||||
"--write-recovery-conf"
|
'--target-pgdata' => $node_c_pgdata,
|
||||||
|
'--no-sync',
|
||||||
|
'--write-recovery-conf',
|
||||||
],
|
],
|
||||||
'pg_rewind remote');
|
'pg_rewind remote');
|
||||||
}
|
}
|
||||||
|
@ -142,8 +142,10 @@ copy(
|
|||||||
|
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_rewind', "--source-server=$node_1_connstr",
|
'pg_rewind',
|
||||||
"--target-pgdata=$node_2_pgdata", "--debug"
|
'--source-server' => $node_1_connstr,
|
||||||
|
'--target-pgdata' => $node_2_pgdata,
|
||||||
|
'--debug',
|
||||||
],
|
],
|
||||||
'run pg_rewind');
|
'run pg_rewind');
|
||||||
|
|
||||||
|
@ -52,8 +52,8 @@ append_to_file "$standby_pgdata/tst_both_dir/file1", 'a';
|
|||||||
my $ret = run_log(
|
my $ret = run_log(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind', '--debug',
|
||||||
'--source-pgdata', $standby_pgdata,
|
'--source-pgdata' => $standby_pgdata,
|
||||||
'--target-pgdata', $primary_pgdata,
|
'--target-pgdata' => $primary_pgdata,
|
||||||
'--no-sync',
|
'--no-sync',
|
||||||
],
|
],
|
||||||
'2>>',
|
'2>>',
|
||||||
|
@ -49,8 +49,8 @@ $node_primary->stop();
|
|||||||
my ($stdout, $stderr) = run_command(
|
my ($stdout, $stderr) = run_command(
|
||||||
[
|
[
|
||||||
'pg_rewind', '--debug',
|
'pg_rewind', '--debug',
|
||||||
'--source-pgdata', $node_standby->data_dir,
|
'--source-pgdata' => $node_standby->data_dir,
|
||||||
'--target-pgdata', $node_primary->data_dir,
|
'--target-pgdata' => $node_primary->data_dir,
|
||||||
'--no-sync',
|
'--no-sync',
|
||||||
]);
|
]);
|
||||||
|
|
||||||
|
@ -255,12 +255,11 @@ sub run_pg_rewind
|
|||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_rewind',
|
'pg_rewind',
|
||||||
"--debug",
|
'--debug',
|
||||||
"--source-pgdata=$standby_pgdata",
|
'--source-pgdata' => $standby_pgdata,
|
||||||
"--target-pgdata=$primary_pgdata",
|
'--target-pgdata' => $primary_pgdata,
|
||||||
"--no-sync",
|
'--no-sync',
|
||||||
"--config-file",
|
'--config-file' => "$tmp_folder/primary-postgresql.conf.tmp",
|
||||||
"$tmp_folder/primary-postgresql.conf.tmp"
|
|
||||||
],
|
],
|
||||||
'pg_rewind local');
|
'pg_rewind local');
|
||||||
}
|
}
|
||||||
@ -270,11 +269,13 @@ sub run_pg_rewind
|
|||||||
# recovery configuration automatically.
|
# recovery configuration automatically.
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_rewind', "--debug",
|
'pg_rewind',
|
||||||
"--source-server", $standby_connstr,
|
'--debug',
|
||||||
"--target-pgdata=$primary_pgdata", "--no-sync",
|
'--source-server' => $standby_connstr,
|
||||||
"--write-recovery-conf", "--config-file",
|
'--target-pgdata' => $primary_pgdata,
|
||||||
"$tmp_folder/primary-postgresql.conf.tmp"
|
'--no-sync',
|
||||||
|
'--write-recovery-conf',
|
||||||
|
'--config-file' => "$tmp_folder/primary-postgresql.conf.tmp",
|
||||||
],
|
],
|
||||||
'pg_rewind remote');
|
'pg_rewind remote');
|
||||||
|
|
||||||
@ -327,14 +328,13 @@ sub run_pg_rewind
|
|||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_rewind',
|
'pg_rewind',
|
||||||
"--debug",
|
'--debug',
|
||||||
"--source-pgdata=$standby_pgdata",
|
'--source-pgdata' => $standby_pgdata,
|
||||||
"--target-pgdata=$primary_pgdata",
|
'--target-pgdata' => $primary_pgdata,
|
||||||
"--no-sync",
|
'--no-sync',
|
||||||
"--no-ensure-shutdown",
|
'--no-ensure-shutdown',
|
||||||
"--restore-target-wal",
|
'--restore-target-wal',
|
||||||
"--config-file",
|
'--config-file' => "$primary_pgdata/postgresql.conf",
|
||||||
"$primary_pgdata/postgresql.conf"
|
|
||||||
],
|
],
|
||||||
'pg_rewind archive');
|
'pg_rewind archive');
|
||||||
}
|
}
|
||||||
|
@@ -18,11 +18,11 @@ program_options_handling_ok('pg_test_fsync');
 # Test invalid option combinations
 
 command_fails_like(
-[ 'pg_test_fsync', '--secs-per-test', 'a' ],
+[ 'pg_test_fsync', '--secs-per-test' => 'a' ],
 qr/\Qpg_test_fsync: error: invalid argument for option --secs-per-test\E/,
 'pg_test_fsync: invalid argument for option --secs-per-test');
 command_fails_like(
-[ 'pg_test_fsync', '--secs-per-test', '0' ],
+[ 'pg_test_fsync', '--secs-per-test' => '0' ],
 qr/\Qpg_test_fsync: error: --secs-per-test must be in range 1..4294967295\E/,
 'pg_test_fsync: --secs-per-test must be in range');
|
|
||||||
|
@@ -18,11 +18,11 @@ program_options_handling_ok('pg_test_timing');
 # Test invalid option combinations
 
 command_fails_like(
-[ 'pg_test_timing', '--duration', 'a' ],
+[ 'pg_test_timing', '--duration' => 'a' ],
 qr/\Qpg_test_timing: invalid argument for option --duration\E/,
 'pg_test_timing: invalid argument for option --duration');
 command_fails_like(
-[ 'pg_test_timing', '--duration', '0' ],
+[ 'pg_test_timing', '--duration' => '0' ],
 qr/\Qpg_test_timing: --duration must be in range 1..4294967295\E/,
 'pg_test_timing: --duration must be in range');
|
|
||||||
|
@ -58,11 +58,17 @@ $new_sub->append_conf('postgresql.conf', "max_replication_slots = 0");
|
|||||||
# max_replication_slots.
|
# max_replication_slots.
|
||||||
command_checks_all(
|
command_checks_all(
|
||||||
[
|
[
|
||||||
'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir,
|
'pg_upgrade',
|
||||||
'-D', $new_sub->data_dir, '-b', $oldbindir,
|
'--no-sync',
|
||||||
'-B', $newbindir, '-s', $new_sub->host,
|
'--old-datadir' => $old_sub->data_dir,
|
||||||
'-p', $old_sub->port, '-P', $new_sub->port,
|
'--new-datadir' => $new_sub->data_dir,
|
||||||
$mode, '--check',
|
'--old-bindir' => $oldbindir,
|
||||||
|
'--new-bindir' => $newbindir,
|
||||||
|
'--socketdir' => $new_sub->host,
|
||||||
|
'--old-port' => $old_sub->port,
|
||||||
|
'--new-port' => $new_sub->port,
|
||||||
|
$mode,
|
||||||
|
'--check',
|
||||||
],
|
],
|
||||||
1,
|
1,
|
||||||
[
|
[
|
||||||
@ -126,11 +132,17 @@ $old_sub->stop;
|
|||||||
|
|
||||||
command_fails(
|
command_fails(
|
||||||
[
|
[
|
||||||
'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir,
|
'pg_upgrade',
|
||||||
'-D', $new_sub->data_dir, '-b', $oldbindir,
|
'--no-sync',
|
||||||
'-B', $newbindir, '-s', $new_sub->host,
|
'--old-datadir' => $old_sub->data_dir,
|
||||||
'-p', $old_sub->port, '-P', $new_sub->port,
|
'--new-datadir' => $new_sub->data_dir,
|
||||||
$mode, '--check',
|
'--old-bindir' => $oldbindir,
|
||||||
|
'--new-bindir' => $newbindir,
|
||||||
|
'--socketdir' => $new_sub->host,
|
||||||
|
'--old-port' => $old_sub->port,
|
||||||
|
'--new-port' => $new_sub->port,
|
||||||
|
$mode,
|
||||||
|
'--check',
|
||||||
],
|
],
|
||||||
'run of pg_upgrade --check for old instance with relation in \'d\' datasync(invalid) state and missing replication origin'
|
'run of pg_upgrade --check for old instance with relation in \'d\' datasync(invalid) state and missing replication origin'
|
||||||
);
|
);
|
||||||
@ -254,10 +266,15 @@ $new_sub->append_conf('postgresql.conf',
|
|||||||
# ------------------------------------------------------
|
# ------------------------------------------------------
|
||||||
command_ok(
|
command_ok(
|
||||||
[
|
[
|
||||||
'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir,
|
'pg_upgrade',
|
||||||
'-D', $new_sub->data_dir, '-b', $oldbindir,
|
'--no-sync',
|
||||||
'-B', $newbindir, '-s', $new_sub->host,
|
'--old-datadir' => $old_sub->data_dir,
|
||||||
'-p', $old_sub->port, '-P', $new_sub->port,
|
'--new-datadir' => $new_sub->data_dir,
|
||||||
|
'--old-bindir' => $oldbindir,
|
||||||
|
'--new-bindir' => $newbindir,
|
||||||
|
'--socketdir' => $new_sub->host,
|
||||||
|
'--old-port' => $old_sub->port,
|
||||||
|
'--new-port' => $new_sub->port,
|
||||||
$mode
|
$mode
|
||||||
],
|
],
|
||||||
'run of pg_upgrade for old instance when the subscription tables are in init/ready state'
|
'run of pg_upgrade for old instance when the subscription tables are in init/ready state'
|
||||||
|
@ -31,7 +31,11 @@ close($fh);
|
|||||||
|
|
||||||
# but then try to use an alternate, nonexisting manifest
|
# but then try to use an alternate, nonexisting manifest
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_verifybackup', '-m', "$tempdir/not_the_manifest", $tempdir ],
|
[
|
||||||
|
'pg_verifybackup',
|
||||||
|
'--manifest-path' => "$tempdir/not_the_manifest",
|
||||||
|
$tempdir,
|
||||||
|
],
|
||||||
qr/could not open file.*\/not_the_manifest\"/,
|
qr/could not open file.*\/not_the_manifest\"/,
|
||||||
'pg_verifybackup respects -m flag');
|
'pg_verifybackup respects -m flag');
|
||||||
|
|
||||||
|
@ -125,8 +125,12 @@ for my $scenario (@scenario)
|
|||||||
local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix;
|
local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix;
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast',
|
'pg_basebackup',
|
||||||
'-T', "${source_ts_path}=${backup_ts_path}"
|
'--pgdata' => $backup_path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--tablespace-mapping' =>
|
||||||
|
"${source_ts_path}=${backup_ts_path}",
|
||||||
],
|
],
|
||||||
"base backup ok");
|
"base backup ok");
|
||||||
command_ok([ 'pg_verifybackup', $backup_path ],
|
command_ok([ 'pg_verifybackup', $backup_path ],
|
||||||
|
@ -16,33 +16,45 @@ $primary->init(allows_streaming => 1);
|
|||||||
$primary->start;
|
$primary->start;
|
||||||
my $backup_path = $primary->backup_dir . '/test_options';
|
my $backup_path = $primary->backup_dir . '/test_options';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup_path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast'
|
||||||
|
],
|
||||||
"base backup ok");
|
"base backup ok");
|
||||||
|
|
||||||
# Verify that pg_verifybackup -q succeeds and produces no output.
|
# Verify that pg_verifybackup --quiet succeeds and produces no output.
|
||||||
my $stdout;
|
my $stdout;
|
||||||
my $stderr;
|
my $stderr;
|
||||||
my $result = IPC::Run::run [ 'pg_verifybackup', '-q', $backup_path ],
|
my $result = IPC::Run::run [ 'pg_verifybackup', '--quiet', $backup_path ],
|
||||||
'>', \$stdout, '2>', \$stderr;
|
'>' => \$stdout,
|
||||||
ok($result, "-q succeeds: exit code 0");
|
'2>' => \$stderr;
|
||||||
is($stdout, '', "-q succeeds: no stdout");
|
ok($result, "--quiet succeeds: exit code 0");
|
||||||
is($stderr, '', "-q succeeds: no stderr");
|
is($stdout, '', "--quiet succeeds: no stdout");
|
||||||
|
is($stderr, '', "--quiet succeeds: no stderr");
|
||||||
|
|
||||||
# Should still work if we specify -Fp.
|
# Should still work if we specify --format=plain.
|
||||||
$primary->command_ok([ 'pg_verifybackup', '-Fp', $backup_path ],
|
$primary->command_ok(
|
||||||
"verifies with -Fp");
|
[ 'pg_verifybackup', '--format' => 'plain', $backup_path ],
|
||||||
|
"verifies with --format=plain");
|
||||||
|
|
||||||
# Should not work if we specify -Fy because that's invalid.
|
# Should not work if we specify --format=y because that's invalid.
|
||||||
$primary->command_fails_like(
|
$primary->command_fails_like(
|
||||||
[ 'pg_verifybackup', '-Fy', $backup_path ],
|
[ 'pg_verifybackup', '--format' => 'y', $backup_path ],
|
||||||
qr(invalid backup format "y", must be "plain" or "tar"),
|
qr(invalid backup format "y", must be "plain" or "tar"),
|
||||||
"does not verify with -Fy");
|
"does not verify with --format=y");
|
||||||
|
|
||||||
# Should produce a lengthy list of errors; we test for just one of those.
|
# Should produce a lengthy list of errors; we test for just one of those.
|
||||||
$primary->command_fails_like(
|
$primary->command_fails_like(
|
||||||
[ 'pg_verifybackup', '-Ft', '-n', $backup_path ],
|
[
|
||||||
|
'pg_verifybackup',
|
||||||
|
'--format' => 'tar',
|
||||||
|
'--no-parse-wal',
|
||||||
|
$backup_path
|
||||||
|
],
|
||||||
qr("pg_multixact" is not a plain file),
|
qr("pg_multixact" is not a plain file),
|
||||||
"does not verify with -Ft -n");
|
"does not verify with --format=tar --no-parse-wal");
|
||||||
|
|
||||||
# Test invalid options
|
# Test invalid options
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
@ -59,25 +71,30 @@ close($fh);
|
|||||||
|
|
||||||
# Verify that pg_verifybackup -q now fails.
|
# Verify that pg_verifybackup -q now fails.
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_verifybackup', '-q', $backup_path ],
|
[ 'pg_verifybackup', '--quiet', $backup_path ],
|
||||||
qr/checksum mismatch for file \"PG_VERSION\"/,
|
qr/checksum mismatch for file \"PG_VERSION\"/,
|
||||||
'-q checksum mismatch');
|
'--quiet checksum mismatch');
|
||||||
|
|
||||||
# Since we didn't change the length of the file, verification should succeed
|
# Since we didn't change the length of the file, verification should succeed
|
||||||
# if we ignore checksums. Check that we get the right message, too.
|
# if we ignore checksums. Check that we get the right message, too.
|
||||||
command_like(
|
command_like(
|
||||||
[ 'pg_verifybackup', '-s', $backup_path ],
|
[ 'pg_verifybackup', '--skip-checksums', $backup_path ],
|
||||||
qr/backup successfully verified/,
|
qr/backup successfully verified/,
|
||||||
'-s skips checksumming');
|
'--skip-checksums skips checksumming');
|
||||||
|
|
||||||
# Validation should succeed if we ignore the problem file. Also, check
|
# Validation should succeed if we ignore the problem file. Also, check
|
||||||
# the progress information.
|
# the progress information.
|
||||||
command_checks_all(
|
command_checks_all(
|
||||||
[ 'pg_verifybackup', '--progress', '-i', 'PG_VERSION', $backup_path ],
|
[
|
||||||
|
'pg_verifybackup',
|
||||||
|
'--progress',
|
||||||
|
'--ignore' => 'PG_VERSION',
|
||||||
|
$backup_path
|
||||||
|
],
|
||||||
0,
|
0,
|
||||||
[qr/backup successfully verified/],
|
[qr/backup successfully verified/],
|
||||||
[qr{(\d+/\d+ kB \(\d+%\) verified)+}],
|
[qr{(\d+/\d+ kB \(\d+%\) verified)+}],
|
||||||
'-i ignores problem file');
|
'--ignore ignores problem file');
|
||||||
|
|
||||||
# PG_VERSION is already corrupt; let's try also removing all of pg_xact.
|
# PG_VERSION is already corrupt; let's try also removing all of pg_xact.
|
||||||
rmtree($backup_path . "/pg_xact");
|
rmtree($backup_path . "/pg_xact");
|
||||||
@ -85,17 +102,22 @@ rmtree($backup_path . "/pg_xact");
|
|||||||
# We're ignoring the problem with PG_VERSION, but not the problem with
|
# We're ignoring the problem with PG_VERSION, but not the problem with
|
||||||
# pg_xact, so verification should fail here.
|
# pg_xact, so verification should fail here.
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
|
[ 'pg_verifybackup', '--ignore' => 'PG_VERSION', $backup_path ],
|
||||||
qr/pg_xact.*is present in the manifest but not on disk/,
|
qr/pg_xact.*is present in the manifest but not on disk/,
|
||||||
'-i does not ignore all problems');
|
'--ignore does not ignore all problems');
|
||||||
|
|
||||||
# If we use -i twice, we should be able to ignore all of the problems.
|
# If we use --ignore twice, we should be able to ignore all of the problems.
|
||||||
command_like(
|
command_like(
|
||||||
[ 'pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact', $backup_path ],
|
[
|
||||||
|
'pg_verifybackup',
|
||||||
|
'--ignore' => 'PG_VERSION',
|
||||||
|
'--ignore' => 'pg_xact',
|
||||||
|
$backup_path
|
||||||
|
],
|
||||||
qr/backup successfully verified/,
|
qr/backup successfully verified/,
|
||||||
'multiple -i options work');
|
'multiple --ignore options work');
|
||||||
|
|
||||||
# Verify that when -i is not used, both problems are reported.
|
# Verify that when --ignore is not used, both problems are reported.
|
||||||
$result = IPC::Run::run [ 'pg_verifybackup', $backup_path ],
|
$result = IPC::Run::run [ 'pg_verifybackup', $backup_path ],
|
||||||
'>', \$stdout, '2>', \$stderr;
|
'>', \$stdout, '2>', \$stderr;
|
||||||
ok(!$result, "multiple problems: fails");
|
ok(!$result, "multiple problems: fails");
|
||||||
@ -108,24 +130,28 @@ like(
|
|||||||
qr/checksum mismatch for file \"PG_VERSION\"/,
|
qr/checksum mismatch for file \"PG_VERSION\"/,
|
||||||
"multiple problems: checksum mismatch reported");
|
"multiple problems: checksum mismatch reported");
|
||||||
|
|
||||||
# Verify that when -e is used, only the problem detected first is reported.
|
# Verify that when --exit-on-error is used, only the problem detected
|
||||||
$result = IPC::Run::run [ 'pg_verifybackup', '-e', $backup_path ],
|
# first is reported.
|
||||||
'>', \$stdout, '2>', \$stderr;
|
$result =
|
||||||
ok(!$result, "-e reports 1 error: fails");
|
IPC::Run::run [ 'pg_verifybackup', '--exit-on-error', $backup_path ],
|
||||||
|
'>' => \$stdout,
|
||||||
|
'2>' => \$stderr;
|
||||||
|
ok(!$result, "--exit-on-error reports 1 error: fails");
|
||||||
like(
|
like(
|
||||||
$stderr,
|
$stderr,
|
||||||
qr/pg_xact.*is present in the manifest but not on disk/,
|
qr/pg_xact.*is present in the manifest but not on disk/,
|
||||||
"-e reports 1 error: missing files reported");
|
"--exit-on-error reports 1 error: missing files reported");
|
||||||
unlike(
|
unlike(
|
||||||
$stderr,
|
$stderr,
|
||||||
qr/checksum mismatch for file \"PG_VERSION\"/,
|
qr/checksum mismatch for file \"PG_VERSION\"/,
|
||||||
"-e reports 1 error: checksum mismatch not reported");
|
"--exit-on-error reports 1 error: checksum mismatch not reported");
|
||||||
|
|
||||||
# Test valid manifest with nonexistent backup directory.
|
# Test valid manifest with nonexistent backup directory.
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[
|
[
|
||||||
'pg_verifybackup', '-m',
|
'pg_verifybackup',
|
||||||
"$backup_path/backup_manifest", "$backup_path/fake"
|
'--manifest-path' => "$backup_path/backup_manifest",
|
||||||
|
"$backup_path/fake"
|
||||||
],
|
],
|
||||||
qr/could not open directory/,
|
qr/could not open directory/,
|
||||||
'nonexistent backup directory');
|
'nonexistent backup directory');
|
||||||
|
@ -15,9 +15,11 @@ $primary->start;
|
|||||||
my $backup_path = $primary->backup_dir . '/test_encoding';
|
my $backup_path = $primary->backup_dir . '/test_encoding';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[
|
[
|
||||||
'pg_basebackup', '-D',
|
'pg_basebackup',
|
||||||
$backup_path, '--no-sync',
|
'--pgdata' => $backup_path,
|
||||||
'-cfast', '--manifest-force-encode'
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast',
|
||||||
|
'--manifest-force-encode',
|
||||||
],
|
],
|
||||||
"backup ok with forced hex encoding");
|
"backup ok with forced hex encoding");
|
||||||
|
|
||||||
@ -27,7 +29,7 @@ cmp_ok($count_of_encoded_path_in_manifest,
|
|||||||
'>', 100, "many paths are encoded in the manifest");
|
'>', 100, "many paths are encoded in the manifest");
|
||||||
|
|
||||||
command_like(
|
command_like(
|
||||||
[ 'pg_verifybackup', '-s', $backup_path ],
|
[ 'pg_verifybackup', '--skip-checksums', $backup_path ],
|
||||||
qr/backup successfully verified/,
|
qr/backup successfully verified/,
|
||||||
'backup with forced encoding verified');
|
'backup with forced encoding verified');
|
||||||
|
|
||||||
|
@ -15,7 +15,12 @@ $primary->init(allows_streaming => 1);
|
|||||||
$primary->start;
|
$primary->start;
|
||||||
my $backup_path = $primary->backup_dir . '/test_wal';
|
my $backup_path = $primary->backup_dir . '/test_wal';
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup_path,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast'
|
||||||
|
],
|
||||||
"base backup ok");
|
"base backup ok");
|
||||||
|
|
||||||
# Rename pg_wal.
|
# Rename pg_wal.
|
||||||
@ -30,13 +35,17 @@ command_fails_like(
|
|||||||
'missing pg_wal causes failure');
|
'missing pg_wal causes failure');
|
||||||
|
|
||||||
# Should work if we skip WAL verification.
|
# Should work if we skip WAL verification.
|
||||||
command_ok(
|
command_ok([ 'pg_verifybackup', '--no-parse-wal', $backup_path ],
|
||||||
[ 'pg_verifybackup', '-n', $backup_path ],
|
|
||||||
'missing pg_wal OK if not verifying WAL');
|
'missing pg_wal OK if not verifying WAL');
|
||||||
|
|
||||||
# Should also work if we specify the correct WAL location.
|
# Should also work if we specify the correct WAL location.
|
||||||
command_ok([ 'pg_verifybackup', '-w', $relocated_pg_wal, $backup_path ],
|
command_ok(
|
||||||
'-w can be used to specify WAL directory');
|
[
|
||||||
|
'pg_verifybackup',
|
||||||
|
'--wal-directory' => $relocated_pg_wal,
|
||||||
|
$backup_path
|
||||||
|
],
|
||||||
|
'--wal-directory can be used to specify WAL directory');
|
||||||
|
|
||||||
# Move directory back to original location.
|
# Move directory back to original location.
|
||||||
rename($relocated_pg_wal, $original_pg_wal) || die "rename pg_wal back: $!";
|
rename($relocated_pg_wal, $original_pg_wal) || die "rename pg_wal back: $!";
|
||||||
@ -70,7 +79,12 @@ my $backup_path2 = $primary->backup_dir . '/test_tli';
|
|||||||
# The base backup run below does a checkpoint, that removes the first segment
|
# The base backup run below does a checkpoint, that removes the first segment
|
||||||
# of the current timeline.
|
# of the current timeline.
|
||||||
$primary->command_ok(
|
$primary->command_ok(
|
||||||
[ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
|
[
|
||||||
|
'pg_basebackup',
|
||||||
|
'--pgdata' => $backup_path2,
|
||||||
|
'--no-sync',
|
||||||
|
'--checkpoint' => 'fast'
|
||||||
|
],
|
||||||
"base backup 2 ok");
|
"base backup 2 ok");
|
||||||
command_ok(
|
command_ok(
|
||||||
[ 'pg_verifybackup', $backup_path2 ],
|
[ 'pg_verifybackup', $backup_path2 ],
|
||||||
|
@ -108,7 +108,11 @@ for my $tc (@test_configuration)
|
|||||||
"found expected backup files, compression $method");
|
"found expected backup files, compression $method");
|
||||||
|
|
||||||
# Verify tar backup.
|
# Verify tar backup.
|
||||||
$primary->command_ok([ 'pg_verifybackup', '-n', '-e', $backup_path ],
|
$primary->command_ok(
|
||||||
|
[
|
||||||
|
'pg_verifybackup', '--no-parse-wal',
|
||||||
|
'--exit-on-error', $backup_path,
|
||||||
|
],
|
||||||
"verify backup, compression $method");
|
"verify backup, compression $method");
|
||||||
|
|
||||||
# Cleanup.
|
# Cleanup.
|
||||||
|
@@ -21,31 +21,31 @@ command_fails_like(
 
 # invalid option arguments
 command_fails_like(
-[ 'pg_waldump', '--block', 'bad' ],
+[ 'pg_waldump', '--block' => 'bad' ],
 qr/error: invalid block number/,
 'invalid block number');
 command_fails_like(
-[ 'pg_waldump', '--fork', 'bad' ],
+[ 'pg_waldump', '--fork' => 'bad' ],
 qr/error: invalid fork name/,
 'invalid fork name');
 command_fails_like(
-[ 'pg_waldump', '--limit', 'bad' ],
+[ 'pg_waldump', '--limit' => 'bad' ],
 qr/error: invalid value/,
 'invalid limit');
 command_fails_like(
-[ 'pg_waldump', '--relation', 'bad' ],
+[ 'pg_waldump', '--relation' => 'bad' ],
 qr/error: invalid relation/,
 'invalid relation specification');
 command_fails_like(
-[ 'pg_waldump', '--rmgr', 'bad' ],
+[ 'pg_waldump', '--rmgr' => 'bad' ],
 qr/error: resource manager .* does not exist/,
 'invalid rmgr name');
 command_fails_like(
-[ 'pg_waldump', '--start', 'bad' ],
+[ 'pg_waldump', '--start' => 'bad' ],
 qr/error: invalid WAL location/,
 'invalid start LSN');
 command_fails_like(
-[ 'pg_waldump', '--end', 'bad' ],
+[ 'pg_waldump', '--end' => 'bad' ],
 qr/error: invalid WAL location/,
 'invalid end LSN');
|
|
||||||
@ -199,18 +199,24 @@ command_like(
|
|||||||
qr/./,
|
qr/./,
|
||||||
'runs with start and end segment specified');
|
'runs with start and end segment specified');
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_waldump', '-p', $node->data_dir ],
|
[ 'pg_waldump', '--path' => $node->data_dir ],
|
||||||
qr/error: no start WAL location given/,
|
qr/error: no start WAL location given/,
|
||||||
'path option requires start location');
|
'path option requires start location');
|
||||||
command_like(
|
command_like(
|
||||||
[
|
[
|
||||||
'pg_waldump', '-p', $node->data_dir, '--start',
|
'pg_waldump',
|
||||||
$start_lsn, '--end', $end_lsn
|
'--path' => $node->data_dir,
|
||||||
|
'--start' => $start_lsn,
|
||||||
|
'--end' => $end_lsn,
|
||||||
],
|
],
|
||||||
qr/./,
|
qr/./,
|
||||||
'runs with path option and start and end locations');
|
'runs with path option and start and end locations');
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ],
|
[
|
||||||
|
'pg_waldump',
|
||||||
|
'--path' => $node->data_dir,
|
||||||
|
'--start' => $start_lsn,
|
||||||
|
],
|
||||||
qr/error: error in WAL record at/,
|
qr/error: error in WAL record at/,
|
||||||
'falling off the end of the WAL results in an error');
|
'falling off the end of the WAL results in an error');
|
||||||
|
|
||||||
@ -222,7 +228,11 @@ command_like(
|
|||||||
qr/^$/,
|
qr/^$/,
|
||||||
'no output with --quiet option');
|
'no output with --quiet option');
|
||||||
command_fails_like(
|
command_fails_like(
|
||||||
[ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ],
|
[
|
||||||
|
'pg_waldump', '--quiet',
|
||||||
|
'--path' => $node->data_dir,
|
||||||
|
'--start' => $start_lsn
|
||||||
|
],
|
||||||
qr/error: error in WAL record at/,
|
qr/error: error in WAL record at/,
|
||||||
'errors are shown with --quiet');
|
'errors are shown with --quiet');
|
||||||
|
|
||||||
@ -240,7 +250,8 @@ command_fails_like(
|
|||||||
my (@cmd, $stdout, $stderr, $result);
|
my (@cmd, $stdout, $stderr, $result);
|
||||||
|
|
||||||
@cmd = (
|
@cmd = (
|
||||||
'pg_waldump', '--start', $new_start,
|
'pg_waldump',
|
||||||
|
'--start' => $new_start,
|
||||||
$node->data_dir . '/pg_wal/' . $start_walfile);
|
$node->data_dir . '/pg_wal/' . $start_walfile);
|
||||||
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
|
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
|
||||||
ok($result, "runs with start segment and start LSN specified");
|
ok($result, "runs with start segment and start LSN specified");
|
||||||
@ -258,8 +269,10 @@ sub test_pg_waldump
|
|||||||
my (@cmd, $stdout, $stderr, $result, @lines);
|
my (@cmd, $stdout, $stderr, $result, @lines);
|
||||||
|
|
||||||
@cmd = (
|
@cmd = (
|
||||||
'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end',
|
'pg_waldump',
|
||||||
$end_lsn);
|
'--path' => $node->data_dir,
|
||||||
|
'--start' => $start_lsn,
|
||||||
|
'--end' => $end_lsn);
|
||||||
push @cmd, @opts;
|
push @cmd, @opts;
|
||||||
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
|
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
|
||||||
ok($result, "pg_waldump @opts: runs ok");
|
ok($result, "pg_waldump @opts: runs ok");
|
||||||
@ -274,7 +287,7 @@ my @lines;
|
|||||||
@lines = test_pg_waldump;
|
@lines = test_pg_waldump;
|
||||||
is(grep(!/^rmgr: \w/, @lines), 0, 'all output lines are rmgr lines');
|
is(grep(!/^rmgr: \w/, @lines), 0, 'all output lines are rmgr lines');
|
||||||
|
|
||||||
@lines = test_pg_waldump('--limit', 6);
|
@lines = test_pg_waldump('--limit' => 6);
|
||||||
is(@lines, 6, 'limit option observed');
|
is(@lines, 6, 'limit option observed');
|
||||||
|
|
||||||
@lines = test_pg_waldump('--fullpage');
|
@lines = test_pg_waldump('--fullpage');
|
||||||
@ -288,21 +301,20 @@ is(grep(/^rmgr:/, @lines), 0, 'no rmgr lines output');
|
|||||||
like($lines[0], qr/WAL statistics/, "statistics on stdout");
|
like($lines[0], qr/WAL statistics/, "statistics on stdout");
|
||||||
is(grep(/^rmgr:/, @lines), 0, 'no rmgr lines output');
|
is(grep(/^rmgr:/, @lines), 0, 'no rmgr lines output');
|
||||||
|
|
||||||
@lines = test_pg_waldump('--rmgr', 'Btree');
|
@lines = test_pg_waldump('--rmgr' => 'Btree');
|
||||||
is(grep(!/^rmgr: Btree/, @lines), 0, 'only Btree lines');
|
is(grep(!/^rmgr: Btree/, @lines), 0, 'only Btree lines');
|
||||||
|
|
||||||
@lines = test_pg_waldump('--fork', 'init');
|
@lines = test_pg_waldump('--fork' => 'init');
|
||||||
is(grep(!/fork init/, @lines), 0, 'only init fork lines');
|
is(grep(!/fork init/, @lines), 0, 'only init fork lines');
|
||||||
|
|
||||||
@lines = test_pg_waldump('--relation',
|
@lines = test_pg_waldump(
|
||||||
"$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
|
'--relation' => "$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
|
||||||
is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines),
|
is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines),
|
||||||
0, 'only lines for selected relation');
|
0, 'only lines for selected relation');
|
||||||
|
|
||||||
@lines =
|
@lines = test_pg_waldump(
|
||||||
test_pg_waldump('--relation',
|
'--relation' => "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid",
|
||||||
"$default_ts_oid/$postgres_db_oid/$rel_i1a_oid",
|
'--block' => 1);
|
||||||
'--block', 1);
|
|
||||||
is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block');
|
is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block');
|
||||||
|
|
||||||
|
|
||||||
|
@ -71,9 +71,10 @@ ok(-f $walfile, "Got a WAL file");
|
|||||||
|
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'pg_waldump', '--quiet',
|
'pg_waldump',
|
||||||
'--save-fullpage', "$tmp_folder/raw",
|
'--quiet',
|
||||||
'--relation', $relation,
|
'--save-fullpage' => "$tmp_folder/raw",
|
||||||
|
'--relation' => $relation,
|
||||||
$walfile
|
$walfile
|
||||||
],
|
],
|
||||||
'pg_waldump with --save-fullpage runs');
|
'pg_waldump with --save-fullpage runs');
|
||||||
|
@ -213,7 +213,7 @@ my $nthreads = 2;
|
|||||||
|
|
||||||
{
|
{
|
||||||
my ($stderr);
|
my ($stderr);
|
||||||
run_log([ 'pgbench', '-j', '2', '--bad-option' ], '2>', \$stderr);
|
run_log([ 'pgbench', '--jobs' => '2', '--bad-option' ], '2>', \$stderr);
|
||||||
$nthreads = 1 if $stderr =~ m/threads are not supported on this platform/;
|
$nthreads = 1 if $stderr =~ m/threads are not supported on this platform/;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -216,11 +216,12 @@ $node->safe_psql('postgres', "CREATE TABLE tab_psql_single (a int);");
 # Tests with ON_ERROR_STOP.
 $node->command_ok(
 [
-'psql', '-X',
-'--single-transaction', '-v',
-'ON_ERROR_STOP=1', '-c',
-'INSERT INTO tab_psql_single VALUES (1)', '-c',
-'INSERT INTO tab_psql_single VALUES (2)'
+'psql',
+'--no-psqlrc',
+'--single-transaction',
+'--set' => 'ON_ERROR_STOP=1',
+'--command' => 'INSERT INTO tab_psql_single VALUES (1)',
+'--command' => 'INSERT INTO tab_psql_single VALUES (2)',
 ],
 'ON_ERROR_STOP, --single-transaction and multiple -c switches');
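Spelled out, the array above corresponds to one psql invocation that runs both INSERTs inside a single implicit transaction; a minimal sketch in Perl's list form of system (illustration only, not part of the test suite):

# With ON_ERROR_STOP=1 and --single-transaction, a failure in any --command
# makes psql exit with an error and the whole transaction is rolled back.
system('psql', '--no-psqlrc', '--single-transaction',
    '--set' => 'ON_ERROR_STOP=1',
    '--command' => 'INSERT INTO tab_psql_single VALUES (1)',
    '--command' => 'INSERT INTO tab_psql_single VALUES (2)');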
my $row_count =
|
my $row_count =
|
||||||
@ -231,11 +232,12 @@ is($row_count, '2',
|
|||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
'psql', '-X',
|
'psql',
|
||||||
'--single-transaction', '-v',
|
'--no-psqlrc',
|
||||||
'ON_ERROR_STOP=1', '-c',
|
'--single-transaction',
|
||||||
'INSERT INTO tab_psql_single VALUES (3)', '-c',
|
'--set' => 'ON_ERROR_STOP=1',
|
||||||
"\\copy tab_psql_single FROM '$tempdir/nonexistent'"
|
'--command' => 'INSERT INTO tab_psql_single VALUES (3)',
|
||||||
|
'--command' => "\\copy tab_psql_single FROM '$tempdir/nonexistent'"
|
||||||
],
|
],
|
||||||
'ON_ERROR_STOP, --single-transaction and multiple -c switches, error');
|
'ON_ERROR_STOP, --single-transaction and multiple -c switches, error');
|
||||||
$row_count =
|
$row_count =
|
||||||
@ -252,9 +254,12 @@ append_to_file($copy_sql_file,
|
|||||||
append_to_file($insert_sql_file, 'INSERT INTO tab_psql_single VALUES (4);');
|
append_to_file($insert_sql_file, 'INSERT INTO tab_psql_single VALUES (4);');
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'psql', '-X', '--single-transaction', '-v',
|
'psql',
|
||||||
'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
|
'--no-psqlrc',
|
||||||
$insert_sql_file
|
'--single-transaction',
|
||||||
|
'--set' => 'ON_ERROR_STOP=1',
|
||||||
|
'--file' => $insert_sql_file,
|
||||||
|
'--file' => $insert_sql_file
|
||||||
],
|
],
|
||||||
'ON_ERROR_STOP, --single-transaction and multiple -f switches');
|
'ON_ERROR_STOP, --single-transaction and multiple -f switches');
|
||||||
$row_count =
|
$row_count =
|
||||||
@ -265,9 +270,12 @@ is($row_count, '4',
|
|||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
'psql', '-X', '--single-transaction', '-v',
|
'psql',
|
||||||
'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
|
'--no-psqlrc',
|
||||||
$copy_sql_file
|
'--single-transaction',
|
||||||
|
'--set' => 'ON_ERROR_STOP=1',
|
||||||
|
'--file' => $insert_sql_file,
|
||||||
|
'--file' => $copy_sql_file
|
||||||
],
|
],
|
||||||
'ON_ERROR_STOP, --single-transaction and multiple -f switches, error');
|
'ON_ERROR_STOP, --single-transaction and multiple -f switches, error');
|
||||||
$row_count =
|
$row_count =
|
||||||
@ -281,11 +289,12 @@ is($row_count, '4',
|
|||||||
# transaction commits.
|
# transaction commits.
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
'psql', '-X',
|
'psql',
|
||||||
'--single-transaction', '-f',
|
'--no-psqlrc',
|
||||||
$insert_sql_file, '-f',
|
'--single-transaction',
|
||||||
$insert_sql_file, '-c',
|
'--file' => $insert_sql_file,
|
||||||
"\\copy tab_psql_single FROM '$tempdir/nonexistent'"
|
'--file' => $insert_sql_file,
|
||||||
|
'--command' => "\\copy tab_psql_single FROM '$tempdir/nonexistent'"
|
||||||
],
|
],
|
||||||
'no ON_ERROR_STOP, --single-transaction and multiple -f/-c switches');
|
'no ON_ERROR_STOP, --single-transaction and multiple -f/-c switches');
|
||||||
$row_count =
|
$row_count =
|
||||||
@ -298,9 +307,12 @@ is($row_count, '6',
|
|||||||
# returns a success and the transaction commits.
|
# returns a success and the transaction commits.
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'psql', '-X', '--single-transaction', '-f',
|
'psql',
|
||||||
$insert_sql_file, '-f', $insert_sql_file, '-f',
|
'--no-psqlrc',
|
||||||
$copy_sql_file
|
'--single-transaction',
|
||||||
|
'--file' => $insert_sql_file,
|
||||||
|
'--file' => $insert_sql_file,
|
||||||
|
'--file' => $copy_sql_file
|
||||||
],
|
],
|
||||||
'no ON_ERROR_STOP, --single-transaction and multiple -f switches');
|
'no ON_ERROR_STOP, --single-transaction and multiple -f switches');
|
||||||
$row_count =
|
$row_count =
|
||||||
@ -313,11 +325,12 @@ is($row_count, '8',
|
|||||||
# the transaction commit even if there is a failure in-between.
|
# the transaction commit even if there is a failure in-between.
|
||||||
$node->command_ok(
|
$node->command_ok(
|
||||||
[
|
[
|
||||||
'psql', '-X',
|
'psql',
|
||||||
'--single-transaction', '-c',
|
'--no-psqlrc',
|
||||||
'INSERT INTO tab_psql_single VALUES (5)', '-f',
|
'--single-transaction',
|
||||||
$copy_sql_file, '-c',
|
'--command' => 'INSERT INTO tab_psql_single VALUES (5)',
|
||||||
'INSERT INTO tab_psql_single VALUES (6)'
|
'--file' => $copy_sql_file,
|
||||||
|
'--command' => 'INSERT INTO tab_psql_single VALUES (6)'
|
||||||
],
|
],
|
||||||
'no ON_ERROR_STOP, --single-transaction and multiple -c switches');
|
'no ON_ERROR_STOP, --single-transaction and multiple -c switches');
|
||||||
$row_count =
|
$row_count =
|
||||||
|
@@ -21,14 +21,14 @@ $node->issues_sql_like(
 qr/statement: CLUSTER;/,
 'SQL CLUSTER run');
 
-$node->command_fails([ 'clusterdb', '-t', 'nonexistent' ],
+$node->command_fails([ 'clusterdb', '--table' => 'nonexistent' ],
 'fails with nonexistent table');
 
 $node->safe_psql('postgres',
 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'
 );
 $node->issues_sql_like(
-[ 'clusterdb', '-t', 'test1' ],
+[ 'clusterdb', '--table' => 'test1' ],
 qr/statement: CLUSTER public\.test1;/,
 'cluster specific table');
|
|
||||||
|
@ -15,7 +15,7 @@ $node->start;
|
|||||||
# clusterdb -a is not compatible with -d. This relies on PGDATABASE to be
|
# clusterdb -a is not compatible with -d. This relies on PGDATABASE to be
|
||||||
# set, something PostgreSQL::Test::Cluster does.
|
# set, something PostgreSQL::Test::Cluster does.
|
||||||
$node->issues_sql_like(
|
$node->issues_sql_like(
|
||||||
[ 'clusterdb', '-a' ],
|
[ 'clusterdb', '--all' ],
|
||||||
qr/statement: CLUSTER.*statement: CLUSTER/s,
|
qr/statement: CLUSTER.*statement: CLUSTER/s,
|
||||||
'cluster all databases');
|
'cluster all databases');
|
||||||
|
|
||||||
@ -24,13 +24,13 @@ $node->safe_psql(
|
|||||||
CREATE DATABASE regression_invalid;
|
CREATE DATABASE regression_invalid;
|
||||||
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
|
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
|
||||||
));
|
));
|
||||||
$node->command_ok([ 'clusterdb', '-a' ],
|
$node->command_ok([ 'clusterdb', '--all' ],
|
||||||
'invalid database not targeted by clusterdb -a');
|
'invalid database not targeted by clusterdb -a');
|
||||||
|
|
||||||
# Doesn't quite belong here, but don't want to waste time by creating an
|
# Doesn't quite belong here, but don't want to waste time by creating an
|
||||||
# invalid database in 010_clusterdb.pl as well.
|
# invalid database in 010_clusterdb.pl as well.
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[ 'clusterdb', '-d', 'regression_invalid' ],
|
[ 'clusterdb', '--dbname' => 'regression_invalid' ],
|
||||||
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
|
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
|
||||||
'clusterdb cannot target invalid database');
|
'clusterdb cannot target invalid database');
|
||||||
|
|
||||||
@ -41,7 +41,7 @@ $node->safe_psql('template1',
|
|||||||
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'
|
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'
|
||||||
);
|
);
|
||||||
$node->issues_sql_like(
|
$node->issues_sql_like(
|
||||||
[ 'clusterdb', '-a', '-t', 'test1' ],
|
[ 'clusterdb', '--all', '--table' => 'test1' ],
|
||||||
qr/statement: CLUSTER public\.test1/s,
|
qr/statement: CLUSTER public\.test1/s,
|
||||||
'cluster specific table in all databases');
|
'cluster specific table in all databases');
|
||||||
|
|
||||||
|
@ -21,7 +21,13 @@ $node->issues_sql_like(
|
|||||||
qr/statement: CREATE DATABASE foobar1/,
|
qr/statement: CREATE DATABASE foobar1/,
|
||||||
'SQL CREATE DATABASE run');
|
'SQL CREATE DATABASE run');
|
||||||
$node->issues_sql_like(
|
$node->issues_sql_like(
|
||||||
[ 'createdb', '-l', 'C', '-E', 'LATIN1', '-T', 'template0', 'foobar2' ],
|
[
|
||||||
|
'createdb',
|
||||||
|
'--locale' => 'C',
|
||||||
|
'--encoding' => 'LATIN1',
|
||||||
|
'--template' => 'template0',
|
||||||
|
'foobar2',
|
||||||
|
],
|
||||||
qr/statement: CREATE DATABASE foobar2 ENCODING 'LATIN1'/,
|
qr/statement: CREATE DATABASE foobar2 ENCODING 'LATIN1'/,
|
||||||
'create database with encoding');
|
'create database with encoding');
|
||||||
|
|
||||||
@ -32,35 +38,45 @@ if ($ENV{with_icu} eq 'yes')
|
|||||||
# provider. XXX Maybe split into multiple tests?
|
# provider. XXX Maybe split into multiple tests?
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
'createdb', '-T', 'template0', '-E', 'UTF8',
|
'createdb',
|
||||||
'--locale-provider=icu', 'foobar4'
|
'--template' => 'template0',
|
||||||
|
'--encoding' => 'UTF8',
|
||||||
|
'--locale-provider' => 'icu',
|
||||||
|
'foobar4',
|
||||||
],
|
],
|
||||||
'create database with ICU fails without ICU locale specified');
|
'create database with ICU fails without ICU locale specified');
|
||||||
|
|
||||||
$node->issues_sql_like(
|
$node->issues_sql_like(
|
||||||
[
|
[
|
||||||
'createdb', '-T',
|
'createdb',
|
||||||
'template0', '-E',
|
'--template' => 'template0',
|
||||||
'UTF8', '--locale-provider=icu',
|
'--encoding' => 'UTF8',
|
||||||
'--locale=C', '--icu-locale=en',
|
'--locale-provider' => 'icu',
|
||||||
'foobar5'
|
'--locale' => 'C',
|
||||||
|
'--icu-locale' => 'en',
|
||||||
|
'foobar5',
|
||||||
],
|
],
|
||||||
qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
|
qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
|
||||||
'create database with ICU locale specified');
|
'create database with ICU locale specified');
|
||||||
|
|
||||||
$node->command_fails(
|
$node->command_fails(
|
||||||
[
|
[
|
||||||
'createdb', '-T', 'template0', '-E', 'UTF8',
|
'createdb',
|
||||||
'--locale-provider=icu',
|
'--template' => 'template0',
|
||||||
'--icu-locale=@colNumeric=lower', 'foobarX'
|
'--encoding' => 'UTF8',
|
||||||
|
'--locale-provider' => 'icu',
|
||||||
|
'--icu-locale' => '@colNumeric=lower',
|
||||||
|
'foobarX',
|
||||||
],
|
],
|
||||||
'fails for invalid ICU locale');
|
'fails for invalid ICU locale');
|
||||||
|
|
||||||
$node->command_fails_like(
|
$node->command_fails_like(
|
||||||
[
|
[
|
||||||
'createdb', '-T',
|
'createdb',
|
||||||
'template0', '--locale-provider=icu',
|
'--template' => 'template0',
|
||||||
'--encoding=SQL_ASCII', 'foobarX'
|
'--locale-provider' => 'icu',
|
||||||
|
'--encoding' => 'SQL_ASCII',
|
||||||
|
'foobarX',
|
||||||
],
|
],
|
||||||
qr/ERROR: encoding "SQL_ASCII" is not supported with ICU provider/,
|
qr/ERROR: encoding "SQL_ASCII" is not supported with ICU provider/,
|
||||||
'fails for encoding not supported by ICU');
|
'fails for encoding not supported by ICU');
|
||||||
@ -72,116 +88,144 @@ if ($ENV{with_icu} eq 'yes')
|
|||||||
|
|
||||||
$node2->command_ok(
[
'createdb', '-T',
'template0', '--locale-provider=libc',
'foobar55'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'libc',
'foobar55',
],
'create database with libc provider from template database with icu provider'
);

$node2->command_ok(
[
'createdb', '-T', 'template0', '--icu-locale', 'en-US',
'foobar56'
'createdb',
'--template' => 'template0',
'--icu-locale' => 'en-US',
'foobar56',
],
'create database with icu locale from template database with icu provider'
);

$node2->command_ok(
[
'createdb', '-T',
'template0', '--locale-provider',
'icu', '--locale',
'en', '--lc-collate',
'C', '--lc-ctype',
'C', 'foobar57'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'icu',
'--locale' => 'en',
'--lc-collate' => 'C',
'--lc-ctype' => 'C',
'foobar57',
],
'create database with locale as ICU locale');
}
else
{
$node->command_fails(
[ 'createdb', '-T', 'template0', '--locale-provider=icu', 'foobar4' ],
[
'createdb',
'--template' => 'template0',
'--locale-provider' => 'icu',
'foobar4',
],
'create database with ICU fails since no ICU support');
}

$node->command_fails(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'tbuiltin1'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'tbuiltin1',
],
'create database with provider "builtin" fails without --locale');

$node->command_ok(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'--locale=C', 'tbuiltin2'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'--locale' => 'C',
'tbuiltin2',
],
'create database with provider "builtin" and locale "C"');

$node->command_ok(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'--locale=C', '--lc-collate=C',
'tbuiltin3'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'--locale' => 'C',
'--lc-collate' => 'C',
'tbuiltin3',
],
'create database with provider "builtin" and LC_COLLATE=C');

$node->command_ok(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'--locale=C', '--lc-ctype=C',
'tbuiltin4'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'--locale' => 'C',
'--lc-ctype' => 'C',
'tbuiltin4',
],
'create database with provider "builtin" and LC_CTYPE=C');

$node->command_ok(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'--lc-collate=C', '--lc-ctype=C',
'-E UTF-8', '--builtin-locale=C.UTF8',
'tbuiltin5'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'--lc-collate' => 'C',
'--lc-ctype' => 'C',
'--encoding' => 'UTF-8',
'--builtin-locale' => 'C.UTF8',
'tbuiltin5',
],
'create database with --builtin-locale C.UTF-8 and -E UTF-8');

$node->command_fails(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'--lc-collate=C', '--lc-ctype=C',
'-E LATIN1', '--builtin-locale=C.UTF-8',
'tbuiltin6'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'--lc-collate' => 'C',
'--lc-ctype' => 'C',
'--encoding' => 'LATIN1',
'--builtin-locale' => 'C.UTF-8',
'tbuiltin6',
],
'create database with --builtin-locale C.UTF-8 and -E LATIN1');

$node->command_fails(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'--locale=C', '--icu-locale=en',
'tbuiltin7'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'--locale' => 'C',
'--icu-locale' => 'en',
'tbuiltin7',
],
'create database with provider "builtin" and ICU_LOCALE="en"');

$node->command_fails(
[
'createdb', '-T',
'template0', '--locale-provider=builtin',
'--locale=C', '--icu-rules=""',
'tbuiltin8'
'createdb',
'--template' => 'template0',
'--locale-provider' => 'builtin',
'--locale' => 'C',
'--icu-rules' => '""',
'tbuiltin8',
],
'create database with provider "builtin" and ICU_RULES=""');

$node->command_fails(
[
'createdb', '-T',
'template1', '--locale-provider=builtin',
'--locale=C', 'tbuiltin9'
'createdb',
'--template' => 'template1',
'--locale-provider' => 'builtin',
'--locale' => 'C',
'tbuiltin9',
],
'create database with provider "builtin" not matching template');

@ -189,7 +233,12 @@ $node->command_fails([ 'createdb', 'foobar1' ],
'fails if database already exists');

$node->command_fails(
[ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
[
'createdb',
'--template' => 'template0',
'--locale-provider' => 'xyz',
'foobarX',
],
'fails for invalid locale provider');

# Check use of templates with shared dependencies copied from the template.
@ -200,7 +249,7 @@ CREATE TABLE tab_foobar (id int);
ALTER TABLE tab_foobar owner to role_foobar;
CREATE POLICY pol_foobar ON tab_foobar FOR ALL TO role_foobar;');
$node->issues_sql_like(
[ 'createdb', '-l', 'C', '-T', 'foobar2', 'foobar3' ],
[ 'createdb', '--locale' => 'C', '--template' => 'foobar2', 'foobar3' ],
qr/statement: CREATE DATABASE foobar3 TEMPLATE foobar2 LOCALE 'C'/,
'create database with template');
($ret, $stdout, $stderr) = $node->psql(
@ -228,7 +277,7 @@ $node->command_checks_all(
1,
[qr/^$/],
[
qr/^createdb: error: database creation failed: ERROR: invalid LC_COLLATE locale name|^createdb: error: database creation failed: ERROR: new collation \(foo'; SELECT '1\) is incompatible with the collation of the template database/s
qr/^createdb: error: database creation failed: ERROR: invalid LC_COLLATE locale name|^createdb: error: database creation failed: ERROR: new collation \(foo'; SELECT '1\) is incompatible with the collation of the template database/s,
],
'createdb with incorrect --lc-collate');
$node->command_checks_all(
@ -236,7 +285,7 @@ $node->command_checks_all(
1,
[qr/^$/],
[
qr/^createdb: error: database creation failed: ERROR: invalid LC_CTYPE locale name|^createdb: error: database creation failed: ERROR: new LC_CTYPE \(foo'; SELECT '1\) is incompatible with the LC_CTYPE of the template database/s
qr/^createdb: error: database creation failed: ERROR: invalid LC_CTYPE locale name|^createdb: error: database creation failed: ERROR: new LC_CTYPE \(foo'; SELECT '1\) is incompatible with the LC_CTYPE of the template database/s,
],
'createdb with incorrect --lc-ctype');

@ -245,34 +294,59 @@ $node->command_checks_all(
1,
[qr/^$/],
[
qr/^createdb: error: database creation failed: ERROR: invalid create database strategy "foo"/s
qr/^createdb: error: database creation failed: ERROR: invalid create database strategy "foo"/s,
],
'createdb with incorrect --strategy');

# Check database creation strategy
$node->issues_sql_like(
[ 'createdb', '-T', 'foobar2', '-S', 'wal_log', 'foobar6' ],
[
'createdb',
'--template' => 'foobar2',
'--strategy' => 'wal_log',
'foobar6',
],
qr/statement: CREATE DATABASE foobar6 STRATEGY wal_log TEMPLATE foobar2/,
'create database with WAL_LOG strategy');

$node->issues_sql_like(
[ 'createdb', '-T', 'foobar2', '-S', 'WAL_LOG', 'foobar6s' ],
[
'createdb',
'--template' => 'foobar2',
'--strategy' => 'WAL_LOG',
'foobar6s',
],
qr/statement: CREATE DATABASE foobar6s STRATEGY "WAL_LOG" TEMPLATE foobar2/,
'create database with WAL_LOG strategy');

$node->issues_sql_like(
[ 'createdb', '-T', 'foobar2', '-S', 'file_copy', 'foobar7' ],
[
'createdb',
'--template' => 'foobar2',
'--strategy' => 'file_copy',
'foobar7',
],
qr/statement: CREATE DATABASE foobar7 STRATEGY file_copy TEMPLATE foobar2/,
'create database with FILE_COPY strategy');

$node->issues_sql_like(
[ 'createdb', '-T', 'foobar2', '-S', 'FILE_COPY', 'foobar7s' ],
[
'createdb',
'--template' => 'foobar2',
'--strategy' => 'FILE_COPY',
'foobar7s',
],
qr/statement: CREATE DATABASE foobar7s STRATEGY "FILE_COPY" TEMPLATE foobar2/,
'create database with FILE_COPY strategy');

# Create database owned by role_foobar.
$node->issues_sql_like(
[ 'createdb', '-T', 'foobar2', '-O', 'role_foobar', 'foobar8' ],
[
'createdb',
'--template' => 'foobar2',
'--owner' => 'role_foobar',
'foobar8',
],
qr/statement: CREATE DATABASE foobar8 OWNER role_foobar TEMPLATE foobar2/,
'create database with owner role_foobar');
($ret, $stdout, $stderr) =

@ -21,34 +21,37 @@ $node->issues_sql_like(
qr/statement: CREATE ROLE regress_user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'SQL CREATE USER run');
$node->issues_sql_like(
[ 'createuser', '-L', 'regress_role1' ],
[ 'createuser', '--no-login', 'regress_role1' ],
qr/statement: CREATE ROLE regress_role1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS;/,
'create a non-login role');
$node->issues_sql_like(
[ 'createuser', '-r', 'regress user2' ],
[ 'createuser', '--createrole', 'regress user2' ],
qr/statement: CREATE ROLE "regress user2" NOSUPERUSER NOCREATEDB CREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'create a CREATEROLE user');
$node->issues_sql_like(
[ 'createuser', '-s', 'regress_user3' ],
[ 'createuser', '--superuser', 'regress_user3' ],
qr/statement: CREATE ROLE regress_user3 SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'create a superuser');
$node->issues_sql_like(
[
'createuser', '-a',
'regress_user1', '-a',
'regress user2', 'regress user #4'
'createuser',
'--with-admin' => 'regress_user1',
'--with-admin' => 'regress user2',
'regress user #4'
],
qr/statement: CREATE ROLE "regress user #4" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ADMIN regress_user1,"regress user2";/,
'add a role as a member with admin option of the newly created role');
$node->issues_sql_like(
[
'createuser', 'REGRESS_USER5', '-m', 'regress_user3',
'-m', 'regress user #4'
'createuser',
'REGRESS_USER5',
'--with-member' => 'regress_user3',
'--with-member' => 'regress user #4'
],
qr/statement: CREATE ROLE "REGRESS_USER5" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ROLE regress_user3,"regress user #4";/,
'add a role as a member of the newly created role');
$node->issues_sql_like(
[ 'createuser', '-v', '2029 12 31', 'regress_user6' ],
[ 'createuser', '--valid-until' => '2029 12 31', 'regress_user6' ],
qr/statement: CREATE ROLE regress_user6 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS VALID UNTIL \'2029 12 31\';/,
'create a role with a password expiration date');
$node->issues_sql_like(
@ -60,26 +63,31 @@ $node->issues_sql_like(
qr/statement: CREATE ROLE regress_user8 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'create a role without BYPASSRLS');
$node->issues_sql_like(
[ 'createuser', '--with-admin', 'regress_user1', 'regress_user9' ],
[ 'createuser', '--with-admin' => 'regress_user1', 'regress_user9' ],
qr/statement: CREATE ROLE regress_user9 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ADMIN regress_user1;/,
'--with-admin');
$node->issues_sql_like(
[ 'createuser', '--with-member', 'regress_user1', 'regress_user10' ],
[ 'createuser', '--with-member' => 'regress_user1', 'regress_user10' ],
qr/statement: CREATE ROLE regress_user10 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ROLE regress_user1;/,
'--with-member');
$node->issues_sql_like(
[ 'createuser', '--role', 'regress_user1', 'regress_user11' ],
[ 'createuser', '--role' => 'regress_user1', 'regress_user11' ],
qr/statement: CREATE ROLE regress_user11 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS IN ROLE regress_user1;/,
'--role');
$node->issues_sql_like(
[ 'createuser', 'regress_user12', '--member-of', 'regress_user1' ],
[ 'createuser', 'regress_user12', '--member-of' => 'regress_user1' ],
qr/statement: CREATE ROLE regress_user12 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS IN ROLE regress_user1;/,
'--member-of');

$node->command_fails([ 'createuser', 'regress_user1' ],
'fails if role already exists');
$node->command_fails(
[ 'createuser', 'regress_user1', '-m', 'regress_user2', 'regress_user3' ],
[
'createuser',
'regress_user1',
'--with-member' => 'regress_user2',
'regress_user3'
],
'fails for too many non-options');

done_testing();

@ -20,7 +20,10 @@ $node->command_fails(['pg_isready'], 'fails with no server running');
$node->start;

$node->command_ok(
[ 'pg_isready', "--timeout=$PostgreSQL::Test::Utils::timeout_default" ],
[
'pg_isready',
'--timeout' => $PostgreSQL::Test::Utils::timeout_default,
],
'succeeds with server running');

done_testing();

@ -96,7 +96,7 @@ test1|test1x|OID is unchanged|relfilenode has changed),
$node->safe_psql('postgres',
"TRUNCATE index_relfilenodes; $save_relfilenodes");
$node->issues_sql_like(
[ 'reindexdb', '-s', 'postgres' ],
[ 'reindexdb', '--system', 'postgres' ],
qr/statement: REINDEX SYSTEM postgres;/,
'reindex system tables');
$relnode_info = $node->safe_psql('postgres', $compare_relfilenodes);
@ -108,29 +108,37 @@ test1|test1x|OID is unchanged|relfilenode is unchanged),
'relfilenode change after REINDEX SYSTEM');

$node->issues_sql_like(
[ 'reindexdb', '-t', 'test1', 'postgres' ],
[ 'reindexdb', '--table' => 'test1', 'postgres' ],
qr/statement: REINDEX TABLE public\.test1;/,
'reindex specific table');
$node->issues_sql_like(
[ 'reindexdb', '-t', 'test1', '--tablespace', $tbspace_name, 'postgres' ],
[
'reindexdb',
'--table' => 'test1',
'--tablespace' => $tbspace_name,
'postgres',
],
qr/statement: REINDEX \(TABLESPACE $tbspace_name\) TABLE public\.test1;/,
'reindex specific table on tablespace');
$node->issues_sql_like(
[ 'reindexdb', '-i', 'test1x', 'postgres' ],
[ 'reindexdb', '--index' => 'test1x', 'postgres' ],
qr/statement: REINDEX INDEX public\.test1x;/,
'reindex specific index');
$node->issues_sql_like(
[ 'reindexdb', '-S', 'pg_catalog', 'postgres' ],
[ 'reindexdb', '--schema' => 'pg_catalog', 'postgres' ],
qr/statement: REINDEX SCHEMA pg_catalog;/,
'reindex specific schema');
$node->issues_sql_like(
[ 'reindexdb', '-v', '-t', 'test1', 'postgres' ],
[ 'reindexdb', '--verbose', '--table' => 'test1', 'postgres' ],
qr/statement: REINDEX \(VERBOSE\) TABLE public\.test1;/,
'reindex with verbose output');
$node->issues_sql_like(
[
'reindexdb', '-v', '-t', 'test1',
'--tablespace', $tbspace_name, 'postgres'
'reindexdb',
'--verbose',
'--table' => 'test1',
'--tablespace' => $tbspace_name,
'postgres',
],
qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE public\.test1;/,
'reindex with verbose output and tablespace');
@ -153,27 +161,36 @@ test1|test1x|OID has changed|relfilenode has changed),
'OID change after REINDEX DATABASE CONCURRENTLY');

$node->issues_sql_like(
[ 'reindexdb', '--concurrently', '-t', 'test1', 'postgres' ],
[ 'reindexdb', '--concurrently', '--table' => 'test1', 'postgres' ],
qr/statement: REINDEX TABLE CONCURRENTLY public\.test1;/,
'reindex specific table concurrently');
$node->issues_sql_like(
[ 'reindexdb', '--concurrently', '-i', 'test1x', 'postgres' ],
[ 'reindexdb', '--concurrently', '--index' => 'test1x', 'postgres' ],
qr/statement: REINDEX INDEX CONCURRENTLY public\.test1x;/,
'reindex specific index concurrently');
$node->issues_sql_like(
[ 'reindexdb', '--concurrently', '-S', 'public', 'postgres' ],
[ 'reindexdb', '--concurrently', '--schema' => 'public', 'postgres' ],
qr/statement: REINDEX SCHEMA CONCURRENTLY public;/,
'reindex specific schema concurrently');
$node->command_fails([ 'reindexdb', '--concurrently', '-s', 'postgres' ],
$node->command_fails(
[ 'reindexdb', '--concurrently', '--system', 'postgres' ],
'reindex system tables concurrently');
$node->issues_sql_like(
[ 'reindexdb', '--concurrently', '-v', '-t', 'test1', 'postgres' ],
[
'reindexdb', '--concurrently', '--verbose',
'--table' => 'test1',
'postgres',
],
qr/statement: REINDEX \(VERBOSE\) TABLE CONCURRENTLY public\.test1;/,
'reindex with verbose output concurrently');
$node->issues_sql_like(
[
'reindexdb', '--concurrently', '-v', '-t',
'test1', '--tablespace', $tbspace_name, 'postgres'
'reindexdb',
'--concurrently',
'--verbose',
'--table' => 'test1',
'--tablespace' => $tbspace_name,
'postgres',
],
qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE CONCURRENTLY public\.test1;/,
'reindex concurrently with verbose output and tablespace');
@ -185,8 +202,10 @@ $node->issues_sql_like(
# messages.
$node->command_checks_all(
[
'reindexdb', '-t', $toast_table, '--tablespace',
$tbspace_name, 'postgres'
'reindexdb',
'--table' => $toast_table,
'--tablespace' => $tbspace_name,
'postgres',
],
1,
[],
@ -194,8 +213,11 @@ $node->command_checks_all(
'reindex toast table with tablespace');
$node->command_checks_all(
[
'reindexdb', '--concurrently', '-t', $toast_table,
'--tablespace', $tbspace_name, 'postgres'
'reindexdb',
'--concurrently',
'--table' => $toast_table,
'--tablespace' => $tbspace_name,
'postgres',
],
1,
[],
@ -203,8 +225,10 @@ $node->command_checks_all(
'reindex toast table concurrently with tablespace');
$node->command_checks_all(
[
'reindexdb', '-i', $toast_index, '--tablespace',
$tbspace_name, 'postgres'
'reindexdb',
'--index' => $toast_index,
'--tablespace' => $tbspace_name,
'postgres',
],
1,
[],
@ -212,8 +236,11 @@ $node->command_checks_all(
'reindex toast index with tablespace');
$node->command_checks_all(
[
'reindexdb', '--concurrently', '-i', $toast_index,
'--tablespace', $tbspace_name, 'postgres'
'reindexdb',
'--concurrently',
'--index' => $toast_index,
'--tablespace' => $tbspace_name,
'postgres',
],
1,
[],
@ -246,35 +273,51 @@ $node->safe_psql(
|);

$node->command_fails(
[ 'reindexdb', '-j', '2', '-s', 'postgres' ],
[ 'reindexdb', '--jobs' => '2', '--system', 'postgres' ],
'parallel reindexdb cannot process system catalogs');
$node->command_ok(
[ 'reindexdb', '-j', '2', '-i', 's1.i1', '-i', 's2.i2', 'postgres' ],
[
'reindexdb',
'--jobs' => '2',
'--index' => 's1.i1',
'--index' => 's2.i2',
'postgres',
],
'parallel reindexdb for indices');
# Note that the ordering of the commands is not stable, so the second
# command for s2.t2 is not checked after.
$node->issues_sql_like(
[ 'reindexdb', '-j', '2', '-S', 's1', '-S', 's2', 'postgres' ],
[
'reindexdb',
'--jobs' => '2',
'--schema' => 's1',
'--schema' => 's2',
'postgres',
],
qr/statement:\ REINDEX TABLE s1.t1;/,
'parallel reindexdb for schemas does a per-table REINDEX');
$node->command_ok(
[ 'reindexdb', '-j', '2', '-S', 's3' ],
$node->command_ok([ 'reindexdb', '--jobs' => '2', '--schema' => 's3' ],
'parallel reindexdb with empty schema');
$node->command_ok(
[ 'reindexdb', '-j', '2', '--concurrently', '-d', 'postgres' ],
[
'reindexdb',
'--jobs' => '2',
'--concurrently',
'--dbname' => 'postgres',
],
'parallel reindexdb on database, concurrently');

# combinations of objects
$node->issues_sql_like(
[ 'reindexdb', '-s', '-t', 'test1', 'postgres' ],
[ 'reindexdb', '--system', '--table' => 'test1', 'postgres' ],
qr/statement:\ REINDEX SYSTEM postgres;/,
'specify both --system and --table');
$node->issues_sql_like(
[ 'reindexdb', '-s', '-i', 'test1x', 'postgres' ],
[ 'reindexdb', '--system', '--index' => 'test1x', 'postgres' ],
qr/statement:\ REINDEX INDEX public.test1x;/,
'specify both --system and --index');
$node->issues_sql_like(
[ 'reindexdb', '-s', '-S', 'pg_catalog', 'postgres' ],
[ 'reindexdb', '--system', '--schema' => 'pg_catalog', 'postgres' ],
qr/statement:\ REINDEX SCHEMA pg_catalog;/,
'specify both --system and --schema');

@ -18,23 +18,23 @@ $node->safe_psql('postgres',
$node->safe_psql('template1',
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a);');
$node->issues_sql_like(
[ 'reindexdb', '-a' ],
[ 'reindexdb', '--all' ],
qr/statement: REINDEX.*statement: REINDEX/s,
'reindex all databases');
$node->issues_sql_like(
[ 'reindexdb', '-a', '-s' ],
[ 'reindexdb', '--all', '--system' ],
qr/statement: REINDEX SYSTEM postgres/s,
'reindex system catalogs in all databases');
$node->issues_sql_like(
[ 'reindexdb', '-a', '-S', 'public' ],
[ 'reindexdb', '--all', '--schema' => 'public' ],
qr/statement: REINDEX SCHEMA public/s,
'reindex schema in all databases');
$node->issues_sql_like(
[ 'reindexdb', '-a', '-i', 'test1x' ],
[ 'reindexdb', '--all', '--index' => 'test1x' ],
qr/statement: REINDEX INDEX public\.test1x/s,
'reindex index in all databases');
$node->issues_sql_like(
[ 'reindexdb', '-a', '-t', 'test1' ],
[ 'reindexdb', '--all', '--table' => 'test1' ],
qr/statement: REINDEX TABLE public\.test1/s,
'reindex table in all databases');

@ -43,13 +43,13 @@ $node->safe_psql(
CREATE DATABASE regression_invalid;
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'reindexdb', '-a' ],
'invalid database not targeted by reindexdb -a');
$node->command_ok([ 'reindexdb', '--all' ],
'invalid database not targeted by reindexdb --all');

# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 090_reindexdb.pl as well.
$node->command_fails_like(
[ 'reindexdb', '-d', 'regression_invalid' ],
[ 'reindexdb', '--dbname' => 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'reindexdb cannot target invalid database');

@ -80,11 +80,11 @@ $node->command_fails(
[ 'vacuumdb', '--analyze-only', '--no-process-toast', 'postgres' ],
'--analyze-only and --no-process-toast specified together');
$node->issues_sql_like(
[ 'vacuumdb', '-P', 2, 'postgres' ],
[ 'vacuumdb', '--parallel' => 2, 'postgres' ],
qr/statement: VACUUM \(SKIP_DATABASE_STATS, PARALLEL 2\).*;/,
'vacuumdb -P 2');
$node->issues_sql_like(
[ 'vacuumdb', '-P', 0, 'postgres' ],
[ 'vacuumdb', '--parallel' => 0, 'postgres' ],
qr/statement: VACUUM \(SKIP_DATABASE_STATS, PARALLEL 0\).*;/,
'vacuumdb -P 0');
$node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)],
@ -118,94 +118,123 @@ $node->command_ok([qw|vacuumdb -Z --table="need""q(uot"(")x") postgres|],
'column list');

$node->command_fails(
[ 'vacuumdb', '--analyze', '--table', 'vactable(c)', 'postgres' ],
[ 'vacuumdb', '--analyze', '--table' => 'vactable(c)', 'postgres' ],
'incorrect column name with ANALYZE');
$node->command_fails([ 'vacuumdb', '-P', -1, 'postgres' ],
$node->command_fails([ 'vacuumdb', '--parallel' => -1, 'postgres' ],
'negative parallel degree');
$node->issues_sql_like(
[ 'vacuumdb', '--analyze', '--table', 'vactable(a, b)', 'postgres' ],
[ 'vacuumdb', '--analyze', '--table' => 'vactable(a, b)', 'postgres' ],
qr/statement: VACUUM \(SKIP_DATABASE_STATS, ANALYZE\) public.vactable\(a, b\);/,
'vacuumdb --analyze with complete column list');
$node->issues_sql_like(
[ 'vacuumdb', '--analyze-only', '--table', 'vactable(b)', 'postgres' ],
[ 'vacuumdb', '--analyze-only', '--table' => 'vactable(b)', 'postgres' ],
qr/statement: ANALYZE public.vactable\(b\);/,
'vacuumdb --analyze-only with partial column list');
$node->command_checks_all(
[ 'vacuumdb', '--analyze', '--table', 'vacview', 'postgres' ],
[ 'vacuumdb', '--analyze', '--table' => 'vacview', 'postgres' ],
0,
[qr/^.*vacuuming database "postgres"/],
[qr/^WARNING.*cannot vacuum non-tables or special system tables/s],
'vacuumdb with view');
$node->command_fails(
[ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '0', 'postgres' ],
[
'vacuumdb',
'--table' => 'vactable',
'--min-mxid-age' => '0',
'postgres'
],
'vacuumdb --min-mxid-age with incorrect value');
$node->command_fails(
[ 'vacuumdb', '--table', 'vactable', '--min-xid-age', '0', 'postgres' ],
[
'vacuumdb',
'--table' => 'vactable',
'--min-xid-age' => '0',
'postgres'
],
'vacuumdb --min-xid-age with incorrect value');
$node->issues_sql_like(
[
'vacuumdb', '--table', 'vactable', '--min-mxid-age',
'2147483000', 'postgres'
'vacuumdb',
'--table' => 'vactable',
'--min-mxid-age' => '2147483000',
'postgres'
],
qr/GREATEST.*relminmxid.*2147483000/,
'vacuumdb --table --min-mxid-age');
$node->issues_sql_like(
[ 'vacuumdb', '--min-xid-age', '2147483001', 'postgres' ],
[ 'vacuumdb', '--min-xid-age' => '2147483001', 'postgres' ],
qr/GREATEST.*relfrozenxid.*2147483001/,
'vacuumdb --table --min-xid-age');
$node->issues_sql_like(
[ 'vacuumdb', '--schema', '"Foo"', 'postgres' ],
[ 'vacuumdb', '--schema' => '"Foo"', 'postgres' ],
qr/VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar/,
'vacuumdb --schema');
$node->issues_sql_like(
[ 'vacuumdb', '--schema', '"Foo"', '--schema', '"Bar"', 'postgres' ],
[ 'vacuumdb', '--schema' => '"Foo"', '--schema' => '"Bar"', 'postgres' ],
qr/VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar
.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz
/sx,
'vacuumdb multiple --schema switches');
$node->issues_sql_like(
[ 'vacuumdb', '--exclude-schema', '"Foo"', 'postgres' ],
[ 'vacuumdb', '--exclude-schema' => '"Foo"', 'postgres' ],
qr/^(?!.*VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar).*$/s,
'vacuumdb --exclude-schema');
$node->issues_sql_like(
[
'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema',
'"Bar"', 'postgres'
'vacuumdb',
'--exclude-schema' => '"Foo"',
'--exclude-schema' => '"Bar"',
'postgres'
],
qr/^(?!.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar
| VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz).*$/sx,
'vacuumdb multiple --exclude-schema switches');
$node->command_fails_like(
[ 'vacuumdb', '-N', 'pg_catalog', '-t', 'pg_class', 'postgres', ],
[
'vacuumdb',
'--exclude-schema' => 'pg_catalog',
'--table' => 'pg_class',
'postgres',
],
qr/cannot vacuum specific table\(s\) and exclude schema\(s\) at the same time/,
'cannot use options -N and -t at the same time');
'cannot use options --exclude-schema and --table at the same time');
$node->command_fails_like(
[ 'vacuumdb', '-n', 'pg_catalog', '-t', 'pg_class', 'postgres' ],
[
'vacuumdb',
'--schema' => 'pg_catalog',
'--table' => 'pg_class',
'postgres'
],
qr/cannot vacuum all tables in schema\(s\) and specific table\(s\) at the same time/,
'cannot use options -n and -t at the same time');
'cannot use options --schema and --table at the same time');
$node->command_fails_like(
[ 'vacuumdb', '-n', 'pg_catalog', '-N', '"Foo"', 'postgres' ],
[
'vacuumdb',
'--schema' => 'pg_catalog',
'--exclude-schema' => '"Foo"',
'postgres'
],
qr/cannot vacuum all tables in schema\(s\) and exclude schema\(s\) at the same time/,
'cannot use options -n and -N at the same time');
'cannot use options --schema and --exclude-schema at the same time');
$node->issues_sql_like(
[ 'vacuumdb', '-a', '-N', 'pg_catalog' ],
[ 'vacuumdb', '--all', '--exclude-schema' => 'pg_catalog' ],
qr/(?:(?!VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class).)*/,
'vacuumdb -a -N');
'vacuumdb --all --exclude-schema');
$node->issues_sql_like(
[ 'vacuumdb', '-a', '-n', 'pg_catalog' ],
[ 'vacuumdb', '--all', '--schema' => 'pg_catalog' ],
qr/VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class/,
'vacuumdb -a -n');
'vacuumdb --all --schema');
$node->issues_sql_like(
[ 'vacuumdb', '-a', '-t', 'pg_class' ],
[ 'vacuumdb', '--all', '--table' => 'pg_class' ],
qr/VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class/,
'vacuumdb -a -t');
'vacuumdb --all --table');
$node->command_fails_like(
[ 'vacuumdb', '-a', '-d', 'postgres' ],
[ 'vacuumdb', '--all', '-d' => 'postgres' ],
qr/cannot vacuum all databases and a specific one at the same time/,
'cannot use options -a and -d at the same time');
'cannot use options --all and --dbname at the same time');
$node->command_fails_like(
[ 'vacuumdb', '-a', 'postgres' ],
[ 'vacuumdb', '--all', 'postgres' ],
qr/cannot vacuum all databases and a specific one at the same time/,
'cannot use option -a and a dbname as argument at the same time');
'cannot use option --all and a dbname as argument at the same time');

done_testing();

@ -12,7 +12,7 @@ $node->init;
$node->start;

$node->issues_sql_like(
[ 'vacuumdb', '-a' ],
[ 'vacuumdb', '--all' ],
qr/statement: VACUUM.*statement: VACUUM/s,
'vacuum all databases');

@ -21,13 +21,13 @@ $node->safe_psql(
CREATE DATABASE regression_invalid;
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'vacuumdb', '-a' ],
$node->command_ok([ 'vacuumdb', '--all' ],
'invalid database not targeted by vacuumdb -a');

# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_vacuumdb.pl as well.
$node->command_fails_like(
[ 'vacuumdb', '-d', 'regression_invalid' ],
[ 'vacuumdb', '--dbname' => 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'vacuumdb cannot target invalid database');

@ -566,8 +566,11 @@ my $connstr = $node_primary->connstr('postgres') . " replication=database";
# a replication command and a SQL command.
$node_primary->command_fails_like(
[
'psql', '-X', '-c', "SELECT pg_backup_start('backup', true)",
'-c', 'BASE_BACKUP', '-d', $connstr
'psql',
'--no-psqlrc',
'--command' => "SELECT pg_backup_start('backup', true)",
'--command' => 'BASE_BACKUP',
'--dbname' => $connstr
],
qr/a backup is already in progress in this session/,
'BASE_BACKUP cannot run in session already running backup');

@ -57,7 +57,7 @@ $node_primary->init(has_archiving => 1, allows_streaming => 1);

# Bump the transaction ID epoch. This is useful to stress the portability
# of recovery_target_xid parsing.
system_or_bail('pg_resetwal', '--epoch', '1', $node_primary->data_dir);
system_or_bail('pg_resetwal', '--epoch' => '1', $node_primary->data_dir);

# Start it
$node_primary->start;
@ -147,8 +147,10 @@ recovery_target_time = '$recovery_time'");

my $res = run_log(
[
'pg_ctl', '-D', $node_standby->data_dir, '-l',
$node_standby->logfile, 'start'
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);
ok(!$res, 'invalid recovery startup fails');

@ -168,8 +170,10 @@ $node_standby->append_conf('postgresql.conf',

run_log(
[
'pg_ctl', '-D', $node_standby->data_dir, '-l',
$node_standby->logfile, 'start'
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);

# wait for postgres to terminate

@ -160,13 +160,13 @@ my $pre_existing_msg = qr/pre-existing shared memory block/;
like(slurp_file($gnat->logfile),
$pre_existing_msg, 'detected live backend via shared memory');
# Reject single-user startup.
my $single_stderr;
ok( !run_log(
[ 'postgres', '--single', '-D', $gnat->data_dir, 'template1' ],
'<', \undef, '2>', \$single_stderr),
'live query blocks --single');
print STDERR $single_stderr;
like($single_stderr, $pre_existing_msg,
command_fails_like(
[
'postgres', '--single',
'-D' => $gnat->data_dir,
'template1'
],
$pre_existing_msg,
'single-user mode detected live backend via shared memory');
log_ipcs();

@ -76,9 +76,10 @@ sub test_recovery_wal_level_minimal
# that the server ends with an error during recovery.
run_log(
[
'pg_ctl', '-D',
$recovery_node->data_dir, '-l',
$recovery_node->logfile, 'start'
'pg_ctl',
'--pgdata' => $recovery_node->data_dir,
'--log' => $recovery_node->logfile,
'start',
]);

# wait for postgres to terminate

@ -105,19 +105,23 @@ $node_primary->wait_for_replay_catchup($node_standby_1);
# Perform a logical dump of primary and standby, and check that they match
command_ok(
[
'pg_dumpall', '-f', $outputdir . '/primary.dump',
'--no-sync', '-p', $node_primary->port,
'--no-unlogged-table-data' # if unlogged, standby has schema only
'pg_dumpall',
'--file' => $outputdir . '/primary.dump',
'--no-sync',
'--port' => $node_primary->port,
'--no-unlogged-table-data', # if unlogged, standby has schema only
],
'dump primary server');
command_ok(
[
'pg_dumpall', '-f', $outputdir . '/standby.dump',
'--no-sync', '-p', $node_standby_1->port
'pg_dumpall',
'--file' => $outputdir . '/standby.dump',
'--no-sync',
'--port' => $node_standby_1->port,
],
'dump standby server');
command_ok(
[ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump' ],
[ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump', ],
'compare primary and standby dumps');

# Likewise for the catalogs of the regression database, after disabling
@ -128,29 +132,29 @@ $node_primary->wait_for_replay_catchup($node_standby_1);
command_ok(
[
'pg_dump',
('--schema', 'pg_catalog'),
('-f', $outputdir . '/catalogs_primary.dump'),
'--schema' => 'pg_catalog',
'--file' => $outputdir . '/catalogs_primary.dump',
'--no-sync',
('-p', $node_primary->port),
'--port', $node_primary->port,
'--no-unlogged-table-data',
'regression'
'regression',
],
'dump catalogs of primary server');
command_ok(
[
'pg_dump',
('--schema', 'pg_catalog'),
('-f', $outputdir . '/catalogs_standby.dump'),
'--schema' => 'pg_catalog',
'--file' => $outputdir . '/catalogs_standby.dump',
'--no-sync',
('-p', $node_standby_1->port),
'regression'
'--port' => $node_standby_1->port,
'regression',
],
'dump catalogs of standby server');
command_ok(
[
'diff',
$outputdir . '/catalogs_primary.dump',
$outputdir . '/catalogs_standby.dump'
$outputdir . '/catalogs_standby.dump',
],
'compare primary and standby catalog dumps');

@ -543,12 +543,14 @@ $node->connect_fails(
# pg_stat_ssl
command_like(
[
'psql', '-X',
'-A', '-F',
',', '-P',
'null=_null_', '-d',
"$common_connstr sslrootcert=invalid", '-c',
"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
'psql',
'--no-psqlrc',
'--no-align',
'--field-separator' => ',',
'--pset', => 'null=_null_',
'--dbname' => "$common_connstr sslrootcert=invalid",
'--command' =>
"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,client_dn,client_serial,issuer_dn\r?\n
^\d+,t,TLSv[\d.]+,[\w-]+,\d+,_null_,_null_,_null_\r?$}mx,
@ -742,17 +744,15 @@ else
command_like(
[
'psql',
'-X',
'-A',
'-F',
',',
'-P',
'null=_null_',
'-d',
"$common_connstr user=ssltestuser sslcert=ssl/client.crt "
'--no-psqlrc',
'--no-align',
'--field-separator' => ',',
'--pset' => 'null=_null_',
'--dbname' =>
"$common_connstr user=ssltestuser sslcert=ssl/client.crt "
. sslkey('client.key'),
'-c',
'--command' =>
"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,client_dn,client_serial,issuer_dn\r?\n
^\d+,t,TLSv[\d.]+,[\w-]+,\d+,/?CN=ssltestuser,$serialno,/?\QCN=Test CA for PostgreSQL SSL regression test client certs\E\r?$}mx,