tap tests: replace 'master' with 'primary'.
We've largely replaced "master" with "primary" in the docs etc., but the TAP tests still widely used "master".

Author: Andres Freund
Reviewed-By: David Steele
Discussion: https://postgr.es/m/20200615182235.x7lch5n6kcjq4aue@alap3.anarazel.de
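The diff below touches the pg_rewind TAP helper module (package RewindTest), whose header comment documents the sequence a test script is expected to follow. As a rough illustration of the renamed interface, a t/ test script written against it might look like the following sketch. The helper names ($node_primary, primary_psql, start_primary, etc.) come from the diff itself; the table name, inserted value, extra cluster name, and the 'local' test mode are made-up examples, not part of this commit.

# Hypothetical test script, for illustration only -- not part of this commit.
use strict;
use warnings;
use TestLib;
use Test::More;
use FindBin;
use lib $FindBin::RealBin;
use RewindTest;

RewindTest::setup_cluster('example');    # initdb a cluster acting as the primary
RewindTest::start_primary();             # formerly start_master

# Create some data that must survive the rewind (primary_psql was master_psql).
primary_psql("CREATE TABLE tbl1 (d text)");
primary_psql("INSERT INTO tbl1 VALUES ('in primary')");

RewindTest::create_standby('example');   # base backup that follows the primary
RewindTest::promote_standby();           # the old primary keeps running
RewindTest::run_pg_rewind('local');      # stop the old primary, rewind it from the standby

check_query(
	'SELECT d FROM tbl1',
	qq(in primary
),
	'table content survived the rewind');

RewindTest::clean_rewind_test();
done_testing();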
@@ -2,31 +2,31 @@ package RewindTest;
 
 # Test driver for pg_rewind. Each test consists of a cycle where a new cluster
 # is first created with initdb, and a streaming replication standby is set up
-# to follow the master. Then the master is shut down and the standby is
-# promoted, and finally pg_rewind is used to rewind the old master, using the
+# to follow the primary. Then the primary is shut down and the standby is
+# promoted, and finally pg_rewind is used to rewind the old primary, using the
 # standby as the source.
 #
 # To run a test, the test script (in t/ subdirectory) calls the functions
 # in this module. These functions should be called in this sequence:
 #
-# 1. setup_cluster - creates a PostgreSQL cluster that runs as the master
+# 1. setup_cluster - creates a PostgreSQL cluster that runs as the primary
 #
-# 2. start_master - starts the master server
+# 2. start_primary - starts the primary server
 #
 # 3. create_standby - runs pg_basebackup to initialize a standby server, and
-#    sets it up to follow the master.
+#    sets it up to follow the primary.
 #
 # 4. promote_standby - runs "pg_ctl promote" to promote the standby server.
-#    The old master keeps running.
+#    The old primary keeps running.
 #
-# 5. run_pg_rewind - stops the old master (if it's still running) and runs
+# 5. run_pg_rewind - stops the old primary (if it's still running) and runs
 #    pg_rewind to synchronize it with the now-promoted standby server.
 #
 # 6. clean_rewind_test - stops both servers used in the test, if they're
 #    still running.
 #
-# The test script can use the helper functions master_psql and standby_psql
-# to run psql against the master and standby servers, respectively.
+# The test script can use the helper functions primary_psql and standby_psql
+# to run psql against the primary and standby servers, respectively.
 
 use strict;
 use warnings;
@@ -43,15 +43,15 @@ use TestLib;
 use Test::More;
 
 our @EXPORT = qw(
-	$node_master
+	$node_primary
 	$node_standby
 
-	master_psql
+	primary_psql
 	standby_psql
 	check_query
 
 	setup_cluster
-	start_master
+	start_primary
 	create_standby
 	promote_standby
 	run_pg_rewind
@@ -59,16 +59,16 @@ our @EXPORT = qw(
 );
 
 # Our nodes.
-our $node_master;
+our $node_primary;
 our $node_standby;
 
-sub master_psql
+sub primary_psql
 {
 	my $cmd = shift;
 	my $dbname = shift || 'postgres';
 
 	system_or_bail 'psql', '-q', '--no-psqlrc', '-d',
-	  $node_master->connstr($dbname), '-c', "$cmd";
+	  $node_primary->connstr($dbname), '-c', "$cmd";
 	return;
 }
 
@@ -82,7 +82,7 @@ sub standby_psql
 	return;
 }
 
-# Run a query against the master, and check that the output matches what's
+# Run a query against the primary, and check that the output matches what's
 # expected
 sub check_query
 {
@@ -94,7 +94,7 @@ sub check_query
 	# we want just the output, no formatting
 	my $result = run [
 		'psql', '-q', '-A', '-t', '--no-psqlrc', '-d',
-		$node_master->connstr('postgres'),
+		$node_primary->connstr('postgres'),
 		'-c', $query
 	  ],
 	  '>', \$stdout, '2>', \$stderr;
@@ -123,34 +123,34 @@ sub setup_cluster
 	my $extra_name = shift; # Used to differentiate clusters
 	my $extra = shift;      # Extra params for initdb
 
-	# Initialize master, data checksums are mandatory
-	$node_master =
-	  get_new_node('master' . ($extra_name ? "_${extra_name}" : ''));
+	# Initialize primary, data checksums are mandatory
+	$node_primary =
+	  get_new_node('primary' . ($extra_name ? "_${extra_name}" : ''));
 
 	# Set up pg_hba.conf and pg_ident.conf for the role running
 	# pg_rewind. This role is used for all the tests, and has
 	# minimal permissions enough to rewind from an online source.
-	$node_master->init(
+	$node_primary->init(
 		allows_streaming => 1,
 		extra => $extra,
 		auth_extra => [ '--create-role', 'rewind_user' ]);
 
 	# Set wal_keep_segments to prevent WAL segment recycling after enforced
 	# checkpoints in the tests.
-	$node_master->append_conf(
+	$node_primary->append_conf(
 		'postgresql.conf', qq(
 wal_keep_segments = 20
 ));
 	return;
 }
 
-sub start_master
+sub start_primary
 {
-	$node_master->start;
+	$node_primary->start;
 
 	# Create custom role which is used to run pg_rewind, and adjust its
 	# permissions to the minimum necessary.
-	$node_master->safe_psql(
+	$node_primary->safe_psql(
 		'postgres', "
 		CREATE ROLE rewind_user LOGIN;
 		GRANT EXECUTE ON function pg_catalog.pg_ls_dir(text, boolean, boolean)
@@ -162,7 +162,7 @@ sub start_master
 		GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean)
 		  TO rewind_user;");
 
-	#### Now run the test-specific parts to initialize the master before setting
+	#### Now run the test-specific parts to initialize the primary before setting
 	# up standby
 
 	return;
@@ -174,13 +174,13 @@ sub create_standby
 
 	$node_standby =
 	  get_new_node('standby' . ($extra_name ? "_${extra_name}" : ''));
-	$node_master->backup('my_backup');
-	$node_standby->init_from_backup($node_master, 'my_backup');
-	my $connstr_master = $node_master->connstr();
+	$node_primary->backup('my_backup');
+	$node_standby->init_from_backup($node_primary, 'my_backup');
+	my $connstr_primary = $node_primary->connstr();
 
 	$node_standby->append_conf(
 		"postgresql.conf", qq(
-primary_conninfo='$connstr_master'
+primary_conninfo='$connstr_primary'
 ));
 
 	$node_standby->set_standby_mode();
@@ -200,10 +200,10 @@ sub promote_standby
 	# up standby
 
 	# Wait for the standby to receive and write all WAL.
-	$node_master->wait_for_catchup($node_standby, 'write');
+	$node_primary->wait_for_catchup($node_standby, 'write');
 
-	# Now promote standby and insert some new data on master, this will put
-	# the master out-of-sync with the standby.
+	# Now promote standby and insert some new data on primary, this will put
+	# the primary out-of-sync with the standby.
 	$node_standby->promote;
 
 	# Force a checkpoint after the promotion. pg_rewind looks at the control
@@ -220,7 +220,7 @@ sub promote_standby
 sub run_pg_rewind
 {
 	my $test_mode = shift;
-	my $master_pgdata = $node_master->data_dir;
+	my $primary_pgdata = $node_primary->data_dir;
 	my $standby_pgdata = $node_standby->data_dir;
 	my $standby_connstr = $node_standby->connstr('postgres');
 	my $tmp_folder = TestLib::tempdir;
@@ -239,14 +239,14 @@ sub run_pg_rewind
 		# segments but that would just make the test more costly,
 		# without improving the coverage. Hence, instead, stop
 		# gracefully the primary here.
-		$node_master->stop;
+		$node_primary->stop;
 	}
 	else
 	{
-		# Stop the master and be ready to perform the rewind. The cluster
+		# Stop the primary and be ready to perform the rewind. The cluster
 		# needs recovery to finish once, and pg_rewind makes sure that it
 		# happens automatically.
-		$node_master->stop('immediate');
+		$node_primary->stop('immediate');
 	}
 
 	# At this point, the rewind processing is ready to run.
@@ -254,25 +254,25 @@ sub run_pg_rewind
 	# The real testing begins really now with a bifurcation of the possible
 	# scenarios that pg_rewind supports.
 
-	# Keep a temporary postgresql.conf for master node or it would be
+	# Keep a temporary postgresql.conf for primary node or it would be
 	# overwritten during the rewind.
 	copy(
-		"$master_pgdata/postgresql.conf",
-		"$tmp_folder/master-postgresql.conf.tmp");
+		"$primary_pgdata/postgresql.conf",
+		"$tmp_folder/primary-postgresql.conf.tmp");
 
 	# Now run pg_rewind
 	if ($test_mode eq "local")
 	{
 
 		# Do rewind using a local pgdata as source
-		# Stop the master and be ready to perform the rewind
+		# Stop the primary and be ready to perform the rewind
 		$node_standby->stop;
 		command_ok(
 			[
 				'pg_rewind',
 				"--debug",
 				"--source-pgdata=$standby_pgdata",
-				"--target-pgdata=$master_pgdata",
+				"--target-pgdata=$primary_pgdata",
 				"--no-sync"
 			],
 			'pg_rewind local');
@@ -285,19 +285,19 @@ sub run_pg_rewind
 			[
 				'pg_rewind', "--debug",
 				"--source-server", $standby_connstr,
-				"--target-pgdata=$master_pgdata", "--no-sync",
+				"--target-pgdata=$primary_pgdata", "--no-sync",
 				"--write-recovery-conf"
 			],
 			'pg_rewind remote');
 
 		# Check that standby.signal is here as recovery configuration
 		# was requested.
-		ok( -e "$master_pgdata/standby.signal",
+		ok( -e "$primary_pgdata/standby.signal",
 			'standby.signal created after pg_rewind');
 
 		# Now, when pg_rewind apparently succeeded with minimal permissions,
 		# add REPLICATION privilege. So we could test that new standby
-		# is able to connect to the new master with generated config.
+		# is able to connect to the new primary with generated config.
 		$node_standby->safe_psql('postgres',
 			"ALTER ROLE rewind_user WITH REPLICATION;");
 	}
@@ -305,30 +305,30 @@ sub run_pg_rewind
 	{
 
 		# Do rewind using a local pgdata as source and specified
-		# directory with target WAL archive. The old master has
+		# directory with target WAL archive. The old primary has
 		# to be stopped at this point.
 
 		# Remove the existing archive directory and move all WAL
-		# segments from the old master to the archives. These
+		# segments from the old primary to the archives. These
 		# will be used by pg_rewind.
-		rmtree($node_master->archive_dir);
-		RecursiveCopy::copypath($node_master->data_dir . "/pg_wal",
-			$node_master->archive_dir);
+		rmtree($node_primary->archive_dir);
+		RecursiveCopy::copypath($node_primary->data_dir . "/pg_wal",
+			$node_primary->archive_dir);
 
 		# Fast way to remove entire directory content
-		rmtree($node_master->data_dir . "/pg_wal");
-		mkdir($node_master->data_dir . "/pg_wal");
+		rmtree($node_primary->data_dir . "/pg_wal");
+		mkdir($node_primary->data_dir . "/pg_wal");
 
 		# Make sure that directories have the right umask as this is
 		# required by a follow-up check on permissions, and better
 		# safe than sorry.
-		chmod(0700, $node_master->archive_dir);
-		chmod(0700, $node_master->data_dir . "/pg_wal");
+		chmod(0700, $node_primary->archive_dir);
+		chmod(0700, $node_primary->data_dir . "/pg_wal");
 
 		# Add appropriate restore_command to the target cluster
-		$node_master->enable_restoring($node_master, 0);
+		$node_primary->enable_restoring($node_primary, 0);
 
-		# Stop the new master and be ready to perform the rewind.
+		# Stop the new primary and be ready to perform the rewind.
 		$node_standby->stop;
 
 		# Note the use of --no-ensure-shutdown here. WAL files are
@@ -339,7 +339,7 @@ sub run_pg_rewind
 				'pg_rewind',
 				"--debug",
 				"--source-pgdata=$standby_pgdata",
-				"--target-pgdata=$master_pgdata",
+				"--target-pgdata=$primary_pgdata",
 				"--no-sync",
 				"--no-ensure-shutdown",
 				"--restore-target-wal"
@@ -355,28 +355,28 @@ sub run_pg_rewind
 
 	# Now move back postgresql.conf with old settings
 	move(
-		"$tmp_folder/master-postgresql.conf.tmp",
-		"$master_pgdata/postgresql.conf");
+		"$tmp_folder/primary-postgresql.conf.tmp",
+		"$primary_pgdata/postgresql.conf");
 
 	chmod(
-		$node_master->group_access() ? 0640 : 0600,
-		"$master_pgdata/postgresql.conf")
+		$node_primary->group_access() ? 0640 : 0600,
+		"$primary_pgdata/postgresql.conf")
 	  or BAIL_OUT(
-		"unable to set permissions for $master_pgdata/postgresql.conf");
+		"unable to set permissions for $primary_pgdata/postgresql.conf");
 
 	# Plug-in rewound node to the now-promoted standby node
 	if ($test_mode ne "remote")
 	{
 		my $port_standby = $node_standby->port;
-		$node_master->append_conf(
+		$node_primary->append_conf(
 			'postgresql.conf', qq(
 primary_conninfo='port=$port_standby'));
 
-		$node_master->set_standby_mode();
+		$node_primary->set_standby_mode();
 	}
 
-	# Restart the master to check that rewind went correctly
-	$node_master->start;
+	# Restart the primary to check that rewind went correctly
+	$node_primary->start;
 
 	#### Now run the test-specific parts to check the result
 
@@ -386,7 +386,7 @@ primary_conninfo='port=$port_standby'));
 # Clean up after the test. Stop both servers, if they're still running.
 sub clean_rewind_test
 {
-	$node_master->teardown_node if defined $node_master;
+	$node_primary->teardown_node if defined $node_primary;
 	$node_standby->teardown_node if defined $node_standby;
 	return;
 }