Run pgperltidy
This is required before the creation of a new branch. pgindent is clean, as is reformat-dat-files. The perltidy version is v20230309, as documented in pgindent's README.
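For context, a tidy run of this sort is typically driven from the top of the source tree roughly as sketched below; the paths are the usual in-tree locations, and src/tools/pgindent/README remains the authoritative procedure (this is only a sketch, assuming perltidy v20230309 and a built pg_bsd_indent are available):

    # confirm the expected perltidy release is installed (should report v20230309)
    perltidy --version

    # reformat the tree's Perl files using the project's perltidyrc profile
    src/tools/pgindent/pgperltidy .

    # verify the C code and catalog .dat files are already clean
    src/tools/pgindent/pgindent .
    make -C src/include/catalog reformat-dat-files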
@@ -79,39 +79,40 @@ $node->start;
# other tests are added to this file in the future
$node->safe_psql('postgres', "CREATE DATABASE test_log_connections");

my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;));
my $log_connections =
$node->safe_psql('test_log_connections', q(SHOW log_connections;));
is($log_connections, 'on', qq(check log connections has expected value 'on'));

$node->connect_ok('test_log_connections',
$node->connect_ok(
'test_log_connections',
qq(log_connections 'on' works as expected for backwards compatibility),
log_like => [
qr/connection received/,
qr/connection authenticated/,
qr/connection authorized: user=\S+ database=test_log_connections/,
],
log_unlike => [
qr/connection ready/,
],);
log_unlike => [ qr/connection ready/, ],);

$node->safe_psql('test_log_connections',
$node->safe_psql(
'test_log_connections',
q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations;
SELECT pg_reload_conf();]);

$node->connect_ok('test_log_connections',
$node->connect_ok(
'test_log_connections',
q(log_connections with subset of specified options logs only those aspects),
log_like => [
qr/connection received/,
qr/connection authorized: user=\S+ database=test_log_connections/,
qr/connection ready/,
],
log_unlike => [
qr/connection authenticated/,
],);
log_unlike => [ qr/connection authenticated/, ],);

$node->safe_psql('test_log_connections',
qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();));

$node->connect_ok('test_log_connections',
$node->connect_ok(
'test_log_connections',
qq(log_connections 'all' logs all available connection aspects),
log_like => [
qr/connection received/,

@@ -53,7 +53,8 @@ for my $testname (@tests)
$node->command_ok(
[
'libpq_pipeline', @extraargs,
$testname, $node->connstr('postgres') . " max_protocol_version=latest"
$testname,
$node->connstr('postgres') . " max_protocol_version=latest"
],
"libpq_pipeline $testname");

@@ -76,7 +77,8 @@ for my $testname (@tests)
# test separately that it still works the old protocol version too.
$node->command_ok(
[
'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0"
'libpq_pipeline', 'cancel',
$node->connstr('postgres') . " max_protocol_version=3.0"
],
"libpq_pipeline cancel with protocol 3.0");

@@ -1123,7 +1123,8 @@ COMMIT;
{
# Create a corruption and then read the block without waiting for
# completion.
$psql_a->query(qq(
$psql_a->query(
qq(
SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true);
SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true)
));
@@ -1133,7 +1134,8 @@ SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>tru
$psql_b,
"$persistency: test completing read by other session doesn't generate warning",
qq(SELECT count(*) > 0 FROM tbl_zero;),
qr/^t$/, qr/^$/);
qr/^t$/,
qr/^$/);
}

# Clean up
@@ -1355,18 +1357,24 @@ SELECT modify_rel_block('tbl_cs_fail', 6, corrupt_checksum=>true);
));

$psql->query_safe($invalidate_sql);
psql_like($io_method, $psql,
psql_like(
$io_method,
$psql,
"reading block w/ wrong checksum with ignore_checksum_failure=off fails",
$count_sql, qr/^$/, qr/ERROR: invalid page in block/);
$count_sql,
qr/^$/,
qr/ERROR: invalid page in block/);

$psql->query_safe("SET ignore_checksum_failure=on");

$psql->query_safe($invalidate_sql);
psql_like($io_method, $psql,
"reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
$count_sql,
qr/^$expect$/,
qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
psql_like(
$io_method,
$psql,
"reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
$count_sql,
qr/^$expect$/,
qr/WARNING: ignoring (checksum failure|\d checksum failures)/);

# Verify that ignore_checksum_failure=off works in multi-block reads
@@ -1432,19 +1440,22 @@ SELECT read_rel_block_ll('tbl_cs_fail', 1, nblocks=>5, zero_on_error=>true);),
# file.

$node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/,
$log_location);
$log_location);
ok(1, "$io_method: found information about checksum failure in block 2");

$node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
$log_location);
$node->wait_for_log(
qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
$log_location);
ok(1, "$io_method: found information about invalid page in block 3");

$node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
$log_location);
$node->wait_for_log(
qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
$log_location);
ok(1, "$io_method: found information about checksum failure in block 4");

$node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
$log_location);
$node->wait_for_log(
qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
$log_location);
ok(1, "$io_method: found information about checksum failure in block 5");

@@ -1462,8 +1473,7 @@ SELECT modify_rel_block('tbl_cs_fail', 3, corrupt_checksum=>true, corrupt_header
qq(
SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);),
qr/^$/,
qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/
);
qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/);

psql_like(
$io_method,

@@ -68,7 +68,8 @@ sub connect_fails_wait
my $log_location = -s $node->logfile;

$node->connect_fails($connstr, $test_name, %params);
$node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
$node->wait_for_log(
qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
$log_location);
ok(1, "$test_name: client backend process exited");
}

@@ -941,8 +941,7 @@ is( $standby1->safe_psql(
'synced slot retained on the new primary');

# Commit the prepared transaction
$standby1->safe_psql('postgres',
"COMMIT PREPARED 'test_twophase_slotsync';");
$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';");
$standby1->wait_for_catchup('regress_mysub1');

# Confirm that the prepared transaction is replicated to the subscriber

@@ -47,7 +47,7 @@ my $psql_primaryA =
$node_primary->background_psql($test_db, on_error_stop => 1);

# Long-running Primary Session B
my $psql_primaryB =
my $psql_primaryB =
$node_primary->background_psql($test_db, on_error_stop => 1);

# Our test relies on two rounds of index vacuuming for reasons elaborated
@@ -81,7 +81,8 @@ my $nrows = 2000;
# insert and delete enough rows that we force at least one round of index
# vacuuming before getting to a dead tuple which was killed after the standby
# is disconnected.
$node_primary->safe_psql($test_db, qq[
$node_primary->safe_psql(
$test_db, qq[
CREATE TABLE ${table1}(col1 int)
WITH (autovacuum_enabled=false, fillfactor=10);
INSERT INTO $table1 VALUES(7);
@@ -98,21 +99,24 @@ my $primary_lsn = $node_primary->lsn('flush');
$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);

# Test that the WAL receiver is up and running.
$node_replica->poll_query_until($test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
$node_replica->poll_query_until(
$test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');

# Set primary_conninfo to something invalid on the replica and reload the
# config. Once the config is reloaded, the startup process will force the WAL
# receiver to restart and it will be unable to reconnect because of the
# invalid connection information.
$node_replica->safe_psql($test_db, qq[
$node_replica->safe_psql(
$test_db, qq[
ALTER SYSTEM SET primary_conninfo = '';
SELECT pg_reload_conf();
]);

# Wait until the WAL receiver has shut down and been unable to start up again.
$node_replica->poll_query_until($test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
$node_replica->poll_query_until(
$test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');

# Now insert and update a tuple which will be visible to the vacuum on the
# primary but which will have xmax newer than the oldest xmin on the standby
@@ -123,7 +127,7 @@ my $res = $psql_primaryA->query_safe(
UPDATE $table1 SET col1 = 100 WHERE col1 = 99;
SELECT 'after_update';
]
);
);

# Make sure the UPDATE finished
like($res, qr/^after_update$/m, "UPDATE occurred on primary session A");
@@ -148,7 +152,7 @@ $res = $psql_primaryB->query_safe(
DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7;
FETCH $primary_cursor1;
]
);
);

is($res, 7, qq[Cursor query returned $res. Expected value 7.]);

@@ -183,7 +187,8 @@ $psql_primaryA->{run}->pump_nb();
# just waiting on the lock to start vacuuming. We don't want the standby to
# re-establish a connection to the primary and push the horizon back until
# we've saved initial values in GlobalVisState and calculated OldestXmin.
$node_primary->poll_query_until($test_db,
$node_primary->poll_query_until(
$test_db,
qq[
SELECT count(*) >= 1 FROM pg_stat_activity
WHERE pid = $vacuum_pid
@@ -192,8 +197,9 @@ $node_primary->poll_query_until($test_db,
't');

# Ensure the WAL receiver is still not active on the replica.
$node_replica->poll_query_until($test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
$node_replica->poll_query_until(
$test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');

# Allow the WAL receiver connection to re-establish.
$node_replica->safe_psql(
@@ -203,15 +209,17 @@ $node_replica->safe_psql(
]);

# Ensure the new WAL receiver has connected.
$node_replica->poll_query_until($test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
$node_replica->poll_query_until(
$test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');

# Once the WAL sender is shown on the primary, the replica should have
# connected with the primary and pushed the horizon backward. Primary Session
# A won't see that until the VACUUM FREEZE proceeds and does its first round
# of index vacuuming.
$node_primary->poll_query_until($test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_replication);] , 't');
$node_primary->poll_query_until(
$test_db, qq[
SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't');

# Move the cursor forward to the next 7. We inserted the 7 much later, so
# advancing the cursor should allow vacuum to proceed vacuuming most pages of
@@ -225,20 +233,21 @@ is($res, 7,

# Prevent the test from incorrectly passing by confirming that we did indeed
# do a pass of index vacuuming.
$node_primary->poll_query_until($test_db, qq[
$node_primary->poll_query_until(
$test_db, qq[
SELECT index_vacuum_count > 0
FROM pg_stat_progress_vacuum
WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass;
] , 't');
], 't');

# Commit the transaction with the open cursor so that the VACUUM can finish.
$psql_primaryB->query_until(
qr/^commit$/m,
qq[
qr/^commit$/m,
qq[
COMMIT;
\\echo commit
]
);
);

# VACUUM proceeds with pruning and does a visibility check on each tuple. In
# older versions of Postgres, pruning found our final dead tuple
@@ -252,7 +261,8 @@ $psql_primaryB->query_until(

# With the fix, VACUUM should finish successfully, incrementing the table
# vacuum_count.
$node_primary->poll_query_until($test_db,
$node_primary->poll_query_until(
$test_db,
qq[
SELECT vacuum_count > 0
FROM pg_stat_all_tables WHERE relname = '${table1}';

@@ -318,7 +318,8 @@ sub switch_server_cert
$node->append_conf('sslconfig.conf', "ssl=on");
$node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params));
# use lists of ECDH curves and cipher suites for syntax testing
$node->append_conf('sslconfig.conf', 'ssl_groups=X25519:prime256v1:secp521r1');
$node->append_conf('sslconfig.conf',
'ssl_groups=X25519:prime256v1:secp521r1');
$node->append_conf('sslconfig.conf',
'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256');

@@ -70,7 +70,8 @@ ok( $stderr =~
);

# Cleanup
$node_publisher->safe_psql('postgres', qq[
$node_publisher->safe_psql(
'postgres', qq[
DROP PUBLICATION mypub;
SELECT pg_drop_replication_slot('mysub');
]);
@@ -86,32 +87,38 @@ sub test_swap
my ($table_name, $pubname, $appname) = @_;

# Confirms tuples can be replicated
$node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (1);");
$node_publisher->safe_psql('postgres',
"INSERT INTO $table_name VALUES (1);");
$node_publisher->wait_for_catchup($appname);
my $result =
$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
is($result, qq(1), 'check replication worked well before renaming a publication');
$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
is($result, qq(1),
'check replication worked well before renaming a publication');

# Swap the name of publications; $pubname <-> pub_empty
$node_publisher->safe_psql('postgres', qq[
$node_publisher->safe_psql(
'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
]);

# Insert the data again
$node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (2);");
$node_publisher->safe_psql('postgres',
"INSERT INTO $table_name VALUES (2);");
$node_publisher->wait_for_catchup($appname);

# Confirms the second tuple won't be replicated because $pubname does not
# contains relations anymore.
$result =
$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name ORDER BY a");
$node_subscriber->safe_psql('postgres',
"SELECT a FROM $table_name ORDER BY a");
is($result, qq(1),
'check the tuple inserted after the RENAME was not replicated');

# Restore the name of publications because it can be called several times
$node_publisher->safe_psql('postgres', qq[
$node_publisher->safe_psql(
'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
@@ -124,7 +131,8 @@ $node_publisher->safe_psql('postgres', $ddl);
$node_subscriber->safe_psql('postgres', $ddl);

# Create publications and a subscription
$node_publisher->safe_psql('postgres', qq[
$node_publisher->safe_psql(
'postgres', qq[
CREATE PUBLICATION pub_empty;
CREATE PUBLICATION pub_for_tab FOR TABLE test1;
CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES;
@@ -139,19 +147,20 @@ test_swap('test1', 'pub_for_tab', 'tap_sub');

# Switches a publication which includes all tables
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;"
);
"ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;");
$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');

# Confirms RENAME command works well for ALL TABLES publication
test_swap('test2', 'pub_for_all_tables', 'tap_sub');

# Cleanup
$node_publisher->safe_psql('postgres', qq[
$node_publisher->safe_psql(
'postgres', qq[
DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables;
DROP TABLE test1, test2;
]);
$node_subscriber->safe_psql('postgres', qq[
$node_subscriber->safe_psql(
'postgres', qq[
DROP SUBSCRIPTION tap_sub;
DROP TABLE test1, test2;
]);

@@ -51,8 +51,7 @@ $node_subscriber1->safe_psql('postgres',
);
# make a BRIN index to test aminsertcleanup logic in subscriber
$node_subscriber1->safe_psql('postgres',
"CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)"
);
"CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)");
$node_subscriber1->safe_psql('postgres',
"CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
);

@@ -108,11 +108,12 @@ $node_publisher->poll_query_until('postgres',

my $offset = -s $node_publisher->logfile;

$node_publisher->safe_psql('postgres',"INSERT INTO tab_3 values(1)");
$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)");

# Verify that a warning is logged.
$node_publisher->wait_for_log(
qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/, $offset);
qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/,
$offset);

$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3");
@@ -128,10 +129,11 @@ $node_publisher->wait_for_catchup('tap_sub');

# Verify that the insert operation gets replicated to subscriber after
# publication is created.
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM tab_3");
is($result, qq(1
2), 'check that the incremental data is replicated after the publication is created');
$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3");
is( $result, qq(1
2),
'check that the incremental data is replicated after the publication is created'
);

# shutdown
$node_subscriber->stop('fast');

@@ -26,7 +26,8 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");

$node_publisher->safe_psql('postgres',
"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"
);

# Create same table on subscriber
$node_subscriber->safe_psql('postgres',