mariadb/mysql-test/suite/rpl/t/rpl_test_framework.test
Sven Sandberg 09c80e12c5 BUG#49978: Replication tests don't clean up replication state at the end
Major replication test framework cleanup. This does the following:
 - Ensure that all tests clean up the replication state when they
   finish, by making check-testcase check the output of SHOW SLAVE STATUS.
   This implies:
    - The slave must not be running after the test has finished. This
      is good because it removes the risk of sporadic errors in
      subsequent tests when a test forgets to sync correctly.
    - Slave SQL and IO errors must be cleared when the test ends. This
      is good because we will notice if a test gets an unexpected
      error in the slave threads near the end.
    - We no longer have to clean up before a test starts.
 - Ensure that all tests that wait for an error in one of the slave
   threads wait for a specific error (see the first sketch below). It
   is no longer possible to source wait_for_slave_[sql|io]_to_stop.inc
   when there is an error in one of the slave threads. This is good
   because:
    - If a test expects an error but a bug causes a different error
      to happen, or stops the slave thread without any error, then we
      will notice.
    - When developing tests, wait_for_*_to_[start|stop].inc will fail
      immediately if there is an error in the relevant slave thread.
      Before this patch, we had to wait for the timeout.
 - Remove duplicated code for setting up unusual replication
   topologies. Now there is a single file capable of setting up
   arbitrary topologies (include/rpl_init.inc; include/master-slave.inc
   is still available for the most common topology). Tests can now end
   with include/rpl_end.inc, which cleans up correctly no matter what
   topology is used. The topology can be changed with
   include/rpl_change_topology.inc (see the second sketch below).
 - Improved debug information when tests fail. This includes:
    - debug info is printed on all servers configured by include/rpl_init.inc
    - User can set $rpl_debug=1, which makes auxiliary replication files
      print relevant debug info.
 - Improved documentation for all auxiliary replication files. Now they
   describe purpose, usage, parameters, and side effects.
 - Many small code cleanups:
    - Made have_innodb.inc output a sensible error message.
    - Moved contents of rpl000017-slave.sh into rpl000017.test
    - Added mysqltest variables that expose the current state of
      disable_warnings/enable_warnings and friends.
    - Too many to list here: see per-file comments for details.
2010-12-19 18:07:28 +01:00
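As an aside (not part of the commit message or of the test file below), here is a minimal sketch of the error-waiting convention described above, assuming the standard master/slave setup from include/master-slave.inc; the error number 1062 is only an illustrative choice:

    --connection slave
    # Wait for the SQL thread to stop with exactly this error; the
    # include file fails at once if a different error occurs.
    --let $slave_sql_errno= 1062
    --source include/wait_for_slave_sql_error.inc

After handling the error, the test still has to clear it before ending, since check-testcase now verifies the output of SHOW SLAVE STATUS.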
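Similarly, a sketch (again illustrative only, assuming the test supplies a .cnf file that starts three servers) of setting up, rewiring, and tearing down a topology with the new include files:

    # Optional: make the auxiliary rpl_*.inc files print debug info.
    --let $rpl_debug= 1

    # Servers 1 and 2 replicate from each other; 3 is a slave of 2.
    --let $rpl_server_count= 3
    --let $rpl_topology= 1->2->1, 2->3
    --source include/rpl_init.inc

    # ... test body ...

    # Rewire the same servers into a simple chain.
    --let $rpl_topology= 1->2->3
    --source include/rpl_change_topology.inc

    # Clean up on all servers, no matter what the topology is.
    --source include/rpl_end.inc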


# ==== Purpose ====
#
# Test that the sync chain generated by
# include/rpl_change_topology.inc (sourced from include/rpl_init.inc)
# is correct.
#
# We test a number of different topologies. Each topology is tested
# in extra/rpl_tests/rpl_test_framework.inc. See
# extra/rpl_tests/rpl_test_framework.inc for details on how the sync
# chain is tested.
#
# ==== Related bugs ====
#
# BUG#49978: Replication tests don't clean up replication state at the end

# We only need to execute this test once. Also, we rely on 'DELETE
# FROM t1' to remove rows in slave tables that don't exist in master
# tables (see extra/rpl_tests/rpl_test_framework.inc for details).
--source include/have_binlog_format_statement.inc

--echo ==== Create t1 on all servers. ====

if ($mtr_supports_more_than_10_servers)
{
--let $rpl_server_count= 15
--let $rpl_topology= 1->2->3->4->5->6->7->8->9->10->11->12->13->14->15
}
if (!$mtr_supports_more_than_10_servers)
{
--let $rpl_server_count= 9
--let $rpl_topology= 1->2->3->4->5->6->7->8->9
}
--source include/rpl_init.inc
CREATE TABLE t1 (a INT);
--source include/rpl_end.inc

# Initialize $next_number before the first call to
# extra/rpl_tests/rpl_test_framework.inc
--let $next_number= 0

--echo ==== Test 3-server topologies ====

--let $rpl_server_count= 3

--let $rpl_topology= 1 -> 2
--let $masters= 1,3
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 2 -> 3
--let $masters= 1,2
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= none
--let $masters= 1,2,3
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 1->2, 2->1
--let $masters= 1,3
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 1->2->1
--let $masters= 2,3
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 2->1->2
--let $masters= 1,3
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 1->2->3
--let $masters= 1
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 2->3->2->1
--let $masters= 3
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 1->2,2->3,3->1
--let $masters= 3
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 1->3->2->1
--let $masters= 3
--source extra/rpl_tests/rpl_test_framework.inc

--echo ==== Test 6-server topologies ====

--let $rpl_server_count= 6

--let $rpl_topology= 1->2->3->4->1->5->6
--let $masters= 1
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 3->4->5->6->3->1->2
--let $masters= 4
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 6->5->4->3->2->1
--let $masters= 6
--source extra/rpl_tests/rpl_test_framework.inc

--let $rpl_topology= 1->2->3->1,4->5->6
--let $masters= 3,4
--source extra/rpl_tests/rpl_test_framework.inc

--echo ==== Test 9-server topology ====

--let $rpl_server_count= 9

--let $rpl_topology= 1->2, 2->3, 3->4, 4->5, 5->1, 1->6, 6->7, 6->8, 8->9
--let $masters= 2
--source extra/rpl_tests/rpl_test_framework.inc

if ($mtr_supports_more_than_10_servers) {
--echo ==== Test 15-server topologies ====

--let $rpl_server_count= 15

--let $rpl_topology= 1->2->3->4->5->6->7->8->9->10->11->12->13->14->15->1
--let $masters= 2
--source extra/rpl_tests/rpl_test_framework.inc

# This is a binary tree
--let $rpl_topology= 1->2->4->8,1->3->6->12,2->5->10,3->7->14,4->9,5->11,6->13,7->15
--let $masters= 1
--source extra/rpl_tests/rpl_test_framework.inc
}

--echo ==== Clean up ====

if ($mtr_supports_more_than_10_servers) {
--let $rpl_topology= 1->2->3->4->5->6->7->8->9->10->11->12->13->14->15
}
if (!$mtr_supports_more_than_10_servers) {
--let $rpl_topology= 1->2->3->4->5->6->7->8->9
}
--source include/rpl_init.inc

--connection server_1
DROP TABLE t1;

--source include/rpl_end.inc