1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-26 01:44:06 +03:00

BUG#49978: Replication tests don't clean up replication state at the end

Major replication test framework cleanup. This does the following:
 - Ensure that all tests clean up the replication state when they
   finish, by making check-testcase check the output of SHOW SLAVE STATUS.
   This implies:
    - Slave must not be running after the test has finished. This is good
      because it removes the risk for sporadic errors in subsequent
      tests when a test forgets to sync correctly.
    - Slave SQL and IO errors must be cleared when the test ends. This is
      good because we will notice if a test gets an unexpected error in
      the slave threads near the end.
    - We no longer have to clean up before a test starts.
 - Ensure that all tests that wait for an error in one of the slave
   threads wait for a specific error. It is no longer possible to
   source wait_for_slave_[sql|io]_to_stop.inc when there is an error
   in one of the slave threads. This is good because:
    - If a test expects an error but there is a bug that causes
      another error to happen, or if it stops the slave thread without
      an error, then we will notice.
    - When developing tests, wait_for_*_to_[start|stop].inc will fail
      immediately if there is an error in the relevant slave thread.
      Before this patch, we had to wait for the timeout.
 - Remove duplicated and repeated code for setting up unusual replication
   topologies. Now, there is a single file that is capable of setting
   up arbitrary topologies (include/rpl_init.inc, but
   include/master-slave.inc is still available for the most common
   topology). Tests can now end with include/rpl_end.inc, which will clean
   up correctly no matter what topology is used. The topology can be
   changed with include/rpl_change_topology.inc.
 - Improved debug information when tests fail. This includes:
    - debug info is printed on all servers configured by include/rpl_init.inc
    - User can set $rpl_debug=1, which makes auxiliary replication files
      print relevant debug info.
 - Improved documentation for all auxiliary replication files. Now they
   describe purpose, usage, parameters, and side effects.
 - Many small code cleanups:
    - Made have_innodb.inc output a sensible error message.
    - Moved contents of rpl000017-slave.sh into rpl000017.test
    - Added mysqltest variables that expose the current state of
      disable_warnings/enable_warnings and friends.
    - Too many to list here: see per-file comments for details.
This commit is contained in:
Sven Sandberg
2010-12-19 18:07:28 +01:00
parent 82e887e3b2
commit 09c80e12c5
740 changed files with 6994 additions and 5608 deletions

View File

@@ -0,0 +1,170 @@
==== Create t1 on all servers. ====
include/rpl_init.inc [topology=1->2->3->4->5->6->7->8->9]
CREATE TABLE t1 (a INT);
include/rpl_end.inc
==== Test 3-server topologies ====
include/rpl_init.inc [topology=1 -> 2]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 12'
[connection server_1]
DELETE FROM t1;
INSERT INTO t1 VALUES (1);
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (1);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=2 -> 3]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 23'
[connection server_1]
DELETE FROM t1;
INSERT INTO t1 VALUES (2);
[connection server_2]
DELETE FROM t1;
INSERT INTO t1 VALUES (2);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=none]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ''
[connection server_1]
DELETE FROM t1;
INSERT INTO t1 VALUES (3);
[connection server_2]
DELETE FROM t1;
INSERT INTO t1 VALUES (3);
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (3);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=1->2, 2->1]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 212'
[connection server_1]
DELETE FROM t1;
INSERT INTO t1 VALUES (4);
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (4);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=1->2->1]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 212'
[connection server_2]
DELETE FROM t1;
INSERT INTO t1 VALUES (5);
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (5);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=2->1->2]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 212'
[connection server_1]
DELETE FROM t1;
INSERT INTO t1 VALUES (6);
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (6);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=1->2->3]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 123'
[connection server_1]
DELETE FROM t1;
INSERT INTO t1 VALUES (7);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=2->3->2->1]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 323 21'
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (8);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=1->2,2->3,3->1]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 23123'
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (9);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=1->3->2->1]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 13213'
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (10);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1]
include/rpl_end.inc
==== Test 6-server topologies ====
include/rpl_init.inc [topology=1->2->3->4->1->5->6]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 341234156'
[connection server_1]
DELETE FROM t1;
INSERT INTO t1 VALUES (11);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1,server_4:t1,server_5:t1,server_6:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=3->4->5->6->3->1->2]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 4563456 312'
[connection server_4]
DELETE FROM t1;
INSERT INTO t1 VALUES (12);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1,server_4:t1,server_5:t1,server_6:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=6->5->4->3->2->1]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 65 54 43 32 21'
[connection server_6]
DELETE FROM t1;
INSERT INTO t1 VALUES (13);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1,server_4:t1,server_5:t1,server_6:t1]
include/rpl_end.inc
include/rpl_init.inc [topology=1->2->3->1,4->5->6]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 456 23123'
[connection server_3]
DELETE FROM t1;
INSERT INTO t1 VALUES (14);
[connection server_4]
DELETE FROM t1;
INSERT INTO t1 VALUES (14);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1,server_4:t1,server_5:t1,server_6:t1]
include/rpl_end.inc
==== Test 9-server topology ====
include/rpl_init.inc [topology=1->2, 2->3, 3->4, 4->5, 5->1, 1->6, 6->7, 6->8, 8->9]
include/rpl_generate_sync_chain.inc
rpl_sync_chain= ' 345123451689 67'
[connection server_2]
DELETE FROM t1;
INSERT INTO t1 VALUES (15);
include/rpl_sync.inc
include/diff_tables.inc [server_1:t1,server_2:t1,server_3:t1,server_4:t1,server_5:t1,server_6:t1,server_7:t1,server_8:t1,server_9:t1]
include/rpl_end.inc
==== Clean up ====
include/rpl_init.inc [topology=1->2->3->4->5->6->7->8->9]
DROP TABLE t1;
include/rpl_end.inc