mariadb/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi.test
Sven Sandberg 09c80e12c5 BUG#49978: Replication tests don't clean up replication state at the end
Major replication test framework cleanup. This does the following:
 - Ensure that all tests clean up the replication state when they
   finish, by making check-testcase check the output of SHOW SLAVE STATUS.
   This implies:
    - Slave must not be running after the test finishes. This is good
      because it removes the risk of sporadic errors in subsequent
      tests when a test forgets to sync correctly.
    - Slave SQL and IO errors must be cleared when test ends. This is
      good because we will notice if a test gets an unexpected error in
      the slave threads near the end.
    - We no longer have to clean up before a test starts.
 - Ensure that all tests that wait for an error in one of the slave
   threads wait for a specific error. It is no longer possible to
   source wait_for_slave_[sql|io]_to_stop.inc when there is an error
   in one of the slave threads. This is good because:
    - If a test expects an error but there is a bug that causes
      another error to happen, or if it stops the slave thread without
      an error, then we will notice.
    - When developing tests, wait_for_*_to_[start|stop].inc will fail
      immediately if there is an error in the relevant slave thread.
      Before this patch, we had to wait for the timeout.
 - Remove duplicated code for setting up unusual replication
   topologies. Now, there is a single file that is capable of setting
   up arbitrary topologies (include/rpl_init.inc, but
   include/master-slave.inc is still available for the most common
   topology). Tests can now end with include/rpl_end.inc, which will clean
   up correctly no matter what topology is used. The topology can be
   changed with include/rpl_change_topology.inc (a short usage sketch
   follows this list).
 - Improved debug information when tests fail. This includes:
    - debug info is printed on all servers configured by include/rpl_init.inc
    - The user can set $rpl_debug=1, which makes the auxiliary
      replication files print relevant debug info.
 - Improved documentation for all auxiliary replication files. Now they
   describe purpose, usage, parameters, and side effects.
 - Many small code cleanups:
    - Made have_innodb.inc output a sensible error message.
    - Moved contents of rpl000017-slave.sh into rpl000017.test.
    - Added mysqltest variables that expose the current state of
      disable_warnings/enable_warnings and friends.
    - Too many to list here: see per-file comments for details.
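
As an illustration, a test skeleton using the new framework could look
like the following sketch (parameter values such as the error number
are examples only; see the headers of the include files for the
authoritative documentation):

    # Set up an unusual topology (here circular) instead of
    # sourcing master-slave.inc:
    --let $rpl_topology= 1->2->1
    --source include/rpl_init.inc

    # ... test body ...

    # Wait for a specific error in the slave SQL thread, e.g.
    # ER_DUP_ENTRY (1062), instead of waiting for it merely to stop:
    --let $slave_sql_errno= 1062
    --source include/wait_for_slave_sql_error.inc

    # Clear the error (e.g., fix the offending row and restart the
    # slave thread), then clean up whatever topology was used:
    --source include/rpl_end.inc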
2010-12-19 18:07:28 +01:00


--source include/have_multi_ndb.inc
--source include/have_binlog_format_mixed_or_row.inc
# We need server 3 later on in this test.
--let $rpl_server_count= 3
--source include/master-slave.inc
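# master-slave.inc sets up the usual 1->2 topology (master = server 1,
# slave = server 2); $rpl_server_count above makes the framework
# configure server 3 as well. Note that the connection named "server2"
# (the second mysqld on the master cluster) is server 3 in the
# framework's numbering, which is why $rpl_topology below says 3->2.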
--connection master
# note: server2 is another "master" connected to the master cluster
#
# Currently this test only works with NDB, since it retrieves "old"
# binlog positions from mysql.ndb_binlog_index and mysql.ndb_apply_status.
#
# create a table with one row, and make sure the other "master" gets it
CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB;
connection server2;
reset master;
SHOW TABLES;
connection master;
INSERT INTO t1 VALUES ("row1","will go away",1);
SELECT * FROM t1 ORDER BY c3;
connection server2;
SELECT * FROM t1 ORDER BY c3;
# sync the slave, retrieve the epoch, and stop the slave
connection master;
sync_slave_with_master;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM mysql.ndb_apply_status;
let $the_epoch= `select @the_epoch`;
SELECT * FROM t1 ORDER BY c3;
stop slave;
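# (mysql.ndb_apply_status on the slave records which epochs have been
# applied, and mysql.ndb_binlog_index on each master mysqld maps epochs
# to binlog coordinates; together they make the fail-over below possible)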
# get the master binlog pos from the epoch, from the _other_ "master", server2
connection server2;
--replace_result $the_epoch <the_epoch>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM mysql.ndb_binlog_index WHERE epoch = $the_epoch;
let $the_pos= `SELECT @the_pos`;
let $the_file= `SELECT @the_file`;
# now connect the slave to the _other_ "master"
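# (rpl_change_topology.inc reconfigures replication for the new topology;
# the "2:" prefixes below mean that the binlog coordinates apply to
# server 2, i.e. the slave, when CHANGE MASTER points it at server 3)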
--let $rpl_topology= 3->2
--let $rpl_master_log_file= 2:$the_file
--let $rpl_master_log_pos= 2:$the_pos
--source include/rpl_change_topology.inc
--connection slave
--source include/start_slave.inc
# insert some more values on the first master
connection master;
INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
DELETE FROM t1 WHERE c3 = 1;
UPDATE t1 SET c2="should go away" WHERE c3 = 2;
UPDATE t1 SET c2="C" WHERE c3 = 3;
DELETE FROM t1 WHERE c3 = 2;
SELECT * FROM t1 ORDER BY c3;
# insert another row, and check that we have it on the slave
connection server2;
INSERT INTO t1 VALUES ("row5","E",5);
SELECT * FROM t1 ORDER BY c3;
sync_slave_with_master;
SELECT * FROM t1 ORDER BY c3;
--echo ==== clean up ====
connection server2;
DROP TABLE t1;
sync_slave_with_master;
--source include/rpl_end.inc