
MDEV-30421 more tests cleaned up

All the .inc files included from binlog_encryption have been refactored.
Aleksey Midenkov
2023-03-23 21:07:32 +03:00
parent bdf5580611
commit 91e5e47a50
76 changed files with 4709 additions and 4710 deletions
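
For reference, the binlog_encryption wrapper tests shown below all follow the same pattern: instead of sourcing a shared .inc file from suite/rpl/include, each wrapper now sources the corresponding .test file from suite/rpl/t directly. A minimal sketch of the pattern, taken from the rpl_binlog_errors wrapper in the first hunk below:

    # before this commit: source the shared include file
    --let $binlog_limit= 5,1
    --source suite/rpl/include/rpl_binlog_errors.inc

    # after this commit: source the rpl suite's test file directly
    --let $binlog_limit= 5,1
    --source suite/rpl/t/rpl_binlog_errors.test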

View File

@@ -1,2 +1,2 @@
 --let $binlog_limit= 5,1
---source suite/rpl/include/rpl_binlog_errors.inc
+--source suite/rpl/t/rpl_binlog_errors.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_cant_read_event_incident.inc
+--source suite/rpl/t/rpl_cant_read_event_incident.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_checksum.inc
+--source suite/rpl/t/rpl_checksum.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_checksum_cache.inc
+--source suite/rpl/t/rpl_checksum_cache.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_corruption.inc
+--source suite/rpl/t/rpl_corruption.test

View File

@@ -558,3 +558,27 @@ a
 connection server_1;
 DROP TABLE t1;
 include/rpl_end.inc
+#
+# Start of 10.2 tests
+#
+#
+# MDEV-10134 Add full support for DEFAULT
+#
+CREATE TABLE t1 (a VARCHAR(100) DEFAULT BINLOG_GTID_POS("master-bin.000001", 600));
+ERROR HY000: Function or expression 'binlog_gtid_pos()' cannot be used in the DEFAULT clause of `a`
+#
+# End of 10.2 tests
+#
+#
+# Start of 10.3 tests
+#
+#
+# MDEV-13967 Parameter data type control for Item_long_func
+#
+SELECT MASTER_GTID_WAIT(ROW(1,1),'str');
+ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait'
+SELECT MASTER_GTID_WAIT('str',ROW(1,1));
+ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait'
+#
+# End of 10.3 tests
+#

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_gtid_basic.inc
+--source suite/rpl/t/rpl_gtid_basic.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_incident.inc
+--source suite/rpl/t/rpl_incident.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_init_slave_errors.inc
+--source suite/rpl/t/rpl_init_slave_errors.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_loaddata_local.inc
+--source suite/rpl/t/rpl_loaddata_local.test

View File

@@ -1,4 +1,4 @@
---source suite/rpl/include/rpl_loadfile.inc
+--source suite/rpl/t/rpl_loadfile.test
 --let $datadir= `SELECT @@datadir`

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_packet.inc
+--source suite/rpl/t/rpl_packet.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_parallel_ignored_errors.inc
+--source suite/rpl/t/rpl_parallel_ignored_errors.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc
+--source suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_relayrotate.inc
+--source suite/rpl/t/rpl_relayrotate.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_semi_sync.inc
+--source suite/rpl/t/rpl_semi_sync.test

View File

@@ -1,2 +1,2 @@
 --let $use_remote_mysqlbinlog= 1
---source suite/rpl/include/rpl_skip_replication.inc
+--source suite/rpl/t/rpl_skip_replication.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_special_charset.inc
+--source suite/rpl/t/rpl_special_charset.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_sporadic_master.inc
+--source suite/rpl/t/rpl_sporadic_master.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_ssl.inc
+--source suite/rpl/t/rpl_ssl.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_stm_relay_ign_space.inc
+--source suite/rpl/t/rpl_stm_relay_ign_space.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_switch_stm_row_mixed.inc
+--source suite/rpl/t/rpl_switch_stm_row_mixed.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_sync_test.inc
+--source suite/rpl/t/rpl_sync.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_temporal_format_default_to_default.inc
+--source suite/rpl/t/rpl_temporal_format_default_to_default.test

View File

@@ -3,4 +3,4 @@
 --let $force_master_mysql56_temporal_format=false;
 --let $force_slave_mysql56_temporal_format=true;
---source suite/rpl/include/rpl_temporal_format_default_to_default.inc
+--source suite/rpl/t/rpl_temporal_format_default_to_default.test

View File

@@ -1,4 +1,4 @@
 --let $force_master_mysql56_temporal_format=true;
 --let $force_slave_mysql56_temporal_format=false;
---source suite/rpl/include/rpl_temporal_format_default_to_default.inc
+--source suite/rpl/t/rpl_temporal_format_default_to_default.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_typeconv.inc
+--source suite/rpl/t/rpl_typeconv.test

View File

@@ -1 +1 @@
---source suite/rpl/include/rpl_switch_stm_row_mixed.inc
+--source suite/rpl/t/rpl_switch_stm_row_mixed.test

View File

@@ -1,438 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# Usage:
# --let $binlog_limit= X[,Y] # optional
#
# The semantics of the value are the same as in include/show_binlog_events.inc
# which the script calls as a part of the test flow.
# The goal is to print the event demonstrating the triggered error,
# so normally Y should be 1 (print the exact event only);
# however, depending on test-specific server options, the offset X
# can be different.
#
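# Illustrative usage sketch (not part of the original file): a wrapper test
# in another suite could set the limit before sourcing this file, e.g.
#   --let $binlog_limit= 5,1
#   --source suite/rpl/include/rpl_binlog_errors.inc
# which, assuming the same LIMIT offset,count semantics as
# include/show_binlog_events.inc, prints a single event at offset 5.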
# BUG#46166: MYSQL_BIN_LOG::new_file_impl is not propagating error
# when generating new name.
#
# WHY
# ===
#
# We want to check whether error is reported or not when
# new_file_impl fails (this may happen when rotation is not
# possible because there is some problem finding a
# unique filename).
#
# HOW
# ===
#
# Test cases are documented inline.
-- source include/have_innodb.inc
-- source include/have_debug.inc
-- source include/master-slave.inc
-- echo #######################################################################
-- echo ####################### PART 1: MASTER TESTS ##########################
-- echo #######################################################################
### ACTION: stopping slave as it is not needed for the first part of
### the test
-- connection slave
-- source include/stop_slave.inc
-- connection master
call mtr.add_suppression("Can't generate a unique log-filename");
call mtr.add_suppression("Writing one row to the row-based binary log failed.*");
call mtr.add_suppression("Error writing file .*");
call mtr.add_suppression("Could not use master-bin for logging");
SET @old_debug= @@global.debug_dbug;
### ACTION: create a large file (> 4096 bytes) that will be later used
### in LOAD DATA INFILE to check binlog errors in its vicinity
-- let $load_file= $MYSQLTEST_VARDIR/tmp/bug_46166.data
-- let $MYSQLD_DATADIR= `select @@datadir`
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval SELECT repeat('x',8192) INTO OUTFILE '$load_file'
### ACTION: create a small file (< 4096 bytes) that will be later used
### in LOAD DATA INFILE to check for absence of binlog errors
### when file loading this file does not force flushing and
### rotating the binary log
-- let $load_file2= $MYSQLTEST_VARDIR/tmp/bug_46166-2.data
-- let $MYSQLD_DATADIR= `select @@datadir`
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval SELECT repeat('x',10) INTO OUTFILE '$load_file2'
RESET MASTER;
-- echo ###################### TEST #1
### ASSERTION: no problem flushing logs (should show two binlogs)
FLUSH LOGS;
-- echo # assert: must show two binlogs
-- source include/show_binary_logs.inc
-- echo ###################### TEST #2
### ASSERTION: check that FLUSH LOGS actually fails and reports
### failure back to the user if find_uniq_filename fails
### (should show just one binlog)
RESET MASTER;
SET @@global.debug_dbug="d,error_unique_log_filename";
-- error ER_NO_UNIQUE_LOGFILE
FLUSH LOGS;
-- echo # assert: must show one binlog
-- source include/show_binary_logs.inc
### ACTION: clean up and move to next test
SET @@global.debug_dbug=@old_debug;
RESET MASTER;
-- echo ###################### TEST #3
### ACTION: create some tables (t1, t2, t4) and insert some values in
### table t1
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a VARCHAR(16384)) Engine=InnoDB;
CREATE TABLE t4 (a VARCHAR(16384));
INSERT INTO t1 VALUES (1);
RESET MASTER;
### ASSERTION: we force rotation of the binary log because it exceeds
### the max_binlog_size option (should show two binary
### logs)
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
# shows two binary logs
-- echo # assert: must show two binlog
-- source include/show_binary_logs.inc
# clean up the table and the binlog to be used in next part of test
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #4
### ASSERTION: load the big file into a transactional table and check
### that it reports error. The table will contain the
### changes performed despite the fact that it reported an
### error.
SET @@global.debug_dbug="d,error_unique_log_filename";
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
# show table
-- echo # assert: must show one entry
SELECT count(*) FROM t2;
# clean up the table and the binlog to be used in next part of test
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #5
### ASSERTION: load the small file into a transactional table and
### check that it succeeds
SET @@global.debug_dbug="d,error_unique_log_filename";
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval LOAD DATA INFILE '$load_file2' INTO TABLE t2
# show table
-- echo # assert: must show one entry
SELECT count(*) FROM t2;
# clean up the table and the binlog to be used in next part of test
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #6
### ASSERTION: check that even if one is using a transactional table
### and explicit transactions (no autocommit) if rotation
### fails we get the error. The transaction is not rolled back
### because rotation happens after the commit.
SET @@global.debug_dbug="d,error_unique_log_filename";
SET AUTOCOMMIT=0;
INSERT INTO t2 VALUES ('muse');
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
INSERT INTO t2 VALUES ('muse');
-- error ER_NO_UNIQUE_LOGFILE
COMMIT;
### ACTION: Show the contents of the table after the test
-- echo # assert: must show three entries
SELECT count(*) FROM t2;
### ACTION: clean up and move to the next test
SET AUTOCOMMIT= 1;
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #7
### ASSERTION: check that on a non-transactional table, if rotation
### fails then an error is reported and an incident event
### is written to the current binary log.
SET @@global.debug_dbug="d,error_unique_log_filename";
# Disable logging Annotate_rows events to preserve events count.
let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`;
SET @@binlog_annotate_row_events= 0;
SELECT count(*) FROM t4;
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4
-- echo # assert: must show 1 entry
SELECT count(*) FROM t4;
-- echo ### check that the incident event is written to the current log
SET @@global.debug_dbug=@old_debug;
if (!$binlog_limit)
{
-- let $binlog_limit= 4,1
}
-- source include/show_binlog_events.inc
# clean up and move to next test
DELETE FROM t4;
--disable_query_log
eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved;
--enable_query_log
RESET MASTER;
-- echo ###################### TEST #8
### ASSERTION: check that statements end up in error but they succeed
### on changing the data.
SET @@global.debug_dbug="d,error_unique_log_filename";
-- echo # must show 0 entries
SELECT count(*) FROM t4;
SELECT count(*) FROM t2;
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
-- error ER_NO_UNIQUE_LOGFILE
INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc');
-- echo # INFO: Count(*) Before Offending DELETEs
-- echo # assert: must show 1 entry
SELECT count(*) FROM t4;
-- echo # assert: must show 4 entries
SELECT count(*) FROM t2;
-- error ER_NO_UNIQUE_LOGFILE
DELETE FROM t4;
-- error ER_NO_UNIQUE_LOGFILE
DELETE FROM t2;
-- echo # INFO: Count(*) After Offending DELETEs
-- echo # assert: must show zero entries
SELECT count(*) FROM t4;
SELECT count(*) FROM t2;
# remove fault injection
SET @@global.debug_dbug=@old_debug;
-- echo ###################### TEST #9
### ASSERTION: check that if we disable binlogging, then statements
### succeed.
SET @@global.debug_dbug="d,error_unique_log_filename";
SET SQL_LOG_BIN=0;
INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'), ('ddd');
INSERT INTO t4 VALUES ('eee'), ('fff'), ('ggg'), ('hhh');
-- echo # assert: must show four entries
SELECT count(*) FROM t2;
SELECT count(*) FROM t4;
DELETE FROM t2;
DELETE FROM t4;
-- echo # assert: must show zero entries
SELECT count(*) FROM t2;
SELECT count(*) FROM t4;
SET SQL_LOG_BIN=1;
SET @@global.debug_dbug=@old_debug;
-- echo ###################### TEST #10
### ASSERTION: check that error is reported if there is a failure
### while registering the index file and the binary log
### file or failure to write the rotate event.
call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file.");
call mtr.add_suppression("Could not use .*");
RESET MASTER;
SHOW WARNINGS;
# +d,fault_injection_registering_index => injects fault on MYSQL_BIN_LOG::open
SET @@global.debug_dbug="d,fault_injection_registering_index";
-- replace_regex /\.[\\\/]master/master/
-- error ER_CANT_OPEN_FILE
FLUSH LOGS;
SET @@global.debug_dbug=@old_debug;
-- error ER_NO_BINARY_LOGGING
SHOW BINARY LOGS;
# issue some statements and check that they don't fail
CREATE TABLE t5 (a INT);
INSERT INTO t4 VALUES ('bbbbb');
INSERT INTO t2 VALUES ('aaaaa');
DELETE FROM t4;
DELETE FROM t2;
DROP TABLE t5;
flush tables;
-- echo ###################### TEST #11
### ASSERTION: check that error is reported if there is a failure
### while opening the index file and the binary log file or
### failure to write the rotate event.
# restart the server so that we have binlog again
--let $rpl_server_number= 1
--source include/rpl_restart_server.inc
# +d,fault_injection_openning_index => injects fault on MYSQL_BIN_LOG::open_index_file
SET @@global.debug_dbug="d,fault_injection_openning_index";
-- replace_regex /\.[\\\/]master/master/
-- error ER_CANT_OPEN_FILE
FLUSH LOGS;
SET @@global.debug_dbug=@old_debug;
-- error ER_FLUSH_MASTER_BINLOG_CLOSED
RESET MASTER;
# issue some statements and check that they don't fail
CREATE TABLE t5 (a INT);
INSERT INTO t4 VALUES ('bbbbb');
INSERT INTO t2 VALUES ('aaaaa');
DELETE FROM t4;
DELETE FROM t2;
DROP TABLE t5;
flush tables;
# restart the server so that we have binlog again
--let $rpl_server_number= 1
--source include/rpl_restart_server.inc
-- echo ###################### TEST #12
### ASSERTION: check that error is reported if there is a failure
### while writing the rotate event when creating a new log
### file.
# +d,fault_injection_new_file_rotate_event => injects fault on MYSQL_BIN_LOG::MYSQL_BIN_LOG::new_file_impl
SET @@global.debug_dbug="d,fault_injection_new_file_rotate_event";
-- error ER_ERROR_ON_WRITE
FLUSH LOGS;
SET @@global.debug_dbug=@old_debug;
-- error ER_FLUSH_MASTER_BINLOG_CLOSED
RESET MASTER;
# issue some statements and check that they don't fail
CREATE TABLE t5 (a INT);
INSERT INTO t4 VALUES ('bbbbb');
INSERT INTO t2 VALUES ('aaaaa');
DELETE FROM t4;
DELETE FROM t2;
DROP TABLE t5;
flush tables;
# restart the server so that we have binlog again
--let $rpl_server_number= 1
--source include/rpl_restart_server.inc
## clean up
DROP TABLE t1, t2, t4;
RESET MASTER;
# restart slave again
-- connection slave
-- source include/start_slave.inc
-- connection master
-- echo #######################################################################
-- echo ####################### PART 2: SLAVE TESTS ###########################
-- echo #######################################################################
### setup
--source include/rpl_reset.inc
-- connection slave
# slave suppressions
call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*");
call mtr.add_suppression("Error writing file .*");
call mtr.add_suppression("Could not use .*");
call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file.");
call mtr.add_suppression("Can't generate a unique log-filename .*");
-- echo ###################### TEST #13
#### ASSERTION: check against unique log filename error
-- let $io_thd_injection_fault_flag= error_unique_log_filename
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
-- echo ###################### TEST #14
#### ASSERTION: check against rotate failing
-- let $io_thd_injection_fault_flag= fault_injection_new_file_rotate_event
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
-- echo ###################### TEST #15
#### ASSERTION: check against relay log open failure
-- let $io_thd_injection_fault_flag= fault_injection_registering_index
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
-- echo ###################### TEST #16
#### ASSERTION: check against relay log index open failure
-- let $io_thd_injection_fault_flag= fault_injection_openning_index
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
### clean up
-- source include/stop_slave_sql.inc
RESET SLAVE;
RESET MASTER;
--remove_file $load_file
--remove_file $load_file2
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc

View File

@@ -1,83 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#
# Bug#11747416 : 32228 A disk full makes binary log corrupt.
#
#
# The test demonstrates how an error in reading from the binlog is
# propagated to the slave and reported there.
# Conditions for the bug include a crash while the last event was being
# written to the binlog, leaving it only partly written. With the fixes the
# event is no longer sent out; instead the dump thread sends a proper
# error message.
#
# The crash is not simulated. A binlog with a partly written event at its
# end is installed and replication is started from it.
#
--source include/have_binlog_format_mixed.inc
--source include/master-slave.inc
--connection slave
# Make sure the slave is stopped while we are messing with master.
# Otherwise we get occasional failures as the slave manages to re-connect
# to the newly started master and we get extra events applied, causing
# conflicts.
--source include/stop_slave.inc
--connection master
call mtr.add_suppression("Error in Log_event::read_log_event()");
--let $datadir= `SELECT @@datadir`
--let $rpl_server_number= 1
--source include/rpl_stop_server.inc
--remove_file $datadir/master-bin.000001
--copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001
--let $rpl_server_number= 1
--source include/rpl_start_server.inc
--source include/wait_until_connected_again.inc
# evidence of the partial binlog
--error ER_ERROR_WHEN_EXECUTING_COMMAND
show binlog events;
--connection slave
call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log");
reset slave;
start slave;
# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236
--let $slave_param=Last_IO_Errno
--let $slave_param_value=1236
--source include/wait_for_slave_param.inc
--let $slave_field_result_replace= / at [0-9]*/ at XXX/
--let $status_items= Last_IO_Errno, Last_IO_Error
--source include/show_slave_status.inc
#
# Cleanup
#
--connection master
reset master;
--connection slave
stop slave;
reset slave;
# Table was created from binlog, it may not be created if SQL thread is running
# slowly and IO thread reaches incident before SQL thread applies it.
--disable_warnings
drop table if exists t;
--enable_warnings
reset master;
--echo End of the tests
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc

View File

@@ -1,335 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# WL2540 replication events checksum
# Testing configuration parameters
--source include/have_debug.inc
--source include/have_binlog_format_mixed.inc
--source include/master-slave.inc
call mtr.add_suppression('Slave can not handle replication events with the checksum that master is configured to log');
call mtr.add_suppression('Replication event checksum verification failed');
# due to C failure simulation
call mtr.add_suppression('Relay log write failure: could not queue event from master');
call mtr.add_suppression('Master is configured to log replication events with checksum, but will not send such events to slaves that cannot process them');
# A. read/write access to the global vars:
# binlog_checksum master_verify_checksum slave_sql_verify_checksum
connection master;
set @master_save_binlog_checksum= @@global.binlog_checksum;
set @save_master_verify_checksum = @@global.master_verify_checksum;
select @@global.binlog_checksum as 'must be CRC32 because of the command line option';
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.binlog_checksum as 'no session var';
select @@global.master_verify_checksum as 'must be zero because of default';
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.master_verify_checksum as 'no session var';
connection slave;
set @slave_save_binlog_checksum= @@global.binlog_checksum;
set @save_slave_sql_verify_checksum = @@global.slave_sql_verify_checksum;
select @@global.slave_sql_verify_checksum as 'must be one because of default';
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.slave_sql_verify_checksum as 'no session var';
connection master;
source include/show_binary_logs.inc;
set @@global.binlog_checksum = NONE;
select @@global.binlog_checksum;
--echo *** must be rotations seen ***
source include/show_binary_logs.inc;
set @@global.binlog_checksum = default;
select @@global.binlog_checksum;
# testing lack of side-effects in non-effective update of binlog_checksum:
set @@global.binlog_checksum = CRC32;
select @@global.binlog_checksum;
set @@global.binlog_checksum = CRC32;
set @@global.master_verify_checksum = 0;
set @@global.master_verify_checksum = default;
--error ER_WRONG_VALUE_FOR_VAR
set @@global.binlog_checksum = ADLER32;
--error ER_WRONG_VALUE_FOR_VAR
set @@global.master_verify_checksum = 2; # the var is of bool type
connection slave;
set @@global.slave_sql_verify_checksum = 0;
set @@global.slave_sql_verify_checksum = default;
--error ER_WRONG_VALUE_FOR_VAR
set @@global.slave_sql_verify_checksum = 2; # the var is of bool type
#
# B. Old Slave to New master conditions
#
# while master does not send a checksum-ed binlog the Old Slave can
# work with the New Master
connection master;
set @@global.binlog_checksum = NONE;
create table t1 (a int);
# testing that binlog rotation preserves opt_binlog_checksum value
flush logs;
flush logs;
-- source include/wait_for_binlog_checkpoint.inc
flush logs;
sync_slave_with_master;
#connection slave;
# checking that rotation on the slave side leaves slave stable
flush logs;
flush logs;
flush logs;
select count(*) as zero from t1;
source include/stop_slave.inc;
connection master;
set @@global.binlog_checksum = CRC32;
-- source include/wait_for_binlog_checkpoint.inc
insert into t1 values (1) /* will not be applied on slave due to simulation */;
# instruction to the dump thread
connection slave;
set @saved_dbug = @@global.debug_dbug;
set @@global.debug_dbug='d,simulate_slave_unaware_checksum';
start slave;
--let $slave_io_errno= 1236
--let $show_slave_io_error= 1
source include/wait_for_slave_io_error.inc;
select count(*) as zero from t1;
set @@global.debug_dbug = @saved_dbug;
connection slave;
source include/start_slave.inc;
#
# C. checksum failure simulations
#
# C1. Failure by a client thread
connection master;
set @@global.master_verify_checksum = 1;
set @save_dbug = @@session.debug_dbug;
set @@session.debug_dbug='d,simulate_checksum_test_failure';
--error ER_ERROR_WHEN_EXECUTING_COMMAND
show binlog events;
SET debug_dbug= @save_dbug;
set @@global.master_verify_checksum = default;
#connection master;
sync_slave_with_master;
connection slave;
source include/stop_slave.inc;
connection master;
create table t2 (a int);
let $pos_master= query_get_value(SHOW MASTER STATUS, Position, 1);
connection slave;
# C2. Failure by IO thread
# instruction to io thread
set @saved_dbug = @@global.debug_dbug;
set @@global.debug_dbug='d,simulate_checksum_test_failure';
start slave io_thread;
# When the checksum error is detected, the slave sets error code 1913
# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately
# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io().
# So we usually get 1595, but it is occasionally possible to get 1913.
--let $slave_io_errno= 1595,1913
--let $show_slave_io_error= 0
source include/wait_for_slave_io_error.inc;
set @@global.debug_dbug = @saved_dbug;
# to make IO thread re-read it again w/o the failure
start slave io_thread;
let $slave_param= Read_Master_Log_Pos;
let $slave_param_value= $pos_master;
source include/wait_for_slave_param.inc;
# C3. Failure by SQL thread
# instruction to sql thread;
set @@global.slave_sql_verify_checksum = 1;
set @@global.debug_dbug='d,simulate_checksum_test_failure';
start slave sql_thread;
--let $slave_sql_errno= 1593
--let $show_slave_sql_error= 1
source include/wait_for_slave_sql_error.inc;
# resuming SQL thread to parse out the event w/o the failure
set @@global.debug_dbug = @saved_dbug;
source include/start_slave.inc;
connection master;
sync_slave_with_master;
#connection slave;
select count(*) as 'must be zero' from t2;
#
# D. Reset slave, Change-Master, Binlog & Relay-log rotations with
# random value on binlog_checksum on both master and slave
#
connection slave;
stop slave;
reset slave;
# randomize slave server's own checksum policy
set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE");
flush logs;
connection master;
set @@global.binlog_checksum= CRC32;
reset master;
flush logs;
create table t3 (a int, b char(5));
connection slave;
source include/start_slave.inc;
connection master;
sync_slave_with_master;
#connection slave;
select count(*) as 'must be zero' from t3;
source include/stop_slave.inc;
--replace_result $MASTER_MYPORT MASTER_PORT
eval change master to master_host='127.0.0.1',master_port=$MASTER_MYPORT, master_user='root';
connection master;
flush logs;
reset master;
insert into t3 value (1, @@global.binlog_checksum);
connection slave;
source include/start_slave.inc;
flush logs;
connection master;
sync_slave_with_master;
#connection slave;
select count(*) as 'must be one' from t3;
connection master;
set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE");
insert into t3 value (1, @@global.binlog_checksum);
sync_slave_with_master;
#connection slave;
#clean-up
connection master;
drop table t1, t2, t3;
set @@global.binlog_checksum = @master_save_binlog_checksum;
set @@global.master_verify_checksum = @save_master_verify_checksum;
#
# BUG#58564: flush_read_lock fails in mysql-trunk-bugfixing after merging with WL#2540
#
# Sanity check that verifies that no assertions are triggered because
# of old FD events (generated by versions prior to server released with
# checksums feature)
#
# There is no need for query log, if something wrong this should trigger
# an assertion
--disable_query_log
BINLOG '
MfmqTA8BAAAAZwAAAGsAAAABAAQANS41LjctbTMtZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA==
';
--enable_query_log
#connection slave;
sync_slave_with_master;
--echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters ***
--connection master
--source include/wait_for_binlog_checkpoint.inc
CREATE TABLE t4 (a INT PRIMARY KEY);
INSERT INTO t4 VALUES (1);
SET sql_log_bin=0;
CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename");
SET sql_log_bin=1;
SET @old_dbug= @@GLOBAL.debug_dbug;
SET debug_dbug= '+d,binlog_inject_new_name_error';
--error ER_NO_UNIQUE_LOGFILE
FLUSH LOGS;
SET debug_dbug= @old_dbug;
INSERT INTO t4 VALUES (2);
--connection slave
--let $slave_sql_errno= 1590
--source include/wait_for_slave_sql_error.inc
# Search the error log for the error message.
# The bug was that 4 garbage bytes were output in the middle of the error
# message; by searching for a pattern that spans that location, we can
# catch the error.
let $log_error_= `SELECT @@GLOBAL.log_error`;
if(!$log_error_)
{
# MySQL Server on windows is started with --console and thus
# does not know the location of its .err log, use default location
let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err;
}
--let SEARCH_FILE= $log_error_
--let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590
--source include/search_pattern_in_file.inc
SELECT * FROM t4 ORDER BY a;
STOP SLAVE IO_THREAD;
SET sql_slave_skip_counter= 1;
--source include/start_slave.inc
--connection master
--save_master_pos
--connection slave
--sync_with_master
SELECT * FROM t4 ORDER BY a;
--connection slave
set @@global.binlog_checksum = @slave_save_binlog_checksum;
set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum;
--echo End of tests
--connection master
DROP TABLE t4;
--source include/rpl_end.inc

View File

@@ -1,261 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
-- source include/have_innodb.inc
-- source include/master-slave.inc
--disable_warnings
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t2 set data=repeat.*'a', @act_size.*");
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t1 values.* NAME_CONST.*'n',.*, @data .*");
--enable_warnings
connection master;
set @save_binlog_cache_size = @@global.binlog_cache_size;
set @save_binlog_checksum = @@global.binlog_checksum;
set @save_master_verify_checksum = @@global.master_verify_checksum;
set @@global.binlog_cache_size = 4096;
set @@global.binlog_checksum = CRC32;
set @@global.master_verify_checksum = 1;
# restart slave to force the dump thread to verify events (on master side)
connection slave;
source include/stop_slave.inc;
source include/start_slave.inc;
connection master;
#
# Testing a critical part of checksum handling dealing with transaction cache.
# The cache's buffer size is set to be less than the transaction's footprint
# in binlog.
#
# To verify combined buffer-by-buffer read out of the file and fixing crc per event
# there are the following parts:
#
# 1. the event size is much less than the cache's buffer
# 2. the event size is bigger than the cache's buffer
# 3. the event size is approximately the same as the cache's buffer
# 4. all in above
#
# 1. the event size is much less than the cache's buffer
#
flush status;
show status like "binlog_cache_use";
show status like "binlog_cache_disk_use";
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# parameter to ensure the test slightly varies binlog content
# between different invocations
#
let $deviation_size=32;
eval create table t1 (a int PRIMARY KEY, b CHAR($deviation_size)) engine=innodb;
# Now we are going to create transaction which is long enough so its
# transaction binlog will be flushed to disk...
delimiter |;
create procedure test.p_init (n int, size int)
begin
while n > 0 do
select round(RAND() * size) into @act_size;
set @data = repeat('a', @act_size);
insert into t1 values(n, @data );
set n= n-1;
end while;
end|
delimiter ;|
let $1 = 4000; # PB2 can run it slow to time out on following sync_slave_with_master:s
begin;
--disable_warnings
# todo: check if it is really so.
#+Note 1592 Unsafe statement binlogged in statement format since BINLOG_FORMAT = STATEMENT. Reason for unsafeness: Statement uses a system function whose value may differ on slave.
eval call test.p_init($1, $deviation_size);
--enable_warnings
commit;
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t1, slave:test.t1;
source include/diff_tables.inc;
# undoing changes with verifying the above once again
connection master;
begin;
delete from t1;
commit;
sync_slave_with_master;
#
# 2. the event size is bigger than the cache's buffer
#
connection master;
flush status;
let $t2_data_size= `select 3 * @@global.binlog_cache_size`;
let $t2_aver_size= `select 2 * @@global.binlog_cache_size`;
let $t2_max_rand= `select 1 * @@global.binlog_cache_size`;
eval create table t2(a int auto_increment primary key, data VARCHAR($t2_data_size)) ENGINE=Innodb;
let $1=100;
--disable_query_log
begin;
while ($1)
{
eval select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size;
set @data = repeat('a', @act_size);
insert into t2 set data = @data;
dec $1;
}
commit;
--enable_query_log
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t2, slave:test.t2;
source include/diff_tables.inc;
# undoing changes with verifying the above once again
connection master;
begin;
delete from t2;
commit;
sync_slave_with_master;
#
# 3. the event size is approximately the same as the cache's buffer
#
connection master;
flush status;
let $t3_data_size= `select 2 * @@global.binlog_cache_size`;
let $t3_aver_size= `select (9 * @@global.binlog_cache_size) / 10`;
let $t3_max_rand= `select (2 * @@global.binlog_cache_size) / 10`;
eval create table t3(a int auto_increment primary key, data VARCHAR($t3_data_size)) engine=innodb;
let $1= 300;
--disable_query_log
begin;
while ($1)
{
eval select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size;
insert into t3 set data= repeat('a', @act_size);
dec $1;
}
commit;
--enable_query_log
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t3, slave:test.t3;
source include/diff_tables.inc;
# undoing changes with verifying the above once again
connection master;
begin;
delete from t3;
commit;
sync_slave_with_master;
#
# 4. all in above
#
connection master;
flush status;
delimiter |;
eval create procedure test.p1 (n int)
begin
while n > 0 do
case (select (round(rand()*100) % 3) + 1)
when 1 then
select round(RAND() * $deviation_size) into @act_size;
set @data = repeat('a', @act_size);
insert into t1 values(n, @data);
when 2 then
begin
select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size;
insert into t2 set data=repeat('a', @act_size);
end;
when 3 then
begin
select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size;
insert into t3 set data= repeat('a', @act_size);
end;
end case;
set n= n-1;
end while;
end|
delimiter ;|
let $1= 1000;
set autocommit= 0;
begin;
--disable_warnings
eval call test.p1($1);
--enable_warnings
commit;
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t1, slave:test.t1;
source include/diff_tables.inc;
let $diff_tables=master:test.t2, slave:test.t2;
source include/diff_tables.inc;
let $diff_tables=master:test.t3, slave:test.t3;
source include/diff_tables.inc;
connection master;
begin;
delete from t1;
delete from t2;
delete from t3;
commit;
drop table t1, t2, t3;
set @@global.binlog_cache_size = @save_binlog_cache_size;
set @@global.binlog_checksum = @save_binlog_checksum;
set @@global.master_verify_checksum = @save_master_verify_checksum;
drop procedure test.p_init;
drop procedure test.p1;
--source include/rpl_end.inc

View File

@@ -1,175 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
############################################################
# Purpose: WL#5064 Testing with corrupted events.
# The test emulates the corruption at the various stages
# of replication:
# - in binlog file
# - in network
# - in relay log
############################################################
#
# The tests intensively utilize @@global.debug. Note,
# Bug#11765758 - 58754,
# @@global.debug is read by the slave threads through dbug-interface.
# Hence, before a client thread sets @@global.debug we have to ensure that:
# (a) the slave threads are stopped, or (b) the slave threads are in
# sync and waiting.
--source include/have_debug.inc
--source include/master-slave.inc
# Block legal errors for MTR
call mtr.add_suppression('Found invalid event in binary log');
call mtr.add_suppression('Slave I/O: Relay log write failure: could not queue event from master');
call mtr.add_suppression('event read from binlog did not pass crc check');
call mtr.add_suppression('Replication event checksum verification failed');
call mtr.add_suppression('Event crc check failed! Most likely there is event corruption');
call mtr.add_suppression('Slave SQL: Error initializing relay log position: I/O error reading event at position .*, error.* 1593');
SET @old_master_verify_checksum = @@master_verify_checksum;
# Creating test table/data and set corruption position for testing
--echo # 1. Creating test table/data and set corruption position for testing
--connection master
--echo * insert/update/delete rows in table t1 *
# Corruption algorithm modifies only the first event and
# then will be reset. To avoid checking always the first event
# from binlog (usually it is FD) we randomly execute different
# statements and set position for corruption inside events.
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c VARCHAR(100));
--disable_query_log
let $i=`SELECT 3+CEILING(10*RAND())`;
let $j=1;
let $pos=0;
while ($i) {
eval INSERT INTO t1 VALUES ($j, 'a', NULL);
if (`SELECT RAND() > 0.7`)
{
eval UPDATE t1 SET c = REPEAT('a', 20) WHERE a = $j;
}
if (`SELECT RAND() > 0.8`)
{
eval DELETE FROM t1 WHERE a = $j;
}
if (!$pos) {
let $pos= query_get_value(SHOW MASTER STATUS, Position, 1);
--sync_slave_with_master
--source include/stop_slave.inc
--disable_query_log
--connection master
}
dec $i;
inc $j;
}
--enable_query_log
# Emulate corruption in binlog file when SHOW BINLOG EVENTS is executing
--echo # 2. Corruption in master binlog and SHOW BINLOG EVENTS
SET @saved_dbug = @@global.debug_dbug;
SET @@global.debug_dbug="d,corrupt_read_log_event_char";
--echo SHOW BINLOG EVENTS;
--disable_query_log
send_eval SHOW BINLOG EVENTS FROM $pos;
--enable_query_log
--error ER_ERROR_WHEN_EXECUTING_COMMAND
reap;
SET @@global.debug_dbug=@saved_dbug;
# Emulate corruption on master with crc checking on master
--echo # 3. Master read a corrupted event from binlog and send the error to slave
# We have a rare but nasty potential race here: if the dump thread on
# the master for the _old_ slave connection has not yet discovered
# that the slave has disconnected, we will inject the corrupt event on
# the wrong connection, and the test will fail
# (+d,corrupt_read_log_event2 corrupts only one event).
# So kill any lingering dump thread (we need to kill; otherwise dump thread
# could manage to send all events down the socket before seeing it close, and
# hang forever waiting for new binlog events to be created).
let $id= `select id from information_schema.processlist where command = "Binlog Dump"`;
if ($id)
{
--disable_query_log
--error 0,1094
eval kill $id;
--enable_query_log
}
let $wait_condition=
SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE command = 'Binlog Dump';
--source include/wait_condition.inc
SET @@global.debug_dbug="d,corrupt_read_log_event2_set";
--connection slave
START SLAVE IO_THREAD;
let $slave_io_errno= 1236;
--let $slave_timeout= 10
--source include/wait_for_slave_io_error.inc
--connection master
SET @@global.debug_dbug=@saved_dbug;
# Emulate corruption on master without crc checking on master
--echo # 4. Master read a corrupted event from binlog and send it to slave
--connection master
SET GLOBAL master_verify_checksum=0;
SET @@global.debug_dbug="d,corrupt_read_log_event2_set";
--connection slave
START SLAVE IO_THREAD;
# When the checksum error is detected, the slave sets error code 1743
# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately
# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io().
# So we usually get 1595, but it is occasionally possible to get 1743.
let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE
--source include/wait_for_slave_io_error.inc
--connection master
SET @@global.debug_dbug=@saved_dbug;
SET GLOBAL master_verify_checksum=1;
# Emulate corruption in network
--echo # 5. Slave. Corruption in network
--connection slave
SET @saved_dbug_slave = @@GLOBAL.debug_dbug;
SET @@global.debug_dbug="d,corrupt_queue_event";
START SLAVE IO_THREAD;
let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE
--source include/wait_for_slave_io_error.inc
SET @@global.debug_dbug=@saved_dbug_slave;
# Emulate corruption in relay log
--echo # 6. Slave. Corruption in relay log
SET @@global.debug_dbug="d,corrupt_read_log_event_char";
START SLAVE SQL_THREAD;
let $slave_sql_errno= 1593;
--source include/wait_for_slave_sql_error.inc
SET @@global.debug_dbug=@saved_dbug_slave;
# Start normal replication and compare same table on master
# and slave
--echo # 7. Seek diff for tables on master and slave
--connection slave
--source include/start_slave.inc
--connection master
--sync_slave_with_master
let $diff_tables= master:test.t1, slave:test.t1;
--source include/diff_tables.inc
# Clean up
--echo # 8. Clean up
--connection master
set @@global.debug_dbug = @saved_dbug;
SET GLOBAL master_verify_checksum = @old_master_verify_checksum;
DROP TABLE t1;
--sync_slave_with_master
--source include/rpl_end.inc

View File

@@ -1,572 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
--source include/have_innodb.inc
--let $rpl_topology=1->2->3->4
--source include/rpl_init.inc
# Set up a 4-deep replication topology, then test various fail-overs
# using GTID.
#
# A -> B -> C -> D
connection server_1;
--source include/wait_for_binlog_checkpoint.inc
--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
--echo *** GTID position should be empty here ***
--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS>
eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
CREATE TABLE t2 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1, "m1");
INSERT INTO t1 VALUES (2, "m2"), (3, "m3"), (4, "m4");
INSERT INTO t2 VALUES (1, "i1");
BEGIN;
INSERT INTO t2 VALUES (2, "i2"), (3, "i3");
INSERT INTO t2 VALUES (4, "i4");
COMMIT;
save_master_pos;
source include/wait_for_binlog_checkpoint.inc;
--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
--let $gtid_pos_server_1 = `SELECT @@gtid_binlog_pos`
--echo *** GTID position should be non-empty here ***
--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1>
eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
connection server_2;
sync_with_master;
source include/wait_for_binlog_checkpoint.inc;
--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
--echo *** GTID position should be the same as on server_1 ***
--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1>
eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
save_master_pos;
connection server_3;
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
save_master_pos;
connection server_4;
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--echo *** Now take out D, let it fall behind a bit, and then test re-attaching it to A ***
connection server_4;
--source include/stop_slave.inc
connection server_1;
INSERT INTO t1 VALUES (5, "m1a");
INSERT INTO t2 VALUES (5, "i1a");
save_master_pos;
connection server_4;
--replace_result $MASTER_MYPORT MASTER_PORT
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT,
MASTER_USE_GTID=CURRENT_POS;
--source include/start_slave.inc
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--echo *** Now move B to D (C is still replicating from B) ***
connection server_2;
--source include/stop_slave.inc
--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4,
MASTER_USE_GTID=CURRENT_POS;
--source include/start_slave.inc
connection server_4;
UPDATE t2 SET b="j1a" WHERE a=5;
save_master_pos;
connection server_2;
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--echo *** Now move C to D, after letting it fall a little behind ***
connection server_3;
--source include/stop_slave.inc
connection server_1;
INSERT INTO t2 VALUES (6, "i6b");
INSERT INTO t2 VALUES (7, "i7b");
--source include/save_master_gtid.inc
connection server_3;
--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4,
MASTER_USE_GTID=CURRENT_POS;
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
SELECT * FROM t2 ORDER BY a;
--echo *** Now change everything back to what it was, to make rpl_end.inc happy
# Also check that MASTER_USE_GTID=CURRENT_POS is still enabled.
connection server_2;
# We need to sync up server_2 before switching. If it happened to have reached
# the point 'UPDATE t2 SET b="j1a" WHERE a=5' it will fail to connect to
# server_1, which is (deliberately) missing that transaction.
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--replace_result $MASTER_MYPORT MASTER_MYPORT
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT;
--source include/start_slave.inc
--source include/wait_for_slave_to_start.inc
connection server_3;
--source include/stop_slave.inc
--replace_result $SLAVE_MYPORT SLAVE_MYPORT
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SLAVE_MYPORT;
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
connection server_4;
--source include/stop_slave.inc
--replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3;
--source include/start_slave.inc
connection server_1;
DROP TABLE t1,t2;
--source include/save_master_gtid.inc
--echo *** A few more checks for BINLOG_GTID_POS function ***
--let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1)
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT BINLOG_GTID_POS();
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT BINLOG_GTID_POS('a');
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT BINLOG_GTID_POS('a',1,NULL);
SELECT BINLOG_GTID_POS(1,'a');
SELECT BINLOG_GTID_POS(NULL,NULL);
SELECT BINLOG_GTID_POS('',1);
SELECT BINLOG_GTID_POS('a',1);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',-1);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',0);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551615);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616);
--echo *** Some tests of @@GLOBAL.gtid_binlog_state ***
--connection server_2
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--connection server_1
SET @old_state= @@GLOBAL.gtid_binlog_state;
--error ER_BINLOG_MUST_BE_EMPTY
SET GLOBAL gtid_binlog_state = '';
RESET MASTER;
SET GLOBAL gtid_binlog_state = '';
FLUSH LOGS;
--source include/show_binary_logs.inc
SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30';
--source include/show_binary_logs.inc
--let $binlog_file= master-bin.000001
--let $binlog_start= 4
--source include/show_binlog_events.inc
#SELECT @@GLOBAL.gtid_binlog_pos;
#SELECT @@GLOBAL.gtid_binlog_state;
--error ER_BINLOG_MUST_BE_EMPTY
SET GLOBAL gtid_binlog_state = @old_state;
RESET MASTER;
SET GLOBAL gtid_binlog_state = @old_state;
# Check that slave can reconnect again, despite the RESET MASTER, as we
# restored the state.
CREATE TABLE t1 (a INT PRIMARY KEY);
SET gtid_seq_no=100;
INSERT INTO t1 VALUES (1);
--source include/save_master_gtid.inc
--connection server_2
--source include/start_slave.inc
# We cannot just use sync_with_master as we've done RESET MASTER, so
# slave old-style position is wrong.
# So sync on gtid position instead.
--source include/sync_with_master_gtid.inc
SELECT * FROM t1;
# Check that the IO gtid position in SHOW SLAVE STATUS is also correct.
--let $status_items= Gtid_IO_Pos
--source include/show_slave_status.inc
--echo *** Test @@LAST_GTID and MASTER_GTID_WAIT() ***
--connection server_1
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
--save_master_pos
--connection server_2
--sync_with_master
--source include/stop_slave.inc
--connect (m1,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
SELECT @@last_gtid;
SET gtid_seq_no=110;
SELECT @@last_gtid;
BEGIN;
SELECT @@last_gtid;
INSERT INTO t1 VALUES (2);
SELECT @@last_gtid;
COMMIT;
SELECT @@last_gtid;
--let $pos= `SELECT @@gtid_binlog_pos`
--connect (s1,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
eval SET @pos= '$pos';
# Check NULL argument.
SELECT master_gtid_wait(NULL);
# Check empty argument returns immediately.
SELECT master_gtid_wait('', NULL);
# Check this gets counted
SHOW STATUS LIKE 'Master_gtid_wait_count';
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_time';
# Let's check that we get a timeout
SELECT master_gtid_wait(@pos, 0.5);
SELECT * FROM t1 ORDER BY a;
# Now actually wait until the slave reaches the position
send SELECT master_gtid_wait(@pos);
--connection server_2
--source include/start_slave.inc
--connection s1
reap;
SELECT * FROM t1 ORDER BY a;
# Test waiting on a domain that does not exist yet.
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id= 1;
INSERT INTO t1 VALUES (3);
--let $pos= `SELECT @@gtid_binlog_pos`
--connection s1
--replace_result $pos POS
eval SET @pos= '$pos';
SELECT master_gtid_wait(@pos, 0);
SELECT * FROM t1 WHERE a >= 3;
send SELECT master_gtid_wait(@pos, -1);
--connection server_2
--source include/start_slave.inc
--connection s1
reap;
SELECT * FROM t1 WHERE a >= 3;
# Waiting for only part of the position.
SELECT master_gtid_wait('1-1-1', 0);
# Now test a lot of parallel master_gtid_wait() calls, completing in different
# order, and some of which time out or get killed on the way.
--connection s1
send SELECT master_gtid_wait('2-1-1,1-1-4,0-1-110');
--connect (s2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
# This will time out. No event 0-1-1000 exists
send SELECT master_gtid_wait('0-1-1000', 0.5);
--connect (s3,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
# This one we will kill
--let $kill1_id= `SELECT connection_id()`
send SELECT master_gtid_wait('0-1-2000');
--connect (s4,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-10');
--connect (s5,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-6', 1);
# This one we will kill also.
--connect (s6,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
--let $kill2_id= `SELECT connection_id()`
send SELECT master_gtid_wait('2-1-5');
--connect (s7,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-10');
--connect (s8,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-5,1-1-4,0-1-110');
--connect (s9,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-2');
--connection server_2
# This one completes immediately.
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
SELECT master_gtid_wait('1-1-1');
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1);
--replace_result $wait_time MASTER_GTID_WAIT_TIME
eval SET @a= $wait_time;
SELECT IF(@a <= 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " is larger than expected"))
AS Master_gtid_wait_time_as_expected;
--connect (s10,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('0-1-109');
--connection server_2
# This one should time out.
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
SELECT master_gtid_wait('2-1-2', 0.5);
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1);
--replace_result $wait_time MASTER_GTID_WAIT_TIME
eval SET @a= $wait_time;
# We expect a wait time of just a bit over 0.5 seconds. But thread scheduling
# and timer inaccuracies could introduce significant jitter. So allow a
# generous interval.
SELECT IF(@a BETWEEN 0.4*1000*1000 AND 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " not as expected")) AS Master_gtid_wait_time_as_expected;
--replace_result $kill1_id KILL_ID
eval KILL QUERY $kill1_id;
--connection s3
--error ER_QUERY_INTERRUPTED
reap;
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=2;
INSERT INTO t1 VALUES (4);
--connection s9
reap;
--connection server_2
--replace_result $kill2_id KILL_ID
eval KILL CONNECTION $kill2_id;
--connection s6
--error 2013,ER_CONNECTION_KILLED
reap;
--connection server_1
SET gtid_domain_id=1;
SET gtid_seq_no=4;
INSERT INTO t1 VALUES (5);
SET gtid_domain_id=2;
SET gtid_seq_no=5;
INSERT INTO t1 VALUES (6);
--connection s8
reap;
--connection s1
reap;
--connection s2
reap;
--connection s5
reap;
--connection s10
reap;
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=10;
INSERT INTO t1 VALUES (7);
--connection s4
reap;
--connection s7
reap;
--echo *** Test gtid_slave_pos when used with GTID ***
--connection server_2
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1000;
INSERT INTO t1 VALUES (10);
INSERT INTO t1 VALUES (11);
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 1;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1010;
INSERT INTO t1 VALUES (12);
INSERT INTO t1 VALUES (13);
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 2;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1020;
INSERT INTO t1 VALUES (14);
INSERT INTO t1 VALUES (15);
INSERT INTO t1 VALUES (16);
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 3;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1030;
# Disable logging Annotate_rows events to preserve events count.
let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`;
SET @@binlog_annotate_row_events= 0;
INSERT INTO t1 VALUES (17);
INSERT INTO t1 VALUES (18);
INSERT INTO t1 VALUES (19);
eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved;
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 5;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=3;
SET gtid_seq_no=100;
CREATE TABLE t2 (a INT PRIMARY KEY);
DROP TABLE t2;
SET gtid_domain_id=2;
SET gtid_seq_no=1040;
INSERT INTO t1 VALUES (20);
--save_master_pos
--connection server_2
SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode;
SET GLOBAL slave_ddl_exec_mode=STRICT;
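# With the default IDEMPOTENT mode the slave silently ignores DDL errors such
# as dropping a non-existent table; STRICT makes the replicated DROP TABLE
# below fail, which is what this part of the test relies on.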
SET sql_slave_skip_counter=1;
START SLAVE UNTIL master_gtid_pos="3-1-100";
--let $master_pos=3-1-100
--source include/sync_with_master_gtid.inc
--source include/wait_for_slave_to_stop.inc
--error ER_NO_SUCH_TABLE
SELECT * FROM t2;
SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
# Start the slave again; it should fail on the DROP TABLE, as the table is not there.
SET sql_log_bin=0;
CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051");
SET sql_log_bin=1;
START SLAVE;
--let $slave_sql_errno=1051
--source include/wait_for_slave_sql_error.inc
SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
STOP SLAVE IO_THREAD;
SET sql_slave_skip_counter=2;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 20 ORDER BY a;
SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
SET GLOBAL slave_ddl_exec_mode= @saved_mode;
--echo *** Test GTID-connecting to a master with out-of-order sequence numbers in the binlog. ***
# Create an out-of-order binlog on server 2.
# Let server 3 replicate to an out-of-order point, stop it, restart it,
# and check that it replicates correctly despite the out-of-order sequence numbers.
--connection server_1
SET gtid_domain_id= @@GLOBAL.gtid_domain_id;
INSERT INTO t1 VALUES (31);
--save_master_pos
--connection server_2
--sync_with_master
SET gtid_domain_id= @@GLOBAL.gtid_domain_id;
INSERT INTO t1 VALUES (32);
--connection server_1
INSERT INTO t1 VALUES (33);
--save_master_pos
--connection server_2
--sync_with_master
--save_master_pos
--connection server_3
--sync_with_master
--source include/stop_slave.inc
--connection server_1
INSERT INTO t1 VALUES (34);
--save_master_pos
--connection server_2
--sync_with_master
--save_master_pos
--connection server_3
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
--save_master_pos
--connection server_4
--sync_with_master
SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
# Clean up.
--connection server_1
DROP TABLE t1;
--source include/rpl_end.inc

View File

@ -1,61 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
--source include/have_debug.inc
--source include/master-slave.inc
SET @old_binlog_checksum=@@binlog_checksum;
SET GLOBAL BINLOG_CHECKSUM=none;
connection slave;
SET @old_binlog_checksum=@@binlog_checksum;
SET GLOBAL BINLOG_CHECKSUM=none;
connection master;
--echo **** On Master ****
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
SELECT * FROM t1;
set @saved_dbug = @@global.debug_dbug;
SET GLOBAL debug_dbug= '+d,incident_database_resync_on_replace,*';
# This will generate an incident log event and store it in the binary
# log before the replace statement.
REPLACE INTO t1 VALUES (4);
--save_master_pos
SELECT * FROM t1;
set @@global.debug_dbug = @saved_dbug;
connection slave;
# Wait until the SQL thread stops with the LOST_EVENTS incident error from the master
call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occurred on the master.* 1590");
let $slave_sql_errno= 1590;
let $show_slave_sql_error= 1;
source include/wait_for_slave_sql_error.inc;
# The 4 should not be inserted into the table, since the incident log
# event should have stopped the slave.
--echo **** On Slave ****
SELECT * FROM t1;
SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1;
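# The incident counts as a single event, so skipping one event steps over it
# and replication resumes with the events logged after the incident.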
START SLAVE;
--sync_with_master
# Now, we should have inserted the row into the table and the slave
# should be running. We should also have rotated to a new binary log.
SELECT * FROM t1;
source include/check_slave_is_running.inc;
connection master;
SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum;
DROP TABLE t1;
--sync_slave_with_master
SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum;
--source include/rpl_end.inc

View File

@ -1,96 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
######################################################################
# Some errors that cause the slave SQL thread to stop are not shown in
# the Slave_SQL_Error column of "SHOW SLAVE STATUS". Instead, the error
# is only in the server's error log.
#
# Two failures and their respective reporting are verified:
#
# 1 - Failures during slave thread initialization
# 2 - Failures while processing queries passed through the init_slave
# option.
#
# In order to check the first type of failure, we inject a fault in the
# SQL/IO Threads through SET GLOBAL debug.
#
# To check the second type, we set @@global.init_slave to an invalid
# command thus preventing the initialization of the SQL Thread.
#
# Obs:
# 1 - Note that testing failures while initializing the relay log position
# is hard as the same function is called before the code reaches the point
# that we want to test.
#
# 2 - This test does not target failures that are reported while applying
# events such as duplicate keys, errors while reading the relay-log.bin*,
# etc. Such errors are already checked in other tests.
######################################################################
######################################################################
# Configuring the Environment
######################################################################
source include/have_debug.inc;
source include/have_log_bin.inc;
source include/master-slave.inc;
connection slave;
--disable_warnings
stop slave;
--enable_warnings
reset slave;
######################################################################
# Injecting faults in the threads' initialization
######################################################################
connection slave;
# Set debug flags on slave to force errors to occur
set @saved_dbug = @@global.debug_dbug;
SET GLOBAL debug_dbug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init";
start slave;
#
# The slave is going to stop because of the emulated failures,
# but there won't be any crashes or asserts hit.
#
# 1593 = ER_SLAVE_FATAL_ERROR
--let $slave_sql_errno= 1593
--let $show_slave_sql_error= 1
--source include/wait_for_slave_sql_error.inc
call mtr.add_suppression("Failed during slave.* thread initialization");
set @@global.debug_dbug = @saved_dbug;
######################################################################
# Injecting faults in the init_slave option
######################################################################
connection slave;
reset slave;
SET GLOBAL init_slave= "garbage";
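# The slave SQL thread executes the contents of @@global.init_slave when it
# starts; an unparsable value like this stops it with ER_PARSE_ERROR (1064).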
start slave;
# 1064 = ER_PARSE_ERROR
--let $slave_sql_errno= 1064
--let $show_slave_sql_error= 1
--source include/wait_for_slave_sql_error.inc
######################################################################
# Clean up
######################################################################
SET GLOBAL init_slave= "";
# Clean up Last_SQL_Error
--source include/stop_slave_io.inc
RESET SLAVE;
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc

View File

@ -1,120 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#############################################################################
# Original Author: JBM #
# Original Date: Aug/18/2005 #
#############################################################################
# TEST: To test the LOAD_FILE() in rbr #
#############################################################################
# Change Author: JBM
# Change Date: 2006-01-16
##########
# Includes
-- source include/have_binlog_format_mixed_or_row.inc
-- source include/master-slave.inc
-- source suite/rpl/include/rpl_loadfile.test
# BUG#39701: Mixed binlog format does not switch to row mode on LOAD_FILE
#
# DESCRIPTION
#
# Problem: when using the load_file string function with the mixed binlogging
#          format there was no switch to row based binlogging format. This
#          leads to scenarios in which the slave replicates the statement and
#          tries to load the file from its local file system, where the file
#          most likely does not exist.
#
# Solution:
#          Marking this function as unsafe for statement format makes any
#          statement using it be logged in row based format. As such, the data
#          replicated from the master becomes the content of the loaded file.
#          Consequently, the slave receives the necessary data to complete
#          the load_file instruction correctly.
#
# IMPLEMENTATION
#
# The test is implemented as follows:
#
# On Master,
# i) write to file the desired content.
# ii) create table and stored procedure with load_file
# iii) stop slave
# iv) execute load_file
# v) remove file
#
# On Slave,
# vi) start slave
# vii) sync it with master so that it gets the updates from binlog (which
#      should have been logged in row format).
#
# If the binlog format does not change to row, then the assertion
# done in the following step fails. This happens because the tables differ,
# since the file does not exist anymore: when the slave attempts to
# execute the LOAD_FILE statement it inserts NULL into the table
# instead of the same contents that the master loaded when it executed
# the procedure (while the file still existed).
#
# viii) assert that the contents of the master and slave
#       tables are the same
--source include/rpl_reset.inc
connection master;
let $file= $MYSQLTEST_VARDIR/tmp/bug_39701.data;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--eval SELECT repeat('x',20) INTO OUTFILE '$file'
disable_warnings;
DROP TABLE IF EXISTS t1;
enable_warnings;
CREATE TABLE t1 (t text);
DELIMITER |;
CREATE PROCEDURE p(file varchar(4096))
BEGIN
INSERT INTO t1 VALUES (LOAD_FILE(file));
END|
DELIMITER ;|
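# For illustration: CALL p('/some/path') evaluates LOAD_FILE() on the server
# executing the statement, and LOAD_FILE() returns NULL when the file cannot
# be read, which is exactly why statement-based logging is unsafe here.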
# stop slave before issuing the load_file on master
connection slave;
source include/stop_slave.inc;
connection master;
# test: check that logging falls back to rbr.
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--eval CALL p('$file')
# test: remove the file from the filesystem and assert that slave still
# gets the loaded file
remove_file $file;
# now that the file is removed it is safe (regarding what we want to test)
# to start slave
connection slave;
source include/start_slave.inc;
connection master;
sync_slave_with_master;
# assertion: assert that the slave got the updates even
# if the file was removed before the slave started,
# meaning that contents were indeed transfered
# through binlog (in row format)
let $diff_tables= master:t1, slave:t1;
source include/diff_tables.inc;
# CLEAN UP
--connection master
DROP TABLE t1;
DROP PROCEDURE p;
--source include/rpl_end.inc

View File

@ -1,184 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# ==== Purpose ====
#
# Check replication protocol packet size handling
#
# ==== Related bugs ====
# Bug#19402 SQL close to the size of the max_allowed_packet fails on slave
# BUG#23755: Replicated event larger than max_allowed_packet infinitely re-transmits
# BUG#42914: No LAST_IO_ERROR for max_allowed_packet errors
# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET
# Use a database name of the maximum allowed size
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153");
call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet");
let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________;
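# (64 characters is the maximum allowed length for a database name.)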
disable_warnings;
eval drop database if exists $db;
enable_warnings;
eval create database $db;
connection master;
let $old_max_allowed_packet= `SELECT @@global.max_allowed_packet`;
let $old_net_buffer_length= `SELECT @@global.net_buffer_length`;
let $old_slave_max_allowed_packet= `SELECT @@global.slave_max_allowed_packet`;
SET @@global.max_allowed_packet=1024;
SET @@global.net_buffer_length=1024;
sync_slave_with_master;
# Restart slave for setting to take effect
source include/stop_slave.inc;
source include/start_slave.inc;
# Reconnect to master for new setting to take effect
disconnect master;
# Alas, we cannot use eval here; if the db name above changes, apply the change here too
connect (master,localhost,root,,DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________);
connection master;
select @@net_buffer_length, @@max_allowed_packet;
create table `t1` (`f1` LONGTEXT) ENGINE=MyISAM;
INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1023');
sync_slave_with_master;
eval select count(*) from `$db`.`t1` /* must be 1 */;
SHOW STATUS LIKE 'Slave_running';
select * from information_schema.session_status where variable_name= 'SLAVE_RUNNING';
connection master;
eval drop database $db;
sync_slave_with_master;
#
# Bug #23755: Replicated event larger than max_allowed_packet infinitely re-transmits
#
# Check that a situation when the size of event on the master is greater than
# max_allowed_packet on the slave does not lead to infinite re-transmits.
connection master;
# Change the max packet size on master
SET @@global.max_allowed_packet=4096;
SET @@global.net_buffer_length=4096;
# Restart slave for new setting to take effect
connection slave;
source include/stop_slave.inc;
source include/start_slave.inc;
# Reconnect to master for new setting to take effect
disconnect master;
connect (master, localhost, root);
connection master;
CREATE TABLE `t1` (`f1` LONGTEXT) ENGINE=MyISAM;
sync_slave_with_master;
connection master;
INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048');
#
# Bug#42914: The slave I/O thread must stop after trying to read the above
# event; however, there is no Last_IO_Error report.
#
# The slave I/O thread must stop after trying to read the above event
connection slave;
# 1153 = ER_NET_PACKET_TOO_LARGE
--let $slave_io_errno= 1153
--let $show_slave_io_error= 1
--source include/wait_for_slave_io_error.inc
# TODO: this is needed because of BUG#55790. Remove once that is fixed.
--source include/stop_slave_sql.inc
#
# Bug#42914: On the master, if a binary log event is larger than
# max_allowed_packet, the error message ER_MASTER_FATAL_ERROR_READING_BINLOG
# is sent to a slave when it requests a dump from the master, thus leading the
# I/O thread to stop. However, there is no Last_IO_Error reported.
#
--let $rpl_only_running_threads= 1
--source include/rpl_reset.inc
--connection master
DROP TABLE t1;
--sync_slave_with_master
connection master;
CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM;
sync_slave_with_master;
connection master;
INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet));
connection slave;
# The slave I/O thread must stop after receiving
# 1153 = ER_NET_PACKET_TOO_LARGE
--let $slave_io_errno= 1153
--let $show_slave_io_error= 1
--source include/wait_for_slave_io_error.inc
# Remove the bad binlog and clear error status on slave.
STOP SLAVE;
RESET SLAVE;
--connection master
RESET MASTER;
#
# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET
#
# In BUG#55322, @@session.max_allowed_packet increased each time SHOW
# BINLOG EVENTS was issued. To verify that this bug is fixed, we
# execute SHOW BINLOG EVENTS twice and check that max_allowed_packet
# never changes. We turn off the result log because we don't care
# about the contents of the binlog.
--disable_result_log
SET @max_allowed_packet_0= @@session.max_allowed_packet;
SHOW BINLOG EVENTS;
SET @max_allowed_packet_1= @@session.max_allowed_packet;
SHOW BINLOG EVENTS;
SET @max_allowed_packet_2= @@session.max_allowed_packet;
--enable_result_log
if (`SELECT NOT(@max_allowed_packet_0 = @max_allowed_packet_1 AND @max_allowed_packet_1 = @max_allowed_packet_2)`)
{
--echo ERROR: max_allowed_packet changed after executing SHOW BINLOG EVENTS
--source include/show_rpl_debug_info.inc
SELECT @max_allowed_packet_0, @max_allowed_packet_1, @max_allowed_packet_2;
--die @max_allowed_packet changed after executing SHOW BINLOG EVENTS
}
--echo ==== clean up ====
connection master;
DROP TABLE t1;
eval SET @@global.max_allowed_packet= $old_max_allowed_packet;
eval SET @@global.net_buffer_length= $old_net_buffer_length;
eval SET @@global.slave_max_allowed_packet= $old_slave_max_allowed_packet;
# slave is stopped
connection slave;
DROP TABLE t1;
# Clear Last_IO_Error
RESET SLAVE;
--source include/rpl_end.inc
# End of tests

View File

@ -1,112 +0,0 @@
# ==== Purpose ====
#
# This test verifies that, in parallel replication, a transaction failure
# notification is propagated to all the workers. Workers should abort the
# execution of transaction event groups whose event positions are higher than
# that of the failing transaction group.
#
# ==== Implementation ====
#
# Steps:
# 0 - Create a table t1 on master which has a primary key. Enable parallel
# replication on slave with slave_parallel_mode='optimistic' and
# slave_parallel_threads=3.
# 1 - On slave start a transaction and execute a local INSERT statement
# which will insert value 32. This is done to block the INSERT coming
# from master.
# 2 - On master execute an INSERT statement with value 32, so that it is
# blocked on slave.
# 3 - On slave enable a debug sync point such that it holds the worker thread
# execution as soon as work is scheduled to it.
#  4 - INSERT value 33 on master. It will be held on the slave by another
#      worker thread due to the debug sync simulation.
# 5 - INSERT value 34 on master.
#  6 - On the slave, ensure that INSERT 34 has reached a state where it waits
#      for its prior transactions to commit.
#  7 - Commit the local INSERT 32 on the slave server so that the first worker
#      errors out.
#  8 - Now send a continue signal to the second worker, which is processing 33.
#      It should wake up and propagate the error to INSERT 34.
# 9 - Upon slave stop due to error, check that no rows are found after the
# failed INSERT 32.
#
# ==== References ====
#
# MDEV-20645: Replication consistency is broken as workers miss the error
# notification from an earlier failed group.
#
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_statement.inc
--source include/master-slave.inc
--enable_connect_log
--connection server_2
--source include/stop_slave.inc
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET @old_debug= @@GLOBAL.debug_dbug;
SET GLOBAL slave_parallel_mode='optimistic';
SET GLOBAL slave_parallel_threads= 3;
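# In optimistic mode the workers apply transactions from the same binlog in
# parallel speculatively; commits still happen in binlog order, so a failure
# in an earlier group must be propagated to the later ones.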
CHANGE MASTER TO master_use_gtid=slave_pos;
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
--source include/start_slave.inc
--connection server_1
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
--source include/save_master_gtid.inc
--connection server_2
--source include/sync_with_master_gtid.inc
--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
BEGIN;
INSERT INTO t1 VALUES (32);
--connection server_1
INSERT INTO t1 VALUES (32);
--connection server_2
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE info like "INSERT INTO t1 VALUES (32)"
--source include/wait_condition.inc
SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker";
--connection server_1
SET gtid_seq_no=100;
INSERT INTO t1 VALUES (33);
--connection server_2
SET debug_sync='now WAIT_FOR reached_pause';
--connection server_1
INSERT INTO t1 VALUES (34);
--connection server_2
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit"
--source include/wait_condition.inc
--connection con_temp2
COMMIT;
# Clean up.
--connection server_2
--source include/stop_slave.inc
--let $assert_cond= COUNT(*) = 0 FROM t1 WHERE a>32
--let $assert_text= table t1 should have zero rows where a>32
--source include/assert.inc
SELECT * FROM t1 WHERE a>32;
DELETE FROM t1 WHERE a=32;
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
SET GLOBAL slave_parallel_mode=@old_parallel_mode;
SET GLOBAL debug_dbug=@old_debug;
SET DEBUG_SYNC= 'RESET';
--source include/start_slave.inc
--connection server_1
DROP TABLE t1;
--disable_connect_log
--source include/rpl_end.inc

View File

@ -1,38 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# BUG#13979418: SHOW BINLOG EVENTS MAY CRASH THE SERVER
#
# The function mysql_show_binlog_events has a local stack variable
# 'LOG_INFO linfo;', which is assigned to thd->current_linfo; however,
# this variable goes out of scope and is destroyed before
# thd->current_linfo is cleared.
#
# This test case runs SHOW BINLOG EVENTS and FLUSH LOGS to make sure
# that, with the fix, the local variable linfo stays valid throughout
# the whole scope of the mysql_show_binlog_events function.
#
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/master-slave.inc
--connection slave
SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL on_show_binlog_events WAIT_FOR end';
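# The sync point presumably fires near the end of mysql_show_binlog_events(),
# so the FLUSH LOGS issued from the other connection runs while the local
# LOG_INFO is still registered in thd->current_linfo.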
--send SHOW BINLOG EVENTS
--connection slave1
SET DEBUG_SYNC= 'now WAIT_FOR on_show_binlog_events';
FLUSH LOGS;
SET DEBUG_SYNC= 'now SIGNAL end';
--connection slave
--disable_result_log
--reap
--enable_result_log
SET DEBUG_SYNC= 'RESET';
--connection master
--source include/rpl_end.inc

View File

@ -1,18 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#######################################################
# Wrapper for rpl_relayrotate.test to allow multi #
# Engines to reuse test code. By JBM 2006-02-15 #
#######################################################
-- source include/have_innodb.inc
# Slow test, don't run during staging part
-- source include/not_staging.inc
-- source include/master-slave.inc
let $engine_type=innodb;
-- source suite/rpl/include/rpl_relayrotate.test
--source include/rpl_end.inc

View File

@ -1,525 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
source include/not_embedded.inc;
source include/have_innodb.inc;
source include/master-slave.inc;
let $engine_type= InnoDB;
# Suppress warnings that might be generated during the test
connection master;
call mtr.add_suppression("Timeout waiting for reply of binlog");
call mtr.add_suppression("Read semi-sync reply");
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.");
call mtr.add_suppression("mysqld: Got an error reading communication packets");
connection slave;
call mtr.add_suppression("Master server does not support semi-sync");
call mtr.add_suppression("Semi-sync slave .* reply");
call mtr.add_suppression("Slave SQL.*Request to stop slave SQL Thread received while applying a group that has non-transactional changes; waiting for completion of the group");
connection master;
# wait for dying connections (if any) to disappear
let $wait_condition= select count(*) = 0 from information_schema.processlist where command='killed';
--source include/wait_condition.inc
# After the fix of BUG#45848, the semi-sync slave should not create any extra
# connections on the master; save the count of connections before starting
# the semi-sync slave for comparison below.
let $_connections_normal_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1);
--echo #
--echo # Uninstall semi-sync plugins on master and slave
--echo #
connection slave;
source include/stop_slave.inc;
reset slave;
set global rpl_semi_sync_master_enabled= 0;
set global rpl_semi_sync_slave_enabled= 0;
connection master;
reset master;
set global rpl_semi_sync_master_enabled= 0;
set global rpl_semi_sync_slave_enabled= 0;
--echo #
--echo # Main test of semi-sync replication start here
--echo #
connection master;
set global rpl_semi_sync_master_timeout= 60000; # 60s
echo [ default state of semi-sync on master should be OFF ];
show variables like 'rpl_semi_sync_master_enabled';
echo [ enable semi-sync on master ];
set global rpl_semi_sync_master_enabled = 1;
show variables like 'rpl_semi_sync_master_enabled';
echo [ status of semi-sync on master should be ON even without any semi-sync slaves ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # BUG#45672 Semisync repl: ActiveTranx:insert_tranx_node: transaction node allocation failed
--echo # BUG#45673 Semisynch reports correct operation even if no slave is connected
--echo #
# BUG#45672 When semi-sync is enabled on the master, it would allocate a
# transaction node even without a semi-sync slave connected, and this would
# eventually result in a transaction node allocation error.
#
# The semi-sync master will pre-allocate 'max_connections' transaction
# nodes, so here we run more than that many transactions to check whether
# it fails or not.
# select @@global.max_connections + 1;
let $i= `select @@global.max_connections + 1`;
disable_query_log;
eval create table t1 (a int) engine=$engine_type;
while ($i)
{
eval insert into t1 values ($i);
dec $i;
}
drop table t1;
enable_query_log;
# BUG#45673
echo [ status of semi-sync on master should be OFF ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_yes_tx';
# reset master to make sure the following test will start with a clean environment
reset master;
connection slave;
echo [ default state of semi-sync on slave should be OFF ];
show variables like 'rpl_semi_sync_slave_enabled';
echo [ enable semi-sync on slave ];
set global rpl_semi_sync_slave_enabled = 1;
show variables like 'rpl_semi_sync_slave_enabled';
source include/start_slave.inc;
connection master;
# NOTE: Rpl_semi_sync_master_clients will only be updated when the
# semi-sync slave has started its binlog dump request
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
echo [ initial master state after the semi-sync slave connected ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
replace_result $engine_type ENGINE_TYPE;
eval create table t1(a int) engine = $engine_type;
echo [ master state after CREATE TABLE statement ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
# After fix of BUG#45848, semi-sync slave should not create any extra
# connections on master.
let $_connections_semisync_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1);
replace_result $_connections_normal_slave CONNECTIONS_NORMAL_SLAVE $_connections_semisync_slave CONNECTIONS_SEMISYNC_SLAVE;
eval select $_connections_semisync_slave - $_connections_normal_slave as 'Should be 0';
echo [ insert records to table ];
insert t1 values (10);
insert t1 values (9);
insert t1 values (8);
insert t1 values (7);
insert t1 values (6);
insert t1 values (5);
insert t1 values (4);
insert t1 values (3);
insert t1 values (2);
insert t1 values (1);
echo [ master status after inserts ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
sync_slave_with_master;
echo [ slave status after replicated inserts ];
show status like 'Rpl_semi_sync_slave_status';
select count(distinct a) from t1;
select min(a) from t1;
select max(a) from t1;
--echo
--echo # BUG#50157
--echo # semi-sync replication crashes when replicating a transaction which
--echo # include 'CREATE TEMPORARY TABLE `MyISAM_t` SELECT * FROM `Innodb_t` ;
connection master;
SET SESSION AUTOCOMMIT= 0;
CREATE TABLE t2(c1 INT) ENGINE=innodb;
sync_slave_with_master;
connection master;
BEGIN;
--echo
--echo # Even though it is in a transaction, this statement is binlogged into binlog
--echo # file immediately.
--disable_warnings
CREATE TEMPORARY TABLE t3 SELECT c1 FROM t2 where 1=1;
--enable_warnings
--echo
--echo # These statements will not be binlogged until the transaction is committed
INSERT INTO t2 VALUES(11);
INSERT INTO t2 VALUES(22);
COMMIT;
DROP TABLE t2, t3;
SET SESSION AUTOCOMMIT= 1;
sync_slave_with_master;
--echo #
--echo # Test semi-sync master will switch OFF after one transaction
--echo # timeout waiting for slave reply.
--echo #
connection slave;
source include/stop_slave.inc;
connection master;
--source include/kill_binlog_dump_threads.inc
set global rpl_semi_sync_master_timeout= 5000;
# The first semi-sync check should be on because after slave stop,
# there are no transactions on the master.
echo [ master status should be ON ];
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 0;
source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
echo [ semi-sync replication of these transactions will fail ];
insert into t1 values (500);
# Wait for the semi-sync replication of this transaction to timeout
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= OFF;
source include/wait_for_status_var.inc;
# The second semi-sync check should be off because one transaction
# times out during waiting.
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
# Semi-sync status on master is now OFF, so all these transactions
# will be replicated asynchronously.
delete from t1 where a=10;
delete from t1 where a=9;
delete from t1 where a=8;
delete from t1 where a=7;
delete from t1 where a=6;
delete from t1 where a=5;
delete from t1 where a=4;
delete from t1 where a=3;
delete from t1 where a=2;
delete from t1 where a=1;
insert into t1 values (100);
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # Test semi-sync status on master will be ON again when slave catches up
--echo #
# Save the master position for later use.
save_master_pos;
connection slave;
echo [ slave status should be OFF ];
show status like 'Rpl_semi_sync_slave_status';
source include/start_slave.inc;
sync_with_master;
echo [ slave status should be ON ];
show status like 'Rpl_semi_sync_slave_status';
select count(distinct a) from t1;
select min(a) from t1;
select max(a) from t1;
connection master;
# The master semi-sync status should be on again after slave catches up.
echo [ master status should be ON again after slave catches up ];
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
show status like 'Rpl_semi_sync_master_clients';
--echo #
--echo # Test disable/enable master semi-sync on the fly.
--echo #
drop table t1;
sync_slave_with_master;
source include/stop_slave.inc;
--echo #
--echo # Flush status
--echo #
connection master;
echo [ Semi-sync master status variables before FLUSH STATUS ];
SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
# Do not write the FLUSH STATUS to binlog, to make sure we'll get a
# clean status after this.
FLUSH NO_WRITE_TO_BINLOG STATUS;
echo [ Semi-sync master status variables after FLUSH STATUS ];
SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
connection master;
source include/show_master_logs.inc;
show variables like 'rpl_semi_sync_master_enabled';
echo [ disable semi-sync on the fly ];
set global rpl_semi_sync_master_enabled=0;
show variables like 'rpl_semi_sync_master_enabled';
show status like 'Rpl_semi_sync_master_status';
echo [ enable semi-sync on the fly ];
set global rpl_semi_sync_master_enabled=1;
show variables like 'rpl_semi_sync_master_enabled';
show status like 'Rpl_semi_sync_master_status';
--echo #
--echo # Test RESET MASTER/SLAVE
--echo #
connection slave;
source include/start_slave.inc;
connection master;
replace_result $engine_type ENGINE_TYPE;
eval create table t1 (a int) engine = $engine_type;
drop table t1;
sync_slave_with_master;
echo [ test reset master ];
connection master;
reset master;
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
connection slave;
source include/stop_slave.inc;
reset slave;
# Kill the dump thread on master for previous slave connection and wait for it to exit
--source include/kill_binlog_dump_threads.inc
connection slave;
source include/start_slave.inc;
connection master;
# Wait for the dump thread to start; Rpl_semi_sync_master_clients will be
# 1 after the dump thread has started.
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
replace_result $engine_type ENGINE_TYPE;
eval create table t1 (a int) engine = $engine_type;
insert into t1 values (1);
insert into t1 values (2), (3);
sync_slave_with_master;
select * from t1;
connection master;
echo [ master semi-sync status should be ON ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # Start semi-sync replication without SUPER privilege
--echo #
connection slave;
source include/stop_slave.inc;
reset slave;
connection master;
reset master;
# Kill the dump thread on master for previous slave connection and wait for it to exit
--source include/kill_binlog_dump_threads.inc
# Do not binlog the following statement because it will generate
# different events for ROW and STATEMENT format
set sql_log_bin=0;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
set sql_log_bin=1;
connection slave;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
change master to master_user='rpl',master_password='rpl_password';
source include/start_slave.inc;
show status like 'Rpl_semi_sync_slave_status';
connection master;
# Wait for the semi-sync binlog dump thread to start
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
echo [ master semi-sync should be ON ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
insert into t1 values (4);
insert into t1 values (5);
echo [ master semi-sync should be ON ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # Test semi-sync slave connect to non-semi-sync master
--echo #
# Disable semi-sync on master
connection slave;
source include/stop_slave.inc;
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
connection master;
# Kill the dump thread on master for previous slave connection and wait for it to exit
--source include/kill_binlog_dump_threads.inc
echo [ Semi-sync status on master should be ON ];
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 0;
source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_status';
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
set global rpl_semi_sync_master_enabled= 0;
connection slave;
SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
source include/start_slave.inc;
connection master;
insert into t1 values (8);
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
echo [ master semi-sync clients should be 1, status should be OFF ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
sync_slave_with_master;
show status like 'Rpl_semi_sync_slave_status';
# Uninstall semi-sync plugin on master
connection slave;
source include/stop_slave.inc;
connection master;
set global rpl_semi_sync_master_enabled= 0;
connection slave;
SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
source include/start_slave.inc;
connection master;
insert into t1 values (10);
sync_slave_with_master;
--echo #
--echo # Test non-semi-sync slave connect to semi-sync master
--echo #
connection master;
set global rpl_semi_sync_master_timeout= 5000; # 5s
set global rpl_semi_sync_master_enabled= 1;
connection slave;
source include/stop_slave.inc;
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
echo [ uninstall semi-sync slave plugin ];
set global rpl_semi_sync_slave_enabled= 0;
echo [ reinstall semi-sync slave plugin and disable semi-sync ];
SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
source include/start_slave.inc;
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
--echo #
--echo # Clean up
--echo #
connection slave;
source include/stop_slave.inc;
set global rpl_semi_sync_slave_enabled= 0;
connection master;
set global rpl_semi_sync_master_enabled= 0;
connection slave;
change master to master_user='root',master_password='';
source include/start_slave.inc;
connection master;
drop table t1;
sync_slave_with_master;
connection master;
drop user rpl@127.0.0.1;
flush privileges;
set global rpl_semi_sync_master_timeout= default;
--source include/rpl_end.inc

View File

@ -1,402 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it.
#
# Usage:
#
# --let $use_remote_mysqlbinlog= 1 # optional
# --source suite/rpl/include/rpl_skip_replication.inc
#
# The script uses MYSQLBINLOG to verify certain results.
# By default, it reads the binary logs directly. If that is undesirable,
# this behavior can be overridden by setting $use_remote_mysqlbinlog
# as shown above.
# The value will be unset after every execution of the script,
# so if it is needed, it should be set explicitly before each call.
#
--source include/have_innodb.inc
--source include/master-slave.inc
connection slave;
# Test that SUPER is required to change @@replicate_events_marked_for_skip.
CREATE USER 'nonsuperuser'@'127.0.0.1';
GRANT ALTER,CREATE,DELETE,DROP,EVENT,INSERT,PROCESS,REPLICATION SLAVE,
SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1';
connect(nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,);
connection nonpriv;
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
disconnect nonpriv;
connection slave;
DROP USER'nonsuperuser'@'127.0.0.1';
SELECT @@global.replicate_events_marked_for_skip;
--error ER_SLAVE_MUST_STOP
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
SELECT @@global.replicate_events_marked_for_skip;
STOP SLAVE;
--error ER_GLOBAL_VARIABLE
SET SESSION replicate_events_marked_for_skip=FILTER_ON_MASTER;
SELECT @@global.replicate_events_marked_for_skip;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
SELECT @@global.replicate_events_marked_for_skip;
START SLAVE;
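# replicate_events_marked_for_skip takes three values: REPLICATE (the default,
# apply everything), FILTER_ON_SLAVE (flagged events reach the slave but are
# not applied) and FILTER_ON_MASTER (the master does not even send them).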
connection master;
SELECT @@skip_replication;
--error ER_LOCAL_VARIABLE
SET GLOBAL skip_replication=1;
SELECT @@skip_replication;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=innodb;
INSERT INTO t1(a) VALUES (1);
INSERT INTO t2(a) VALUES (1);
# Test that master-side filtering works.
SET skip_replication=1;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
INSERT INTO t1(a) VALUES (2);
INSERT INTO t2(a) VALUES (2);
# Inject a rotate event in the binlog stream sent to slave (otherwise we will
# fail sync_slave_with_master as the last event on the master is not present
# on the slave).
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
connection slave;
SHOW TABLES;
SELECT * FROM t1;
SELECT * FROM t2;
connection master;
DROP TABLE t3;
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
# Test that slave-side filtering works.
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
START SLAVE;
connection master;
SET skip_replication=1;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
INSERT INTO t1(a) VALUES (3);
INSERT INTO t2(a) VALUES (3);
# Inject a rotate event in the binlog stream sent to slave (otherwise we will
# fail sync_slave_with_master as the last event on the master is not present
# on the slave).
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
connection slave;
SHOW TABLES;
SELECT * FROM t1;
SELECT * FROM t2;
connection master;
DROP TABLE t3;
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
# Test that events with @@skip_replication=1 are not filtered when filtering is
# not set on slave.
connection master;
SET skip_replication=1;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
INSERT INTO t3(a) VALUES(2);
sync_slave_with_master;
connection slave;
SELECT * FROM t3;
connection master;
DROP TABLE t3;
#
# Test that the slave will preserve the @@skip_replication flag in its
# own binlog.
#
TRUNCATE t1;
sync_slave_with_master;
connection slave;
RESET MASTER;
connection master;
SET skip_replication=0;
INSERT INTO t1 VALUES (1,0);
SET skip_replication=1;
INSERT INTO t1 VALUES (2,0);
SET skip_replication=0;
INSERT INTO t1 VALUES (3,0);
sync_slave_with_master;
connection slave;
# Since slave has @@replicate_events_marked_for_skip=REPLICATE, it should have
# applied all events.
SELECT * FROM t1 ORDER by a;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
let $SLAVE_DATADIR= `select @@datadir`;
connection master;
TRUNCATE t1;
# Now apply the slave binlog to the master, to check that both the slave
# and mysqlbinlog will preserve the @@skip_replication flag.
--let $mysqlbinlog_args= $SLAVE_DATADIR/slave-bin.000001
if ($use_remote_mysqlbinlog)
{
--let $mysqlbinlog_args= --read-from-remote-server --protocol=tcp --host=127.0.0.1 --port=$SLAVE_MYPORT -uroot slave-bin.000001
--let $use_remote_mysqlbinlog= 0
}
--exec $MYSQL_BINLOG $mysqlbinlog_args > $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog
--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog
# The master should have all three events.
SELECT * FROM t1 ORDER by a;
# The slave should be missing event 2, which is marked with the
# @@skip_replication flag.
connection slave;
START SLAVE;
connection master;
sync_slave_with_master;
connection slave;
SELECT * FROM t1 ORDER by a;
#
# Test that @@sql_slave_skip_counter does not count skipped @@skip_replication
# events.
#
connection master;
TRUNCATE t1;
sync_slave_with_master;
connection slave;
STOP SLAVE;
# We will skip two INSERTs (in addition to any skipped due to
# @@skip_replication). Since, as of 5.5, every statement is wrapped in
# BEGIN ... END, we need to skip 6 events for this.
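# (With statement-based logging each INSERT becomes roughly three events
# (BEGIN, the statement itself, COMMIT), hence 6 events for 2 statements.)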
SET GLOBAL sql_slave_skip_counter=6;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
START SLAVE;
connection master;
# Need to fix @@binlog_format to get consistent event count.
SET @old_binlog_format= @@binlog_format;
SET binlog_format= statement;
SET skip_replication=0;
INSERT INTO t1 VALUES (1,5);
SET skip_replication=1;
INSERT INTO t1 VALUES (2,5);
SET skip_replication=0;
INSERT INTO t1 VALUES (3,5);
INSERT INTO t1 VALUES (4,5);
SET binlog_format= @old_binlog_format;
sync_slave_with_master;
connection slave;
# The slave should have skipped the first three inserts (numbers 1 and 3 due
# to @@sql_slave_skip_counter skipping two statements, number 2 due to
# @@replicate_events_marked_for_skip=FILTER_ON_SLAVE). So only number 4
# should be left.
SELECT * FROM t1;
#
# Check that BINLOG statement preserves the @@skip_replication flag.
#
connection slave;
# Need row @@binlog_format for BINLOG statements containing row events.
--source include/stop_slave.inc
SET @old_slave_binlog_format= @@global.binlog_format;
SET GLOBAL binlog_format= row;
--source include/start_slave.inc
connection master;
TRUNCATE t1;
SET @old_binlog_format= @@binlog_format;
SET binlog_format= row;
# Format description log event.
BINLOG 'wlZOTw8BAAAA8QAAAPUAAAAAAAQANS41LjIxLU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAEzgNAAgAEgAEBAQEEgAA2QAEGggAAAAICAgCAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAA371saA==';
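# The first BINLOG statement supplies a Format_description event, which the
# server needs in order to decode the row events carried by the following
# BINLOG statements.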
# INSERT INTO t1 VALUES (1,8) # with @@skip_replication=1
BINLOG 'wlZOTxMBAAAAKgAAAGMBAAAAgCkAAAAAAAEABHRlc3QAAnQxAAIDAwAC
wlZOTxcBAAAAJgAAAIkBAAAAgCkAAAAAAAEAAv/8AQAAAAgAAAA=';
# INSERT INTO t1 VALUES (2,8) # with @@skip_replication=0
BINLOG 'wlZOTxMBAAAAKgAAADwCAAAAACkAAAAAAAEABHRlc3QAAnQxAAIDAwAC
wlZOTxcBAAAAJgAAAGICAAAAACkAAAAAAAEAAv/8AgAAAAgAAAA=';
SET binlog_format= @old_binlog_format;
SELECT * FROM t1 ORDER BY a;
sync_slave_with_master;
connection slave;
# Slave should have only the second insert, the first should be ignored due to
# the @@skip_replication flag.
SELECT * FROM t1 ORDER by a;
--source include/stop_slave.inc
SET GLOBAL binlog_format= @old_slave_binlog_format;
--source include/start_slave.inc
# Test that it is not possible to change @@skip_replication inside a
# transaction or statement, thereby replicating only parts of statements
# or transactions.
connection master;
SET skip_replication=0;
BEGIN;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=0;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=1;
ROLLBACK;
SET skip_replication=1;
BEGIN;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=0;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=1;
COMMIT;
SET autocommit=0;
INSERT INTO t2(a) VALUES(100);
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=1;
ROLLBACK;
SET autocommit=1;
SET skip_replication=1;
--delimiter |
CREATE FUNCTION foo (x INT) RETURNS INT BEGIN SET SESSION skip_replication=x; RETURN x; END|
CREATE PROCEDURE bar(x INT) BEGIN SET SESSION skip_replication=x; END|
CREATE FUNCTION baz (x INT) RETURNS INT BEGIN CALL bar(x); RETURN x; END|
--delimiter ;
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SELECT foo(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SELECT baz(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET @a= foo(1);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET @a= baz(1);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
UPDATE t2 SET b=foo(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
UPDATE t2 SET b=baz(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
INSERT INTO t1 VALUES (101, foo(1));
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
INSERT INTO t1 VALUES (101, baz(0));
SELECT @@skip_replication;
CALL bar(0);
SELECT @@skip_replication;
CALL bar(1);
SELECT @@skip_replication;
DROP FUNCTION foo;
DROP PROCEDURE bar;
DROP FUNCTION baz;
# Test that master-side filtering happens on the master side, and that
# slave-side filtering happens on the slave.
# First test that events do not reach the slave when master-side filtering
# is configured. Do this by replicating first with only the IO thread running
# and master-side filtering; then change to no filtering and start the SQL
# thread. This should still skip the events, as master-side filtering
# means the events never reached the slave.
connection master;
SET skip_replication= 0;
TRUNCATE t1;
sync_slave_with_master;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
START SLAVE IO_THREAD;
connection master;
SET skip_replication= 1;
INSERT INTO t1(a) VALUES (1);
SET skip_replication= 0;
INSERT INTO t1(a) VALUES (2);
--source include/save_master_pos.inc
connection slave;
--source include/sync_io_with_master.inc
STOP SLAVE IO_THREAD;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
connection master;
sync_slave_with_master;
connection slave;
# Now only the second insert, of (2), should be visible: the first was
# filtered on the master, so even though the SQL thread ran without skipping
# events, it never saw that event in the first place.
SELECT * FROM t1;
# Now tests that when slave-side filtering is configured, events _do_ reach
# the slave.
connection master;
SET skip_replication= 0;
TRUNCATE t1;
sync_slave_with_master;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
START SLAVE IO_THREAD;
connection master;
SET skip_replication= 1;
INSERT INTO t1(a) VALUES (1);
SET skip_replication= 0;
INSERT INTO t1(a) VALUES (2);
--source include/save_master_pos.inc
connection slave;
--source include/sync_io_with_master.inc
STOP SLAVE IO_THREAD;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
connection master;
sync_slave_with_master;
connection slave;
# Now both inserts should be visible. Since filtering was configured to be
# slave-side, the event is in the relay log, and when the SQL thread ran we
# had disabled filtering again.
SELECT * FROM t1 ORDER BY a;
# Clean up.
connection master;
SET skip_replication=0;
DROP TABLE t1,t2;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
--source include/rpl_end.inc

View File

@ -1,32 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
################################################################################
# Bug#19855907 IO THREAD AUTHENTICATION ISSUE WITH SOME CHARACTER SETS
# Problem: IO thread fails to connect to master if servers are configured with
# special character sets like utf16, utf32, ucs2.
#
# Analysis: The MySQL server does not support some special character sets
# (e.g. utf16, utf32 and ucs2) as the client character set.
# When the IO thread tries to connect to the master, it uses the server's
# character set as the client character set. When the slave server is started
# with one of these special character sets, the IO thread (a connection to
# the master) therefore fails to connect.
#
# Fix: If the server's character set is not supported as a client character
# set, fall back to the default client character set (latin1).
###############################################################################
--source include/master-slave.inc
call mtr.add_suppression("'utf16' can not be used as client character set");
CREATE TABLE t1(i VARCHAR(20));
INSERT INTO t1 VALUES (0xFFFF);
--sync_slave_with_master
--let diff_tables=master:t1, slave:t1
--source include/diff_tables.inc
# Cleanup
--connection master
DROP TABLE t1;
--source include/rpl_end.inc

View File

@ -1,32 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# Test that replication can continue when the master sporadically fails on
# COM_BINLOG_DUMP and additionally limits the number of events per dump
source include/master-slave.inc;
create table t2(n int);
create table t1(n int not null auto_increment primary key);
insert into t1 values (NULL),(NULL);
truncate table t1;
# We have to use 4 in the following to make this test work with all table types
insert into t1 values (4),(NULL);
sync_slave_with_master;
--source include/stop_slave.inc
--source include/start_slave.inc
connection master;
insert into t1 values (NULL),(NULL);
flush logs;
truncate table t1;
insert into t1 values (10),(NULL),(NULL),(NULL),(NULL),(NULL);
sync_slave_with_master;
select * from t1 ORDER BY n;
connection master;
drop table t1,t2;
sync_slave_with_master;
--source include/rpl_end.inc


@ -1,116 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
source include/have_ssl_communication.inc;
source include/master-slave.inc;
source include/no_valgrind_without_big.inc;
# create a user for replication that requires ssl encryption
connection master;
create user replssl@localhost;
grant replication slave on *.* to replssl@localhost require ssl;
create table t1 (t int auto_increment, KEY(t));
sync_slave_with_master;
# Set slave to use SSL for connection to master
stop slave;
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
eval change master to
master_user='replssl',
master_password='',
master_ssl=1,
master_ssl_ca ='$MYSQL_TEST_DIR/std_data/cacert.pem',
master_ssl_cert='$MYSQL_TEST_DIR/std_data/client-cert.pem',
master_ssl_key='$MYSQL_TEST_DIR/std_data/client-key.pem';
start slave;
# Switch to master and insert one record, then sync it to slave
connection master;
insert into t1 values(1);
sync_slave_with_master;
# The record should now be on slave
select * from t1;
# The slave is synced and waiting/reading from master
# SHOW SLAVE STATUS will show "Waiting for master to send event"
let $status_items= Master_SSL_Allowed, Master_SSL_CA_Path, Master_SSL_CA_File, Master_SSL_Crl, Master_SSL_Crlpath, Master_SSL_Cert, Master_SSL_Key;
source include/show_slave_status.inc;
source include/check_slave_is_running.inc;
# Stop the slave, as reported in bug#21871 it would hang
STOP SLAVE;
select * from t1;
# Do the same thing a number of times
disable_query_log;
disable_result_log;
# 2007-11-27 mats Bug #32756 Starting and stopping the slave in a loop can lose rows
# After discussions with Engineering, I'm disabling this part of the test to avoid it causing
# red trees.
disable_parsing;
let $i= 100;
while ($i)
{
start slave;
connection master;
insert into t1 values (NULL);
select * from t1; # Some variance
connection slave;
select * from t1; # Some variance
stop slave;
dec $i;
}
enable_parsing;
START SLAVE;
enable_query_log;
enable_result_log;
connection master;
# INSERT one more record to make sure
# the sync has something to do
insert into t1 values (NULL);
let $master_count= `select count(*) from t1`;
sync_slave_with_master;
--source include/wait_for_slave_to_start.inc
source include/show_slave_status.inc;
source include/check_slave_is_running.inc;
let $slave_count= `select count(*) from t1`;
if ($slave_count != $master_count)
{
echo master and slave differed in number of rows;
echo master: $master_count;
echo slave: $slave_count;
connection master;
select count(*) from t1;
select * from t1;
connection slave;
select count(*) from t1;
select * from t1;
query_vertical show slave status;
}
connection master;
drop user replssl@localhost;
drop table t1;
sync_slave_with_master;
--source include/stop_slave.inc
CHANGE MASTER TO
master_user = 'root',
master_ssl = 0,
master_ssl_ca = '',
master_ssl_cert = '',
master_ssl_key = '';
--echo End of 5.0 tests
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc


@ -1,107 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#
# BUG#12400313 / BUG#64503 test case
#
#
# Description
# -----------
#
# This test case starts the slave server with:
# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096
#
# Then it issues some queries that will cause the slave to reach
# relay-log-space-limit. We lock the table so that the SQL thread is
# not able to purge the log and then we issue some more statements.
#
# The purpose is to show that the IO thread honors the limit while the SQL
# thread is unable to purge the relay logs, which did not happen before this
# patch. In addition, we assert that while the limit is being ignored (the
# SQL thread needs to rotate before purging), the IO thread does not exceed
# it in an uncontrolled manner.
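# (As asserted near the end of this file, the relay log space is allowed to
# exceed relay_log_space_limit only by a small margin; the assertion below
# uses a factor of 1.15.)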
--source include/have_binlog_format_statement.inc
--source include/have_innodb.inc
--source include/master-slave.inc
--disable_query_log
CREATE TABLE t1 (c1 TEXT) engine=InnoDB;
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
--sync_slave_with_master
# wait for the SQL thread to sleep
--let $show_statement= SHOW PROCESSLIST
--let $field= State
--let $condition= = 'Slave has read all relay log; waiting for the slave I/O thread to update it'
--source include/wait_show_condition.inc
# Now the IO thread has set rli->ignore_space_limit.
# Let's lock the table so that once the SQL thread wakes up
# it blocks there and does not set rli->ignore_space_limit
# back to zero.
LOCK TABLE t1 WRITE;
# now issue more statements that will overflow the
# rli->log_space_limit (in this case ~10K)
--connection master
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
--connection slave
# ASSERT that the IO thread waits for the SQL thread to release some
# space before continuing
--let $show_statement= SHOW PROCESSLIST
--let $field= State
--let $condition= LIKE 'Waiting for %'
# before the patch (IO would have transferred everything)
#--let $condition= = 'Waiting for master to send event'
# after the patch (now it waits for space to be freed)
#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space'
--source include/wait_show_condition.inc
# without the patch we can uncomment the following two lines and
# watch the IO thread synchronize with the master, thus writing
# relay logs way over the space limit
#--connection master
#--source include/sync_slave_io_with_master.inc
## ASSERT that the IO thread has honored the limit plus the few bytes required to be able to purge
--let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1)
--let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1)
--let $assert_text= Assert that relay log space is close to the limit
--let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15
--source include/assert.inc
# unlock the table and let SQL thread continue applying events
UNLOCK TABLES;
--connection master
--sync_slave_with_master
--let $diff_tables=master:test.t1,slave:test.t1
--source include/diff_tables.inc
--connection master
DROP TABLE t1;
--enable_query_log
--sync_slave_with_master
--source include/rpl_end.inc


@ -1,633 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#
# rpl_switch_stm_row_mixed covers:
#
# - the master switching explicitly between STATEMENT, ROW, and MIXED
#   binlog formats, showing when this is possible and when it is not;
# - the master switching implicitly from MIXED to RBR, listing all use
#   cases (e.g. a query that invokes UUID()), thereby serving as the
#   definition of the MIXED binlog format;
# - correctness of execution.
-- source include/have_binlog_format_mixed_or_row.inc
-- source include/master-slave.inc
# Since this test generates row-based events in the binary log, the
# slave SQL thread cannot be in STATEMENT mode to execute this test,
# so we only execute it for MIXED and ROW as default value of
# BINLOG_FORMAT.
connection slave;
connection master;
--disable_warnings
drop database if exists mysqltest1;
create database mysqltest1;
--enable_warnings
use mysqltest1;
# Save binlog format
set @my_binlog_format= @@global.binlog_format;
# play with switching
set session binlog_format=mixed;
show session variables like "binlog_format%";
set session binlog_format=statement;
show session variables like "binlog_format%";
set session binlog_format=row;
show session variables like "binlog_format%";
set global binlog_format=DEFAULT;
show global variables like "binlog_format%";
set global binlog_format=MIXED;
show global variables like "binlog_format%";
set global binlog_format=STATEMENT;
show global variables like "binlog_format%";
set global binlog_format=ROW;
show global variables like "binlog_format%";
show session variables like "binlog_format%";
select @@global.binlog_format, @@session.binlog_format;
CREATE TABLE t1 (a varchar(100));
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
set @string="emergency_1_";
insert into t1 values("work_2_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(concat(UUID(),"work_3_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(concat("for_4_",UUID()));
insert into t1 select "yesterday_5_";
# verify that temp tables prevent a switch to SBR
create temporary table tmp(a char(100));
insert into tmp values("see_6_");
--error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
set binlog_format=statement;
insert into t1 select * from tmp;
drop temporary table tmp;
# Now we go to SBR
set binlog_format=statement;
show global variables like "binlog_format%";
show session variables like "binlog_format%";
select @@global.binlog_format, @@session.binlog_format;
set global binlog_format=statement;
show global variables like "binlog_format%";
show session variables like "binlog_format%";
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select ?';
set @string="emergency_7_";
insert into t1 values("work_8_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values("work_9_");
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values("for_10_");
insert into t1 select "yesterday_11_";
# test statement (is not default after wl#3368)
set binlog_format=statement;
select @@global.binlog_format, @@session.binlog_format;
set global binlog_format=statement;
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select ?';
set @string="emergency_12_";
insert into t1 values("work_13_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values("work_14_");
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values("for_15_");
insert into t1 select "yesterday_16_";
# and now the mixed mode
set global binlog_format=mixed;
select @@global.binlog_format, @@session.binlog_format;
set binlog_format=default;
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
set @string="emergency_17_";
insert into t1 values("work_18_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(concat(UUID(),"work_19_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(concat("for_20_",UUID()));
insert into t1 select "yesterday_21_";
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(concat(UUID(),"work_22_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(concat("for_23_",UUID()));
insert into t1 select "yesterday_24_";
# Test of CREATE TABLE SELECT
create table t2 ENGINE=MyISAM select rpad(UUID(),100,' ');
create table t3 select 1 union select UUID();
--disable_warnings
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3);
--enable_warnings
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
# what if UUID() is first:
--disable_warnings
insert ignore into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
--enable_warnings
# inside a stored procedure
delimiter |;
create procedure foo()
begin
insert into t1 values("work_25_");
insert into t1 values(concat("for_26_",UUID()));
insert into t1 select "yesterday_27_";
end|
create procedure foo2()
begin
insert into t1 values(concat("emergency_28_",UUID()));
insert into t1 values("work_29_");
insert into t1 values(concat("for_30_",UUID()));
set session binlog_format=row; # accepted for stored procs
insert into t1 values("more work_31_");
set session binlog_format=mixed;
end|
create function foo3() returns bigint unsigned
begin
set session binlog_format=row; # rejected for stored funcs
insert into t1 values("alarm");
return 100;
end|
create procedure foo4(x varchar(100))
begin
insert into t1 values(concat("work_250_",x));
insert into t1 select "yesterday_270_";
end|
delimiter ;|
call foo();
call foo2();
call foo4("hello");
call foo4(UUID());
call foo4("world");
# test that we can't SET binlog_format in a stored function
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
select foo3();
select * from t1 where a="alarm";
# Tests of stored functions/triggers/views for BUG#20930 "Mixed
# binlogging mode does not work with stored functions, triggers,
# views"
# Function which calls procedure
drop function foo3;
delimiter |;
create function foo3() returns bigint unsigned
begin
insert into t1 values("foo3_32_");
call foo();
return 100;
end|
delimiter ;|
insert into t2 select foo3();
prepare stmt1 from 'insert into t2 select foo3()';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# Test if stored function calls stored function which calls procedure
# which requires row-based.
delimiter |;
create function foo4() returns bigint unsigned
begin
insert into t2 select foo3();
return 100;
end|
delimiter ;|
select foo4();
prepare stmt1 from 'select foo4()';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# A simple stored function
delimiter |;
create function foo5() returns bigint unsigned
begin
insert into t2 select UUID();
return 100;
end|
delimiter ;|
select foo5();
prepare stmt1 from 'select foo5()';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# A simple stored function where UUID() is in the argument
delimiter |;
create function foo6(x varchar(100)) returns bigint unsigned
begin
insert into t2 select x;
return 100;
end|
delimiter ;|
select foo6("foo6_1_");
select foo6(concat("foo6_2_",UUID()));
prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# Test of views using UUID()
create view v1 as select uuid();
create table t11 (data varchar(255));
insert into t11 select * from v1;
# Test of querying INFORMATION_SCHEMA which parses the view's body,
# to verify that it binlogs statement-based (is not polluted by
# the parsing of the view's body).
insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11');
prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')";
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# Test of triggers with UUID()
delimiter |;
create trigger t11_bi before insert on t11 for each row
begin
set NEW.data = concat(NEW.data,UUID());
end|
delimiter ;|
insert into t11 values("try_560_");
# Test that INSERT DELAYED works in mixed mode (BUG#20649)
insert delayed into t2 values("delay_1_");
insert delayed into t2 values(concat("delay_2_",UUID()));
insert delayed into t2 values("delay_6_");
# Test for BUG#20633 (INSERT DELAYED RAND()/user_variable does not
# replicate fine in statement-based ; we test that in mixed mode it
# works).
insert delayed into t2 values(rand());
set @a=2.345;
insert delayed into t2 values(@a);
# With INSERT DELAYED, rows are written to the binlog after they are
# written to the table. Therefore, it is not enough to wait until the
# rows make it to t2 on the master (the rows may not be in the binlog
# at that time, and may still not be in the binlog when
# sync_slave_with_master is later called). Instead, we wait until the
# rows make it to t2 on the slave. We first call
# sync_slave_with_master, so that we are sure that t2 has been created
# on the slave.
sync_slave_with_master;
let $wait_condition= SELECT COUNT(*) = 19 FROM mysqltest1.t2;
--source include/wait_condition.inc
connection master;
# If you want to do manual testing of the mixed mode regarding UDFs (not
# testable automatically, as it is quite platform- and compiler-dependent),
# you just need to set the variable below to 1, and to
# "make udf_example.so" in sql/, and to copy sql/udf_example.so to
# MYSQL_TEST_DIR/lib/mysql.
let $you_want_to_test_UDF=0;
if ($you_want_to_test_UDF)
{
CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so';
prepare stmt1 from 'insert into t1 select metaphon(?)';
set @string="emergency_133_";
insert into t1 values("work_134_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(metaphon("work_135_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(metaphon("for_136_"));
insert into t1 select "yesterday_137_";
create table t6 select metaphon("for_138_");
create table t7 select 1 union select metaphon("for_139_");
create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3);
create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
}
create table t20 select * from t1; # save for comparing later
create table t21 select * from t2;
create table t22 select * from t3;
drop table t1,t2,t3;
# This tests the fix to
# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog
# We verify that under the mixed binlog mode, a stored function
# modifying at least two tables having an auto_increment column,
# is binlogged row-based. Indeed, in statement-based binlogging only the
# auto_increment value generated for the first table is recorded in the
# binlog, while the value generated for the second table is missing.
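# (Put differently, as a background note: the statement-based binlog carries
# only one generated insert id per statement, via the Intvar/SET INSERT_ID
# mechanism, so the second table's value cannot be reproduced on the slave;
# row format records both rows explicitly.)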
create table t1 (a int primary key auto_increment, b varchar(100));
create table t2 (a int primary key auto_increment, b varchar(100));
create table t3 (b varchar(100));
delimiter |;
create function f (x varchar(100)) returns int deterministic
begin
insert into t1 values(null,x);
insert into t2 values(null,x);
return 1;
end|
delimiter ;|
select f("try_41_");
# Two operations which compensate each other except that their net
# effect is that they advance the auto_increment counter of t2 on slave:
sync_slave_with_master;
use mysqltest1;
insert into t2 values(2,null),(3,null),(4,null);
delete from t2 where a>=2;
connection master;
# this is the call which didn't replicate well
select f("try_42_");
sync_slave_with_master;
# now use prepared statement and test again, just to see that the RBB
# mode isn't set at PREPARE but at EXECUTE.
insert into t2 values(3,null),(4,null);
delete from t2 where a>=3;
connection master;
prepare stmt1 from 'select f(?)';
set @string="try_43_";
insert into t1 values(null,"try_44_"); # should be SBB
execute stmt1 using @string; # should be RBB
deallocate prepare stmt1;
sync_slave_with_master;
# verify that if only one table has auto_inc, it does not trigger RBB
# (we'll check in binlog further below)
connection master;
create table t12 select * from t1; # save for comparing later
drop table t1;
create table t1 (a int, b varchar(100), key(a));
select f("try_45_");
# restore table's key
create table t13 select * from t1;
drop table t1;
create table t1 (a int primary key auto_increment, b varchar(100));
# now test if it's two functions, each of them inserts in one table
drop function f;
# we need a unique key to have sorting of rows by mysqldump
create table t14 (unique (a)) select * from t2;
truncate table t2;
delimiter |;
create function f1 (x varchar(100)) returns int deterministic
begin
insert into t1 values(null,x);
return 1;
end|
create function f2 (x varchar(100)) returns int deterministic
begin
insert into t2 values(null,x);
return 1;
end|
delimiter ;|
select f1("try_46_"),f2("try_47_");
sync_slave_with_master;
insert into t2 values(2,null),(3,null),(4,null);
delete from t2 where a>=2;
connection master;
# Test with SELECT and INSERT
select f1("try_48_"),f2("try_49_");
insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_")));
sync_slave_with_master;
# verify that if f2 only reads from an auto_inc table, this does not
# switch to RBB
connection master;
drop function f2;
delimiter |;
create function f2 (x varchar(100)) returns int deterministic
begin
declare y int;
insert into t1 values(null,x);
set y = (select count(*) from t2);
return y;
end|
delimiter ;|
select f1("try_53_"),f2("try_54_");
sync_slave_with_master;
# And now, a normal statement with a trigger (no stored functions)
connection master;
drop function f2;
delimiter |;
create trigger t1_bi before insert on t1 for each row
begin
insert into t2 values(null,"try_55_");
end|
delimiter ;|
insert into t1 values(null,"try_56_");
# and now remove one auto_increment and verify SBB
alter table t1 modify a int, drop primary key;
insert into t1 values(null,"try_57_");
sync_slave_with_master;
# Test for BUG#20499 "mixed mode with temporary table breaks binlog"
# Slave used to have only 2 rows instead of 3.
connection master;
CREATE TEMPORARY TABLE t15 SELECT UUID();
create table t16 like t15;
INSERT INTO t16 SELECT * FROM t15;
# we'll verify that this one is done RBB
insert into t16 values("try_65_");
drop table t15;
# we'll verify that this one is done SBB
insert into t16 values("try_66_");
sync_slave_with_master;
# and now compare:
connection master;
# first check that data on master is sensible
select count(*) from t1;
select count(*) from t2;
select count(*) from t3;
select count(*) from t4;
select count(*) from t5;
select count(*) from t11;
select count(*) from t20;
select count(*) from t21;
select count(*) from t22;
select count(*) from t12;
select count(*) from t13;
select count(*) from t14;
select count(*) from t16;
if ($you_want_to_test_UDF)
{
select count(*) from t6;
select count(*) from t7;
select count(*) from t8;
select count(*) from t9;
}
sync_slave_with_master;
#
# Bug#20863 If binlog format is changed between update and unlock of
# tables, wrong binlog
#
connection master;
DROP TABLE IF EXISTS t11;
SET SESSION BINLOG_FORMAT=STATEMENT;
CREATE TABLE t11 (song VARCHAR(255));
LOCK TABLES t11 WRITE;
SET SESSION BINLOG_FORMAT=ROW;
INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict');
SET SESSION BINLOG_FORMAT=STATEMENT;
INSERT INTO t11 VALUES('Careful With That Axe, Eugene');
UNLOCK TABLES;
--query_vertical SELECT * FROM t11
sync_slave_with_master;
USE mysqltest1;
--query_vertical SELECT * FROM t11
connection master;
DROP TABLE IF EXISTS t12;
SET SESSION BINLOG_FORMAT=MIXED;
CREATE TABLE t12 (data LONG);
LOCK TABLES t12 WRITE;
INSERT INTO t12 VALUES(UUID());
UNLOCK TABLES;
sync_slave_with_master;
#
# BUG#28086: SBR of USER() becomes corrupted on slave
#
connection master;
# Just to get something that is non-trivial, albeit still simple, we
# stuff the result of USER() and CURRENT_USER() into a variable.
--delimiter $$
CREATE FUNCTION my_user()
RETURNS CHAR(64)
BEGIN
DECLARE user CHAR(64);
SELECT USER() INTO user;
RETURN user;
END $$
--delimiter ;
--delimiter $$
CREATE FUNCTION my_current_user()
RETURNS CHAR(64)
BEGIN
DECLARE user CHAR(64);
SELECT CURRENT_USER() INTO user;
RETURN user;
END $$
--delimiter ;
DROP TABLE IF EXISTS t13;
CREATE TABLE t13 (data CHAR(64));
INSERT INTO t13 VALUES (USER());
INSERT INTO t13 VALUES (my_user());
INSERT INTO t13 VALUES (CURRENT_USER());
INSERT INTO t13 VALUES (my_current_user());
sync_slave_with_master;
# as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql
# Let's compare. Note: if the dumps match, the test will pass; if they do not
# match, the test will show that the diff statement failed and no reject file
# will be created. You will need to go to the mysql-test dir and diff the
# files yourself to see what does not match.
diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
connection master;
# Now test that mysqlbinlog works fine on a binlog generated by the
# mixed mode
# BUG#11312 "DELIMITER is not written to the binary log that causes
# syntax error" makes that mysqlbinlog will fail if we pass it the
# text of queries; this forces us to use --base64-output here.
# BUG#20929 "BINLOG command causes invalid free plus assertion
# failure" makes mysqld segfault when receiving --base64-output
# So I can't enable this piece of test
# SIGH
if ($enable_when_11312_or_20929_fixed)
{
--exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
drop database mysqltest1;
--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
# the old mysqldump output on the slave is the same as what was on the
# master before restoring on the master.
diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
}
drop database mysqltest1;
sync_slave_with_master;
connection master;
# Restore binlog format setting
set global binlog_format =@my_binlog_format;
--source include/rpl_end.inc


@ -1,159 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
########################################################################################
# This test verifies the options --sync-relay-log-info and --relay-log-recovery by
# crashing the slave in two different situations:
# (case-1) - Corrupt the relay log with changes that were not yet processed by
# the SQL Thread, then crash the slave.
# (case-2) - Corrupt the master.info with wrong coordinates, then crash the slave.
#
# Case 1:
# 1 - Stops the SQL Thread
# 2 - Inserts new records into the master.
# 3 - Corrupts the relay-log.bin* which most likely has such changes.
# 4 - Crashes the slave
# 5 - Verifies that the slave is in sync with the master, which means that the
# information loss was circumvented by the recovery process.
#
# Case 2:
# 1 - Stops the SQL/IO Threads
# 2 - Inserts new records into the master.
# 3 - Corrupts the master.info with wrong coordinates.
# 4 - Crashes the slave
# 5 - Verifies that the slave is in sync with the master, which means that the
# information loss was circumvented by the recovery process.
########################################################################################
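# (Background note: with --relay-log-recovery the slave discards not-yet-applied
# relay log contents at startup and re-fetches them from the master starting at
# the SQL thread position, which is what makes the corruption above recoverable;
# --sync-relay-log-info controls how often that position is synced to disk.)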
########################################################################################
# Configuring the environment
########################################################################################
--echo =====Configuring the environment=======;
--source include/not_embedded.inc
--source include/not_valgrind.inc
--source include/have_debug.inc
--source include/have_innodb.inc
--source include/not_crashrep.inc
--source include/master-slave.inc
call mtr.add_suppression('Attempting backtrace');
call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001");
# Use innodb so we do not get "table should be repaired" issues.
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
flush tables;
CREATE TABLE t1(a INT, PRIMARY KEY(a)) engine=innodb;
insert into t1(a) values(1);
insert into t1(a) values(2);
insert into t1(a) values(3);
########################################################################################
# Case 1: Corrupt a relay-log.bin*
########################################################################################
--echo =====Inserting data on the master but without the SQL Thread being running=======;
sync_slave_with_master;
connection slave;
let $MYSQLD_SLAVE_DATADIR= `select @@datadir`;
--replace_result $MYSQLD_SLAVE_DATADIR MYSQLD_SLAVE_DATADIR
--copy_file $MYSQLD_SLAVE_DATADIR/master.info $MYSQLD_SLAVE_DATADIR/master.backup
--source include/stop_slave_sql.inc
connection master;
insert into t1(a) values(4);
insert into t1(a) values(5);
insert into t1(a) values(6);
--echo =====Removing relay log files and crashing/recovering the slave=======;
connection slave;
--source include/stop_slave_io.inc
let $file= query_get_value("SHOW SLAVE STATUS", Relay_Log_File, 1);
--let FILE_TO_CORRUPT= $MYSQLD_SLAVE_DATADIR/$file
perl;
$file= $ENV{'FILE_TO_CORRUPT'};
open(FILE, ">$file") || die "Unable to open $file.";
truncate(FILE,0);
print FILE "failure";
close (FILE);
EOF
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
SET SESSION debug_dbug="d,crash_before_rotate_relaylog";
--error 2013
FLUSH LOGS;
--let $rpl_server_number= 2
--source include/rpl_reconnect.inc
--echo =====Dumping and comparing tables=======;
--source include/start_slave.inc
connection master;
sync_slave_with_master;
let $diff_tables=master:t1,slave:t1;
source include/diff_tables.inc;
########################################################################################
# Case 2: Corrupt a master.info
########################################################################################
--echo =====Corrupting the master.info=======;
connection slave;
--source include/stop_slave.inc
connection master;
FLUSH LOGS;
insert into t1(a) values(7);
insert into t1(a) values(8);
insert into t1(a) values(9);
connection slave;
let MYSQLD_SLAVE_DATADIR=`select @@datadir`;
--perl
use strict;
use warnings;
my $src= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.backup";
my $dst= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.info";
open(FILE, "<", $src) or die;
my @content= <FILE>;
close FILE;
open(FILE, ">", $dst) or die;
binmode FILE;
print FILE @content;
close FILE;
EOF
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
SET SESSION debug_dbug="d,crash_before_rotate_relaylog";
--error 2013
FLUSH LOGS;
--let $rpl_server_number= 2
--source include/rpl_reconnect.inc
--echo =====Dumping and comparing tables=======;
--source include/start_slave.inc
connection master;
sync_slave_with_master;
let $diff_tables=master:t1,slave:t1;
source include/diff_tables.inc;
########################################################################################
# Clean up
########################################################################################
--echo =====Clean up=======;
connection master;
drop table t1;
--remove_file $MYSQLD_SLAVE_DATADIR/master.backup
--source include/rpl_end.inc


@ -1,82 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
--source include/master-slave.inc
if ($force_master_mysql56_temporal_format)
{
connection master;
eval SET @@global.mysql56_temporal_format=$force_master_mysql56_temporal_format;
}
if ($force_slave_mysql56_temporal_format)
{
connection slave;
eval SET @@global.mysql56_temporal_format=$force_slave_mysql56_temporal_format;
}
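# (Usage note: a caller may set $force_master_mysql56_temporal_format and/or
# $force_slave_mysql56_temporal_format before sourcing this file to test
# replication between servers using different temporal storage formats.)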
connection master;
SELECT @@global.mysql56_temporal_format AS on_master;
connection slave;
SELECT @@global.mysql56_temporal_format AS on_slave;
connection master;
CREATE TABLE t1
(
c0 TIME(0),
c1 TIME(1),
c2 TIME(2),
c3 TIME(3),
c4 TIME(4),
c5 TIME(5),
c6 TIME(6)
);
CREATE TABLE t2
(
c0 TIMESTAMP(0),
c1 TIMESTAMP(1),
c2 TIMESTAMP(2),
c3 TIMESTAMP(3),
c4 TIMESTAMP(4),
c5 TIMESTAMP(5),
c6 TIMESTAMP(6)
);
CREATE TABLE t3
(
c0 DATETIME(0),
c1 DATETIME(1),
c2 DATETIME(2),
c3 DATETIME(3),
c4 DATETIME(4),
c5 DATETIME(5),
c6 DATETIME(6)
);
INSERT INTO t1 VALUES ('01:01:01','01:01:01.1','01:01:01.11','01:01:01.111','01:01:01.1111','01:01:01.11111','01:01:01.111111');
INSERT INTO t2 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111');
INSERT INTO t3 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111');
SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME;
sync_slave_with_master;
connection slave;
--query_vertical SELECT * FROM t1;
--query_vertical SELECT * FROM t2;
--query_vertical SELECT * FROM t3;
SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME;
connection master;
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
connection slave;
SET @@global.mysql56_temporal_format=DEFAULT;
connection master;
SET @@global.mysql56_temporal_format=DEFAULT;
--source include/rpl_end.inc


@ -1,78 +0,0 @@
#
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption suite).
# Please check all dependent tests after modifying it
#
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
connection slave;
set @saved_slave_type_conversions = @@global.slave_type_conversions;
CREATE TABLE type_conversions (
TestNo INT AUTO_INCREMENT PRIMARY KEY,
Source TEXT,
Target TEXT,
Flags TEXT,
On_Master LONGTEXT,
On_Slave LONGTEXT,
Expected LONGTEXT,
Compare INT,
Error TEXT);
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='';
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY';
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY';
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY';
SELECT @@global.slave_type_conversions;
--error ER_WRONG_VALUE_FOR_VAR
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY,NONEXISTING_BIT';
SELECT @@global.slave_type_conversions;
# Checking strict interpretation of type conversions
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='';
source suite/rpl/include/type_conversions.test;
# Checking lossy integer type conversions
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY';
source suite/rpl/include/type_conversions.test;
# Checking non-lossy integer type conversions
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY';
source suite/rpl/include/type_conversions.test;
# Checking all type conversions
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY';
source suite/rpl/include/type_conversions.test;
connection slave;
--echo **** Result of conversions ****
disable_query_log;
SELECT RPAD(Source, 15, ' ') AS Source_Type,
RPAD(Target, 15, ' ') AS Target_Type,
RPAD(Flags, 25, ' ') AS All_Type_Conversion_Flags,
IF(Compare IS NULL AND Error IS NOT NULL, '<Correct error>',
IF(Compare, '<Correct value>',
CONCAT("'", On_Slave, "' != '", Expected, "'")))
AS Value_On_Slave
FROM type_conversions;
enable_query_log;
DROP TABLE type_conversions;
call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t1. cannot be converted from type.* error.* 1677");
connection master;
DROP TABLE t1;
sync_slave_with_master;
set global slave_type_conversions = @saved_slave_type_conversions;
--source include/rpl_end.inc


@ -1 +1,438 @@
--source include/rpl_binlog_errors.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# Usage:
# --let $binlog_limit= X[,Y] # optional
#
# Semantics of the value is the same as in include/show_binlog_events.inc
# which the script calls as a part of the test flow.
# The goal is to print the event demonstrating the triggered error,
# so normally Y should be 1 (print the exact event only);
# however, depending on test-specific server options, the offset X
# can be different.
#
# BUG#46166: MYSQL_BIN_LOG::new_file_impl is not propagating error
# when generating new name.
#
# WHY
# ===
#
# We want to check whether error is reported or not when
# new_file_impl fails (this may happen when rotation is not
# possible because there is some problem finding a
# unique filename).
#
# HOW
# ===
#
# Test cases are documented inline.
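# (In this test, the inability to find a unique filename is simulated with the
# d,error_unique_log_filename debug injection used in the sections below.)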
-- source include/have_innodb.inc
-- source include/have_debug.inc
-- source include/master-slave.inc
-- echo #######################################################################
-- echo ####################### PART 1: MASTER TESTS ##########################
-- echo #######################################################################
### ACTION: stopping slave as it is not needed for the first part of
### the test
-- connection slave
-- source include/stop_slave.inc
-- connection master
call mtr.add_suppression("Can't generate a unique log-filename");
call mtr.add_suppression("Writing one row to the row-based binary log failed.*");
call mtr.add_suppression("Error writing file .*");
call mtr.add_suppression("Could not use master-bin for logging");
SET @old_debug= @@global.debug_dbug;
### ACTION: create a large file (> 4096 bytes) that will be later used
### in LOAD DATA INFILE to check binlog errors in its vicinity
-- let $load_file= $MYSQLTEST_VARDIR/tmp/bug_46166.data
-- let $MYSQLD_DATADIR= `select @@datadir`
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval SELECT repeat('x',8192) INTO OUTFILE '$load_file'
### ACTION: create a small file (< 4096 bytes) that will be later used
### in LOAD DATA INFILE to check for absence of binlog errors
### when loading this file does not force flushing and
### rotating the binary log
-- let $load_file2= $MYSQLTEST_VARDIR/tmp/bug_46166-2.data
-- let $MYSQLD_DATADIR= `select @@datadir`
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval SELECT repeat('x',10) INTO OUTFILE '$load_file2'
RESET MASTER;
-- echo ###################### TEST #1
### ASSERTION: no problem flushing logs (should show two binlogs)
FLUSH LOGS;
-- echo # assert: must show two binlogs
-- source include/show_binary_logs.inc
-- echo ###################### TEST #2
### ASSERTION: check that FLUSH LOGS actually fails and reports
### failure back to the user if find_uniq_filename fails
### (should show just one binlog)
RESET MASTER;
SET @@global.debug_dbug="d,error_unique_log_filename";
-- error ER_NO_UNIQUE_LOGFILE
FLUSH LOGS;
-- echo # assert: must show one binlog
-- source include/show_binary_logs.inc
### ACTION: clean up and move to next test
SET @@global.debug_dbug=@old_debug;
RESET MASTER;
-- echo ###################### TEST #3
### ACTION: create some tables (t1, t2, t4) and insert some values in
### table t1
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a VARCHAR(16384)) Engine=InnoDB;
CREATE TABLE t4 (a VARCHAR(16384));
INSERT INTO t1 VALUES (1);
RESET MASTER;
### ASSERTION: we force rotation of the binary log because it exceeds
### the max_binlog_size option (should show two binary
### logs)
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
# shows two binary logs
-- echo # assert: must show two binlog
-- source include/show_binary_logs.inc
# clean up the table and the binlog to be used in next part of test
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #4
### ASSERTION: load the big file into a transactional table and check
### that it reports an error. The table will contain the
### changes performed despite the fact that it reported an
### error.
SET @@global.debug_dbug="d,error_unique_log_filename";
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
# show table
-- echo # assert: must show one entry
SELECT count(*) FROM t2;
# clean up the table and the binlog to be used in next part of test
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #5
### ASSERTION: load the small file into a transactional table and
### check that it succeeds
SET @@global.debug_dbug="d,error_unique_log_filename";
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval LOAD DATA INFILE '$load_file2' INTO TABLE t2
# show table
-- echo # assert: must show one entry
SELECT count(*) FROM t2;
# clean up the table and the binlog to be used in next part of test
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #6
### ASSERTION: check that even if one is using a transactional table
### and explicit transactions (no autocommit), if rotation
### fails we still get the error. The transaction is not rolled back
### because rotation happens after the commit.
SET @@global.debug_dbug="d,error_unique_log_filename";
SET AUTOCOMMIT=0;
INSERT INTO t2 VALUES ('muse');
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
INSERT INTO t2 VALUES ('muse');
-- error ER_NO_UNIQUE_LOGFILE
COMMIT;
### ACTION: Show the contents of the table after the test
-- echo # assert: must show three entries
SELECT count(*) FROM t2;
### ACTION: clean up and move to the next test
SET AUTOCOMMIT= 1;
SET @@global.debug_dbug=@old_debug;
DELETE FROM t2;
RESET MASTER;
-- echo ###################### TEST #7
### ASSERTION: check that on a non-transactional table, if rotation
### fails then an error is reported and an incident event
### is written to the current binary log.
SET @@global.debug_dbug="d,error_unique_log_filename";
# Disable logging Annotate_rows events to preserve events count.
let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`;
SET @@binlog_annotate_row_events= 0;
SELECT count(*) FROM t4;
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4
-- echo # assert: must show 1 entry
SELECT count(*) FROM t4;
-- echo ### check that the incident event is written to the current log
SET @@global.debug_dbug=@old_debug;
if (!$binlog_limit)
{
-- let $binlog_limit= 4,1
}
-- source include/show_binlog_events.inc
# clean up and move to next test
DELETE FROM t4;
--disable_query_log
eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved;
--enable_query_log
RESET MASTER;
-- echo ###################### TEST #8
### ASSERTION: check that statements end up in error but they still
### succeed in changing the data.
SET @@global.debug_dbug="d,error_unique_log_filename";
-- echo # must show 0 entries
SELECT count(*) FROM t4;
SELECT count(*) FROM t2;
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4
-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-- error ER_NO_UNIQUE_LOGFILE
-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2
-- error ER_NO_UNIQUE_LOGFILE
INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc');
-- echo # INFO: Count(*) Before Offending DELETEs
-- echo # assert: must show 1 entry
SELECT count(*) FROM t4;
-- echo # assert: must show 4 entries
SELECT count(*) FROM t2;
-- error ER_NO_UNIQUE_LOGFILE
DELETE FROM t4;
-- error ER_NO_UNIQUE_LOGFILE
DELETE FROM t2;
-- echo # INFO: Count(*) After Offending DELETEs
-- echo # assert: must show zero entries
SELECT count(*) FROM t4;
SELECT count(*) FROM t2;
# remove fault injection
SET @@global.debug_dbug=@old_debug;
-- echo ###################### TEST #9
### ASSERTION: check that if we disable binlogging, then statements
### succeed.
SET @@global.debug_dbug="d,error_unique_log_filename";
SET SQL_LOG_BIN=0;
INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'), ('ddd');
INSERT INTO t4 VALUES ('eee'), ('fff'), ('ggg'), ('hhh');
-- echo # assert: must show four entries
SELECT count(*) FROM t2;
SELECT count(*) FROM t4;
DELETE FROM t2;
DELETE FROM t4;
-- echo # assert: must show zero entries
SELECT count(*) FROM t2;
SELECT count(*) FROM t4;
SET SQL_LOG_BIN=1;
SET @@global.debug_dbug=@old_debug;
-- echo ###################### TEST #10
### ASSERTION: check that an error is reported if there is a failure
###            while registering the index file and the binary log
###            file, or a failure to write the rotate event.
call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file.");
call mtr.add_suppression("Could not use .*");
RESET MASTER;
SHOW WARNINGS;
# +d,fault_injection_registering_index => injects fault on MYSQL_BIN_LOG::open
SET @@global.debug_dbug="d,fault_injection_registering_index";
-- replace_regex /\.[\\\/]master/master/
-- error ER_CANT_OPEN_FILE
FLUSH LOGS;
SET @@global.debug_dbug=@old_debug;
-- error ER_NO_BINARY_LOGGING
SHOW BINARY LOGS;
# issue some statements and check that they don't fail
CREATE TABLE t5 (a INT);
INSERT INTO t4 VALUES ('bbbbb');
INSERT INTO t2 VALUES ('aaaaa');
DELETE FROM t4;
DELETE FROM t2;
DROP TABLE t5;
flush tables;
-- echo ###################### TEST #11
### ASSERTION: check that an error is reported if there is a failure
###            while opening the index file and the binary log file,
###            or a failure to write the rotate event.
# restart the server so that we have binlog again
--let $rpl_server_number= 1
--source include/rpl_restart_server.inc
# +d,fault_injection_openning_index => injects fault on MYSQL_BIN_LOG::open_index_file
SET @@global.debug_dbug="d,fault_injection_openning_index";
-- replace_regex /\.[\\\/]master/master/
-- error ER_CANT_OPEN_FILE
FLUSH LOGS;
SET @@global.debug_dbug=@old_debug;
-- error ER_FLUSH_MASTER_BINLOG_CLOSED
RESET MASTER;
# issue some statements and check that they don't fail
CREATE TABLE t5 (a INT);
INSERT INTO t4 VALUES ('bbbbb');
INSERT INTO t2 VALUES ('aaaaa');
DELETE FROM t4;
DELETE FROM t2;
DROP TABLE t5;
flush tables;
# restart the server so that we have binlog again
--let $rpl_server_number= 1
--source include/rpl_restart_server.inc
-- echo ###################### TEST #12
### ASSERTION: check that an error is reported if there is a failure
###            while writing the rotate event when creating a new log
###            file.
# +d,fault_injection_new_file_rotate_event => injects fault on MYSQL_BIN_LOG::new_file_impl
SET @@global.debug_dbug="d,fault_injection_new_file_rotate_event";
-- error ER_ERROR_ON_WRITE
FLUSH LOGS;
SET @@global.debug_dbug=@old_debug;
-- error ER_FLUSH_MASTER_BINLOG_CLOSED
RESET MASTER;
# issue some statements and check that they don't fail
CREATE TABLE t5 (a INT);
INSERT INTO t4 VALUES ('bbbbb');
INSERT INTO t2 VALUES ('aaaaa');
DELETE FROM t4;
DELETE FROM t2;
DROP TABLE t5;
flush tables;
# restart the server so that we have binlog again
--let $rpl_server_number= 1
--source include/rpl_restart_server.inc
## clean up
DROP TABLE t1, t2, t4;
RESET MASTER;
# restart slave again
-- connection slave
-- source include/start_slave.inc
-- connection master
-- echo #######################################################################
-- echo ####################### PART 2: SLAVE TESTS ###########################
-- echo #######################################################################
### setup
--source include/rpl_reset.inc
-- connection slave
# slave suppressions
call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*");
call mtr.add_suppression("Error writing file .*");
call mtr.add_suppression("Could not use .*");
call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file.");
call mtr.add_suppression("Can't generate a unique log-filename .*");
-- echo ###################### TEST #13
#### ASSERTION: check against unique log filename error
-- let $io_thd_injection_fault_flag= error_unique_log_filename
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
-- echo ###################### TEST #14
#### ASSERTION: check against rotate failing
-- let $io_thd_injection_fault_flag= fault_injection_new_file_rotate_event
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
-- echo ###################### TEST #15
#### ASSERTION: check against relay log open failure
-- let $io_thd_injection_fault_flag= fault_injection_registering_index
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
-- echo ###################### TEST #16
#### ASSERTION: check against relay log index open failure
-- let $io_thd_injection_fault_flag= fault_injection_openning_index
-- let $slave_io_errno= 1595
-- let $show_slave_io_error= 1
-- source include/io_thd_fault_injection.inc
### clean up
-- source include/stop_slave_sql.inc
RESET SLAVE;
RESET MASTER;
--remove_file $load_file
--remove_file $load_file2
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc

View File

@@ -1 +1,83 @@
--source include/rpl_cant_read_event_incident.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#
# Bug#11747416 : 32228 A disk full makes binary log corrupt.
#
#
# The test demonstrates how an error when reading from the binlog is
# propagated to the slave and reported there.
# Conditions for the bug include a crash at the time when the last event
# was only partly written to the binlog. With the fix the partial event is
# no longer sent out; instead the dump thread sends a proper error message.
#
# The crash itself is not simulated. A binlog with a partly written event
# at its end is installed and replication is started from it.
#
--source include/have_binlog_format_mixed.inc
--source include/master-slave.inc
--connection slave
# Make sure the slave is stopped while we are messing with master.
# Otherwise we get occasional failures as the slave manages to re-connect
# to the newly started master and we get extra events applied, causing
# conflicts.
--source include/stop_slave.inc
--connection master
call mtr.add_suppression("Error in Log_event::read_log_event()");
--let $datadir= `SELECT @@datadir`
--let $rpl_server_number= 1
--source include/rpl_stop_server.inc
--remove_file $datadir/master-bin.000001
--copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001
--let $rpl_server_number= 1
--source include/rpl_start_server.inc
--source include/wait_until_connected_again.inc
# evidence of the partial binlog
--error ER_ERROR_WHEN_EXECUTING_COMMAND
show binlog events;
--connection slave
call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log");
reset slave;
start slave;
# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236
--let $slave_param=Last_IO_Errno
--let $slave_param_value=1236
--source include/wait_for_slave_param.inc
--let $slave_field_result_replace= / at [0-9]*/ at XXX/
--let $status_items= Last_IO_Errno, Last_IO_Error
--source include/show_slave_status.inc
#
# Cleanup
#
--connection master
reset master;
--connection slave
stop slave;
reset slave;
# The table was created from the binlog; it may not exist if the SQL thread
# is running slowly and the IO thread reaches the incident before the SQL
# thread applies the table creation.
--disable_warnings
drop table if exists t;
--enable_warnings
reset master;
--echo End of the tests
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc

View File

@@ -1 +1,335 @@
--source include/rpl_checksum.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# WL2540 replication events checksum
# Testing configuration parameters
--source include/have_debug.inc
--source include/have_binlog_format_mixed.inc
--source include/master-slave.inc
call mtr.add_suppression('Slave can not handle replication events with the checksum that master is configured to log');
call mtr.add_suppression('Replication event checksum verification failed');
# due to the failure simulation in part C below
call mtr.add_suppression('Relay log write failure: could not queue event from master');
call mtr.add_suppression('Master is configured to log replication events with checksum, but will not send such events to slaves that cannot process them');
# A. read/write access to the global vars:
# binlog_checksum master_verify_checksum slave_sql_verify_checksum
connection master;
set @master_save_binlog_checksum= @@global.binlog_checksum;
set @save_master_verify_checksum = @@global.master_verify_checksum;
select @@global.binlog_checksum as 'must be CRC32 because of the command line option';
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.binlog_checksum as 'no session var';
select @@global.master_verify_checksum as 'must be zero because of default';
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.master_verify_checksum as 'no session var';
connection slave;
set @slave_save_binlog_checksum= @@global.binlog_checksum;
set @save_slave_sql_verify_checksum = @@global.slave_sql_verify_checksum;
select @@global.slave_sql_verify_checksum as 'must be one because of default';
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.slave_sql_verify_checksum as 'no session var';
connection master;
source include/show_binary_logs.inc;
set @@global.binlog_checksum = NONE;
select @@global.binlog_checksum;
--echo *** must be rotations seen ***
source include/show_binary_logs.inc;
set @@global.binlog_checksum = default;
select @@global.binlog_checksum;
# testing lack of side-effects in non-effective update of binlog_checksum:
set @@global.binlog_checksum = CRC32;
select @@global.binlog_checksum;
set @@global.binlog_checksum = CRC32;
set @@global.master_verify_checksum = 0;
set @@global.master_verify_checksum = default;
--error ER_WRONG_VALUE_FOR_VAR
set @@global.binlog_checksum = ADLER32;
--error ER_WRONG_VALUE_FOR_VAR
set @@global.master_verify_checksum = 2; # the var is of bool type
connection slave;
set @@global.slave_sql_verify_checksum = 0;
set @@global.slave_sql_verify_checksum = default;
--error ER_WRONG_VALUE_FOR_VAR
set @@global.slave_sql_verify_checksum = 2; # the var is of bool type
#
# B. Old Slave to New master conditions
#
# while the master does not send a checksummed binlog, the Old Slave can
# work with the New Master
connection master;
set @@global.binlog_checksum = NONE;
create table t1 (a int);
# testing that binlog rotation preserves opt_binlog_checksum value
flush logs;
flush logs;
-- source include/wait_for_binlog_checkpoint.inc
flush logs;
sync_slave_with_master;
#connection slave;
# checking that rotation on the slave side leaves slave stable
flush logs;
flush logs;
flush logs;
select count(*) as zero from t1;
source include/stop_slave.inc;
connection master;
set @@global.binlog_checksum = CRC32;
-- source include/wait_for_binlog_checkpoint.inc
insert into t1 values (1) /* will not be applied on slave due to simulation */;
# instruction to the dump thread
connection slave;
set @saved_dbug = @@global.debug_dbug;
set @@global.debug_dbug='d,simulate_slave_unaware_checksum';
start slave;
--let $slave_io_errno= 1236
--let $show_slave_io_error= 1
source include/wait_for_slave_io_error.inc;
select count(*) as zero from t1;
set @@global.debug_dbug = @saved_dbug;
connection slave;
source include/start_slave.inc;
#
# C. checksum failure simulations
#
# C1. Failure by a client thread
connection master;
set @@global.master_verify_checksum = 1;
set @save_dbug = @@session.debug_dbug;
set @@session.debug_dbug='d,simulate_checksum_test_failure';
--error ER_ERROR_WHEN_EXECUTING_COMMAND
show binlog events;
SET debug_dbug= @save_dbug;
set @@global.master_verify_checksum = default;
#connection master;
sync_slave_with_master;
connection slave;
source include/stop_slave.inc;
connection master;
create table t2 (a int);
let $pos_master= query_get_value(SHOW MASTER STATUS, Position, 1);
connection slave;
# C2. Failure by IO thread
# instruction to io thread
set @saved_dbug = @@global.debug_dbug;
set @@global.debug_dbug='d,simulate_checksum_test_failure';
start slave io_thread;
# When the checksum error is detected, the slave sets error code 1913
# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately
# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io().
# So we usually get 1595, but it is occasionally possible to get 1913.
--let $slave_io_errno= 1595,1913
--let $show_slave_io_error= 0
source include/wait_for_slave_io_error.inc;
set @@global.debug_dbug = @saved_dbug;
# make the IO thread re-read it, this time w/o the failure
start slave io_thread;
let $slave_param= Read_Master_Log_Pos;
let $slave_param_value= $pos_master;
source include/wait_for_slave_param.inc;
# C3. Failure by SQL thread
# instruction to sql thread;
set @@global.slave_sql_verify_checksum = 1;
set @@global.debug_dbug='d,simulate_checksum_test_failure';
start slave sql_thread;
--let $slave_sql_errno= 1593
--let $show_slave_sql_error= 1
source include/wait_for_slave_sql_error.inc;
# resuming SQL thread to parse out the event w/o the failure
set @@global.debug_dbug = @saved_dbug;
source include/start_slave.inc;
connection master;
sync_slave_with_master;
#connection slave;
select count(*) as 'must be zero' from t2;
#
# D. Reset slave, Change-Master, Binlog & Relay-log rotations with
# random value on binlog_checksum on both master and slave
#
connection slave;
stop slave;
reset slave;
# randomize slave server's own checksum policy
set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE");
flush logs;
connection master;
set @@global.binlog_checksum= CRC32;
reset master;
flush logs;
create table t3 (a int, b char(5));
connection slave;
source include/start_slave.inc;
connection master;
sync_slave_with_master;
#connection slave;
select count(*) as 'must be zero' from t3;
source include/stop_slave.inc;
--replace_result $MASTER_MYPORT MASTER_PORT
eval change master to master_host='127.0.0.1',master_port=$MASTER_MYPORT, master_user='root';
connection master;
flush logs;
reset master;
insert into t3 value (1, @@global.binlog_checksum);
connection slave;
source include/start_slave.inc;
flush logs;
connection master;
sync_slave_with_master;
#connection slave;
select count(*) as 'must be one' from t3;
connection master;
set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE");
insert into t3 value (1, @@global.binlog_checksum);
sync_slave_with_master;
#connection slave;
#clean-up
connection master;
drop table t1, t2, t3;
set @@global.binlog_checksum = @master_save_binlog_checksum;
set @@global.master_verify_checksum = @save_master_verify_checksum;
#
# BUG#58564: flush_read_lock fails in mysql-trunk-bugfixing after merging with WL#2540
#
# Sanity check that verifies that no assertions are triggered because
# of old FD events (generated by server versions released before the
# checksum feature)
#
# There is no need for the query log; if something is wrong this should
# trigger an assertion
--disable_query_log
BINLOG '
MfmqTA8BAAAAZwAAAGsAAAABAAQANS41LjctbTMtZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA==
';
--enable_query_log
#connection slave;
sync_slave_with_master;
--echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters ***
--connection master
--source include/wait_for_binlog_checkpoint.inc
CREATE TABLE t4 (a INT PRIMARY KEY);
INSERT INTO t4 VALUES (1);
SET sql_log_bin=0;
CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename");
SET sql_log_bin=1;
SET @old_dbug= @@GLOBAL.debug_dbug;
SET debug_dbug= '+d,binlog_inject_new_name_error';
--error ER_NO_UNIQUE_LOGFILE
FLUSH LOGS;
SET debug_dbug= @old_dbug;
INSERT INTO t4 VALUES (2);
--connection slave
--let $slave_sql_errno= 1590
--source include/wait_for_slave_sql_error.inc
# Search the error log for the error message.
# The bug was that 4 garbage bytes were output in the middle of the error
# message; by searching for a pattern that spans that location, we can
# catch the error.
let $log_error_= `SELECT @@GLOBAL.log_error`;
if(!$log_error_)
{
# MySQL Server on Windows is started with --console and thus
# does not know the location of its .err log; use the default location
let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err;
}
--let SEARCH_FILE= $log_error_
--let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590
--source include/search_pattern_in_file.inc
SELECT * FROM t4 ORDER BY a;
STOP SLAVE IO_THREAD;
SET sql_slave_skip_counter= 1;
--source include/start_slave.inc
--connection master
--save_master_pos
--connection slave
--sync_with_master
SELECT * FROM t4 ORDER BY a;
--connection slave
set @@global.binlog_checksum = @slave_save_binlog_checksum;
set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum;
--echo End of tests
--connection master
DROP TABLE t4;
--source include/rpl_end.inc

View File

@@ -1 +1,261 @@
--source include/rpl_checksum_cache.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
-- source include/have_innodb.inc
-- source include/master-slave.inc
--disable_warnings
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t2 set data=repeat.*'a', @act_size.*");
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t1 values.* NAME_CONST.*'n',.*, @data .*");
--enable_warnings
connection master;
set @save_binlog_cache_size = @@global.binlog_cache_size;
set @save_binlog_checksum = @@global.binlog_checksum;
set @save_master_verify_checksum = @@global.master_verify_checksum;
set @@global.binlog_cache_size = 4096;
set @@global.binlog_checksum = CRC32;
set @@global.master_verify_checksum = 1;
# restart slave to force the dump thread to verify events (on master side)
connection slave;
source include/stop_slave.inc;
source include/start_slave.inc;
connection master;
#
# Testing a critical part of checksum handling dealing with the transaction
# cache. The cache's buffer size is set to be less than the transaction's
# footprint in the binlog.
#
# To verify the combined buffer-by-buffer read-out of the file and the
# per-event CRC fixup, there are the following parts:
#
# 1. the event size is much less than the cache's buffer
# 2. the event size is bigger than the cache's buffer
# 3. the event size is approximately the same as the cache's buffer
# 4. all of the above
#
# 1. the event size is much less than the cache's buffer
#
flush status;
show status like "binlog_cache_use";
show status like "binlog_cache_disk_use";
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# parameter to ensure the test slightly varies binlog content
# between different invocations
#
let $deviation_size=32;
eval create table t1 (a int PRIMARY KEY, b CHAR($deviation_size)) engine=innodb;
# Now we are going to create a transaction which is long enough that its
# binlog cache will be flushed (spilled) to disk...
delimiter |;
create procedure test.p_init (n int, size int)
begin
while n > 0 do
select round(RAND() * size) into @act_size;
set @data = repeat('a', @act_size);
insert into t1 values(n, @data );
set n= n-1;
end while;
end|
delimiter ;|
let $1 = 4000; # PB2 can run slowly enough to time out on the following sync_slave_with_master calls
begin;
--disable_warnings
# todo: check if it is really so.
#+Note 1592 Unsafe statement binlogged in statement format since BINLOG_FORMAT = STATEMENT. Reason for unsafeness: Statement uses a system function whose value may differ on slave.
eval call test.p_init($1, $deviation_size);
--enable_warnings
commit;
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t1, slave:test.t1;
source include/diff_tables.inc;
# undo the changes, verifying the above once again
connection master;
begin;
delete from t1;
commit;
sync_slave_with_master;
#
# 2. the event size is bigger than the cache's buffer
#
connection master;
flush status;
let $t2_data_size= `select 3 * @@global.binlog_cache_size`;
let $t2_aver_size= `select 2 * @@global.binlog_cache_size`;
let $t2_max_rand= `select 1 * @@global.binlog_cache_size`;
eval create table t2(a int auto_increment primary key, data VARCHAR($t2_data_size)) ENGINE=Innodb;
let $1=100;
--disable_query_log
begin;
while ($1)
{
eval select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size;
set @data = repeat('a', @act_size);
insert into t2 set data = @data;
dec $1;
}
commit;
--enable_query_log
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t2, slave:test.t2;
source include/diff_tables.inc;
# undo the changes, verifying the above once again
connection master;
begin;
delete from t2;
commit;
sync_slave_with_master;
#
# 3. the event size is approximately the same as the cache's buffer
#
connection master;
flush status;
let $t3_data_size= `select 2 * @@global.binlog_cache_size`;
let $t3_aver_size= `select (9 * @@global.binlog_cache_size) / 10`;
let $t3_max_rand= `select (2 * @@global.binlog_cache_size) / 10`;
eval create table t3(a int auto_increment primary key, data VARCHAR($t3_data_size)) engine=innodb;
let $1= 300;
--disable_query_log
begin;
while ($1)
{
eval select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size;
insert into t3 set data= repeat('a', @act_size);
dec $1;
}
commit;
--enable_query_log
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t3, slave:test.t3;
source include/diff_tables.inc;
# undo the changes, verifying the above once again
connection master;
begin;
delete from t3;
commit;
sync_slave_with_master;
#
# 4. all of the above
#
connection master;
flush status;
delimiter |;
eval create procedure test.p1 (n int)
begin
while n > 0 do
case (select (round(rand()*100) % 3) + 1)
when 1 then
select round(RAND() * $deviation_size) into @act_size;
set @data = repeat('a', @act_size);
insert into t1 values(n, @data);
when 2 then
begin
select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size;
insert into t2 set data=repeat('a', @act_size);
end;
when 3 then
begin
select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size;
insert into t3 set data= repeat('a', @act_size);
end;
end case;
set n= n-1;
end while;
end|
delimiter ;|
let $1= 1000;
set autocommit= 0;
begin;
--disable_warnings
eval call test.p1($1);
--enable_warnings
commit;
show status like "binlog_cache_use";
--echo *** binlog_cache_disk_use must be non-zero ***
show status like "binlog_cache_disk_use";
sync_slave_with_master;
let $diff_tables=master:test.t1, slave:test.t1;
source include/diff_tables.inc;
let $diff_tables=master:test.t2, slave:test.t2;
source include/diff_tables.inc;
let $diff_tables=master:test.t3, slave:test.t3;
source include/diff_tables.inc;
connection master;
begin;
delete from t1;
delete from t2;
delete from t3;
commit;
drop table t1, t2, t3;
set @@global.binlog_cache_size = @save_binlog_cache_size;
set @@global.binlog_checksum = @save_binlog_checksum;
set @@global.master_verify_checksum = @save_master_verify_checksum;
drop procedure test.p_init;
drop procedure test.p1;
--source include/rpl_end.inc

View File

@@ -1 +1,175 @@
--source include/rpl_corruption.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
############################################################
# Purpose: WL#5064 Testing with corrupted events.
# The test emulates corruption at the various stages
# of replication:
# - in binlog file
# - in network
# - in relay log
############################################################
#
# The tests intensively utilize @@global.debug. Note (Bug#11765758 - 58754)
# that @@global.debug is read by the slave threads through the dbug
# interface. Hence, before a client thread sets @@global.debug we have to
# ensure that: (a) the slave threads are stopped, or (b) the slave threads
# are in sync and waiting.
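#
# A minimal sketch of the safe pattern this implies (illustrative only; the
# fault name below is a made-up placeholder, not one used by this test):
#
#   --source include/stop_slave.inc                 # (a) slave threads stopped
#   SET @@global.debug_dbug= "+d,some_fault_flag";  # now safe to change the flag
#   --source include/start_slave.inc                # fault is picked up on restart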
--source include/have_debug.inc
--source include/master-slave.inc
# Suppress expected (legal) errors for MTR
call mtr.add_suppression('Found invalid event in binary log');
call mtr.add_suppression('Slave I/O: Relay log write failure: could not queue event from master');
call mtr.add_suppression('event read from binlog did not pass crc check');
call mtr.add_suppression('Replication event checksum verification failed');
call mtr.add_suppression('Event crc check failed! Most likely there is event corruption');
call mtr.add_suppression('Slave SQL: Error initializing relay log position: I/O error reading event at position .*, error.* 1593');
SET @old_master_verify_checksum = @@master_verify_checksum;
# Creating test table/data and set corruption position for testing
--echo # 1. Creating test table/data and set corruption position for testing
--connection master
--echo * insert/update/delete rows in table t1 *
# The corruption algorithm modifies only the first event and is then
# reset. To avoid always targeting the first event in the binlog (usually
# the FD event) we randomly execute different statements and set the
# position for corruption inside the resulting events.
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c VARCHAR(100));
--disable_query_log
let $i=`SELECT 3+CEILING(10*RAND())`;
let $j=1;
let $pos=0;
while ($i) {
eval INSERT INTO t1 VALUES ($j, 'a', NULL);
if (`SELECT RAND() > 0.7`)
{
eval UPDATE t1 SET c = REPEAT('a', 20) WHERE a = $j;
}
if (`SELECT RAND() > 0.8`)
{
eval DELETE FROM t1 WHERE a = $j;
}
if (!$pos) {
let $pos= query_get_value(SHOW MASTER STATUS, Position, 1);
--sync_slave_with_master
--source include/stop_slave.inc
--disable_query_log
--connection master
}
dec $i;
inc $j;
}
--enable_query_log
# Emulate corruption in binlog file when SHOW BINLOG EVENTS is executing
--echo # 2. Corruption in master binlog and SHOW BINLOG EVENTS
SET @saved_dbug = @@global.debug_dbug;
SET @@global.debug_dbug="d,corrupt_read_log_event_char";
--echo SHOW BINLOG EVENTS;
--disable_query_log
send_eval SHOW BINLOG EVENTS FROM $pos;
--enable_query_log
--error ER_ERROR_WHEN_EXECUTING_COMMAND
reap;
SET @@global.debug_dbug=@saved_dbug;
# Emulate corruption on master with crc checking on master
--echo # 3. Master read a corrupted event from binlog and send the error to slave
# We have a rare but nasty potential race here: if the dump thread on
# the master for the _old_ slave connection has not yet discovered
# that the slave has disconnected, we will inject the corrupt event on
# the wrong connection, and the test will fail
# (+d,corrupt_read_log_event2 corrupts only one event).
# So kill any lingering dump thread (we need to kill; otherwise dump thread
# could manage to send all events down the socket before seeing it close, and
# hang forever waiting for new binlog events to be created).
let $id= `select id from information_schema.processlist where command = "Binlog Dump"`;
if ($id)
{
--disable_query_log
--error 0,1094
eval kill $id;
--enable_query_log
}
let $wait_condition=
SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE command = 'Binlog Dump';
--source include/wait_condition.inc
SET @@global.debug_dbug="d,corrupt_read_log_event2_set";
--connection slave
START SLAVE IO_THREAD;
let $slave_io_errno= 1236;
--let $slave_timeout= 10
--source include/wait_for_slave_io_error.inc
--connection master
SET @@global.debug_dbug=@saved_dbug;
# Emulate corruption on master without crc checking on master
--echo # 4. Master read a corrupted event from binlog and send it to slave
--connection master
SET GLOBAL master_verify_checksum=0;
SET @@global.debug_dbug="d,corrupt_read_log_event2_set";
--connection slave
START SLAVE IO_THREAD;
# When the checksum error is detected, the slave sets error code 1743
# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately
# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io().
# So we usually get 1595, but it is occasionally possible to get 1743.
let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE
--source include/wait_for_slave_io_error.inc
--connection master
SET @@global.debug_dbug=@saved_dbug;
SET GLOBAL master_verify_checksum=1;
# Emulate corruption in network
--echo # 5. Slave. Corruption in network
--connection slave
SET @saved_dbug_slave = @@GLOBAL.debug_dbug;
SET @@global.debug_dbug="d,corrupt_queue_event";
START SLAVE IO_THREAD;
let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE
--source include/wait_for_slave_io_error.inc
SET @@global.debug_dbug=@saved_dbug_slave;
# Emulate corruption in relay log
--echo # 6. Slave. Corruption in relay log
SET @@global.debug_dbug="d,corrupt_read_log_event_char";
START SLAVE SQL_THREAD;
let $slave_sql_errno= 1593;
--source include/wait_for_slave_sql_error.inc
SET @@global.debug_dbug=@saved_dbug_slave;
# Start normal replication and compare same table on master
# and slave
--echo # 7. Seek diff for tables on master and slave
--connection slave
--source include/start_slave.inc
--connection master
--sync_slave_with_master
let $diff_tables= master:test.t1, slave:test.t1;
--source include/diff_tables.inc
# Clean up
--echo # 8. Clean up
--connection master
set @@global.debug_dbug = @saved_dbug;
SET GLOBAL master_verify_checksum = @old_master_verify_checksum;
DROP TABLE t1;
--sync_slave_with_master
--source include/rpl_end.inc

View File

@@ -1,4 +1,575 @@
--source include/rpl_gtid_basic.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
--source include/have_innodb.inc
--let $rpl_topology=1->2->3->4
--source include/rpl_init.inc
# Set up a 4-deep replication topology, then test various fail-overs
# using GTID.
#
# A -> B -> C -> D
connection server_1;
--source include/wait_for_binlog_checkpoint.inc
--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
--echo *** GTID position should be empty here ***
--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS>
eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
CREATE TABLE t2 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1, "m1");
INSERT INTO t1 VALUES (2, "m2"), (3, "m3"), (4, "m4");
INSERT INTO t2 VALUES (1, "i1");
BEGIN;
INSERT INTO t2 VALUES (2, "i2"), (3, "i3");
INSERT INTO t2 VALUES (4, "i4");
COMMIT;
save_master_pos;
source include/wait_for_binlog_checkpoint.inc;
--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
--let $gtid_pos_server_1 = `SELECT @@gtid_binlog_pos`
--echo *** GTID position should be non-empty here ***
--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1>
eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
connection server_2;
sync_with_master;
source include/wait_for_binlog_checkpoint.inc;
--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1)
--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1)
--echo *** GTID position should be the same as on server_1 ***
--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1>
eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos);
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
save_master_pos;
connection server_3;
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
save_master_pos;
connection server_4;
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--echo *** Now take out D, let it fall behind a bit, and then test re-attaching it to A ***
connection server_4;
--source include/stop_slave.inc
connection server_1;
INSERT INTO t1 VALUES (5, "m1a");
INSERT INTO t2 VALUES (5, "i1a");
save_master_pos;
connection server_4;
--replace_result $MASTER_MYPORT MASTER_PORT
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT,
MASTER_USE_GTID=CURRENT_POS;
--source include/start_slave.inc
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--echo *** Now move B to D (C is still replicating from B) ***
connection server_2;
--source include/stop_slave.inc
--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4,
MASTER_USE_GTID=CURRENT_POS;
--source include/start_slave.inc
connection server_4;
UPDATE t2 SET b="j1a" WHERE a=5;
save_master_pos;
connection server_2;
sync_with_master;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--echo *** Now move C to D, after letting it fall a little behind ***
connection server_3;
--source include/stop_slave.inc
connection server_1;
INSERT INTO t2 VALUES (6, "i6b");
INSERT INTO t2 VALUES (7, "i7b");
--source include/save_master_gtid.inc
connection server_3;
--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4,
MASTER_USE_GTID=CURRENT_POS;
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
SELECT * FROM t2 ORDER BY a;
--echo *** Now change everything back to what it was, to make rpl_end.inc happy
# Also check that MASTER_USE_GTID=CURRENT_POS is still enabled.
connection server_2;
# We need to sync up server_2 before switching. If it happened to have reached
# the point 'UPDATE t2 SET b="j1a" WHERE a=5' it will fail to connect to
# server_1, which is (deliberately) missing that transaction.
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--replace_result $MASTER_MYPORT MASTER_MYPORT
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT;
--source include/start_slave.inc
--source include/wait_for_slave_to_start.inc
connection server_3;
--source include/stop_slave.inc
--replace_result $SLAVE_MYPORT SLAVE_MYPORT
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SLAVE_MYPORT;
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
connection server_4;
--source include/stop_slave.inc
--replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3
eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3;
--source include/start_slave.inc
connection server_1;
DROP TABLE t1,t2;
--source include/save_master_gtid.inc
--echo *** A few more checks for BINLOG_GTID_POS function ***
--let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1)
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT BINLOG_GTID_POS();
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT BINLOG_GTID_POS('a');
--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
SELECT BINLOG_GTID_POS('a',1,NULL);
SELECT BINLOG_GTID_POS(1,'a');
SELECT BINLOG_GTID_POS(NULL,NULL);
SELECT BINLOG_GTID_POS('',1);
SELECT BINLOG_GTID_POS('a',1);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',-1);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',0);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551615);
eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616);
--echo *** Some tests of @@GLOBAL.gtid_binlog_state ***
--connection server_2
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--connection server_1
SET @old_state= @@GLOBAL.gtid_binlog_state;
--error ER_BINLOG_MUST_BE_EMPTY
SET GLOBAL gtid_binlog_state = '';
RESET MASTER;
SET GLOBAL gtid_binlog_state = '';
FLUSH LOGS;
--source include/show_binary_logs.inc
SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30';
--source include/show_binary_logs.inc
--let $binlog_file= master-bin.000001
--let $binlog_start= 4
--source include/show_binlog_events.inc
#SELECT @@GLOBAL.gtid_binlog_pos;
#SELECT @@GLOBAL.gtid_binlog_state;
--error ER_BINLOG_MUST_BE_EMPTY
SET GLOBAL gtid_binlog_state = @old_state;
RESET MASTER;
SET GLOBAL gtid_binlog_state = @old_state;
# Check that slave can reconnect again, despite the RESET MASTER, as we
# restored the state.
CREATE TABLE t1 (a INT PRIMARY KEY);
SET gtid_seq_no=100;
INSERT INTO t1 VALUES (1);
--source include/save_master_gtid.inc
--connection server_2
--source include/start_slave.inc
# We cannot just use sync_with_master as we've done RESET MASTER, so the
# slave's old-style position is wrong.
# So sync on the GTID position instead.
--source include/sync_with_master_gtid.inc
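# Roughly, the GTID-based sync above amounts to waiting on the GTID position
# saved earlier on the master, i.e. something along the lines of (variable
# name and timeout are assumptions; the real logic lives in the include files):
#   --eval SELECT MASTER_GTID_WAIT('$master_gtid_pos', 120)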
SELECT * FROM t1;
# Check that the IO gtid position in SHOW SLAVE STATUS is also correct.
--let $status_items= Gtid_IO_Pos
--source include/show_slave_status.inc
--echo *** Test @@LAST_GTID and MASTER_GTID_WAIT() ***
--connection server_1
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
--save_master_pos
--connection server_2
--sync_with_master
--source include/stop_slave.inc
--connect (m1,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
SELECT @@last_gtid;
SET gtid_seq_no=110;
SELECT @@last_gtid;
BEGIN;
SELECT @@last_gtid;
INSERT INTO t1 VALUES (2);
SELECT @@last_gtid;
COMMIT;
SELECT @@last_gtid;
--let $pos= `SELECT @@gtid_binlog_pos`
--connect (s1,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
eval SET @pos= '$pos';
# Check NULL argument.
SELECT master_gtid_wait(NULL);
# Check empty argument returns immediately.
SELECT master_gtid_wait('', NULL);
# Check this gets counted
SHOW STATUS LIKE 'Master_gtid_wait_count';
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_time';
# Let's check that we get a timeout
SELECT master_gtid_wait(@pos, 0.5);
SELECT * FROM t1 ORDER BY a;
# Now actually wait until the slave reaches the position
send SELECT master_gtid_wait(@pos);
--connection server_2
--source include/start_slave.inc
--connection s1
reap;
SELECT * FROM t1 ORDER BY a;
# Test waiting on a domain that does not exist yet.
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id= 1;
INSERT INTO t1 VALUES (3);
--let $pos= `SELECT @@gtid_binlog_pos`
--connection s1
--replace_result $pos POS
eval SET @pos= '$pos';
SELECT master_gtid_wait(@pos, 0);
SELECT * FROM t1 WHERE a >= 3;
send SELECT master_gtid_wait(@pos, -1);
--connection server_2
--source include/start_slave.inc
--connection s1
reap;
SELECT * FROM t1 WHERE a >= 3;
# Waiting for only part of the position.
SELECT master_gtid_wait('1-1-1', 0);
# Now test a lot of parallel master_gtid_wait() calls, completing in different
# order, and some of which time out or get killed on the way.
--connection s1
send SELECT master_gtid_wait('2-1-1,1-1-4,0-1-110');
--connect (s2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
# This will time out. No event 0-1-1000 exists
send SELECT master_gtid_wait('0-1-1000', 0.5);
--connect (s3,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
# This one we will kill
--let $kill1_id= `SELECT connection_id()`
send SELECT master_gtid_wait('0-1-2000');
--connect (s4,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-10');
--connect (s5,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-6', 1);
# This one we will kill also.
--connect (s6,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
--let $kill2_id= `SELECT connection_id()`
send SELECT master_gtid_wait('2-1-5');
--connect (s7,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-10');
--connect (s8,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-5,1-1-4,0-1-110');
--connect (s9,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('2-1-2');
--connection server_2
# This one completes immediately.
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
SELECT master_gtid_wait('1-1-1');
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1);
--replace_result $wait_time MASTER_GTID_WAIT_TIME
eval SET @a= $wait_time;
SELECT IF(@a <= 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " is larger than expected"))
AS Master_gtid_wait_time_as_expected;
--connect (s10,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
send SELECT master_gtid_wait('0-1-109');
--connection server_2
# This one should time out.
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
SELECT master_gtid_wait('2-1-2', 0.5);
SHOW STATUS LIKE 'Master_gtid_wait_timeouts';
SHOW STATUS LIKE 'Master_gtid_wait_count';
let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1);
--replace_result $wait_time MASTER_GTID_WAIT_TIME
eval SET @a= $wait_time;
# We expect a wait time of just a bit over 0.5 seconds. But thread scheduling
# and timer inaccuracies could introduce significant jitter. So allow a
# generous interval.
SELECT IF(@a BETWEEN 0.4*1000*1000 AND 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " not as expected")) AS Master_gtid_wait_time_as_expected;
--replace_result $kill1_id KILL_ID
eval KILL QUERY $kill1_id;
--connection s3
--error ER_QUERY_INTERRUPTED
reap;
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=2;
INSERT INTO t1 VALUES (4);
--connection s9
reap;
--connection server_2
--replace_result $kill2_id KILL_ID
eval KILL CONNECTION $kill2_id;
--connection s6
--error 2013,ER_CONNECTION_KILLED
reap;
--connection server_1
SET gtid_domain_id=1;
SET gtid_seq_no=4;
INSERT INTO t1 VALUES (5);
SET gtid_domain_id=2;
SET gtid_seq_no=5;
INSERT INTO t1 VALUES (6);
--connection s8
reap;
--connection s1
reap;
--connection s2
reap;
--connection s5
reap;
--connection s10
reap;
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=10;
INSERT INTO t1 VALUES (7);
--connection s4
reap;
--connection s7
reap;
--echo *** Test gtid_slave_pos when used with GTID ***
--connection server_2
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1000;
INSERT INTO t1 VALUES (10);
INSERT INTO t1 VALUES (11);
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 1;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1010;
INSERT INTO t1 VALUES (12);
INSERT INTO t1 VALUES (13);
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 2;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1020;
INSERT INTO t1 VALUES (14);
INSERT INTO t1 VALUES (15);
INSERT INTO t1 VALUES (16);
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 3;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=2;
SET gtid_seq_no=1030;
# Disable logging Annotate_rows events to preserve events count.
let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`;
SET @@binlog_annotate_row_events= 0;
INSERT INTO t1 VALUES (17);
INSERT INTO t1 VALUES (18);
INSERT INTO t1 VALUES (19);
eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved;
--save_master_pos
--connection server_2
SET sql_slave_skip_counter= 5;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;
SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
--source include/stop_slave.inc
--connection server_1
SET gtid_domain_id=3;
SET gtid_seq_no=100;
CREATE TABLE t2 (a INT PRIMARY KEY);
DROP TABLE t2;
SET gtid_domain_id=2;
SET gtid_seq_no=1040;
INSERT INTO t1 VALUES (20);
--save_master_pos
--connection server_2
SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode;
SET GLOBAL slave_ddl_exec_mode=STRICT;
SET sql_slave_skip_counter=1;
START SLAVE UNTIL master_gtid_pos="3-1-100";
--let $master_pos=3-1-100
--source include/sync_with_master_gtid.inc
--source include/wait_for_slave_to_stop.inc
--error ER_NO_SUCH_TABLE
SELECT * FROM t2;
SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
# Start the slave again, it should fail on the DROP TABLE as the table is not there.
SET sql_log_bin=0;
CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051");
SET sql_log_bin=1;
START SLAVE;
--let $slave_sql_errno=1051
--source include/wait_for_slave_sql_error.inc
SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
STOP SLAVE IO_THREAD;
SET sql_slave_skip_counter=2;
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 20 ORDER BY a;
SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
SET GLOBAL slave_ddl_exec_mode= @saved_mode;
--echo *** Test GTID-connecting to a master with out-of-order sequence numbers in the binlog. ***
# Create an out-of-order binlog on server 2.
# Let server 3 replicate to an out-of-order point, stop it, restart it,
# and check that it replicates correctly despite the out-of-order sequence numbers.
--connection server_1
SET gtid_domain_id= @@GLOBAL.gtid_domain_id;
INSERT INTO t1 VALUES (31);
--save_master_pos
--connection server_2
--sync_with_master
SET gtid_domain_id= @@GLOBAL.gtid_domain_id;
INSERT INTO t1 VALUES (32);
--connection server_1
INSERT INTO t1 VALUES (33);
--save_master_pos
--connection server_2
--sync_with_master
--save_master_pos
--connection server_3
--sync_with_master
--source include/stop_slave.inc
--connection server_1
INSERT INTO t1 VALUES (34);
--save_master_pos
--connection server_2
--sync_with_master
--save_master_pos
--connection server_3
--source include/start_slave.inc
--sync_with_master
SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
--save_master_pos
--connection server_4
--sync_with_master
SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
# Clean up.
--connection server_1
DROP TABLE t1;
--source include/rpl_end.inc
--echo # --echo #
--echo # Start of 10.2 tests --echo # Start of 10.2 tests

View File

@@ -1 +1,61 @@
--source include/rpl_incident.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
--source include/have_debug.inc
--source include/master-slave.inc
SET @old_binlog_checksum=@@binlog_checksum;
SET GLOBAL BINLOG_CHECKSUM=none;
connection slave;
SET @old_binlog_checksum=@@binlog_checksum;
SET GLOBAL BINLOG_CHECKSUM=none;
connection master;
--echo **** On Master ****
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
SELECT * FROM t1;
set @saved_dbug = @@global.debug_dbug;
SET GLOBAL debug_dbug= '+d,incident_database_resync_on_replace,*';
# This will generate an incident log event and store it in the binary
# log before the replace statement.
REPLACE INTO t1 VALUES (4);
--save_master_pos
SELECT * FROM t1;
set @@global.debug_dbug = @saved_dbug;
connection slave;
# Wait until SQL thread stops with error LOST_EVENT on master
call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occurred on the master.* 1590");
let $slave_sql_errno= 1590;
let $show_slave_sql_error= 1;
source include/wait_for_slave_sql_error.inc;
# The 4 should not be inserted into the table, since the incident log
# event should have stopped the slave.
--echo **** On Slave ****
SELECT * FROM t1;
SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1;
START SLAVE;
--sync_with_master
# Now, we should have inserted the row into the table and the slave
# should be running. We should also have rotated to a new binary log.
SELECT * FROM t1;
source include/check_slave_is_running.inc;
connection master;
SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum;
DROP TABLE t1;
--sync_slave_with_master
SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum;
--source include/rpl_end.inc

View File

@@ -1 +1,96 @@
--source include/rpl_init_slave_errors.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
######################################################################
# Some errors that cause the slave SQL thread to stop are not shown in
# the Slave_SQL_Error column of "SHOW SLAVE STATUS". Instead, the error
# is only in the server's error log.
#
# Two failures and their respective reporting are verified:
#
# 1 - Failures during slave thread initialization
# 2 - Failures while processing queries passed through the init_slave
# option.
#
# In order to check the first type of failure, we inject a fault in the
# SQL/IO Threads through SET GLOBAL debug.
#
# To check the second type, we set @@global.init_slave to an invalid
# command thus preventing the initialization of the SQL Thread.
#
# Notes:
# 1 - Testing failures while initializing the relay log position is hard,
# as the same function is called before the code reaches the point that
# we want to test.
#
# 2 - This test does not target failures that are reported while applying
# events, such as duplicate keys or errors while reading the relay-log.bin*
# files. Such errors are already checked in other tests.
######################################################################
######################################################################
# Configuring the Environment
######################################################################
source include/have_debug.inc;
source include/have_log_bin.inc;
source include/master-slave.inc;
connection slave;
--disable_warnings
stop slave;
--enable_warnings
reset slave;
######################################################################
# Injecting faults in the threads' initialization
######################################################################
connection slave;
# Set debug flags on slave to force errors to occur
set @saved_dbug = @@global.debug_dbug;
SET GLOBAL debug_dbug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init";
start slave;
#
# the slave is going to stop because of the emulated failures,
# but no crashes or assertions should be hit.
#
# 1593 = ER_SLAVE_FATAL_ERROR
--let $slave_sql_errno= 1593
--let $show_slave_sql_error= 1
--source include/wait_for_slave_sql_error.inc
call mtr.add_suppression("Failed during slave.* thread initialization");
set @@global.debug_dbug = @saved_dbug;
######################################################################
# Injecting faults in the init_slave option
######################################################################
connection slave;
reset slave;
SET GLOBAL init_slave= "garbage";
start slave;
# 1064 = ER_PARSE_ERROR
--let $slave_sql_errno= 1064
--let $show_slave_sql_error= 1
--source include/wait_for_slave_sql_error.inc
######################################################################
# Clean up
######################################################################
SET GLOBAL init_slave= "";
# Clean up Last_SQL_Error
--source include/stop_slave_io.inc
RESET SLAVE;
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc

View File

@@ -1 +0,0 @@
--source include/rpl_loaddata_local.inc

View File

@@ -1 +1,120 @@
--source include/rpl_loadfile.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#############################################################################
# Original Author: JBM #
# Original Date: Aug/18/2005 #
#############################################################################
# TEST: To test the LOAD_FILE() in rbr #
#############################################################################
# Change Author: JBM
# Change Date: 2006-01-16
##########
# Includes
-- source include/have_binlog_format_mixed_or_row.inc
-- source include/master-slave.inc
-- source suite/rpl/include/rpl_loadfile.test
# BUG#39701: Mixed binlog format does not switch to row mode on LOAD_FILE
#
# DESCRIPTION
#
# Problem: when using the LOAD_FILE string function with the mixed binlogging
# format, there was no switch to row-based binlogging. This leads to
# scenarios in which the slave replicates the statement and tries to load
# the file from its local file system, where it most likely does not exist.
#
# Solution:
# Marking this function as unsafe for statement format makes any statement
# using it be logged in row-based format. As such, the data replicated from
# the master is the content of the loaded file. Consequently, the slave
# receives the data it needs to apply the LOAD_FILE statement correctly.
#
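# As a hedged illustration of the behaviour described above (not part of the
# original test; the file name is a made-up placeholder): with
# binlog_format=MIXED, a statement such as
#   INSERT INTO t1 VALUES (LOAD_FILE('/tmp/bug39701_example.data'));
# is written to the binary log as row events (Table_map + Write_rows) rather
# than as the statement text, which can be confirmed with SHOW BINLOG EVENTS.
#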
# IMPLEMENTATION
#
# The test is implemented as follows:
#
# On Master,
# i) write the desired content to a file.
# ii) create a table and a stored procedure using LOAD_FILE
# iii) stop the slave
# iv) execute the LOAD_FILE call
# v) remove the file
#
# On Slave,
# vi) start the slave
# vii) sync it with the master so that it gets the updates from the binlog
# (which should have been binlogged in row format).
#
# If the binlog format does not change to row, then the assertion done in
# the following step fails. This happens because the tables differ, since
# the file does not exist anymore: when the slave attempts to execute the
# LOAD_FILE statement it inserts NULL into the table instead of the
# contents that the master loaded when it executed the procedure (while
# the file still existed).
#
# viii) assert that the contents of the master and slave tables are the same
--source include/rpl_reset.inc
connection master;
let $file= $MYSQLTEST_VARDIR/tmp/bug_39701.data;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--eval SELECT repeat('x',20) INTO OUTFILE '$file'
disable_warnings;
DROP TABLE IF EXISTS t1;
enable_warnings;
CREATE TABLE t1 (t text);
DELIMITER |;
CREATE PROCEDURE p(file varchar(4096))
BEGIN
INSERT INTO t1 VALUES (LOAD_FILE(file));
END|
DELIMITER ;|
# stop slave before issuing the load_file on master
connection slave;
source include/stop_slave.inc;
connection master;
# test: check that logging falls back to rbr.
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--eval CALL p('$file')
# test: remove the file from the filesystem and assert that slave still
# gets the loaded file
remove_file $file;
# now that the file is removed it is safe (regarding what we want to test)
# to start slave
connection slave;
source include/start_slave.inc;
connection master;
sync_slave_with_master;
# assertion: assert that the slave got the updates even
# if the file was removed before the slave started,
#            meaning that contents were indeed transferred
# through binlog (in row format)
let $diff_tables= master:t1, slave:t1;
source include/diff_tables.inc;
# CLEAN UP
--connection master
DROP TABLE t1;
DROP PROCEDURE p;
--source include/rpl_end.inc

View File

@ -1 +1,184 @@
--source include/rpl_packet.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# ==== Purpose ====
#
# Check replication protocol packet size handling
#
# ==== Related bugs ====
# Bug#19402 SQL close to the size of the max_allowed_packet fails on slave
# BUG#23755: Replicated event larger than max_allowed_packet infinitely re-transmits
# BUG#42914: No LAST_IO_ERROR for max_allowed_packet errors
# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET
# Use a database name of the maximum supported length
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153");
call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet");
let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________;
disable_warnings;
eval drop database if exists $db;
enable_warnings;
eval create database $db;
connection master;
let $old_max_allowed_packet= `SELECT @@global.max_allowed_packet`;
let $old_net_buffer_length= `SELECT @@global.net_buffer_length`;
let $old_slave_max_allowed_packet= `SELECT @@global.slave_max_allowed_packet`;
SET @@global.max_allowed_packet=1024;
SET @@global.net_buffer_length=1024;
sync_slave_with_master;
# Restart slave for setting to take effect
source include/stop_slave.inc;
source include/start_slave.inc;
# Reconnect to master for new setting to take effect
disconnect master;
# alas, we cannot use eval here; if the db name above changes, update it here as well
connect (master,localhost,root,,DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________);
connection master;
select @@net_buffer_length, @@max_allowed_packet;
create table `t1` (`f1` LONGTEXT) ENGINE=MyISAM;
INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1023');
sync_slave_with_master;
eval select count(*) from `$db`.`t1` /* must be 1 */;
SHOW STATUS LIKE 'Slave_running';
select * from information_schema.session_status where variable_name= 'SLAVE_RUNNING';
connection master;
eval drop database $db;
sync_slave_with_master;
#
# Bug #23755: Replicated event larger than max_allowed_packet infinitely re-transmits
#
# Check that a situation where the size of an event on the master is greater
# than max_allowed_packet on the slave does not lead to infinite re-transmits.
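# Illustrative note (not part of the original test): the expected failure can
# also be observed manually on the slave, e.g. with
#   SHOW SLAVE STATUS;
# where Last_IO_Errno should be 1153 (ER_NET_PACKET_TOO_LARGE). The
# wait_for_slave_io_error.inc calls below automate exactly this check.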
connection master;
# Change the max packet size on master
SET @@global.max_allowed_packet=4096;
SET @@global.net_buffer_length=4096;
# Restart slave for new setting to take effect
connection slave;
source include/stop_slave.inc;
source include/start_slave.inc;
# Reconnect to master for new setting to take effect
disconnect master;
connect (master, localhost, root);
connection master;
CREATE TABLE `t1` (`f1` LONGTEXT) ENGINE=MyISAM;
sync_slave_with_master;
connection master;
INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048');
#
# Bug#42914: The slave I/O thread must stop after trying to read the above
# event; however, there is no Last_IO_Error report.
#
# The slave I/O thread must stop after trying to read the above event
connection slave;
# 1153 = ER_NET_PACKET_TOO_LARGE
--let $slave_io_errno= 1153
--let $show_slave_io_error= 1
--source include/wait_for_slave_io_error.inc
# TODO: this is needed because of BUG#55790. Remove once that is fixed.
--source include/stop_slave_sql.inc
#
# Bug#42914: On the master, if a binary log event is larger than
# max_allowed_packet, the error message ER_MASTER_FATAL_ERROR_READING_BINLOG
# is sent to a slave when it requests a dump from the master, thus leading the
# I/O thread to stop. However, there is no Last_IO_Error reported.
#
--let $rpl_only_running_threads= 1
--source include/rpl_reset.inc
--connection master
DROP TABLE t1;
--sync_slave_with_master
connection master;
CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM;
sync_slave_with_master;
connection master;
INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet));
connection slave;
# The slave I/O thread must stop after receiving
# 1153 = ER_NET_PACKET_TOO_LARGE
--let $slave_io_errno= 1153
--let $show_slave_io_error= 1
--source include/wait_for_slave_io_error.inc
# Remove the bad binlog and clear error status on slave.
STOP SLAVE;
RESET SLAVE;
--connection master
RESET MASTER;
#
# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET
#
# In BUG#55322, @@session.max_allowed_packet increased each time SHOW
# BINLOG EVENTS was issued. To verify that this bug is fixed, we
# execute SHOW BINLOG EVENTS twice and check that max_allowed_packet
# never changes. We turn off the result log because we don't care
# about the contents of the binlog.
--disable_result_log
SET @max_allowed_packet_0= @@session.max_allowed_packet;
SHOW BINLOG EVENTS;
SET @max_allowed_packet_1= @@session.max_allowed_packet;
SHOW BINLOG EVENTS;
SET @max_allowed_packet_2= @@session.max_allowed_packet;
--enable_result_log
if (`SELECT NOT(@max_allowed_packet_0 = @max_allowed_packet_1 AND @max_allowed_packet_1 = @max_allowed_packet_2)`)
{
--echo ERROR: max_allowed_packet changed after executing SHOW BINLOG EVENTS
--source include/show_rpl_debug_info.inc
SELECT @max_allowed_packet_0, @max_allowed_packet_1, @max_allowed_packet_2;
--die @max_allowed_packet changed after executing SHOW BINLOG EVENTS
}
--echo ==== clean up ====
connection master;
DROP TABLE t1;
eval SET @@global.max_allowed_packet= $old_max_allowed_packet;
eval SET @@global.net_buffer_length= $old_net_buffer_length;
eval SET @@global.slave_max_allowed_packet= $old_slave_max_allowed_packet;
# slave is stopped
connection slave;
DROP TABLE t1;
# Clear Last_IO_Error
RESET SLAVE;
--source include/rpl_end.inc
# End of tests

View File

@ -1 +1,112 @@
--source include/rpl_parallel_ignored_errors.inc # ==== Purpose ====
#
# Test verifies that, in parallel replication, transaction failure notification
# is propagated to all the workers. Workers should abort the execution of
# transaction event groups, whose event positions are higher than the failing
# transaction group.
#
# ==== Implementation ====
#
# Steps:
# 0 - Create a table t1 on master which has a primary key. Enable parallel
# replication on slave with slave_parallel_mode='optimistic' and
# slave_parallel_threads=3.
# 1 - On slave start a transaction and execute a local INSERT statement
# which will insert value 32. This is done to block the INSERT coming
# from master.
# 2 - On master execute an INSERT statement with value 32, so that it is
# blocked on slave.
# 3 - On slave enable a debug sync point such that it holds the worker thread
# execution as soon as work is scheduled to it.
# 4 - INSERT value 33 on master. It will be held on slave by other worker
# thread due to debug simulation.
# 5 - INSERT value 34 on master.
#  6 - On slave, ensure that INSERT 34 has reached a state where it waits for
# its prior transactions to commit.
# 7 - Commit the local INSERT 32 on slave server so that first worker will
# error out.
#  8 - Now send a continue signal to the second worker processing 33. It should
#      wake up and propagate the error to INSERT 34.
# 9 - Upon slave stop due to error, check that no rows are found after the
# failed INSERT 32.
#
# ==== References ====
#
# MDEV-20645: Replication consistency is broken as workers miss the error
# notification from an earlier failed group.
#
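# Illustrative note (not part of the original test): the consistency check in
# step 9 amounts to verifying that
#   SELECT COUNT(*) FROM t1 WHERE a > 32;
# returns 0 on the slave, which the assert.inc call near the end of this test
# performs via $assert_cond.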
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_statement.inc
--source include/master-slave.inc
--enable_connect_log
--connection server_2
--source include/stop_slave.inc
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET @old_debug= @@GLOBAL.debug_dbug;
SET GLOBAL slave_parallel_mode='optimistic';
SET GLOBAL slave_parallel_threads= 3;
CHANGE MASTER TO master_use_gtid=slave_pos;
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
--source include/start_slave.inc
--connection server_1
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
--source include/save_master_gtid.inc
--connection server_2
--source include/sync_with_master_gtid.inc
--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
BEGIN;
INSERT INTO t1 VALUES (32);
--connection server_1
INSERT INTO t1 VALUES (32);
--connection server_2
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE info like "INSERT INTO t1 VALUES (32)"
--source include/wait_condition.inc
SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker";
--connection server_1
SET gtid_seq_no=100;
INSERT INTO t1 VALUES (33);
--connection server_2
SET debug_sync='now WAIT_FOR reached_pause';
--connection server_1
INSERT INTO t1 VALUES (34);
--connection server_2
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit"
--source include/wait_condition.inc
--connection con_temp2
COMMIT;
# Clean up.
--connection server_2
--source include/stop_slave.inc
--let $assert_cond= COUNT(*) = 0 FROM t1 WHERE a>32
--let $assert_text= table t1 should have zero rows where a>32
--source include/assert.inc
SELECT * FROM t1 WHERE a>32;
DELETE FROM t1 WHERE a=32;
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
SET GLOBAL slave_parallel_mode=@old_parallel_mode;
SET GLOBAL debug_dbug=@old_debug;
SET DEBUG_SYNC= 'RESET';
--source include/start_slave.inc
--connection server_1
DROP TABLE t1;
--disable_connect_log
--source include/rpl_end.inc

View File

@ -1 +1,38 @@
--source include/rpl_parallel_show_binlog_events_purge_logs.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# BUG#13979418: SHOW BINLOG EVENTS MAY CRASH THE SERVER
#
# The function mysql_show_binlog_events has a local stack variable
# 'LOG_INFO linfo;', which is assigned to thd->current_linfo; however,
# this variable goes out of scope and is destroyed before
# thd->current_linfo is cleared.
#
# This test case runs SHOW BINLOG EVENTS and FLUSH LOGS concurrently to make
# sure that, with the fix, the local variable linfo remains valid throughout
# the whole scope of the mysql_show_binlog_events function.
#
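# Illustrative note (not part of the original test): the DEBUG_SYNC point
# named 'after_show_binlog_events' used below parks the connection inside the
# SHOW BINLOG EVENTS code path (hence the have_debug*.inc requirements below),
# while a second connection runs FLUSH LOGS before the first one is allowed
# to finish.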
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/master-slave.inc
--connection slave
SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL on_show_binlog_events WAIT_FOR end';
--send SHOW BINLOG EVENTS
--connection slave1
SET DEBUG_SYNC= 'now WAIT_FOR on_show_binlog_events';
FLUSH LOGS;
SET DEBUG_SYNC= 'now SIGNAL end';
--connection slave
--disable_result_log
--reap
--enable_result_log
SET DEBUG_SYNC= 'RESET';
--connection master
--source include/rpl_end.inc

View File

@ -1 +1,18 @@
--source include/rpl_relayrotate.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#######################################################
# Wrapper for rpl_relayrotate.test that lets multiple  #
# engines reuse the test code. By JBM 2006-02-15       #
#######################################################
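# Illustrative sketch (not part of the original test): a wrapper for some
# other transactional engine could reuse the same shared file by changing
# only the engine variable, along the lines of (hypothetical engine name):
#   let $engine_type=rocksdb;
#   -- source suite/rpl/include/rpl_relayrotate.test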
-- source include/have_innodb.inc
# Slow test, don't run during staging part
-- source include/not_staging.inc
-- source include/master-slave.inc
let $engine_type=innodb;
-- source suite/rpl/include/rpl_relayrotate.test
--source include/rpl_end.inc

View File

@ -1 +1,525 @@
--source include/rpl_semi_sync.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
source include/not_embedded.inc;
source include/have_innodb.inc;
source include/master-slave.inc;
let $engine_type= InnoDB;
# Suppress warnings that might be generated during the test
connection master;
call mtr.add_suppression("Timeout waiting for reply of binlog");
call mtr.add_suppression("Read semi-sync reply");
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.");
call mtr.add_suppression("mysqld: Got an error reading communication packets");
connection slave;
call mtr.add_suppression("Master server does not support semi-sync");
call mtr.add_suppression("Semi-sync slave .* reply");
call mtr.add_suppression("Slave SQL.*Request to stop slave SQL Thread received while applying a group that has non-transactional changes; waiting for completion of the group");
connection master;
# wait for dying connections (if any) to disappear
let $wait_condition= select count(*) = 0 from information_schema.processlist where command='killed';
--source include/wait_condition.inc
# After fix of BUG#45848, semi-sync slave should not create any extra
# connections on master, save the count of connections before start
# semi-sync slave for comparison below.
let $_connections_normal_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1);
--echo #
--echo # Uninstall semi-sync plugins on master and slave
--echo #
connection slave;
source include/stop_slave.inc;
reset slave;
set global rpl_semi_sync_master_enabled= 0;
set global rpl_semi_sync_slave_enabled= 0;
connection master;
reset master;
set global rpl_semi_sync_master_enabled= 0;
set global rpl_semi_sync_slave_enabled= 0;
--echo #
--echo # Main test of semi-sync replication start here
--echo #
connection master;
set global rpl_semi_sync_master_timeout= 60000; # 60s
echo [ default state of semi-sync on master should be OFF ];
show variables like 'rpl_semi_sync_master_enabled';
echo [ enable semi-sync on master ];
set global rpl_semi_sync_master_enabled = 1;
show variables like 'rpl_semi_sync_master_enabled';
echo [ status of semi-sync on master should be ON even without any semi-sync slaves ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # BUG#45672 Semisync repl: ActiveTranx:insert_tranx_node: transaction node allocation failed
--echo # BUG#45673 Semisynch reports correct operation even if no slave is connected
--echo #
# BUG#45672 When semi-sync is enabled on master, it would allocate a
# transaction node even without a semi-sync slave connected, which would
# eventually result in a transaction node allocation error.
#
# Semi-sync master will pre-allocate 'max_connections' transaction
# nodes, so here we run more transactions than that to check whether
# the allocation fails.
# select @@global.max_connections + 1;
let $i= `select @@global.max_connections + 1`;
disable_query_log;
eval create table t1 (a int) engine=$engine_type;
while ($i)
{
eval insert into t1 values ($i);
dec $i;
}
drop table t1;
enable_query_log;
# BUG#45673
echo [ status of semi-sync on master should be OFF ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_yes_tx';
# reset master to make sure the following test will start with a clean environment
reset master;
connection slave;
echo [ default state of semi-sync on slave should be OFF ];
show variables like 'rpl_semi_sync_slave_enabled';
echo [ enable semi-sync on slave ];
set global rpl_semi_sync_slave_enabled = 1;
show variables like 'rpl_semi_sync_slave_enabled';
source include/start_slave.inc;
connection master;
# NOTE: Rpl_semi_sync_master_clients will only be updated when
# the semi-sync slave has started its binlog dump request
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
echo [ initial master state after the semi-sync slave connected ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
replace_result $engine_type ENGINE_TYPE;
eval create table t1(a int) engine = $engine_type;
echo [ master state after CREATE TABLE statement ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
# After fix of BUG#45848, semi-sync slave should not create any extra
# connections on master.
let $_connections_semisync_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1);
replace_result $_connections_normal_slave CONNECTIONS_NORMAL_SLAVE $_connections_semisync_slave CONNECTIONS_SEMISYNC_SLAVE;
eval select $_connections_semisync_slave - $_connections_normal_slave as 'Should be 0';
echo [ insert records to table ];
insert t1 values (10);
insert t1 values (9);
insert t1 values (8);
insert t1 values (7);
insert t1 values (6);
insert t1 values (5);
insert t1 values (4);
insert t1 values (3);
insert t1 values (2);
insert t1 values (1);
echo [ master status after inserts ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
sync_slave_with_master;
echo [ slave status after replicated inserts ];
show status like 'Rpl_semi_sync_slave_status';
select count(distinct a) from t1;
select min(a) from t1;
select max(a) from t1;
--echo
--echo # BUG#50157
--echo # semi-sync replication crashes when replicating a transaction which
--echo # include 'CREATE TEMPORARY TABLE `MyISAM_t` SELECT * FROM `Innodb_t` ;
connection master;
SET SESSION AUTOCOMMIT= 0;
CREATE TABLE t2(c1 INT) ENGINE=innodb;
sync_slave_with_master;
connection master;
BEGIN;
--echo
--echo # Even though it is in a transaction, this statement is binlogged into binlog
--echo # file immediately.
--disable_warnings
CREATE TEMPORARY TABLE t3 SELECT c1 FROM t2 where 1=1;
--enable_warnings
--echo
--echo # These statements will not be binlogged until the transaction is committed
INSERT INTO t2 VALUES(11);
INSERT INTO t2 VALUES(22);
COMMIT;
DROP TABLE t2, t3;
SET SESSION AUTOCOMMIT= 1;
sync_slave_with_master;
--echo #
--echo # Test semi-sync master will switch OFF after one transaction
--echo # timeout waiting for slave reply.
--echo #
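# Illustrative note (not part of the original test): with
# rpl_semi_sync_master_timeout set to 5000 (5 seconds, see below) and no
# semi-sync slave acknowledging, a commit waits up to that long for an ACK
# and then the master falls back to asynchronous replication, flipping
# Rpl_semi_sync_master_status from ON to OFF, which is what this section
# verifies.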
connection slave;
source include/stop_slave.inc;
connection master;
--source include/kill_binlog_dump_threads.inc
set global rpl_semi_sync_master_timeout= 5000;
# The first semi-sync check should be on because after slave stop,
# there are no transactions on the master.
echo [ master status should be ON ];
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 0;
source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
echo [ semi-sync replication of these transactions will fail ];
insert into t1 values (500);
# Wait for the semi-sync replication of this transaction to timeout
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= OFF;
source include/wait_for_status_var.inc;
# The second semi-sync check should be off because one transaction
# times out during waiting.
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
# Semi-sync status on master is now OFF, so all these transactions
# will be replicated asynchronously.
delete from t1 where a=10;
delete from t1 where a=9;
delete from t1 where a=8;
delete from t1 where a=7;
delete from t1 where a=6;
delete from t1 where a=5;
delete from t1 where a=4;
delete from t1 where a=3;
delete from t1 where a=2;
delete from t1 where a=1;
insert into t1 values (100);
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # Test semi-sync status on master will be ON again when slave catches up
--echo #
# Save the master position for later use.
save_master_pos;
connection slave;
echo [ slave status should be OFF ];
show status like 'Rpl_semi_sync_slave_status';
source include/start_slave.inc;
sync_with_master;
echo [ slave status should be ON ];
show status like 'Rpl_semi_sync_slave_status';
select count(distinct a) from t1;
select min(a) from t1;
select max(a) from t1;
connection master;
# The master semi-sync status should be on again after slave catches up.
echo [ master status should be ON again after slave catches up ];
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
show status like 'Rpl_semi_sync_master_clients';
--echo #
--echo # Test disable/enable master semi-sync on the fly.
--echo #
drop table t1;
sync_slave_with_master;
source include/stop_slave.inc;
--echo #
--echo # Flush status
--echo #
connection master;
echo [ Semi-sync master status variables before FLUSH STATUS ];
SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
# Do not write the FLUSH STATUS to binlog, to make sure we'll get a
# clean status after this.
FLUSH NO_WRITE_TO_BINLOG STATUS;
echo [ Semi-sync master status variables after FLUSH STATUS ];
SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
connection master;
source include/show_master_logs.inc;
show variables like 'rpl_semi_sync_master_enabled';
echo [ disable semi-sync on the fly ];
set global rpl_semi_sync_master_enabled=0;
show variables like 'rpl_semi_sync_master_enabled';
show status like 'Rpl_semi_sync_master_status';
echo [ enable semi-sync on the fly ];
set global rpl_semi_sync_master_enabled=1;
show variables like 'rpl_semi_sync_master_enabled';
show status like 'Rpl_semi_sync_master_status';
--echo #
--echo # Test RESET MASTER/SLAVE
--echo #
connection slave;
source include/start_slave.inc;
connection master;
replace_result $engine_type ENGINE_TYPE;
eval create table t1 (a int) engine = $engine_type;
drop table t1;
sync_slave_with_master;
echo [ test reset master ];
connection master;
reset master;
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
connection slave;
source include/stop_slave.inc;
reset slave;
# Kill the dump thread on master for previous slave connection and wait for it to exit
--source include/kill_binlog_dump_threads.inc
connection slave;
source include/start_slave.inc;
connection master;
# Wait for dump thread to start, Rpl_semi_sync_master_clients will be
# 1 after dump thread started.
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
replace_result $engine_type ENGINE_TYPE;
eval create table t1 (a int) engine = $engine_type;
insert into t1 values (1);
insert into t1 values (2), (3);
sync_slave_with_master;
select * from t1;
connection master;
echo [ master semi-sync status should be ON ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # Start semi-sync replication without SUPER privilege
--echo #
connection slave;
source include/stop_slave.inc;
reset slave;
connection master;
reset master;
# Kill the dump thread on master for previous slave connection and wait for it to exit
--source include/kill_binlog_dump_threads.inc
# Do not binlog the following statement because it will generate
# different events for ROW and STATEMENT format
set sql_log_bin=0;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
set sql_log_bin=1;
connection slave;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
change master to master_user='rpl',master_password='rpl_password';
source include/start_slave.inc;
show status like 'Rpl_semi_sync_slave_status';
connection master;
# Wait for the semi-sync binlog dump thread to start
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
echo [ master semi-sync should be ON ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
insert into t1 values (4);
insert into t1 values (5);
echo [ master semi-sync should be ON ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
--echo # Test semi-sync slave connect to non-semi-sync master
--echo #
# Disable semi-sync on master
connection slave;
source include/stop_slave.inc;
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
connection master;
# Kill the dump thread on master for previous slave connection and wait for it to exit
--source include/kill_binlog_dump_threads.inc
echo [ Semi-sync status on master should be ON ];
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 0;
source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_status';
let $status_var= Rpl_semi_sync_master_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
set global rpl_semi_sync_master_enabled= 0;
connection slave;
SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
source include/start_slave.inc;
connection master;
insert into t1 values (8);
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
echo [ master semi-sync clients should be 1, status should be OFF ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
sync_slave_with_master;
show status like 'Rpl_semi_sync_slave_status';
# Uninstall semi-sync plugin on master
connection slave;
source include/stop_slave.inc;
connection master;
set global rpl_semi_sync_master_enabled= 0;
connection slave;
SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
source include/start_slave.inc;
connection master;
insert into t1 values (10);
sync_slave_with_master;
--echo #
--echo # Test non-semi-sync slave connect to semi-sync master
--echo #
connection master;
set global rpl_semi_sync_master_timeout= 5000; # 5s
set global rpl_semi_sync_master_enabled= 1;
connection slave;
source include/stop_slave.inc;
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
echo [ uninstall semi-sync slave plugin ];
set global rpl_semi_sync_slave_enabled= 0;
echo [ reinstall semi-sync slave plugin and disable semi-sync ];
SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled';
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
source include/start_slave.inc;
SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
--echo #
--echo # Clean up
--echo #
connection slave;
source include/stop_slave.inc;
set global rpl_semi_sync_slave_enabled= 0;
connection master;
set global rpl_semi_sync_master_enabled= 0;
connection slave;
change master to master_user='root',master_password='';
source include/start_slave.inc;
connection master;
drop table t1;
sync_slave_with_master;
connection master;
drop user rpl@127.0.0.1;
flush privileges;
set global rpl_semi_sync_master_timeout= default;
--source include/rpl_end.inc

View File

@ -1 +1,402 @@
--source include/rpl_skip_replication.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it.
#
# Usage:
#
# --let $use_remote_mysqlbinlog= 1 # optional
# --source suite/rpl/include/rpl_skip_replication.inc
#
# The script uses MYSQLBINLOG to verify certain results.
# By default, it reads the binary logs directly. If that is undesirable,
# the behavior can be overridden by setting $use_remote_mysqlbinlog
# as shown above.
# The value will be unset after every execution of the script,
# so if it is needed, it should be set explicitly before each call.
#
--source include/have_innodb.inc
--source include/master-slave.inc
connection slave;
# Test that SUPER is required to change @@replicate_events_marked_for_skip.
CREATE USER 'nonsuperuser'@'127.0.0.1';
GRANT ALTER,CREATE,DELETE,DROP,EVENT,INSERT,PROCESS,REPLICATION SLAVE,
SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1';
connect(nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,);
connection nonpriv;
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
disconnect nonpriv;
connection slave;
DROP USER'nonsuperuser'@'127.0.0.1';
SELECT @@global.replicate_events_marked_for_skip;
--error ER_SLAVE_MUST_STOP
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
SELECT @@global.replicate_events_marked_for_skip;
STOP SLAVE;
--error ER_GLOBAL_VARIABLE
SET SESSION replicate_events_marked_for_skip=FILTER_ON_MASTER;
SELECT @@global.replicate_events_marked_for_skip;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
SELECT @@global.replicate_events_marked_for_skip;
START SLAVE;
connection master;
SELECT @@skip_replication;
--error ER_LOCAL_VARIABLE
SET GLOBAL skip_replication=1;
SELECT @@skip_replication;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=innodb;
INSERT INTO t1(a) VALUES (1);
INSERT INTO t2(a) VALUES (1);
# Test that master-side filtering works.
SET skip_replication=1;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
INSERT INTO t1(a) VALUES (2);
INSERT INTO t2(a) VALUES (2);
# Inject a rotate event in the binlog stream sent to slave (otherwise we will
# fail sync_slave_with_master as the last event on the master is not present
# on the slave).
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
connection slave;
SHOW TABLES;
SELECT * FROM t1;
SELECT * FROM t2;
connection master;
DROP TABLE t3;
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
# Test that slave-side filtering works.
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
START SLAVE;
connection master;
SET skip_replication=1;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
INSERT INTO t1(a) VALUES (3);
INSERT INTO t2(a) VALUES (3);
# Inject a rotate event in the binlog stream sent to slave (otherwise we will
# fail sync_slave_with_master as the last event on the master is not present
# on the slave).
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
connection slave;
SHOW TABLES;
SELECT * FROM t1;
SELECT * FROM t2;
connection master;
DROP TABLE t3;
FLUSH NO_WRITE_TO_BINLOG LOGS;
sync_slave_with_master;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
# Test that events with @@skip_replication=1 are not filtered when filtering is
# not set on slave.
connection master;
SET skip_replication=1;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam;
INSERT INTO t3(a) VALUES(2);
sync_slave_with_master;
connection slave;
SELECT * FROM t3;
connection master;
DROP TABLE t3;
#
# Test that the slave will preserve the @@skip_replication flag in its
# own binlog.
#
TRUNCATE t1;
sync_slave_with_master;
connection slave;
RESET MASTER;
connection master;
SET skip_replication=0;
INSERT INTO t1 VALUES (1,0);
SET skip_replication=1;
INSERT INTO t1 VALUES (2,0);
SET skip_replication=0;
INSERT INTO t1 VALUES (3,0);
sync_slave_with_master;
connection slave;
# Since slave has @@replicate_events_marked_for_skip=REPLICATE, it should have
# applied all events.
SELECT * FROM t1 ORDER by a;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
let $SLAVE_DATADIR= `select @@datadir`;
connection master;
TRUNCATE t1;
# Now apply the slave binlog to the master, to check that both the slave
# and mysqlbinlog will preserve the @@skip_replication flag.
--let $mysqlbinlog_args= $SLAVE_DATADIR/slave-bin.000001
if ($use_remote_mysqlbinlog)
{
--let $mysqlbinlog_args= --read-from-remote-server --protocol=tcp --host=127.0.0.1 --port=$SLAVE_MYPORT -uroot slave-bin.000001
--let $use_remote_mysqlbinlog= 0
}
--exec $MYSQL_BINLOG $mysqlbinlog_args > $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog
--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog
# The master should have all three events.
SELECT * FROM t1 ORDER by a;
# The slave should be missing event 2, which is marked with the
# @@skip_replication flag.
connection slave;
START SLAVE;
connection master;
sync_slave_with_master;
connection slave;
SELECT * FROM t1 ORDER by a;
#
# Test that @@sql_slave_skip_counter does not count skipped @@skip_replication
# events.
#
connection master;
TRUNCATE t1;
sync_slave_with_master;
connection slave;
STOP SLAVE;
# We will skip two INSERTs (in addition to any skipped due to
# @@skip_replication). Since, as of 5.5, every statement is wrapped in
# BEGIN ... END (roughly three events per INSERT), we need to skip 6 events
# for this.
SET GLOBAL sql_slave_skip_counter=6;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
START SLAVE;
connection master;
# Need to fix @@binlog_format to get consistent event count.
SET @old_binlog_format= @@binlog_format;
SET binlog_format= statement;
SET skip_replication=0;
INSERT INTO t1 VALUES (1,5);
SET skip_replication=1;
INSERT INTO t1 VALUES (2,5);
SET skip_replication=0;
INSERT INTO t1 VALUES (3,5);
INSERT INTO t1 VALUES (4,5);
SET binlog_format= @old_binlog_format;
sync_slave_with_master;
connection slave;
# The slave should have skipped the first three inserts (numbers 1 and 3 due
# to the @@sql_slave_skip_counter=6 setting above, number 2 due to
# @@replicate_events_marked_for_skip=FILTER_ON_SLAVE). So only number 4
# should be left.
SELECT * FROM t1;
#
# Check that BINLOG statement preserves the @@skip_replication flag.
#
connection slave;
# Need row @@binlog_format for BINLOG statements containing row events.
--source include/stop_slave.inc
SET @old_slave_binlog_format= @@global.binlog_format;
SET GLOBAL binlog_format= row;
--source include/start_slave.inc
connection master;
TRUNCATE t1;
SET @old_binlog_format= @@binlog_format;
SET binlog_format= row;
# Format description log event.
BINLOG 'wlZOTw8BAAAA8QAAAPUAAAAAAAQANS41LjIxLU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAEzgNAAgAEgAEBAQEEgAA2QAEGggAAAAICAgCAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAA371saA==';
# INSERT INTO t1 VALUES (1,8) # with @@skip_replication=1
BINLOG 'wlZOTxMBAAAAKgAAAGMBAAAAgCkAAAAAAAEABHRlc3QAAnQxAAIDAwAC
wlZOTxcBAAAAJgAAAIkBAAAAgCkAAAAAAAEAAv/8AQAAAAgAAAA=';
# INSERT INTO t1 VALUES (2,8) # with @@skip_replication=0
BINLOG 'wlZOTxMBAAAAKgAAADwCAAAAACkAAAAAAAEABHRlc3QAAnQxAAIDAwAC
wlZOTxcBAAAAJgAAAGICAAAAACkAAAAAAAEAAv/8AgAAAAgAAAA=';
SET binlog_format= @old_binlog_format;
SELECT * FROM t1 ORDER BY a;
sync_slave_with_master;
connection slave;
# Slave should have only the second insert, the first should be ignored due to
# the @@skip_replication flag.
SELECT * FROM t1 ORDER by a;
--source include/stop_slave.inc
SET GLOBAL binlog_format= @old_slave_binlog_format;
--source include/start_slave.inc
# Test that it is not possible to change @@skip_replication inside a
# transaction or statement, thereby replicating only parts of statements
# or transactions.
connection master;
SET skip_replication=0;
BEGIN;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=0;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=1;
ROLLBACK;
SET skip_replication=1;
BEGIN;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=0;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=1;
COMMIT;
SET autocommit=0;
INSERT INTO t2(a) VALUES(100);
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET skip_replication=1;
ROLLBACK;
SET autocommit=1;
SET skip_replication=1;
--delimiter |
CREATE FUNCTION foo (x INT) RETURNS INT BEGIN SET SESSION skip_replication=x; RETURN x; END|
CREATE PROCEDURE bar(x INT) BEGIN SET SESSION skip_replication=x; END|
CREATE FUNCTION baz (x INT) RETURNS INT BEGIN CALL bar(x); RETURN x; END|
--delimiter ;
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SELECT foo(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SELECT baz(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET @a= foo(1);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
SET @a= baz(1);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
UPDATE t2 SET b=foo(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
UPDATE t2 SET b=baz(0);
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
INSERT INTO t1 VALUES (101, foo(1));
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
INSERT INTO t1 VALUES (101, baz(0));
SELECT @@skip_replication;
CALL bar(0);
SELECT @@skip_replication;
CALL bar(1);
SELECT @@skip_replication;
DROP FUNCTION foo;
DROP PROCEDURE bar;
DROP FUNCTION baz;
# Test that master-side filtering happens on the master side, and that
# slave-side filtering happens on the slave.
# First test that events do not reach the slave when master-side filtering
# is configured. Do this by replicating first with only the IO thread running
# and master-side filtering; then change to no filtering and start the SQL
# thread. This should still skip the events, as master-side filtering
# means the events never reached the slave.
connection master;
SET skip_replication= 0;
TRUNCATE t1;
sync_slave_with_master;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
START SLAVE IO_THREAD;
connection master;
SET skip_replication= 1;
INSERT INTO t1(a) VALUES (1);
SET skip_replication= 0;
INSERT INTO t1(a) VALUES (2);
--source include/save_master_pos.inc
connection slave;
--source include/sync_io_with_master.inc
STOP SLAVE IO_THREAD;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
connection master;
sync_slave_with_master;
connection slave;
# Now only the second insert of (2) should be visible, as the first was
# filtered on the master, so even though the SQL thread ran without skipping
# events, it will never see the event in the first place.
SELECT * FROM t1;
# Now test that when slave-side filtering is configured, events _do_ reach
# the slave.
connection master;
SET skip_replication= 0;
TRUNCATE t1;
sync_slave_with_master;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
START SLAVE IO_THREAD;
connection master;
SET skip_replication= 1;
INSERT INTO t1(a) VALUES (1);
SET skip_replication= 0;
INSERT INTO t1(a) VALUES (2);
--source include/save_master_pos.inc
connection slave;
--source include/sync_io_with_master.inc
STOP SLAVE IO_THREAD;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
connection master;
sync_slave_with_master;
connection slave;
# Now both inserts should be visible. Since filtering was configured to be
# slave-side, the event is in the relay log, and when the SQL thread ran we
# had disabled filtering again.
SELECT * FROM t1 ORDER BY a;
# Clean up.
connection master;
SET skip_replication=0;
DROP TABLE t1,t2;
connection slave;
STOP SLAVE;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
START SLAVE;
--source include/rpl_end.inc

View File

@ -1 +1,32 @@
--source include/rpl_special_charset.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
################################################################################
# Bug#19855907 IO THREAD AUTHENTICATION ISSUE WITH SOME CHARACTER SETS
# Problem: IO thread fails to connect to master if servers are configured with
# special character sets like utf16, utf32, ucs2.
#
# Analysis: the MySQL server does not support some special character sets,
# such as utf16, utf32 and ucs2, as the client character set.
# When the IO thread connects to the master, it uses the server character
# set as its client character set. When the slave server is started with one
# of these special character sets, the IO thread (a connection to the master)
# therefore fails to connect.
#
# Fix: if the server character set is not supported as a client character
# set, use the default client character set (latin1) instead.
###############################################################################
--source include/master-slave.inc
call mtr.add_suppression("'utf16' can not be used as client character set");
CREATE TABLE t1(i VARCHAR(20));
INSERT INTO t1 VALUES (0xFFFF);
--sync_slave_with_master
--let diff_tables=master:t1, slave:t1
--source include/diff_tables.inc
# Cleanup
--connection master
DROP TABLE t1;
--source include/rpl_end.inc

View File

@ -1 +1,32 @@
--source include/rpl_sporadic_master.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
# test to see if replication can continue when master sporadically fails on
# COM_BINLOG_DUMP and additionally limits the number of events per dump
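# Note (not part of the original test, assumption): the sporadic
# COM_BINLOG_DUMP failures and the per-dump event limit are not induced by
# this script itself; they are presumably configured through server startup
# options in an accompanying .opt file, so the script only has to verify
# that replication keeps catching up.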
source include/master-slave.inc;
create table t2(n int);
create table t1(n int not null auto_increment primary key);
insert into t1 values (NULL),(NULL);
truncate table t1;
# We have to use 4 in the following to make this test work with all table types
insert into t1 values (4),(NULL);
sync_slave_with_master;
--source include/stop_slave.inc
--source include/start_slave.inc
connection master;
insert into t1 values (NULL),(NULL);
flush logs;
truncate table t1;
insert into t1 values (10),(NULL),(NULL),(NULL),(NULL),(NULL);
sync_slave_with_master;
select * from t1 ORDER BY n;
connection master;
drop table t1,t2;
sync_slave_with_master;
--source include/rpl_end.inc

View File

@ -1 +1,116 @@
--source include/rpl_ssl.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
source include/have_ssl_communication.inc;
source include/master-slave.inc;
source include/no_valgrind_without_big.inc;
# create a user for replication that requires ssl encryption
connection master;
create user replssl@localhost;
grant replication slave on *.* to replssl@localhost require ssl;
create table t1 (t int auto_increment, KEY(t));
sync_slave_with_master;
# Set slave to use SSL for connection to master
stop slave;
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
eval change master to
master_user='replssl',
master_password='',
master_ssl=1,
master_ssl_ca ='$MYSQL_TEST_DIR/std_data/cacert.pem',
master_ssl_cert='$MYSQL_TEST_DIR/std_data/client-cert.pem',
master_ssl_key='$MYSQL_TEST_DIR/std_data/client-key.pem';
start slave;
# Switch to master and insert one record, then sync it to slave
connection master;
insert into t1 values(1);
sync_slave_with_master;
# The record should now be on slave
select * from t1;
# The slave is synced and waiting/reading from master
# SHOW SLAVE STATUS will show "Waiting for master to send event"
let $status_items= Master_SSL_Allowed, Master_SSL_CA_Path, Master_SSL_CA_File, Master_SSL_Crl, Master_SSL_Crlpath, Master_SSL_Cert, Master_SSL_Key;
source include/show_slave_status.inc;
source include/check_slave_is_running.inc;
# Stop the slave; as reported in BUG#21871 this used to hang
STOP SLAVE;
select * from t1;
# Do the same thing a number of times
disable_query_log;
disable_result_log;
# 2007-11-27 mats Bug #32756 Starting and stopping the slave in a loop can lose rows
# After discussions with Engineering, I'm disabling this part of the test to avoid it causing
# red trees.
disable_parsing;
let $i= 100;
while ($i)
{
start slave;
connection master;
insert into t1 values (NULL);
select * from t1; # Some variance
connection slave;
select * from t1; # Some variance
stop slave;
dec $i;
}
enable_parsing;
START SLAVE;
enable_query_log;
enable_result_log;
connection master;
# INSERT one more record to make sure
# the sync has something to do
insert into t1 values (NULL);
let $master_count= `select count(*) from t1`;
sync_slave_with_master;
--source include/wait_for_slave_to_start.inc
source include/show_slave_status.inc;
source include/check_slave_is_running.inc;
let $slave_count= `select count(*) from t1`;
if ($slave_count != $master_count)
{
echo master and slave differed in number of rows;
echo master: $master_count;
echo slave: $slave_count;
connection master;
select count(*) from t1;
select * from t1;
connection slave;
select count(*) from t1;
select * from t1;
query_vertical show slave status;
}
connection master;
drop user replssl@localhost;
drop table t1;
sync_slave_with_master;
--source include/stop_slave.inc
CHANGE MASTER TO
master_user = 'root',
master_ssl = 0,
master_ssl_ca = '',
master_ssl_cert = '',
master_ssl_key = '';
--echo End of 5.0 tests
--let $rpl_only_running_threads= 1
--source include/rpl_end.inc

View File

@ -1 +1,107 @@
--source include/rpl_stm_relay_ign_space.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#
# BUG#12400313 / BUG#64503 test case
#
#
# Description
# -----------
#
# This test case starts the slave server with:
# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096
#
# Then it issues some queries that will cause the slave to reach
# relay-log-space-limit. We lock the table so that the SQL thread is
# not able to purge the log and then we issue some more statements.
#
# The purpose is to show that the IO thread will honor the limits
# while the SQL thread is not able to purge the relay logs, which did
# not happen before this patch. In addition we assert that, while
# ignoring the limit (the SQL thread needs to rotate before purging), the
# IO thread does not do so in an uncontrolled manner.
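# Illustrative note (not part of the original test): relay-log-space-limit
# caps the combined size of all relay logs; once it is reached the IO thread
# normally waits for the SQL thread to purge. The IO thread may temporarily
# ignore the limit (rli->ignore_space_limit) so that the SQL thread can
# rotate and purge, and the point of this test is that it does so only in a
# controlled way even while purging is blocked by the LOCK TABLE below.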
--source include/have_binlog_format_statement.inc
--source include/have_innodb.inc
--source include/master-slave.inc
--disable_query_log
CREATE TABLE t1 (c1 TEXT) engine=InnoDB;
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
--sync_slave_with_master
# wait for the SQL thread to sleep
--let $show_statement= SHOW PROCESSLIST
--let $field= State
--let $condition= = 'Slave has read all relay log; waiting for the slave I/O thread to update it'
--source include/wait_show_condition.inc
# Now the IO thread has set rli->ignore_space_limit.
# Let's lock the table so that once the SQL thread awakes
# it blocks there and does not set rli->ignore_space_limit
# back to zero.
LOCK TABLE t1 WRITE;
# now issue more statements that will overflow the
# rli->log_space_limit (in this case ~10K)
--connection master
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx');
--connection slave
# ASSERT that the IO thread waits for the SQL thread to release some
# space before continuing
--let $show_statement= SHOW PROCESSLIST
--let $field= State
--let $condition= LIKE 'Waiting for %'
# before the patch (IO would have transferred everything)
#--let $condition= = 'Waiting for master to send event'
# after the patch (now it waits for space to be freed)
#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space'
--source include/wait_show_condition.inc
# without the patch we can uncomment the following two lines and
# watch the IO thread synchronize with the master, thus writing
# relay logs way over the space limit
#--connection master
#--source include/sync_slave_io_with_master.inc
# ASSERT that the IO thread has honored the limit plus the few extra bytes required to be able to purge
--let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1)
--let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1)
--let $assert_text= Assert that relay log space is close to the limit
--let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15
--source include/assert.inc
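# The 1.15 factor above gives the check some slack: per the note above, the
# IO thread may write a few bytes past relay_log_space_limit so that purging
# can still proceed, so the observed relay log space can slightly exceed the
# configured limit.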
# unlock the table and let SQL thread continue applying events
UNLOCK TABLES;
--connection master
--sync_slave_with_master
--let $diff_tables=master:test.t1,slave:test.t1
--source include/diff_tables.inc
--connection master
DROP TABLE t1;
--enable_query_log
--sync_slave_with_master
--source include/rpl_end.inc

View File

@ -1 +1,633 @@
--source include/rpl_switch_stm_row_mixed.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
#
# The rpl_switch_stm_row_mixed test covers:
#
# - the master switching explicitly between STATEMENT, ROW, and MIXED
#   binlog formats, showing when this is possible and when it is not;
# - the master switching from MIXED to RBR implicitly, listing all use
#   cases (e.g. a query invoking UUID()), thereby serving as the
#   definition of the MIXED binlog format;
# - correctness of execution.
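# For illustration only (not part of the test flow): under MIXED format a
# statement whose result is non-deterministic on the slave, e.g.
#   INSERT INTO t1 VALUES (UUID());
# is written to the binary log as row events, while a deterministic statement
# such as
#   INSERT INTO t1 VALUES ('work');
# stays statement-based.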
-- source include/have_binlog_format_mixed_or_row.inc
-- source include/master-slave.inc
# Since this test generates row-based events in the binary log, the
# slave SQL thread cannot be in STATEMENT mode to execute this test,
# so we only execute it for MIXED and ROW as default value of
# BINLOG_FORMAT.
connection slave;
connection master;
--disable_warnings
drop database if exists mysqltest1;
create database mysqltest1;
--enable_warnings
use mysqltest1;
# Save binlog format
set @my_binlog_format= @@global.binlog_format;
# play with switching
set session binlog_format=mixed;
show session variables like "binlog_format%";
set session binlog_format=statement;
show session variables like "binlog_format%";
set session binlog_format=row;
show session variables like "binlog_format%";
set global binlog_format=DEFAULT;
show global variables like "binlog_format%";
set global binlog_format=MIXED;
show global variables like "binlog_format%";
set global binlog_format=STATEMENT;
show global variables like "binlog_format%";
set global binlog_format=ROW;
show global variables like "binlog_format%";
show session variables like "binlog_format%";
select @@global.binlog_format, @@session.binlog_format;
CREATE TABLE t1 (a varchar(100));
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
set @string="emergency_1_";
insert into t1 values("work_2_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(concat(UUID(),"work_3_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(concat("for_4_",UUID()));
insert into t1 select "yesterday_5_";
# verify that temp tables prevent a switch to SBR
create temporary table tmp(a char(100));
insert into tmp values("see_6_");
--error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
set binlog_format=statement;
insert into t1 select * from tmp;
drop temporary table tmp;
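# The switch above is refused (ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR)
# while the session still has open temporary tables; once tmp is dropped,
# switching to STATEMENT succeeds, as done below.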
# Now we go to SBR
set binlog_format=statement;
show global variables like "binlog_format%";
show session variables like "binlog_format%";
select @@global.binlog_format, @@session.binlog_format;
set global binlog_format=statement;
show global variables like "binlog_format%";
show session variables like "binlog_format%";
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select ?';
set @string="emergency_7_";
insert into t1 values("work_8_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values("work_9_");
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values("for_10_");
insert into t1 select "yesterday_11_";
# test statement (is not default after wl#3368)
set binlog_format=statement;
select @@global.binlog_format, @@session.binlog_format;
set global binlog_format=statement;
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select ?';
set @string="emergency_12_";
insert into t1 values("work_13_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values("work_14_");
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values("for_15_");
insert into t1 select "yesterday_16_";
# and now the mixed mode
set global binlog_format=mixed;
select @@global.binlog_format, @@session.binlog_format;
set binlog_format=default;
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
set @string="emergency_17_";
insert into t1 values("work_18_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(concat(UUID(),"work_19_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(concat("for_20_",UUID()));
insert into t1 select "yesterday_21_";
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(concat(UUID(),"work_22_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(concat("for_23_",UUID()));
insert into t1 select "yesterday_24_";
# Test of CREATE TABLE SELECT
create table t2 ENGINE=MyISAM select rpad(UUID(),100,' ');
create table t3 select 1 union select UUID();
--disable_warnings
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3);
--enable_warnings
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
# what if UUID() is first:
--disable_warnings
insert ignore into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
--enable_warnings
# inside a stored procedure
delimiter |;
create procedure foo()
begin
insert into t1 values("work_25_");
insert into t1 values(concat("for_26_",UUID()));
insert into t1 select "yesterday_27_";
end|
create procedure foo2()
begin
insert into t1 values(concat("emergency_28_",UUID()));
insert into t1 values("work_29_");
insert into t1 values(concat("for_30_",UUID()));
set session binlog_format=row; # accepted for stored procs
insert into t1 values("more work_31_");
set session binlog_format=mixed;
end|
create function foo3() returns bigint unsigned
begin
set session binlog_format=row; # rejected for stored funcs
insert into t1 values("alarm");
return 100;
end|
create procedure foo4(x varchar(100))
begin
insert into t1 values(concat("work_250_",x));
insert into t1 select "yesterday_270_";
end|
delimiter ;|
call foo();
call foo2();
call foo4("hello");
call foo4(UUID());
call foo4("world");
# test that we can't SET binlog_format in a stored function
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
select foo3();
select * from t1 where a="alarm";
# Tests of stored functions/triggers/views for BUG#20930 "Mixed
# binlogging mode does not work with stored functions, triggers,
# views"
# Function which calls procedure
drop function foo3;
delimiter |;
create function foo3() returns bigint unsigned
begin
insert into t1 values("foo3_32_");
call foo();
return 100;
end|
delimiter ;|
insert into t2 select foo3();
prepare stmt1 from 'insert into t2 select foo3()';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# Test a stored function that calls another stored function which in turn
# calls a procedure requiring row-based logging.
delimiter |;
create function foo4() returns bigint unsigned
begin
insert into t2 select foo3();
return 100;
end|
delimiter ;|
select foo4();
prepare stmt1 from 'select foo4()';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# A simple stored function
delimiter |;
create function foo5() returns bigint unsigned
begin
insert into t2 select UUID();
return 100;
end|
delimiter ;|
select foo5();
prepare stmt1 from 'select foo5()';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# A simple stored function where UUID() is in the argument
delimiter |;
create function foo6(x varchar(100)) returns bigint unsigned
begin
insert into t2 select x;
return 100;
end|
delimiter ;|
select foo6("foo6_1_");
select foo6(concat("foo6_2_",UUID()));
prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))';
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# Test of views using UUID()
create view v1 as select uuid();
create table t11 (data varchar(255));
insert into t11 select * from v1;
# Test of querying INFORMATION_SCHEMA which parses the view's body,
# to verify that it binlogs statement-based (is not polluted by
# the parsing of the view's body).
insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11');
prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')";
execute stmt1;
execute stmt1;
deallocate prepare stmt1;
# Test of triggers with UUID()
delimiter |;
create trigger t11_bi before insert on t11 for each row
begin
set NEW.data = concat(NEW.data,UUID());
end|
delimiter ;|
insert into t11 values("try_560_");
# Test that INSERT DELAYED works in mixed mode (BUG#20649)
insert delayed into t2 values("delay_1_");
insert delayed into t2 values(concat("delay_2_",UUID()));
insert delayed into t2 values("delay_6_");
# Test for BUG#20633 (INSERT DELAYED with RAND()/user variables does not
# replicate correctly in statement-based mode; we test that it works in
# mixed mode).
insert delayed into t2 values(rand());
set @a=2.345;
insert delayed into t2 values(@a);
# With INSERT DELAYED, rows are written to the binlog after they are
# written to the table. Therefore, it is not enough to wait until the
# rows make it to t2 on the master (the rows may not be in the binlog
# at that time, and may still not be in the binlog when
# sync_slave_with_master is later called). Instead, we wait until the
# rows make it to t2 on the slave. We first call
# sync_slave_with_master, so that we are sure that t2 has been created
# on the slave.
sync_slave_with_master;
let $wait_condition= SELECT COUNT(*) = 19 FROM mysqltest1.t2;
--source include/wait_condition.inc
connection master;
# If you want to do manual testing of the mixed mode regarding UDFs (not
# testable automatically, as it is quite platform- and compiler-dependent),
# you just need to set the variable below to 1, run
# "make udf_example.so" in sql/, and copy sql/udf_example.so to
# MYSQL_TEST_DIR/lib/mysql.
let $you_want_to_test_UDF=0;
if ($you_want_to_test_UDF)
{
CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so';
prepare stmt1 from 'insert into t1 select metaphon(?)';
set @string="emergency_133_";
insert into t1 values("work_134_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
insert into t1 values(metaphon("work_135_"));
execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(metaphon("for_136_"));
insert into t1 select "yesterday_137_";
create table t6 select metaphon("for_138_");
create table t7 select 1 union select metaphon("for_139_");
create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3);
create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
}
create table t20 select * from t1; # save for comparing later
create table t21 select * from t2;
create table t22 select * from t3;
drop table t1,t2,t3;
# This tests the fix to
# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog
# We verify that under the mixed binlog mode, a stored function
# modifying at least two tables having an auto_increment column,
# is binlogged row-based. Indeed in statement-based binlogging,
# only the auto_increment value generated for the first table
# is recorded in the binlog, while the value generated for the second
# table is missing.
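# For illustration: logged as a statement, the call to f() below would carry
# a single INSERT_ID value, so replaying it could assign a different
# auto_increment value in the second table; logging the call row-based
# avoids that.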
create table t1 (a int primary key auto_increment, b varchar(100));
create table t2 (a int primary key auto_increment, b varchar(100));
create table t3 (b varchar(100));
delimiter |;
create function f (x varchar(100)) returns int deterministic
begin
insert into t1 values(null,x);
insert into t2 values(null,x);
return 1;
end|
delimiter ;|
select f("try_41_");
# Two operations which compensate each other except that their net
# effect is that they advance the auto_increment counter of t2 on slave:
sync_slave_with_master;
use mysqltest1;
insert into t2 values(2,null),(3,null),(4,null);
delete from t2 where a>=2;
connection master;
# this is the call which didn't replicate well
select f("try_42_");
sync_slave_with_master;
# now use prepared statement and test again, just to see that the RBB
# mode isn't set at PREPARE but at EXECUTE.
insert into t2 values(3,null),(4,null);
delete from t2 where a>=3;
connection master;
prepare stmt1 from 'select f(?)';
set @string="try_43_";
insert into t1 values(null,"try_44_"); # should be SBB
execute stmt1 using @string; # should be RBB
deallocate prepare stmt1;
sync_slave_with_master;
# verify that if only one table has auto_inc, it does not trigger RBB
# (we'll check in binlog further below)
connection master;
create table t12 select * from t1; # save for comparing later
drop table t1;
create table t1 (a int, b varchar(100), key(a));
select f("try_45_");
# restore table's key
create table t13 select * from t1;
drop table t1;
create table t1 (a int primary key auto_increment, b varchar(100));
# now test the case of two functions, each of which inserts into one table
drop function f;
# we need a unique key to have sorting of rows by mysqldump
create table t14 (unique (a)) select * from t2;
truncate table t2;
delimiter |;
create function f1 (x varchar(100)) returns int deterministic
begin
insert into t1 values(null,x);
return 1;
end|
create function f2 (x varchar(100)) returns int deterministic
begin
insert into t2 values(null,x);
return 1;
end|
delimiter ;|
select f1("try_46_"),f2("try_47_");
sync_slave_with_master;
insert into t2 values(2,null),(3,null),(4,null);
delete from t2 where a>=2;
connection master;
# Test with SELECT and INSERT
select f1("try_48_"),f2("try_49_");
insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_")));
sync_slave_with_master;
# verify that if f2 only reads from an auto_inc table, this does not
# switch to RBB
connection master;
drop function f2;
delimiter |;
create function f2 (x varchar(100)) returns int deterministic
begin
declare y int;
insert into t1 values(null,x);
set y = (select count(*) from t2);
return y;
end|
delimiter ;|
select f1("try_53_"),f2("try_54_");
sync_slave_with_master;
# And now, a normal statement with a trigger (no stored functions)
connection master;
drop function f2;
delimiter |;
create trigger t1_bi before insert on t1 for each row
begin
insert into t2 values(null,"try_55_");
end|
delimiter ;|
insert into t1 values(null,"try_56_");
# and now remove one auto_increment and verify SBB
alter table t1 modify a int, drop primary key;
insert into t1 values(null,"try_57_");
sync_slave_with_master;
# Test for BUG#20499 "mixed mode with temporary table breaks binlog"
# Slave used to have only 2 rows instead of 3.
connection master;
CREATE TEMPORARY TABLE t15 SELECT UUID();
create table t16 like t15;
INSERT INTO t16 SELECT * FROM t15;
# we'll verify that this one is done RBB
insert into t16 values("try_65_");
drop table t15;
# we'll verify that this one is done SBB
insert into t16 values("try_66_");
sync_slave_with_master;
# and now compare:
connection master;
# first check that data on master is sensible
select count(*) from t1;
select count(*) from t2;
select count(*) from t3;
select count(*) from t4;
select count(*) from t5;
select count(*) from t11;
select count(*) from t20;
select count(*) from t21;
select count(*) from t22;
select count(*) from t12;
select count(*) from t13;
select count(*) from t14;
select count(*) from t16;
if ($you_want_to_test_UDF)
{
select count(*) from t6;
select count(*) from t7;
select count(*) from t8;
select count(*) from t9;
}
sync_slave_with_master;
#
# Bug#20863 If binlog format is changed between update and unlock of
# tables, wrong binlog
#
connection master;
DROP TABLE IF EXISTS t11;
SET SESSION BINLOG_FORMAT=STATEMENT;
CREATE TABLE t11 (song VARCHAR(255));
LOCK TABLES t11 WRITE;
SET SESSION BINLOG_FORMAT=ROW;
INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict');
SET SESSION BINLOG_FORMAT=STATEMENT;
INSERT INTO t11 VALUES('Careful With That Axe, Eugene');
UNLOCK TABLES;
--query_vertical SELECT * FROM t11
sync_slave_with_master;
USE mysqltest1;
--query_vertical SELECT * FROM t11
connection master;
DROP TABLE IF EXISTS t12;
SET SESSION BINLOG_FORMAT=MIXED;
CREATE TABLE t12 (data LONG);
LOCK TABLES t12 WRITE;
INSERT INTO t12 VALUES(UUID());
UNLOCK TABLES;
sync_slave_with_master;
#
# BUG#28086: SBR of USER() becomes corrupted on slave
#
connection master;
# Just to get something that is non-trivial, albeit still simple, we
# stuff the result of USER() and CURRENT_USER() into a variable.
--delimiter $$
CREATE FUNCTION my_user()
RETURNS CHAR(64)
BEGIN
DECLARE user CHAR(64);
SELECT USER() INTO user;
RETURN user;
END $$
--delimiter ;
--delimiter $$
CREATE FUNCTION my_current_user()
RETURNS CHAR(64)
BEGIN
DECLARE user CHAR(64);
SELECT CURRENT_USER() INTO user;
RETURN user;
END $$
--delimiter ;
DROP TABLE IF EXISTS t13;
CREATE TABLE t13 (data CHAR(64));
INSERT INTO t13 VALUES (USER());
INSERT INTO t13 VALUES (my_user());
INSERT INTO t13 VALUES (CURRENT_USER());
INSERT INTO t13 VALUES (my_current_user());
sync_slave_with_master;
# as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql
# Let's compare. Note: if they match the test will pass; if they do not
# match, the test will show that the diff statement failed and no reject
# file will be created. You will need to go to the mysql-test dir and
# diff the files yourself to see what does not match.
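# A manual check would look roughly like:
#   diff $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql \
#        $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql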
diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
connection master;
# Now test that mysqlbinlog works fine on a binlog generated by the
# mixed mode
# BUG#11312 "DELIMITER is not written to the binary log that causes
# syntax error" makes that mysqlbinlog will fail if we pass it the
# text of queries; this forces us to use --base64-output here.
# BUG#20929 "BINLOG command causes invalid free plus assertion
# failure" makes mysqld segfault when receiving --base64-output
# So I can't enable this piece of test
# SIGH
if ($enable_when_11312_or_20929_fixed)
{
--exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
drop database mysqltest1;
--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
# the earlier mysqldump output from the slave should match the dump taken
# from the master after restoring the database on the master.
diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
}
drop database mysqltest1;
sync_slave_with_master;
connection master;
# Restore binlog format setting
set global binlog_format =@my_binlog_format;
--source include/rpl_end.inc

View File

@ -1,2 +1,159 @@
--source include/rpl_sync_test.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
########################################################################################
# This test verifies the options --sync-relay-log-info and --relay-log-recovery by
# crashing the slave in two different situations:
# (case-1) - Corrupt the relay log holding changes not yet processed by
#            the SQL Thread, then crash the slave.
# (case-2) - Corrupt the master.info with wrong coordinates, then crash the slave.
#
# Case 1:
# 1 - Stops the SQL Thread
# 2 - Inserts new records into the master.
# 3 - Corrupts the relay-log.bin* which most likely has such changes.
# 4 - Crashes the slave
# 5 - Verifies if the slave is sync with the master which means that the information
# loss was circumvented by the recovery process.
#
# Case 2:
# 1 - Stops the SQL/IO Threads
# 2 - Inserts new records into the master.
# 3 - Corrupts the master.info with wrong coordinates.
# 4 - Crashes the slave
# 5 - Verifies that the slave is in sync with the master, which means that the
#     information loss was circumvented by the recovery process.
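#
# Roughly, the recovery relies on --relay-log-recovery discarding any
# not-yet-applied relay log contents at restart and re-fetching them from
# the master starting at the SQL thread position, which --sync-relay-log-info
# keeps durable on disk.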
########################################################################################
########################################################################################
# Configuring the environment
########################################################################################
--echo =====Configuring the environment=======;
--source include/not_embedded.inc
--source include/not_valgrind.inc
--source include/have_debug.inc
--source include/have_innodb.inc
--source include/not_crashrep.inc
--source include/master-slave.inc
call mtr.add_suppression('Attempting backtrace');
call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001");
# Use innodb so we do not get "table should be repaired" issues.
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
flush tables;
CREATE TABLE t1(a INT, PRIMARY KEY(a)) engine=innodb;
insert into t1(a) values(1);
insert into t1(a) values(2);
insert into t1(a) values(3);
########################################################################################
# Case 1: Corrupt a relay-log.bin*
########################################################################################
--echo =====Inserting data on the master but without the SQL Thread being running=======;
sync_slave_with_master;
connection slave;
let $MYSQLD_SLAVE_DATADIR= `select @@datadir`;
--replace_result $MYSQLD_SLAVE_DATADIR MYSQLD_SLAVE_DATADIR
--copy_file $MYSQLD_SLAVE_DATADIR/master.info $MYSQLD_SLAVE_DATADIR/master.backup
--source include/stop_slave_sql.inc
connection master;
insert into t1(a) values(4);
insert into t1(a) values(5);
insert into t1(a) values(6);
--echo =====Removing relay log files and crashing/recovering the slave=======;
connection slave;
--source include/stop_slave_io.inc
let $file= query_get_value("SHOW SLAVE STATUS", Relay_Log_File, 1);
--let FILE_TO_CORRUPT= $MYSQLD_SLAVE_DATADIR/$file
perl;
$file= $ENV{'FILE_TO_CORRUPT'};
open(FILE, ">$file") || die "Unable to open $file.";
truncate(FILE,0);
print FILE "failure";
close(FILE);
EOF
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
SET SESSION debug_dbug="d,crash_before_rotate_relaylog";
--error 2013
FLUSH LOGS;
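# The dbug injection point above makes mysqld abort inside FLUSH LOGS, so the
# client reports error 2013 (lost connection); the "restart" line written to
# the expect file lets mtr restart the crashed server, and rpl_reconnect.inc
# waits for it to come back.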
--let $rpl_server_number= 2
--source include/rpl_reconnect.inc
--echo =====Dumping and comparing tables=======;
--source include/start_slave.inc
connection master;
sync_slave_with_master;
let $diff_tables=master:t1,slave:t1;
source include/diff_tables.inc;
########################################################################################
# Case 2: Corrupt a master.info
########################################################################################
--echo =====Corrupting the master.info=======;
connection slave;
--source include/stop_slave.inc
connection master;
FLUSH LOGS;
insert into t1(a) values(7);
insert into t1(a) values(8);
insert into t1(a) values(9);
connection slave;
let MYSQLD_SLAVE_DATADIR=`select @@datadir`;
--perl
use strict;
use warnings;
my $src= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.backup";
my $dst= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.info";
open(FILE, "<", $src) or die;
my @content= <FILE>;
close FILE;
open(FILE, ">", $dst) or die;
binmode FILE;
print FILE @content;
close FILE;
EOF
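# Copying the backup taken earlier over master.info leaves the slave with
# stale (wrong) coordinates, which is the corruption this case exercises.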
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
SET SESSION debug_dbug="d,crash_before_rotate_relaylog";
--error 2013
FLUSH LOGS;
--let $rpl_server_number= 2
--source include/rpl_reconnect.inc
--echo =====Dumping and comparing tables=======;
--source include/start_slave.inc
connection master;
sync_slave_with_master;
let $diff_tables=master:t1,slave:t1;
source include/diff_tables.inc;
########################################################################################
# Clean up
########################################################################################
--echo =====Clean up=======;
connection master;
drop table t1;
--remove_file $MYSQLD_SLAVE_DATADIR/master.backup
--source include/rpl_end.inc

View File

@ -1 +1,82 @@
--source include/rpl_temporal_format_default_to_default.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption).
# Please check all dependent tests after modifying it
#
--source include/master-slave.inc
if ($force_master_mysql56_temporal_format)
{
connection master;
eval SET @@global.mysql56_temporal_format=$force_master_mysql56_temporal_format;
}
if ($force_slave_mysql56_temporal_format)
{
connection slave;
eval SET @@global.mysql56_temporal_format=$force_slave_mysql56_temporal_format;
}
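# Roughly: mysql56_temporal_format selects the on-disk format MariaDB uses for
# TIME, DATETIME and TIMESTAMP columns (the MySQL-5.6 format when ON, the
# older MariaDB-5.3 format when OFF); the test replicates fractional-second
# values and compares the resulting row sizes on master and slave.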
connection master;
SELECT @@global.mysql56_temporal_format AS on_master;
connection slave;
SELECT @@global.mysql56_temporal_format AS on_slave;
connection master;
CREATE TABLE t1
(
c0 TIME(0),
c1 TIME(1),
c2 TIME(2),
c3 TIME(3),
c4 TIME(4),
c5 TIME(5),
c6 TIME(6)
);
CREATE TABLE t2
(
c0 TIMESTAMP(0),
c1 TIMESTAMP(1),
c2 TIMESTAMP(2),
c3 TIMESTAMP(3),
c4 TIMESTAMP(4),
c5 TIMESTAMP(5),
c6 TIMESTAMP(6)
);
CREATE TABLE t3
(
c0 DATETIME(0),
c1 DATETIME(1),
c2 DATETIME(2),
c3 DATETIME(3),
c4 DATETIME(4),
c5 DATETIME(5),
c6 DATETIME(6)
);
INSERT INTO t1 VALUES ('01:01:01','01:01:01.1','01:01:01.11','01:01:01.111','01:01:01.1111','01:01:01.11111','01:01:01.111111');
INSERT INTO t2 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111');
INSERT INTO t3 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111');
SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME;
sync_slave_with_master;
connection slave;
--query_vertical SELECT * FROM t1;
--query_vertical SELECT * FROM t2;
--query_vertical SELECT * FROM t3;
SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME;
connection master;
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
connection slave;
SET @@global.mysql56_temporal_format=DEFAULT;
connection master;
SET @@global.mysql56_temporal_format=DEFAULT;
--source include/rpl_end.inc

View File

@ -1 +1,78 @@
--source include/rpl_typeconv.inc #
# This include file is used by more than one test suite
# (currently rpl and binlog_encryption suite).
# Please check all dependent tests after modifying it
#
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
connection slave;
set @saved_slave_type_conversions = @@global.slave_type_conversions;
CREATE TABLE type_conversions (
TestNo INT AUTO_INCREMENT PRIMARY KEY,
Source TEXT,
Target TEXT,
Flags TEXT,
On_Master LONGTEXT,
On_Slave LONGTEXT,
Expected LONGTEXT,
Compare INT,
Error TEXT);
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='';
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY';
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY';
SELECT @@global.slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY';
SELECT @@global.slave_type_conversions;
--error ER_WRONG_VALUE_FOR_VAR
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY,NONEXISTING_BIT';
SELECT @@global.slave_type_conversions;
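# As a rough guide: ALL_NON_LOSSY permits conversions to a wider target type,
# ALL_LOSSY permits conversions that may truncate data, and with neither set
# any column type mismatch stops the slave SQL thread with error 1677.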
# Checking strict interpretation of type conversions
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='';
source suite/rpl/include/type_conversions.test;
# Checking type conversions with only non-lossy conversions enabled
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY';
source suite/rpl/include/type_conversions.test;
# Checking type conversions with only lossy conversions enabled
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY';
source suite/rpl/include/type_conversions.test;
# Checking all type conversions
connection slave;
SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY';
source suite/rpl/include/type_conversions.test;
connection slave;
--echo **** Result of conversions ****
disable_query_log;
SELECT RPAD(Source, 15, ' ') AS Source_Type,
RPAD(Target, 15, ' ') AS Target_Type,
RPAD(Flags, 25, ' ') AS All_Type_Conversion_Flags,
IF(Compare IS NULL AND Error IS NOT NULL, '<Correct error>',
IF(Compare, '<Correct value>',
CONCAT("'", On_Slave, "' != '", Expected, "'")))
AS Value_On_Slave
FROM type_conversions;
enable_query_log;
DROP TABLE type_conversions;
call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t1. cannot be converted from type.* error.* 1677");
connection master;
DROP TABLE t1;
sync_slave_with_master;
set global slave_type_conversions = @saved_slave_type_conversions;
--source include/rpl_end.inc