Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into poseidon.ndb.mysql.com:/home/tomas/v7

include/my_base.h: Auto merged
mysql-test/t/mysqltest.test: Auto merged
sql/handler.cc: Auto merged
sql/handler.h: Auto merged
mysql-test/t/disabled.def: SCCS merged
@@ -18,6 +18,11 @@ group_min_max : Bug #15448
innodb_concurrent : Results are not deterministic, Elliot will fix (BUG#3300)
subselect : Bug#15706
type_time : Bug#15805
ps_7ndb : dbug assert in RBR mode when executing test suite
rpl_ddl : Bug#15963 SBR does not show "Definer" correctly
events : Affects flush test case. A table lock not released somewhere
ndb_autodiscover : TBF with CR
ndb_autodiscover2 : TBF with CR
ndb_binlog_basic : Results are not deterministic, Tomas will fix
rpl_ndb_basic : Bug#16228
rpl_sp : Bug #16456

@@ -518,9 +518,6 @@ echo $novar1;
--error 1
--exec echo "let $=hi;" | $MYSQL_TEST 2>&1

--error 1
--exec echo "let hi=hi;" | $MYSQL_TEST 2>&1

--error 1
--exec echo "let $1 hi;" | $MYSQL_TEST 2>&1

@@ -142,23 +142,19 @@ INSERT INTO t1 VALUES (1,2,0),(18,19,4),(20,21,0);
select c from t1 order by c;
drop table t1;

--disable_ps_protocol
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
connection server2;
select * from t1 where b = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
# This should fail since index information is not automatically refreshed
--error 1015
select * from t1 where b = 'two';
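# (the identical retry below is expected to succeed, since the failed
#  attempt should force the stale table definition to be re-read)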
select * from t1 where b = 'two';
connection server1;
drop table t1;
--enable_ps_protocol
## Test moved to ndb_alter_table_row|stmt respectively as behaviour differs
#create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
#engine=ndb;
#insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
#create index c on t1(c);
#connection server2;
#select * from t1 where c = 'two';
#connection server1;
#alter table t1 drop index c;
#connection server2;
#select * from t1 where c = 'two';
#connection server1;
#drop table t1;

#--disable_warnings
#DROP TABLE IF EXISTS t2;
@@ -183,29 +179,32 @@ drop table t1;
#select count(*) from t2;
#drop table t2;

connection server1;
create table t3 (a int primary key) engine=ndbcluster;
## Test moved to ndb_alter_table_row|stmt respectively as behaviour differs
#connection server1;
#create table t3 (a int primary key) engine=ndbcluster;

connection server2;
begin;
insert into t3 values (1);
#connection server2;
#begin;
#insert into t3 values (1);

connection server1;
alter table t3 rename t4;
#connection server1;
#alter table t3 rename t4;

connection server2;
# This should work as transaction is ongoing...
delete from t3;
insert into t3 values (1);
commit;
#connection server2;
## with rbr the below will not work as the "alter" event
## explicitly invalidates the dictionary cache.
### This should work as transaction is ongoing...
##delete from t3;
##insert into t3 values (1);
#commit;

# This should fail as it's a new transaction
--error 1015
select * from t3;
select * from t4;
drop table t4;
show tables;
connection server1;
## This should fail as it's a new transaction
#--error 1146
#select * from t3;
#select * from t4;
#drop table t4;
#show tables;
#connection server1;

create table t1 (
ai bigint auto_increment,

48  mysql-test/t/ndb_alter_table_row.test  Normal file
@@ -0,0 +1,48 @@
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_row.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

connection server1;
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
connection server2;
select * from t1 where c = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
select * from t1 where c = 'two';
connection server1;
drop table t1;

connection server1;
create table t3 (a int primary key) engine=ndbcluster;

connection server2;
begin;
insert into t3 values (1);

connection server1;
alter table t3 rename t4;

connection server2;
# with rbr the below will not work as the "alter" event
# explicitly invalidates the dictionary cache.
## This should work as transaction is ongoing...
#delete from t3;
#insert into t3 values (1);
commit;

# This should fail as it's a new transaction
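# (1146 = ER_NO_SUCH_TABLE: per the comment above, with row format the
#  rename event has already invalidated the cached definition, so t3 is
#  gone and only t4 exists)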
--error 1146
select * from t3;
select * from t4;
drop table t4;
show tables;
connection server1;
51  mysql-test/t/ndb_alter_table_stm.test  Normal file
@@ -0,0 +1,51 @@
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_statement.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

connection server1;
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
connection server2;
select * from t1 where c = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
-- error 1015
select * from t1 where c = 'two';
select * from t1 where c = 'two';
connection server1;
drop table t1;

connection server1;
create table t3 (a int primary key) engine=ndbcluster;

connection server2;
begin;
insert into t3 values (1);

connection server1;
alter table t3 rename t4;

connection server2;
# with rbr the below will not work as the "alter" event
# explicitly invalidates the dictionary cache.
# This should work as transaction is ongoing...
delete from t3;
insert into t3 values (1);
commit;

# This should fail as it's a new transaction
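# (note: the expected error here is 1015, not the 1146 seen in
#  ndb_alter_table_row.test; presumably without the row-format schema
#  event the old cached definition is still in place)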
--error 1015
select * from t3;
select * from t4;
drop table t4;
show tables;
connection server1;

@@ -6,6 +6,17 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
drop database if exists mysqltest;
--enable_warnings

# workaround for bug#16445
# remove to reproduce bug and run tests from ndb start
# and with ndb_autodiscover disabled
CREATE TABLE t1 (
  pk1 INT NOT NULL PRIMARY KEY,
  attr1 INT NOT NULL,
  attr2 INT,
  attr3 VARCHAR(10)
) ENGINE=ndbcluster;
drop table t1;

#
# Basic test to show that the NDB
# table handler is working

76  mysql-test/t/ndb_binlog_basic.test  Normal file
@@ -0,0 +1,76 @@
-- source include/have_ndb.inc
-- source include/have_binlog_format_row.inc

--disable_warnings
drop table if exists t1, t2;
drop database if exists mysqltest;
create database mysqltest;
use mysqltest;
drop database mysqltest;
use test;
--enable_warnings

#
# basic insert, update, delete test, alter, rename, drop
# check that binlog_index gets the right info
#
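# for orientation: the queries below only rely on the binlog_index
# columns actually referenced in this test (epoch, inserts, updates,
# deletes, schemaops), one row per epoch; this is not a full schema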

create table t1 (a int primary key) engine=ndb;
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
save_master_pos;
--replace_column 1 #
select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;

delete from t1;
alter table t1 add (b int);
insert into t1 values (3,3),(4,4);
alter table t1 rename t2;

# get all in one epoch
begin;
insert into t2 values (1,1),(2,2);
update t2 set b=1 where a=3;
delete from t2 where a=4;
commit;
drop table t2;

# check that above is ok
# (save_master_pos waits for last gcp to complete, ensuring that we have
#  the expected data in the binlog)
save_master_pos;
select inserts from cluster_replication.binlog_index where epoch > @max_epoch and inserts > 5;
select deletes from cluster_replication.binlog_index where epoch > @max_epoch and deletes > 5;
select inserts,updates,deletes from
  cluster_replication.binlog_index where epoch > @max_epoch and updates > 0;
select schemaops from
  cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;

#
# check that purge clears the binlog_index
#
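# (the count below is expected to drop to 0, since rows for purged
#  binlog files should be removed from binlog_index)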
flush logs;
--sleep 1
purge master logs before now();
select count(*) from cluster_replication.binlog_index;

#
# several tables in different databases
# check that the same table name in different databases doesn't get mixed up
#
create table t1 (a int primary key, b int) engine=ndb;
create database mysqltest;
use mysqltest;
create table t1 (c int, d int primary key) engine=ndb;
use test;

insert into mysqltest.t1 values (2,1),(2,2);
save_master_pos;
--replace_column 1 #
select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;

drop table t1;
drop database mysqltest;
select inserts,updates,deletes from
  cluster_replication.binlog_index where epoch > @max_epoch and inserts > 0;
select schemaops from
  cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
70  mysql-test/t/ndb_binlog_multi.test  Normal file
@@ -0,0 +1,70 @@
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/have_binlog_format_row.inc

--disable_warnings
connection server2;
drop table if exists t1,t2;
connection server1;
drop table if exists t1,t2;
--enable_warnings

#
# basic test to see if one server sees the table from the other
# and sets up the replication correctly
#

# no tables and nothing in cluster_replication.binlog_index;
connection server1;
SHOW TABLES;

# create table on the other server
connection server2;
CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB;

# make sure the first mysql server knows about this table
connection server1;
show tables;

# insert something on server2
connection server2;
INSERT INTO t2 VALUES (1,1),(2,2);
select * from t2 order by a;
save_master_pos;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=epoch,inserts,updates,deletes,schemaops FROM
  cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
let $the_epoch= `SELECT @the_epoch`;
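# ($the_epoch is the epoch under which the insert above was binlogged;
#  it is used below to look up the matching binlog_index row from the
#  other server)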

# see if we got something on server1
connection server1;
SELECT * FROM t2 ORDER BY a;
--replace_result $the_epoch <the_epoch>
eval SELECT inserts,updates,deletes,schemaops FROM
  cluster_replication.binlog_index WHERE epoch=$the_epoch;

# drop the table on server1
DROP TABLE t2;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB;
INSERT INTO t1 VALUES (1),(2);
save_master_pos;
--replace_column 1 <the_epoch2>
SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM
  cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
let $the_epoch2= `SELECT @the_epoch2`;

--replace_result $the_epoch <the_epoch> $the_epoch2 <the_epoch2>
eval SELECT inserts,updates,deletes,schemaops FROM
  cluster_replication.binlog_index WHERE epoch > $the_epoch AND epoch < $the_epoch2;
drop table t1;

# flush on server2
connection server2;
SHOW TABLES;
--replace_result $the_epoch <the_epoch> $the_epoch2 <the_epoch2>
eval SELECT inserts,updates,deletes,schemaops FROM
  cluster_replication.binlog_index WHERE epoch > $the_epoch AND epoch < $the_epoch2;

# reset
connection server1;

@@ -1,9 +1,13 @@
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_statement.inc


--disable_warnings
connection server2;
drop table if exists t1, t2, t3, t4;
connection server1;
drop table if exists t1, t2, t3, t4;
--enable_warnings


78  mysql-test/t/ndb_multi_row.test  Normal file
@@ -0,0 +1,78 @@
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_row.inc


--disable_warnings
connection server2;
drop table if exists t1, t2, t3, t4;
connection server1;
drop table if exists t1, t2, t3, t4;
--enable_warnings

flush status;

# Create test tables on server1
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
insert into t1 value (2);
insert into t2 value (3);
select * from t1;
select * from t2;
show status like 'handler_discover%';

# Check dropping and recreating table on same server
connect (con1,localhost,,,test);
connect (con2,localhost,,,test);
connection con1;
select * from t1;
connection con2;
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection con1;
select * from t1;

# Check dropping and recreating table on different server
connection server2;
show status like 'handler_discover%';
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection server1;
## Currently a retry is required remotely
#--error 1412
#select * from t1;
#show warnings;
#flush table t1;
# Table definition change should be propagated automatically
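# (contrast with the commented-out block above: no explicit flush or
#  retry should be needed any longer, the new definition is expected to
#  be discovered automatically)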
select * from t1;

# Connect to server2 and use the tables from there
connection server2;
flush status;
select * from t1;
update t1 set a=3 where a=2;
show status like 'handler_discover%';

# Create a new table on server2
create table t3 (a int not null primary key, b varchar(22),
  c int, last_col text) engine=ndb;
insert into t3 values(1, 'Hi!', 89, 'Longtext column');
create table t4 (pk int primary key, b int) engine=ndb;

# Check that the tables are accessible from server1
connection server1;
select * from t1;
select * from t3;
show status like 'handler_discover%';
show tables like 't4';
show status like 'handler_discover%';
show tables;

drop table t1, t2, t3, t4;
connection server2;
drop table t1, t3, t4;

# End of 4.1 tests
207  mysql-test/t/rpl_ndb_bank.test  Normal file
@@ -0,0 +1,207 @@
#
# Currently this test only runs in the source tree with the
# ndb/test programs compiled.
# invoke with: ./mysql-test-run --ndb-extra-test --do-test=rpl_ndb_bank
#
# 1. start a "bank" application running on the master cluster
# 2. perform online sync of slave
# 3. periodically check consistency of slave
# 4. stop the bank application
# 5. check that the slave and master BANK databases are the same
#

# kill any lingering processes
--system killall lt-bankTransactionMaker lt-bankTimer lt-bankMakeGL || true

--source include/have_ndb.inc
--source include/have_ndb_extra.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

--disable_warnings
# initialize master
--connection master
CREATE DATABASE IF NOT EXISTS BANK;
DROP DATABASE BANK;
CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
--enable_warnings

#
# These tables should correspond to the table definitions in
# storage/ndb/test/src/NDBT_Tables.cpp
#
--connection master
USE BANK;
CREATE TABLE GL ( TIME BIGINT UNSIGNED NOT NULL,
    ACCOUNT_TYPE INT UNSIGNED NOT NULL,
    BALANCE INT UNSIGNED NOT NULL,
    DEPOSIT_COUNT INT UNSIGNED NOT NULL,
    DEPOSIT_SUM INT UNSIGNED NOT NULL,
    WITHDRAWAL_COUNT INT UNSIGNED NOT NULL,
    WITHDRAWAL_SUM INT UNSIGNED NOT NULL,
    PURGED INT UNSIGNED NOT NULL,
    PRIMARY KEY USING HASH (TIME,ACCOUNT_TYPE))
ENGINE = NDB;

CREATE TABLE ACCOUNT ( ACCOUNT_ID INT UNSIGNED NOT NULL,
    OWNER INT UNSIGNED NOT NULL,
    BALANCE INT UNSIGNED NOT NULL,
    ACCOUNT_TYPE INT UNSIGNED NOT NULL,
    PRIMARY KEY USING HASH (ACCOUNT_ID))
ENGINE = NDB;

CREATE TABLE TRANSACTION ( TRANSACTION_ID BIGINT UNSIGNED NOT NULL,
    ACCOUNT INT UNSIGNED NOT NULL,
    ACCOUNT_TYPE INT UNSIGNED NOT NULL,
    OTHER_ACCOUNT INT UNSIGNED NOT NULL,
    TRANSACTION_TYPE INT UNSIGNED NOT NULL,
    TIME BIGINT UNSIGNED NOT NULL,
    AMOUNT INT UNSIGNED NOT NULL,
    PRIMARY KEY USING HASH (TRANSACTION_ID,ACCOUNT))
ENGINE = NDB;

CREATE TABLE SYSTEM_VALUES ( SYSTEM_VALUES_ID INT UNSIGNED NOT NULL,
    VALUE BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY USING HASH (SYSTEM_VALUES_ID))
ENGINE = NDB;

CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE_ID INT UNSIGNED NOT NULL,
    DESCRIPTION CHAR(64) NOT NULL,
    PRIMARY KEY USING HASH (ACCOUNT_TYPE_ID))
ENGINE = NDB;

#
# create "BANK" application
#
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankCreator >> $NDB_TOOLS_OUTPUT

#
# start main loop
# repeat backup-restore-check
#

# set this high if testing to run many syncs in loop
--let $2=1
while ($2)
{

#
# start "BANK" application
#
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankTimer -w 5 >> $NDB_TOOLS_OUTPUT &
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankMakeGL >> $NDB_TOOLS_OUTPUT &
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankTransactionMaker >> $NDB_TOOLS_OUTPUT &

#
# let the "BANK" run for a while
#
--sleep 5

--disable_warnings
# initialize slave for sync
--connection slave
STOP SLAVE;
RESET SLAVE;
# to make sure we drop any ndbcluster tables
CREATE DATABASE IF NOT EXISTS BANK;
DROP DATABASE BANK;
# create database
CREATE DATABASE BANK;
--enable_warnings

#
# Time to sync the slave:
# start by taking a backup on master
--connection master
RESET MASTER;
--exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT

# there is no neat way to find the backupid, this is a hack to find it...
--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat
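# (the grep picks out the SYSTAB_0 row that carries the latest backup
#  id; its output is loaded into backup_info below so the id can be
#  read back via SQL)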
CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM cluster_replication.backup_info;
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
let the_backup_id=`select @the_backup_id`;

# restore on slave, first check that nothing is there
--connection slave
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -p 8 -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -p 8 -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT

#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave
# 4. start the replication

# 1.
--connection slave
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
--let $the_epoch= `select @the_epoch`

# 2.
--connection master
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
  FROM cluster_replication.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`

# 3.
--connection slave
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
  master_log_file = '$the_file',
  master_log_pos = $the_pos;

# 4.
--connection slave
START SLAVE;


#
# Now loop and check consistency every 2 seconds on slave
#
--connection slave
--let $1=10
while ($1)
{
--sleep 2
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
STOP SLAVE;
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT_SLAVE ../storage/ndb/test/ndbapi/bank/bankValidateAllGLs >> $NDB_TOOLS_OUTPUT
START SLAVE;
--dec $1
}

#
# Stop transactions
#
--exec killall lt-bankTransactionMaker lt-bankTimer lt-bankMakeGL

#
# Check that the databases are the same on slave and master
# 1. dump database BANK on both master and slave
# 2. compare, there should be no difference
#
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/master_BANK.sql
--connection master
use test;
create table t1 (a int primary key) engine=ndb;
insert into t1 values (1);
--sync_slave_with_master
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/slave_BANK.sql
--connection master
drop table t1;

--exec diff ./var/tmp/master_BANK.sql ./var/tmp/slave_BANK.sql

--dec $2
}
146  mysql-test/t/rpl_ndb_basic.test  Normal file
@@ -0,0 +1,146 @@
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc



#
# Bug #11087
#
# connect to the master and create table t1 in gotoslave database
--connection master
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
  `nom` char(4) default NULL,
  `prenom` char(4) default NULL,
  PRIMARY KEY (`nid`))
ENGINE=ndbcluster DEFAULT CHARSET=latin1;

INSERT INTO t1 VALUES(1,"XYZ1","ABC1");
select * from t1 order by nid;

--sync_slave_with_master
# connect to slave and ensure data is there.
--connection slave
select * from t1 order by nid;

--connection master
delete from t1;
INSERT INTO t1 VALUES(1,"XYZ2","ABC2");
# Make sure all rows are on the master
select * from t1 order by nid;

# make sure all rows are on the slave.
--sync_slave_with_master
--connection slave
# Bug #11087 would have row with nid 2 missing
select * from t1 order by nid;

--connection master
DROP table t1;

#
# Test replication of table with no primary key
#
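# (NDB adds a hidden primary key when none is declared; the point here
#  is that row events still locate the correct rows on the slave)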
--connection master
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
  `nom` char(4) default NULL,
  `prenom` char(4) default NULL)
ENGINE=ndbcluster DEFAULT CHARSET=latin1;

INSERT INTO t1 VALUES(1,"XYZ1","ABC1"),(2,"AAA","BBB"),(3,"CCC","DDD");
select * from t1 order by nid;

--sync_slave_with_master
# connect to slave and ensure data is there.
--connection slave
select * from t1 order by nid;

--connection master
delete from t1 where nid = 2;
INSERT INTO t1 VALUES(4,"EEE","FFF");
# Make sure all rows are on the master
select * from t1 order by nid;

# make sure all rows are on the slave.
--sync_slave_with_master
--connection slave
select * from t1 order by nid;

--connection master
UPDATE t1 set nid=nid+1;
UPDATE t1 set nom="CCP" where nid = 4;
select * from t1 order by nid;

# make sure all rows are on the slave.
--sync_slave_with_master
--connection slave
select * from t1 order by nid;

--connection master
DROP table t1;

##################################################################
#
# Check that retries are made on the slave on some temporary errors
#

#
# 1. Deadlock
#
--connection master
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
  `nom` char(4) default NULL,
  `prenom` char(4) default NULL,
  PRIMARY KEY USING HASH (`nid`))
ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,"XYZ1","ABC1");

# cause a lock on that row on the slave
--sync_slave_with_master
--connection slave
BEGIN;
UPDATE t1 SET `nom`="LOCK" WHERE `nid`=1;

# set number of retries low so we fail the retries
set GLOBAL slave_transaction_retries=1;

# now do a change to this row on the master
# will deadlock on the slave because of lock above
--connection master
UPDATE t1 SET `nom`="DEAD" WHERE `nid`=1;

# wait for deadlock to be detected
# sleep longer than the deadlock detection timeout in the config
# we do this twice: once with too few retries, to verify that the given
# sleep produces a failure, and once with the _same_ sleep but enough
# retries for it to succeed
--sleep 5

# replication should have stopped, since max retries were not enough
# verify with show slave status
--connection slave
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;

# now set max retries high enough to succeed, and start slave again
set GLOBAL slave_transaction_retries=10;
START SLAVE;

# wait for deadlock to be detected and retried
# should be the same sleep as above for test to be valid
--sleep 5

# commit transaction to release lock on row and let replication succeed
select * from t1 order by nid;
COMMIT;

# verify that the row was successfully applied on the slave
--connection master
--sync_slave_with_master
--connection slave
select * from t1 order by nid;

# cleanup
--connection master
DROP TABLE t1;
87  mysql-test/t/rpl_ndb_disk.test  Normal file
@@ -0,0 +1,87 @@
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

#
# Basic test of disk tables for NDB
#

#
# Start by creating a logfile group
#

CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;

alter logfile group lg1
add undofile 'undofile02.dat'
initial_size 4M engine=ndb;

#
# Create a tablespace connected to the logfile group
#

CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE NDB;

alter tablespace ts1
add datafile 'datafile02.dat'
initial_size 4M engine=ndb;

#
# Create a table using this tablespace
#

CREATE TABLE t1
(pk1 int not null primary key, b int not null, c int not null)
tablespace ts1 storage disk
engine ndb;

#
# insert some data
#

insert into t1 values (1,2,3);
select * from t1 order by pk1;

#
# check that the data is also on the slave
#

--sync_slave_with_master
--connection slave
select * from t1 order by pk1;

#
# view the binlog
#

--connection master
let $VERSION=`select version()`;
--replace_result $VERSION VERSION
show binlog events;

#
# cleanup
#

drop table t1;
alter tablespace ts1
drop datafile 'datafile.dat'
engine=ndb;
alter tablespace ts1
drop datafile 'datafile02.dat'
engine=ndb;
DROP TABLESPACE ts1 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
--sync_slave_with_master
111  mysql-test/t/rpl_ndb_idempotent.test  Normal file
@@ -0,0 +1,111 @@
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

#
# Currently test only works with ndb since it retrieves "old"
# binlog positions with cluster_replication.binlog_index and apply_status;
#

# create a table with one row
CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES ("row1","will go away",1);
SELECT * FROM t1 ORDER BY c3;

# sync slave and retrieve epoch
sync_slave_with_master;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
let $the_epoch= `select @the_epoch` ;
SELECT * FROM t1 ORDER BY c3;

# get the master binlog pos from the epoch
connection master;
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
  FROM cluster_replication.binlog_index WHERE epoch = $the_epoch ;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;

# insert some more values
INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
DELETE FROM t1 WHERE c3 = 1;
UPDATE t1 SET c2="should go away" WHERE c3 = 2;
UPDATE t1 SET c2="C" WHERE c3 = 3;
DELETE FROM t1 WHERE c3 = 2;

SELECT * FROM t1 ORDER BY c3;

# check that we have it on the slave
--sync_slave_with_master
--connection slave
SELECT * FROM t1 ORDER BY c3;

--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;

# stop slave and reset position to before the last changes
STOP SLAVE;
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
  master_log_file = '$the_file',
  master_log_pos = $the_pos ;

--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;

# start the slave again
# -> same events should have been applied again
# e.g. inserting rows that are already there
# deleting a row which is not there
# updating a row which is not there
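# (this relies on the NDB applier being idempotent: re-applied inserts
#  overwrite existing rows, and deletes/updates of missing rows are
#  ignored rather than stopping the slave)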
START SLAVE;

--connection master
SELECT * FROM t1 ORDER BY c3;
--sync_slave_with_master
--connection slave
SELECT * FROM t1 ORDER BY c3;

STOP SLAVE;

#
# cleanup
#
--connection master
DROP TABLE t1;
RESET master;
--connection slave
DROP TABLE t1;
RESET slave;

START SLAVE;

#
# Test that we can handle update of a row that does not exist on the slave
# will trigger usage of AO_IgnoreError on slave side so that the INSERT
# still succeeds even if the replication of the UPDATE generates an error.
#
--connection master
CREATE TABLE t1 (c1 CHAR(15) NOT NULL, c2 CHAR(15) NOT NULL, c3 INT NOT NULL, PRIMARY KEY (c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES ("row1","remove on slave",1);

--sync_slave_with_master
--connection slave
DELETE FROM t1;

--connection master
BEGIN;
UPDATE t1 SET c2="does not exist" WHERE c3=1;
INSERT INTO t1 VALUES ("row2","new on slave",2);
COMMIT;

--sync_slave_with_master
--connection slave
SELECT * FROM t1;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
66  mysql-test/t/rpl_ndb_load.test  Normal file
@@ -0,0 +1,66 @@
#
# Currently this test only runs in the source tree with the
# ndb/test programs compiled.
# invoke with: ./mysql-test-run --ndb-extra-test --do-test=rpl_ndb_load
#

--source include/have_ndb.inc
--source include/have_ndb_extra.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

--disable_warnings
# reset master
connection master;
DROP DATABASE IF EXISTS TEST_DB;
CREATE DATABASE TEST_DB;
--enable_warnings

#
# These tables should correspond to the table definitions in
# storage/ndb/test/ndbapi/bench/
#
connection master;
USE TEST_DB;
CREATE TABLE SUBSCRIBER
  ( NUMBER CHAR(12) BINARY NOT NULL,
    NAME CHAR(32) BINARY NOT NULL,
    GROUP_ID INT UNSIGNED NOT NULL,
    LOCATION INT UNSIGNED NOT NULL,
    SESSIONS INT UNSIGNED NOT NULL,
    CHANGED_BY CHAR(32) BINARY NOT NULL,
    CHANGED_TIME CHAR(32) BINARY NOT NULL,
    PRIMARY KEY USING HASH (NUMBER))
ENGINE = NDB;

CREATE TABLE GROUP_T
  ( GROUP_ID INT UNSIGNED NOT NULL,
    GROUP_NAME CHAR(32) BINARY NOT NULL,
    ALLOW_READ CHAR(1) BINARY NOT NULL,
    ALLOW_INSERT INT UNSIGNED NOT NULL,
    ALLOW_DELETE INT UNSIGNED NOT NULL,
    PRIMARY KEY USING HASH (GROUP_ID))
ENGINE = NDB;

CREATE TABLE SESSION
  ( NUMBER CHAR(12) BINARY NOT NULL,
    SERVER_ID INT UNSIGNED NOT NULL,
    DATA VARBINARY(1998) NOT NULL,
    PRIMARY KEY USING HASH (NUMBER,SERVER_ID))
ENGINE = NDB;

CREATE TABLE SERVER
  ( SUFFIX CHAR(2) BINARY NOT NULL,
    SERVER_ID INT UNSIGNED NOT NULL,
    NAME CHAR(32) BINARY NOT NULL,
    NO_OF_READ INT UNSIGNED NOT NULL,
    NO_OF_INSERT INT UNSIGNED NOT NULL,
    NO_OF_DELETE INT UNSIGNED NOT NULL,
    PRIMARY KEY USING HASH (SUFFIX, SERVER_ID))
ENGINE = NDB;

#
# start "load" application
#
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/DbCreate >> $NDB_TOOLS_OUTPUT
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/DbAsyncGenerator >> $NDB_TOOLS_OUTPUT
71  mysql-test/t/rpl_ndb_multi.test  Normal file
@@ -0,0 +1,71 @@
--source include/have_ndb.inc
--source include/have_multi_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

# note: server2 is another "master" connected to the master cluster
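# topology assumed by this test: master and server2 are two mysqld
# servers attached to the same cluster; the slave initially replicates
# from master and is re-pointed at server2 via CHANGE MASTER below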

#
# Currently test only works with ndb since it retrieves "old"
# binlog positions with cluster_replication.binlog_index and apply_status;
#

# create a table with one row, and make sure the other "master" gets it
CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
connection server2;
reset master;
SHOW TABLES;
connection master;
INSERT INTO t1 VALUES ("row1","will go away",1);
SELECT * FROM t1 ORDER BY c3;
connection server2;
SELECT * FROM t1 ORDER BY c3;

# sync slave and retrieve epoch and stop the slave
connection master;
sync_slave_with_master;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
let $the_epoch= `select @the_epoch` ;
SELECT * FROM t1 ORDER BY c3;
stop slave;

# get the master binlog pos from the epoch, from the _other_ "master", server2
connection server2;
--replace_result $the_epoch <the_epoch>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
  FROM cluster_replication.binlog_index WHERE epoch = $the_epoch ;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;

# now connect the slave to the _other_ "master"
connection slave;
--replace_result $MASTER_MYPORT1 <MASTER_PORT1>
eval CHANGE MASTER TO
  master_port=$MASTER_MYPORT1,
  master_log_file = '$the_file',
  master_log_pos = $the_pos ;
start slave;

# insert some more values on the first master
connection master;
INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
DELETE FROM t1 WHERE c3 = 1;
UPDATE t1 SET c2="should go away" WHERE c3 = 2;
UPDATE t1 SET c2="C" WHERE c3 = 3;
DELETE FROM t1 WHERE c3 = 2;

SELECT * FROM t1 ORDER BY c3;

save_master_pos;

# insert another row, and check that we have it on the slave
connection server2;
INSERT INTO t1 VALUES ("row5","E",5);
SELECT * FROM t1 ORDER BY c3;
#sync_slave_with_master;
connection slave;
--sleep 2
SELECT * FROM t1 ORDER BY c3;

STOP SLAVE;
132  mysql-test/t/rpl_ndb_sync.test  Normal file
@@ -0,0 +1,132 @@
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc

#
# Currently test only works with ndb since it retrieves "old"
# binlog positions with cluster_replication.binlog_index and apply_status;
#

# stop the slave
connection slave;
STOP SLAVE;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;

# get some data on the master
connection master;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
CREATE TABLE t1 (c1 BIT(1) NOT NULL, c2 BIT(1) NOT NULL, c3 CHAR(15), PRIMARY KEY(c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES (1,1,"row1"),(0,1,"row2"),(1,0,"row3"),(0,0,"row4");
CREATE TABLE t2 (c1 CHAR(15), c2 BIT(1) NOT NULL, c3 BIT(1) NOT NULL, PRIMARY KEY(c1)) ENGINE = NDB ;
INSERT INTO t2 VALUES ("ABC",1,1),("BCDEF",0,1),("CD",1,0),("DEFGHIJKL",0,0);
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;

# take a backup on master
--exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat
CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT);
DELETE FROM cluster_replication.backup_info;
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
let the_backup_id=`select @the_backup_id` ;

# update a row
UPDATE t1 SET c2=0 WHERE c3="row2";
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;

# restore on slave, first check that nothing is there
connection slave;

# we should have no tables
SHOW TABLES;

--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT

#
# BUG#11960
# prior to bugfix "DROP DATABASE" would give a warning since
# the events were not created by ndb_restore
#
DROP DATABASE ndbsynctest;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT

# continue test
SHOW TABLES;

SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;

#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave

# 1.
connection slave;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
let $the_epoch= `select @the_epoch` ;

# 2.
connection master;
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
  FROM cluster_replication.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;

# 3.
connection slave;
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
  master_log_file = '$the_file',
  master_log_pos = $the_pos ;
START SLAVE;

#
#
#
connection master;
#sync_slave_with_master;
--sleep 2
connection slave;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;

SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;

#
# Cleanup
#

connection master;
DROP DATABASE ndbsynctest;
#sync_slave_with_master;
--sleep 2
connection slave;
STOP SLAVE;

#
# Test some replication commands
#
connection master;
reset master;
# should now contain nothing
select * from cluster_replication.binlog_index;

connection slave;
reset slave;
# should now contain nothing
select * from cluster_replication.apply_status;
4  mysql-test/t/rpl_row_basic_7ndb.test  Normal file
@@ -0,0 +1,4 @@
-- source include/have_ndb.inc
let $type= 'NDB' ;
let $extra_index= ;
-- source include/rpl_row_basic.inc