Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-new

client/mysqltest.c:
  Auto merged
sql/ha_ndbcluster.cc:
  Auto merged
mysql-test/t/disabled.def:
  manual merge
@@ -1596,9 +1596,9 @@ wait_for_position:
It may be that the slave SQL thread has not started yet, though START
SLAVE has been issued ?
*/
if (tries++ == 3)
if (tries++ == 30)
die("could not sync with master ('%s' returned NULL)", query_buf);
sleep(1); /* So at most we will wait 3 seconds and make 4 tries */
sleep(1); /* So at most we will wait 30 seconds and make 31 tries */
mysql_free_result(res);
goto wait_for_position;
}
@@ -1664,14 +1664,14 @@ int do_save_master_pos()
{
ulonglong epoch=0, tmp_epoch= 0;
int count= 0;

do
int do_continue= 1;
while (do_continue)
{
const char binlog[]= "binlog";
const char latest_trans_epoch[]=
"latest_trans_epoch=";
const char latest_applied_binlog_epoch[]=
"latest_applied_binlog_epoch=";
const char latest_handled_binlog_epoch[]=
"latest_handled_binlog_epoch=";
if (count)
sleep(1);
if (mysql_query(mysql, query= "show engine ndb status"))
@@ -1701,26 +1701,32 @@ int do_save_master_pos()
start_lineno, latest_trans_epoch, query);
}
/* latest_applied_binlog_epoch */
while (*status && strncmp(status, latest_applied_binlog_epoch,
sizeof(latest_applied_binlog_epoch)-1))
while (*status && strncmp(status, latest_handled_binlog_epoch,
sizeof(latest_handled_binlog_epoch)-1))
status++;
if (*status)
{
status+= sizeof(latest_applied_binlog_epoch)-1;
status+= sizeof(latest_handled_binlog_epoch)-1;
tmp_epoch= strtoull(status, (char**) 0, 10);
}
else
die("line %u: result does not contain '%s' in '%s'",
start_lineno, latest_applied_binlog_epoch, query);
start_lineno, latest_handled_binlog_epoch, query);
break;
}
}
mysql_free_result(res);
if (!row)
die("line %u: result does not contain '%s' in '%s'",
start_lineno, binlog, query);
count++;
} while (tmp_epoch < epoch && count <= 3);
if (tmp_epoch >= epoch)
do_continue= 0;
else if (count > 30)
{
break;
}
mysql_free_result(res);
}
}
}
#endif
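Taken together, the hunks above turn a 3-try, 3-second wait into a 30-try, 30-second one, and restructure do_save_master_pos() so the epoch comparison controls the loop. As a hedged illustration only (names are invented, not the real mysqltest.c symbols), the bounded-retry shape is:

    #include <unistd.h>

    /* Minimal sketch of the retry pattern used above: poll once per
       second and give up after 30 failed attempts. */
    static int wait_until(int (*cond_met)(void))
    {
      int tries= 0;
      while (!cond_met())
      {
        if (tries++ == 30)
          return -1;        /* give up, as die() does in mysqltest */
        sleep(1);           /* at most 30 seconds and 31 tries */
      }
      return 0;
    }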
@@ -507,4 +507,5 @@ DROP DATABASE IF EXISTS mysqltest2;
DROP DATABASE IF EXISTS mysqltest3;
--enable_warnings

-- source include/master-slave-end.inc
@@ -51,8 +51,8 @@ SHOW CREATE TABLE t1;
# Okay lets see how it holds up to table changes
--echo --- Check that simple Alter statements are replicated correctly --

ALTER TABLE t1 MODIFY vc TEXT;
ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
ALTER TABLE t1 MODIFY vc TEXT;

--echo --- Show the new improved table on the master ---

@@ -117,6 +117,7 @@ SHOW CREATE TABLE t1;

--echo --- Check that simple Alter statements are replicated correctly ---

ALTER TABLE t1 ADD PRIMARY KEY(t,id);
ALTER TABLE t1 MODIFY vc TEXT;

--echo --- Show the new improved table on the master ---
@@ -183,6 +184,7 @@ SHOW CREATE TABLE t1;

--echo --- Check that simple Alter statements are replicated correctly ---

ALTER TABLE t1 ADD PRIMARY KEY(id);
ALTER TABLE t1 MODIFY vc TEXT;

--echo --- Show the new improved table on the master ---
@@ -239,6 +241,7 @@ SHOW CREATE TABLE t1;

--echo --- Check that simple Alter statements are replicated correctly ---

ALTER TABLE t1 ADD PRIMARY KEY(t,id);
ALTER TABLE t1 MODIFY vc TEXT;

--echo --- Show the new improved table on the master ---
@@ -80,3 +80,4 @@ DROP TABLE test.t2;
# be removed at next testsuite run.

# End of 5.0 test case
-- source include/master-slave-end.inc
@@ -183,3 +183,9 @@ connection master;

DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
# ensure cleanup on slave as well:
# ndb blob tables consist of several tables
# if cluster is shutdown while not all tables are
# properly dropped, the table becomes inconsistent
# and wrecks later test cases
--sync_slave_with_master
mysql-test/include/master-slave-end.inc (new file, 6 lines)
@@ -0,0 +1,6 @@
--connection master
--sync_slave_with_master
--connection slave
--disable_query_log
STOP SLAVE;
--enable_query_log
mysql-test/include/ndb_setup_slave.inc (new file, 27 lines)
@@ -0,0 +1,27 @@
#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave

# 1.
--connection slave
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
--let $the_epoch= `select @the_epoch`

# 2.
--connection master
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`

# 3.
--connection slave
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos ;
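For readers following the handoff logic, here is a hedged C sketch of the same three steps through the MySQL C API. It mirrors the include file above rather than any real server function; the connection handles are assumed to exist and error handling is trimmed.

    /* Sketch only: the epoch handoff from include/ndb_setup_slave.inc. */
    #include <mysql.h>
    #include <stdio.h>

    static void ndb_setup_slave_sketch(MYSQL *slave, MYSQL *master)
    {
      char epoch[32], file[64], pos[32], query[512];
      MYSQL_RES *res;
      MYSQL_ROW row;

      /* 1. last applied epoch recorded on the slave */
      mysql_query(slave, "SELECT MAX(epoch) FROM cluster.apply_status");
      res= mysql_store_result(slave);
      row= mysql_fetch_row(res);
      snprintf(epoch, sizeof(epoch), "%s", row[0]);
      mysql_free_result(res);

      /* 2. next binlog file/position on the master after that epoch */
      snprintf(query, sizeof(query),
               "SELECT Position, SUBSTRING_INDEX(FILE, '/', -1) "
               "FROM cluster.binlog_index "
               "WHERE epoch > %s ORDER BY epoch ASC LIMIT 1", epoch);
      mysql_query(master, query);
      res= mysql_store_result(master);
      row= mysql_fetch_row(res);
      snprintf(pos, sizeof(pos), "%s", row[0]);
      snprintf(file, sizeof(file), "%s", row[1]);
      mysql_free_result(res);

      /* 3. point the slave at that position */
      snprintf(query, sizeof(query),
               "CHANGE MASTER TO master_log_file = '%s', master_log_pos = %s",
               file, pos);
      mysql_query(slave, query);
    }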
@@ -32,6 +32,10 @@ show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # ALTER DATABASE mysqltest CHARACTER SET latin1
master-bin.000001 # Query # # use `mysqltest`; drop table `t1`
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # ALTER DATABASE mysqltest CHARACTER SET latin1
master-bin.000001 # Query # # use `mysqltest`; drop table `t1`
reset master;
reset master;
use test;
@@ -111,6 +115,37 @@ ENGINE = NDB
master-bin1.000001 # Query # # DROP LOGFILE GROUP lg1
ENGINE =NDB
master-bin1.000001 # Query # # use `test`; drop table `t1`
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin1.000001 # Query # # CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB
master-bin1.000001 # Query # # ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
INITIAL_SIZE = 4M
ENGINE=NDB
master-bin1.000001 # Query # # CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE NDB
master-bin1.000001 # Query # # ALTER TABLESPACE ts1
ADD DATAFILE 'datafile02.dat'
INITIAL_SIZE = 4M
ENGINE=NDB
master-bin1.000001 # Query # # ALTER TABLESPACE ts1
DROP DATAFILE 'datafile.dat'
ENGINE = NDB
master-bin1.000001 # Query # # ALTER TABLESPACE ts1
DROP DATAFILE 'datafile02.dat'
ENGINE = NDB
master-bin1.000001 # Query # # DROP TABLESPACE ts1
ENGINE = NDB
master-bin1.000001 # Query # # DROP LOGFILE GROUP lg1
ENGINE =NDB
master-bin1.000001 # Query # # use `test`; drop table `t1`
reset master;
show tables;
Tables_in_test
@@ -370,13 +370,10 @@ COUNT(*)
10000
***** Add some more records to master *********
***** Finish the slave sync process *******
* 1. *
@the_epoch:=MAX(epoch)
<the_epoch>
* 2. *
@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
<the_pos> master-bin.000001
* 3. *
* 4. *
* 5. *
START SLAVE;
@@ -25,13 +25,13 @@ hex(c2) hex(c3) c1
0 1 BCDEF
1 0 CD
0 0 DEFGHIJKL
CREATE TEMPORARY TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT)ENGINE=HEAP;
DELETE FROM cluster.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM cluster.backup_info;
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE cluster.backup_info;
DROP TABLE test.backup_info;
UPDATE t1 SET c2=0 WHERE c3="row2";
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
hex(c1) hex(c2) c3
@@ -15,31 +15,31 @@ events : BUG#17619 2006-02-21 andrey Test case unstable
events_logs_tests : BUG#18953 2006-04-12 kent Test is randomly failing
ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent Test fails randomly
ndb_cache2 : BUG#18597 2006-03-28 brian
ndb_cache_multi2 : BUG#18974 2006-04-10 kent Test fails randomly
ndb_gis : BUG#18600 2006-03-28 brian
ndb_load : BUG#17233 2006-02-16 jmiller
partition_03ndb : BUG#16385 2006-03-24 mikael
#ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent Test fails randomly
#ndb_cache2 : BUG#18597 2006-03-28 brian simultaneous drop table and ndb statistics update triggers node failure
#ndb_cache_multi2 : BUG#18974 2006-04-10 kent Test fails randomly
#ndb_gis : BUG#18600 2006-03-28 brian ndb_gis test failure
#ndb_load : BUG#17233 2006-02-16 jmiller failed load data from infile causes mysqld dbug_assert, binlog not flushed
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
ps_7ndb : BUG#18950 2006-02-16 jmiller Dbug assert in RBR mode when executing test suite
rpl_deadlock_innodb : BUG#16920 2006-04-12 kent fails in show slave status (randomly)
rpl_ndb_dd_basic : BUG#18569 2006-03-28 brian
rpl_row_basic_7ndb : BUG#18923 2006-04-09 brian
rpl_ndb_dd_advance : BUG#18924 2006-04-09 brian
rpl_ndb_multi_update2 : BUG#18928 2006-04-09 brian
rpl_ndb_2innodb : BUG#18945 2006-03-22 tomas Assertion in get_parts_for_update()
rpl_ndb_2myisam : BUG#18945 2006-03-22 tomas Assertion in get_parts_for_update()
rpl_ndb_auto_inc : BUG#17086 2006-02-16 jmiller
rpl_ndb_basic : BUG#18592 2006-03-28 brian
rpl_ndb_ddl : BUG#18946 2006-03-16 mats Master hangs (Probably related to BUG#17400)
rpl_ndb_insert_ignore : BUG#18567 2006-03-28 brian
rpl_ndb_innodb2ndb : BUG#18261 2006-03-16 mats Cluster Replication: tests rpl_ndb_xxx2ndb fails (BUG#17400)
rpl_ndb_myisam2ndb : BUG#18261 2006-03-16 mats Cluster Replication: tests rpl_ndb_xxx2ndb fails (BUG#17400)
rpl_ndb_multi_update3 : BUG#18627 2006-03-29 monty Cluster Replication: rpl_ndb_multi_update3 fails on Intel 64 bit
rpl_ndb_log : BUG#18947 2006-03-21 tomas Result not deterministic, TBD if should remove
rpl_ndb_relay_space : BUG#16993 2006-02-16 jmiller
rpl_ndb_trig004 : BUG#18977 2006-04-10 kent Test fails randomly
rpl_ndb_2innodb : BUG#19004 2006-03-22 tomas ndb: partition by range and update hangs
rpl_ndb_2myisam : BUG#19004 2006-03-22 tomas ndb: partition by range and update hangs
rpl_ndb_auto_inc : BUG#17086 2006-02-16 jmiller CR: auto_increment_increment and auto_increment_offset produce duplicate key er
#rpl_ndb_basic : BUG#18592 2006-03-28 brian rpl_ndb_basic failure
#rpl_ndb_dd_advance : BUG#18924 2006-04-09 brian rpl_ndb_dd_advance failure
#rpl_ndb_dd_basic : BUG#18569 2006-03-28 brian rpl_ndb_dd_basic failure
rpl_ndb_ddl : result file needs update + test needs to be checked
#rpl_ndb_insert_ignore : BUG#18567 2006-03-28 brian rpl_ndb_insert_ignore failure
rpl_ndb_innodb2ndb : BUG#18094 2006-03-16 mats Slave caches invalid table definition after alters causes select failure
#rpl_ndb_log : BUG#18947 2006-03-21 tomas Result not deterministic, TBD if should remove
#rpl_ndb_multi_update2 : BUG#18928 2006-04-09 brian rpl_ndb_multi_update2 failed
#rpl_ndb_multi_update3 : BUG#18627 2006-03-29 monty Cluster Replication: rpl_ndb_multi_update3 fails on Intel 64 bit
rpl_ndb_myisam2ndb : BUG#18094 2006-03-16 mats Slave caches invalid table definition after alters causes select failure
rpl_ndb_relay_space : BUG#16993 2006-02-16 jmiller RBR: ALTER TABLE ZEROFILL AUTO_INCREMENT is not replicated correctly
#rpl_ndb_trig004 : BUG#18977 2006-04-10 kent Test fails randomly
rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian
rpl_row_basic_7ndb : BUG#17400 2006-04-09 brian Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly
rpl_row_inexist_tbl : BUG#18948 2006-03-09 mats Disabled since patch makes this test wait forever
rpl_sp : BUG#16456 2006-02-16 jmiller
@@ -64,7 +64,9 @@ drop table mysqltest.t1;

--connection server1
--source include/show_binlog_events.inc

# to track down bug#18976
--real_sleep 10
--source include/show_binlog_events.inc

# drop database and drop should come after data events
--connection server1
@@ -131,6 +133,9 @@ drop table t1;

--connection server2
--source include/show_binlog_events.inc
# to track down bug#18976
--real_sleep 10
--source include/show_binlog_events.inc

#
# Bug #17827 cluster: rename of several tables in one statement,
@@ -132,34 +132,7 @@ let the_backup_id=`select @the_backup_id`;

#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave
# 4. start the replication

# 1.
--connection slave
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
--let $the_epoch= `select @the_epoch`

# 2.
--connection master
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`

# 3.
--connection slave
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos;

# 4.
--source include/ndb_setup_slave.inc
--connection slave
START SLAVE;
@@ -146,4 +146,4 @@ select * from t1 order by nid;
# cleanup
--connection master
DROP TABLE t1;
sync_slave_with_master;
-- source include/master-slave-end.inc
@@ -479,29 +479,9 @@ while ($j)
# 5. start the replication

--echo ***** Finish the slave sync process *******
# 1.
--echo * 1. *
connection slave;
--disable_query_log
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
--let $the_epoch= `select @the_epoch`

# 2.
--echo * 2. *
connection master;
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`

# 3.
--echo * 3. *
connection slave;
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos;
# 1. 2. 3.
--source include/ndb_setup_slave.inc
--enable_query_log

# 4.
@@ -82,4 +82,4 @@ drop datafile 'datafile02.dat'
engine=ndb;
DROP TABLESPACE ts1 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
--sync_slave_with_master
-- source include/master-slave-end.inc
@@ -8,13 +8,13 @@
#

# stop the slave
connection slave;
--connection slave
STOP SLAVE;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;

# get some data on the master
connection master;
--connection master
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
CREATE TABLE t1 (c1 BIT(1) NOT NULL, c2 BIT(1) NOT NULL, c3 CHAR(15), PRIMARY KEY(c3)) ENGINE = NDB ;
@@ -25,21 +25,14 @@ SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;

# take a backup on master
--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
CREATE TEMPORARY TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT)ENGINE=HEAP;
DELETE FROM cluster.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ',';
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster.backup_info;
let the_backup_id=`select @the_backup_id` ;
DROP TABLE cluster.backup_info;
--source include/ndb_backup.inc

# update a row
UPDATE t1 SET c2=0 WHERE c3="row2";
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;

# restore on slave, first check that nothing is there
connection slave
--connection slave

# we should have no tables
SHOW TABLES;
@@ -66,40 +59,16 @@ SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;

#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave

# 1.
connection slave;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster.apply_status;
let $the_epoch= `select @the_epoch` ;

# 2.
connection master;
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;

# 3.
connection slave;
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos ;
--source include/ndb_setup_slave.inc
--connection slave
START SLAVE;

#
#
#
connection master;
#sync_slave_with_master;
--sleep 2
connection slave;
--connection master
--sync_slave_with_master
--connection slave
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
@@ -111,22 +80,21 @@ SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
# Cleanup
#

connection master;
--connection master
DROP DATABASE ndbsynctest;
#sync_slave_with_master;
--sleep 2
connection slave;
--sync_slave_with_master
--connection slave
STOP SLAVE;

#
# Test some replication commands
#
connection master;
--connection master
reset master;
# should now contain nothing
select * from cluster.binlog_index;

connection slave;
--connection slave
reset slave;
# should now contain nothing
select * from cluster.apply_status;
@@ -7,6 +7,7 @@
# partition tables with same engine (MyISAM) in both ends. #
############################################################

--source include/have_partition.inc
--source include/master-slave.inc
connection master;
--disable_warnings
@@ -4489,6 +4489,21 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}

#ifdef HAVE_NDB_BINLOG
/*
Don't allow table creation unless
schema distribution table is setup
( unless it is a creation of the schema dist table itself )
*/
if (!schema_share &&
!(strcmp(m_dbname, NDB_REP_DB) == 0 &&
strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0))
{
DBUG_PRINT("info", ("Schema distribution table not setup"));
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
#endif /* HAVE_NDB_BINLOG */

DBUG_PRINT("table", ("name: %s", m_tabname));
tab.setName(m_tabname);
tab.setLogging(!(info->options & HA_LEX_CREATE_TMP_TABLE));
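The guard just added refuses DDL until the schema distribution table is known, with one bootstrap exception: creating that table itself. A hedged, stand-alone sketch of the rule (the macro values and variable are placeholders for the real server globals; "cluster"/"schema" are assumptions, not confirmed by this diff):

    #include <string.h>

    /* placeholder stand-ins for the real server globals/macros */
    #define NDB_REP_DB       "cluster"
    #define NDB_SCHEMA_TABLE "schema"
    static void *schema_share;   /* non-null once schema distribution is up */

    static int ddl_allowed(const char *dbname, const char *tabname)
    {
      if (schema_share)
        return 1;                               /* distribution ready */
      /* bootstrap exception: the schema table itself may be created */
      return strcmp(dbname, NDB_REP_DB) == 0 &&
             strcmp(tabname, NDB_SCHEMA_TABLE) == 0;
    }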
@@ -5027,7 +5042,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
is_old_table_tmpfile= 0;
String event_name(INJECTOR_EVENT_LEN);
ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0);
ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share);
ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share,
"rename table");
}

if (!result && !IS_TMP_PREFIX(new_tabname))
@@ -5111,6 +5127,15 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
NDBDICT *dict= ndb->getDictionary();
#ifdef HAVE_NDB_BINLOG
/*
Don't allow drop table unless
schema distribution table is setup
*/
if (!schema_share)
{
DBUG_PRINT("info", ("Schema distribution table not setup"));
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
NDB_SHARE *share= get_share(path, 0, false);
#endif
@@ -5179,7 +5204,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
ndb_rep_event_name(&event_name, path + sizeof(share_prefix) - 1, 0);
ndbcluster_handle_drop_table(ndb,
table_dropped ? event_name.c_ptr() : 0,
share);
share, "delete table");
}

if (share)
@@ -5208,6 +5233,18 @@ int ha_ndbcluster::delete_table(const char *name)
set_dbname(name);
set_tabname(name);

#ifdef HAVE_NDB_BINLOG
/*
Don't allow drop table unless
schema distribution table is setup
*/
if (!schema_share)
{
DBUG_PRINT("info", ("Schema distribution table not setup"));
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
#endif

if (check_ndb_connection())
DBUG_RETURN(HA_ERR_NO_CONNECTION);
@@ -5429,6 +5466,11 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);

#ifdef HAVE_NDB_BINLOG
if (!ndb_binlog_tables_inited && ndb_binlog_running)
table->db_stat|= HA_READ_ONLY;
#endif

DBUG_RETURN(res);
}
@@ -5727,6 +5769,19 @@ int ndbcluster_drop_database_impl(const char *path)

static void ndbcluster_drop_database(char *path)
{
DBUG_ENTER("ndbcluster_drop_database");
#ifdef HAVE_NDB_BINLOG
/*
Don't allow drop database unless
schema distribution table is setup
*/
if (!schema_share)
{
DBUG_PRINT("info", ("Schema distribution table not setup"));
DBUG_VOID_RETURN;
//DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
#endif
ndbcluster_drop_database_impl(path);
#ifdef HAVE_NDB_BINLOG
char db[FN_REFLEN];
@@ -5735,6 +5790,7 @@ static void ndbcluster_drop_database(char *path)
current_thd->query, current_thd->query_length,
db, "", 0, 0, SOT_DROP_DB);
#endif
DBUG_VOID_RETURN;
}
/*
find all tables in ndb and discover those needed
@@ -5756,36 +5812,37 @@ int ndbcluster_find_all_files(THD *thd)
DBUG_ENTER("ndbcluster_find_all_files");
Ndb* ndb;
char key[FN_REFLEN];
NdbDictionary::Dictionary::List list;

if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);

NDBDICT *dict= ndb->getDictionary();

int unhandled, retries= 5;
int unhandled, retries= 5, skipped;
do
{
NdbDictionary::Dictionary::List list;
if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0)
ERR_RETURN(dict->getNdbError());
unhandled= 0;
skipped= 0;
retries--;
for (uint i= 0 ; i < list.count ; i++)
{
NDBDICT::List::Element& elmt= list.elements[i];
int do_handle_table= 0;
if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name))
{
DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name));
continue;
}
DBUG_PRINT("info", ("Found %s.%s in NDB", elmt.database, elmt.name));
if (elmt.state == NDBOBJ::StateOnline ||
elmt.state == NDBOBJ::StateBackup)
do_handle_table= 1;
else if (!(elmt.state == NDBOBJ::StateBuilding))
if (elmt.state != NDBOBJ::StateOnline &&
elmt.state != NDBOBJ::StateBackup &&
elmt.state != NDBOBJ::StateBuilding)
{
sql_print_information("NDB: skipping setup table %s.%s, in state %d",
elmt.database, elmt.name, elmt.state);
skipped++;
continue;
}
@@ -5794,7 +5851,7 @@ int ndbcluster_find_all_files(THD *thd)

if (!(ndbtab= dict->getTable(elmt.name)))
{
if (do_handle_table)
if (retries == 0)
sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
elmt.database, elmt.name,
dict->getNdbError().code,
@@ -5863,9 +5920,9 @@ int ndbcluster_find_all_files(THD *thd)
pthread_mutex_unlock(&LOCK_open);
}
}
while (unhandled && retries--);
while (unhandled && retries);

DBUG_RETURN(0);
DBUG_RETURN(-(skipped + unhandled));
}

int ndbcluster_find_files(THD *thd,const char *db,const char *path,
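The reworked loop decrements the retry budget up front, counts tables in transient states as skipped rather than waiting on them, and reports leftover work through the return value. A hedged sketch of the resulting control flow (names illustrative, not the real function):

    /* Sketch of the retry accounting introduced above. */
    static int discover_tables(void)
    {
      int unhandled, skipped, retries= 5;
      do
      {
        unhandled= 0;
        skipped= 0;
        retries--;
        /* ... walk the dictionary list, counting tables in unusable
           states (skipped++) and failed setups (unhandled++) ... */
      }
      while (unhandled && retries);
      /* 0 when everything is usable; otherwise a negative count */
      return -(skipped + unhandled);
    }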
@@ -7729,6 +7786,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
pthread_cond_wait(&COND_server_started, &LOCK_server_started);
pthread_mutex_unlock(&LOCK_server_started);

ndbcluster_util_inited= 1;

/*
Wait for cluster to start
*/
@@ -7760,6 +7819,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
}

#ifdef HAVE_NDB_BINLOG
if (ndb_extra_logging && ndb_binlog_running)
sql_print_information("NDB Binlog: Ndb tables initially read only.");
/* create tables needed by the replication */
ndbcluster_setup_binlog_table_shares(thd);
#else
@@ -7769,17 +7830,9 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndbcluster_find_all_files(thd);
#endif

ndbcluster_util_inited= 1;

#ifdef HAVE_NDB_BINLOG
/* Signal injector thread that all is setup */
pthread_cond_signal(&injector_cond);
#endif

set_timespec(abstime, 0);
for (;!abort_loop;)
{

pthread_mutex_lock(&LOCK_ndb_util_thread);
pthread_cond_timedwait(&COND_ndb_util_thread,
&LOCK_ndb_util_thread,
@@ -7797,7 +7850,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
Check that the apply_status_share and schema_share has been created.
If not try to create it
*/
if (!apply_status_share || !schema_share)
if (!ndb_binlog_tables_inited)
ndbcluster_setup_binlog_table_shares(thd);
#endif
@@ -10052,14 +10105,15 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
}
}

dict->listObjects(dflist, NdbDictionary::Object::Undofile);
NdbDictionary::Dictionary::List uflist;
dict->listObjects(uflist, NdbDictionary::Object::Undofile);
ndberr= dict->getNdbError();
if (ndberr.classification != NdbError::NoError)
ERR_RETURN(ndberr);

for (i= 0; i < dflist.count; i++)
for (i= 0; i < uflist.count; i++)
{
NdbDictionary::Dictionary::List::Element& elt= dflist.elements[i];
NdbDictionary::Dictionary::List::Element& elt= uflist.elements[i];
Ndb_cluster_connection_node_iter iter;
unsigned id;
@@ -48,6 +48,7 @@ int ndb_binlog_thread_running= 0;
FALSE if not
*/
my_bool ndb_binlog_running= FALSE;
my_bool ndb_binlog_tables_inited= FALSE;

/*
Global reference to the ndb injector thread THD object
@@ -775,33 +776,51 @@ static int ndbcluster_create_schema_table(THD *thd)
DBUG_RETURN(0);
}

void ndbcluster_setup_binlog_table_shares(THD *thd)
int ndbcluster_setup_binlog_table_shares(THD *thd)
{
int done_find_all_files= 0;
if (!schema_share &&
ndbcluster_check_schema_share() == 0)
{
if (!done_find_all_files)
pthread_mutex_lock(&LOCK_open);
ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_SCHEMA_TABLE);
pthread_mutex_unlock(&LOCK_open);
if (!schema_share)
{
ndbcluster_find_all_files(thd);
done_find_all_files= 1;
}
ndbcluster_create_schema_table(thd);
// always make sure we create the 'schema' first
if (!schema_share)
return;
return 1;
}
}
if (!apply_status_share &&
ndbcluster_check_apply_status_share() == 0)
{
if (!done_find_all_files)
pthread_mutex_lock(&LOCK_open);
ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_APPLY_TABLE);
pthread_mutex_unlock(&LOCK_open);
if (!apply_status_share)
{
ndbcluster_find_all_files(thd);
done_find_all_files= 1;
}
ndbcluster_create_apply_status_table(thd);
if (!apply_status_share)
return 1;
}
}
if (!ndbcluster_find_all_files(thd))
{
pthread_mutex_lock(&LOCK_open);
ndb_binlog_tables_inited= TRUE;
if (ndb_binlog_running)
{
if (ndb_extra_logging)
sql_print_information("NDB Binlog: ndb tables writable");
close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
}
pthread_mutex_unlock(&LOCK_open);
/* Signal injector thread that all is setup */
pthread_cond_signal(&injector_cond);
}
return 0;
}

/*
Defines and struct for schema table.
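Rewritten this way, the function can be safely re-entered until everything exists: each missing share gets one creation attempt, a non-zero return tells the caller to retry later, and only a final successful check raises the global flag and wakes the injector. A hedged, stand-alone sketch of that shape (placeholder names throughout, not the real server code):

    #include <pthread.h>

    static pthread_mutex_t lock= PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  ready_cond= PTHREAD_COND_INITIALIZER;
    static int tables_inited= 0;

    /* Returns 0 once every prerequisite exists; 1 means "retry later". */
    static int setup_shares(int schema_ok, int apply_status_ok)
    {
      if (!schema_ok)          /* always bring up 'schema' first */
        return 1;
      if (!apply_status_ok)
        return 1;
      pthread_mutex_lock(&lock);
      tables_inited= 1;        /* flip the readiness flag once */
      pthread_mutex_unlock(&lock);
      pthread_cond_signal(&ready_cond);   /* wake the binlog injector */
      return 0;
    }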
@@ -936,6 +955,31 @@ static char *ndb_pack_varchar(const NDBCOL *col, char *buf,
/*
log query in schema table
*/
static void ndb_report_waiting(const char *key,
int the_time,
const char *op,
const char *obj)
{
ulonglong ndb_latest_epoch= 0;
const char *proc_info= "<no info>";
pthread_mutex_lock(&injector_mutex);
if (injector_ndb)
ndb_latest_epoch= injector_ndb->getLatestGCI();
if (injector_thd)
proc_info= injector_thd->proc_info;
pthread_mutex_unlock(&injector_mutex);
sql_print_information("NDB %s:"
" waiting max %u sec for %s %s."
" epochs: (%u,%u,%u)"
" injector proc_info: %s"
,key, the_time, op, obj
,(uint)ndb_latest_handled_binlog_epoch
,(uint)ndb_latest_received_binlog_epoch
,(uint)ndb_latest_epoch
,proc_info
);
}

int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
const char *query, int query_length,
const char *db, const char *table_name,
@@ -965,6 +1009,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
}

char tmp_buf2[FN_REFLEN];
const char *type_str;
switch (type)
{
case SOT_DROP_TABLE:
@@ -975,6 +1020,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
query= tmp_buf2;
query_length= (uint) (strxmov(tmp_buf2, "drop table `",
table_name, "`", NullS) - tmp_buf2);
type_str= "drop table";
break;
case SOT_RENAME_TABLE:
/* redo the rename table query as it may contain several tables */
@@ -982,20 +1028,28 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
query_length= (uint) (strxmov(tmp_buf2, "rename table `",
old_db, ".", old_table_name, "` to `",
db, ".", table_name, "`", NullS) - tmp_buf2);
type_str= "rename table";
break;
case SOT_CREATE_TABLE:
// fall through
type_str= "create table";
break;
case SOT_ALTER_TABLE:
type_str= "create table";
break;
case SOT_DROP_DB:
type_str= "drop db";
break;
case SOT_CREATE_DB:
type_str= "create db";
break;
case SOT_ALTER_DB:
type_str= "alter db";
break;
case SOT_TABLESPACE:
type_str= "tablespace";
break;
case SOT_LOGFILE_GROUP:
type_str= "logfile group";
break;
default:
abort(); /* should not happen, programming error */
@@ -1174,7 +1228,7 @@ end:
struct timespec abstime;
int i;
set_timespec(abstime, 1);
(void) pthread_cond_timedwait(&injector_cond,
int ret= pthread_cond_timedwait(&injector_cond,
&ndb_schema_object->mutex,
&abstime);
@@ -1198,16 +1252,19 @@ end:
if (bitmap_is_clear_all(&ndb_schema_object->slock_bitmap))
break;

if (ret)
{
max_timeout--;
if (max_timeout == 0)
{
sql_print_error("NDB create table: timed out. Ignoring...");
sql_print_error("NDB %s: distributing %s timed out. Ignoring...",
type_str, ndb_schema_object->key);
break;
}
if (ndb_extra_logging)
sql_print_information("NDB create table: "
"waiting max %u sec for create table %s.",
max_timeout, ndb_schema_object->key);
ndb_report_waiting(type_str, max_timeout,
"distributing", ndb_schema_object->key);
}
}
(void) pthread_mutex_unlock(&ndb_schema_object->mutex);
}
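Both wait loops in this commit now check the pthread_cond_timedwait() return value, so the timeout budget is charged only when the one-second wait actually expired, not on every (possibly spurious or signalled) wakeup. A hedged, self-contained sketch of that pattern:

    #include <pthread.h>
    #include <time.h>

    /* Sketch: wait for *done, charging max_timeout only on real timeouts. */
    static int wait_with_budget(pthread_mutex_t *mutex, pthread_cond_t *cond,
                                volatile int *done, int max_timeout)
    {
      pthread_mutex_lock(mutex);
      while (!*done)
      {
        struct timespec abstime;
        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_sec+= 1;                     /* wait in 1-second slices */
        int ret= pthread_cond_timedwait(cond, mutex, &abstime);
        if (ret)                                /* ETIMEDOUT: charge budget */
        {
          if (--max_timeout == 0)
            break;                              /* give up, as above */
        }
      }
      pthread_mutex_unlock(mutex);
      return *done ? 0 : -1;
    }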
@@ -1511,7 +1568,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
pOp->setCustomData(0);

pthread_mutex_lock(&injector_mutex);
injector_ndb->dropEventOperation(pOp);
ndb->dropEventOperation(pOp);
pOp= 0;
pthread_mutex_unlock(&injector_mutex);
@@ -1689,9 +1746,15 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
// skip
break;
case NDBEVENT::TE_CLUSTER_FAILURE:
// fall through
case NDBEVENT::TE_DROP:
if (ndb_extra_logging &&
ndb_binlog_tables_inited && ndb_binlog_running)
sql_print_information("NDB Binlog: ndb tables initially "
"read only on reconnect.");
free_share(&schema_share);
schema_share= 0;
ndb_binlog_tables_inited= FALSE;
// fall through
case NDBEVENT::TE_ALTER:
ndb_handle_schema_change(thd, ndb, pOp, tmp_share);
@@ -2385,7 +2448,6 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
}
if (!op)
{
pthread_mutex_unlock(&injector_mutex);
sql_print_error("NDB Binlog: Creating NdbEventOperation failed for"
" %s",event_name);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
@@ -2393,6 +2455,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
ndb->getNdbError().code,
ndb->getNdbError().message,
"NDB");
pthread_mutex_unlock(&injector_mutex);
DBUG_RETURN(-1);
}
@@ -2494,9 +2557,15 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,

get_share(share);
if (do_apply_status_share)
{
apply_status_share= get_share(share);
(void) pthread_cond_signal(&injector_cond);
}
else if (do_schema_share)
{
schema_share= get_share(share);
(void) pthread_cond_signal(&injector_cond);
}

DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u",
share->key, share->op, share->use_count));
@@ -2513,7 +2582,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
*/
int
ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
NDB_SHARE *share)
NDB_SHARE *share, const char *type_str)
{
DBUG_ENTER("ndbcluster_handle_drop_table");
@@ -2569,21 +2638,24 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
{
struct timespec abstime;
set_timespec(abstime, 1);
(void) pthread_cond_timedwait(&injector_cond,
int ret= pthread_cond_timedwait(&injector_cond,
&share->mutex,
&abstime);
max_timeout--;
if (share->op == 0)
break;
if (ret)
{
max_timeout--;
if (max_timeout == 0)
{
sql_print_error("NDB delete table: timed out. Ignoring...");
sql_print_error("NDB %s: %s timed out. Ignoring...",
type_str, share->key);
break;
}
if (ndb_extra_logging)
sql_print_information("NDB delete table: "
"waiting max %u sec for drop table %s.",
max_timeout, share->key);
ndb_report_waiting(type_str, max_timeout,
type_str, share->key);
}
}
(void) pthread_mutex_unlock(&share->mutex);
#else
@@ -2646,7 +2718,8 @@ static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp,
}

static int
ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
NdbEventOperation *pOp,
Binlog_index_row &row)
{
NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
@@ -2655,18 +2728,23 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
/* make sure to flush any pending events as they can be dependent
on one of the tables being changed below
*/
injector_thd->binlog_flush_pending_rows_event(true);
thd->binlog_flush_pending_rows_event(true);

switch (type)
{
case NDBEVENT::TE_CLUSTER_FAILURE:
if (apply_status_share == share)
{
free_share(&apply_status_share);
apply_status_share= 0;
}
if (ndb_extra_logging)
sql_print_information("NDB Binlog: cluster failure for %s.", share->key);
if (apply_status_share == share)
{
if (ndb_extra_logging &&
ndb_binlog_tables_inited && ndb_binlog_running)
sql_print_information("NDB Binlog: ndb tables initially "
"read only on reconnect.");
free_share(&apply_status_share);
apply_status_share= 0;
ndb_binlog_tables_inited= FALSE;
}
DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: "
"%s received share: 0x%lx op: %lx share op: %lx "
"op_old: %lx",
@@ -2675,8 +2753,13 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
case NDBEVENT::TE_DROP:
if (apply_status_share == share)
{
if (ndb_extra_logging &&
ndb_binlog_tables_inited && ndb_binlog_running)
sql_print_information("NDB Binlog: ndb tables initially "
"read only on reconnect.");
free_share(&apply_status_share);
apply_status_share= 0;
ndb_binlog_tables_inited= FALSE;
}
/* ToDo: remove printout */
if (ndb_extra_logging)
@@ -2702,7 +2785,7 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
return 0;
}

ndb_handle_schema_change(injector_thd, ndb, pOp, share);
ndb_handle_schema_change(thd, ndb, pOp, share);
return 0;
}
@@ -2982,7 +3065,8 @@ static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object,
pthread_handler_t ndb_binlog_thread_func(void *arg)
{
THD *thd; /* needs to be first for thread_stack */
Ndb *ndb= 0;
Ndb *i_ndb= 0;
Ndb *s_ndb= 0;
Thd_ndb *thd_ndb=0;
int ndb_update_binlog_index= 1;
injector *inj= injector::instance();
@@ -3034,16 +3118,16 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
pthread_mutex_unlock(&LOCK_thread_count);
thd->lex->start_transaction_opt= 0;

if (!(schema_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
schema_ndb->init())
if (!(s_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
s_ndb->init())
{
sql_print_error("NDB Binlog: Getting Schema Ndb object failed");
goto err;
}

// empty database
if (!(ndb= new Ndb(g_ndb_cluster_connection, "")) ||
ndb->init())
if (!(i_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
i_ndb->init())
{
sql_print_error("NDB Binlog: Getting Ndb object failed");
ndb_binlog_thread_running= -1;
@@ -3064,7 +3148,8 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
pthread_mutex_lock(&injector_mutex);
*/
injector_thd= thd;
injector_ndb= ndb;
injector_ndb= i_ndb;
schema_ndb= s_ndb;
ndb_binlog_thread_running= 1;
if (opt_bin_log)
{
@@ -3087,7 +3172,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->proc_info= "Waiting for ndbcluster to start";

pthread_mutex_lock(&injector_mutex);
while (!ndbcluster_util_inited)
while (!schema_share || !apply_status_share)
{
/* ndb not connected yet */
struct timespec abstime;
@@ -3119,10 +3204,6 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->db= db;
if (ndb_binlog_running)
open_binlog_index(thd, &binlog_tables, &binlog_index);
if (!apply_status_share)
{
sql_print_error("NDB: Could not get apply status share");
}
thd->db= db;
}
@@ -3150,14 +3231,14 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
int res= 0, tot_poll_wait= 1000;
if (ndb_binlog_running)
{
res= ndb->pollEvents(tot_poll_wait, &gci);
res= i_ndb->pollEvents(tot_poll_wait, &gci);
tot_poll_wait= 0;
}
int schema_res= schema_ndb->pollEvents(tot_poll_wait, &schema_gci);
int schema_res= s_ndb->pollEvents(tot_poll_wait, &schema_gci);
ndb_latest_received_binlog_epoch= gci;

while (gci > schema_gci && schema_res >= 0)
schema_res= schema_ndb->pollEvents(10, &schema_gci);
schema_res= s_ndb->pollEvents(10, &schema_gci);

if ((abort_loop || do_ndbcluster_binlog_close_connection) &&
(ndb_latest_handled_binlog_epoch >= g_latest_trans_gci ||
@@ -3184,15 +3265,16 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)

if (unlikely(schema_res > 0))
{
schema_ndb->
thd->proc_info= "Processing events from schema table";
s_ndb->
setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
schema_ndb->
s_ndb->
setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
NdbEventOperation *pOp= schema_ndb->nextEvent();
NdbEventOperation *pOp= s_ndb->nextEvent();
while (pOp != NULL)
{
if (!pOp->hasError())
ndb_binlog_thread_handle_schema_event(thd, schema_ndb, pOp,
ndb_binlog_thread_handle_schema_event(thd, s_ndb, pOp,
&post_epoch_log_list,
&post_epoch_unlock_list,
&mem_root);
@@ -3201,7 +3283,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
"binlog schema event",
(ulong) pOp->getNdbError().code,
pOp->getNdbError().message);
pOp= schema_ndb->nextEvent();
pOp= s_ndb->nextEvent();
}
}
@@ -3213,7 +3295,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
int event_count= 0;
#endif
thd->proc_info= "Processing events";
NdbEventOperation *pOp= ndb->nextEvent();
NdbEventOperation *pOp= i_ndb->nextEvent();
Binlog_index_row row;
while (pOp != NULL)
{
@@ -3224,9 +3306,9 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
! IS_NDB_BLOB_PREFIX(pOp->getEvent()->getTable()->getName()));
DBUG_ASSERT(gci <= ndb_latest_received_binlog_epoch);

ndb->
i_ndb->
setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
i_ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);

bzero((char*) &row, sizeof(row));
injector::transaction trans;
@@ -3235,7 +3317,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Uint32 iter= 0;
const NdbEventOperation *gci_op;
Uint32 event_types;
while ((gci_op= ndb->getGCIEventOperations(&iter, &event_types))
while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
!= NULL)
{
NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
@@ -3321,7 +3403,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
event_count++;
#endif
if (pOp->hasError() &&
ndb_binlog_thread_handle_error(ndb, pOp, row) < 0)
ndb_binlog_thread_handle_error(i_ndb, pOp, row) < 0)
goto err;

#ifndef DBUG_OFF
@@ -3341,7 +3423,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Uint32 iter= 0;
const NdbEventOperation *gci_op;
Uint32 event_types;
while ((gci_op= ndb->getGCIEventOperations(&iter, &event_types))
while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
!= NULL)
{
if (gci_op == pOp)
@@ -3353,19 +3435,19 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
#endif
if ((unsigned) pOp->getEventType() <
(unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT)
ndb_binlog_thread_handle_data_event(ndb, pOp, row, trans);
ndb_binlog_thread_handle_data_event(i_ndb, pOp, row, trans);
else
{
// set injector_ndb database/schema from table internal name
int ret=
ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
DBUG_ASSERT(ret == 0);
ndb_binlog_thread_handle_non_data_event(ndb, pOp, row);
ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row);
// reset to catch errors
ndb->setDatabaseName("");
i_ndb->setDatabaseName("");
}

pOp= ndb->nextEvent();
pOp= i_ndb->nextEvent();
} while (pOp && pOp->getGCI() == gci);

/*
@@ -3379,6 +3461,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
if (trans.good())
{
//DBUG_ASSERT(row.n_inserts || row.n_updates || row.n_deletes);
thd->proc_info= "Committing events to binlog";
injector::transaction::binlog_pos start= trans.start_pos();
if (int r= trans.commit())
{
@@ -3418,10 +3501,13 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
}
err:
DBUG_PRINT("info",("Shutting down cluster binlog thread"));
thd->proc_info= "Shutting down";
close_thread_tables(thd);
pthread_mutex_lock(&injector_mutex);
/* don't mess with the injector_ndb anymore from other threads */
injector_thd= 0;
injector_ndb= 0;
schema_ndb= 0;
pthread_mutex_unlock(&injector_mutex);
thd->db= 0; // as not to try to free memory
sql_print_information("Stopping Cluster Binlog");
@@ -3438,21 +3524,45 @@ err:
}

/* remove all event operations */
if (ndb)
if (s_ndb)
{
NdbEventOperation *op;
DBUG_PRINT("info",("removing all event operations"));
while ((op= ndb->getEventOperation()))
while ((op= s_ndb->getEventOperation()))
{
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
DBUG_PRINT("info",("removing event operation on %s",
op->getEvent()->getName()));
NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
DBUG_ASSERT(share != 0);
DBUG_ASSERT(share->op == op ||
share->op_old == op);
share->op= share->op_old= 0;
free_share(&share);
ndb->dropEventOperation(op);
s_ndb->dropEventOperation(op);
}
delete ndb;
ndb= 0;
delete s_ndb;
s_ndb= 0;
}
if (i_ndb)
{
NdbEventOperation *op;
DBUG_PRINT("info",("removing all event operations"));
while ((op= i_ndb->getEventOperation()))
{
DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
DBUG_PRINT("info",("removing event operation on %s",
op->getEvent()->getName()));
NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
DBUG_ASSERT(share != 0);
DBUG_ASSERT(share->op == op ||
share->op_old == op);
share->op= share->op_old= 0;
free_share(&share);
i_ndb->dropEventOperation(op);
}
delete i_ndb;
i_ndb= 0;
}

hash_free(&ndb_schema_objects);
@@ -101,7 +101,8 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
const char *old_db= 0,
const char *old_table_name= 0);
int ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
NDB_SHARE *share);
NDB_SHARE *share,
const char *type_str);
void ndb_rep_event_name(String *event_name,
const char *db, const char *tbl);
int ndb_create_table_from_engine(THD *thd, const char *db,
@@ -112,12 +113,13 @@ pthread_handler_t ndb_binlog_thread_func(void *arg);
/*
table cluster_replication.apply_status
*/
void ndbcluster_setup_binlog_table_shares(THD *thd);
int ndbcluster_setup_binlog_table_shares(THD *thd);
extern NDB_SHARE *apply_status_share;
extern NDB_SHARE *schema_share;

extern THD *injector_thd;
extern my_bool ndb_binlog_running;
extern my_bool ndb_binlog_tables_inited;

bool
ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
@@ -586,7 +586,8 @@ public:
enum Unimplemented
{
ScanOptimised = 15, //Default updateOptimised
AttributeGroup = 1012 //Default 0
AttributeGroup = 1012, //Default 0
FileNo = 102
};
};
@@ -618,13 +619,13 @@ struct DictFilegroupInfo {
*/
FileName = 100,
FileType = 101,
FileId = 102,
FileNo = 103, // Per Filegroup
FileId = 103,
FileFGroupId = 104,
FileFGroupVersion = 105,
FileSizeHi = 106,
FileSizeLo = 107,
FileFreeExtents = 108,
FileVersion = 109,
FileEnd = 199, //

/**
@@ -696,8 +697,8 @@ struct DictFilegroupInfo {
struct File {
char FileName[PATH_MAX];
Uint32 FileType;
Uint32 FileNo;
Uint32 FileId;
Uint32 FileVersion;
Uint32 FilegroupId;
Uint32 FilegroupVersion;
Uint32 FileSizeHi;
@@ -66,7 +66,8 @@ struct DropFilegroupRef {
Busy = 701,
NotMaster = 702,
NoSuchFilegroup = 767,
FilegroupInUse = 768
FilegroupInUse = 768,
InvalidSchemaObjectVersion = 774
};

Uint32 senderData;
@@ -150,7 +151,8 @@ struct DropFileRef {
NoError = 0,
Busy = 701,
NoSuchFile = 766,
DropUndoFileNotSupported = 769
DropUndoFileNotSupported = 769,
InvalidSchemaObjectVersion = 774
};

Uint32 senderData;
@@ -216,8 +216,8 @@ SimpleProperties::SP2StructMapping
DictFilegroupInfo::FileMapping[] = {
DFGIMAPS(File, FileName, FileName, 0, PATH_MAX),
DFGIMAP2(File, FileType, FileType, 0, 1),
DFGIMAP(File, FileNo, FileNo),
DFGIMAP(File, FileId, FileId),
DFGIMAP(File, FileVersion, FileVersion),
DFGIMAP(File, FileFGroupId, FilegroupId),
DFGIMAP(File, FileFGroupVersion, FilegroupVersion),
DFGIMAP(File, FileSizeHi, FileSizeHi),
@@ -254,8 +254,8 @@ void
DictFilegroupInfo::File::init(){
memset(FileName, sizeof(FileName), 0);
FileType = ~0;
FileNo = ~0;
FileId = ~0;
FileVersion = ~0;
FilegroupId = ~0;
FilegroupVersion = ~0;
FileSizeHi = 0;
@@ -143,12 +143,13 @@ Backup::execREAD_CONFIG_REQ(Signal* signal)
m_ctx.m_config.getOwnConfigIterator();
ndbrequire(p != 0);

Uint32 noBackups = 0, noTables = 0, noAttribs = 0;
Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &m_diskless));
ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
// ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, &noFrags));

noAttribs++; //RT 527 bug fix
@@ -158,9 +159,7 @@ Backup::execREAD_CONFIG_REQ(Signal* signal)
c_tablePool.setSize(noBackups * noTables + 1);
c_attributePool.setSize(noBackups * noAttribs + MAX_ATTRIBUTES_IN_TABLE);
c_triggerPool.setSize(noBackups * 3 * noTables);

// 2 = no of replicas
c_fragmentPool.setSize(noBackups * NO_OF_FRAG_PER_NODE * noTables + 1);
c_fragmentPool.setSize(noBackups * noFrags + 1);

Uint32 szDataBuf = (2 * 1024 * 1024);
Uint32 szLogBuf = (2 * 1024 * 1024);
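The fragment pool is now sized from the configured fragment-record count (CFG_DIH_FRAG_CONNECT) instead of a hard-coded replicas-times-tables estimate. The sizing rule reduces to the following (illustrative helper, not a real Backup method):

    /* one fragment record per configured fragment per concurrent backup,
       plus one spare, matching the setSize() call above */
    static unsigned fragment_pool_size(unsigned no_backups, unsigned no_frags)
    {
      return no_backups * no_frags + 1;
    }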
@@ -631,7 +631,8 @@ Dbdict::packFileIntoPages(SimpleProperties::Writer & w,
f.FileSizeHi = (f_ptr.p->m_file_size >> 32);
f.FileSizeLo = (f_ptr.p->m_file_size & 0xFFFFFFFF);
f.FileFreeExtents= free_extents;
f.FileNo = f_ptr.p->key;
f.FileId = f_ptr.p->key;
f.FileVersion = f_ptr.p->m_version;

FilegroupPtr lfg_ptr;
ndbrequire(c_filegroup_hash.find(lfg_ptr, f.FilegroupId));
@@ -13588,6 +13589,13 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
break;
}

if (file_ptr.p->m_version != version)
{
ref->errorCode = DropFileRef::InvalidSchemaObjectVersion;
ref->errorLine = __LINE__;
break;
}

Ptr<SchemaTransaction> trans_ptr;
if (! c_Trans.seize(trans_ptr))
{
@@ -13663,6 +13671,13 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
break;
}

if (filegroup_ptr.p->m_version != version)
{
ref->errorCode = DropFilegroupRef::InvalidSchemaObjectVersion;
ref->errorLine = __LINE__;
break;
}

Ptr<SchemaTransaction> trans_ptr;
if (! c_Trans.seize(trans_ptr))
{
@@ -15095,6 +15110,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
filePtr.p->m_obj_ptr_i = obj_ptr.i;
filePtr.p->m_filegroup_id = f.FilegroupId;
filePtr.p->m_type = f.FileType;
filePtr.p->m_version = op->m_obj_version;

obj_ptr.p->m_id = op->m_obj_id;
obj_ptr.p->m_type = f.FileType;
@@ -535,6 +535,7 @@ public:

Uint32 key;
Uint32 m_magic;
Uint32 m_version;
Uint32 m_obj_ptr_i;
Uint32 m_filegroup_id;
Uint32 m_type;
@@ -4625,12 +4625,26 @@ NdbDictInterface::get_filegroup(NdbFilegroupImpl & dst,
ptr[0].p = (Uint32*)name;
ptr[0].sz = (strLen + 3)/4;

#ifndef IGNORE_VALGRIND_WARNINGS
if (strLen & 3)
{
Uint32 pad = 0;
m_buffer.clear();
m_buffer.append(name, strLen);
m_buffer.append(&pad, 4);
ptr[0].p = (Uint32*)m_buffer.get_data();
}
#endif

int r = dictSignal(&tSignal, ptr, 1,
-1, // any node
WAIT_GET_TAB_INFO_REQ,
DICT_WAITFOR_TIMEOUT, 100);
if (r)
{
dst.m_id = -1;
dst.m_version = ~0;

DBUG_PRINT("info", ("get_filegroup failed dictSignal"));
DBUG_RETURN(-1);
}
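The new #ifndef IGNORE_VALGRIND_WARNINGS block pads the name out to a whole number of 32-bit words: the signal layer sends (strLen + 3) / 4 words, so without padding the last word would carry up to three uninitialized bytes and trip valgrind. A hedged stand-alone illustration of the same idea:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: copy len bytes into a word buffer, zero-padding the tail
       so that sending (len + 3) / 4 whole words reads no garbage. */
    static size_t pack_padded(uint32_t *dst, const char *src, size_t len)
    {
      size_t words= (len + 3) / 4;
      memset(dst, 0, words * 4);   /* pre-zero, including padding bytes */
      memcpy(dst, src, len);
      return words;
    }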
@@ -4767,6 +4781,17 @@ NdbDictInterface::get_file(NdbFileImpl & dst,
ptr[0].p = (Uint32*)name;
ptr[0].sz = (strLen + 3)/4;

#ifndef IGNORE_VALGRIND_WARNINGS
if (strLen & 3)
{
Uint32 pad = 0;
m_buffer.clear();
m_buffer.append(name, strLen);
m_buffer.append(&pad, 4);
ptr[0].p = (Uint32*)m_buffer.get_data();
}
#endif

int r = dictSignal(&tSignal, ptr, 1,
node,
WAIT_GET_TAB_INFO_REQ,
@@ -4834,7 +4859,8 @@ NdbDictInterface::parseFileInfo(NdbFileImpl &dst,
}

dst.m_type= (NdbDictionary::Object::Type)f.FileType;
dst.m_id= f.FileNo;
dst.m_id= f.FileId;
dst.m_version = f.FileVersion;

dst.m_size= ((Uint64)f.FileSizeHi << 32) | (f.FileSizeLo);
dst.m_path.assign(f.FileName);
@@ -386,6 +386,7 @@ ErrorBundle ErrorCodes[] = {
{ 768, DMEC, SE, "Cant drop filegroup, filegroup is used" },
{ 769, DMEC, SE, "Drop undofile not supported, drop logfile group instead" },
{ 770, DMEC, SE, "Cant drop file, file is used" },
{ 774, DMEC, SE, "Invalid schema object for drop" },
{ 241, HA_ERR_TABLE_DEF_CHANGED, SE, "Invalid schema object version" },
{ 283, HA_ERR_NO_SUCH_TABLE, SE, "Table is being dropped" },
{ 284, HA_ERR_TABLE_DEF_CHANGED, SE, "Table not defined in transaction coordinator" },