diff --git a/BUILD/compile-ndb-autotest b/BUILD/compile-ndb-autotest new file mode 100755 index 00000000000..3ef0091c155 --- /dev/null +++ b/BUILD/compile-ndb-autotest @@ -0,0 +1,9 @@ +#! /bin/sh + +path=`dirname $0` +. "$path/SETUP.sh" + +extra_flags="$fast_cflags $max_cflags -g" +extra_configs="$max_configs --with-ndb-test --with-ndb-ccflags=-DERROR_INSERT" + +. "$path/FINISH.sh" diff --git a/mysql-test/r/ndb_dd_backuprestore.result b/mysql-test/r/ndb_dd_backuprestore.result index e7568e4ce49..cb6c62b16da 100644 --- a/mysql-test/r/ndb_dd_backuprestore.result +++ b/mysql-test/r/ndb_dd_backuprestore.result @@ -175,7 +175,7 @@ t1 CREATE TABLE `t1` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 4 +) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */ SHOW CREATE TABLE test.t2; Table Create Table t2 CREATE TABLE `t2` ( @@ -184,7 +184,7 @@ t2 CREATE TABLE `t2` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t3; Table Create Table t3 CREATE TABLE `t3` ( @@ -193,7 +193,7 @@ t3 CREATE TABLE `t3` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) +) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t4; Table Create Table t4 CREATE TABLE `t4` ( @@ -202,7 +202,7 @@ t4 CREATE TABLE `t4` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 2 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */ SHOW CREATE TABLE test.t5; Table Create Table t5 CREATE TABLE `t5` ( @@ -211,7 +211,7 @@ t5 CREATE TABLE `t5` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t6; Table Create Table t6 CREATE TABLE `t6` ( @@ -220,7 +220,7 @@ t6 CREATE TABLE `t6` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS 
THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ SELECT * FROM information_schema.partitions WHERE table_name= 't1'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default @@ -341,7 +341,7 @@ t1 CREATE TABLE `t1` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 4 +) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */ SHOW CREATE TABLE test.t2; Table Create Table t2 CREATE TABLE `t2` ( @@ -350,7 +350,7 @@ t2 CREATE TABLE `t2` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t3; Table Create Table t3 CREATE TABLE `t3` ( @@ -359,7 +359,7 @@ t3 CREATE TABLE `t3` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) +) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t4; Table Create Table t4 CREATE TABLE `t4` ( @@ -368,7 +368,7 @@ t4 CREATE TABLE `t4` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 2 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */ SHOW CREATE TABLE test.t5; Table Create Table t5 CREATE TABLE `t5` ( @@ -377,7 +377,7 @@ t5 CREATE TABLE `t5` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t6; Table Create Table t6 CREATE TABLE `t6` ( @@ -386,7 +386,7 @@ t6 CREATE TABLE `t6` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION 
BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ SELECT * FROM information_schema.partitions WHERE table_name= 't1'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default diff --git a/mysql-test/r/ndb_dd_ddl.result b/mysql-test/r/ndb_dd_ddl.result index 47b95214024..9fff9f06f2a 100644 --- a/mysql-test/r/ndb_dd_ddl.result +++ b/mysql-test/r/ndb_dd_ddl.result @@ -4,12 +4,12 @@ CREATE DATABASE mysqltest; **** Begin Duplicate Statement Testing **** CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE = 1M ENGINE=NDB; CREATE LOGFILE GROUP lg2 ADD UNDOFILE 'undofile2.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; ERROR HY000: Failed to create LOGFILE GROUP @@ -19,35 +19,35 @@ Error 1296 Got error 1514 'Currently there is a limit of one logfile group' from Error 1515 Failed to create LOGFILE GROUP CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE = 1M ENGINE=NDB; ERROR HY000: Failed to create LOGFILE GROUP ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE NDB; +INITIAL_SIZE 1M ENGINE NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE=NDB; +INITIAL_SIZE 1M ENGINE=NDB; ERROR HY000: Failed to alter: CREATE UNDOFILE CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ERROR HY000: Failed to create TABLESPACE ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; ERROR HY000: Failed to alter: CREATE DATAFILE CREATE TABLE mysqltest.t1 @@ -94,20 +94,20 @@ DROP DATABASE IF EXISTS mysqltest; **** Begin Statment CaSe Testing **** creaTE LOgfilE GrOuP lg1 adD undoFILE 'undofile.dat' -initiAL_siZE 16M +initiAL_siZE 1M UnDo_BuFfEr_SiZe = 1M ENGInE=NDb; altER LOgFiLE GrOUp lg1 AdD UnDOfILe 'uNdOfiLe02.daT' -INItIAl_SIzE 4M ENgINE nDB; +INItIAl_SIzE 1M ENgINE nDB; CrEAtE TABLEspaCE ts1 ADD DATAfilE 'datafile.dat' UsE LoGFiLE GRoUP lg1 -INITiaL_SizE 12M +INITiaL_SizE 1M ENGiNe NDb; AlTeR tAbLeSpAcE ts1 AdD DaTaFiLe 'dAtAfiLe2.daT' -InItIaL_SiZe 12M +InItIaL_SiZe 1M EnGiNe=NDB; CREATE TABLE t1 (pk1 int not null primary key, b int not null, c int not null) @@ -129,21 +129,21 @@ EnGiNe=nDb; **** Begin = And No = Testing **** CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE=16M +INITIAL_SIZE=1M UNDO_BUFFER_SIZE=1M ENGINE=NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE=4M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLE t1 (pk1 INT NOT NULL 
PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) @@ -165,21 +165,21 @@ ENGINE=NDB; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLE t1 (pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) @@ -199,3 +199,19 @@ ENGINE NDB; DROP LOGFILE GROUP lg1 ENGINE NDB; **** End = And No = **** +create table t1 (a int primary key) engine = myisam; +create logfile group lg1 add undofile 'MYSQLTEST_VARDIR/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb;; +ERROR HY000: Failed to create UNDOFILE +create logfile group lg1 +add undofile 'undofile.dat' +initial_size 1M +undo_buffer_size = 1M +engine=ndb; +create tablespace ts1 add datafile 'MYSQLTEST_VARDIR/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb;; +ERROR HY000: Failed to create DATAFILE +drop tablespace ts1 +engine ndb; +ERROR HY000: Failed to drop TABLESPACE +drop logfile group lg1 +engine ndb; +drop table t1; diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result index b936469ac14..fd793c4c2c7 100644 --- a/mysql-test/r/ndb_partition_key.result +++ b/mysql-test/r/ndb_partition_key.result @@ -78,7 +78,7 @@ t1 CREATE TABLE `t1` ( `c` int(11) NOT NULL DEFAULT '0', `d` int(11) DEFAULT NULL, PRIMARY KEY (`a`,`b`,`c`) USING HASH -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (b) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (b) */ DROP TABLE t1; CREATE TABLE t1 (a int not null primary key) PARTITION BY KEY(a) @@ -97,19 +97,19 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ alter table t1 engine=heap; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL -) ENGINE=MEMORY DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = MEMORY, PARTITION p1 ENGINE = MEMORY) +) ENGINE=MEMORY DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = MEMORY, PARTITION p1 ENGINE = MEMORY) */ alter table t1 engine=ndb; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ alter table t1 engine=heap remove partitioning; show create table t1; Table Create Table @@ -123,7 +123,7 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ alter table t1 
partition by key (a) (partition p0 engine=ndb, partition p1 engine=ndb); @@ -131,7 +131,7 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ alter table t1 remove partitioning; show create table t1; Table Create Table @@ -150,7 +150,7 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ drop table t1; CREATE TABLE t1 ( c1 MEDIUMINT NOT NULL AUTO_INCREMENT, diff --git a/mysql-test/r/ndb_partition_range.result b/mysql-test/r/ndb_partition_range.result index cb79f04873e..9cc9aa2cda9 100644 --- a/mysql-test/r/ndb_partition_range.result +++ b/mysql-test/r/ndb_partition_range.result @@ -115,7 +115,7 @@ t1 CREATE TABLE `t1` ( `c` int(11) NOT NULL, PRIMARY KEY (`b`), UNIQUE KEY `a` (`a`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (b) (PARTITION x1 VALUES LESS THAN (5) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (10) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (20) ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (b) (PARTITION x1 VALUES LESS THAN (5) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (10) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (20) ENGINE = ndbcluster) */ drop table t1; CREATE TABLE t1 (id MEDIUMINT NOT NULL, diff --git a/mysql-test/t/ndb_dd_ddl.test b/mysql-test/t/ndb_dd_ddl.test index 339f7bc2f22..95ad7f0d152 100644 --- a/mysql-test/t/ndb_dd_ddl.test +++ b/mysql-test/t/ndb_dd_ddl.test @@ -40,7 +40,7 @@ CREATE DATABASE mysqltest; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE = 1M ENGINE=NDB; @@ -48,7 +48,7 @@ ENGINE=NDB; --error ER_CREATE_FILEGROUP_FAILED CREATE LOGFILE GROUP lg2 ADD UNDOFILE 'undofile2.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; SHOW WARNINGS; @@ -56,42 +56,42 @@ SHOW WARNINGS; --error ER_CREATE_FILEGROUP_FAILED CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE = 1M ENGINE=NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE NDB; +INITIAL_SIZE 1M ENGINE NDB; --error ER_ALTER_FILEGROUP_FAILED ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE=NDB; +INITIAL_SIZE 1M ENGINE=NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; --error ER_CREATE_FILEGROUP_FAILED CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; --error ER_ALTER_FILEGROUP_FAILED ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; CREATE TABLE mysqltest.t1 @@ -155,23 +155,23 @@ DROP DATABASE IF EXISTS mysqltest; creaTE LOgfilE GrOuP lg1 adD undoFILE 'undofile.dat' -initiAL_siZE 16M +initiAL_siZE 1M UnDo_BuFfEr_SiZe 
= 1M ENGInE=NDb; altER LOgFiLE GrOUp lg1 AdD UnDOfILe 'uNdOfiLe02.daT' -INItIAl_SIzE 4M ENgINE nDB; +INItIAl_SIzE 1M ENgINE nDB; CrEAtE TABLEspaCE ts1 ADD DATAfilE 'datafile.dat' UsE LoGFiLE GRoUP lg1 -INITiaL_SizE 12M +INITiaL_SizE 1M ENGiNe NDb; AlTeR tAbLeSpAcE ts1 AdD DaTaFiLe 'dAtAfiLe2.daT' -InItIaL_SiZe 12M +InItIaL_SiZe 1M EnGiNe=NDB; CREATE TABLE t1 @@ -203,24 +203,24 @@ EnGiNe=nDb; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE=16M +INITIAL_SIZE=1M UNDO_BUFFER_SIZE=1M ENGINE=NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE=4M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLE t1 @@ -250,24 +250,24 @@ ENGINE=NDB; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLE t1 @@ -297,5 +297,32 @@ ENGINE NDB; --echo **** End = And No = **** ############ End = And No = ################## -# End 5.1 test +### +# +# bug#16341 +create table t1 (a int primary key) engine = myisam; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--error ER_CREATE_FILEGROUP_FAILED +--eval create logfile group lg1 add undofile '$MYSQLTEST_VARDIR/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb; + +create logfile group lg1 +add undofile 'undofile.dat' +initial_size 1M +undo_buffer_size = 1M +engine=ndb; + +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--error ER_CREATE_FILEGROUP_FAILED +--eval create tablespace ts1 add datafile '$MYSQLTEST_VARDIR/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb; + +--error ER_DROP_FILEGROUP_FAILED +drop tablespace ts1 +engine ndb; + +drop logfile group lg1 +engine ndb; + +drop table t1; + +# End 5.1 test diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 997072bd2a5..361ec9b0d2b 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -9628,10 +9628,22 @@ static bool adjusted_frag_count(uint no_fragments, uint no_nodes, return (reported_frags < no_fragments); } -int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows) +int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *info) { + ha_rows max_rows, min_rows; + if (info) + { + max_rows= info->max_rows; + min_rows= info->min_rows; + } + else + { + max_rows= table_share->max_rows; + min_rows= table_share->min_rows; + } uint reported_frags; - uint no_fragments= get_no_fragments(max_rows); + uint no_fragments= + get_no_fragments(max_rows >= min_rows ? 
max_rows : min_rows); uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); if (adjusted_frag_count(no_fragments, no_nodes, reported_frags)) { @@ -9879,7 +9891,17 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, } while (++i < part_info->no_parts); tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions); tab->setLinearFlag(part_info->linear_hash_ind); - tab->setMaxRows(table->s->max_rows); + { + ha_rows max_rows= table_share->max_rows; + ha_rows min_rows= table_share->min_rows; + if (max_rows < min_rows) + max_rows= min_rows; + if (max_rows != (ha_rows)0) /* default setting, don't set fragmentation */ + { + tab->setMaxRows(max_rows); + tab->setMinRows(min_rows); + } + } tab->setTablespaceNames(ts_names, fd_index*sizeof(char*)); tab->setFragmentCount(fd_index); tab->setFragmentData(&frag_data, fd_index*2); @@ -9982,7 +10004,8 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - + + NdbError err; NDBDICT *dict = ndb->getDictionary(); int error; const char * errmsg; @@ -9995,6 +10018,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) NdbDictionary::Tablespace ndb_ts; NdbDictionary::Datafile ndb_df; + NdbDictionary::ObjectId objid; if (set_up_tablespace(info, &ndb_ts)) { DBUG_RETURN(1); @@ -10004,7 +10028,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) DBUG_RETURN(1); } errmsg= "TABLESPACE"; - if (dict->createTablespace(ndb_ts)) + if (dict->createTablespace(ndb_ts, &objid)) { DBUG_PRINT("error", ("createTablespace returned %d", error)); goto ndberror; @@ -10013,8 +10037,17 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) errmsg= "DATAFILE"; if (dict->createDatafile(ndb_df)) { + err= dict->getNdbError(); + NdbDictionary::Tablespace tmp= dict->getTablespace(ndb_ts.getName()); + if (dict->getNdbError().code == 0 && + tmp.getObjectId() == objid.getObjectId() && + tmp.getObjectVersion() == objid.getObjectVersion()) + { + dict->dropTablespace(tmp); + } + DBUG_PRINT("error", ("createDatafile returned %d", error)); - goto ndberror; + goto ndberror2; } is_tablespace= 1; break; @@ -10068,6 +10101,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) error= ER_CREATE_FILEGROUP_FAILED; NdbDictionary::LogfileGroup ndb_lg; NdbDictionary::Undofile ndb_uf; + NdbDictionary::ObjectId objid; if (info->undo_file_name == NULL) { /* @@ -10080,7 +10114,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) DBUG_RETURN(1); } errmsg= "LOGFILE GROUP"; - if (dict->createLogfileGroup(ndb_lg)) + if (dict->createLogfileGroup(ndb_lg, &objid)) { goto ndberror; } @@ -10092,7 +10126,15 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) errmsg= "UNDOFILE"; if (dict->createUndofile(ndb_uf)) { - goto ndberror; + err= dict->getNdbError(); + NdbDictionary::LogfileGroup tmp= dict->getLogfileGroup(ndb_lg.getName()); + if (dict->getNdbError().code == 0 && + tmp.getObjectId() == objid.getObjectId() && + tmp.getObjectVersion() == objid.getObjectVersion()) + { + dict->dropLogfileGroup(tmp); + } + goto ndberror2; } break; } @@ -10169,7 +10211,8 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) DBUG_RETURN(FALSE); ndberror: - const NdbError err= dict->getNdbError(); + err= dict->getNdbError(); +ndberror2: ERR_PRINT(err); ndb_to_mysql_error(&err); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 2e78a00ef94..9dc9ee79755 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ 
-651,7 +651,7 @@ class ha_ndbcluster: public handler int create(const char *name, TABLE *form, HA_CREATE_INFO *info); int create_handler_files(const char *file, const char *old_name, int action_flag, HA_CREATE_INFO *info); - int get_default_no_partitions(ulonglong max_rows); + int get_default_no_partitions(HA_CREATE_INFO *info); bool get_no_parts(const char *name, uint *no_parts); void set_auto_partitions(partition_info *part_info); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 40a98563613..b18fa8ec931 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -1770,8 +1770,31 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, /* acknowledge this query _after_ epoch completion */ post_epoch_unlock= 1; break; - case SOT_CREATE_TABLE: case SOT_TRUNCATE_TABLE: + { + char key[FN_REFLEN]; + build_table_filename(key, sizeof(key), schema->db, schema->name, ""); + NDB_SHARE *share= get_share(key, 0, FALSE, FALSE); + // invalidation already handled by binlog thread + if (!share || !share->op) + { + { + injector_ndb->setDatabaseName(schema->db); + Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(), + schema->name); + ndbtab_g.invalidate(); + } + TABLE_LIST table_list; + bzero((char*) &table_list,sizeof(table_list)); + table_list.db= schema->db; + table_list.alias= table_list.table_name= schema->name; + close_cached_tables(thd, 0, &table_list, FALSE); + } + if (share) + free_share(&share); + } + // fall through + case SOT_CREATE_TABLE: pthread_mutex_lock(&LOCK_open); if (ndb_create_table_from_engine(thd, schema->db, schema->name)) { diff --git a/sql/handler.h b/sql/handler.h index fb5f0f4ba05..94f4519a2e7 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1335,7 +1335,7 @@ public: virtual const char *table_type() const =0; virtual const char **bas_ext() const =0; - virtual int get_default_no_partitions(ulonglong max_rows) { return 1;} + virtual int get_default_no_partitions(HA_CREATE_INFO *info) { return 1;} virtual void set_auto_partitions(partition_info *part_info) { return; } virtual bool get_no_parts(const char *name, uint *no_parts) diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 39c8d976732..286637bd9aa 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -153,7 +153,7 @@ char *partition_info::create_subpartition_name(uint subpart_no, SYNOPSIS set_up_default_partitions() file A reference to a handler of the table - max_rows Maximum number of rows stored in the table + info Create info start_no Starting partition number RETURN VALUE @@ -169,7 +169,8 @@ char *partition_info::create_subpartition_name(uint subpart_no, The external routine needing this code is check_partition_info */ -bool partition_info::set_up_default_partitions(handler *file, ulonglong max_rows, +bool partition_info::set_up_default_partitions(handler *file, + HA_CREATE_INFO *info, uint start_no) { uint i; @@ -188,7 +189,7 @@ bool partition_info::set_up_default_partitions(handler *file, ulonglong max_rows goto end; } if (no_parts == 0) - no_parts= file->get_default_no_partitions(max_rows); + no_parts= file->get_default_no_partitions(info); if (unlikely(no_parts > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); @@ -228,7 +229,7 @@ end: SYNOPSIS set_up_default_subpartitions() file A reference to a handler of the table - max_rows Maximum number of rows stored in the table + info Create info RETURN VALUE TRUE Error, attempted default values not possible @@ -244,7 +245,7 @@ end: */ bool 
partition_info::set_up_default_subpartitions(handler *file, - ulonglong max_rows) + HA_CREATE_INFO *info) { uint i, j; char *default_name, *name_ptr; @@ -254,7 +255,7 @@ bool partition_info::set_up_default_subpartitions(handler *file, DBUG_ENTER("partition_info::set_up_default_subpartitions"); if (no_subparts == 0) - no_subparts= file->get_default_no_partitions(max_rows); + no_subparts= file->get_default_no_partitions(info); if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); @@ -296,7 +297,7 @@ end: SYNOPSIS set_up_defaults_for_partitioning() file A reference to a handler of the table - max_rows Maximum number of rows stored in the table + info Create info start_no Starting partition number RETURN VALUE @@ -309,7 +310,7 @@ end: */ bool partition_info::set_up_defaults_for_partitioning(handler *file, - ulonglong max_rows, + HA_CREATE_INFO *info, uint start_no) { DBUG_ENTER("partition_info::set_up_defaults_for_partitioning"); @@ -318,10 +319,10 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file, { default_partitions_setup= TRUE; if (use_default_partitions) - DBUG_RETURN(set_up_default_partitions(file, max_rows, start_no)); + DBUG_RETURN(set_up_default_partitions(file, info, start_no)); if (is_sub_partitioned() && use_default_subpartitions) - DBUG_RETURN(set_up_default_subpartitions(file, max_rows)); + DBUG_RETURN(set_up_default_subpartitions(file, info)); } DBUG_RETURN(FALSE); } @@ -692,7 +693,7 @@ end: SYNOPSIS check_partition_info() file A reference to a handler of the table - max_rows Maximum number of rows stored in the table + info Create info engine_type Return value for used engine in partitions RETURN VALUE @@ -708,7 +709,7 @@ end: */ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, - handler *file, ulonglong max_rows) + handler *file, HA_CREATE_INFO *info) { handlerton **engine_array= NULL; uint part_count= 0; @@ -743,7 +744,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, my_error(ER_SUBPARTITION_ERROR, MYF(0)); goto end; } - if (unlikely(set_up_defaults_for_partitioning(file, max_rows, (uint)0))) + if (unlikely(set_up_defaults_for_partitioning(file, info, (uint)0))) goto end; tot_partitions= get_tot_partitions(); if (unlikely(tot_partitions > MAX_PARTITIONS)) diff --git a/sql/partition_info.h b/sql/partition_info.h index 3d8c6a40221..d938d21653a 100644 --- a/sql/partition_info.h +++ b/sql/partition_info.h @@ -243,21 +243,21 @@ public: return no_parts * (is_sub_partitioned() ? 
no_subparts : 1); } - bool set_up_defaults_for_partitioning(handler *file, ulonglong max_rows, + bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info, uint start_no); char *has_unique_names(); static bool check_engine_mix(handlerton **engine_array, uint no_parts); bool check_range_constants(); bool check_list_constants(); bool check_partition_info(THD *thd, handlerton **eng_type, - handler *file, ulonglong max_rows); + handler *file, HA_CREATE_INFO *info); void print_no_partition_found(TABLE *table); private: static int list_part_cmp(const void* a, const void* b); static int list_part_cmp_unsigned(const void* a, const void* b); - bool set_up_default_partitions(handler *file, ulonglong max_rows, + bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info, uint start_no); - bool set_up_default_subpartitions(handler *file, ulonglong max_rows); + bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info); char *create_default_partition_names(uint part_no, uint no_parts, uint start_no); char *create_subpartition_name(uint subpart_no, const char *part_name); diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 8eb9bfa1782..44c0b8ffcd9 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -3823,14 +3823,13 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info, if (alter_info->flags == ALTER_TABLE_REORG) { uint new_part_no, curr_part_no; - ulonglong max_rows= table->s->max_rows; if (tab_part_info->part_type != HASH_PARTITION || tab_part_info->use_default_no_partitions) { my_error(ER_REORG_NO_PARAM_ERROR, MYF(0)); DBUG_RETURN(TRUE); } - new_part_no= table->file->get_default_no_partitions(max_rows); + new_part_no= table->file->get_default_no_partitions(create_info); curr_part_no= tab_part_info->no_parts; if (new_part_no == curr_part_no) { diff --git a/sql/sql_partition.h b/sql/sql_partition.h index 845180ad592..e34d71dfdc5 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -65,7 +65,7 @@ int get_part_for_delete(const byte *buf, const byte *rec0, partition_info *part_info, uint32 *part_id); void prune_partition_set(const TABLE *table, part_id_range *part_spec); bool check_partition_info(partition_info *part_info,handlerton **eng_type, - TABLE *table, handler *file, ulonglong max_rows); + TABLE *table, handler *file, HA_CREATE_INFO *info); bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind); char *generate_partition_syntax(partition_info *part_info, uint *buf_length, bool use_sql_alloc, diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 2ecbc94541a..7d8631e3236 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3185,8 +3185,7 @@ bool mysql_create_table_internal(THD *thd, } DBUG_PRINT("info", ("db_type = %d", ha_legacy_type(part_info->default_engine_type))); - if (part_info->check_partition_info(thd, &engine_type, file, - create_info->max_rows)) + if (part_info->check_partition_info(thd, &engine_type, file, create_info)) goto err; part_info->default_engine_type= engine_type; @@ -3224,7 +3223,8 @@ bool mysql_create_table_internal(THD *thd, */ if (part_info->use_default_no_partitions && part_info->no_parts && - (int)part_info->no_parts != file->get_default_no_partitions(0ULL)) + (int)part_info->no_parts != + file->get_default_no_partitions(create_info)) { uint i; List_iterator part_it(part_info->partitions); @@ -3237,10 +3237,10 @@ bool mysql_create_table_internal(THD *thd, part_info->use_default_no_subpartitions && part_info->no_subparts && (int)part_info->no_subparts != - 
file->get_default_no_partitions(0ULL)) + file->get_default_no_partitions(create_info)) { DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE); - part_info->no_subparts= file->get_default_no_partitions(0ULL); + part_info->no_subparts= file->get_default_no_partitions(create_info); } } else if (create_info->db_type != engine_type) diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h index 950b2629d9e..4b2c69e4bc6 100644 --- a/storage/ndb/include/kernel/GlobalSignalNumbers.h +++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h @@ -691,6 +691,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_BACKUP_FRAGMENT_REF 546 #define GSN_BACKUP_FRAGMENT_CONF 547 +#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575 + #define GSN_STOP_BACKUP_REQ 548 #define GSN_STOP_BACKUP_REF 549 #define GSN_STOP_BACKUP_CONF 550 @@ -740,7 +742,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_SUB_STOP_REQ 572 #define GSN_SUB_STOP_REF 573 #define GSN_SUB_STOP_CONF 574 -/* 575 unused */ +/* 575 used */ #define GSN_SUB_CREATE_REQ 576 #define GSN_SUB_CREATE_REF 577 #define GSN_SUB_CREATE_CONF 578 diff --git a/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp b/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp index d3d3f79f310..fe3f48444ec 100644 --- a/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp +++ b/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp @@ -31,7 +31,8 @@ private: BUFFER_UNDERFLOW = 1, BUFFER_FULL_SCAN = 2, BUFFER_FULL_FRAG_COMPLETE = 3, - BUFFER_FULL_META = 4 + BUFFER_FULL_META = 4, + BACKUP_FRAGMENT_INFO = 5 }; }; diff --git a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp index c7bfd07a63d..82fd24558b7 100644 --- a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp +++ b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp @@ -252,15 +252,31 @@ class BackupFragmentConf { friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 6 ); + STATIC_CONST( SignalLength = 8 ); private: Uint32 backupId; Uint32 backupPtr; Uint32 tableId; Uint32 fragmentNo; - Uint32 noOfRecords; - Uint32 noOfBytes; + Uint32 noOfRecordsLow; + Uint32 noOfBytesLow; + Uint32 noOfRecordsHigh; + Uint32 noOfBytesHigh; +}; + +class BackupFragmentCompleteRep { +public: + STATIC_CONST( SignalLength = 8 ); + + Uint32 backupId; + Uint32 backupPtr; + Uint32 tableId; + Uint32 fragmentNo; + Uint32 noOfTableRowsLow; + Uint32 noOfFragmentRowsLow; + Uint32 noOfTableRowsHigh; + Uint32 noOfFragmentRowsHigh; }; class StopBackupReq { diff --git a/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp b/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp index e1b8c6203a1..9e34ea3a211 100644 --- a/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp +++ b/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp @@ -201,17 +201,19 @@ class BackupCompleteRep { friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size ); + STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size ); private: Uint32 senderData; Uint32 backupId; Uint32 startGCP; Uint32 stopGCP; - Uint32 noOfBytes; - Uint32 noOfRecords; + Uint32 noOfBytesLow; + Uint32 noOfRecordsLow; Uint32 noOfLogBytes; Uint32 noOfLogRecords; NdbNodeBitmask nodes; + Uint32 noOfBytesHigh; + Uint32 noOfRecordsHigh; }; /** diff --git 
a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp index 810f9cdfd03..78216249a72 100644 --- a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp @@ -193,6 +193,7 @@ struct CreateFileConf { Uint32 senderData; Uint32 senderRef; Uint32 fileId; + Uint32 fileVersion; }; #endif diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp index 42e06fba381..1382b09eabf 100644 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -117,6 +117,7 @@ public: CustomTriggerId = 25, FrmLen = 26, FrmData = 27, + FragmentCount = 128, // No of fragments in table (!fragment replicas) FragmentDataLen = 129, FragmentData = 130, // CREATE_FRAGMENTATION reply @@ -132,6 +133,8 @@ public: MaxRowsHigh = 140, DefaultNoPartFlag = 141, LinearHashFlag = 142, + MinRowsLow = 143, + MinRowsHigh = 144, RowGCIFlag = 150, RowChecksumFlag = 151, @@ -312,8 +315,6 @@ public: Uint32 CustomTriggerId; Uint32 TablespaceId; Uint32 TablespaceVersion; - Uint32 MaxRowsLow; - Uint32 MaxRowsHigh; Uint32 DefaultNoPartFlag; Uint32 LinearHashFlag; /* @@ -328,6 +329,12 @@ public: Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES]; Uint32 FragmentDataLen; Uint16 FragmentData[3*MAX_NDB_PARTITIONS]; + + Uint32 MaxRowsLow; + Uint32 MaxRowsHigh; + Uint32 MinRowsLow; + Uint32 MinRowsHigh; + Uint32 TablespaceDataLen; Uint32 TablespaceData[2*MAX_NDB_PARTITIONS]; Uint32 RangeListDataLen; diff --git a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp index 05845d5fd21..97481ea2c3e 100644 --- a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp +++ b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp @@ -106,7 +106,7 @@ class LqhFragReq { friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 20 ); + STATIC_CONST( SignalLength = 24 ); enum RequestInfo { CreateInRunning = 0x8000000, @@ -123,7 +123,7 @@ private: Uint32 kValue; Uint32 schemaVersion; Uint32 nextLCP; - Uint32 noOfNewAttr; // noOfCharsets in upper half + Uint32 noOfCharsets; Uint32 startGci; Uint32 tableType; // DictTabInfo::TableType Uint32 primaryTableId; // table of index or RNIL @@ -140,6 +140,10 @@ private: Uint8 checksumIndicator; Uint8 GCPIndicator; Uint32 logPartId; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; }; class LqhFragConf { diff --git a/storage/ndb/include/kernel/signaldata/TupFrag.hpp b/storage/ndb/include/kernel/signaldata/TupFrag.hpp index d8f2139de61..bc44877bb1c 100644 --- a/storage/ndb/include/kernel/signaldata/TupFrag.hpp +++ b/storage/ndb/include/kernel/signaldata/TupFrag.hpp @@ -30,7 +30,7 @@ class TupFragReq { friend class Dblqh; friend class Dbtup; public: - STATIC_CONST( SignalLength = 15 ); + STATIC_CONST( SignalLength = 17 ); private: Uint32 userPtr; Uint32 userRef; @@ -38,7 +38,16 @@ private: Uint32 tableId; Uint32 noOfAttr; Uint32 fragId; - Uint32 todo[8]; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; + Uint32 noOfNullAttr; + Uint32 schemaVersion; + Uint32 noOfKeyAttr; + Uint32 noOfCharsets; + Uint32 checksumIndicator; + Uint32 globalCheckpointIdIndicator; Uint32 tablespaceid; }; diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 27e0aede36d..6e572635247 
100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -163,6 +163,31 @@ public: }; }; + class Dictionary; // Forward declaration + + class ObjectId : public Object + { + public: + ObjectId(); + virtual ~ObjectId(); + + /** + * Get status of object + */ + virtual Status getObjectStatus() const; + + /** + * Get version of object + */ + virtual int getObjectVersion() const; + + virtual int getObjectId() const; + + private: + friend class Dictionary; + class NdbDictObjectImpl & m_impl; + }; + class Table; // forward declaration class Tablespace; // forward declaration // class NdbEventOperation; // forward declaration @@ -805,13 +830,6 @@ public: */ virtual int getObjectVersion() const; - /** - * Set/Get Maximum number of rows in table (only used to calculate - * number of partitions). - */ - void setMaxRows(Uint64 maxRows); - Uint64 getMaxRows() const; - /** * Set/Get indicator if default number of partitions is used in table. */ @@ -863,6 +881,20 @@ public: */ void setObjectType(Object::Type type); + /** + * Set/Get Maximum number of rows in table (only used to calculate + * number of partitions). + */ + void setMaxRows(Uint64 maxRows); + Uint64 getMaxRows() const; + + /** + * Set/Get Minimum number of rows in table (only used to calculate + * number of partitions). + */ + void setMinRows(Uint64 minRows); + Uint64 getMinRows() const; + /** @} *******************************************************************/ /** @@ -1781,20 +1813,20 @@ public: * @{ */ - int createLogfileGroup(const LogfileGroup &); + int createLogfileGroup(const LogfileGroup &, ObjectId* = 0); int dropLogfileGroup(const LogfileGroup&); LogfileGroup getLogfileGroup(const char * name); - int createTablespace(const Tablespace &); + int createTablespace(const Tablespace &, ObjectId* = 0); int dropTablespace(const Tablespace&); Tablespace getTablespace(const char * name); Tablespace getTablespace(Uint32 tablespaceId); - int createDatafile(const Datafile &, bool overwrite_existing = false); + int createDatafile(const Datafile &, bool overwrite_existing = false, ObjectId* = 0); int dropDatafile(const Datafile&); Datafile getDatafile(Uint32 node, const char * path); - int createUndofile(const Undofile &, bool overwrite_existing = false); + int createUndofile(const Undofile &, bool overwrite_existing = false, ObjectId * = 0); int dropUndofile(const Undofile&); Undofile getUndofile(Uint32 node, const char * path); diff --git a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp index 8d624ea311e..bc321203590 100644 --- a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp +++ b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp @@ -92,8 +92,10 @@ printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ BackupFragmentConf* sig = (BackupFragmentConf*)data; fprintf(out, " backupPtr: %d backupId: %d\n", sig->backupPtr, sig->backupId); - fprintf(out, " tableId: %d fragmentNo: %d records: %d bytes: %d\n", - sig->tableId, sig->fragmentNo, sig->noOfRecords, sig->noOfBytes); + fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n", + sig->tableId, sig->fragmentNo, + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp index 
4b0a0e07b66..27fed22ac72 100644 --- a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp +++ b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp @@ -72,11 +72,11 @@ printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ bool printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ BackupCompleteRep* sig = (BackupCompleteRep*)data; - fprintf(out, " senderData: %d backupId: %d records: %d bytes: %d\n", + fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n", sig->senderData, sig->backupId, - sig->noOfRecords, - sig->noOfBytes); + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index e46fc2ff7da..2ed97892488 100644 --- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -62,6 +62,10 @@ DictTabInfo::TableMapping[] = { DTIMAP(Table, TablespaceVersion, TablespaceVersion), DTIMAP(Table, RowGCIFlag, RowGCIFlag), DTIMAP(Table, RowChecksumFlag, RowChecksumFlag), + DTIMAP(Table, MaxRowsLow, MaxRowsLow), + DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), + DTIMAP(Table, MinRowsLow, MinRowsLow), + DTIMAP(Table, MinRowsHigh, MinRowsHigh), DTIBREAK(AttributeName) }; @@ -154,6 +158,11 @@ DictTabInfo::Table::init(){ RowGCIFlag = ~0; RowChecksumFlag = ~0; + + MaxRowsLow = 0; + MaxRowsHigh = 0; + MinRowsLow = 0; + MinRowsHigh = 0; } void diff --git a/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp index 6d727959a67..3175582c3a2 100644 --- a/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp +++ b/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp @@ -37,8 +37,10 @@ printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n", sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength); - fprintf(output, " noOfPagesToPreAllocate: %d schemaVersion: %d nextLCP: %d\n", - sig->noOfPagesToPreAllocate, sig->schemaVersion, sig->nextLCP); + fprintf(output, " maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n", + sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh); + fprintf(output, " schemaVersion: %d nextLCP: %d\n", + sig->schemaVersion, sig->nextLCP); return true; } diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 07df1db862b..41f140ab5ae 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -170,6 +170,65 @@ Backup::execCONTINUEB(Signal* signal) const Uint32 Tdata2 = signal->theData[2]; switch(Tdata0) { + case BackupContinueB::BACKUP_FRAGMENT_INFO: + { + const Uint32 ptr_I = Tdata1; + Uint32 tabPtr_I = Tdata2; + Uint32 fragPtr_I = signal->theData[3]; + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, ptr_I); + TablePtr tabPtr; + ptr.p->tables.getPtr(tabPtr, tabPtr_I); + FragmentPtr fragPtr; + tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I); + + BackupFilePtr filePtr; + ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); + + const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2; + Uint32 * dst; + if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz)) + { + sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 
100, 4); + return; + } + + BackupFormat::CtlFile::FragmentInfo * fragInfo = + (BackupFormat::CtlFile::FragmentInfo*)dst; + fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO); + fragInfo->SectionLength = htonl(sz); + fragInfo->TableId = htonl(fragPtr.p->tableId); + fragInfo->FragmentNo = htonl(fragPtr_I); + fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF); + fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32); + fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF); + fragInfo->FilePosHigh = htonl(0 >> 32); + + filePtr.p->operation.dataBuffer.updateWritePtr(sz); + + fragPtr_I++; + if (fragPtr_I == tabPtr.p->fragments.getSize()) + { + signal->theData[0] = tabPtr.p->tableId; + signal->theData[1] = 0; // unlock + EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); + + fragPtr_I = 0; + ptr.p->tables.next(tabPtr); + if ((tabPtr_I = tabPtr.i) == RNIL) + { + closeFiles(signal, ptr); + return; + } + } + signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; + signal->theData[1] = ptr_I; + signal->theData[2] = tabPtr_I; + signal->theData[3] = fragPtr_I; + sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); + return; + } case BackupContinueB::START_FILE_THREAD: case BackupContinueB::BUFFER_UNDERFLOW: { @@ -372,7 +431,7 @@ Backup::findTable(const BackupRecordPtr & ptr, return false; } -static Uint32 xps(Uint32 x, Uint64 ms) +static Uint32 xps(Uint64 x, Uint64 ms) { float fx = x; float fs = ms; @@ -386,9 +445,9 @@ static Uint32 xps(Uint32 x, Uint64 ms) } struct Number { - Number(Uint32 r) { val = r;} - Number & operator=(Uint32 r) { val = r; return * this; } - Uint32 val; + Number(Uint64 r) { val = r;} + Number & operator=(Uint64 r) { val = r; return * this; } + Uint64 val; }; NdbOut & @@ -462,8 +521,10 @@ Backup::execBACKUP_COMPLETE_REP(Signal* signal) startTime = NdbTick_CurrentMillisecond() - startTime; ndbout_c("Backup %d has completed", rep->backupId); - const Uint32 bytes = rep->noOfBytes; - const Uint32 records = rep->noOfRecords; + const Uint64 bytes = + rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32); + const Uint64 records = + rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32); Number rps = xps(records, startTime); Number bps = xps(bytes, startTime); @@ -1722,8 +1783,10 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) const Uint32 tableId = conf->tableId; const Uint32 fragmentNo = conf->fragmentNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - const Uint32 noOfBytes = conf->noOfBytes; - const Uint32 noOfRecords = conf->noOfRecords; + const Uint64 noOfBytes = + conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32); + const Uint64 noOfRecords = + conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); @@ -1735,9 +1798,13 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); + tabPtr.p->noOfRecords += noOfRecords; + FragmentPtr fragPtr; tabPtr.p->fragments.getPtr(fragPtr, fragmentNo); + fragPtr.p->noOfRecords = noOfRecords; + ndbrequire(fragPtr.p->scanned == 0); ndbrequire(fragPtr.p->scanning == 1); ndbrequire(fragPtr.p->node == nodeId); @@ -1761,6 +1828,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) } else { + NodeBitmask nodes = ptr.p->nodes; + nodes.clear(getOwnNodeId()); + if (!nodes.isclear()) + { + BackupFragmentCompleteRep *rep = + (BackupFragmentCompleteRep*)signal->getDataPtrSend(); + rep->backupId = ptr.p->backupId; + rep->backupPtr = ptr.i; + rep->tableId = tableId; + 
rep->fragmentNo = fragmentNo; + rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF); + rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32); + rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF); + rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32); + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal, + BackupFragmentCompleteRep::SignalLength, JBB); + } nextFragment(signal, ptr); } } @@ -1823,6 +1908,29 @@ err: execABORT_BACKUP_ORD(signal); } +void +Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal) +{ + jamEntry(); + BackupFragmentCompleteRep * rep = + (BackupFragmentCompleteRep*)signal->getDataPtr(); + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, rep->backupPtr); + + TablePtr tabPtr; + ndbrequire(findTable(ptr, tabPtr, rep->tableId)); + + tabPtr.p->noOfRecords = + rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32); + + FragmentPtr fragPtr; + tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo); + + fragPtr.p->noOfRecords = + rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32); +} + /***************************************************************************** * * Slave functionallity - Drop triggers @@ -1876,19 +1984,18 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr) gcp->StartGCP = htonl(ptr.p->startGCP); gcp->StopGCP = htonl(ptr.p->stopGCP - 1); filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz); - } - { // UNLOCK while dropping trigger for better timeslicing - TablePtr tabPtr; - for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL; - ptr.p->tables.next(tabPtr)) { - signal->theData[0] = tabPtr.p->tableId; - signal->theData[1] = 0; // unlock - EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); + TablePtr tabPtr; + ptr.p->tables.first(tabPtr); + + signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; + signal->theData[1] = ptr.i; + signal->theData[2] = tabPtr.i; + signal->theData[3] = 0; + sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); } } - closeFiles(signal, ptr); } } @@ -2051,8 +2158,10 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) rep->senderData = ptr.p->clientData; rep->startGCP = ptr.p->startGCP; rep->stopGCP = ptr.p->stopGCP; - rep->noOfBytes = ptr.p->noOfBytes; - rep->noOfRecords = ptr.p->noOfRecords; + rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); + rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); + rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32); + rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32); rep->noOfLogBytes = ptr.p->noOfLogBytes; rep->noOfLogRecords = ptr.p->noOfLogRecords; rep->nodes = ptr.p->nodes; @@ -2065,12 +2174,14 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) signal->theData[2] = ptr.p->backupId; signal->theData[3] = ptr.p->startGCP; signal->theData[4] = ptr.p->stopGCP; - signal->theData[5] = ptr.p->noOfBytes; - signal->theData[6] = ptr.p->noOfRecords; + signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); + signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); signal->theData[7] = ptr.p->noOfLogBytes; signal->theData[8] = ptr.p->noOfLogRecords; ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); + signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32); + signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32); + sendSignal(CMVMI_REF, 
GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB); } else { @@ -2912,6 +3023,7 @@ Backup::parseTableDescription(Signal* signal, /** * Initialize table object */ + tabPtr.p->noOfRecords = 0; tabPtr.p->schemaVersion = tmpTab.TableVersion; tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes; tabPtr.p->noOfNull = 0; @@ -3637,9 +3749,10 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) conf->backupPtr = ptr.i; conf->tableId = filePtr.p->tableId; conf->fragmentNo = filePtr.p->fragmentNo; - conf->noOfRecords = op.noOfRecords; - conf->noOfBytes = op.noOfBytes; - + conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF); + conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32); + conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF); + conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32); sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal, BackupFragmentConf::SignalLength, JBB); diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp index 73f898261ca..afacf01ab2f 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp @@ -70,6 +70,7 @@ protected: void execBACKUP_DATA(Signal* signal); void execSTART_BACKUP_REQ(Signal* signal); void execBACKUP_FRAGMENT_REQ(Signal* signal); + void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal); void execSTOP_BACKUP_REQ(Signal* signal); void execBACKUP_STATUS_REQ(Signal* signal); void execABORT_BACKUP_ORD(Signal* signal); @@ -192,6 +193,7 @@ public: typedef Ptr AttributePtr; struct Fragment { + Uint64 noOfRecords; Uint32 tableId; Uint16 node; Uint16 fragmentId; @@ -205,6 +207,8 @@ public: struct Table { Table(ArrayPool &, ArrayPool &); + Uint64 noOfRecords; + Uint32 tableId; Uint32 schemaVersion; Uint32 tableType; @@ -280,8 +284,8 @@ public: Uint32 tablePtr; // Ptr.i to current table FsBuffer dataBuffer; - Uint32 noOfRecords; - Uint32 noOfBytes; + Uint64 noOfRecords; + Uint64 noOfBytes; Uint32 maxRecordSize; private: diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp index 9fa5800c120..dddcf8192d5 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp @@ -32,7 +32,8 @@ struct BackupFormat { FRAGMENT_FOOTER = 3, TABLE_LIST = 4, TABLE_DESCRIPTION = 5, - GCP_ENTRY = 6 + GCP_ENTRY = 6, + FRAGMENT_INFO = 7 }; struct FileHeader { @@ -128,6 +129,20 @@ struct BackupFormat { Uint32 StartGCP; Uint32 StopGCP; }; + + /** + * Fragment Info + */ + struct FragmentInfo { + Uint32 SectionType; + Uint32 SectionLength; + Uint32 TableId; + Uint32 FragmentNo; + Uint32 NoOfRecordsLow; + Uint32 NoOfRecordsHigh; + Uint32 FilePosLow; + Uint32 FilePosHigh; + }; }; /** diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp index 5f49a1a8725..38a60ac04d6 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -95,6 +95,9 @@ Backup::Backup(Block_context& ctx) : addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ); addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF); + + addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP, + &Backup::execBACKUP_FRAGMENT_COMPLETE_REP); addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ); addRecSignal(GSN_STOP_BACKUP_REF, 
&Backup::execSTOP_BACKUP_REF); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index a4cb2b706e5..99ca6884dd3 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -459,6 +459,8 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, w.add(DictTabInfo::DefaultNoPartFlag, tablePtr.p->defaultNoPartFlag); w.add(DictTabInfo::LinearHashFlag, tablePtr.p->linearHashFlag); w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount); + w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow); + w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh); if(signal) { @@ -1855,6 +1857,8 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) tablePtr.p->defaultNoPartFlag = true; tablePtr.p->linearHashFlag = true; tablePtr.p->m_bits = 0; + tablePtr.p->minRowsLow = 0; + tablePtr.p->minRowsHigh = 0; tablePtr.p->tableType = DictTabInfo::UserTable; tablePtr.p->primaryTableId = RNIL; // volatile elements @@ -4731,11 +4735,6 @@ Dbdict::alterTab_writeTableConf(Signal* signal, SegmentedSectionPtr tabInfoPtr; getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI); signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO); -#ifndef DBUG_OFF - ndbout_c("DICT_TAB_INFO in DICT"); - SimplePropertiesSectionReader reader(tabInfoPtr, getSectionSegmentPool()); - reader.printAll(ndbout); -#endif EXECUTE_DIRECT(SUMA, GSN_ALTER_TAB_REQ, signal, AlterTabReq::SignalLength); releaseSections(signal); @@ -5331,6 +5330,13 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { Uint32 lhPageBits = 0; ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount); + Uint64 maxRows = tabPtr.p->maxRowsLow + + (((Uint64)tabPtr.p->maxRowsHigh) << 32); + Uint64 minRows = tabPtr.p->minRowsLow + + (((Uint64)tabPtr.p->minRowsHigh) << 32); + maxRows = (maxRows + fragCount - 1) / fragCount; + minRows = (minRows + fragCount - 1) / fragCount; + { LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend(); req->senderData = senderData; @@ -5346,16 +5352,17 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->lh3PageBits = 0; //lhPageBits; req->noOfAttributes = tabPtr.p->noOfAttributes; req->noOfNullAttributes = tabPtr.p->noOfNullBits; - req->noOfPagesToPreAllocate = 0; + req->maxRowsLow = maxRows & 0xFFFFFFFF; + req->maxRowsHigh = maxRows >> 32; + req->minRowsLow = minRows & 0xFFFFFFFF; + req->minRowsHigh = minRows >> 32; req->schemaVersion = tabPtr.p->tableVersion; Uint32 keyLen = tabPtr.p->tupKeyLength; req->keyLength = keyLen; // wl-2066 no more "long keys" req->nextLCP = lcpNo; req->noOfKeyAttr = tabPtr.p->noOfPrimkey; - req->noOfNewAttr = 0; - // noOfCharsets passed to TUP in upper half - req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16); + req->noOfCharsets = tabPtr.p->noOfCharsets; req->checksumIndicator = 1; req->GCPIndicator = 1; req->startGci = startGci; @@ -5976,9 +5983,16 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId; tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow; tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh; + tablePtr.p->minRowsLow = c_tableDesc.MinRowsLow; + tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh; tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; + Uint64 maxRows = + (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow; + Uint64 minRows = + (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow; + { Rope frm(c_rope_pool, 
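// execADD_FRAGREQ (below) divides the table-level MinRows/MaxRows across
// fragments with ceiling division, so the per-fragment quotas never sum to
// less than the table-level figure. Sketch with an illustrative helper name:
//
//   static inline Uint64 per_fragment(Uint64 tableRows, Uint32 fragCount) {
//     return (tableRows + fragCount - 1) / fragCount;   // ceil(rows/frags)
//   }
//   // e.g. 1000003 rows over 4 fragments -> 250001 each;
//   // 4 * 250001 = 1000004 >= 1000003, so no fragment is undersized.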
tablePtr.p->frmData); tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen), @@ -7002,13 +7016,37 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash) { TableRecordPtr tablePtr; c_tableRecordPool.getPtr(tablePtr, tableId); - if (removeFromHash){ + if (removeFromHash) + { jam(); release_object(tablePtr.p->m_obj_ptr_i); } + else + { + Rope tmp(c_rope_pool, tablePtr.p->tableName); + tmp.erase(); + } - Rope frm(c_rope_pool, tablePtr.p->frmData); - frm.erase(); + { + Rope tmp(c_rope_pool, tablePtr.p->frmData); + tmp.erase(); + } + + { + Rope tmp(c_rope_pool, tablePtr.p->tsData); + tmp.erase(); + } + + { + Rope tmp(c_rope_pool, tablePtr.p->ngData); + tmp.erase(); + } + + { + Rope tmp(c_rope_pool, tablePtr.p->rangeData); + tmp.erase(); + } + tablePtr.p->tabState = TableRecord::NOT_DEFINED; LocalDLFifoList list(c_attributeRecordPool, @@ -14277,7 +14315,8 @@ Dbdict::trans_commit_complete_done(Signal* signal, conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; conf->fileId = f_ptr.p->key; - + conf->fileVersion = f_ptr.p->m_version; + //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_CONF, signal, CreateFileConf::SignalLength, JBB); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 58656023e4e..228dab57650 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -206,6 +206,8 @@ public: TableRecord(){} Uint32 maxRowsLow; Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; /* Table id (array index in DICT and other blocks) */ Uint32 tableId; Uint32 m_obj_ptr_i; diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index f1d1fdbf000..c1d4175833e 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -431,7 +431,6 @@ public: UintR dictConnectptr; UintR fragmentPtr; UintR nextAddfragrec; - UintR noOfAllocPages; UintR schemaVer; UintR tupConnectptr; UintR tuxConnectptr; @@ -449,13 +448,16 @@ public: Uint16 totalAttrReceived; Uint16 fragCopyCreation; Uint16 noOfKeyAttr; - Uint32 noOfNewAttr; // noOfCharsets in upper half - Uint16 noOfAttributeGroups; + Uint16 noOfCharsets; Uint16 lh3DistrBits; Uint16 tableType; Uint16 primaryTableId; Uint32 tablespace_id; - };// Size 108 bytes + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; + };// Size 128 bytes typedef Ptr AddFragRecordPtr; /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index bdd19839a02..3b1f94473f5 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -999,12 +999,15 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) Uint8 tlh = req->lh3PageBits; Uint32 tnoOfAttr = req->noOfAttributes; Uint32 tnoOfNull = req->noOfNullAttributes; - Uint32 noOfAlloc = req->noOfPagesToPreAllocate; + Uint32 maxRowsLow = req->maxRowsLow; + Uint32 maxRowsHigh = req->maxRowsHigh; + Uint32 minRowsLow = req->minRowsLow; + Uint32 minRowsHigh = req->minRowsHigh; Uint32 tschemaVersion = req->schemaVersion; Uint32 ttupKeyLength = req->keyLength; Uint32 nextLcp = req->nextLCP; Uint32 noOfKeyAttr = req->noOfKeyAttr; - Uint32 noOfNewAttr = req->noOfNewAttr; + Uint32 noOfCharsets = req->noOfCharsets; Uint32 
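// LQHFRAGREQ previously smuggled the charset count in the upper 16 bits of
// noOfNewAttr ("noOfCharsets passed to TUP in upper half"); this patch gives
// it a signal word of its own. Sketch of the retired encoding, for contrast:
//
//   Uint32 packed   = noOfNewAttr | (noOfCharsets << 16);  // old sender
//   Uint32 charsets = packed >> 16;                        // old receiver
//   // new style: req->noOfCharsets = noOfCharsets;  one value per field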
checksumIndicator = req->checksumIndicator; Uint32 gcpIndicator = req->GCPIndicator; Uint32 startGci = req->startGci; @@ -1112,7 +1115,10 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) addfragptr.p->m_senderAttrPtr = RNIL; addfragptr.p->noOfAttr = tnoOfAttr; addfragptr.p->noOfNull = tnoOfNull; - addfragptr.p->noOfAllocPages = noOfAlloc; + addfragptr.p->maxRowsLow = maxRowsLow; + addfragptr.p->maxRowsHigh = maxRowsHigh; + addfragptr.p->minRowsLow = minRowsLow; + addfragptr.p->minRowsHigh = minRowsHigh; addfragptr.p->tabId = tabptr.i; addfragptr.p->totalAttrReceived = 0; addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */ @@ -1121,7 +1127,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) addfragptr.p->fragCopyCreation = (tmp == 0 ? 0 : 1); addfragptr.p->addfragErrorCode = 0; addfragptr.p->noOfKeyAttr = noOfKeyAttr; - addfragptr.p->noOfNewAttr = noOfNewAttr; + addfragptr.p->noOfCharsets = noOfCharsets; addfragptr.p->checksumIndicator = checksumIndicator; addfragptr.p->GCPIndicator = gcpIndicator; addfragptr.p->lh3DistrBits = tlhstar; @@ -1257,44 +1263,49 @@ Dblqh::sendAddFragReq(Signal* signal) fragptr.i = addfragptr.p->fragmentPtr; c_fragment_pool.getPtr(fragptr); if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TUP){ + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); if (DictTabInfo::isTable(addfragptr.p->tableType) || DictTabInfo::isHashIndex(addfragptr.p->tableType)) { jam(); - signal->theData[0] = addfragptr.i; - signal->theData[1] = cownref; - signal->theData[2] = 0; /* ADD TABLE */ - signal->theData[3] = addfragptr.p->tabId; - signal->theData[4] = addfragptr.p->noOfAttr; - signal->theData[5] = addfragptr.p->addFragid; - signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1; - signal->theData[7] = addfragptr.p->noOfNull; - signal->theData[8] = addfragptr.p->schemaVer; - signal->theData[9] = addfragptr.p->noOfKeyAttr; - signal->theData[10] = addfragptr.p->noOfNewAttr; - signal->theData[11] = addfragptr.p->checksumIndicator; - signal->theData[12] = addfragptr.p->noOfAttributeGroups; - signal->theData[13] = addfragptr.p->GCPIndicator; - signal->theData[14] = addfragptr.p->tablespace_id; + tupFragReq->userPtr = addfragptr.i; + tupFragReq->userRef = cownref; + tupFragReq->reqInfo = 0; /* ADD TABLE */ + tupFragReq->tableId = addfragptr.p->tabId; + tupFragReq->noOfAttr = addfragptr.p->noOfAttr; + tupFragReq->fragId = addfragptr.p->addFragid; + tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; + tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; + tupFragReq->minRowsLow = addfragptr.p->minRowsLow; + tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; + tupFragReq->noOfNullAttr = addfragptr.p->noOfNull; + tupFragReq->schemaVersion = addfragptr.p->schemaVer; + tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr; + tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; + tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; + tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; + tupFragReq->tablespaceid = addfragptr.p->tablespace_id; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, TupFragReq::SignalLength, JBB); return; } if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) { jam(); - signal->theData[0] = addfragptr.i; - signal->theData[1] = cownref; - signal->theData[2] = 0; /* ADD TABLE */ - signal->theData[3] = addfragptr.p->tabId; - signal->theData[4] = 1; /* ordered index: one array attr */ - signal->theData[5] = addfragptr.p->addFragid; - signal->theData[6] = (addfragptr.p->noOfAllocPages 
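// sendAddFragReq now fills a named TupFragReq overlay instead of numbered
// theData[] slots. The general pattern, sketched with a hypothetical
// ExampleReq (the real layout is the TupFragReq signal class):
//
//   struct ExampleReq { Uint32 userPtr; Uint32 userRef; Uint32 reqInfo; };
//   ExampleReq* req = (ExampleReq*)signal->getDataPtrSend();
//   req->userPtr = ownRecord.i;   // self-documenting; sender and receiver
//   req->userRef = cownref;       // share one struct definition
//   req->reqInfo = 0;
//   // versus signal->theData[0..2] = ..., where both sides agree on word
//   // positions only by convention.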
>> 1) + 1; - signal->theData[7] = 0; /* ordered index: no nullable */ - signal->theData[8] = addfragptr.p->schemaVer; - signal->theData[9] = 1; /* ordered index: one key */ - signal->theData[10] = addfragptr.p->noOfNewAttr; - signal->theData[11] = addfragptr.p->checksumIndicator; - signal->theData[12] = addfragptr.p->noOfAttributeGroups; - signal->theData[13] = addfragptr.p->GCPIndicator; + tupFragReq->userPtr = addfragptr.i; + tupFragReq->userRef = cownref; + tupFragReq->reqInfo = 0; /* ADD TABLE */ + tupFragReq->tableId = addfragptr.p->tabId; + tupFragReq->noOfAttr = 1; /* ordered index: one array attr */ + tupFragReq->fragId = addfragptr.p->addFragid; + tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; + tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; + tupFragReq->minRowsLow = addfragptr.p->minRowsLow; + tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; + tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */ + tupFragReq->schemaVersion = addfragptr.p->schemaVer; + tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */ + tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; + tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; + tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, TupFragReq::SignalLength, JBB); return; @@ -1598,16 +1609,19 @@ void Dblqh::abortAddFragOps(Signal* signal) { fragptr.i = addfragptr.p->fragmentPtr; c_fragment_pool.getPtr(fragptr); - signal->theData[0] = (Uint32)-1; if (addfragptr.p->tupConnectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tupConnectptr; + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); + tupFragReq->userPtr = (Uint32)-1; + tupFragReq->userRef = addfragptr.p->tupConnectptr; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB); addfragptr.p->tupConnectptr = RNIL; } if (addfragptr.p->tuxConnectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tuxConnectptr; + TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend(); + tuxFragReq->userPtr = (Uint32)-1; + tuxFragReq->userRef = addfragptr.p->tuxConnectptr; sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB); addfragptr.p->tuxConnectptr = RNIL; } diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 4ff6e069963..9bc916c8c22 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -246,6 +246,7 @@ inline const Uint32* ALIGN_WORD(const void* ptr) #define ZTUP_SCAN 10 #define ZFREE_EXTENT 11 #define ZUNMAP_PAGES 12 +#define ZFREE_VAR_PAGES 13 #define ZSCAN_PROCEDURE 0 #define ZCOPY_PROCEDURE 2 @@ -327,7 +328,8 @@ typedef Ptr AttrbufrecPtr; struct Fragoperrec { - bool definingFragment; + Uint64 minRows; + Uint64 maxRows; Uint32 nextFragoprec; Uint32 lqhPtrFrag; Uint32 fragidFrag; @@ -340,6 +342,7 @@ struct Fragoperrec { Uint32 m_var_attributes_size[2]; // In bytes BlockReference lqhBlockrefFrag; bool inUse; + bool definingFragment; }; typedef Ptr FragoperrecPtr; @@ -601,6 +604,8 @@ struct Fragrecord { Uint32 currentPageRange; Uint32 rootPageRange; Uint32 noOfPages; + Uint32 noOfPagesToGrow; + DLList::Head emptyPrimPage; // allocated pages (not init) DLList::Head thFreeFirst; // pages with atleast 1 free record SLList::Head m_empty_pages; // Empty pages not in logical/physical map @@ -620,6 +625,7 @@ struct Fragrecord { Uint32 m_tablespace_id; Uint32 m_logfile_group_id; Disk_alloc_info 
m_disk_alloc_info; + Uint32 m_var_page_chunks; }; typedef Ptr FragrecordPtr; @@ -2335,6 +2341,7 @@ private: void releaseFragment(Signal* signal, Uint32 tableId); + void drop_fragment_free_var_pages(Signal*); void drop_fragment_free_exent(Signal*, TablerecPtr, FragrecordPtr, Uint32); void drop_fragment_unmap_pages(Signal*, TablerecPtr, FragrecordPtr, Uint32); void drop_fragment_unmap_page_callback(Signal* signal, Uint32, Uint32); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index 8a68905cef9..c59cf4015af 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -227,6 +227,12 @@ void Dbtup::execCONTINUEB(Signal* signal) drop_fragment_unmap_pages(signal, tabPtr, fragPtr, signal->theData[3]); return; } + case ZFREE_VAR_PAGES: + { + ljam(); + drop_fragment_free_var_pages(signal); + return; + } default: ndbrequire(false); break; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index 5cfd1f8cb77..88845a6ef64 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -37,7 +37,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) { ljamEntry(); - if (signal->theData[0] == (Uint32)-1) { + TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr(); + if (tupFragReq->userPtr == (Uint32)-1) { ljam(); abortAddFragOp(signal); return; @@ -47,31 +48,32 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) FragrecordPtr regFragPtr; TablerecPtr regTabPtr; - Uint32 userptr= signal->theData[0]; - Uint32 userblockref= signal->theData[1]; - Uint32 reqinfo= signal->theData[2]; - regTabPtr.i= signal->theData[3]; - Uint32 noOfAttributes= signal->theData[4]; - Uint32 fragId= signal->theData[5]; - Uint32 pages= signal->theData[6]; - Uint32 noOfNullAttr= signal->theData[7]; - /* Uint32 schemaVersion= signal->theData[8];*/ - Uint32 noOfKeyAttr= signal->theData[9]; + Uint32 userptr = tupFragReq->userPtr; + Uint32 userblockref = tupFragReq->userRef; + Uint32 reqinfo = tupFragReq->reqInfo; + regTabPtr.i = tupFragReq->tableId; + Uint32 noOfAttributes = tupFragReq->noOfAttr; + Uint32 fragId = tupFragReq->fragId; + Uint32 noOfNullAttr = tupFragReq->noOfNullAttr; + /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/ + Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr; + Uint32 noOfCharsets = tupFragReq->noOfCharsets; - //Uint32 noOfNewAttr= (signal->theData[10] & 0xFFFF); - /* DICT sends number of character sets in upper half */ - Uint32 noOfCharsets= (signal->theData[10] >> 16); - Uint32 gcpIndicator = signal->theData[13]; - Uint32 tablespace= signal->theData[14]; + Uint32 checksumIndicator = tupFragReq->checksumIndicator; + Uint32 gcpIndicator = tupFragReq->globalCheckpointIdIndicator; + Uint32 tablespace_id= tupFragReq->tablespaceid; - Uint32 checksumIndicator= signal->theData[11]; + Uint64 maxRows = + (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow; + Uint64 minRows = + (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow; #ifndef VM_TRACE // config mismatch - do not crash if release compiled if (regTabPtr.i >= cnoOfTablerec) { ljam(); - signal->theData[0] = userptr; - signal->theData[1] = 800; + tupFragReq->userPtr = userptr; + tupFragReq->userRef = 800; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); return; } @@ -80,8 +82,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); if 
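// ZFREE_VAR_PAGES uses the standard CONTINUEB idiom: do one unit of work,
// then re-post a signal to yourself so a long cleanup is time-sliced instead
// of monopolizing the job scheduler. Shape of the loop (all_done and
// do_one_step are illustrative stand-ins for the real chunk handling):
//
//   void step(Signal* signal) {
//     if (!all_done()) {
//       do_one_step();                           // one chunk per invocation
//       signal->theData[0] = ZFREE_VAR_PAGES;    // resume marker
//       signal->theData[1] = tabPtr.i;
//       signal->theData[2] = fragPtr.i;
//       sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
//       return;                                  // yield
//     }
//     releaseFragPages(fragPtr.p);               // final step runs inline
//   }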
(cfirstfreeFragopr == RNIL) { ljam(); - signal->theData[0]= userptr; - signal->theData[1]= ZNOFREE_FRAGOP_ERROR; + tupFragReq->userPtr = userptr; + tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); return; } @@ -101,6 +103,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) sizeof(fragOperPtr.p->m_var_attributes_size)); fragOperPtr.p->charsetIndex = 0; + fragOperPtr.p->minRows = minRows; + fragOperPtr.p->maxRows = maxRows; ndbrequire(reqinfo == ZADDFRAG); @@ -136,19 +140,11 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regFragPtr.p->fragTableId= regTabPtr.i; regFragPtr.p->fragmentId= fragId; - regFragPtr.p->m_tablespace_id= tablespace; + regFragPtr.p->m_tablespace_id= tablespace_id; regFragPtr.p->m_undo_complete= false; regFragPtr.p->m_lcp_scan_op = RNIL; regFragPtr.p->m_lcp_keep_list = RNIL; - - Uint32 noAllocatedPages= allocFragPages(regFragPtr.p, pages); - - if (noAllocatedPages == 0) { - ljam(); - terrorCode= ZNO_PAGES_ALLOCATED_ERROR; - fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); - return; - } + regFragPtr.p->m_var_page_chunks = RNIL; if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId || ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) { @@ -538,6 +534,28 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) } #endif + { + Uint32 fix_tupheader = regTabPtr.p->m_offsets[MM].m_fix_header_size; + if(regTabPtr.p->m_attributes[MM].m_no_of_varsize != 0) + fix_tupheader += Tuple_header::HeaderSize + 1; + ndbassert(fix_tupheader > 0); + Uint32 noRowsPerPage = ZWORDS_ON_PAGE / fix_tupheader; + Uint32 noAllocatedPages = + (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage; + if (fragOperPtr.p->minRows == 0) + noAllocatedPages = 2; + else if (noAllocatedPages == 0) + noAllocatedPages = 2; + noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); + + if (noAllocatedPages == 0) { + ljam(); + terrorCode = ZNO_PAGES_ALLOCATED_ERROR; + addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); + return; + }//if + } + CreateFilegroupImplReq rep; if(regTabPtr.p->m_no_of_disk_attributes) { @@ -970,7 +988,7 @@ Dbtup::drop_fragment_unmap_pages(Signal *signal, case -1: break; default: - ndbrequire(res == pagePtr.i); + ndbrequire((Uint32)res == pagePtr.i); drop_fragment_unmap_page_callback(signal, pos, res); } return; @@ -1052,6 +1070,44 @@ Dbtup::drop_fragment_free_exent(Signal *signal, } } + signal->theData[0] = ZFREE_VAR_PAGES; + signal->theData[1] = tabPtr.i; + signal->theData[2] = fragPtr.i; + sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); +} + +void +Dbtup::drop_fragment_free_var_pages(Signal* signal) +{ + ljam(); + Uint32 tableId = signal->theData[1]; + Uint32 fragPtrI = signal->theData[2]; + + TablerecPtr tabPtr; + tabPtr.i= tableId; + ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); + + FragrecordPtr fragPtr; + fragPtr.i = fragPtrI; + ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); + + PagePtr pagePtr; + if ((pagePtr.i = fragPtr.p->m_var_page_chunks) != RNIL) + { + c_page_pool.getPtr(pagePtr); + Var_page* page = (Var_page*)pagePtr.p; + fragPtr.p->m_var_page_chunks = page->next_chunk; + + Uint32 sz = page->chunk_size; + returnCommonArea(pagePtr.i, sz); + + signal->theData[0] = ZFREE_VAR_PAGES; + signal->theData[1] = tabPtr.i; + signal->theData[2] = fragPtr.i; + sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); + return; + } + releaseFragPages(fragPtr.p); Uint32 i; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp 
b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index db6f5e3b185..90fdd8c69d7 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -299,6 +299,11 @@ void Dbtup::releaseFragPages(Fragrecord* regFragPtr) LocalDLList tmp(c_page_pool, regFragPtr->thFreeFirst); tmp.remove(); } + + { + LocalSLList tmp(c_page_pool, regFragPtr->m_empty_pages); + tmp.remove(); + } return; } else { @@ -346,6 +351,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr) regFragPtr->rootPageRange = RNIL; regFragPtr->currentPageRange = RNIL; regFragPtr->noOfPages = 0; + regFragPtr->noOfPagesToGrow = 2; regFragPtr->nextStartRange = 0; }//initFragRange() @@ -421,9 +427,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr) { - Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5% - noAllocPages += regFragPtr->noOfPages >> 4; // 6.25% + Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5% + noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25% noAllocPages += 2; + regFragPtr->noOfPagesToGrow += noAllocPages; /* -----------------------------------------------------------------*/ // We will grow by 18.75% plus two more additional pages to grow // a little bit quicker in the beginning. diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp index 94bd75108a4..52ab66b5c0e 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp @@ -323,6 +323,13 @@ Dbtup::get_empty_var_page(Fragrecord* fragPtr) ptr.p->nextList = RNIL; list.add(ret.i + 1, ptr); } + + c_page_pool.getPtr(ret); + + Var_page* page = (Var_page*)ret.p; + page->chunk_size = cnt; + page->next_chunk = fragPtr->m_var_page_chunks; + fragPtr->m_var_page_chunks = ret.i; return ret.i; } diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp index 04ed18da58d..4b4df909061 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp @@ -107,8 +107,14 @@ struct Tup_varsize_page Uint32 page_state; Uint32 next_page; Uint32 prev_page; - Uint32 first_cluster_page; - Uint32 last_cluster_page; + union { + Uint32 first_cluster_page; + Uint32 chunk_size; + }; + union { + Uint32 last_cluster_page; + Uint32 next_chunk; + }; Uint32 next_cluster_page; Uint32 prev_cluster_page; Uint32 frag_page_id; diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index 511536d2fdd..50a623920d2 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2487,14 +2487,20 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) event.Event = BackupEvent::BackupCompleted; event.Completed.BackupId = rep->backupId; - event.Completed.NoOfBytes = rep->noOfBytes; + event.Completed.NoOfBytes = rep->noOfBytesLow; event.Completed.NoOfLogBytes = rep->noOfLogBytes; - event.Completed.NoOfRecords = rep->noOfRecords; + event.Completed.NoOfRecords = rep->noOfRecordsLow; event.Completed.NoOfLogRecords = rep->noOfLogRecords; event.Completed.stopGCP = rep->stopGCP; event.Completed.startGCP = rep->startGCP; event.Nodes = rep->nodes; + if (signal->header.theLength >= BackupCompleteRep::SignalLength) + { + event.Completed.NoOfBytes += ((Uint64)rep->noOfBytesHigh) << 32; + event.Completed.NoOfRecords += 
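// Var-size pages are now threaded into a per-fragment singly linked list of
// chunks by reusing two cluster-page words through the unions added in
// tuppage.hpp: each chunk head stores its size in chunk_size and the next
// head in next_chunk. Dropping a fragment can then return whole chunks to
// the common area; a condensed sketch of the walk (the real code frees one
// chunk per CONTINUEB step):
//
//   Uint32 head = fragPtr.p->m_var_page_chunks;
//   while (head != RNIL) {
//     c_page_pool.getPtr(pagePtr, head);
//     Var_page* page = (Var_page*)pagePtr.p;
//     Uint32 next = page->next_chunk;
//     returnCommonArea(head, page->chunk_size);  // release the whole chunk
//     head = next;
//   }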
((Uint64)rep->noOfRecordsHigh) << 32; + } + backupId = rep->backupId; return 0; } diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp index a00ba2cda12..1473ec90c33 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -323,9 +323,9 @@ public: Uint32 ErrorCode; } FailedToStart ; struct { + Uint64 NoOfBytes; + Uint64 NoOfRecords; Uint32 BackupId; - Uint32 NoOfBytes; - Uint32 NoOfRecords; Uint32 NoOfLogBytes; Uint32 NoOfLogRecords; Uint32 startGCP; diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index c71689d2e81..b7df3037c34 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -18,6 +18,32 @@ #include "NdbDictionaryImpl.hpp" #include +NdbDictionary::ObjectId::ObjectId() + : m_impl(* new NdbDictObjectImpl(NdbDictionary::Object::TypeUndefined)) +{ +} + +NdbDictionary::ObjectId::~ObjectId() +{ + NdbDictObjectImpl * tmp = &m_impl; + delete tmp; +} + +NdbDictionary::Object::Status +NdbDictionary::ObjectId::getObjectStatus() const { + return m_impl.m_status; +} + +int +NdbDictionary::ObjectId::getObjectVersion() const { + return m_impl.m_version; +} + +int +NdbDictionary::ObjectId::getObjectId() const { + return m_impl.m_id; +} + /***************************************************************** * Column facade */ @@ -425,6 +451,18 @@ NdbDictionary::Table::getMaxRows() const return m_impl.m_max_rows; } +void +NdbDictionary::Table::setMinRows(Uint64 minRows) +{ + m_impl.m_min_rows = minRows; +} + +Uint64 +NdbDictionary::Table::getMinRows() const +{ + return m_impl.m_min_rows; +} + void NdbDictionary::Table::setDefaultNoPartitionsFlag(Uint32 flag) { @@ -1799,17 +1837,22 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) } int -NdbDictionary::Dictionary::createLogfileGroup(const LogfileGroup & lg){ - return m_impl.createLogfileGroup(NdbLogfileGroupImpl::getImpl(lg)); +NdbDictionary::Dictionary::createLogfileGroup(const LogfileGroup & lg, + ObjectId * obj) +{ + return m_impl.createLogfileGroup(NdbLogfileGroupImpl::getImpl(lg), + obj ? &obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropLogfileGroup(const LogfileGroup & lg){ +NdbDictionary::Dictionary::dropLogfileGroup(const LogfileGroup & lg) +{ return m_impl.dropLogfileGroup(NdbLogfileGroupImpl::getImpl(lg)); } NdbDictionary::LogfileGroup -NdbDictionary::Dictionary::getLogfileGroup(const char * name){ +NdbDictionary::Dictionary::getLogfileGroup(const char * name) +{ NdbDictionary::LogfileGroup tmp; m_impl.m_receiver.get_filegroup(NdbLogfileGroupImpl::getImpl(tmp), NdbDictionary::Object::LogfileGroup, name); @@ -1817,17 +1860,22 @@ NdbDictionary::Dictionary::getLogfileGroup(const char * name){ } int -NdbDictionary::Dictionary::createTablespace(const Tablespace & lg){ - return m_impl.createTablespace(NdbTablespaceImpl::getImpl(lg)); +NdbDictionary::Dictionary::createTablespace(const Tablespace & lg, + ObjectId * obj) +{ + return m_impl.createTablespace(NdbTablespaceImpl::getImpl(lg), + obj ? 
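// Usage of the new ObjectId out-parameter: a caller that needs the id and
// version of the object it just created passes one in; passing 0 keeps the
// old fire-and-forget behaviour. Sketch:
//
//   NdbDictionary::ObjectId objid;
//   if (dict->createTablespace(ts, &objid) == 0) {
//     int id  = objid.getObjectId();        // as assigned by DICT
//     int ver = objid.getObjectVersion();
//   }
//   // createLogfileGroup, createDatafile and createUndofile take the same
//   // optional ObjectId* argument.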
&obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropTablespace(const Tablespace & lg){ +NdbDictionary::Dictionary::dropTablespace(const Tablespace & lg) +{ return m_impl.dropTablespace(NdbTablespaceImpl::getImpl(lg)); } NdbDictionary::Tablespace -NdbDictionary::Dictionary::getTablespace(const char * name){ +NdbDictionary::Dictionary::getTablespace(const char * name) +{ NdbDictionary::Tablespace tmp; m_impl.m_receiver.get_filegroup(NdbTablespaceImpl::getImpl(tmp), NdbDictionary::Object::Tablespace, name); @@ -1835,7 +1883,8 @@ NdbDictionary::Dictionary::getTablespace(const char * name){ } NdbDictionary::Tablespace -NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId){ +NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId) +{ NdbDictionary::Tablespace tmp; m_impl.m_receiver.get_filegroup(NdbTablespaceImpl::getImpl(tmp), NdbDictionary::Object::Tablespace, @@ -1844,17 +1893,24 @@ NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId){ } int -NdbDictionary::Dictionary::createDatafile(const Datafile & df, bool force){ - return m_impl.createDatafile(NdbDatafileImpl::getImpl(df), force); +NdbDictionary::Dictionary::createDatafile(const Datafile & df, + bool force, + ObjectId * obj) +{ + return m_impl.createDatafile(NdbDatafileImpl::getImpl(df), + force, + obj ? &obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropDatafile(const Datafile& df){ +NdbDictionary::Dictionary::dropDatafile(const Datafile& df) +{ return m_impl.dropDatafile(NdbDatafileImpl::getImpl(df)); } NdbDictionary::Datafile -NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path){ +NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path) +{ NdbDictionary::Datafile tmp; m_impl.m_receiver.get_file(NdbDatafileImpl::getImpl(tmp), NdbDictionary::Object::Datafile, @@ -1863,17 +1919,24 @@ NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path){ } int -NdbDictionary::Dictionary::createUndofile(const Undofile & df, bool force){ - return m_impl.createUndofile(NdbUndofileImpl::getImpl(df), force); +NdbDictionary::Dictionary::createUndofile(const Undofile & df, + bool force, + ObjectId * obj) +{ + return m_impl.createUndofile(NdbUndofileImpl::getImpl(df), + force, + obj ? 
&obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropUndofile(const Undofile& df){ +NdbDictionary::Dictionary::dropUndofile(const Undofile& df) +{ return m_impl.dropUndofile(NdbUndofileImpl::getImpl(df)); } NdbDictionary::Undofile -NdbDictionary::Dictionary::getUndofile(Uint32 node, const char * path){ +NdbDictionary::Dictionary::getUndofile(Uint32 node, const char * path) +{ NdbDictionary::Undofile tmp; m_impl.m_receiver.get_file(NdbUndofileImpl::getImpl(tmp), NdbDictionary::Object::Undofile, diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 22a5d2f20a5..1e33a843a42 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -445,7 +445,6 @@ NdbTableImpl::init(){ m_hashpointerValue= 0; m_linear_flag= true; m_primaryTable.clear(); - m_max_rows = 0; m_default_no_part_flag = 1; m_logging= true; m_row_gci = true; @@ -461,6 +460,8 @@ NdbTableImpl::init(){ m_noOfDistributionKeys= 0; m_noOfBlobs= 0; m_replicaCount= 0; + m_min_rows = 0; + m_max_rows = 0; m_tablespace_name.clear(); m_tablespace_id = ~0; m_tablespace_version = ~0; @@ -729,6 +730,9 @@ NdbTableImpl::assign(const NdbTableImpl& org) m_version = org.m_version; m_status = org.m_status; + m_max_rows = org.m_max_rows; + m_min_rows = org.m_min_rows; + m_tablespace_name = org.m_tablespace_name; m_tablespace_id= org.m_tablespace_id; m_tablespace_version = org.m_tablespace_version; @@ -2066,6 +2070,9 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, Uint64 max_rows = ((Uint64)tableDesc->MaxRowsHigh) << 32; max_rows += tableDesc->MaxRowsLow; impl->m_max_rows = max_rows; + Uint64 min_rows = ((Uint64)tableDesc->MinRowsHigh) << 32; + min_rows += tableDesc->MinRowsLow; + impl->m_min_rows = min_rows; impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag; impl->m_linear_flag = tableDesc->LinearHashFlag; impl->m_logging = tableDesc->TableLoggedFlag; @@ -2521,6 +2528,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, tmpTab->NoOfAttributes = sz; tmpTab->MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32); tmpTab->MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF); + tmpTab->MinRowsHigh = (Uint32)(impl.m_min_rows >> 32); + tmpTab->MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF); tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag; tmpTab->LinearHashFlag = impl.m_linear_flag; @@ -4391,19 +4400,23 @@ NdbUndofileImpl::assign(const NdbUndofileImpl& org) } int -NdbDictionaryImpl::createDatafile(const NdbDatafileImpl & file, bool force){ +NdbDictionaryImpl::createDatafile(const NdbDatafileImpl & file, + bool force, + NdbDictObjectImpl* obj) + +{ DBUG_ENTER("NdbDictionaryImpl::createDatafile"); NdbFilegroupImpl tmp(NdbDictionary::Object::Tablespace); if(file.m_filegroup_version != ~(Uint32)0){ tmp.m_id = file.m_filegroup_id; tmp.m_version = file.m_filegroup_version; - DBUG_RETURN(m_receiver.create_file(file, tmp)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } if(m_receiver.get_filegroup(tmp, NdbDictionary::Object::Tablespace, file.m_filegroup_name.c_str()) == 0){ - DBUG_RETURN(m_receiver.create_file(file, tmp, force)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } DBUG_RETURN(-1); } @@ -4414,53 +4427,65 @@ NdbDictionaryImpl::dropDatafile(const NdbDatafileImpl & file){ } int -NdbDictionaryImpl::createUndofile(const NdbUndofileImpl & file, bool force){ +NdbDictionaryImpl::createUndofile(const NdbUndofileImpl & file, + bool force, + NdbDictObjectImpl* obj) +{ 
DBUG_ENTER("NdbDictionaryImpl::createUndofile"); NdbFilegroupImpl tmp(NdbDictionary::Object::LogfileGroup); if(file.m_filegroup_version != ~(Uint32)0){ tmp.m_id = file.m_filegroup_id; tmp.m_version = file.m_filegroup_version; - DBUG_RETURN(m_receiver.create_file(file, tmp)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } if(m_receiver.get_filegroup(tmp, NdbDictionary::Object::LogfileGroup, file.m_filegroup_name.c_str()) == 0){ - DBUG_RETURN(m_receiver.create_file(file, tmp, force)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } DBUG_PRINT("info", ("Failed to find filegroup")); DBUG_RETURN(-1); } int -NdbDictionaryImpl::dropUndofile(const NdbUndofileImpl & file){ +NdbDictionaryImpl::dropUndofile(const NdbUndofileImpl & file) +{ return m_receiver.drop_file(file); } int -NdbDictionaryImpl::createTablespace(const NdbTablespaceImpl & fg){ - return m_receiver.create_filegroup(fg); +NdbDictionaryImpl::createTablespace(const NdbTablespaceImpl & fg, + NdbDictObjectImpl* obj) +{ + return m_receiver.create_filegroup(fg, obj); } int -NdbDictionaryImpl::dropTablespace(const NdbTablespaceImpl & fg){ +NdbDictionaryImpl::dropTablespace(const NdbTablespaceImpl & fg) +{ return m_receiver.drop_filegroup(fg); } int -NdbDictionaryImpl::createLogfileGroup(const NdbLogfileGroupImpl & fg){ - return m_receiver.create_filegroup(fg); +NdbDictionaryImpl::createLogfileGroup(const NdbLogfileGroupImpl & fg, + NdbDictObjectImpl* obj) +{ + return m_receiver.create_filegroup(fg, obj); } int -NdbDictionaryImpl::dropLogfileGroup(const NdbLogfileGroupImpl & fg){ +NdbDictionaryImpl::dropLogfileGroup(const NdbLogfileGroupImpl & fg) +{ return m_receiver.drop_filegroup(fg); } int NdbDictInterface::create_file(const NdbFileImpl & file, const NdbFilegroupImpl & group, - bool overwrite){ + bool overwrite, + NdbDictObjectImpl* obj) +{ DBUG_ENTER("NdbDictInterface::create_file"); UtilBufferWriter w(m_buffer); DictFilegroupInfo::File f; f.init(); @@ -4503,23 +4528,39 @@ NdbDictInterface::create_file(const NdbFileImpl & file, Send signal without time-out since creating files can take a very long time if the file is very big. 
*/ - DBUG_RETURN(dictSignal(&tSignal, ptr, 1, - 0, // master - WAIT_CREATE_INDX_REQ, - -1, 100, - err)); + int ret = dictSignal(&tSignal, ptr, 1, + 0, // master + WAIT_CREATE_INDX_REQ, + -1, 100, + err); + + if (ret == 0 && obj) + { + Uint32* data = (Uint32*)m_buffer.get_data(); + obj->m_id = data[0]; + obj->m_version = data[1]; + } + + DBUG_RETURN(ret); } void NdbDictInterface::execCREATE_FILE_CONF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) + LinearSectionPtr ptr[3]) { + const CreateFileConf* conf= + CAST_CONSTPTR(CreateFileConf, signal->getDataPtr()); + m_buffer.grow(4 * 2); // 2 words + Uint32* data = (Uint32*)m_buffer.get_data(); + data[0] = conf->fileId; + data[1] = conf->fileVersion; + m_waiter.signal(NO_WAIT); } void NdbDictInterface::execCREATE_FILE_REF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) + LinearSectionPtr ptr[3]) { const CreateFileRef* ref = CAST_CONSTPTR(CreateFileRef, signal->getDataPtr()); @@ -4529,7 +4570,8 @@ NdbDictInterface::execCREATE_FILE_REF(NdbApiSignal * signal, } int -NdbDictInterface::drop_file(const NdbFileImpl & file){ +NdbDictInterface::drop_file(const NdbFileImpl & file) +{ DBUG_ENTER("NdbDictInterface::drop_file"); NdbApiSignal tSignal(m_reference); tSignal.theReceiversBlockNumber = DBDICT; @@ -4569,7 +4611,9 @@ NdbDictInterface::execDROP_FILE_REF(NdbApiSignal * signal, } int -NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group){ +NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group, + NdbDictObjectImpl* obj) +{ DBUG_ENTER("NdbDictInterface::create_filegroup"); UtilBufferWriter w(m_buffer); DictFilegroupInfo::Filegroup fg; fg.init(); @@ -4638,17 +4682,32 @@ NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group){ ptr[0].sz = m_buffer.length() / 4; int err[] = { CreateFilegroupRef::Busy, CreateFilegroupRef::NotMaster, 0}; - DBUG_RETURN(dictSignal(&tSignal, ptr, 1, - 0, // master - WAIT_CREATE_INDX_REQ, - DICT_WAITFOR_TIMEOUT, 100, - err)); + int ret = dictSignal(&tSignal, ptr, 1, + 0, // master + WAIT_CREATE_INDX_REQ, + DICT_WAITFOR_TIMEOUT, 100, + err); + + if (ret == 0 && obj) + { + Uint32* data = (Uint32*)m_buffer.get_data(); + obj->m_id = data[0]; + obj->m_version = data[1]; + } + + DBUG_RETURN(ret); } void NdbDictInterface::execCREATE_FILEGROUP_CONF(NdbApiSignal * signal, LinearSectionPtr ptr[3]) { + const CreateFilegroupConf* conf= + CAST_CONSTPTR(CreateFilegroupConf, signal->getDataPtr()); + m_buffer.grow(4 * 2); // 2 words + Uint32* data = (Uint32*)m_buffer.get_data(); + data[0] = conf->filegroupId; + data[1] = conf->filegroupVersion; m_waiter.signal(NO_WAIT); } @@ -4664,7 +4723,8 @@ NdbDictInterface::execCREATE_FILEGROUP_REF(NdbApiSignal * signal, } int -NdbDictInterface::drop_filegroup(const NdbFilegroupImpl & group){ +NdbDictInterface::drop_filegroup(const NdbFilegroupImpl & group) +{ DBUG_ENTER("NdbDictInterface::drop_filegroup"); NdbApiSignal tSignal(m_reference); tSignal.theReceiversBlockNumber = DBDICT; diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp index b6961edd019..5f5295b9ffc 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -52,6 +52,8 @@ protected: m_status(NdbDictionary::Object::New) { m_id = -1; } + + friend class NdbDictionary::ObjectId; }; /** @@ -184,6 +186,7 @@ public: Vector m_fragments; Uint64 m_max_rows; + Uint64 m_min_rows; Uint32 m_default_no_part_flag; bool m_linear_flag; bool m_logging; @@ -468,9 +471,10 @@ public: static int 
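// How the id/version gets back to the caller: dictSignal() blocks in
// m_waiter, the CONF handlers stash two words in m_buffer (fileId and
// fileVersion, or filegroupId and filegroupVersion), and create_file()/
// create_filegroup() copy them into the NdbDictObjectImpl once dictSignal()
// returns 0. Receiving half, condensed from the code above:
//
//   m_buffer.grow(4 * 2);                        // room for two words
//   Uint32* data = (Uint32*)m_buffer.get_data();
//   data[0] = conf->fileId;                      // read back by the caller
//   data[1] = conf->fileVersion;
//   m_waiter.signal(NO_WAIT);                    // wake the waiting thread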
parseFilegroupInfo(NdbFilegroupImpl &dst, const Uint32 * data, Uint32 len); - int create_file(const NdbFileImpl &, const NdbFilegroupImpl&, bool overwrite = false); + int create_file(const NdbFileImpl &, const NdbFilegroupImpl&, + bool overwrite, NdbDictObjectImpl*); int drop_file(const NdbFileImpl &); - int create_filegroup(const NdbFilegroupImpl &); + int create_filegroup(const NdbFilegroupImpl &, NdbDictObjectImpl*); int drop_filegroup(const NdbFilegroupImpl &); int get_filegroup(NdbFilegroupImpl&, NdbDictionary::Object::Type, Uint32); @@ -622,17 +626,17 @@ public: NdbEventImpl * getBlobEvent(const NdbEventImpl& ev, uint col_no); NdbEventImpl * getEventImpl(const char * internalName); - int createDatafile(const NdbDatafileImpl &, bool force = false); + int createDatafile(const NdbDatafileImpl &, bool force, NdbDictObjectImpl*); int dropDatafile(const NdbDatafileImpl &); - int createUndofile(const NdbUndofileImpl &, bool force = false); + int createUndofile(const NdbUndofileImpl &, bool force, NdbDictObjectImpl*); int dropUndofile(const NdbUndofileImpl &); - int createTablespace(const NdbTablespaceImpl &); + int createTablespace(const NdbTablespaceImpl &, NdbDictObjectImpl*); int dropTablespace(const NdbTablespaceImpl &); - int createLogfileGroup(const NdbLogfileGroupImpl &); + int createLogfileGroup(const NdbLogfileGroupImpl &, NdbDictObjectImpl*); int dropLogfileGroup(const NdbLogfileGroupImpl &); - + const NdbError & getNdbError() const; NdbError m_error; Uint32 m_local_table_data_size; diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index 7762785ef61..ef535cf9e26 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -85,7 +85,12 @@ RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) { RestoreMetaData::~RestoreMetaData(){ for(Uint32 i= 0; i < allTables.size(); i++) - delete allTables[i]; + { + TableS *table = allTables[i]; + for(Uint32 j= 0; j < table->m_fragmentInfo.size(); j++) + delete table->m_fragmentInfo[j]; + delete table; + } allTables.clear(); } @@ -118,6 +123,9 @@ RestoreMetaData::loadContent() return 0; if(!readGCPEntry()) return 0; + + if(!readFragmentInfo()) + return 0; return 1; } @@ -353,6 +361,52 @@ RestoreMetaData::readGCPEntry() { return true; } +bool +RestoreMetaData::readFragmentInfo() +{ + BackupFormat::CtlFile::FragmentInfo fragInfo; + TableS * table = 0; + Uint32 tableId = RNIL; + + while (buffer_read(&fragInfo, 4, 2) == 2) + { + fragInfo.SectionType = ntohl(fragInfo.SectionType); + fragInfo.SectionLength = ntohl(fragInfo.SectionLength); + + if (fragInfo.SectionType != BackupFormat::FRAGMENT_INFO) + { + err << "readFragmentInfo invalid section type: " << + fragInfo.SectionType << endl; + return false; + } + + if (buffer_read(&fragInfo.TableId, (fragInfo.SectionLength-2)*4, 1) != 1) + { + err << "readFragmentInfo invalid section length: " << + fragInfo.SectionLength << endl; + return false; + } + + fragInfo.TableId = ntohl(fragInfo.TableId); + if (fragInfo.TableId != tableId) + { + tableId = fragInfo.TableId; + table = getTable(tableId); + } + + FragmentInfo * tmp = new FragmentInfo; + tmp->fragmentNo = ntohl(fragInfo.FragmentNo); + tmp->noOfRecords = ntohl(fragInfo.NoOfRecordsLow) + + (((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32); + tmp->filePosLow = ntohl(fragInfo.FilePosLow); + tmp->filePosHigh = ntohl(fragInfo.FilePosHigh); + + table->m_fragmentInfo.push_back(tmp); + table->m_noOfRecords += tmp->noOfRecords; + } + return true; +} + 
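// readFragmentInfo() above streams FRAGMENT_INFO sections until buffer_read()
// runs dry: two header words, a section-type check, then the remaining
// (SectionLength - 2) words, everything converted from network byte order.
// Each 64-bit record count is reassembled as
//
//   Uint64 rows = ntohl(lowWord) + (((Uint64)ntohl(highWord)) << 32);
//
// and accumulated into TableS::m_noOfRecords, which the restore consumer
// later feeds to setMinRows()/setMaxRows() so the kernel pre-allocates
// enough pages for the data about to be restored.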
TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) : m_dictTable(tableImpl) { @@ -360,6 +414,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) m_noOfNullable = m_nullBitmaskSize = 0; m_auto_val_id= ~(Uint32)0; m_max_auto_val= 0; + m_noOfRecords= 0; backupVersion = version; isSysTable = false; @@ -1161,4 +1216,5 @@ operator<<(NdbOut& ndbout, const TableS & table){ template class Vector; template class Vector; template class Vector; +template class Vector; template class Vector; diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp index 8698d0943e2..e824f1bbdc0 100644 --- a/storage/ndb/tools/restore/Restore.hpp +++ b/storage/ndb/tools/restore/Restore.hpp @@ -114,6 +114,14 @@ public: AttributeData * getData(int i) const; }; // class TupleS +struct FragmentInfo +{ + Uint32 fragmentNo; + Uint64 noOfRecords; + Uint32 filePosLow; + Uint32 filePosHigh; +}; + class TableS { friend class TupleS; @@ -136,6 +144,9 @@ class TableS { bool isSysTable; + Uint64 m_noOfRecords; + Vector m_fragmentInfo; + void createAttr(NdbDictionary::Column *column); public: @@ -146,6 +157,9 @@ public: Uint32 getTableId() const { return m_dictTable->getTableId(); } + Uint32 getNoOfRecords() const { + return m_noOfRecords; + } /* void setMysqlTableName(char * tableName) { strpcpy(mysqlTableName, tableName); @@ -286,6 +300,7 @@ class RestoreMetaData : public BackupFile { bool markSysTables(); bool readGCPEntry(); + bool readFragmentInfo(); Uint32 readMetaTableList(); Uint32 m_startGCP; diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index 6b0d42ee0d2..b6a5728b0c4 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -709,6 +709,16 @@ BackupRestore::table(const TableS & table){ copy.setFragmentData((const void *)ng_array, no_parts << 1); } + /* + update min and max rows to reflect the table, this to + ensure that memory is allocated properly in the ndb kernel + */ + copy.setMinRows(table.getNoOfRecords()); + if (table.getNoOfRecords() > copy.getMaxRows()) + { + copy.setMaxRows(table.getNoOfRecords()); + } + if (dict->createTable(copy) == -1) { err << "Create table " << table.getTableName() << " failed: "