From 46e8ad78222425e416e8eaf56da8e23f30dd9714 Mon Sep 17 00:00:00 2001 From: "jmiller/ndbdev@mysql.com/ndb08.mysql.com" <> Date: Fri, 15 Dec 2006 16:34:29 +0100 Subject: [PATCH 01/35] New cluster replication test cases for replication options --- mysql-test/r/rpl_ndb_do_db.result | 60 +++++++++++++++++++++++ mysql-test/r/rpl_ndb_do_table.result | 25 ++++++++++ mysql-test/r/rpl_ndb_rep_ignore.result | 56 +++++++++++++++++++++ mysql-test/t/rpl_ndb_do_db-slave.opt | 1 + mysql-test/t/rpl_ndb_do_db.test | 55 +++++++++++++++++++++ mysql-test/t/rpl_ndb_do_table-slave.opt | 1 + mysql-test/t/rpl_ndb_do_table.test | 31 ++++++++++++ mysql-test/t/rpl_ndb_rep_ignore-slave.opt | 1 + mysql-test/t/rpl_ndb_rep_ignore.test | 56 +++++++++++++++++++++ 9 files changed, 286 insertions(+) create mode 100644 mysql-test/r/rpl_ndb_do_db.result create mode 100644 mysql-test/r/rpl_ndb_do_table.result create mode 100644 mysql-test/r/rpl_ndb_rep_ignore.result create mode 100644 mysql-test/t/rpl_ndb_do_db-slave.opt create mode 100644 mysql-test/t/rpl_ndb_do_db.test create mode 100644 mysql-test/t/rpl_ndb_do_table-slave.opt create mode 100644 mysql-test/t/rpl_ndb_do_table.test create mode 100644 mysql-test/t/rpl_ndb_rep_ignore-slave.opt create mode 100644 mysql-test/t/rpl_ndb_rep_ignore.test diff --git a/mysql-test/r/rpl_ndb_do_db.result b/mysql-test/r/rpl_ndb_do_db.result new file mode 100644 index 00000000000..32f25d5f5a4 --- /dev/null +++ b/mysql-test/r/rpl_ndb_do_db.result @@ -0,0 +1,60 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +DROP DATABASE IF EXISTS replica; +Warnings: +Note 1008 Can't drop database 'replica'; database doesn't exist +CREATE DATABASE replica; +CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +USE replica; +CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +USE test; +INSERT INTO t1 VALUES(1, repeat('abc',10)); +INSERT INTO t2 VALUES(1, repeat('abc',10)); +SHOW TABLES; +Tables_in_test +t1 +t2 +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +SELECT COUNT(*) FROM t2; +COUNT(*) +1 +USE replica; +INSERT INTO replica.t1 VALUES(2, repeat('def',200)); +INSERT INTO replica.t2 VALUES(2, repeat('def',200)); +SHOW TABLES; +Tables_in_replica +t1 +t2 +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +SELECT COUNT(*) FROM t2; +COUNT(*) +1 +SHOW TABLES; +Tables_in_test +USE replica; +SHOW TABLES; +Tables_in_replica +t1 +t2 +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +SELECT COUNT(*) FROM t2; +COUNT(*) +1 +USE test; +SHOW TABLES; +Tables_in_test +USE test; +DROP TABLE t1, t2; +DROP DATABASE IF EXISTS replica; diff --git a/mysql-test/r/rpl_ndb_do_table.result b/mysql-test/r/rpl_ndb_do_table.result new file mode 100644 index 00000000000..761c7837fde --- /dev/null +++ b/mysql-test/r/rpl_ndb_do_table.result @@ -0,0 +1,25 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +DROP TABLE IF EXISTS t1, t2; +Warnings: +Note 1051 Unknown table 't1' +Note 1051 Unknown table 't2' +CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +INSERT INTO t1 VALUES(1, repeat('abc',10)); +INSERT INTO t1 VALUES(2, repeat('def',200)); +INSERT INTO t1 VALUES(3, repeat('ghi',3000)); +INSERT INTO 
t2 VALUES(1, repeat('abc',10)); +INSERT INTO t2 VALUES(2, repeat('def',200)); +INSERT INTO t2 VALUES(3, repeat('ghi',3000)); +SHOW TABLES; +Tables_in_test +t1 +SELECT COUNT(*) FROM t1; +COUNT(*) +3 +DROP TABLE IF EXISTS t1, t2; diff --git a/mysql-test/r/rpl_ndb_rep_ignore.result b/mysql-test/r/rpl_ndb_rep_ignore.result new file mode 100644 index 00000000000..45d9e5db857 --- /dev/null +++ b/mysql-test/r/rpl_ndb_rep_ignore.result @@ -0,0 +1,56 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +DROP DATABASE IF EXISTS replica; +Warnings: +Note 1008 Can't drop database 'replica'; database doesn't exist +CREATE DATABASE replica; +CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +USE replica; +CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +USE test; +INSERT INTO t1 VALUES(1, repeat('abc',10)); +INSERT INTO t2 VALUES(1, repeat('abc',10)); +SHOW TABLES; +Tables_in_test +t1 +t2 +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +SELECT COUNT(*) FROM t2; +COUNT(*) +1 +USE replica; +INSERT INTO replica.t1 VALUES(2, repeat('def',200)); +INSERT INTO replica.t2 VALUES(2, repeat('def',200)); +SHOW TABLES; +Tables_in_replica +t1 +t2 +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +SELECT COUNT(*) FROM t2; +COUNT(*) +1 +SHOW TABLES; +Tables_in_test +USE replica; +SHOW TABLES; +Tables_in_replica +t2 +SELECT COUNT(*) FROM t2; +COUNT(*) +1 +USE test; +SHOW TABLES; +Tables_in_test +USE test; +DROP TABLE t1, t2; +DROP DATABASE IF EXISTS replica; diff --git a/mysql-test/t/rpl_ndb_do_db-slave.opt b/mysql-test/t/rpl_ndb_do_db-slave.opt new file mode 100644 index 00000000000..fb5e378b65f --- /dev/null +++ b/mysql-test/t/rpl_ndb_do_db-slave.opt @@ -0,0 +1 @@ +--replicate-do-db=replica diff --git a/mysql-test/t/rpl_ndb_do_db.test b/mysql-test/t/rpl_ndb_do_db.test new file mode 100644 index 00000000000..52c638261bf --- /dev/null +++ b/mysql-test/t/rpl_ndb_do_db.test @@ -0,0 +1,55 @@ +########################################################### +# Author: Jeb +# Date: 14-12-2006 +# Purpose: To test --replicate-do-database=db_name +# using cluster. Only replica should replicate. +########################################################## + +--source include/have_ndb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +DROP DATABASE IF EXISTS replica; + +# Create database and tables for the test. 
+CREATE DATABASE replica; +CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +USE replica; +CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; + +# Insert data into db that should not be picked up by slave +USE test; +INSERT INTO t1 VALUES(1, repeat('abc',10)); +INSERT INTO t2 VALUES(1, repeat('abc',10)); +SHOW TABLES; +SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; + +# Insert data into db that should be replicated +USE replica; +INSERT INTO replica.t1 VALUES(2, repeat('def',200)); +INSERT INTO replica.t2 VALUES(2, repeat('def',200)); +SHOW TABLES; +SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; + +# Check results on slave +--sync_slave_with_master +SHOW TABLES; +USE replica; +SHOW TABLES; +SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; +USE test; +SHOW TABLES; + +# Cleanup from testing +connection master; +USE test; +DROP TABLE t1, t2; +DROP DATABASE IF EXISTS replica; +--sync_slave_with_master + +# End 5.1 test case diff --git a/mysql-test/t/rpl_ndb_do_table-slave.opt b/mysql-test/t/rpl_ndb_do_table-slave.opt new file mode 100644 index 00000000000..da345474216 --- /dev/null +++ b/mysql-test/t/rpl_ndb_do_table-slave.opt @@ -0,0 +1 @@ +--replicate-do-table=test.t1 diff --git a/mysql-test/t/rpl_ndb_do_table.test b/mysql-test/t/rpl_ndb_do_table.test new file mode 100644 index 00000000000..8cc34a6ba5c --- /dev/null +++ b/mysql-test/t/rpl_ndb_do_table.test @@ -0,0 +1,31 @@ +########################################################### +# Author: Jeb +# Date: 14-12-2006 +# Purpose: To test --replicate-do-table=db_name.tbl_name +# using cluster. Only t1 should replicate. +########################################################## + +--source include/have_ndb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +DROP TABLE IF EXISTS t1, t2; + + +CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; + +INSERT INTO t1 VALUES(1, repeat('abc',10)); +INSERT INTO t1 VALUES(2, repeat('def',200)); +INSERT INTO t1 VALUES(3, repeat('ghi',3000)); +INSERT INTO t2 VALUES(1, repeat('abc',10)); +INSERT INTO t2 VALUES(2, repeat('def',200)); +INSERT INTO t2 VALUES(3, repeat('ghi',3000)); + +--sync_slave_with_master +SHOW TABLES; +SELECT COUNT(*) FROM t1; + +connection master; +DROP TABLE IF EXISTS t1, t2; +--sync_slave_with_master diff --git a/mysql-test/t/rpl_ndb_rep_ignore-slave.opt b/mysql-test/t/rpl_ndb_rep_ignore-slave.opt new file mode 100644 index 00000000000..6262cf451a6 --- /dev/null +++ b/mysql-test/t/rpl_ndb_rep_ignore-slave.opt @@ -0,0 +1 @@ +--replicate-ignore-db=test --replicate-ignore-table=replica.t1 diff --git a/mysql-test/t/rpl_ndb_rep_ignore.test b/mysql-test/t/rpl_ndb_rep_ignore.test new file mode 100644 index 00000000000..14a583eac8e --- /dev/null +++ b/mysql-test/t/rpl_ndb_rep_ignore.test @@ -0,0 +1,56 @@ +########################################################### +# Author: Jeb +# Date: 15-12-2006 +# Purpose: To test --replicate-ignore-table=db_name.tbl_name +# and --replicate-ignore-db=db_name +# using cluster. Only replica should replicate. +########################################################## + +--source include/have_ndb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +DROP DATABASE IF EXISTS replica; + +# Create database and tables for the test. 
+CREATE DATABASE replica; +CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +USE replica; +CREATE TABLE replica.t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; +CREATE TABLE replica.t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; + +# Insert data into db that should not be picked up by slave +USE test; +INSERT INTO t1 VALUES(1, repeat('abc',10)); +INSERT INTO t2 VALUES(1, repeat('abc',10)); +SHOW TABLES; +SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; + +# Insert data into db that should be replicated +USE replica; +INSERT INTO replica.t1 VALUES(2, repeat('def',200)); +INSERT INTO replica.t2 VALUES(2, repeat('def',200)); +SHOW TABLES; +SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; + +# Check results on slave +--sync_slave_with_master +SHOW TABLES; +USE replica; +SHOW TABLES; +#SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; +USE test; +SHOW TABLES; + +# Cleanup from testing +connection master; +USE test; +DROP TABLE t1, t2; +DROP DATABASE IF EXISTS replica; +--sync_slave_with_master + +# End 5.1 test case From 40ab7bdb8fca0cb91ea79505d291ad3d4ead87c4 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Tue, 2 Jan 2007 10:05:58 +0100 Subject: [PATCH 02/35] bug#24667 After ALTER TABLE operation ndb_dd table becomes regular ndb: removed use of environment variable NDB_DEFAULT_DISK --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 5614cc3ecd8..315470dca7c 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4806,7 +4806,7 @@ int ha_ndbcluster::create(const char *name, if ((my_errno= create_ndb_column(col, field, info))) DBUG_RETURN(my_errno); - if (info->storage_media == HA_SM_DISK || getenv("NDB_DEFAULT_DISK")) + if (info->storage_media == HA_SM_DISK) col.setStorageType(NdbDictionary::Column::StorageTypeDisk); else col.setStorageType(NdbDictionary::Column::StorageTypeMemory); From 866b02b9347e8e87f0c2ae186e55ded89602866a Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Tue, 2 Jan 2007 15:47:58 +0100 Subject: [PATCH 03/35] Added --ndb-use-transactions --- sql/mysqld.cc | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 1dd15398cd1..ce12db1c53a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4531,8 +4531,8 @@ enum options_mysqld OPT_LOG_BIN_TRUST_FUNCTION_CREATORS, OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG, OPT_INNODB, OPT_ISAM, - OPT_ENGINE_CONDITION_PUSHDOWN, - OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, + OPT_ENGINE_CONDITION_PUSHDOWN, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, + OPT_NDB_USE_EXACT_COUNT, OPT_NDB_USE_TRANSACTIONS, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME, OPT_NDB_MGMD, OPT_NDB_NODEID, @@ -5158,6 +5158,17 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb-use-transactions", OPT_NDB_USE_TRANSACTIONS, + "Use transactions for large inserts, if enabled then large " + "inserts will be split into several smaller transactions", + (gptr*) &global_system_variables.ndb_use_transactions, + (gptr*) &global_system_variables.ndb_use_transactions, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + 
{"ndb_use_transactions", OPT_NDB_USE_TRANSACTIONS, + "same as --ndb-use-transactions.", + (gptr*) &global_system_variables.ndb_use_transactions, + (gptr*) &global_system_variables.ndb_use_transactions, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb-shm", OPT_NDB_SHM, "Use shared memory connections when available.", (gptr*) &opt_ndb_shm, From 500918f2ab3e47fb32321f7a9eea5bbd88855b82 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Tue, 2 Jan 2007 22:31:29 +0100 Subject: [PATCH 04/35] bug #25296 Truncate table converts NDB disk based tables to in-memory tables: implemented ha_ndbcluster::update_create_info --- mysql-test/r/ndb_dd_alter.result | 23 ++++++++++++++++++++++- mysql-test/t/ndb_dd_alter.test | 3 +++ sql/ha_ndbcluster.cc | 6 ++++++ sql/ha_ndbcluster.h | 1 + 4 files changed, 32 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/ndb_dd_alter.result b/mysql-test/r/ndb_dd_alter.result index fec4e5496ad..cbe8db303b4 100644 --- a/mysql-test/r/ndb_dd_alter.result +++ b/mysql-test/r/ndb_dd_alter.result @@ -419,6 +419,27 @@ t1 CREATE TABLE `t1` ( PRIMARY KEY (`a1`), KEY `a3_i` (`a3`) ) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 +TRUNCATE TABLE test.t1; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`), + KEY `a3_i` (`a3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 ALTER TABLE test.t1 DROP a14; ALTER TABLE test.t1 DROP a13; ALTER TABLE test.t1 DROP a12; @@ -438,7 +459,7 @@ t1 CREATE TABLE `t1` ( `a4` bit(1) DEFAULT NULL, `a5` tinyint(4) DEFAULT NULL, KEY `a3_i` (`a3`) -) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 DROP TABLE test.t1; ALTER TABLESPACE ts DROP DATAFILE './table_space/datafile.dat' diff --git a/mysql-test/t/ndb_dd_alter.test b/mysql-test/t/ndb_dd_alter.test index 6a9bdb79f6f..ef7807e8723 100644 --- a/mysql-test/t/ndb_dd_alter.test +++ b/mysql-test/t/ndb_dd_alter.test @@ -221,6 +221,9 @@ ALTER TABLE test.t1 DROP INDEX a2_i; SHOW CREATE TABLE test.t1; +TRUNCATE TABLE test.t1; + +SHOW CREATE TABLE test.t1; #### Try to ALTER DD Tables and drop columns diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 315470dca7c..c11c7e3c1f4 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -8309,6 +8309,12 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) DBUG_RETURN(0); } +void ha_ndbcluster::update_create_info(HA_CREATE_INFO *create_info) +{ + if (get_tablespace_name(current_thd,0,0)) + create_info->storage_media= HA_SM_DISK; +} + char* ha_ndbcluster::update_table_comment( /* out: table comment + additional */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 50f24c7a4cf..509211014c6 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -884,6 +884,7 @@ private: ulonglong *nb_reserved_values); bool uses_blob_value(); + void update_create_info(HA_CREATE_INFO *create_info); char *update_table_comment(const char * comment); int write_ndb_file(const char *name); From d9fa993e07f6f0827aa2d217b675a9baa6b2b8e1 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Wed, 3 Jan 2007 06:17:34 +0100 Subject: [PATCH 05/35] 
ndb - bug#25364 on master node failure during qmgr-commitreq make sure to remove all committed failed nodes from failed/prepfailed arrays --- .../kernel/signaldata/DumpStateOrd.hpp | 1 + ndb/src/kernel/blocks/ERROR_codes.txt | 3 + ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 4 ++ ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 72 +++++++++++++++---- ndb/test/ndbapi/testNodeRestart.cpp | 43 +++++++++++ ndb/test/run-test/daily-basic-tests.txt | 4 ++ 6 files changed, 114 insertions(+), 13 deletions(-) diff --git a/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/ndb/include/kernel/signaldata/DumpStateOrd.hpp index a2993ad5d03..04f94aaba58 100644 --- a/ndb/include/kernel/signaldata/DumpStateOrd.hpp +++ b/ndb/include/kernel/signaldata/DumpStateOrd.hpp @@ -68,6 +68,7 @@ public: // 100-105 TUP and ACC // 200-240 UTIL // 300-305 TRIX + QmgrErr935 = 935, NdbfsDumpFileStat = 400, NdbfsDumpAllFiles = 401, NdbfsDumpOpenFiles = 402, diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index 16f5da8a553..0bcc99a6334 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -21,6 +21,9 @@ Crash president when he starts to run in ArbitState 1-9. 910: Crash new president after node crash +935 : Crash master on node failure (delayed) + and skip sending GSN_COMMIT_FAILREQ to specified node + ERROR CODES FOR TESTING NODE FAILURE, GLOBAL CHECKPOINT HANDLING: ----------------------------------------------------------------- diff --git a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index e728ea81a7d..0c4bdc5d3c1 100644 --- a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -426,6 +426,10 @@ private: StopReq c_stopReq; bool check_multi_node_shutdown(Signal* signal); + +#ifdef ERROR_INSERT + Uint32 c_error_insert_extra; +#endif }; #endif diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index cc981f37987..66ee7549b9d 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -3110,6 +3110,18 @@ Qmgr::sendCommitFailReq(Signal* signal) for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { jam(); ptrAss(nodePtr, nodeRec); + +#ifdef ERROR_INSERT + if (ERROR_INSERTED(935) && nodePtr.i == c_error_insert_extra) + { + ndbout_c("skipping node %d", c_error_insert_extra); + CLEAR_ERROR_INSERT_VALUE; + signal->theData[0] = 9999; + sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 1000, 1); + continue; + } +#endif + if (nodePtr.p->phase == ZRUNNING) { jam(); nodePtr.p->sendCommitFailReqStatus = Q_ACTIVE; @@ -3180,6 +3192,33 @@ void Qmgr::execPREP_FAILREF(Signal* signal) return; }//Qmgr::execPREP_FAILREF() +static +Uint32 +clear_nodes(Uint32 dstcnt, Uint16 dst[], Uint32 srccnt, const Uint16 src[]) +{ + if (srccnt == 0) + return dstcnt; + + Uint32 pos = 0; + for (Uint32 i = 0; i 0) { - jam(); - guard0 = cnoFailedNodes - 1; - arrGuard(guard0 + cnoCommitFailedNodes, MAX_NDB_NODES); - for (Tj = 0; Tj <= guard0; Tj++) { - jam(); - cfailedNodes[Tj] = cfailedNodes[Tj + cnoCommitFailedNodes]; - }//for - }//if - }//if + + /** + * Remove committed nodes from failed/prepared + */ + cnoFailedNodes = clear_nodes(cnoFailedNodes, + cfailedNodes, + cnoCommitFailedNodes, + ccommitFailedNodes); + cnoPrepFailedNodes = clear_nodes(cnoPrepFailedNodes, + cprepFailedNodes, + cnoCommitFailedNodes, + ccommitFailedNodes); cnoCommitFailedNodes = 0; }//if /**----------------------------------------------------------------------- @@ 
-4658,6 +4696,14 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal) default: ; }//switch + +#ifdef ERROR_INSERT + if (signal->theData[0] == 935 && signal->getLength() == 2) + { + SET_ERROR_INSERT_VALUE(935); + c_error_insert_extra = signal->theData[1]; + } +#endif }//Qmgr::execDUMP_STATE_ORD() void Qmgr::execSET_VAR_REQ(Signal* signal) diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index 082013f07cc..c0c5cc5163a 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -955,6 +955,46 @@ int runBug24717(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runBug25364(NDBT_Context* ctx, NDBT_Step* step){ + int result = NDBT_OK; + NdbRestarter restarter; + Ndb* pNdb = GETNDB(step); + int loops = ctx->getNumLoops(); + + if (restarter.getNumDbNodes() < 4) + return NDBT_OK; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + + for (; loops; loops --) + { + int master = restarter.getMasterNodeId(); + int victim = restarter.getRandomNodeOtherNodeGroup(master, rand()); + int second = restarter.getRandomNodeSameNodeGroup(victim, rand()); + + int dump[] = { 935, victim } ; + if (restarter.dumpStateOneNode(master, dump, 2)) + return NDBT_FAILED; + + if (restarter.dumpStateOneNode(master, val2, 2)) + return NDBT_FAILED; + + if (restarter.restartOneDbNode(second, false, true, true)) + return NDBT_FAILED; + + int nodes[2] = { master, second }; + if (restarter.waitNodesNoStart(nodes, 2)) + return NDBT_FAILED; + + restarter.startNodes(nodes, 2); + + if (restarter.waitNodesStarted(nodes, 2)) + return NDBT_FAILED; + } + + return NDBT_OK; +} + NDBT_TESTSUITE(testNodeRestart); TESTCASE("NoLoad", @@ -1271,6 +1311,9 @@ TESTCASE("Bug20185", TESTCASE("Bug24717", ""){ INITIALIZER(runBug24717); } +TESTCASE("Bug25364", ""){ + INITIALIZER(runBug25364); +} NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index a1443970388..41070275935 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -469,6 +469,10 @@ max-time: 1000 cmd: testNodeRestart args: -n Bug24717 T1 +max-time: 1000 +cmd: testNodeRestart +args: -n Bug25364 T1 + # OLD FLEX max-time: 500 cmd: flexBench From b448ad77c8266bb57c262a86b17c0a5ff7a00acb Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Wed, 3 Jan 2007 10:22:31 +0100 Subject: [PATCH 06/35] bug #25296 Truncate table converts NDB disk based tables to in-memory tables: ha_ndbcluster::update_create_infohas to check for explicit STORAGE MEMORY --- sql/ha_ndbcluster.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index c11c7e3c1f4..a70f63869c4 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -8311,7 +8311,8 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) void ha_ndbcluster::update_create_info(HA_CREATE_INFO *create_info) { - if (get_tablespace_name(current_thd,0,0)) + if (create_info->storage_media != HA_SM_MEMORY && + get_tablespace_name(current_thd,0,0)) create_info->storage_media= HA_SM_DISK; } From 82723332432eedae70bd22659b1b3845068ac171 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Wed, 3 Jan 2007 14:56:26 +0100 Subject: [PATCH 07/35] ndb_use_transactions is set from value for command line flag --- sql/sql_class.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/sql/sql_class.cc 
b/sql/sql_class.cc index 645ac6e28f3..8dd53262eea 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -290,9 +290,6 @@ void THD::init(void) variables.date_format); variables.datetime_format= date_time_format_copy((THD*) 0, variables.datetime_format); -#ifdef HAVE_NDBCLUSTER_DB - variables.ndb_use_transactions= 1; -#endif pthread_mutex_unlock(&LOCK_global_system_variables); server_status= SERVER_STATUS_AUTOCOMMIT; if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) From b913b2e7c29ef8847ec53b571db12684becc5cfd Mon Sep 17 00:00:00 2001 From: "jmiller/ndbdev@mysql.com/ndb08.mysql.com" <> Date: Thu, 4 Jan 2007 04:09:06 +0100 Subject: [PATCH 08/35] Many files: Updated from Tomas review --- mysql-test/r/rpl_ndb_do_db.result | 2 -- mysql-test/r/rpl_ndb_do_table.result | 3 --- mysql-test/r/rpl_ndb_rep_ignore.result | 2 -- mysql-test/t/rpl_ndb_do_db.test | 2 ++ mysql-test/t/rpl_ndb_do_table.test | 3 ++- mysql-test/t/rpl_ndb_rep_ignore.test | 2 ++ 6 files changed, 6 insertions(+), 8 deletions(-) diff --git a/mysql-test/r/rpl_ndb_do_db.result b/mysql-test/r/rpl_ndb_do_db.result index 32f25d5f5a4..316f5fc7e31 100644 --- a/mysql-test/r/rpl_ndb_do_db.result +++ b/mysql-test/r/rpl_ndb_do_db.result @@ -5,8 +5,6 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; DROP DATABASE IF EXISTS replica; -Warnings: -Note 1008 Can't drop database 'replica'; database doesn't exist CREATE DATABASE replica; CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; diff --git a/mysql-test/r/rpl_ndb_do_table.result b/mysql-test/r/rpl_ndb_do_table.result index 761c7837fde..a5854985352 100644 --- a/mysql-test/r/rpl_ndb_do_table.result +++ b/mysql-test/r/rpl_ndb_do_table.result @@ -5,9 +5,6 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; DROP TABLE IF EXISTS t1, t2; -Warnings: -Note 1051 Unknown table 't1' -Note 1051 Unknown table 't2' CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; INSERT INTO t1 VALUES(1, repeat('abc',10)); diff --git a/mysql-test/r/rpl_ndb_rep_ignore.result b/mysql-test/r/rpl_ndb_rep_ignore.result index 45d9e5db857..4e28a7e5865 100644 --- a/mysql-test/r/rpl_ndb_rep_ignore.result +++ b/mysql-test/r/rpl_ndb_rep_ignore.result @@ -5,8 +5,6 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; DROP DATABASE IF EXISTS replica; -Warnings: -Note 1008 Can't drop database 'replica'; database doesn't exist CREATE DATABASE replica; CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; diff --git a/mysql-test/t/rpl_ndb_do_db.test b/mysql-test/t/rpl_ndb_do_db.test index 52c638261bf..9b65d43f244 100644 --- a/mysql-test/t/rpl_ndb_do_db.test +++ b/mysql-test/t/rpl_ndb_do_db.test @@ -9,7 +9,9 @@ --source include/have_binlog_format_row.inc --source include/master-slave.inc +--disable_warnings DROP DATABASE IF EXISTS replica; +--enable_warnings # Create database and tables for the test. 
CREATE DATABASE replica; diff --git a/mysql-test/t/rpl_ndb_do_table.test b/mysql-test/t/rpl_ndb_do_table.test index 8cc34a6ba5c..278a326aefd 100644 --- a/mysql-test/t/rpl_ndb_do_table.test +++ b/mysql-test/t/rpl_ndb_do_table.test @@ -9,8 +9,9 @@ --source include/have_binlog_format_row.inc --source include/master-slave.inc +--disable_warnings DROP TABLE IF EXISTS t1, t2; - +--enable_warnings CREATE TABLE t1 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; CREATE TABLE t2 (a INT NOT NULL KEY, b text NOT NULL)ENGINE=NDB; diff --git a/mysql-test/t/rpl_ndb_rep_ignore.test b/mysql-test/t/rpl_ndb_rep_ignore.test index 14a583eac8e..47f5bce5527 100644 --- a/mysql-test/t/rpl_ndb_rep_ignore.test +++ b/mysql-test/t/rpl_ndb_rep_ignore.test @@ -10,7 +10,9 @@ --source include/have_binlog_format_row.inc --source include/master-slave.inc +--disable_warnings DROP DATABASE IF EXISTS replica; +--enable_warnings # Create database and tables for the test. CREATE DATABASE replica; From 408e4a61d15f87484d6f3f2fccf5fef5b7cd5cc7 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Thu, 4 Jan 2007 09:15:09 +0100 Subject: [PATCH 09/35] bug #25296 Truncate table converts NDB disk based tables to in-memory tables: ha_ndbcluster::update_create_info should only update for TRUNCATE since we need to detect change of STORAGE at ALTER TABLE --- sql/ha_ndbcluster.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a70f63869c4..4cd504d00a6 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -8311,8 +8311,10 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) void ha_ndbcluster::update_create_info(HA_CREATE_INFO *create_info) { - if (create_info->storage_media != HA_SM_MEMORY && - get_tablespace_name(current_thd,0,0)) + THD *thd= current_thd; + + if (thd->lex->sql_command == SQLCOM_TRUNCATE && + get_tablespace_name(thd,0,0)) create_info->storage_media= HA_SM_DISK; } From db8cd1ec448fd5b97954865823e0943ea2c5c995 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Thu, 4 Jan 2007 10:03:11 +0100 Subject: [PATCH 10/35] ndb - bug#25329 Fix most obvious error in dict/suma error handling of events --- .../ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 10 +++ .../ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 5 ++ storage/ndb/src/kernel/blocks/suma/Suma.cpp | 65 ++++++++++++++----- .../ndb/src/kernel/blocks/suma/SumaInit.cpp | 2 + 4 files changed, 65 insertions(+), 17 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index beb7e0ceb7b..3201cfdab89 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -10151,6 +10151,7 @@ void Dbdict::execSUB_START_REF(Signal* signal) SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend(); ref->senderRef = reference(); ref->senderData = subbPtr.p->m_senderData; + ref->errorCode = err; sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF, signal, SubStartRef::SignalLength2, JBB); c_opSubEvent.release(subbPtr); @@ -10213,6 +10214,7 @@ void Dbdict::execSUB_START_CONF(Signal* signal) #ifdef EVENT_PH3_DEBUG ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_CONF = (%d)", subbPtr.i); #endif + subbPtr.p->m_sub_start_conf = *conf; subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); completeSubStartReq(signal,subbPtr.i,0); } @@ -10252,6 +10254,9 @@ void Dbdict::completeSubStartReq(Signal* signal, #ifdef EVENT_DEBUG 
ndbout_c("SUB_START_CONF"); #endif + + SubStartConf* conf = (SubStartConf*)signal->getDataPtrSend(); + * conf = subbPtr.p->m_sub_start_conf; sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF, signal, SubStartConf::SignalLength, JBB); c_opSubEvent.release(subbPtr); @@ -10373,6 +10378,7 @@ void Dbdict::execSUB_STOP_REF(Signal* signal) SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend(); ref->senderRef = reference(); ref->senderData = subbPtr.p->m_senderData; + ref->errorCode = err; sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF, signal, SubStopRef::SignalLength, JBB); c_opSubEvent.release(subbPtr); @@ -10425,6 +10431,7 @@ void Dbdict::execSUB_STOP_CONF(Signal* signal) * Coordinator */ ndbrequire(refToBlock(senderRef) == DBDICT); + subbPtr.p->m_sub_stop_conf = *conf; subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); completeSubStopReq(signal,subbPtr.i,0); } @@ -10465,6 +10472,8 @@ void Dbdict::completeSubStopReq(Signal* signal, #ifdef EVENT_DEBUG ndbout_c("SUB_STOP_CONF"); #endif + SubStopConf* conf = (SubStopConf*)signal->getDataPtrSend(); + * conf = subbPtr.p->m_sub_stop_conf; sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF, signal, SubStopConf::SignalLength, JBB); c_opSubEvent.release(subbPtr); @@ -10713,6 +10722,7 @@ Dbdict::execSUB_REMOVE_REF(Signal* signal) SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtrSend(); ref->senderRef = reference(); ref->senderData = subbPtr.p->m_senderData; + ref->errorCode = err; sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_REF, signal, SubRemoveRef::SignalLength, JBB); } diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 718d53d8b96..414b3dabb52 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -52,6 +52,7 @@ #include #include #include +#include #include "SchemaFile.hpp" #include #include @@ -1632,6 +1633,10 @@ private: Uint32 m_senderRef; Uint32 m_senderData; Uint32 m_errorCode; + union { + SubStartConf m_sub_start_conf; + SubStopConf m_sub_stop_conf; + }; RequestTracker m_reqTracker; }; typedef Ptr OpSubEventPtr; diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 92efca36a35..6f45cfb1975 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -230,7 +230,6 @@ Suma::execREAD_CONFIG_REQ(Signal* signal) c_startup.m_wait_handover= false; c_failedApiNodes.clear(); - c_startup.m_restart_server_node_id = 0; // Server for my NR ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); conf->senderRef = reference(); @@ -261,6 +260,14 @@ Suma::execSTTOR(Signal* signal) { if(startphase == 5) { + if (ERROR_INSERTED(13029)) /* Hold startphase 5 */ + { + sendSignalWithDelay(SUMA_REF, GSN_STTOR, signal, + 30, signal->getLength()); + DBUG_VOID_RETURN; + } + + c_startup.m_restart_server_node_id = 0; getNodeGroupMembers(signal); if (typeOfStart == NodeState::ST_NODE_RESTART || typeOfStart == NodeState::ST_INITIAL_NODE_RESTART) @@ -373,6 +380,8 @@ Suma::execSUMA_START_ME_REF(Signal* signal) infoEvent("Suma: node %d refused %d", c_startup.m_restart_server_node_id, ref->errorCode); + + c_startup.m_restart_server_node_id++; send_start_me_req(signal); } @@ -887,6 +896,22 @@ Suma::execDUMP_STATE_ORD(Signal* signal){ ptr->m_buffer_head.m_page_id); } } + + if (tCase == 8006) + { + SET_ERROR_INSERT_VALUE(13029); + } + + if (tCase == 8007) + { + c_startup.m_restart_server_node_id = 
MAX_NDB_NODES + 1; + SET_ERROR_INSERT_VALUE(13029); + } + + if (tCase == 8008) + { + CLEAR_ERROR_INSERT_VALUE; + } } /************************************************************* @@ -1092,14 +1117,14 @@ Suma::execSUB_CREATE_REQ(Signal* signal) } } else { if (c_startup.m_restart_server_node_id && - refToNode(subRef) != c_startup.m_restart_server_node_id) + subRef != calcSumaBlockRef(c_startup.m_restart_server_node_id)) { /** * only allow "restart_server" Suma's to come through * for restart purposes */ jam(); - sendSubStartRef(signal, 1405); + sendSubCreateRef(signal, 1415); DBUG_VOID_RETURN; } // Check that id/key is unique @@ -2232,14 +2257,17 @@ Suma::execSUB_START_REQ(Signal* signal){ key.m_subscriptionKey = req->subscriptionKey; if (c_startup.m_restart_server_node_id && - refToNode(senderRef) != c_startup.m_restart_server_node_id) + senderRef != calcSumaBlockRef(c_startup.m_restart_server_node_id)) { /** * only allow "restart_server" Suma's to come through * for restart purposes */ jam(); - sendSubStartRef(signal, 1405); + Uint32 err = c_startup.m_restart_server_node_id != RNIL ? 1405 : + SubStartRef::NF_FakeErrorREF; + + sendSubStartRef(signal, err); DBUG_VOID_RETURN; } @@ -2454,6 +2482,21 @@ Suma::execSUB_STOP_REQ(Signal* signal){ DBUG_VOID_RETURN; } + if (c_startup.m_restart_server_node_id && + senderRef != calcSumaBlockRef(c_startup.m_restart_server_node_id)) + { + /** + * only allow "restart_server" Suma's to come through + * for restart purposes + */ + jam(); + Uint32 err = c_startup.m_restart_server_node_id != RNIL ? 1405 : + SubStopRef::NF_FakeErrorREF; + + sendSubStopRef(signal, err); + DBUG_VOID_RETURN; + } + if(!c_subscriptions.find(subPtr, key)){ jam(); DBUG_PRINT("error", ("not found")); @@ -2461,18 +2504,6 @@ Suma::execSUB_STOP_REQ(Signal* signal){ DBUG_VOID_RETURN; } - if (c_startup.m_restart_server_node_id && - refToNode(senderRef) != c_startup.m_restart_server_node_id) - { - /** - * only allow "restart_server" Suma's to come through - * for restart purposes - */ - jam(); - sendSubStopRef(signal, 1405); - DBUG_VOID_RETURN; - } - if (subPtr.p->m_state == Subscription::LOCKED) { jam(); DBUG_PRINT("error", ("locked")); diff --git a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp index a9b9727cf99..c6311058035 100644 --- a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp +++ b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp @@ -122,6 +122,8 @@ Suma::Suma(Block_context& ctx) : addRecSignal(GSN_SUB_GCP_COMPLETE_REP, &Suma::execSUB_GCP_COMPLETE_REP); + + c_startup.m_restart_server_node_id = RNIL; // Server for my NR } Suma::~Suma() From 8cf69ddb8b89d5db0696f56bd33a5693f04ab6e0 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Thu, 4 Jan 2007 10:59:12 +0100 Subject: [PATCH 11/35] ndb - recommit extra version info to real-51 --- .../ndb/include/kernel/GlobalSignalNumbers.h | 2 +- storage/ndb/include/kernel/NodeInfo.hpp | 10 ++ .../kernel/signaldata/ApiRegSignalData.hpp | 3 +- storage/ndb/include/ndb_version.h.in | 2 + .../debugger/signaldata/SignalNames.cpp | 1 + storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 5 + .../ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 8 ++ .../ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 136 +++++++++++++++++- storage/ndb/src/kernel/vm/GlobalData.hpp | 1 + storage/ndb/src/kernel/vm/SimulatedBlock.hpp | 15 ++ 10 files changed, 177 insertions(+), 6 deletions(-) diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h index 
49f937ba221..a2a5adeed9e 100644 --- a/storage/ndb/include/kernel/GlobalSignalNumbers.h +++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h @@ -183,7 +183,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_CNTR_START_REP 119 /* 120 not unused */ #define GSN_ROUTE_ORD 121 -/* 122 unused */ +#define GSN_NODE_VERSION_REP 122 /* 123 unused */ /* 124 unused */ #define GSN_CHECK_LCP_STOP 125 diff --git a/storage/ndb/include/kernel/NodeInfo.hpp b/storage/ndb/include/kernel/NodeInfo.hpp index fffd94b5258..75b2654d699 100644 --- a/storage/ndb/include/kernel/NodeInfo.hpp +++ b/storage/ndb/include/kernel/NodeInfo.hpp @@ -90,4 +90,14 @@ operator<<(NdbOut& ndbout, const NodeInfo & info){ return ndbout; } +struct NodeVersionInfo +{ + STATIC_CONST( DataLength = 6 ); + struct + { + Uint32 m_min_version; + Uint32 m_max_version; + } m_type [3]; // Indexed as NodeInfo::Type +}; + #endif diff --git a/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp b/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp index 84dca8fb260..4a8adddc4d5 100644 --- a/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp +++ b/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp @@ -80,12 +80,13 @@ class ApiRegConf { friend class ClusterMgr; public: - STATIC_CONST( SignalLength = 3 + NodeState::DataLength ); + STATIC_CONST( SignalLength = 4 + NodeState::DataLength ); private: Uint32 qmgrRef; Uint32 version; // Version of NDB node Uint32 apiHeartbeatFrequency; + Uint32 minDbVersion; NodeState nodeState; }; diff --git a/storage/ndb/include/ndb_version.h.in b/storage/ndb/include/ndb_version.h.in index 9eb609e3830..0bbb12ed223 100644 --- a/storage/ndb/include/ndb_version.h.in +++ b/storage/ndb/include/ndb_version.h.in @@ -72,5 +72,7 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; #define NDBD_QMGR_SINGLEUSER_VERSION_5 MAKE_VERSION(5,0,25) +#define NDBD_NODE_VERSION_REP MAKE_VERSION(6,1,1) + #endif diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp index 25a491422ef..74a090994b1 100644 --- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -637,5 +637,6 @@ const GsnName SignalNames [] = { ,{ GSN_DICT_COMMIT_REQ, "DICT_COMMIT_REQ"} ,{ GSN_ROUTE_ORD, "ROUTE_ORD" } + ,{ GSN_NODE_VERSION_REP, "NODE_VERSION_REP" } }; const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName); diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index ef8eabf3eff..0fdce8b5166 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -450,6 +450,11 @@ private: #ifdef ERROR_INSERT Uint32 c_error_insert_extra; #endif + + void recompute_version_info(Uint32 type); + void recompute_version_info(Uint32 type, Uint32 version); + void execNODE_VERSION_REP(Signal* signal); + void sendApiVersionRep(Signal* signal, NodeRecPtr nodePtr); }; #endif diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp index 8ec5e681045..aac9db03625 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp @@ -37,6 +37,13 @@ void Qmgr::initData() setHbApiDelay(hbDBAPI); c_connectedNodes.set(getOwnNodeId()); c_stopReq.senderRef = 0; + + /** + * Check sanity for NodeVersion + */ + ndbrequire((Uint32)NodeInfo::DB == 0); + 
ndbrequire((Uint32)NodeInfo::API == 1); + ndbrequire((Uint32)NodeInfo::MGM == 2); }//Qmgr::initData() void Qmgr::initRecords() @@ -107,6 +114,7 @@ Qmgr::Qmgr(Block_context& ctx) addRecSignal(GSN_DIH_RESTARTREF, &Qmgr::execDIH_RESTARTREF); addRecSignal(GSN_DIH_RESTARTCONF, &Qmgr::execDIH_RESTARTCONF); + addRecSignal(GSN_NODE_VERSION_REP, &Qmgr::execNODE_VERSION_REP); initData(); }//Qmgr::Qmgr() diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 809febfc8d1..d59299989d4 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -260,6 +260,9 @@ void Qmgr::execSTTOR(Signal* signal) case 1: initData(signal); startphase1(signal); + recompute_version_info(NodeInfo::DB); + recompute_version_info(NodeInfo::API); + recompute_version_info(NodeInfo::MGM); return; case 7: cactivateApiCheck = 1; @@ -765,6 +768,7 @@ void Qmgr::execCM_REGREQ(Signal* signal) */ UintR TdynId = ++c_maxDynamicId; setNodeInfo(addNodePtr.i).m_version = startingVersion; + recompute_version_info(NodeInfo::DB, startingVersion); addNodePtr.p->ndynamicId = TdynId; /** @@ -1503,7 +1507,8 @@ void Qmgr::execCM_NODEINFOCONF(Signal* signal) replyNodePtr.p->ndynamicId = dynamicId; replyNodePtr.p->blockRef = signal->getSendersBlockRef(); setNodeInfo(replyNodePtr.i).m_version = version; - + recompute_version_info(NodeInfo::DB, version); + if(!c_start.m_nodes.done()){ jam(); return; @@ -1602,6 +1607,7 @@ Qmgr::cmAddPrepare(Signal* signal, NodeRecPtr nodePtr, const NodeRec * self){ } sendCmAckAdd(signal, nodePtr.i, CmAdd::Prepare); + sendApiVersionRep(signal, nodePtr); /* President have prepared us */ CmNodeInfoConf * conf = (CmNodeInfoConf*)signal->getDataPtrSend(); @@ -1613,6 +1619,29 @@ Qmgr::cmAddPrepare(Signal* signal, NodeRecPtr nodePtr, const NodeRec * self){ DEBUG_START(GSN_CM_NODEINFOCONF, refToNode(nodePtr.p->blockRef), ""); } +void +Qmgr::sendApiVersionRep(Signal* signal, NodeRecPtr nodePtr) +{ + if (getNodeInfo(nodePtr.i).m_version >= NDBD_NODE_VERSION_REP) + { + jam(); + Uint32 ref = calcQmgrBlockRef(nodePtr.i); + for(Uint32 i = 1; itheData[0] = i; + signal->theData[1] = version; + sendSignal(ref, GSN_NODE_VERSION_REP, signal, 2, JBB); + } + } + } +} + void Qmgr::sendCmAckAdd(Signal * signal, Uint32 nodeId, CmAdd::RequestType type){ @@ -2401,7 +2430,9 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo) * SECONDS. 
*-------------------------------------------------------------------------*/ setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0; - + setNodeInfo(failedNodePtr.i).m_version = 0; + recompute_version_info(getNodeInfo(failedNodePtr.i).m_type); + CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0]; closeCom->xxxBlockRef = reference(); @@ -2707,7 +2738,6 @@ void Qmgr::execAPI_REGREQ(Signal* signal) } setNodeInfo(apiNodePtr.i).m_version = version; - setNodeInfo(apiNodePtr.i).m_heartbeat_cnt= 0; ApiRegConf * const apiRegConf = (ApiRegConf *)&signal->theData[0]; @@ -2728,8 +2758,9 @@ void Qmgr::execAPI_REGREQ(Signal* signal) apiRegConf->nodeState.dynamicId = -dynamicId; } } + NodeVersionInfo info = getNodeVersionInfo(); + apiRegConf->minDbVersion = info.m_type[NodeInfo::DB].m_min_version; apiRegConf->nodeState.m_connected_nodes.assign(c_connectedNodes); - sendSignal(ref, GSN_API_REGCONF, signal, ApiRegConf::SignalLength, JBB); if (apiNodePtr.p->phase == ZAPI_INACTIVE && @@ -2748,6 +2779,33 @@ void Qmgr::execAPI_REGREQ(Signal* signal) signal->theData[0] = apiNodePtr.i; sendSignal(CMVMI_REF, GSN_ENABLE_COMORD, signal, 1, JBA); + recompute_version_info(type, version); + + if (info.m_type[NodeInfo::DB].m_min_version >= NDBD_NODE_VERSION_REP) + { + jam(); + NodeReceiverGroup rg(QMGR, c_clusterNodes); + rg.m_nodes.clear(getOwnNodeId()); + signal->theData[0] = apiNodePtr.i; + signal->theData[1] = version; + sendSignal(rg, GSN_NODE_VERSION_REP, signal, 2, JBB); + } + else + { + Uint32 i = 0; + while((i = c_clusterNodes.find(i + 1)) != NdbNodeBitmask::NotFound) + { + jam(); + if (i == getOwnNodeId()) + continue; + if (getNodeInfo(i).m_version >= NDBD_NODE_VERSION_REP) + { + jam(); + sendSignal(calcQmgrBlockRef(i), GSN_NODE_VERSION_REP, signal, 2,JBB); + } + } + } + signal->theData[0] = apiNodePtr.i; EXECUTE_DIRECT(NDBCNTR, GSN_API_START_REP, signal, 1); } @@ -2783,6 +2841,76 @@ Qmgr::execAPI_VERSION_REQ(Signal * signal) { ApiVersionConf::SignalLength, JBB); } +void +Qmgr::execNODE_VERSION_REP(Signal* signal) +{ + jamEntry(); + Uint32 nodeId = signal->theData[0]; + Uint32 version = signal->theData[1]; + + if (nodeId < MAX_NODES) + { + jam(); + Uint32 type = getNodeInfo(nodeId).m_type; + setNodeInfo(nodeId).m_version = version; + recompute_version_info(type, version); + } +} + +void +Qmgr::recompute_version_info(Uint32 type, Uint32 version) +{ + NodeVersionInfo& info = setNodeVersionInfo(); + switch(type){ + case NodeInfo::DB: + case NodeInfo::API: + case NodeInfo::MGM: + break; + default: + return; + } + + if (info.m_type[type].m_min_version == 0 || + version < info.m_type[type].m_min_version) + info.m_type[type].m_min_version = version; + if (version > info.m_type[type].m_max_version) + info.m_type[type].m_max_version = version; +} + +void +Qmgr::recompute_version_info(Uint32 type) +{ + switch(type){ + case NodeInfo::DB: + case NodeInfo::API: + case NodeInfo::MGM: + break; + default: + return; + } + + Uint32 min = ~0, max = 0; + Uint32 cnt = type == NodeInfo::DB ? MAX_NDB_NODES : MAX_NODES; + for (Uint32 i = 1; i max) + max = version; + } + } + } + + NodeVersionInfo& info = setNodeVersionInfo(); + info.m_type[type].m_min_version = min == ~(Uint32)0 ? 
0 : min; + info.m_type[type].m_max_version = max; +} #if 0 bool diff --git a/storage/ndb/src/kernel/vm/GlobalData.hpp b/storage/ndb/src/kernel/vm/GlobalData.hpp index 2761edb0571..fa0ad996c01 100644 --- a/storage/ndb/src/kernel/vm/GlobalData.hpp +++ b/storage/ndb/src/kernel/vm/GlobalData.hpp @@ -36,6 +36,7 @@ enum restartStates {initial_state, struct GlobalData { Uint32 m_restart_seq; // + NodeVersionInfo m_versionInfo; NodeInfo m_nodeInfo[MAX_NODES]; Signal VMSignals[1]; // Owned by FastScheduler:: diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp index 46fe03de98e..01897825b2e 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -403,6 +403,9 @@ protected: const NodeInfo & getNodeInfo(NodeId nodeId) const; NodeInfo & setNodeInfo(NodeId); + const NodeVersionInfo& getNodeVersionInfo() const; + NodeVersionInfo& setNodeVersionInfo(); + /********************** * Xfrm stuff */ @@ -709,6 +712,18 @@ SimulatedBlock::getNodeInfo(NodeId nodeId) const { return globalData.m_nodeInfo[nodeId]; } +inline +const NodeVersionInfo & +SimulatedBlock::getNodeVersionInfo() const { + return globalData.m_versionInfo; +} + +inline +NodeVersionInfo & +SimulatedBlock::setNodeVersionInfo() { + return globalData.m_versionInfo; +} + inline void SimulatedBlock::EXECUTE_DIRECT(Uint32 block, From a1fdeba32820bf4913c17fa56d6cc248098f0f8a Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Thu, 4 Jan 2007 11:13:19 +0100 Subject: [PATCH 12/35] ndb - bug#25329 extra sledge hammer, (real 51) use dicklock to prevent SUB_START_REQ during node recovery --- storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 3201cfdab89..6d2be7dde39 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -10050,9 +10050,20 @@ void Dbdict::execSUB_START_REQ(Signal* signal) } OpSubEventPtr subbPtr; Uint32 errCode = 0; + + DictLockPtr loopPtr; + if (c_dictLockQueue.first(loopPtr) && + loopPtr.p->lt->lockType == DictLockReq::NodeRestartLock) + { + jam(); + errCode = 1405; + goto busy; + } + if (!c_opSubEvent.seize(subbPtr)) { errCode = SubStartRef::Busy; busy: + jam(); SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); { // fix From 4ddf45b19cdb9b9223871cf24b9b663cccb73c24 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Thu, 4 Jan 2007 17:26:53 +0100 Subject: [PATCH 13/35] ha_ndbcluster.h, ndb_dd_alter.result, ha_ndbcluster.cc: bug#25296 Truncate table converts NDB disk based tables to in-memory tables: Changed fix to use get_tablespace_name directly instead --- mysql-test/r/ndb_dd_alter.result | 4 ++-- sql/ha_ndbcluster.cc | 30 ++++++++++++++++-------------- sql/ha_ndbcluster.h | 1 - 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/mysql-test/r/ndb_dd_alter.result b/mysql-test/r/ndb_dd_alter.result index cbe8db303b4..d101e6de791 100644 --- a/mysql-test/r/ndb_dd_alter.result +++ b/mysql-test/r/ndb_dd_alter.result @@ -439,7 +439,7 @@ t1 CREATE TABLE `t1` ( `a14` blob, PRIMARY KEY (`a1`), KEY `a3_i` (`a3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 ALTER TABLE test.t1 DROP a14; ALTER TABLE test.t1 DROP a13; ALTER TABLE test.t1 DROP a12; @@ -459,7 +459,7 @@ t1 CREATE 
TABLE `t1` ( `a4` bit(1) DEFAULT NULL, `a5` tinyint(4) DEFAULT NULL, KEY `a3_i` (`a3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 DROP TABLE test.t1; ALTER TABLESPACE ts DROP DATAFILE './table_space/datafile.dat' diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 4cd504d00a6..a666e6b1c34 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4734,6 +4734,7 @@ int ha_ndbcluster::create(const char *name, const void *data, *pack_data; bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE); bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE); + char tablespace[FN_LEN]; DBUG_ENTER("ha_ndbcluster::create"); DBUG_PRINT("enter", ("name: %s", name)); @@ -4742,8 +4743,22 @@ int ha_ndbcluster::create(const char *name, set_dbname(name); set_tabname(name); + if ((my_errno= check_ndb_connection())) + DBUG_RETURN(my_errno); + + Ndb *ndb= get_ndb(); + NDBDICT *dict= ndb->getDictionary(); + if (is_truncate) { + { + Ndb_table_guard ndbtab_g(dict, m_tabname); + if (!(m_table= ndbtab_g.get_table())) + ERR_RETURN(dict->getNdbError()); + if ((get_tablespace_name(thd, tablespace, FN_LEN))) + info->tablespace= tablespace; + m_table= NULL; + } DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE")); if ((my_errno= delete_table(name))) DBUG_RETURN(my_errno); @@ -4903,12 +4918,7 @@ int ha_ndbcluster::create(const char *name, DBUG_RETURN(my_errno); } - if ((my_errno= check_ndb_connection())) - DBUG_RETURN(my_errno); - // Create the table in NDB - Ndb *ndb= get_ndb(); - NDBDICT *dict= ndb->getDictionary(); if (dict->createTable(tab) != 0) { const NdbError err= dict->getNdbError(); @@ -8309,15 +8319,6 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) DBUG_RETURN(0); } -void ha_ndbcluster::update_create_info(HA_CREATE_INFO *create_info) -{ - THD *thd= current_thd; - - if (thd->lex->sql_command == SQLCOM_TRUNCATE && - get_tablespace_name(thd,0,0)) - create_info->storage_media= HA_SM_DISK; -} - char* ha_ndbcluster::update_table_comment( /* out: table comment + additional */ @@ -10016,6 +10017,7 @@ char* ha_ndbcluster::get_tablespace_name(THD *thd, char* name, uint name_len) ndberr= ndbdict->getNdbError(); if(ndberr.classification != NdbError::NoError) goto err; + DBUG_PRINT("info", ("Found tablespace '%s'", ts.getName())); if (name) { strxnmov(name, name_len, ts.getName(), NullS); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 509211014c6..50f24c7a4cf 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -884,7 +884,6 @@ private: ulonglong *nb_reserved_values); bool uses_blob_value(); - void update_create_info(HA_CREATE_INFO *create_info); char *update_table_comment(const char * comment); int write_ndb_file(const char *name); From db0107b801df0f4fd0ef0f978a45c6d2830d748a Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Mon, 8 Jan 2007 10:38:53 +0100 Subject: [PATCH 14/35] bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index: Added error checking --- mysql-test/r/ndb_index_ordered.result | 9 +++++++++ mysql-test/t/ndb_index_ordered.test | 14 ++++++++++++++ sql/ha_ndbcluster.cc | 10 ++++++++++ 3 files changed, 33 insertions(+) diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 36bac7b0f9d..54458e1b459 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -658,3 +658,12 @@ insert into t1 (a, c) 
values (1,'aaa'),(3,'bbb'); select count(*) from t1 where c<'bbb'; count(*) 1 +create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, +CountryCode char(3) NOT NULL, +DishTitle varchar(64) NOT NULL, +calories smallint(5) unsigned DEFAULT NULL, +PRIMARY KEY (DishID) +) ENGINE=ndbcluster; +create index i using hash on nationaldish(countrycode,calories); +ERROR HY000: Can't create table './test/#sql-3c51_2.frm' (errno: 138) +drop table nationaldish; diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index e6827bdbe12..fa76202c7b7 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -356,3 +356,17 @@ insert into t1 (a, c) values (1,'aaa'),(3,'bbb'); select count(*) from t1 where c<'bbb'; # End of 4.1 tests + +# bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index + +create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, + CountryCode char(3) NOT NULL, + DishTitle varchar(64) NOT NULL, + calories smallint(5) unsigned DEFAULT NULL, + PRIMARY KEY (DishID) + ) ENGINE=ndbcluster; + +--error ER_CANT_CREATE_TABLE +create index i using hash on nationaldish(countrycode,calories); + +drop table nationaldish; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 149a7c83895..34a3a001b21 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1107,6 +1107,16 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase) error= create_unique_index(unique_index_name, key_info); break; case ORDERED_INDEX: + if (key_info->algorithm == HA_KEY_ALG_HASH) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_UNSUPPORTED_EXTENSION, + ER(ER_UNSUPPORTED_EXTENSION), + "Ndb does not support non-unique " + "hash based indexes"); + error= HA_ERR_UNSUPPORTED; + break; + } error= create_ordered_index(index_name, key_info); break; default: From a74b06c723adc41f1252010cd6c6cd65b8563752 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Mon, 8 Jan 2007 11:42:32 +0100 Subject: [PATCH 15/35] bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index: Added error checking --- mysql-test/r/ndb_index_ordered.result | 9 +++++++++ mysql-test/t/ndb_index_ordered.test | 14 ++++++++++++++ sql/ha_ndbcluster.cc | 13 ++++++++++++- 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 788c0d68259..37fa29da18d 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -835,3 +835,12 @@ a 3 delete from t1; drop table t1; +create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, +CountryCode char(3) NOT NULL, +DishTitle varchar(64) NOT NULL, +calories smallint(5) unsigned DEFAULT NULL, +PRIMARY KEY (DishID) +) ENGINE=ndbcluster; +create index i on nationaldish(countrycode,calories) using hash; +ERROR 42000: Table 'nationaldish' uses an extension that doesn't exist in this MySQL version +drop table nationaldish; diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index bba0c5ca53f..13b13f39f70 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -455,3 +455,17 @@ rollback; select * from t1 order by a; delete from t1; drop table t1; + +# bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index + +create table nationaldish (DishID 
int(10) unsigned NOT NULL AUTO_INCREMENT, + CountryCode char(3) NOT NULL, + DishTitle varchar(64) NOT NULL, + calories smallint(5) unsigned DEFAULT NULL, + PRIMARY KEY (DishID) + ) ENGINE=ndbcluster; + +--error ER_UNSUPPORTED_EXTENSION +create index i on nationaldish(countrycode,calories) using hash; + +drop table nationaldish; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a666e6b1c34..0079535fa12 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -5153,6 +5153,17 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info, error= create_unique_index(unique_name, key_info); break; case ORDERED_INDEX: + if (key_info->algorithm == HA_KEY_ALG_HASH) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_ILLEGAL_HA_CREATE_OPTION, + ER(ER_ILLEGAL_HA_CREATE_OPTION), + ndbcluster_hton_name, + "Ndb does not support non-unique " + "hash based indexes"); + error= HA_ERR_UNSUPPORTED; + break; + } error= create_ordered_index(name, key_info); break; default: @@ -5247,7 +5258,7 @@ int ha_ndbcluster::add_index(TABLE *table_arg, KEY *key= key_info + idx; KEY_PART_INFO *key_part= key->key_part; KEY_PART_INFO *end= key_part + key->key_parts; - NDB_INDEX_TYPE idx_type= get_index_type_from_key(idx, key, false); + NDB_INDEX_TYPE idx_type= get_index_type_from_key(idx, key_info, false); DBUG_PRINT("info", ("Adding index: '%s'", key_info[idx].name)); // Add fields to key_part struct for (; key_part != end; key_part++) From 5ebcc10e368796a23bba37332786fa0ef99324ab Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Mon, 8 Jan 2007 13:53:37 +0100 Subject: [PATCH 16/35] bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index: Changed test since error mesage wasn't predictable --- mysql-test/r/ndb_index_ordered.result | 7 +++---- mysql-test/t/ndb_index_ordered.test | 8 +++----- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 54458e1b459..b3e55a23073 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -662,8 +662,7 @@ create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, CountryCode char(3) NOT NULL, DishTitle varchar(64) NOT NULL, calories smallint(5) unsigned DEFAULT NULL, -PRIMARY KEY (DishID) +PRIMARY KEY (DishID), +INDEX i USING HASH (countrycode,calories) ) ENGINE=ndbcluster; -create index i using hash on nationaldish(countrycode,calories); -ERROR HY000: Can't create table './test/#sql-3c51_2.frm' (errno: 138) -drop table nationaldish; +ERROR HY000: Can't create table './test/nationaldish.frm' (errno: 138) diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index fa76202c7b7..5867140fabb 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -359,14 +359,12 @@ select count(*) from t1 where c<'bbb'; # bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index +--error ER_CANT_CREATE_TABLE create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, CountryCode char(3) NOT NULL, DishTitle varchar(64) NOT NULL, calories smallint(5) unsigned DEFAULT NULL, - PRIMARY KEY (DishID) + PRIMARY KEY (DishID), + INDEX i USING HASH (countrycode,calories) ) ENGINE=ndbcluster; ---error ER_CANT_CREATE_TABLE -create index i using hash on nationaldish(countrycode,calories); - -drop table nationaldish; From 
4cc91593bbd81e7661c3abae0f3fa0597d4e522c Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Mon, 8 Jan 2007 14:20:35 +0100 Subject: [PATCH 17/35] bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index: Merged up test 5.0 case --- mysql-test/r/ndb_index_ordered.result | 8 ++++++++ mysql-test/t/ndb_index_ordered.test | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 37fa29da18d..a29b5343d7c 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -839,6 +839,14 @@ create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, CountryCode char(3) NOT NULL, DishTitle varchar(64) NOT NULL, calories smallint(5) unsigned DEFAULT NULL, +PRIMARY KEY (DishID), +INDEX i USING HASH (countrycode,calories) +) ENGINE=ndbcluster; +ERROR HY000: Can't create table 'test.nationaldish' (errno: 138) +create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, +CountryCode char(3) NOT NULL, +DishTitle varchar(64) NOT NULL, +calories smallint(5) unsigned DEFAULT NULL, PRIMARY KEY (DishID) ) ENGINE=ndbcluster; create index i on nationaldish(countrycode,calories) using hash; diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index 13b13f39f70..c2a96a590ca 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -458,6 +458,15 @@ drop table t1; # bug#24820 CREATE INDEX ....USING HASH on NDB table creates ordered index, not HASH index +--error ER_CANT_CREATE_TABLE +create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, + CountryCode char(3) NOT NULL, + DishTitle varchar(64) NOT NULL, + calories smallint(5) unsigned DEFAULT NULL, + PRIMARY KEY (DishID), + INDEX i USING HASH (countrycode,calories) + ) ENGINE=ndbcluster; + create table nationaldish (DishID int(10) unsigned NOT NULL AUTO_INCREMENT, CountryCode char(3) NOT NULL, DishTitle varchar(64) NOT NULL, From 403efda29430aaf83da1eaf13818306cc3eed791 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Wed, 10 Jan 2007 20:50:19 +0100 Subject: [PATCH 18/35] ndb - bug#25468 handle partially transfered LCP_FRAG_REP after node failure recommit to 51-work --- storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 1 + .../ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 79 +++++++++++++++++-- storage/ndb/test/ndbapi/testNodeRestart.cpp | 60 ++++++++++++++ .../ndb/test/run-test/daily-basic-tests.txt | 4 + 4 files changed, 139 insertions(+), 5 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index 37eb54028a6..3436a609fe7 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -637,6 +637,7 @@ private: void execTCGETOPSIZECONF(Signal *); void execTC_CLOPSIZECONF(Signal *); + int handle_invalid_lcp_no(const class LcpFragRep*, ReplicaRecordPtr); void execLCP_FRAG_REP(Signal *); void execLCP_COMPLETE_REP(Signal *); void execSTART_LCP_REQ(Signal *); diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 50c7c5472ba..f9b7eb9d100 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -4046,6 +4046,11 @@ void Dbdih::execNODE_FAILREP(Signal* signal) Uint32 newMasterId = nodeFail->masterNodeId; const Uint32 
noOfFailedNodes = nodeFail->noOfNodes; + if (ERROR_INSERTED(7179)) + { + CLEAR_ERROR_INSERT_VALUE; + } + /*-------------------------------------------------------------------------*/ // The first step is to convert from a bit mask to an array of failed nodes. /*-------------------------------------------------------------------------*/ @@ -10256,12 +10261,42 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal) Uint32 fragId = lcpReport->fragId; jamEntry(); + + if (ERROR_INSERTED(7178) && nodeId != getOwnNodeId()) + { + jam(); + Uint32 owng =Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups); + Uint32 nodeg = Sysfile::getNodeGroup(nodeId, SYSFILE->nodeGroups); + if (owng == nodeg) + { + jam(); + ndbout_c("throwing away LCP_FRAG_REP from (and killing) %d", nodeId); + SET_ERROR_INSERT_VALUE(7179); + signal->theData[0] = 9999; + sendSignal(numberToRef(CMVMI, nodeId), + GSN_NDB_TAMPER, signal, 1, JBA); + return; + } + } + if (ERROR_INSERTED(7179) && nodeId != getOwnNodeId()) + { + jam(); + Uint32 owng =Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups); + Uint32 nodeg = Sysfile::getNodeGroup(nodeId, SYSFILE->nodeGroups); + if (owng == nodeg) + { + jam(); + ndbout_c("throwing away LCP_FRAG_REP from %d", nodeId); + return; + } + } + CRASH_INSERTION2(7025, isMaster()); CRASH_INSERTION2(7016, !isMaster()); - + bool fromTimeQueue = (signal->senderBlockRef() == reference()); - + TabRecordPtr tabPtr; tabPtr.i = tableId; ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); @@ -10463,6 +10498,37 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr, ndbrequire(false); }//Dbdih::findReplica() + +int +Dbdih::handle_invalid_lcp_no(const LcpFragRep* rep, + ReplicaRecordPtr replicaPtr) +{ + ndbrequire(!isMaster()); + Uint32 lcpNo = rep->lcpNo; + Uint32 lcpId = rep->lcpId; + Uint32 replicaLcpNo = replicaPtr.p->nextLcp; + Uint32 prevReplicaLcpNo = prevLcpNo(replicaLcpNo); + + warningEvent("Detected previous node failure of %d during lcp", + rep->nodeId); + replicaPtr.p->nextLcp = lcpNo; + replicaPtr.p->lcpId[lcpNo] = 0; + replicaPtr.p->lcpStatus[lcpNo] = ZINVALID; + + for (Uint32 i = lcpNo; i != lcpNo; i = nextLcpNo(i)) + { + jam(); + if (replicaPtr.p->lcpStatus[i] == ZVALID && + replicaPtr.p->lcpId[i] >= lcpId) + { + ndbout_c("i: %d lcpId: %d", i, replicaPtr.p->lcpId[i]); + ndbrequire(false); + } + } + + return 0; +} + /** * Return true if table is all fragment replicas have been checkpointed * to disk (in all LQHs) @@ -10491,9 +10557,12 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport) ndbrequire(replicaPtr.p->lcpOngoingFlag == true); if(lcpNo != replicaPtr.p->nextLcp){ - ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d", - lcpNo, replicaPtr.p->nextLcp); - ndbrequire(false); + if (handle_invalid_lcp_no(lcpReport, replicaPtr)) + { + ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d", + lcpNo, replicaPtr.p->nextLcp); + ndbrequire(false); + } } ndbrequire(lcpNo == replicaPtr.p->nextLcp); ndbrequire(lcpNo < MAX_LCP_STORED); diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index 0ceb3b5d6f5..92e59b92c5a 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -1073,6 +1073,63 @@ int runBug25364(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runBug25468(NDBT_Context* ctx, NDBT_Step* step){ + + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter restarter; + + for (int i = 0; i Date: Thu, 11 Jan 2007 20:51:16 +0100 Subject: 
[PATCH 19/35] ndb - bug#25554 fix bug when master failure during nr (recommit against 5.0) --- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 3 +- ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 21 +++++++++ ndb/test/ndbapi/testNodeRestart.cpp | 53 +++++++++++++++++++++++ ndb/test/run-test/daily-basic-tests.txt | 4 ++ 4 files changed, 80 insertions(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index cd82b6fc425..5f573d40dfe 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -3555,7 +3555,6 @@ void Dbdih::endTakeOver(Uint32 takeOverPtrI) takeOverPtr.i = takeOverPtrI; ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - releaseTakeOver(takeOverPtrI); if ((takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) && (takeOverPtr.p->toMasterStatus != TakeOverRecord::TO_WAIT_START_TAKE_OVER)) { jam(); @@ -3569,6 +3568,7 @@ void Dbdih::endTakeOver(Uint32 takeOverPtrI) }//if setAllowNodeStart(takeOverPtr.p->toStartingNode, true); initTakeOver(takeOverPtr); + releaseTakeOver(takeOverPtrI); }//Dbdih::endTakeOver() void Dbdih::releaseTakeOver(Uint32 takeOverPtrI) @@ -4710,6 +4710,7 @@ void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI) break; } ndbrequire(ok); + endTakeOver(takeOverPtr.i); }//if }//Dbdih::handleTakeOverNewMaster() diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 66ee7549b9d..c5987ee8a57 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2847,6 +2847,17 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, systemErrorLab(signal, __LINE__); return; }//if + + if (getNodeState().startLevel < NodeState::SL_STARTED) + { + jam(); + CRASH_INSERTION(932); + char buf[100]; + BaseString::snprintf(buf, 100, "Node failure during restart"); + progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf); + ndbrequire(false); + } + TnoFailedNodes = cnoFailedNodes; failReport(signal, failedNodePtr.i, (UintR)ZTRUE, aFailCause); if (cpresident == getOwnNodeId()) { @@ -2933,6 +2944,16 @@ void Qmgr::execPREP_FAILREQ(Signal* signal) return; }//if + if (getNodeState().startLevel < NodeState::SL_STARTED) + { + jam(); + CRASH_INSERTION(932); + char buf[100]; + BaseString::snprintf(buf, 100, "Node failure during restart"); + progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf); + ndbrequire(false); + } + guard0 = cnoPrepFailedNodes - 1; arrGuard(guard0, MAX_NDB_NODES); for (Tindex = 0; Tindex <= guard0; Tindex++) { diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index c0c5cc5163a..43fb77342b5 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -995,6 +995,56 @@ int runBug25364(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runBug25554(NDBT_Context* ctx, NDBT_Step* step){ + + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter restarter; + + if (restarter.getNumDbNodes() < 4) + return NDBT_OK; + + for (int i = 0; i Date: Thu, 11 Jan 2007 21:06:36 +0100 Subject: [PATCH 20/35] fix merge error --- storage/ndb/test/ndbapi/testNodeRestart.cpp | 42 +-------------------- 1 file changed, 2 insertions(+), 40 deletions(-) diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index bd81f8a53fc..01e138830d6 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ 
b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -1003,6 +1003,8 @@ int runBug25364(NDBT_Context* ctx, NDBT_Step* step){ if (restarter.waitNodesStarted(nodes, 2)) return NDBT_FAILED; } + + return NDBT_OK; } int @@ -1070,46 +1072,6 @@ runBug24543(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } -int runBug25364(NDBT_Context* ctx, NDBT_Step* step){ - int result = NDBT_OK; - NdbRestarter restarter; - Ndb* pNdb = GETNDB(step); - int loops = ctx->getNumLoops(); - - if (restarter.getNumDbNodes() < 4) - return NDBT_OK; - - int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; - - for (; loops; loops --) - { - int master = restarter.getMasterNodeId(); - int victim = restarter.getRandomNodeOtherNodeGroup(master, rand()); - int second = restarter.getRandomNodeSameNodeGroup(victim, rand()); - - int dump[] = { 935, victim } ; - if (restarter.dumpStateOneNode(master, dump, 2)) - return NDBT_FAILED; - - if (restarter.dumpStateOneNode(master, val2, 2)) - return NDBT_FAILED; - - if (restarter.restartOneDbNode(second, false, true, true)) - return NDBT_FAILED; - - int nodes[2] = { master, second }; - if (restarter.waitNodesNoStart(nodes, 2)) - return NDBT_FAILED; - - restarter.startNodes(nodes, 2); - - if (restarter.waitNodesStarted(nodes, 2)) - return NDBT_FAILED; - } - - return NDBT_OK; -} - int runBug25468(NDBT_Context* ctx, NDBT_Step* step){ int result = NDBT_OK; From f8fdb7c99742c94e09cd8f103f28c19dcfc6f7e2 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Fri, 12 Jan 2007 15:48:47 +0100 Subject: [PATCH 21/35] ndb - bug#25587 fix not aligned or non 32-bit values in "smart" scan --- mysql-test/r/ndb_partition_key.result | 48 +++++++++++++++++++++ mysql-test/t/ndb_partition_key.test | 28 ++++++++++++ storage/ndb/src/ndbapi/NdbScanOperation.cpp | 12 ++++-- 3 files changed, 85 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result index 7e480f68dd9..e294807b40d 100644 --- a/mysql-test/r/ndb_partition_key.result +++ b/mysql-test/r/ndb_partition_key.result @@ -203,3 +203,51 @@ NODEGROUP PARTITION_NAME 0 p0 0 p1 DROP TABLE t1; +CREATE TABLE t1 ( +a tinyint unsigned NOT NULL, +b bigint(20) unsigned NOT NULL, +c char(12), +PRIMARY KEY (a,b) +) ENGINE ndb DEFAULT CHARSET=latin1 PARTITION BY KEY (a); +insert into t1 values(1,1,'1'), (2,2,'2'), (3,3,'3'), (4,4,'4'), (5,5,'5'); +select * from t1 where a = 1; +a b c +1 1 1 +select * from t1 where a = 2; +a b c +2 2 2 +select * from t1 where a = 3; +a b c +3 3 3 +select * from t1 where a = 4; +a b c +4 4 4 +select * from t1 where a = 5; +a b c +5 5 5 +delete from t1 where a = 1; +select * from t1 order by 1; +a b c +2 2 2 +3 3 3 +4 4 4 +5 5 5 +delete from t1 where a = 2; +select * from t1 order by 1; +a b c +3 3 3 +4 4 4 +5 5 5 +delete from t1 where a = 3; +select * from t1 order by 1; +a b c +4 4 4 +5 5 5 +delete from t1 where a = 4; +select * from t1 order by 1; +a b c +5 5 5 +delete from t1 where a = 5; +select * from t1 order by 1; +a b c +drop table t1; diff --git a/mysql-test/t/ndb_partition_key.test b/mysql-test/t/ndb_partition_key.test index d8c1b61b94f..78e2c9d15c2 100644 --- a/mysql-test/t/ndb_partition_key.test +++ b/mysql-test/t/ndb_partition_key.test @@ -199,3 +199,31 @@ ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1; SELECT NODEGROUP,PARTITION_NAME FROM information_schema.partitions WHERE table_name = "t1"; DROP TABLE t1; + +# bug#25587 + +CREATE TABLE t1 ( +a tinyint unsigned NOT NULL, +b bigint(20) unsigned NOT NULL, +c char(12), +PRIMARY KEY (a,b) +) ENGINE 
ndb DEFAULT CHARSET=latin1 PARTITION BY KEY (a); + +insert into t1 values(1,1,'1'), (2,2,'2'), (3,3,'3'), (4,4,'4'), (5,5,'5'); +select * from t1 where a = 1; +select * from t1 where a = 2; +select * from t1 where a = 3; +select * from t1 where a = 4; +select * from t1 where a = 5; +delete from t1 where a = 1; +select * from t1 order by 1; +delete from t1 where a = 2; +select * from t1 order by 1; +delete from t1 where a = 3; +select * from t1 order by 1; +delete from t1 where a = 4; +select * from t1 order by 1; +delete from t1 where a = 5; +select * from t1 order by 1; + +drop table t1; diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index 3e2081b6018..75ad6306c69 100644 --- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -1188,25 +1188,31 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo, const bool nobytes = (len & 0x3) == 0; const Uint32 totalLen = 2 + sizeInWords; Uint32 tupKeyLen = theTupKeyLen; + union { + Uint32 tempData[2000]; + Uint64 __align; + }; + Uint64 *valPtr; if(remaining > totalLen && aligned && nobytes){ Uint32 * dst = theKEYINFOptr + currLen; * dst ++ = type; * dst ++ = ahValue; memcpy(dst, aValue, 4 * sizeInWords); theTotalNrOfKeyWordInSignal = currLen + totalLen; + valPtr = (Uint64*)aValue; } else { if(!aligned || !nobytes){ - Uint32 tempData[2000]; tempData[0] = type; tempData[1] = ahValue; tempData[2 + (len >> 2)] = 0; memcpy(tempData+2, aValue, len); - insertBOUNDS(tempData, 2+sizeInWords); + valPtr = (Uint64*)(tempData+2); } else { Uint32 buf[2] = { type, ahValue }; insertBOUNDS(buf, 2); insertBOUNDS((Uint32*)aValue, sizeInWords); + valPtr = (Uint64*)aValue; } } theTupKeyLen = tupKeyLen + totalLen; @@ -1223,7 +1229,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo, if(type == BoundEQ && tDistrKey) { theNoOfTupKeyLeft--; - return handle_distribution_key((Uint64*)aValue, sizeInWords); + return handle_distribution_key(valPtr, sizeInWords); } return 0; } else { From b3c177073afb9646a3c652b69ffb4a9ad44eef51 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Mon, 15 Jan 2007 21:03:39 +0100 Subject: [PATCH 22/35] ndb - bug#25636 Fix DD problem during NR after 3 missed LCP's --- .../ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 11 +++++++++++ .../ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 16 +++++++++++++--- storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 2 ++ .../src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp | 7 +++++++ storage/ndb/src/kernel/blocks/pgman.cpp | 7 +++++++ 5 files changed, 40 insertions(+), 3 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 2e68addb1d7..7937e74533a 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -2916,6 +2916,17 @@ Dbdih::nr_start_fragment(Signal* signal, takeOverPtr.p->toCurrentTabref, takeOverPtr.p->toCurrentFragid); replicaPtr.p->lcpIdStarted = 0; + BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toStartingNode); + StartFragReq *req = (StartFragReq *)signal->getDataPtrSend(); + req->userPtr = 0; + req->userRef = reference(); + req->lcpNo = ZNIL; + req->lcpId = 0; + req->tableId = takeOverPtr.p->toCurrentTabref; + req->fragId = takeOverPtr.p->toCurrentFragid; + req->noOfLogNodes = 0; + sendSignal(ref, GSN_START_FRAGREQ, signal, + StartFragReq::SignalLength, JBB); } else { diff --git 
a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index dff0c77f7dc..43961533ed9 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -13836,6 +13836,7 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal) Uint32 lcpNo = startFragReq->lcpNo; Uint32 noOfLogNodes = startFragReq->noOfLogNodes; Uint32 lcpId = startFragReq->lcpId; + ndbrequire(noOfLogNodes <= 4); fragptr.p->fragStatus = Fragrecord::CRASH_RECOVERING; fragptr.p->srBlockref = startFragReq->userRef; @@ -13890,7 +13891,16 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal) signal->theData[1] = fragId; sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); } - + + if (getNodeState().getNodeRestartInProgress()) + { + jam(); + fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION; + } + + c_tup->disk_restart_mark_no_lcp(tabptr.i, fragId); + jamEntry(); + return; }//if @@ -16713,8 +16723,8 @@ void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data, ccurrentGcprec = RNIL; caddNodeState = ZFALSE; cstartRecReq = ZFALSE; - cnewestGci = ~0; - cnewestCompletedGci = ~0; + cnewestGci = 0; + cnewestCompletedGci = 0; crestartOldestGci = 0; crestartNewestGci = 0; csrPhaseStarted = ZSR_NO_PHASE_STARTED; diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index a27daf0a8ea..02fa4e71d08 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -2711,6 +2711,8 @@ public: Ptr m_extent_ptr; Local_key m_key; }; + + void disk_restart_mark_no_lcp(Uint32 table, Uint32 frag); private: void disk_restart_undo_next(Signal*); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp index 0342f2c9e0c..257fcaa5683 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp @@ -1422,6 +1422,13 @@ Dbtup::disk_restart_undo_next(Signal* signal) sendSignal(LGMAN_REF, GSN_CONTINUEB, signal, 1, JBB); } +void +Dbtup::disk_restart_mark_no_lcp(Uint32 tableId, Uint32 fragId) +{ + jamEntry(); + disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE); +} + void Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag) { diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp index 88ea0122268..9badaf1926b 100644 --- a/storage/ndb/src/kernel/blocks/pgman.cpp +++ b/storage/ndb/src/kernel/blocks/pgman.cpp @@ -1258,6 +1258,8 @@ Pgman::process_lcp(Signal* signal) void Pgman::process_lcp_locked(Signal* signal, Ptr ptr) { + CRASH_INSERTION(11006); + ptr.p->m_last_lcp = m_last_lcp; if (ptr.p->m_state & Page_entry::DIRTY) { @@ -2351,6 +2353,11 @@ Pgman::execDUMP_STATE_ORD(Signal* signal) { g_dbg_lcp = ~g_dbg_lcp; } + + if (signal->theData[0] == 11006) + { + SET_ERROR_INSERT_VALUE(11006); + } } // page cache client From 3c36b92a1df978db6a0c5992f5f80dc68485fe48 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Tue, 16 Jan 2007 09:19:41 +0100 Subject: [PATCH 23/35] Changed Dictionary::Table::setTablespace/getTablespace to setTablespaceName/getTablespaceName --- sql/ha_ndbcluster.cc | 6 +++--- storage/ndb/include/ndbapi/NdbDictionary.hpp | 4 ++-- storage/ndb/src/ndbapi/NdbDictionary.cpp | 8 +++++++- storage/ndb/test/ndbapi/bank/BankLoad.cpp | 2 +- storage/ndb/test/ndbapi/create_tab.cpp | 2 +- storage/ndb/test/ndbapi/testDict.cpp | 2 +- 
storage/ndb/test/src/NDBT_Test.cpp | 2 +- 7 files changed, 16 insertions(+), 10 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 0079535fa12..106b2e075f8 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4844,9 +4844,9 @@ int ha_ndbcluster::create(const char *name, if (info->storage_media == HA_SM_DISK) { if (info->tablespace) - tab.setTablespace(info->tablespace); + tab.setTablespaceName(info->tablespace); else - tab.setTablespace("DEFAULT-TS"); + tab.setTablespaceName("DEFAULT-TS"); } else if (info->tablespace) { @@ -4860,7 +4860,7 @@ int ha_ndbcluster::create(const char *name, "STORAGE DISK"); DBUG_RETURN(HA_ERR_UNSUPPORTED); } - tab.setTablespace(info->tablespace); + tab.setTablespaceName(info->tablespace); info->storage_media = HA_SM_DISK; //if use tablespace, that also means store on disk } diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 1945a644571..aada314e454 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -809,9 +809,9 @@ public: */ void setMaxLoadFactor(int); - void setTablespace(const char * name); + void setTablespaceName(const char * name); + const char * getTablespaceName() const; void setTablespace(const class Tablespace &); - const char * getTablespace() const; bool getTablespace(Uint32 *id= 0, Uint32 *version= 0) const; /** diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index 481b2eac6d2..47ba0335183 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -663,8 +663,14 @@ NdbDictionary::Table::getTablespace(Uint32 *id, Uint32 *version) const return true; } +const char * +NdbDictionary::Table::getTablespaceName() const +{ + return m_impl.m_tablespace_name.c_str(); +} + void -NdbDictionary::Table::setTablespace(const char * name){ +NdbDictionary::Table::setTablespaceName(const char * name){ m_impl.m_tablespace_id = ~0; m_impl.m_tablespace_version = ~0; m_impl.m_tablespace_name.assign(name); diff --git a/storage/ndb/test/ndbapi/bank/BankLoad.cpp b/storage/ndb/test/ndbapi/bank/BankLoad.cpp index c033d112890..985391c0066 100644 --- a/storage/ndb/test/ndbapi/bank/BankLoad.cpp +++ b/storage/ndb/test/ndbapi/bank/BankLoad.cpp @@ -163,7 +163,7 @@ int Bank::createTable(const char* tabName, bool disk){ return NDBT_FAILED; } NdbDictionary::Table copy(* pTab); - copy.setTablespace("DEFAULT-TS"); + copy.setTablespaceName("DEFAULT-TS"); for (Uint32 i = 0; isetStorageType(NdbDictionary::Column::StorageTypeDisk); if(m_ndb.getDictionary()->createTable(copy) == -1){ diff --git a/storage/ndb/test/ndbapi/create_tab.cpp b/storage/ndb/test/ndbapi/create_tab.cpp index cf1a543c62c..c8c7dd6e27f 100644 --- a/storage/ndb/test/ndbapi/create_tab.cpp +++ b/storage/ndb/test/ndbapi/create_tab.cpp @@ -37,7 +37,7 @@ g_create_hook(Ndb* ndb, NdbDictionary::Table& tab, int when, void* arg) } } if (g_tsname != NULL) { - tab.setTablespace(g_tsname); + tab.setTablespaceName(g_tsname); } } return 0; diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp index b3487e443f0..f7de43aea20 100644 --- a/storage/ndb/test/ndbapi/testDict.cpp +++ b/storage/ndb/test/ndbapi/testDict.cpp @@ -1643,7 +1643,7 @@ runCreateDiskTable(NDBT_Context* ctx, NDBT_Step* step){ Ndb* pNdb = GETNDB(step); NdbDictionary::Table tab = *ctx->getTab(); - tab.setTablespace("DEFAULT-TS"); + tab.setTablespaceName("DEFAULT-TS"); for(Uint32 i = 0; 
igetPrimaryKey()) diff --git a/storage/ndb/test/src/NDBT_Test.cpp b/storage/ndb/test/src/NDBT_Test.cpp index 71e7bf5b70b..9c908ab27c6 100644 --- a/storage/ndb/test/src/NDBT_Test.cpp +++ b/storage/ndb/test/src/NDBT_Test.cpp @@ -969,7 +969,7 @@ NDBT_TestSuite::createHook(Ndb* ndb, NdbDictionary::Table& tab, int when) } if (tsname != NULL) { - tab.setTablespace(tsname); + tab.setTablespaceName(tsname); } } return 0; From 045985df4f576d54d50c5224143f2ef318be0ef5 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Tue, 16 Jan 2007 18:58:43 +0100 Subject: [PATCH 24/35] ndb - bug#25636 additional fix after autotest dont send start_fragreq to temporary tables such as ordered indexes... --- storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 7937e74533a..19e8941fde3 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -2832,7 +2832,9 @@ Dbdih::nr_start_fragments(Signal* signal, return; }//if ptrAss(tabPtr, tabRecord); - if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){ + if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE || + tabPtr.p->tabStorage != TabRecord::ST_NORMAL) + { jam(); takeOverPtr.p->toCurrentFragid = 0; takeOverPtr.p->toCurrentTabref++; From ec0969f9decfdc80701b3ab8248149c63bf8d3a6 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Tue, 16 Jan 2007 19:22:10 +0100 Subject: [PATCH 25/35] Bug#25387 - ndb: dbug assert in reference counting for event operations - blob event operation not reference counted correctly, missing TE_ACTIVE - add reference counting for blobs events - make sure also blob event operations get TE_ACTIVE - some minor cleanups + adjustment of dbug prints --- .../ndb/src/ndbapi/NdbEventOperationImpl.cpp | 140 ++++++++++-------- .../ndb/src/ndbapi/NdbEventOperationImpl.hpp | 9 +- 2 files changed, 88 insertions(+), 61 deletions(-) diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 83b227860cf..93ae14a8d50 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -215,8 +215,6 @@ NdbEventOperationImpl::~NdbEventOperationImpl() DBUG_VOID_RETURN; stop(); - // m_bufferHandle->dropSubscribeEvent(m_bufferId); - ; // ToDo? 
We should send stop signal here if (theMainOp == NULL) { @@ -428,7 +426,7 @@ NdbEventOperationImpl::getBlobHandle(const NdbColumnImpl *tAttrInfo, int n) // create blob event operation tBlobOp = - m_ndb->theEventBuffer->createEventOperation(*blobEvnt, m_error); + m_ndb->theEventBuffer->createEventOperationImpl(*blobEvnt, m_error); if (tBlobOp == NULL) DBUG_RETURN(NULL); @@ -561,6 +559,8 @@ NdbEventOperationImpl::execute_nolock() m_state= EO_EXECUTING; mi_type= m_eventImpl->mi_type; m_ndb->theEventBuffer->add_op(); + m_ref_count++; + DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this)); int r= NdbDictionaryImpl::getImpl(*myDict).executeSubscribeEvent(*this); if (r == 0) { if (theMainOp == NULL) { @@ -568,19 +568,24 @@ NdbEventOperationImpl::execute_nolock() NdbEventOperationImpl* blob_op = theBlobOpList; while (blob_op != NULL) { r = blob_op->execute_nolock(); - if (r != 0) + if (r != 0) { break; + } + // blob event op now holds reference + // cleared by TE_STOP or TE_CLUSTER_FAILURE + m_ref_count++; + DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this)); blob_op = blob_op->m_next; } } if (r == 0) { - m_ref_count++; - DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this)); DBUG_RETURN(0); } } //Error + m_ref_count--; + DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this)); m_state= EO_ERROR; mi_type= 0; m_magic_number= 0; @@ -1582,6 +1587,33 @@ NdbEventBuffer::complete_outof_order_gcis() ndbout_c("complete_outof_order_gcis: m_latestGCI: %lld", m_latestGCI); } +void +NdbEventBuffer::insert_event(NdbEventOperationImpl* impl, + SubTableData &data, + LinearSectionPtr *ptr, + Uint32 &oid_ref) +{ + NdbEventOperationImpl *dropped_ev_op = m_dropped_ev_op; + do + { + do + { + oid_ref = impl->m_oid; + insertDataL(impl, &data, ptr); + NdbEventOperationImpl* blob_op = impl->theBlobOpList; + while (blob_op != NULL) + { + oid_ref = blob_op->m_oid; + insertDataL(blob_op, &data, ptr); + blob_op = blob_op->m_next; + } + } while((impl = impl->m_next)); + impl = dropped_ev_op; + dropped_ev_op = NULL; + } while (impl); +} + + void NdbEventBuffer::report_node_connected(Uint32 node_id) { @@ -1606,21 +1638,8 @@ NdbEventBuffer::report_node_connected(Uint32 node_id) /** * Insert this event for each operation */ - { - // no need to lock()/unlock(), receive thread calls this - NdbEventOperationImpl* impl = &op->m_impl; - do if (!impl->m_node_bit_mask.isclear()) - { - data.senderData = impl->m_oid; - insertDataL(impl, &data, ptr); - } while((impl = impl->m_next)); - for (impl = m_dropped_ev_op; impl; impl = impl->m_next) - if (!impl->m_node_bit_mask.isclear()) - { - data.senderData = impl->m_oid; - insertDataL(impl, &data, ptr); - } - } + // no need to lock()/unlock(), receive thread calls this + insert_event(&op->m_impl, data, ptr, data.senderData); DBUG_VOID_RETURN; } @@ -1648,21 +1667,8 @@ NdbEventBuffer::report_node_failure(Uint32 node_id) /** * Insert this event for each operation */ - { - // no need to lock()/unlock(), receive thread calls this - NdbEventOperationImpl* impl = &op->m_impl; - do if (!impl->m_node_bit_mask.isclear()) - { - data.senderData = impl->m_oid; - insertDataL(impl, &data, ptr); - } while((impl = impl->m_next)); - for (impl = m_dropped_ev_op; impl; impl = impl->m_next) - if (!impl->m_node_bit_mask.isclear()) - { - data.senderData = impl->m_oid; - insertDataL(impl, &data, ptr); - } - } + // no need to lock()/unlock(), receive thread calls this + insert_event(&op->m_impl, data, ptr, data.senderData); DBUG_VOID_RETURN; } @@ 
-1693,21 +1699,8 @@ NdbEventBuffer::completeClusterFailed() /** * Insert this event for each operation */ - { - // no need to lock()/unlock(), receive thread calls this - NdbEventOperationImpl* impl = &op->m_impl; - do if (!impl->m_node_bit_mask.isclear()) - { - data.senderData = impl->m_oid; - insertDataL(impl, &data, ptr); - } while((impl = impl->m_next)); - for (impl = m_dropped_ev_op; impl; impl = impl->m_next) - if (!impl->m_node_bit_mask.isclear()) - { - data.senderData = impl->m_oid; - insertDataL(impl, &data, ptr); - } - } + // no need to lock()/unlock(), receive thread calls this + insert_event(&op->m_impl, data, ptr, data.senderData); /** * Release all GCI's with m_gci > gci @@ -1797,17 +1790,36 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, { case NdbDictionary::Event::_TE_NODE_FAILURE: op->m_node_bit_mask.clear(SubTableData::getNdbdNodeId(ri)); + DBUG_PRINT("info", + ("_TE_NODE_FAILURE: m_ref_count: %u for op: %p id: %u", + op->m_ref_count, op, SubTableData::getNdbdNodeId(ri))); break; case NdbDictionary::Event::_TE_ACTIVE: op->m_node_bit_mask.set(SubTableData::getNdbdNodeId(ri)); // internal event, do not relay to user + DBUG_PRINT("info", + ("_TE_ACTIVE: m_ref_count: %u for op: %p id: %u", + op->m_ref_count, op, SubTableData::getNdbdNodeId(ri))); DBUG_RETURN_EVENT(0); break; case NdbDictionary::Event::_TE_CLUSTER_FAILURE: - op->m_node_bit_mask.clear(); - DBUG_ASSERT(op->m_ref_count > 0); - op->m_ref_count--; - DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op)); + if (!op->m_node_bit_mask.isclear()) + { + op->m_node_bit_mask.clear(); + DBUG_ASSERT(op->m_ref_count > 0); + op->m_ref_count--; + DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p", + op->m_ref_count, op)); + if (op->theMainOp) + { + // blob event op, need to clear ref count in main op + DBUG_ASSERT(op->m_ref_count == 0); + DBUG_ASSERT(op->theMainOp->m_ref_count > 0); + op->theMainOp->m_ref_count--; + DBUG_PRINT("info", ("m_ref_count: %u for op: %p", + op->theMainOp->m_ref_count, op->theMainOp)); + } + } break; case NdbDictionary::Event::_TE_STOP: op->m_node_bit_mask.clear(SubTableData::getNdbdNodeId(ri)); @@ -1815,7 +1827,17 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, { DBUG_ASSERT(op->m_ref_count > 0); op->m_ref_count--; - DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op)); + DBUG_PRINT("info", ("_TE_STOP: m_ref_count: %u for op: %p", + op->m_ref_count, op)); + if (op->theMainOp) + { + // blob event op, need to clear ref count in main op + DBUG_ASSERT(op->m_ref_count == 0); + DBUG_ASSERT(op->theMainOp->m_ref_count > 0); + op->theMainOp->m_ref_count--; + DBUG_PRINT("info", ("m_ref_count: %u for op: %p", + op->theMainOp->m_ref_count, op->theMainOp)); + } } break; default: @@ -2639,10 +2661,10 @@ NdbEventBuffer::createEventOperation(const char* eventName, } NdbEventOperationImpl* -NdbEventBuffer::createEventOperation(NdbEventImpl& evnt, - NdbError &theError) +NdbEventBuffer::createEventOperationImpl(NdbEventImpl& evnt, + NdbError &theError) { - DBUG_ENTER("NdbEventBuffer::createEventOperation [evnt]"); + DBUG_ENTER("NdbEventBuffer::createEventOperationImpl"); NdbEventOperationImpl* tOp= new NdbEventOperationImpl(m_ndb, evnt); if (tOp == 0) { diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index ef25c8f48ec..b68aa803690 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -436,8 +436,8 @@ public: Vector 
m_active_gci; NdbEventOperation *createEventOperation(const char* eventName, NdbError &); - NdbEventOperationImpl *createEventOperation(NdbEventImpl& evnt, - NdbError &); + NdbEventOperationImpl *createEventOperationImpl(NdbEventImpl& evnt, + NdbError &); void dropEventOperation(NdbEventOperation *); static NdbEventOperationImpl* getEventOperationImpl(NdbEventOperation* tOp); @@ -541,6 +541,11 @@ public: #endif private: + void insert_event(NdbEventOperationImpl* impl, + SubTableData &data, + LinearSectionPtr *ptr, + Uint32 &oid_ref); + int expand(unsigned sz); // all allocated data From 79e80aac49d5fc4135df9b8027409995c4ce12df Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Wed, 17 Jan 2007 10:53:42 +0100 Subject: [PATCH 26/35] Bug#25387 - added comments in code suggestion during review --- .../ndb/src/ndbapi/NdbEventOperationImpl.cpp | 33 ++++++++-- .../ndb/src/ndbapi/NdbEventOperationImpl.hpp | 62 ++++++++++++++++++- 2 files changed, 88 insertions(+), 7 deletions(-) diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 93ae14a8d50..c2a07899143 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -559,6 +559,8 @@ NdbEventOperationImpl::execute_nolock() m_state= EO_EXECUTING; mi_type= m_eventImpl->mi_type; m_ndb->theEventBuffer->add_op(); + // add kernel reference + // removed on TE_STOP, TE_CLUSTER_FAILURE, or error below m_ref_count++; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this)); int r= NdbDictionaryImpl::getImpl(*myDict).executeSubscribeEvent(*this); @@ -571,8 +573,8 @@ NdbEventOperationImpl::execute_nolock() if (r != 0) { break; } - // blob event op now holds reference - // cleared by TE_STOP or TE_CLUSTER_FAILURE + // add blob reference to main op + // removed by TE_STOP or TE_CLUSTER_FAILURE m_ref_count++; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this)); blob_op = blob_op->m_next; @@ -583,7 +585,9 @@ NdbEventOperationImpl::execute_nolock() DBUG_RETURN(0); } } - //Error + // Error + // remove kernel reference + // added above m_ref_count--; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", m_ref_count, this)); m_state= EO_ERROR; @@ -1227,6 +1231,8 @@ NdbEventBuffer::nextEvent() EventBufData_list::Gci_ops *gci_ops = m_available_data.first_gci_ops(); while (gci_ops && op->getGCI() > gci_ops->m_gci) { + // moved to next gci, check if any references have been + // released when completing the last gci deleteUsedEventOperations(); gci_ops = m_available_data.next_gci_ops(); } @@ -1254,6 +1260,8 @@ NdbEventBuffer::nextEvent() #endif // free all "per gci unique" collected operations + // completed gci, check if any references have been + // released when completing the gci EventBufData_list::Gci_ops *gci_ops = m_available_data.first_gci_ops(); while (gci_ops) { @@ -1290,6 +1298,8 @@ NdbEventBuffer::deleteUsedEventOperations() { NdbEventOperationImpl *op = &op_f->m_impl; DBUG_ASSERT(op->m_ref_count > 0); + // remove gci reference + // added in inserDataL op->m_ref_count--; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op)); if (op->m_ref_count == 0) @@ -1807,14 +1817,17 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, { op->m_node_bit_mask.clear(); DBUG_ASSERT(op->m_ref_count > 0); + // remove kernel reference + // added in execute_nolock op->m_ref_count--; DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p", op->m_ref_count, op)); if (op->theMainOp) 
{ - // blob event op, need to clear ref count in main op DBUG_ASSERT(op->m_ref_count == 0); DBUG_ASSERT(op->theMainOp->m_ref_count > 0); + // remove blob reference in main op + // added in execute_no_lock op->theMainOp->m_ref_count--; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->theMainOp->m_ref_count, op->theMainOp)); @@ -1826,14 +1839,17 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, if (op->m_node_bit_mask.isclear()) { DBUG_ASSERT(op->m_ref_count > 0); + // remove kernel reference + // added in execute_no_lock op->m_ref_count--; DBUG_PRINT("info", ("_TE_STOP: m_ref_count: %u for op: %p", op->m_ref_count, op)); if (op->theMainOp) { - // blob event op, need to clear ref count in main op DBUG_ASSERT(op->m_ref_count == 0); DBUG_ASSERT(op->theMainOp->m_ref_count > 0); + // remove blob reference in main op + // added in execute_no_lock op->theMainOp->m_ref_count--; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->theMainOp->m_ref_count, op->theMainOp)); @@ -2586,6 +2602,8 @@ EventBufData_list::add_gci_op(Gci_op g) #ifndef DBUG_OFF i = m_gci_op_count; #endif + // add gci reference + // removed in deleteUsedOperations g.op->m_ref_count++; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", g.op->m_ref_count, g.op)); m_gci_op_list[m_gci_op_count++] = g; @@ -2654,6 +2672,8 @@ NdbEventBuffer::createEventOperation(const char* eventName, delete tOp; DBUG_RETURN(NULL); } + // add user reference + // removed in dropEventOperation getEventOperationImpl(tOp)->m_ref_count = 1; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", getEventOperationImpl(tOp)->m_ref_count, getEventOperationImpl(tOp))); @@ -2706,6 +2726,9 @@ NdbEventBuffer::dropEventOperation(NdbEventOperation* tOp) } DBUG_ASSERT(op->m_ref_count > 0); + // remove user reference + // added in createEventOperation + // user error to use reference after this op->m_ref_count--; DBUG_PRINT("info", ("m_ref_count: %u for op: %p", op->m_ref_count, op)); if (op->m_ref_count == 0) diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index b68aa803690..04a62b91002 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -400,7 +400,59 @@ public: Uint32 m_eventId; Uint32 m_oid; + /* + m_node_bit_mask keeps track of which ndb nodes have reference to + an event op + + - add - TE_ACTIVE + - remove - TE_STOP, TE_NODE_FAILURE, TE_CLUSTER_FAILURE + + TE_NODE_FAILURE and TE_CLUSTER_FAILURE are created as events + and added to all event ops listed as active or pending delete + in m_dropped_ev_op using insertDataL, including the blob + event ops referenced by a regular event op. + - NdbEventBuffer::report_node_failure + - NdbEventBuffer::completeClusterFailed + + TE_ACTIVE is sent from the kernel on initial execute/start of the + event op, but is also internally generated on node connect like + TE_NODE_FAILURE and TE_CLUSTER_FAILURE + - NdbEventBuffer::report_node_connected + + when m_node_bit_mask becomes clear, the kernel reference is + removed from m_ref_count + */ + Bitmask<(unsigned int)_NDB_NODE_BITMASK_SIZE> m_node_bit_mask; + + /* + m_ref_count keeps track of outstanding references to an event + operation impl object. To make sure that the object is not + deleted too early.
+ + If on dropEventOperation there are still references to an + object it is queued for delete in NdbEventBuffer::m_dropped_ev_op + + the following references exist for a _non_ blob event op: + * user reference + - add - NdbEventBuffer::createEventOperation + - remove - NdbEventBuffer::dropEventOperation + * kernel reference + - add - execute_nolock + - remove - TE_STOP, TE_CLUSTER_FAILURE + * blob reference + - add - execute_nolock on blob event + - remove - TE_STOP, TE_CLUSTER_FAILURE on blob event + * gci reference + - add - insertDataL/add_gci_op + - remove - NdbEventBuffer::deleteUsedEventOperations + + the following references exist for a blob event op: + * kernel reference + - add - execute_nolock + - remove - TE_STOP, TE_CLUSTER_FAILURE + */ + int m_ref_count; bool m_mergeEvents; @@ -557,8 +609,14 @@ private: Vector m_allocated_data; unsigned m_sz; - // dropped event operations that have not yet - // been deleted + /* + dropped event operations (dropEventOperation) that have not yet + been deleted because of outstanding m_ref_count + + check for delete is done on occasions when the ref_count may have + changed by calling deleteUsedEventOperations: + - nextEvent - each time the user has completed processing a gci + */ NdbEventOperationImpl *m_dropped_ev_op; Uint32 m_active_op_count; From 3af9db79759b3d0d41cf7c8e709c1c2b7239d2e2 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Wed, 17 Jan 2007 21:15:13 +0100 Subject: [PATCH 27/35] ndb - bug#25686 add support for doing mlockall before malloc instead of after (recommit in 5.0) --- ndb/include/portlib/NdbMem.h | 2 +- ndb/src/common/portlib/NdbMem.c | 10 +++++++++- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 19 +++++++++++++++++-- ndb/src/kernel/vm/Configuration.cpp | 2 +- ndb/src/kernel/vm/Configuration.hpp | 2 +- ndb/src/mgmsrv/ConfigInfo.cpp | 8 ++++---- 6 files changed, 33 insertions(+), 10 deletions(-) diff --git a/ndb/include/portlib/NdbMem.h b/ndb/include/portlib/NdbMem.h index 0f2de80200e..2afb1845112 100644 --- a/ndb/include/portlib/NdbMem.h +++ b/ndb/include/portlib/NdbMem.h @@ -66,7 +66,7 @@ void NdbMem_Free(void* ptr); * NdbMem_MemLockAll * Locks virtual memory in main memory */ -int NdbMem_MemLockAll(void); +int NdbMem_MemLockAll(int); /** * NdbMem_MemUnlockAll diff --git a/ndb/src/common/portlib/NdbMem.c b/ndb/src/common/portlib/NdbMem.c index f964f4d9937..0d2021aaf0a 100644 --- a/ndb/src/common/portlib/NdbMem.c +++ b/ndb/src/common/portlib/NdbMem.c @@ -57,7 +57,15 @@ void NdbMem_Free(void* ptr) } -int NdbMem_MemLockAll(){ +int NdbMem_MemLockAll(int i){ + if (i == 1) + { +#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) && defined (MCL_FUTURE) + return mlockall(MCL_CURRENT | MCL_FUTURE); +#else + return -1; +#endif + } #if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) return mlockall(MCL_CURRENT); #else diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index cd0e471a676..5642a11db81 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -342,9 +342,9 @@ void Cmvmi::execSTTOR(Signal* signal) { if (theStartPhase == 1){ jam(); - if(theConfig.lockPagesInMainMemory()) + if(theConfig.lockPagesInMainMemory() == 1) { - int res = NdbMem_MemLockAll(); + int res = NdbMem_MemLockAll(0); if(res != 0){ g_eventLogger.warning("Failed to memlock pages"); warningEvent("Failed to memlock pages"); @@ -788,6 +788,21 @@ Cmvmi::execSTART_ORD(Signal* signal) { if(globalData.theStartLevel == NodeState::SL_CMVMI){ jam(); +
if(theConfig.lockPagesInMainMemory() == 2) + { + int res = NdbMem_MemLockAll(1); + if(res != 0) + { + g_eventLogger.warning("Failed to memlock pages"); + warningEvent("Failed to memlock pages"); + } + else + { + g_eventLogger.info("Locked future allocations"); + } + } + globalData.theStartLevel = NodeState::SL_STARTING; globalData.theRestartFlag = system_started; /** diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 49f16dae3dd..cbdd9494fd8 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -477,7 +477,7 @@ Configuration::setupConfiguration(){ DBUG_VOID_RETURN; } -bool +Uint32 Configuration::lockPagesInMainMemory() const { return _lockPagesInMainMemory; } diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp index 6315209ddbb..13b31ad3538 100644 --- a/ndb/src/kernel/vm/Configuration.hpp +++ b/ndb/src/kernel/vm/Configuration.hpp @@ -37,7 +37,7 @@ public: void setupConfiguration(); void closeConfiguration(bool end_session= true); - bool lockPagesInMainMemory() const; + Uint32 lockPagesInMainMemory() const; int timeBetweenWatchDogCheck() const ; void timeBetweenWatchDogCheck(int value); diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index ab4f2b413b3..7f89f5c5c49 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -564,10 +564,10 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "If set to yes, then NDB Cluster data will not be swapped out to disk", ConfigInfo::CI_USED, true, - ConfigInfo::CI_BOOL, - "false", - "false", - "true" }, + ConfigInfo::CI_INT, + "0", + "1", + "2" }, { CFG_DB_WATCHDOG_INTERVAL, From c72cae75d41fcf030c6e865f9a9f54a7dcadedc0 Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Thu, 18 Jan 2007 13:21:20 +0100 Subject: [PATCH 28/35] Crash in rpl_ndb_dd_advance: check for null transaction pointer --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 106b2e075f8..cad53ff9836 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -7827,7 +7827,7 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const retry: if(report_error) { - if (file) + if (file && pTrans) { reterr= file->ndb_err(pTrans); } From 8aea461461efb510695b1f587800181d54b46aaf Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Fri, 19 Jan 2007 04:36:33 +0100 Subject: [PATCH 29/35] ndb - bug#25711 fix cpu peak in big clusters during unpack of config --- ndb/src/common/util/ConfigValues.cpp | 208 ++++++++++++++++----------- 1 file changed, 122 insertions(+), 86 deletions(-) diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp index ae4fbfd2f71..49fd6dd9a28 100644 --- a/ndb/src/common/util/ConfigValues.cpp +++ b/ndb/src/common/util/ConfigValues.cpp @@ -34,7 +34,7 @@ static const char Magic[] = { 'N', 'D', 'B', 'C', 'O', 'N', 'F', 'V' }; //#define DEBUG_CV #ifdef DEBUG_CV -#define DEBUG +#define DEBUG if(getenv("CV_DEBUG")) #else #define DEBUG if(0) #endif @@ -202,62 +202,60 @@ ConfigValues::Iterator::set(Uint32 key, const char * value){ static bool findKey(const Uint32 * values, Uint32 sz, Uint32 key, Uint32 * _pos){ - Uint32 pos = hash(key, sz); - Uint32 count = 0; - while((values[pos] & KP_MASK) != key && count < sz){ - pos = nextHash(key, sz, pos, ++count); + Uint32 lo = 0; + Uint32 hi = sz; + Uint32 pos = (hi + lo) >> 1; + + DEBUG 
printf("findKey(H'%.8x %d)", key, sz); + + if (sz == 0) + { + DEBUG ndbout_c(" -> false, 0"); + * _pos = 0; + return false; } - if((values[pos] & KP_MASK)== key){ - *_pos = pos; - return true; - } + Uint32 val = 0; + Uint32 oldpos = pos + 1; + while (pos != oldpos) + { + DEBUG printf(" [ %d %d %d ] ", lo, pos, hi); + assert(pos < hi); + assert(pos >= lo); + val = values[2*pos] & KP_MASK; + if (key > val) + { + lo = pos; + } + else if (key < val) + { + hi = pos; + } + else + { + * _pos = 2*pos; + DEBUG ndbout_c(" -> true, %d", pos); + return true; + } + oldpos = pos; + pos = (hi + lo) >> 1; + } + + DEBUG printf(" pos: %d (key %.8x val: %.8x values[pos]: %x) key>val: %d ", + pos, key, val, values[2*pos] & KP_MASK, + key > val); + + pos += (key > val) ? 1 : 0; + + * _pos = 2*pos; + DEBUG ndbout_c(" -> false, %d", pos); return false; } -static -Uint32 -hash(Uint32 key, Uint32 size){ - Uint32 tmp = (key >> 16) ^ (key & 0xFFFF); - return (((tmp << 16) | tmp) % size) << 1; -} - -static -Uint32 -nextHash(Uint32 key, Uint32 size, Uint32 pos, Uint32 count){ - Uint32 p = (pos >> 1); - if((key % size) != 0) - p += key; - else - p += 1; - return (p % size) << 1; -} - -static -Uint32 -directory(Uint32 sz){ - const Uint32 _input = sz; - if((sz & 1) == 0) - sz ++; - - bool prime = false; - while(!prime){ - prime = true; - for(Uint32 n = 3; n*n <= sz; n += 2){ - if((sz % n) == 0){ - prime = false; - sz += 2; - break; - } - } - } - DEBUG printf("directory %d -> %d\n", _input, sz); - return sz; -} ConfigValuesFactory::ConfigValuesFactory(Uint32 keys, Uint32 data){ m_sectionCounter = (1 << KP_SECTION_SHIFT); - m_freeKeys = directory(keys); + m_freeKeys = keys; m_freeData = (data + 7) & ~7; m_currentSection = 0; m_cfg = create(m_freeKeys, m_freeData); @@ -316,11 +314,14 @@ ConfigValuesFactory::expand(Uint32 fk, Uint32 fs){ return ; } + DEBUG printf("[ fk fd ] : [ %d %d ]", m_freeKeys, m_freeData); + m_freeKeys = (m_freeKeys >= fk ? m_cfg->m_size : fk + m_cfg->m_size); m_freeData = (m_freeData >= fs ? 
m_cfg->m_dataSize : fs + m_cfg->m_dataSize); - m_freeKeys = directory(m_freeKeys); m_freeData = (m_freeData + 7) & ~7; - + + DEBUG ndbout_c(" [ %d %d ]", m_freeKeys, m_freeData); + ConfigValues * m_tmp = m_cfg; m_cfg = create(m_freeKeys, m_freeData); put(* m_tmp); @@ -336,7 +337,6 @@ ConfigValuesFactory::shrink(){ m_freeKeys = m_cfg->m_size - m_freeKeys; m_freeData = m_cfg->m_dataSize - m_freeData; - m_freeKeys = directory(m_freeKeys); m_freeData = (m_freeData + 7) & ~7; ConfigValues * m_tmp = m_cfg; @@ -415,52 +415,58 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){ } const Uint32 tmp = entry.m_key | m_currentSection; - const Uint32 sz = m_cfg->m_size; - Uint32 pos = hash(tmp, sz); - Uint32 count = 0; - Uint32 val = m_cfg->m_values[pos]; + const Uint32 sz = m_cfg->m_size - m_freeKeys; - while((val & KP_MASK) != tmp && val != CFV_KEY_FREE && count < sz){ - pos = nextHash(tmp, sz, pos, ++count); - val = m_cfg->m_values[pos]; - } - - if((val & KP_MASK) == tmp){ + Uint32 pos; + if (findKey(m_cfg->m_values, sz, tmp, &pos)) + { DEBUG ndbout_c("key %x already found at pos: %d", tmp, pos); return false; } - if(count >= sz){ - pos = hash(tmp, sz); - count = 0; - Uint32 val = m_cfg->m_values[pos]; - - printf("key: %d, (key %% size): %d\n", entry.m_key, (entry.m_key % sz)); - printf("pos: %d", pos); - while((val & KP_MASK) != tmp && val != CFV_KEY_FREE && count < sz){ - pos = nextHash(tmp, sz, pos, ++count); - val = m_cfg->m_values[pos]; - printf(" %d", pos); + DEBUG { + printf("H'before "); + Uint32 prev = 0; + for (Uint32 i = 0; im_values[2*i] & KP_MASK; + ndbout_c("%.8x", val); + assert(val >= prev); + prev = val; } - printf("\n"); - - abort(); - printf("Full\n"); - return false; + } + + if (pos != 2*sz) + { + DEBUG ndbout_c("pos: %d sz: %d", pos, sz); + memmove(m_cfg->m_values + pos + 2, m_cfg->m_values + pos, + 4 * (2*sz - pos)); } - assert(pos < (sz << 1)); Uint32 key = tmp; key |= (entry.m_type << KP_TYPE_SHIFT); m_cfg->m_values[pos] = key; + + DEBUG { + printf("H'after "); + Uint32 prev = 0; + for (Uint32 i = 0; i<=sz; i++) + { + Uint32 val = m_cfg->m_values[2*i] & KP_MASK; + ndbout_c("%.8x", val); + assert(val >= prev); + prev = val; + } + } + switch(entry.m_type){ case ConfigValues::IntType: case ConfigValues::SectionType: m_cfg->m_values[pos+1] = entry.m_int; m_freeKeys--; DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value: %d\n", - pos, sz, count, + pos, sz, 0, (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, entry.m_int); return true; @@ -472,7 +478,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){ m_freeKeys--; m_freeData -= sizeof(char *); DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value(%d): %s\n", - pos, sz, count, + pos, sz, 0, (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, index, entry.m_string); @@ -485,7 +491,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){ m_freeKeys--; m_freeData -= 8; DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value64(%d): %lld\n", - pos, sz, count, + pos, sz, 0, (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, index, entry.m_int64); @@ -648,7 +654,9 @@ ConfigValuesFactory::unpack(const void * _src, Uint32 len){ } const char * src = (const char *)_src; - + const char * end = src + len - 4; + src += sizeof(Magic); + { Uint32 len32 = (len >> 2); const Uint32 * tmp = (const Uint32*)_src; @@ -663,9 +671,37 @@ ConfigValuesFactory::unpack(const void * _src, Uint32 len){ } } - const char * end = src + len - 4; - src += sizeof(Magic); - + const char * save = src; + + { + Uint32 keys = 0; + Uint32 data = 0; + 
+    while(end - src > 4){
+      Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4;
+      keys++;
+      switch(::getTypeOf(tmp)){
+      case ConfigValues::IntType:
+      case ConfigValues::SectionType:
+        src += 4;
+        break;
+      case ConfigValues::Int64Type:
+        src += 8;
+        data += 8;
+        break;
+      case ConfigValues::StringType:{
+        Uint32 s_len = ntohl(* (const Uint32 *)src);
+        src += 4 + mod4(s_len);
+        data += sizeof(char*);
+        break;
+      }
+      default:
+        break;
+      }
+    }
+    expand(keys, data);
+  }
+
+  src = save;
   ConfigValues::Entry entry;
   while(end - src > 4){
     Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4;

From 9c8ab666b0059be709141c353f647e7f425e62ce Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.mysql.com" <>
Date: Fri, 19 Jan 2007 11:35:00 +0100
Subject: [PATCH 30/35] Bug#25387 ndb: dbug assert in reference counting for
 event operations

- on blob part execute failure, must leave in state executing, and await
  dropEventOperation
---
 storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index c2a07899143..fe10cf133c2 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -571,7 +571,12 @@ NdbEventOperationImpl::execute_nolock()
   while (blob_op != NULL) {
     r = blob_op->execute_nolock();
     if (r != 0) {
-      break;
+      // since main op is running and possibly some blob ops as well
+      // we can't just reset the main op. Instead return with error,
+      // main op (and blob ops) will be cleaned up when user calls
+      // dropEventOperation
+      m_error.code= myDict->getNdbError().code;
+      DBUG_RETURN(r);
     }
     // add blob reference to main op
     // removed by TE_STOP or TE_CLUSTER_FAILURE

From 372cbc1def63bfe1a5919e6688edac17503193ff Mon Sep 17 00:00:00 2001
From: "jonas@perch.ndb.mysql.com" <>
Date: Fri, 19 Jan 2007 17:01:52 +0100
Subject: [PATCH 31/35] ndb - bug#19645

fix some more sp100 hang cases
---
 .../ndb/src/kernel/blocks/qmgr/QmgrMain.cpp |  5 ++++
 storage/ndb/src/kernel/blocks/suma/Suma.cpp | 29 +++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 89b1d18f22c..f4a4bbbb400 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -2456,7 +2456,12 @@ void Qmgr::execAPI_FAILREQ(Signal* signal)
 
   // ignore if api not active
   if (failedNodePtr.p->phase != ZAPI_ACTIVE)
+  {
+    jam();
+    // But send to SUMA anyway...
+    sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
     return;
+  }
 
   signal->theData[0] = NDB_LE_Disconnected;
   signal->theData[1] = failedNodePtr.i;
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index 6f45cfb1975..3804cda00c9 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -319,6 +319,12 @@ Suma::execSTTOR(Signal* signal) {
       createSequence(signal);
       DBUG_VOID_RETURN;
     }//if
+
+    if (ERROR_INSERTED(13030))
+    {
+      ndbout_c("Dont start handover");
+      return;
+    }
   }//if
 
   if(startphase == 100)
@@ -564,6 +570,15 @@ void Suma::execAPI_FAILREQ(Signal* signal)
   Uint32 failedApiNode = signal->theData[0];
   //BlockReference retRef = signal->theData[1];
 
+  if (c_startup.m_restart_server_node_id &&
+      c_startup.m_restart_server_node_id != RNIL)
+  {
+    jam();
+    sendSignalWithDelay(reference(), GSN_API_FAILREQ, signal,
+                        200, signal->getLength());
+    return;
+  }
+
   c_failedApiNodes.set(failedApiNode);
   c_connected_nodes.clear(failedApiNode);
   bool found = removeSubscribersOnNode(signal, failedApiNode);
@@ -912,6 +927,20 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
     {
       CLEAR_ERROR_INSERT_VALUE;
     }
+
+    if (tCase == 8009)
+    {
+      if (ERROR_INSERTED(13030))
+      {
+        CLEAR_ERROR_INSERT_VALUE;
+        sendSTTORRY(signal);
+      }
+      else
+      {
+        SET_ERROR_INSERT_VALUE(13030);
+      }
+      return;
+    }
 }
 
 /*************************************************************

From 162aa18e15c06c962fef1e16bd07b2f3e276e794 Mon Sep 17 00:00:00 2001
From: "jonas@perch.ndb.mysql.com" <>
Date: Sat, 20 Jan 2007 03:01:37 +0100
Subject: [PATCH 32/35] ndb - Fix bug when sending NODE_VERSION_REP
---
 storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp |  6 ++
 .../ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 62 ++++++++++++-------
 2 files changed, 44 insertions(+), 24 deletions(-)

diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index 0fdce8b5166..674b8bf0669 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -455,6 +455,12 @@ private:
   void recompute_version_info(Uint32 type, Uint32 version);
   void execNODE_VERSION_REP(Signal* signal);
   void sendApiVersionRep(Signal* signal, NodeRecPtr nodePtr);
+  void sendVersionedDb(NodeReceiverGroup rg,
+                       GlobalSignalNumber gsn,
+                       Signal* signal,
+                       Uint32 length,
+                       JobBufferLevel jbuf,
+                       Uint32 minversion);
 };
 
 #endif
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index f4a4bbbb400..317cc870873 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -2786,30 +2786,12 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
 
   recompute_version_info(type, version);
 
-  if (info.m_type[NodeInfo::DB].m_min_version >= NDBD_NODE_VERSION_REP)
-  {
-    jam();
-    NodeReceiverGroup rg(QMGR, c_clusterNodes);
-    rg.m_nodes.clear(getOwnNodeId());
-    signal->theData[0] = apiNodePtr.i;
-    signal->theData[1] = version;
-    sendSignal(rg, GSN_NODE_VERSION_REP, signal, 2, JBB);
-  }
-  else
-  {
-    Uint32 i = 0;
-    while((i = c_clusterNodes.find(i + 1)) != NdbNodeBitmask::NotFound)
-    {
-      jam();
-      if (i == getOwnNodeId())
-        continue;
-      if (getNodeInfo(i).m_version >= NDBD_NODE_VERSION_REP)
-      {
-        jam();
-        sendSignal(calcQmgrBlockRef(i), GSN_NODE_VERSION_REP, signal, 2,JBB);
-      }
-    }
-  }
+  signal->theData[0] = apiNodePtr.i;
+  signal->theData[1] = version;
+  NodeReceiverGroup rg(QMGR, c_clusterNodes);
+  rg.m_nodes.clear(getOwnNodeId());
+  sendVersionedDb(rg, GSN_NODE_VERSION_REP, signal, 2, JBB,
+                  NDBD_NODE_VERSION_REP);
 
   signal->theData[0] = apiNodePtr.i;
   EXECUTE_DIRECT(NDBCNTR, GSN_API_START_REP, signal, 1);
@@ -2817,6 +2799,38 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
   return;
 }//Qmgr::execAPI_REGREQ()
 
+void
+Qmgr::sendVersionedDb(NodeReceiverGroup rg,
+                      GlobalSignalNumber gsn,
+                      Signal* signal,
+                      Uint32 length,
+                      JobBufferLevel jbuf,
+                      Uint32 minversion)
+{
+  jam();
+  NodeVersionInfo info = getNodeVersionInfo();
+  if (info.m_type[NodeInfo::DB].m_min_version >= minversion)
+  {
+    jam();
+    sendSignal(rg, gsn, signal, length, jbuf);
+  }
+  else
+  {
+    jam();
+    Uint32 i = 0, cnt = 0;
+    while((i = rg.m_nodes.find(i + 1)) != NodeBitmask::NotFound)
+    {
+      jam();
+      if (getNodeInfo(i).m_version >= minversion)
+      {
+        jam();
+        cnt++;
+        sendSignal(numberToRef(rg.m_block, i), gsn, signal, length, jbuf);
+      }
+    }
+    ndbassert(cnt < rg.m_nodes.count());
+  }
+}
 
 void
 Qmgr::execAPI_VERSION_REQ(Signal * signal) {

From f7c6b1337541f99b8a137d57ae9b7b7b4d27d780 Mon Sep 17 00:00:00 2001
From: "jonas@eel.(none)" <>
Date: Mon, 22 Jan 2007 17:06:27 +0100
Subject: [PATCH 33/35] ndb - fix bug in NODE_VERSION_REP

Fix assertion in corner-case where signal isn't sent to any node
---
 storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 317cc870873..81679d3e16a 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -2828,7 +2828,8 @@ Qmgr::sendVersionedDb(NodeReceiverGroup rg,
         sendSignal(numberToRef(rg.m_block, i), gsn, signal, length, jbuf);
       }
     }
-    ndbassert(cnt < rg.m_nodes.count());
+    ndbassert((cnt == 0 && rg.m_nodes.count() == 0) ||
+              (cnt < rg.m_nodes.count()));
   }
 }

From e7e4dde51ef0c8040d3b85f5da7bc21c59a4090f Mon Sep 17 00:00:00 2001
From: "jonas@eel.(none)" <>
Date: Mon, 22 Jan 2007 17:25:49 +0100
Subject: [PATCH 34/35] ndb - bug#25755

Make sure subscriber is removed from list when n_subscribers is decreased
---
 storage/ndb/src/kernel/blocks/suma/Suma.cpp | 48 ++++++++++++++++++---
 storage/ndb/src/kernel/blocks/suma/Suma.hpp |  2 +-
 2 files changed, 42 insertions(+), 8 deletions(-)

diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index 3804cda00c9..a9803343871 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -1431,17 +1431,26 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr,
   if (r)
   {
+    jam();
     // we have to wait getting tab info
     DBUG_RETURN(1);
   }
   if (tabPtr.p->setupTrigger(signal, *this))
   {
+    jam();
     // we have to wait for triggers to be setup
     DBUG_RETURN(1);
   }
-  completeOneSubscriber(signal, tabPtr, subbPtr);
+  int ret = completeOneSubscriber(signal, tabPtr, subbPtr);
+  if (ret == -1)
+  {
+    jam();
+    LocalDLList<Subscriber> subscribers(c_subscriberPool,
+                                        tabPtr.p->c_subscribers);
+    subscribers.release(subbPtr);
+  }
   completeInitTable(signal, tabPtr);
   DBUG_RETURN(0);
 }
@@ -1517,6 +1526,22 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
   req->tableId = tableId;
   DBUG_PRINT("info",("GET_TABINFOREQ id %d", req->tableId));
+
+  if (ERROR_INSERTED(13031))
+  {
+    jam();
+    ndbout_c("HERE");
+    CLEAR_ERROR_INSERT_VALUE;
+    GetTabInfoRef* ref = (GetTabInfoRef*)signal->getDataPtrSend();
+    ref->tableId = tableId;
+    ref->senderData = tabPtr.i;
+    ref->errorCode = GetTabInfoRef::TableNotDefined;
+    sendSignal(reference(), GSN_GET_TABINFOREF, signal,
+               GetTabInfoRef::SignalLength, JBB);
+    DBUG_RETURN(1);
+  }
+
+  ndbout_c("HARE");
   sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
              GetTabInfoReq::SignalLength, JBB);
   DBUG_RETURN(1);
@@ -1530,7 +1555,7 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
   DBUG_RETURN(0);
 }
 
-void
+int
 Suma::completeOneSubscriber(Signal *signal, TablePtr tabPtr, SubscriberPtr subbPtr)
 {
   jam();
@@ -1540,19 +1565,22 @@ Suma::completeOneSubscriber(Signal *signal, TablePtr tabPtr, SubscriberPtr subbP
       (c_startup.m_restart_server_node_id == 0 ||
        tabPtr.p->m_state != Table::DROPPED))
   {
+    jam();
    sendSubStartRef(signal,subbPtr,tabPtr.p->m_error,
                    SubscriptionData::TableData);
    tabPtr.p->n_subscribers--;
+    DBUG_RETURN(-1);
   }
   else
   {
+    jam();
     SubscriptionPtr subPtr;
     c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
     subPtr.p->m_table_ptrI= tabPtr.i;
     sendSubStartComplete(signal,subbPtr, m_last_complete_gci + 3,
                          SubscriptionData::TableData);
   }
-  DBUG_VOID_RETURN;
+  DBUG_RETURN(0);
 }
 
 void
@@ -1565,11 +1593,17 @@ Suma::completeAllSubscribers(Signal *signal, TablePtr tabPtr)
     LocalDLList<Subscriber> subscribers(c_subscriberPool,
                                         tabPtr.p->c_subscribers);
     SubscriberPtr subbPtr;
-    for(subscribers.first(subbPtr);
-        !subbPtr.isNull();
-        subscribers.next(subbPtr))
+    for(subscribers.first(subbPtr); !subbPtr.isNull();)
     {
-      completeOneSubscriber(signal, tabPtr, subbPtr);
+      jam();
+      Ptr<Subscriber> tmp = subbPtr;
+      subscribers.next(subbPtr);
+      int ret = completeOneSubscriber(signal, tabPtr, tmp);
+      if (ret == -1)
+      {
+        jam();
+        subscribers.release(tmp);
+      }
     }
   }
   DBUG_VOID_RETURN;
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
index 4408d6aff8d..1cf30c1fa24 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
@@ -251,7 +251,7 @@ public:
                 SubscriberPtr subbPtr);
   int initTable(Signal *signal,Uint32 tableId, TablePtr &tabPtr);
 
-  void completeOneSubscriber(Signal* signal, TablePtr tabPtr, SubscriberPtr subbPtr);
+  int completeOneSubscriber(Signal* signal, TablePtr tabPtr, SubscriberPtr subbPtr);
   void completeAllSubscribers(Signal* signal, TablePtr tabPtr);
   void completeInitTable(Signal* signal, TablePtr tabPtr);

From 220eb32a3bf6e70b62514944bce6a3cbba7de2b4 Mon Sep 17 00:00:00 2001
From: "jonas@eel.(none)" <>
Date: Mon, 22 Jan 2007 17:29:11 +0100
Subject: [PATCH 35/35] ndb - bug#25755

remove accidentally left debug printouts
---
 storage/ndb/src/kernel/blocks/suma/Suma.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index a9803343871..6fbbceaceea 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -1530,7 +1530,6 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
   if (ERROR_INSERTED(13031))
   {
     jam();
-    ndbout_c("HERE");
    CLEAR_ERROR_INSERT_VALUE;
    GetTabInfoRef* ref = (GetTabInfoRef*)signal->getDataPtrSend();
    ref->tableId = tableId;
@@ -1541,7 +1540,6 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
     DBUG_RETURN(1);
   }
 
-  ndbout_c("HARE");
   sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
              GetTabInfoReq::SignalLength, JBB);
   DBUG_RETURN(1);
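
The bug#25755 fix above works by giving completeOneSubscriber an int return code and having completeAllSubscribers remember the current subscriber, advance the iterator first, and only then release the remembered element when the return code says it must be dropped. The following is a minimal, self-contained sketch of that same iterate-and-release pattern, written with std::list instead of the NDB intrusive DLList and pool; every name in it (Subscriber, complete_one, complete_all) is illustrative only and not taken from the NDB source.

// Illustrative sketch only (not NDB code): releasing the current element
// while iterating, mirroring the completeAllSubscribers change above.
#include <cstdio>
#include <list>

struct Subscriber {      // hypothetical stand-in for SUMA's subscriber record
  int id;
  bool failed;           // pretend "sub start had to be refused" condition
};

// Mirrors the new int return code: 0 = completed, -1 = must be dropped.
static int complete_one(const Subscriber& s) {
  return s.failed ? -1 : 0;
}

static void complete_all(std::list<Subscriber>& subs) {
  for (auto it = subs.begin(); it != subs.end();) {
    auto cur = it++;                 // save current, advance before any erase
    if (complete_one(*cur) == -1) {
      subs.erase(cur);               // safe: 'it' no longer refers to 'cur'
    }
  }
}

int main() {
  std::list<Subscriber> subs = {{1, false}, {2, true}, {3, false}};
  complete_all(subs);
  for (const Subscriber& s : subs)
    std::printf("subscriber %d kept\n", s.id);
  return 0;
}

The design point is the same as in the patch: advancing before releasing keeps the loop's iterator valid, so the element can be removed from the list in the same pass that decrements (here, implicitly shrinks) the subscriber count, instead of leaving a released entry reachable from the list.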