diff --git a/mysql-test/include/ndb_master-slave.inc b/mysql-test/include/ndb_master-slave.inc index 1568072202b..0bf4b701f0c 100644 --- a/mysql-test/include/ndb_master-slave.inc +++ b/mysql-test/include/ndb_master-slave.inc @@ -1,3 +1,6 @@ +# Replication tests need binlog +source include/have_log_bin.inc; + connect (master,127.0.0.1,root,,test,$MASTER_MYPORT,); connect (master1,127.0.0.1,root,,test,$MASTER_MYPORT,); connect (slave,127.0.0.1,root,,test,$SLAVE_MYPORT,); diff --git a/mysql-test/suite/ndb/t/disabled.def b/mysql-test/suite/ndb/t/disabled.def index b97cb9cecc9..9c2dc80d5ee 100644 --- a/mysql-test/suite/ndb/t/disabled.def +++ b/mysql-test/suite/ndb/t/disabled.def @@ -9,8 +9,6 @@ # Do not use any TAB characters for whitespace. # ############################################################################## -ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog -ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_dd_sql_features : Bug#29102 ndb_dd_sql_features fails in pushbuild ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed @@ -21,5 +19,3 @@ ndb_partition_error2 : HF is not sure if the test can work as internded on all # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events #ndb_binlog_discover : bug#21806 2006-08-24 -#ndb_autodiscover3 : bug#21806 -#ndb_autodiscover3 : Bug#20872 2007-07-15 ingo master*.err: miscellaneous error messages diff --git a/mysql-test/suite/ndb/r/ndb_autodiscover.result b/mysql-test/suite/ndb_team/r/ndb_autodiscover.result similarity index 100% rename from mysql-test/suite/ndb/r/ndb_autodiscover.result rename to mysql-test/suite/ndb_team/r/ndb_autodiscover.result diff --git a/mysql-test/suite/ndb/r/ndb_autodiscover2.result 
b/mysql-test/suite/ndb_team/r/ndb_autodiscover2.result similarity index 100% rename from mysql-test/suite/ndb/r/ndb_autodiscover2.result rename to mysql-test/suite/ndb_team/r/ndb_autodiscover2.result diff --git a/mysql-test/suite/ndb/r/ndb_autodiscover3.result b/mysql-test/suite/ndb_team/r/ndb_autodiscover3.result similarity index 100% rename from mysql-test/suite/ndb/r/ndb_autodiscover3.result rename to mysql-test/suite/ndb_team/r/ndb_autodiscover3.result diff --git a/mysql-test/suite/ndb_team/t/disabled.def b/mysql-test/suite/ndb_team/t/disabled.def new file mode 100644 index 00000000000..8ff2d29147d --- /dev/null +++ b/mysql-test/suite/ndb_team/t/disabled.def @@ -0,0 +1,17 @@ +############################################################################## +# +# List the test cases that are to be disabled temporarily. +# +# Separate the test case name and the comment with ':'. +# +# : BUG# +# +# Do not use any TAB characters for whitespace. +# +############################################################################## +ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog +ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog + +# the below testcase have been reworked to avoid the bug, test contains comment, keep bug open +#ndb_autodiscover3 : bug#21806 +#ndb_autodiscover3 : Bug#20872 2007-07-15 ingo master*.err: miscellaneous error messages diff --git a/mysql-test/suite/ndb/t/ndb_autodiscover.test b/mysql-test/suite/ndb_team/t/ndb_autodiscover.test similarity index 100% rename from mysql-test/suite/ndb/t/ndb_autodiscover.test rename to mysql-test/suite/ndb_team/t/ndb_autodiscover.test diff --git a/mysql-test/suite/ndb/t/ndb_autodiscover2-master.opt b/mysql-test/suite/ndb_team/t/ndb_autodiscover2-master.opt similarity index 100% rename from mysql-test/suite/ndb/t/ndb_autodiscover2-master.opt rename to mysql-test/suite/ndb_team/t/ndb_autodiscover2-master.opt diff --git 
a/mysql-test/suite/ndb/t/ndb_autodiscover2.test b/mysql-test/suite/ndb_team/t/ndb_autodiscover2.test similarity index 100% rename from mysql-test/suite/ndb/t/ndb_autodiscover2.test rename to mysql-test/suite/ndb_team/t/ndb_autodiscover2.test diff --git a/mysql-test/suite/ndb/t/ndb_autodiscover3.test b/mysql-test/suite/ndb_team/t/ndb_autodiscover3.test similarity index 100% rename from mysql-test/suite/ndb/t/ndb_autodiscover3.test rename to mysql-test/suite/ndb_team/t/ndb_autodiscover3.test diff --git a/mysql-test/suite/rpl_ndb/t/disabled.def b/mysql-test/suite/rpl_ndb/t/disabled.def index 90286ecc421..f372d44cb90 100644 --- a/mysql-test/suite/rpl_ndb/t/disabled.def +++ b/mysql-test/suite/rpl_ndb/t/disabled.def @@ -13,6 +13,7 @@ rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated rpl_ndb_2myisam : BUG#19227 Seems to pass currently +rpl_ndb_2other : BUG#21842 2007-08-30 tsmith test has never worked on bigendian (sol10-sparc-a, powermacg5 rpl_ndb_dd_partitions : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on s/AMD rpl_ndb_innodb2ndb : Bug#29549 rpl_ndb_myisam2ndb,rpl_ndb_innodb2ndb failed on Solaris for pack_length issue rpl_ndb_myisam2ndb : Bug#29549 rpl_ndb_myisam2ndb,rpl_ndb_innodb2ndb failed on Solaris for pack_length issue diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_2other.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_2other.test index 736e2cadf78..b7e393ca3cc 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_2other.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_2other.test @@ -7,7 +7,7 @@ --source include/have_ndb.inc --source include/have_innodb.inc --source include/have_binlog_format_mixed_or_row.inc ---source include/master-slave.inc +--source include/ndb_master-slave.inc # On master use NDB as storage engine. 
connection master; diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_basic.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_basic.test index ea7d80aef50..08f7e12b1fb 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_basic.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_basic.test @@ -1,5 +1,4 @@ --source include/have_ndb.inc ---source include/have_binlog_format_mixed_or_row.inc --source include/ndb_master-slave.inc diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob.test index 181d225d3c0..822f1f224cd 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob.test @@ -1,5 +1,4 @@ --source include/have_ndb.inc ---source include/have_binlog_format_mixed_or_row.inc --source include/ndb_master-slave.inc # diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test index 64f6f5e4f42..e29f24998fe 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test @@ -4,7 +4,6 @@ # code between engine tests # ################################# -- source include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDBCLUSTER; -- source extra/rpl_tests/rpl_row_blob.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular.test index 62583188e34..c1a48d08c53 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular.test @@ -1,5 +1,4 @@ --source include/have_ndb.inc ---source include/have_binlog_format_mixed_or_row.inc --source include/ndb_master-slave.inc # set up circular replication diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_simplex.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_simplex.test index a832639c9fd..fcbff1392f4 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_simplex.test +++ 
b/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_simplex.test @@ -1,5 +1,4 @@ --source include/have_ndb.inc ---source include/have_binlog_format_mixed_or_row.inc --source include/ndb_master-slave.inc connection master; diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test index 7adb62d5668..8c45ac5dbdd 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test @@ -6,6 +6,5 @@ ##################################### -- source include/have_ndb.inc -- source include/ndb_master-slave.inc --- source include/have_log_bin.inc let $engine_type=NDB; -- source extra/rpl_tests/rpl_commit_after_flush.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test index bbd67e4d8d2..6b1d932f9a6 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test @@ -3,7 +3,6 @@ # Share test code between engine tests # ######################################### --source include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDB; -- source extra/rpl_tests/rpl_delete_no_where.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db.test index c0f85dd8981..ffe8551d00f 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db.test @@ -6,7 +6,6 @@ ########################################################## --source include/have_ndb.inc ---source include/have_binlog_format_mixed_or_row.inc --source include/ndb_master-slave.inc --disable_warnings diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table.test index 3f46c6fbda4..ee6abd580f5 100644 --- 
a/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table.test @@ -6,7 +6,6 @@ ########################################################## --source include/have_ndb.inc ---source include/have_binlog_format_mixed_or_row.inc --source include/ndb_master-slave.inc --disable_warnings diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test index 90315a01150..c02d82e2dc9 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test @@ -7,7 +7,6 @@ # reduce test case code # ################################### -- source include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDB; -- source extra/rpl_tests/rpl_row_func003.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans.test index 63c5c5e93e3..2f2414ba578 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans.test @@ -3,7 +3,6 @@ -- source include/have_ndb.inc -- source include/have_innodb.inc -- source include/ndb_master-slave.inc --- source include/have_log_bin.inc create table t1 (a int, unique(a)) engine=ndbcluster; create table t2 (a int, unique(a)) engine=innodb; diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test index 3ef360e3fdf..e6c66011fb7 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test @@ -2,7 +2,6 @@ # Wrapper for rpl_insert_ignore.test# ##################################### -- source include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDB; let $engine_type2=myisam; diff --git 
a/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test index 3e7042a0cfa..f144965e9b0 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test @@ -3,7 +3,6 @@ # to reuse test code between engine runs # ############################################################ -- source include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDB; -- source extra/rpl_tests/rpl_multi_update3.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore.test index c4e9e701251..1f305d20c92 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore.test @@ -7,7 +7,6 @@ ########################################################## --source include/have_ndb.inc ---source include/have_binlog_format_mixed_or_row.inc --source include/ndb_master-slave.inc --disable_warnings diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test index 5b1090c0d41..f5b05080131 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test @@ -2,7 +2,6 @@ # By JBM 2005-02-15 Wrapped to allow reuse of test code# ######################################################## --source include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDB; -- source extra/rpl_tests/rpl_row_001.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test index 599124ad9bd..6453f45aa75 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test @@ -5,7 +5,6 @@ # For different engines # ################################# -- source 
include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDBCLUSTER; -- source extra/rpl_tests/rpl_row_sp003.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test index 7476e7c114a..c2c7be6112c 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test @@ -5,7 +5,6 @@ # For different engines # ################################# -- source include/have_ndb.inc --- source include/have_binlog_format_mixed_or_row.inc -- source include/ndb_master-slave.inc let $engine_type=NDBCLUSTER; -- source extra/rpl_tests/rpl_row_sp006.test diff --git a/mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test b/mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test index 5544ded056f..7f7e08685bf 100644 --- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test +++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test @@ -7,7 +7,6 @@ ############################################################################# # Includes --- source include/have_binlog_format_mixed_or_row.inc -- source include/have_ndb.inc -- source include/ndb_master-slave.inc let $engine_type=NDB; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 9bb0d75fa90..1ec6898078f 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -329,6 +329,7 @@ Thd_ndb::Thd_ndb() all= NULL; stmt= NULL; m_error= FALSE; + m_error_code= 0; query_state&= NDB_QUERY_NORMAL; options= 0; (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0, @@ -364,6 +365,7 @@ Thd_ndb::init_open_tables() { count= 0; m_error= FALSE; + m_error_code= 0; my_hash_reset(&open_tables); } @@ -487,6 +489,7 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure() return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure"); get_thd_ndb(current_thd)->m_error= TRUE; + get_thd_ndb(current_thd)->m_error_code= 0; DBUG_VOID_RETURN; } @@ -2084,9 +2087,15 @@ int 
ha_ndbcluster::unique_index_read(const uchar *key, if (execute_no_commit_ie(this,trans,FALSE) != 0 || op->getNdbError().code) { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(ndb_err(trans)); + int err= ndb_err(trans); + if(err==HA_ERR_KEY_NOT_FOUND) + table->status= STATUS_NOT_FOUND; + else + table->status= STATUS_GARBAGE; + + DBUG_RETURN(err); } + // The value have now been fetched from NDB unpack_record(buf); table->status= 0; @@ -8068,9 +8077,9 @@ int handle_trailing_share(NDB_SHARE *share) } } - sql_print_error("NDB_SHARE: %s already exists use_count=%d." - " Moving away for safety, but possible memleak.", - share->key, share->use_count); + sql_print_warning("NDB_SHARE: %s already exists use_count=%d." + " Moving away for safety, but possible memleak.", + share->key, share->use_count); dbug_print_open_tables(); /* @@ -8266,7 +8275,15 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, share->table_name= share->db + strlen(share->db) + 1; ha_ndbcluster::set_tabname(key, share->table_name); #ifdef HAVE_NDB_BINLOG - ndbcluster_binlog_init_share(share, table); + if (ndbcluster_binlog_init_share(share, table)) + { + DBUG_PRINT("error", ("get_share: %s could not init share", key)); + ndbcluster_real_free_share(&share); + *root_ptr= old_root; + if (!have_lock) + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_RETURN(0); + } #endif *root_ptr= old_root; } diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 468adba0aa0..5d5c8a26447 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -264,12 +264,13 @@ static void run_query(THD *thd, char *buf, char *end, int i; Thd_ndb *thd_ndb= get_thd_ndb(thd); for (i= 0; no_print_error[i]; i++) - if (thd_ndb->m_error == no_print_error[i]) + if ((thd_ndb->m_error_code == no_print_error[i]) || + (thd->net.last_errno == (unsigned)no_print_error[i])) break; if (!no_print_error[i]) sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d", buf, thd->net.last_error, 
thd->net.last_errno, - thd_ndb->m_error, + thd_ndb->m_error_code, thd->net.report_error, thd->query_error); } @@ -324,18 +325,14 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share, share->key); if ((error= open_table_def(thd, table_share, 0))) { - sql_print_error("Unable to get table share for %s, error=%d", - share->key, error); - DBUG_PRINT("error", ("open_table_def failed %d", error)); + DBUG_PRINT("error", ("open_table_def failed: %d my_errno: %d", error, my_errno)); free_table_share(table_share); DBUG_RETURN(error); } if ((error= open_table_from_share(thd, table_share, "", 0 /* fon't allocate buffers */, (uint) READ_ALL, 0, table, FALSE))) { - sql_print_error("Unable to open table for %s, error=%d(%d)", - share->key, error, my_errno); - DBUG_PRINT("error", ("open_table_from_share failed %d", error)); + DBUG_PRINT("error", ("open_table_from_share failed %d my_errno: %d", error, my_errno)); free_table_share(table_share); DBUG_RETURN(error); } @@ -381,11 +378,12 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share, /* Initialize the binlog part of the NDB_SHARE */ -void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) +int ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) { THD *thd= current_thd; MEM_ROOT *mem_root= &share->mem_root; int do_event_op= ndb_binlog_running; + int error= 0; DBUG_ENTER("ndbcluster_binlog_init_share"); share->connect_count= g_ndb_cluster_connection->get_connect_count(); @@ -428,7 +426,7 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) { share->flags|= NSF_NO_BINLOG; } - DBUG_VOID_RETURN; + DBUG_RETURN(error); } while (1) { @@ -455,7 +453,7 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) share->flags|= NSF_BLOB_FLAG; break; } - DBUG_VOID_RETURN; + DBUG_RETURN(error); } /***************************************************************** @@ -779,7 +777,10 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd) " end_pos BIGINT UNSIGNED NOT NULL, " " 
PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB"); - const int no_print_error[3]= {701, 4009, 0}; // do not print error 701 etc + const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, + 701, + 4009, + 0}; // do not print error 701 etc run_query(thd, buf, end, no_print_error, TRUE); DBUG_RETURN(0); @@ -836,7 +837,10 @@ static int ndbcluster_create_schema_table(THD *thd) " type INT UNSIGNED NOT NULL," " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB"); - const int no_print_error[3]= {701, 4009, 0}; // do not print error 701 etc + const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, + 701, + 4009, + 0}; // do not print error 701 etc run_query(thd, buf, end, no_print_error, TRUE); DBUG_RETURN(0); @@ -3078,7 +3082,7 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name, /* ToDo; handle error? */ if (share && share->op && share->op->getState() == NdbEventOperation::EO_EXECUTING && - dict->getNdbError().code != 4009) + dict->getNdbError().mysql_code != HA_ERR_NO_CONNECTION) { DBUG_ASSERT(FALSE); DBUG_RETURN(-1); @@ -3205,11 +3209,6 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb, NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData(); NDBEVENT::TableEvent type= pOp->getEventType(); - /* make sure to flush any pending events as they can be dependent - on one of the tables being changed below - */ - thd->binlog_flush_pending_rows_event(TRUE); - switch (type) { case NDBEVENT::TE_CLUSTER_FAILURE: diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h index b5b8d0d9745..aab13c6e902 100644 --- a/sql/ha_ndbcluster_binlog.h +++ b/sql/ha_ndbcluster_binlog.h @@ -124,7 +124,7 @@ void ndbcluster_binlog_init_handlerton(); /* Initialize the binlog part of the NDB_SHARE */ -void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *table); +int ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *table); bool ndbcluster_check_if_local_table(const char *dbname, const char *tabname); bool ndbcluster_check_if_local_tables_in_db(THD *thd, const char 
*dbname); diff --git a/sql/slave.cc b/sql/slave.cc index 0854bc123f9..8c769770818 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -132,6 +132,11 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, const char* table_name, bool overwrite); static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); static Log_event* next_event(RELAY_LOG_INFO* rli); +static int terminate_slave_thread(THD *thd, + pthread_mutex_t* term_lock, + pthread_cond_t* term_cond, + volatile uint *slave_running, + bool skip_lock); /* Find out which replications threads are running @@ -312,35 +317,26 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock) DBUG_RETURN(0); /* successfully do nothing */ int error,force_all = (thread_mask & SLAVE_FORCE_ALL); pthread_mutex_t *sql_lock = &mi->rli.run_lock, *io_lock = &mi->run_lock; - pthread_mutex_t *sql_cond_lock,*io_cond_lock; - sql_cond_lock=sql_lock; - io_cond_lock=io_lock; - - if (skip_lock) - { - sql_lock = io_lock = 0; - } - if ((thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) && mi->slave_running) + if ((thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL))) { DBUG_PRINT("info",("Terminating IO thread")); mi->abort_slave=1; if ((error=terminate_slave_thread(mi->io_thd,io_lock, - io_cond_lock, &mi->stop_cond, - &mi->slave_running)) && + &mi->slave_running, + skip_lock)) && !force_all) DBUG_RETURN(error); } - if ((thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL)) && mi->rli.slave_running) + if ((thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL))) { DBUG_PRINT("info",("Terminating SQL thread")); - DBUG_ASSERT(mi->rli.sql_thd != 0) ; mi->rli.abort_slave=1; if ((error=terminate_slave_thread(mi->rli.sql_thd,sql_lock, - sql_cond_lock, &mi->rli.stop_cond, - &mi->rli.slave_running)) && + &mi->rli.slave_running, + skip_lock)) && !force_all) DBUG_RETURN(error); } @@ -348,23 +344,60 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock) } -int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, - 
pthread_mutex_t *cond_lock, - pthread_cond_t* term_cond, - volatile uint *slave_running) +/** + Wait for a slave thread to terminate. + + This function is called after requesting the thread to terminate + (by setting @c abort_slave member of @c Relay_log_info or @c + Master_info structure to 1). Termination of the thread is + controlled with the the predicate *slave_running. + + Function will acquire @c term_lock before waiting on the condition + unless @c skip_lock is true in which case the mutex should be owned + by the caller of this function and will remain acquired after + return from the function. + + @param term_lock + Associated lock to use when waiting for @c term_cond + + @param term_cond + Condition that is signalled when the thread has terminated + + @param slave_running + Pointer to predicate to check for slave thread termination + + @param skip_lock + If @c true the lock will not be acquired before waiting on + the condition. In this case, it is assumed that the calling + function acquires the lock before calling this function. + + @retval 0 All OK + */ +static int +terminate_slave_thread(THD *thd, + pthread_mutex_t* term_lock, + pthread_cond_t* term_cond, + volatile uint *slave_running, + bool skip_lock) { + int error; + DBUG_ENTER("terminate_slave_thread"); - if (term_lock) - { + + if (!skip_lock) pthread_mutex_lock(term_lock); - if (!*slave_running) - { + + safe_mutex_assert_owner(term_lock); + + if (!*slave_running) + { + if (!skip_lock) pthread_mutex_unlock(term_lock); - DBUG_RETURN(ER_SLAVE_NOT_RUNNING); - } + DBUG_RETURN(ER_SLAVE_NOT_RUNNING); } DBUG_ASSERT(thd != 0); THD_CHECK_SENTRY(thd); + /* Is is critical to test if the slave is running. 
Otherwise, we might be referening freed memory trying to kick it @@ -380,9 +413,13 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, */ struct timespec abstime; set_timespec(abstime,2); - pthread_cond_timedwait(term_cond, cond_lock, &abstime); + error= pthread_cond_timedwait(term_cond, term_lock, &abstime); + DBUG_ASSERT(error == ETIMEDOUT || error == 0); } - if (term_lock) + + DBUG_ASSERT(*slave_running == 0); + + if (!skip_lock) pthread_mutex_unlock(term_lock); DBUG_RETURN(0); } diff --git a/sql/slave.h b/sql/slave.h index 731728bde4f..c7385934460 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -133,10 +133,6 @@ bool flush_relay_log_info(RELAY_LOG_INFO* rli); int register_slave_on_master(MYSQL* mysql); int terminate_slave_threads(MASTER_INFO* mi, int thread_mask, bool skip_lock = 0); -int terminate_slave_thread(THD* thd, pthread_mutex_t* term_mutex, - pthread_mutex_t* cond_lock, - pthread_cond_t* term_cond, - volatile uint* slave_running); int start_slave_threads(bool need_slave_mutex, bool wait_for_start, MASTER_INFO* mi, const char* master_info_fname, const char* slave_info_fname, int thread_mask); diff --git a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c index ee79bc94906..9cb4e0f6f20 100644 --- a/storage/innobase/row/row0sel.c +++ b/storage/innobase/row/row0sel.c @@ -4555,6 +4555,15 @@ row_search_autoinc_read_column( ut_a(len != UNIV_SQL_NULL); ut_a(len <= sizeof value); +#ifdef WORDS_BIGENDIAN + /* Copy integer data and restore sign bit */ + + memcpy((ptr = dest), data, len); + + if (!unsigned_type) { + dest[0] ^= 128; + } +#else /* Convert integer data from Innobase to a little-endian format, sign bit restored to normal */ @@ -4566,6 +4575,7 @@ row_search_autoinc_read_column( if (!unsigned_type) { dest[len - 1] ^= 128; } +#endif /* The assumption here is that the AUTOINC value can't be negative.*/ switch (len) { diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt index 
6f0cfbff7e9..acfbf649522 100644 --- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt @@ -6,7 +6,7 @@ Next DBTUP 4029 Next DBLQH 5045 Next DBDICT 6008 Next DBDIH 7186 -Next DBTC 8040 +Next DBTC 8053 Next CMVMI 9000 Next BACKUP 10038 Next DBUTIL 11002 @@ -304,6 +304,10 @@ ABORT OF TCKEYREQ 8038 : Simulate API disconnect just after SCAN_TAB_REQ +8052 : Simulate failure of TransactionBufferMemory allocation for OI lookup + +8051 : Simulate failure of allocation for saveINDXKEYINFO + CMVMI ----- diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 6a1ad48f8b7..63d22bd0a37 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -2137,6 +2137,7 @@ Dbacc::placeReadInLockQueue(OperationrecPtr lockOwnerPtr) if (same && (lastbits & Operationrec::OP_ACC_LOCK_MODE)) { jam(); + opbits |= Operationrec::OP_LOCK_MODE; // Upgrade to X-lock goto checkop; } diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index 42f4033dd4a..75d79ba737f 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -1499,12 +1499,12 @@ private: void clearCommitAckMarker(ApiConnectRecord * const regApiPtr, TcConnectRecord * const regTcPtr); // Trigger and index handling - bool saveINDXKEYINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len); + int saveINDXKEYINFO(Signal* signal, + TcIndexOperation* indexOp, + const Uint32 *src, + Uint32 len); bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp); - bool saveINDXATTRINFO(Signal* signal, + int saveINDXATTRINFO(Signal* signal, TcIndexOperation* indexOp, const Uint32 *src, Uint32 len); diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 3e030de959e..887e6f848b1 100644 --- 
a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -1800,9 +1800,18 @@ start_failure: }//switch } +static +inline +bool +compare_transid(Uint32* val0, Uint32* val1) +{ + Uint32 tmp0 = val0[0] ^ val1[0]; + Uint32 tmp1 = val0[1] ^ val1[1]; + return (tmp0 | tmp1) == 0; +} + void Dbtc::execKEYINFO(Signal* signal) { - UintR compare_transid1, compare_transid2; jamEntry(); apiConnectptr.i = signal->theData[0]; tmaxData = 20; @@ -1812,10 +1821,8 @@ void Dbtc::execKEYINFO(Signal* signal) }//if ptrAss(apiConnectptr, apiConnectRecord); ttransid_ptr = 1; - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { + if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false) + { TCKEY_abort(signal, 19); return; }//if @@ -2116,7 +2123,6 @@ void Dbtc::saveAttrbuf(Signal* signal) void Dbtc::execATTRINFO(Signal* signal) { - UintR compare_transid1, compare_transid2; UintR Tdata1 = signal->theData[0]; UintR Tlength = signal->length(); UintR TapiConnectFilesize = capiConnectFilesize; @@ -2131,17 +2137,13 @@ void Dbtc::execATTRINFO(Signal* signal) return; }//if - UintR Tdata2 = signal->theData[1]; - UintR Tdata3 = signal->theData[2]; ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1]; - compare_transid1 = regApiPtr->transid[0] ^ Tdata2; - compare_transid2 = regApiPtr->transid[1] ^ Tdata3; apiConnectptr.p = regApiPtr; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { + if (compare_transid(regApiPtr->transid, signal->theData+1) == false) + { DEBUG("Drop ATTRINFO, wrong transid, lenght="<theData[1]<<", "<theData[2]); TCKEY_abort(signal, 19); return; }//if @@ -5456,11 +5458,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal) } }//Dbtc::execTC_COMMITREQ() +/** + * TCROLLBACKREQ + * + * Format is: + * 
+ * thedata[0] = apiconnectptr + * thedata[1] = transid[0] + * thedata[2] = transid[1] + * OPTIONAL thedata[3] = flags + * + * Flags: + * 0x1 = potentiallyBad data from API (try not to assert) + */ void Dbtc::execTCROLLBACKREQ(Signal* signal) { + bool potentiallyBad= false; UintR compare_transid1, compare_transid2; jamEntry(); + + if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1))) + { + ndbout_c("Trying to roll back potentially bad txn\n"); + potentiallyBad= true; + } + apiConnectptr.i = signal->theData[0]; if (apiConnectptr.i >= capiConnectFilesize) { goto TC_ROLL_warning; @@ -5547,12 +5570,14 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal) TC_ROLL_warning: jam(); - warningHandlerLab(signal, __LINE__); + if(likely(potentiallyBad==false)) + warningHandlerLab(signal, __LINE__); return; TC_ROLL_system_error: jam(); - systemErrorLab(signal, __LINE__); + if(likely(potentiallyBad==false)) + systemErrorLab(signal, __LINE__); return; }//Dbtc::execTCROLLBACKREQ() @@ -11566,6 +11591,7 @@ void Dbtc::execTCINDXREQ(Signal* signal) // This is a newly started transaction, clean-up releaseAllSeizedIndexOperations(regApiPtr); + regApiPtr->apiConnectstate = CS_STARTED; regApiPtr->transid[0] = tcIndxReq->transId1; regApiPtr->transid[1] = tcIndxReq->transId2; }//if @@ -11606,20 +11632,29 @@ void Dbtc::execTCINDXREQ(Signal* signal) Uint32 includedIndexLength = MIN(indexLength, indexBufSize); indexOp->expectedAttrInfo = attrLength; Uint32 includedAttrLength = MIN(attrLength, attrBufSize); - if (saveINDXKEYINFO(signal, - indexOp, - dataPtr, - includedIndexLength)) { + + int ret; + if ((ret = saveINDXKEYINFO(signal, + indexOp, + dataPtr, + includedIndexLength)) == 0) + { jam(); // We have received all we need readIndexTable(signal, regApiPtr, indexOp); return; } + else if (ret == -1) + { + jam(); + return; + } + dataPtr += includedIndexLength; if (saveINDXATTRINFO(signal, indexOp, dataPtr, - includedAttrLength)) { + includedAttrLength) == 0) { jam(); // We have 
received all we need readIndexTable(signal, regApiPtr, indexOp); @@ -11722,13 +11757,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal) TcIndexOperationPtr indexOpPtr; TcIndexOperation* indexOp; + if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false) + { + TCKEY_abort(signal, 19); + return; + } + + if (regApiPtr->apiConnectstate == CS_ABORTING) + { + jam(); + return; + } + if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL) { indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); if (saveINDXKEYINFO(signal, indexOp, src, - keyInfoLength)) { + keyInfoLength) == 0) { jam(); // We have received all we need readIndexTable(signal, regApiPtr, indexOp); @@ -11755,17 +11802,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal) TcIndexOperationPtr indexOpPtr; TcIndexOperation* indexOp; + if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false) + { + TCKEY_abort(signal, 19); + return; + } + + if (regApiPtr->apiConnectstate == CS_ABORTING) + { + jam(); + return; + } + if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL) { indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); if (saveINDXATTRINFO(signal, indexOp, src, - attrInfoLength)) { + attrInfoLength) == 0) { jam(); // We have received all we need readIndexTable(signal, regApiPtr, indexOp); + return; } + return; } } @@ -11773,12 +11834,13 @@ void Dbtc::execINDXATTRINFO(Signal* signal) * Save signal INDXKEYINFO * Return true if we have received all needed data */ -bool Dbtc::saveINDXKEYINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len) +int +Dbtc::saveINDXKEYINFO(Signal* signal, + TcIndexOperation* indexOp, + const Uint32 *src, + Uint32 len) { - if (!indexOp->keyInfo.append(src, len)) { + if (ERROR_INSERTED(8052) || !indexOp->keyInfo.append(src, len)) { jam(); // Failed to seize keyInfo, abort transaction #ifdef VM_TRACE @@ -11788,15 +11850,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal, apiConnectptr.i = indexOp->connectionIndex; 
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); releaseIndexOperation(apiConnectptr.p, indexOp); - terrorCode = 4000; + terrorCode = 289; + if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo)) + apiConnectptr.p->m_exec_flag= 1; abortErrorLab(signal); - return false; + return -1; } if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) { jam(); - return true; + return 0; } - return false; + return 1; } bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp) @@ -11808,12 +11872,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp) * Save signal INDXATTRINFO * Return true if we have received all needed data */ -bool Dbtc::saveINDXATTRINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len) +int +Dbtc::saveINDXATTRINFO(Signal* signal, + TcIndexOperation* indexOp, + const Uint32 *src, + Uint32 len) { - if (!indexOp->attrInfo.append(src, len)) { + if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) { jam(); #ifdef VM_TRACE ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n"); @@ -11821,15 +11886,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal, apiConnectptr.i = indexOp->connectionIndex; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); releaseIndexOperation(apiConnectptr.p, indexOp); - terrorCode = 4000; + terrorCode = 289; + if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo)) + apiConnectptr.p->m_exec_flag= 1; abortErrorLab(signal); - return false; + return -1; } if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) { jam(); - return true; + return 0; } - return false; + return 1; } bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp) @@ -12013,6 +12080,9 @@ void Dbtc::execTCKEYREF(Signal* signal) tcIndxRef->transId[0] = tcKeyRef->transId[0]; tcIndxRef->transId[1] = tcKeyRef->transId[1]; tcIndxRef->errorCode = tcKeyRef->errorCode; + + releaseIndexOperation(regApiPtr, indexOp); + 
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -12557,7 +12627,18 @@ void Dbtc::executeIndexOperation(Signal* signal, bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr, TcIndexOperationPtr& indexOpPtr) { - return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr); + if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr)) + { + ndbassert(indexOpPtr.p->expectedKeyInfo == 0); + ndbassert(indexOpPtr.p->keyInfo.getSize() == 0); + ndbassert(indexOpPtr.p->expectedAttrInfo == 0); + ndbassert(indexOpPtr.p->attrInfo.getSize() == 0); + ndbassert(indexOpPtr.p->expectedTransIdAI == 0); + ndbassert(indexOpPtr.p->transIdAI.getSize() == 0); + return true; + } + + return false; } void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr, diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp index ee0f194211b..812f071e037 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp @@ -179,7 +179,8 @@ Dbtup::dealloc_tuple(Signal* signal, &disk, tmpptr, gci); } - if (! (bits & Tuple_header::LCP_SKIP) && lcpScan_ptr_i != RNIL) + if (! 
(bits & (Tuple_header::LCP_SKIP | Tuple_header::ALLOC)) && + lcpScan_ptr_i != RNIL) { ScanOpPtr scanOp; c_scanOpPool.getPtr(scanOp, lcpScan_ptr_i); diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 7471e9c84f8..15647861eef 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -202,9 +202,10 @@ Ndb::NDB_connect(Uint32 tNode) DBUG_PRINT("info", ("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d", tReturnCode, tNdbCon->Status())); - if (theError.code == 299) + if (theError.code == 299 || // single user mode + theError.code == 281 ) // cluster shutdown in progress { - // single user mode so no need to retry with other node + // no need to retry with other node DBUG_RETURN(-1); } DBUG_RETURN(3); diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp index a0244bde95b..0fc96add084 100644 --- a/storage/ndb/src/ndbapi/NdbBlob.cpp +++ b/storage/ndb/src/ndbapi/NdbBlob.cpp @@ -1136,7 +1136,12 @@ NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count) while (n < count) { NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable); if (tOp == NULL || - tOp->committedRead() == -1 || + /* + * This was committedRead() before. However lock on main + * table tuple does not fully protect blob parts since DBTUP + * commits each tuple separately. + */ + tOp->readTuple() == -1 || setPartKeyValue(tOp, part + n) == -1 || tOp->getValue((Uint32)3, buf) == NULL) { setErrorCode(tOp); diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp index ada0372a184..55c6f0f4b99 100644 --- a/storage/ndb/src/ndbapi/NdbTransaction.cpp +++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp @@ -453,12 +453,27 @@ NdbTransaction::executeNoBlobs(NdbTransaction::ExecType aTypeOfExec, while (1) { int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend); if (noOfComp == 0) { - /** - * This timeout situation can occur if NDB crashes. 
+ /* + * Just for fun, this is only one of two places where + * we could hit this error... It's quite possible we + * hit it in Ndbif.cpp in Ndb::check_send_timeout() + * + * We behave rather similarly in both places. + * Hitting this is certainly a bug though... */ - ndbout << "This timeout should never occur, execute(..)" << endl; - theError.code = 4012; - setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure" + g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for " + "response from NDB data nodes. This should NEVER " + "occur. You have likely hit a NDB Bug. Please " + "file a bug."); + DBUG_PRINT("error",("This timeout should never occure, execute()")); + g_eventLogger.error("Forcibly trying to rollback txn (%p" + ") to try to clean up data node resources.", + this); + executeNoBlobs(NdbTransaction::Rollback); + theError.code = 4012; + theError.status= NdbError::PermanentError; + theError.classification= NdbError::TimeoutExpired; + setOperationErrorCodeAbort(4012); // ndbd timeout DBUG_RETURN(-1); }//if @@ -522,7 +537,12 @@ NdbTransaction::executeAsynchPrepare(NdbTransaction::ExecType aTypeOfExec, */ if (theError.code != 0) DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code)); - theError.code = 0; + /** + * for timeout (4012) we want sendROLLBACK to behave differently. 
+ * Else, normal behaviour of reset errcode + */ + if (theError.code != 4012) + theError.code = 0; NdbScanOperation* tcOp = m_theFirstScanOperation; if (tcOp != 0){ // Execute any cursor operations @@ -843,6 +863,12 @@ NdbTransaction::sendROLLBACK() // Send a TCROLLBACKREQ signal; tSignal.setData(theTCConPtr, 1); tSignal.setData(tTransId1, 2); tSignal.setData(tTransId2, 3); + if(theError.code == 4012) + { + g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag"); + tSignal.setLength(tSignal.getLength() + 1); // + flags + tSignal.setData(0x1, 4); // potentially bad data + } tReturnCode = tp->sendSignal(&tSignal,theDBnode); if (tReturnCode != -1) { theSendStatus = sendTC_ROLLBACK; diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index 914acd17c08..a0417e5b118 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -189,6 +189,7 @@ ErrorBundle ErrorCodes[] = { { 4032, DMEC, TR, "Out of Send Buffer space in NDB API" }, { 1501, DMEC, TR, "Out of undo space" }, { 288, DMEC, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" }, + { 289, DMEC, TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" }, /** * InsufficientSpace @@ -289,6 +290,7 @@ ErrorBundle ErrorCodes[] = { /** * Application error */ + { 281, HA_ERR_NO_CONNECTION, AE, "Operation not allowed due to cluster shutdown in progress" }, { 299, DMEC, AE, "Operation not allowed or aborted due to single user mode" }, { 763, DMEC, AE, "Alter table requires cluster nodes to have exact same version" }, { 823, DMEC, AE, "Too much attrinfo from application in tuple manager" }, diff --git a/storage/ndb/test/include/HugoTransactions.hpp b/storage/ndb/test/include/HugoTransactions.hpp index caed577f8c9..e2b12f261a8 100644 --- a/storage/ndb/test/include/HugoTransactions.hpp +++ b/storage/ndb/test/include/HugoTransactions.hpp @@ -20,7 +20,7 @@ #include #include #include - +class 
NDBT_Stats; class HugoTransactions : public HugoOperations { public: @@ -109,10 +109,24 @@ public: void setRetryMax(int retryMax = 100) { m_retryMax = retryMax; } Uint32 m_latest_gci; + + void setStatsLatency(NDBT_Stats* stats) { m_stats_latency = stats; } + + // allows multiple threads to update separate batches + void setThrInfo(int thr_count, int thr_no) { + m_thr_count = thr_count; + m_thr_no = thr_no; + } + protected: NDBT_ResultRow row; int m_defaultScanUpdateMethod; int m_retryMax; + + NDBT_Stats* m_stats_latency; + + int m_thr_count; // 0 if no separation between threads + int m_thr_no; }; diff --git a/storage/ndb/test/include/NDBT_Thread.hpp b/storage/ndb/test/include/NDBT_Thread.hpp new file mode 100644 index 00000000000..5b724991b29 --- /dev/null +++ b/storage/ndb/test/include/NDBT_Thread.hpp @@ -0,0 +1,226 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef NDB_THREAD_HPP +#define NDB_THREAD_HPP + +#include +#include +#include + +// NDBT_Thread ctor -> NDBT_Thread_run -> thr.run() +extern "C" { +static void* NDBT_Thread_run(void* arg); +} + +// Function to run in a thread. + +typedef void NDBT_ThreadFunc(class NDBT_Thread&); + +/* + * NDBT_Thread + * + * Represents a thread. The thread pauses at startup. + * Main process sets a function to run. 
When the function + * returns, the thread pauses again to wait for a command. + * This allows main process to sync with the thread and + * exchange data with it. + * + * Input to thread is typically options. The input area + * is read-only in the thread. Output from thread is + * results such as statistics. Error code is handled + * separately. + * + * Pointer to Ndb object and method to create it are + * provided for convenience. + */ + +class NDBT_ThreadSet; + +class NDBT_Thread { +public: + NDBT_Thread(); + NDBT_Thread(NDBT_ThreadSet* thread_set, int thread_no); + void create(NDBT_ThreadSet* thread_set, int thread_no); + ~NDBT_Thread(); + + // if part of a set + inline NDBT_ThreadSet& get_thread_set() const { + assert(m_thread_set != 0); + return *m_thread_set; + } + inline int get_thread_no() const { + return m_thread_no; + } + + // { Wait -> Start -> Stop }+ -> Exit + enum State { + Wait = 1, // wait for command + Start, // run current function + Stop, // stopped (paused) when current function done + Exit // exit thread + }; + + // tell thread to start running current function + void start(); + // wait for thread to stop when function is done + void stop(); + // tell thread to exit + void exit(); + // collect thread after exit + void join(); + + // set function to run + inline void set_func(NDBT_ThreadFunc* func) { + m_func = func; + } + + // input area + inline void set_input(const void* input) { + m_input = input; + } + inline const void* get_input() const { + return m_input; + } + + // output area + inline void set_output(void* output) { + m_output = output; + } + inline void* get_output() const { + return m_output; + } + template inline void set_output() { + set_output(new T); + } + inline void delete_output() { + delete m_output; + m_output = 0; + } + + // thread-specific Ndb object + inline class Ndb* get_ndb() const { + return m_ndb; + } + int connect(class Ndb_cluster_connection*, const char* db = "TEST_DB"); + void disconnect(); + + // error code (OS, 
Ndb, other) + void clear_err() { + m_err = 0; + } + void set_err(int err) { + m_err = err; + } + int get_err() const { + return m_err; + } + +private: + friend class NDBT_ThreadSet; + friend void* NDBT_Thread_run(void* arg); + + enum { Magic = 0xabacadae }; + Uint32 m_magic; + + State m_state; + NDBT_ThreadSet* m_thread_set; + int m_thread_no; + + NDBT_ThreadFunc* m_func; + const void* m_input; + void* m_output; + class Ndb* m_ndb; + int m_err; + + // run the thread + void run(); + + void lock() { + NdbMutex_Lock(m_mutex); + } + void unlock() { + NdbMutex_Unlock(m_mutex); + } + + void wait() { + NdbCondition_Wait(m_cond, m_mutex); + } + void signal() { + NdbCondition_Signal(m_cond); + } + + NdbMutex* m_mutex; + NdbCondition* m_cond; + NdbThread* m_thread; + void* m_status; +}; + +/* + * A set of threads, indexed from 0 to count-1. Methods + * are applied to each thread (serially). Input area is + * common to all threads. Output areas are allocated + * separately according to a template class. 
+ */ + +class NDBT_ThreadSet { +public: + NDBT_ThreadSet(int count); + ~NDBT_ThreadSet(); + + inline int get_count() const { + return m_count; + } + inline NDBT_Thread& get_thread(int n) { + assert(n < m_count && m_thread[n] != 0); + return *m_thread[n]; + } + + // tell each thread to start running + void start(); + // wait for each thread to stop + void stop(); + // tell each thread to exit + void exit(); + // collect each thread after exit + void join(); + + // set function to run in each thread + void set_func(NDBT_ThreadFunc* func); + + // set input area (same instance in each thread) + void set_input(const void* input); + + // set output areas + template inline void set_output() { + for (int n = 0; n < m_count; n++) { + NDBT_Thread& thr = *m_thread[n]; + thr.set_output(); + } + } + void delete_output(); + + // thread-specific Ndb objects + int connect(class Ndb_cluster_connection*, const char* db = "TEST_DB"); + void disconnect(); + + int get_err() const; + +private: + int m_count; + NDBT_Thread** m_thread; +}; + +#endif diff --git a/storage/ndb/test/ndbapi/testIndex.cpp b/storage/ndb/test/ndbapi/testIndex.cpp index 7691f036a46..00e559c7a0f 100644 --- a/storage/ndb/test/ndbapi/testIndex.cpp +++ b/storage/ndb/test/ndbapi/testIndex.cpp @@ -1298,6 +1298,103 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step) return res; } +int tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err) +{ + int result= NDBT_OK; + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary * dict = pNdb->getDictionary(); + const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, + ctx->getTab()->getName()); + + HugoOperations ops(*ctx->getTab(), idx); + + g_err << "Using INDEX: " << pkIdxName << endl; + + NdbRestarter restarter; + + int loops = ctx->getNumLoops(); + const int rows = ctx->getNumRecords(); + const int batchsize = ctx->getProperty("BatchSize", 1); + + for(int bs=1; bs < loops; bs++) + { + int c= 0; + while (c++ < loops) + { + g_err << "BS " << bs << " LOOP #" << 
c << endl; + + g_err << "inserting error on op#" << c << endl; + + CHECK(ops.startTransaction(pNdb) == 0); + for(int i=1;i<=c;i++) + { + if(i==c) + { + if(restarter.insertErrorInAllNodes(inject_err)!=0) + { + g_err << "**** FAILED to insert error" << endl; + result= NDBT_FAILED; + break; + } + } + CHECK(ops.indexReadRecords(pNdb, pkIdxName, i,false,1) == 0); + if(i%bs==0 || i==c) + { + if(i$conf<" exit fi @@ -92,7 +103,7 @@ fi # Setup the clone source location # #################################### -src_clone=$src_clone_base-$clone +src_clone=${src_clone_base}${clone} ####################################### # Check to see if the lock file exists# @@ -125,7 +136,14 @@ fi # You can add more to this path# ################################ -dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$ +if [ -z "$tag" ] +then + dst_place=${build_dir}/clone-$clone-$DATE.$$ +else + dst_place=${build_dir}/clone-$tag-$DATE.$$ + extra_args="$extra_args --clone=$tag" + extra_clone="-r$tag" +fi ######################################### # Delete source and pull down the latest# @@ -134,7 +152,12 @@ dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$ if [ "$do_clone" ] then rm -rf $dst_place - bk clone $src_clone $dst_place + if [ `echo $src_clone | grep -c 'file:\/\/'` = 1 ] + then + bk clone -l $extra_clone $src_clone $dst_place + else + bk clone $extra_clone $src_clone $dst_place + fi fi ########################################## @@ -156,7 +179,7 @@ fi ################################ script=$install_dir/mysql-test/ndb/autotest-run.sh -$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock +sh -x $script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock $extra_args if [ "$build" ] then diff --git a/storage/ndb/test/run-test/autotest-run.sh b/storage/ndb/test/run-test/autotest-run.sh index 34c3fe53949..b543cd1efb9 100644 --- a/storage/ndb/test/run-test/autotest-run.sh +++ b/storage/ndb/test/run-test/autotest-run.sh @@ -216,8 +216,8 @@ fi # 
Make directories needed p=`pwd` -run_dir=$install_dir/run-$RUN-mysql-$clone-$target -res_dir=$base_dir/result-$RUN-mysql-$clone-$target/$DATE +run_dir=$install_dir/run-$RUN-$clone-$target +res_dir=$base_dir/result-$RUN-$clone-$target/$DATE tar_dir=$base_dir/saved-results mkdir -p $run_dir $res_dir $tar_dir @@ -246,7 +246,7 @@ cd $res_dir echo "date=$DATE" > info.txt echo "suite=$RUN" >> info.txt -echo "clone=mysql-$clone" >> info.txt +echo "clone=$clone" >> info.txt echo "arch=$target" >> info.txt find . | xargs chmod ugo+r diff --git a/storage/ndb/test/run-test/conf-test.cnf b/storage/ndb/test/run-test/conf-test.cnf new file mode 100644 index 00000000000..e528eeb1d8b --- /dev/null +++ b/storage/ndb/test/run-test/conf-test.cnf @@ -0,0 +1,26 @@ +[atrt] +basedir = CHOOSE_dir +baseport = 14000 +clusters = .2node + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config.2node] +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3 +ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1 + +NoOfReplicas = 2 +IndexMemory = 25M +DataMemory = 100M +BackupMemory = 64M +MaxNoOfConcurrentScans = 100 +MaxNoOfSavedMessages= 1000 +SendBufferMemory = 2M +NoOfFragmentLogFiles = 4 +FragmentLogFileSize = 64M + diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index ed4dbf67b20..b7a3a15dae7 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -938,3 +938,10 @@ max-time: 120 cmd: testMgm args: -n ApiMgmStructEventTimeout T1 +max-time: 180 +cmd: testIndex +args: -n Bug28804 T1 T3 + +max-time: 180 +cmd: testIndex +args: -n Bug28804_ATTRINFO T1 T3 diff --git a/storage/ndb/test/run-test/upgrade-boot.sh b/storage/ndb/test/run-test/upgrade-boot.sh new file mode 100644 index 00000000000..d3542166551 --- /dev/null +++ b/storage/ndb/test/run-test/upgrade-boot.sh @@ -0,0 +1,218 @@ +#!/bin/sh +############################################################# +# This 
script created by Jonas does the following # +# Cleans up clones and pevious builds, pulls new clones, # +# builds, deploys, configures the tests and launches ATRT # +############################################################# + +############### +#Script setup # +############## + +save_args=$* +VERSION="upgrade-boot.sh version 1.00" + +DATE=`date '+%Y-%m-%d'` +HOST=`hostname -s` +export DATE HOST + +set -e + +echo "`date` starting: $*" + +verbose=0 +do_clone=yes +build=yes + +tag0= +tag1= +conf= +extra_args= +extra_clone= +LOCK=$HOME/.autotest-lock + +############################ +# Read command line entries# +############################ + +while [ "$1" ] +do + case "$1" in + --no-clone) do_clone="";; + --no-build) build="";; + --verbose) verbose=`expr $verbose + 1`;; + --clone=*) clone0=`echo $1 | sed s/--clone=//`;; + --clone0=*) clone0=`echo $1 | sed s/--clone0=//`;; + --clone1=*) clone1=`echo $1 | sed s/--clone1=//`;; + --version) echo $VERSION; exit;; + --conf=*) conf=`echo $1 | sed s/--conf=//`;; + --tag=*) tag0=`echo $1 | sed s/--tag=//`;; + --tag0=*) tag0=`echo $1 | sed s/--tag0=//`;; + --tag1=*) tag1=`echo $1 | sed s/--tag1=//`;; + --*) echo "Unknown arg: $1";; + *) RUN=$*;; + esac + shift +done + +if [ -z "$clone1" ] +then + clone1=$clone0 +fi + +if [ -z "$tag0" ] +then + echo "No tag0 specified" + exit +fi + +if [ -z "$tag1" ] +then + echo "No tag1 specified" + exit +fi + +################################# +#Make sure the configfile exists# +#if it does not exit. if it does# +# (.) load it # +################################# +if [ -z "$conf" ] +then + if [ -f "`pwd`/autotest.conf" ] + then + conf="`pwd`/autotest.conf" + elif [ -f "$HOME/autotest.conf" ] + then + conf="$HOME/autotest.conf" + fi +fi + +if [ -f $conf ] +then + . 
$conf +else + echo "Can't find config file: >$conf<" + exit +fi + +############################### +# Validate that all interesting +# variables where set in conf +############################### +vars="src_clone_base install_dir build_dir" +for i in $vars +do + t=`echo echo \\$$i` + if [ -z "`eval $t`" ] + then + echo "Invalid config: $conf, variable $i is not set" + exit + fi +done + +############################### +#Print out the enviroment vars# +############################### + +if [ $verbose -gt 0 ] +then + env +fi + +#################################### +# Setup the lock file name and path# +# Setup the clone source location # +#################################### + +src_clone0=${src_clone_base}${clone0} +src_clone1=${src_clone_base}${clone1} + +####################################### +# Check to see if the lock file exists# +# If it does exit. # +####################################### + +if [ -f $LOCK ] +then + echo "Lock file exists: $LOCK" + exit 1 +fi + +####################################### +# If the lock file does not exist then# +# create it with date and run info # +####################################### + +echo "$DATE $RUN" > $LOCK + +############################# +#If any errors here down, we# +# trap them, and remove the # +# Lock file before exit # +############################# +if [ `uname -s` != "SunOS" ] +then + trap "rm -f $LOCK" ERR +fi + +# You can add more to this path# +################################ + +dst_place0=${build_dir}/clone-$tag0-$DATE.$$ +dst_place1=${build_dir}/clone-$tag1-$DATE.$$ + +######################################### +# Delete source and pull down the latest# +######################################### + +if [ "$do_clone" ] +then + rm -rf $dst_place0 $dst_place1 + if [ `echo $src_clone0 | grep -c 'file:\/\/'` = 1 ] + then + bk clone -l -r$tag0 $src_clone0 $dst_place0 + else + bk clone -r$tag0 $src_clone0 $dst_place0 + fi + + if [ `echo $src_clone1 | grep -c 'file:\/\/'` = 1 ] + then + bk clone -l -r$tag1 
$src_clone1 $dst_place1 + else + bk clone -r$tag1 $src_clone1 $dst_place1 + fi +fi + +########################################## +# Build the source, make installs, and # +# create the database to be rsynced # +########################################## +install_dir0=$install_dir/$tag0 +install_dir1=$install_dir/$tag1 +if [ "$build" ] +then + cd $dst_place0 + rm -rf $install_dir0 + BUILD/compile-ndb-autotest --prefix=$install_dir0 + make install + + cd $dst_place1 + rm -rf $install_dir1 + BUILD/compile-ndb-autotest --prefix=$install_dir1 + make install +fi + + +################################ +# Start run script # +################################ + +script=$install_dir1/mysql-test/ndb/upgrade-run.sh +$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock $extra_args + +if [ "$build" ] +then + rm -rf $dst_place0 $dst_place1 +fi +rm -f $LOCK diff --git a/storage/ndb/test/src/HugoTransactions.cpp b/storage/ndb/test/src/HugoTransactions.cpp index 456782f4726..3a1600815e0 100644 --- a/storage/ndb/test/src/HugoTransactions.cpp +++ b/storage/ndb/test/src/HugoTransactions.cpp @@ -14,8 +14,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "HugoTransactions.hpp" +#include #include - +#include HugoTransactions::HugoTransactions(const NdbDictionary::Table& _tab, const NdbDictionary::Index* idx): @@ -24,6 +25,10 @@ HugoTransactions::HugoTransactions(const NdbDictionary::Table& _tab, m_defaultScanUpdateMethod = 3; setRetryMax(); + m_stats_latency = 0; + + m_thr_count = 0; + m_thr_no = -1; } HugoTransactions::~HugoTransactions(){ @@ -820,6 +825,16 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, return NDBT_FAILED; } + MicroSecondTimer timer_start; + MicroSecondTimer timer_stop; + bool timer_active = + m_stats_latency != 0 && + r >= batch && // first batch is "warmup" + r + batch != records; // last batch is usually partial + + if (timer_active) + NdbTick_getMicroTimer(&timer_start); + if(pkReadRecord(pNdb, r, batch, 
lm) != NDBT_OK) { ERR(pTrans->getNdbError()); @@ -892,6 +907,12 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, } closeTransaction(pNdb); + + if (timer_active) { + NdbTick_getMicroTimer(&timer_stop); + NDB_TICKS ticks = NdbTick_getMicrosPassed(timer_start, timer_stop); + m_stats_latency->addObservation((double)ticks); + } } deallocRows(); g_info << reads << " records read" << endl; @@ -913,9 +934,17 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, allocRows(batch); g_info << "|- Updating records (batch=" << batch << ")..." << endl; + int batch_no = 0; while (r < records){ if(r + batch > records) batch = records - r; + + if (m_thr_count != 0 && m_thr_no != batch_no % m_thr_count) + { + r += batch; + batch_no++; + continue; + } if (retryAttempt >= m_retryMax){ g_info << "ERROR: has retried this operation " << retryAttempt @@ -963,6 +992,16 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } + MicroSecondTimer timer_start; + MicroSecondTimer timer_stop; + bool timer_active = + m_stats_latency != 0 && + r >= batch && // first batch is "warmup" + r + batch != records; // last batch is usually partial + + if (timer_active) + NdbTick_getMicroTimer(&timer_start); + if(pIndexScanOp) { int rows_found = 0; @@ -1039,8 +1078,15 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, } closeTransaction(pNdb); - + + if (timer_active) { + NdbTick_getMicroTimer(&timer_stop); + NDB_TICKS ticks = NdbTick_getMicrosPassed(timer_start, timer_stop); + m_stats_latency->addObservation((double)ticks); + } + r += batch; // Read next record + batch_no++; } deallocRows(); @@ -1228,10 +1274,18 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, int check; g_info << "|- Deleting records..." 
<< endl; + int batch_no = 0; while (r < records){ if(r + batch > records) batch = records - r; + if (m_thr_count != 0 && m_thr_no != batch_no % m_thr_count) + { + r += batch; + batch_no++; + continue; + } + if (retryAttempt >= m_retryMax){ g_info << "ERROR: has retried this operation " << retryAttempt << " times, failing!" << endl; @@ -1255,6 +1309,16 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, return NDBT_FAILED; } + MicroSecondTimer timer_start; + MicroSecondTimer timer_stop; + bool timer_active = + m_stats_latency != 0 && + r >= batch && // first batch is "warmup" + r + batch != records; // last batch is usually partial + + if (timer_active) + NdbTick_getMicroTimer(&timer_start); + if(pkDeleteRecord(pNdb, r, batch) != NDBT_OK) { ERR(pTrans->getNdbError()); @@ -1303,9 +1367,15 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, m_latest_gci = pTrans->getGCI(); } closeTransaction(pNdb); - - r += batch; // Read next record + if (timer_active) { + NdbTick_getMicroTimer(&timer_stop); + NDB_TICKS ticks = NdbTick_getMicrosPassed(timer_start, timer_stop); + m_stats_latency->addObservation((double)ticks); + } + + r += batch; // Read next record + batch_no++; } g_info << "|- " << deleted << " records deleted" << endl; diff --git a/storage/ndb/test/src/Makefile.am b/storage/ndb/test/src/Makefile.am index 37f6497e508..a025579cb72 100644 --- a/storage/ndb/test/src/Makefile.am +++ b/storage/ndb/test/src/Makefile.am @@ -24,7 +24,7 @@ libNDBT_a_SOURCES = \ NdbRestarter.cpp NdbRestarts.cpp NDBT_Output.cpp \ NdbBackup.cpp NdbConfig.cpp NdbGrep.cpp NDBT_Table.cpp \ NdbSchemaCon.cpp NdbSchemaOp.cpp getarg.c \ - CpcClient.cpp NdbMixRestarter.cpp + CpcClient.cpp NdbMixRestarter.cpp NDBT_Thread.cpp INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/common/mgmcommon -I$(top_srcdir)/storage/ndb/include/mgmcommon -I$(top_srcdir)/storage/ndb/include/kernel -I$(top_srcdir)/storage/ndb/src/mgmapi diff --git a/storage/ndb/test/src/NDBT_Thread.cpp b/storage/ndb/test/src/NDBT_Thread.cpp new file mode 
100644 index 00000000000..56cf2f6815b --- /dev/null +++ b/storage/ndb/test/src/NDBT_Thread.cpp @@ -0,0 +1,283 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include +#include +#include + +NDBT_Thread::NDBT_Thread() +{ + create(0, -1); +} + +NDBT_Thread::NDBT_Thread(NDBT_ThreadSet* thread_set, int thread_no) +{ + create(thread_set, thread_no); +} + +void +NDBT_Thread::create(NDBT_ThreadSet* thread_set, int thread_no) +{ + m_magic = NDBT_Thread::Magic; + + m_state = Wait; + m_thread_set = thread_set; + m_thread_no = thread_no; + m_func = 0; + m_input = 0; + m_output = 0; + m_ndb = 0; + m_err = 0; + + m_mutex = NdbMutex_Create(); + assert(m_mutex != 0); + m_cond = NdbCondition_Create(); + assert(m_cond != 0); + + char buf[20]; + sprintf(buf, "NDBT_%04u"); + const char* name = strdup(buf); + assert(name != 0); + + unsigned stacksize = 512 * 1024; + NDB_THREAD_PRIO prio = NDB_THREAD_PRIO_LOW; + m_thread = NdbThread_Create(NDBT_Thread_run, + (void**)this, stacksize, name, prio); + assert(m_thread != 0); +} + +NDBT_Thread::~NDBT_Thread() +{ + if (m_thread != 0) { + NdbThread_Destroy(&m_thread); + m_thread = 0; + } + if (m_cond != 0) { + NdbCondition_Destroy(m_cond); + m_cond = 0; + } + if (m_mutex != 0) { + NdbMutex_Destroy(m_mutex); + m_mutex = 0; + } +} + +static void* +NDBT_Thread_run(void* arg) +{ + assert(arg != 0); + NDBT_Thread& 
thr = *(NDBT_Thread*)arg; + assert(thr.m_magic == NDBT_Thread::Magic); + thr.run(); + return 0; +} + +void +NDBT_Thread::run() +{ + while (1) { + lock(); + while (m_state != Start && m_state != Exit) { + wait(); + } + if (m_state == Exit) { + unlock(); + break; + } + (*m_func)(*this); + m_state = Stop; + signal(); + unlock(); + } +} + +// methods for main process + +void +NDBT_Thread::start() +{ + lock(); + m_state = Start; + signal(); + unlock(); +} + +void +NDBT_Thread::stop() +{ + lock(); + while (m_state != Stop) + wait(); + m_state = Wait; + unlock(); +} + +void +NDBT_Thread::exit() +{ + lock(); + m_state = Exit; + signal(); + unlock(); +}; + +void +NDBT_Thread::join() +{ + NdbThread_WaitFor(m_thread, &m_status); + m_thread = 0; +} + +int +NDBT_Thread::connect(class Ndb_cluster_connection* ncc, const char* db) +{ + m_ndb = new Ndb(ncc, db); + if (m_ndb->init() == -1 || + m_ndb->waitUntilReady() == -1) { + m_err = m_ndb->getNdbError().code; + return -1; + } + return 0; +} + +void +NDBT_Thread::disconnect() +{ + delete m_ndb; + m_ndb = 0; +} + +// set of threads + +NDBT_ThreadSet::NDBT_ThreadSet(int count) +{ + m_count = count; + m_thread = new NDBT_Thread* [count]; + for (int n = 0; n < count; n++) { + m_thread[n] = new NDBT_Thread(this, n); + } +} + +NDBT_ThreadSet::~NDBT_ThreadSet() +{ + delete_output(); + for (int n = 0; n < m_count; n++) { + delete m_thread[n]; + m_thread[n] = 0; + } + delete [] m_thread; +} + +void +NDBT_ThreadSet::start() +{ + for (int n = 0; n < m_count; n++) { + NDBT_Thread& thr = *m_thread[n]; + thr.start(); + } +} + +void +NDBT_ThreadSet::stop() +{ + for (int n = 0; n < m_count; n++) { + NDBT_Thread& thr = *m_thread[n]; + thr.stop(); + } +} + +void +NDBT_ThreadSet::exit() +{ + for (int n = 0; n < m_count; n++) { + NDBT_Thread& thr = *m_thread[n]; + thr.exit(); + } +} + +void +NDBT_ThreadSet::join() +{ + for (int n = 0; n < m_count; n++) { + NDBT_Thread& thr = *m_thread[n]; + thr.join(); + } +} + +void 
+NDBT_ThreadSet::set_func(NDBT_ThreadFunc* func) +{ + for (int n = 0; n < m_count; n++) { + NDBT_Thread& thr = *m_thread[n]; + thr.set_func(func); + } +} + +void +NDBT_ThreadSet::set_input(const void* input) +{ + for (int n = 0; n < m_count; n++) { + NDBT_Thread& thr = *m_thread[n]; + thr.set_input(input); + } +} + +void +NDBT_ThreadSet::delete_output() +{ + for (int n = 0; n < m_count; n++) { + if (m_thread[n] != 0) { + NDBT_Thread& thr = *m_thread[n]; + thr.delete_output(); + } + } +} + +int +NDBT_ThreadSet::connect(class Ndb_cluster_connection* ncc, const char* db) +{ + for (int n = 0; n < m_count; n++) { + assert(m_thread[n] != 0); + NDBT_Thread& thr = *m_thread[n]; + if (thr.connect(ncc, db) == -1) + return -1; + } + return 0; +} + +void +NDBT_ThreadSet::disconnect() +{ + for (int n = 0; n < m_count; n++) { + if (m_thread[n] != 0) { + NDBT_Thread& thr = *m_thread[n]; + thr.disconnect(); + } + } +} + +int +NDBT_ThreadSet::get_err() const +{ + for (int n = 0; n < m_count; n++) { + if (m_thread[n] != 0) { + NDBT_Thread& thr = *m_thread[n]; + int err = thr.get_err(); + if (err != 0) + return err; + } + } + return 0; +} diff --git a/storage/ndb/test/tools/hugoPkDelete.cpp b/storage/ndb/test/tools/hugoPkDelete.cpp index b185eacdddf..aa8e6c654a7 100644 --- a/storage/ndb/test/tools/hugoPkDelete.cpp +++ b/storage/ndb/test/tools/hugoPkDelete.cpp @@ -20,22 +20,41 @@ #include #include #include +#include +#include #include #include #include +static NDBT_ThreadFunc hugoPkDelete; + +struct ThrInput { + const NdbDictionary::Table* pTab; + int records; + int batch; + int stats; +}; + +struct ThrOutput { + NDBT_Stats latency; +}; + int main(int argc, const char** argv){ ndb_init(); int _records = 0; int _loops = 1; - int _batch = 0; + int _threads = 1; + int _stats = 0; + int _batch = 1; const char* _tabname = NULL; int _help = 0; struct getargs args[] = { { "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" }, + { "threads", 
't', arg_integer, &_threads, "number of threads (default 1)", "threads" }, + { "stats", 's', arg_flag, &_stats, "report latency per batch", "stats" }, // { "batch", 'b', arg_integer, &_batch, "batch value", "batch" }, { "records", 'r', arg_integer, &_records, "Number of records", "records" }, { "usage", '?', arg_flag, &_help, "Print help", "" } @@ -81,12 +100,57 @@ int main(int argc, const char** argv){ return NDBT_ProgramExit(NDBT_WRONGARGS); } - HugoTransactions hugoTrans(*pTab); + // threads + NDBT_ThreadSet ths(_threads); + + // create Ndb object for each thread + if (ths.connect(&con, "TEST_DB") == -1) { + ndbout << "connect failed: err=" << ths.get_err() << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + + // input is options + ThrInput input; + ths.set_input(&input); + input.pTab = pTab; + input.records = _records; + input.batch = _batch; + input.stats = _stats; + + // output is stats + ThrOutput output; + ths.set_output<ThrOutput>(); + int i = 0; - while (i<_loops || _loops==0) { + while (i < _loops || _loops == 0) { ndbout << i << ": "; - if (hugoTrans.pkDelRecords(&MyNdb, _records) != 0){ - return NDBT_ProgramExit(NDBT_FAILED); + + ths.set_func(hugoPkDelete); + ths.start(); + ths.stop(); + + if (ths.get_err()) + return NDBT_ProgramExit(NDBT_FAILED); + + if (_stats) { + NDBT_Stats latency; + + // add stats from each thread + int n; + for (n = 0; n < ths.get_count(); n++) { + NDBT_Thread& thr = ths.get_thread(n); + ThrOutput* output = (ThrOutput*)thr.get_output(); + latency += output->latency; + } + + ndbout + << "latency per batch (us): " + << " samples=" << latency.getCount() + << " min=" << (int)latency.getMin() + << " max=" << (int)latency.getMax() + << " mean=" << (int)latency.getMean() + << " stddev=" << (int)latency.getStddev() + << endl; } i++; } @@ -94,3 +158,23 @@ int main(int argc, const char** argv){ return NDBT_ProgramExit(NDBT_OK); } +static void hugoPkDelete(NDBT_Thread& thr) +{ + const ThrInput* input = (const ThrInput*)thr.get_input(); + ThrOutput*
output = (ThrOutput*)thr.get_output(); + + HugoTransactions hugoTrans(*input->pTab); + output->latency.reset(); + if (input->stats) + hugoTrans.setStatsLatency(&output->latency); + + NDBT_ThreadSet& ths = thr.get_thread_set(); + hugoTrans.setThrInfo(ths.get_count(), thr.get_thread_no()); + + int ret; + ret = hugoTrans.pkDelRecords(thr.get_ndb(), + input->records, + input->batch); + if (ret != 0) + thr.set_err(ret); +} diff --git a/storage/ndb/test/tools/hugoPkRead.cpp b/storage/ndb/test/tools/hugoPkRead.cpp index dd14203c16e..232f55b35b8 100644 --- a/storage/ndb/test/tools/hugoPkRead.cpp +++ b/storage/ndb/test/tools/hugoPkRead.cpp @@ -20,17 +20,33 @@ #include #include #include +#include +#include #include #include #include +static NDBT_ThreadFunc hugoPkRead; + +struct ThrInput { + const NdbDictionary::Table* pTab; + int records; + int batch; + int stats; +}; + +struct ThrOutput { + NDBT_Stats latency; +}; int main(int argc, const char** argv){ ndb_init(); int _records = 0; int _loops = 1; + int _threads = 1; + int _stats = 0; int _abort = 0; int _batch = 1; const char* _tabname = NULL; @@ -39,6 +55,8 @@ int main(int argc, const char** argv){ struct getargs args[] = { { "aborts", 'a', arg_integer, &_abort, "percent of transactions that are aborted", "abort%" }, { "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" }, + { "threads", 't', arg_integer, &_threads, "number of threads (default 1)", "threads" }, + { "stats", 's', arg_flag, &_stats, "report latency per batch", "stats" }, { "batch", 'b', arg_integer, &_batch, "batch value(not 0)", "batch" }, { "records", 'r', arg_integer, &_records, "Number of records", "records" }, { "usage", '?', arg_flag, &_help, "Print help", "" } @@ -64,6 +82,7 @@ int main(int argc, const char** argv){ { return NDBT_ProgramExit(NDBT_FAILED); } + Ndb MyNdb(&con, "TEST_DB" ); if(MyNdb.init() != 0){ @@ -81,12 +100,57 @@ int main(int argc, const char** argv){ return 
NDBT_ProgramExit(NDBT_WRONGARGS); } - HugoTransactions hugoTrans(*pTab); + // threads + NDBT_ThreadSet ths(_threads); + + // create Ndb object for each thread + if (ths.connect(&con, "TEST_DB") == -1) { + ndbout << "connect failed: err=" << ths.get_err() << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + + // input is options + ThrInput input; + ths.set_input(&input); + input.pTab = pTab; + input.records = _records; + input.batch = _batch; + input.stats = _stats; + + // output is stats + ThrOutput output; + ths.set_output<ThrOutput>(); + int i = 0; - while (i<_loops || _loops==0) { + while (i < _loops || _loops == 0) { ndbout << i << ": "; - if (hugoTrans.pkReadRecords(&MyNdb, _records, _batch) != 0){ - return NDBT_ProgramExit(NDBT_FAILED); + + ths.set_func(hugoPkRead); + ths.start(); + ths.stop(); + + if (ths.get_err()) + return NDBT_ProgramExit(NDBT_FAILED); + + if (_stats) { + NDBT_Stats latency; + + // add stats from each thread + int n; + for (n = 0; n < ths.get_count(); n++) { + NDBT_Thread& thr = ths.get_thread(n); + ThrOutput* output = (ThrOutput*)thr.get_output(); + latency += output->latency; + } + + ndbout + << "latency per batch (us): " + << " samples=" << latency.getCount() + << " min=" << (int)latency.getMin() + << " max=" << (int)latency.getMax() + << " mean=" << (int)latency.getMean() + << " stddev=" << (int)latency.getStddev() + << endl; } i++; } @@ -94,3 +158,20 @@ int main(int argc, const char** argv){ return NDBT_ProgramExit(NDBT_OK); } +static void hugoPkRead(NDBT_Thread& thr) +{ + const ThrInput* input = (const ThrInput*)thr.get_input(); + ThrOutput* output = (ThrOutput*)thr.get_output(); + + HugoTransactions hugoTrans(*input->pTab); + output->latency.reset(); + if (input->stats) + hugoTrans.setStatsLatency(&output->latency); + + int ret; + ret = hugoTrans.pkReadRecords(thr.get_ndb(), + input->records, + input->batch); + if (ret != 0) + thr.set_err(ret); +} diff --git a/storage/ndb/test/tools/hugoPkUpdate.cpp b/storage/ndb/test/tools/hugoPkUpdate.cpp index 
3e950bc96cd..b920a4f396a 100644 --- a/storage/ndb/test/tools/hugoPkUpdate.cpp +++ b/storage/ndb/test/tools/hugoPkUpdate.cpp @@ -20,24 +20,43 @@ #include #include #include +#include +#include #include #include #include +static NDBT_ThreadFunc hugoPkUpdate; + +struct ThrInput { + const NdbDictionary::Table* pTab; + int records; + int batch; + int stats; +}; + +struct ThrOutput { + NDBT_Stats latency; +}; + int main(int argc, const char** argv){ ndb_init(); int _records = 0; int _loops = 1; + int _threads = 1; + int _stats = 0; int _abort = 0; - int _batch = 0; + int _batch = 1; const char* _tabname = NULL, *db = 0; int _help = 0; struct getargs args[] = { { "aborts", 'a', arg_integer, &_abort, "percent of transactions that are aborted", "abort%" }, { "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" }, + { "threads", 't', arg_integer, &_threads, "number of threads (default 1)", "threads" }, + { "stats", 's', arg_flag, &_stats, "report latency per batch", "stats" }, // { "batch", 'b', arg_integer, &_batch, "batch value", "batch" }, { "records", 'r', arg_integer, &_records, "Number of records", "records" }, { "usage", '?', arg_flag, &_help, "Print help", "" }, @@ -83,16 +102,81 @@ int main(int argc, const char** argv){ return NDBT_ProgramExit(NDBT_WRONGARGS); } - HugoTransactions hugoTrans(*pTab); + // threads + NDBT_ThreadSet ths(_threads); + + // create Ndb object for each thread + if (ths.connect(&con, "TEST_DB") == -1) { + ndbout << "connect failed: err=" << ths.get_err() << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + + // input is options + ThrInput input; + ths.set_input(&input); + input.pTab = pTab; + input.records = _records; + input.batch = _batch; + input.stats = _stats; + + // output is stats + ThrOutput output; + ths.set_output<ThrOutput>(); + int i = 0; - while (i<_loops || _loops==0) { - ndbout << "loop " << i << ": "; - if (hugoTrans.pkUpdateRecords(&MyNdb, - _records) != 0){ - return 
NDBT_ProgramExit(NDBT_FAILED); + while (i < _loops || _loops == 0) { + ndbout << i << ": "; + + ths.set_func(hugoPkUpdate); + ths.start(); + ths.stop(); + + if (ths.get_err()) + return NDBT_ProgramExit(NDBT_FAILED); + + if (_stats) { + NDBT_Stats latency; + + // add stats from each thread + int n; + for (n = 0; n < ths.get_count(); n++) { + NDBT_Thread& thr = ths.get_thread(n); + ThrOutput* output = (ThrOutput*)thr.get_output(); + latency += output->latency; + } + + ndbout + << "latency per batch (us): " + << " samples=" << latency.getCount() + << " min=" << (int)latency.getMin() + << " max=" << (int)latency.getMax() + << " mean=" << (int)latency.getMean() + << " stddev=" << (int)latency.getStddev() + << endl; } i++; } return NDBT_ProgramExit(NDBT_OK); } + +static void hugoPkUpdate(NDBT_Thread& thr) +{ + const ThrInput* input = (const ThrInput*)thr.get_input(); + ThrOutput* output = (ThrOutput*)thr.get_output(); + + HugoTransactions hugoTrans(*input->pTab); + output->latency.reset(); + if (input->stats) + hugoTrans.setStatsLatency(&output->latency); + + NDBT_ThreadSet& ths = thr.get_thread_set(); + hugoTrans.setThrInfo(ths.get_count(), thr.get_thread_no()); + + int ret; + ret = hugoTrans.pkUpdateRecords(thr.get_ndb(), + input->records, + input->batch); + if (ret != 0) + thr.set_err(ret); +}