From f05a67c72b65ebbc6459e16353231924242865c0 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Thu, 8 Feb 2007 01:09:38 +0700 Subject: [PATCH 01/21] ndb: correction of "ignore error" patch --- sql/ha_ndbcluster.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a65ca1c6736..191fa64f321 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -259,17 +259,16 @@ static int ndb_to_mysql_error(const NdbError *ndberr) int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans) { - int res= trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError, - h->m_force_send); - if (res == -1) + if (trans->execute(NdbTransaction::NoCommit, + NdbOperation::AO_IgnoreError, + h->m_force_send) == -1) return -1; const NdbError &err= trans->getNdbError(); if (err.classification != NdbError::NoError && err.classification != NdbError::ConstraintViolation && err.classification != NdbError::NoDataFound) - return res; + return -1; return 0; } From 18f63370aae5a90590a7dc47d1c7565e742d046d Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Thu, 8 Feb 2007 13:40:05 +0700 Subject: [PATCH 02/21] Bug #24000 ndb_config.cpp has the wrong name ndb_condig.cpp in source distro --- storage/ndb/tools/Makefile.am | 2 +- storage/ndb/tools/{ndb_condig.cpp => ndb_config.cpp} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename storage/ndb/tools/{ndb_condig.cpp => ndb_config.cpp} (100%) diff --git a/storage/ndb/tools/Makefile.am b/storage/ndb/tools/Makefile.am index 3d0c6f79146..7480b9a2ae9 100644 --- a/storage/ndb/tools/Makefile.am +++ b/storage/ndb/tools/Makefile.am @@ -50,7 +50,7 @@ ndb_restore_SOURCES = restore/restore_main.cpp \ restore/Restore.cpp \ ../test/src/NDBT_ResultRow.cpp $(tools_common_sources) -ndb_config_SOURCES = ndb_condig.cpp \ +ndb_config_SOURCES = ndb_config.cpp \ ../src/mgmsrv/Config.cpp \ ../src/mgmsrv/ConfigInfo.cpp \ ../src/mgmsrv/InitConfigFileParser.cpp diff --git a/storage/ndb/tools/ndb_condig.cpp b/storage/ndb/tools/ndb_config.cpp similarity index 100% rename from storage/ndb/tools/ndb_condig.cpp rename to storage/ndb/tools/ndb_config.cpp From 088b3855a9351c9464697a8a0d9846f132f23134 Mon Sep 17 00:00:00 2001 From: "lzhou/zhl@dev3-63.(none)" <> Date: Fri, 9 Feb 2007 13:31:51 +0000 Subject: [PATCH 03/21] BUG#22025 Return correct error message when transporter error occur. --- ndb/src/common/debugger/EventLogger.cpp | 99 ++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 4 deletions(-) diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index 3efd52808e2..39daa3effe8 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -16,6 +16,7 @@ #include #include "EventLogger.hpp" +#include #include #include @@ -528,10 +529,100 @@ void getTextUndoLogBlocked(QQQQ) { theData[2]); } void getTextTransporterError(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Transporter to node %d reported error 0x%x", - theData[1], - theData[2]); + struct myTransporterError{ + int errorNum; + char errorString[256]; + }; + int i = 0; + int lenth = 0; + static const struct myTransporterError TransporterErrorString[]= + { + //TE_NO_ERROR = 0 + {TE_NO_ERROR,"No error"}, + //TE_ERROR_CLOSING_SOCKET = 0x1 + {TE_ERROR_CLOSING_SOCKET,"Error found during closing of socket"}, + //TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2 + {TE_ERROR_IN_SELECT_BEFORE_ACCEPT,"Error found before accept. 
The transporter will retry"}, + //TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT + {TE_INVALID_MESSAGE_LENGTH,"Error found in message (invalid message length)"}, + //TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT + {TE_INVALID_CHECKSUM,"Error found in message (checksum)"}, + //TE_COULD_NOT_CREATE_SOCKET = 0x5 + {TE_COULD_NOT_CREATE_SOCKET,"Error found while creating socket(can't create socket)"}, + //TE_COULD_NOT_BIND_SOCKET = 0x6 + {TE_COULD_NOT_BIND_SOCKET,"Error found while binding server socket"}, + //TE_LISTEN_FAILED = 0x7 + {TE_LISTEN_FAILED,"Error found while listening to server socket"}, + //TE_ACCEPT_RETURN_ERROR = 0x8 + {TE_ACCEPT_RETURN_ERROR,"Error found during accept(accept return error)"}, + //TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT + {TE_SHM_DISCONNECT,"The remote node has disconnected"}, + //TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT + {TE_SHM_IPC_STAT,"Unable to check shm segment"}, + //TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd + {TE_SHM_UNABLE_TO_CREATE_SEGMENT,"Unable to create shm segment"}, + //TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe + {TE_SHM_UNABLE_TO_ATTACH_SEGMENT,"Unable to attach shm segment"}, + //TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf + {TE_SHM_UNABLE_TO_REMOVE_SEGMENT,"Unable to remove shm segment"}, + //TE_TOO_SMALL_SIGID = 0x10 + {TE_TOO_SMALL_SIGID,"Sig ID too small"}, + //TE_TOO_LARGE_SIGID = 0x11 + {TE_TOO_LARGE_SIGID,"Sig ID too large"}, + //TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT + {TE_WAIT_STACK_FULL,"Wait stack was full"}, + //TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT + {TE_RECEIVE_BUFFER_FULL,"Receive buffer was full"}, + //TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT + {TE_SIGNAL_LOST_SEND_BUFFER_FULL,"Send buffer was full,and trying to force send fails"}, + //TE_SIGNAL_LOST = 0x15 + {TE_SIGNAL_LOST,"Send failed for unknown reason(signal lost)"}, + //TE_SEND_BUFFER_FULL = 0x16 + {TE_SEND_BUFFER_FULL,"The send buffer was full, but sleeping for a while solved"}, + //TE_SCI_LINK_ERROR = 0x0017 + {TE_SCI_LINK_ERROR,"There is no link from this node to the switch"}, + //TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_START_SEQUENCE,"Could not start a sequence, because system resources are exumed or no sequence has been created"}, + //TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_REMOVE_SEQUENCE,"Could not remove a sequence"}, + //TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_CREATE_SEQUENCE,"Could not create a sequence, because system resources are exempted. 
Must reboot"}, + //TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT + {TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR,"Tried to send data on redundant link but failed"}, + //TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT + {TE_SCI_CANNOT_INIT_LOCALSEGMENT,"Cannot initialize local segment"}, + //TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNEC + {TE_SCI_CANNOT_MAP_REMOTESEGMENT,"Cannot map remote segment"}, + //TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_UNMAP_SEGMENT,"Cannot free the resources used by this segment (step 1)"}, + //TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNEC + {TE_SCI_UNABLE_TO_REMOVE_SEGMENT,"Cannot free the resources used by this segment (step 2)"}, + //TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT,"Cannot disconnect from a remote segment"}, + //TE_SHM_IPC_PERMANENT = 0x21 + {TE_SHM_IPC_PERMANENT,"Shm ipc Permanent error"}, + //TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22 + {TE_SCI_UNABLE_TO_CLOSE_CHANNEL,"Unable to close the sci channel and the resources allocated"} + }; + + lenth = sizeof(TransporterErrorString)/sizeof(struct myTransporterError); + for(i=0; i Date: Fri, 9 Feb 2007 16:31:12 +0100 Subject: [PATCH 04/21] bug #26225 Engine condition pushdown doesn't work with prepare statements (ps_7ndb): ensure that query plan for prepared statement is analyzed correctly --- sql/ha_ndbcluster.cc | 2 +- sql/item_func.cc | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 514d4b83a04..d1d3484d432 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -7058,7 +7058,7 @@ void ndb_serialize_cond(const Item *item, void *arg) Check that the field is part of the table of the handler instance and that we expect a field with of this result type. 
*/ - if (context->table == field->table) + if (context->table->s == field->table->s) { const NDBTAB *tab= (const NDBTAB *) context->ndb_table; DBUG_PRINT("info", ("FIELD_ITEM")); diff --git a/sql/item_func.cc b/sql/item_func.cc index a294bbd7a71..0866e927fef 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -231,6 +231,8 @@ void Item_func::traverse_cond(Cond_traverser traverser, (*traverser)(this, argument); } } + else + (*traverser)(this, argument); } From 113a84bd33a32e4055b0ed6bac6eaad6111ec9cc Mon Sep 17 00:00:00 2001 From: "mskold/marty@mysql.com/linux.site" <> Date: Fri, 9 Feb 2007 21:04:43 +0100 Subject: [PATCH 05/21] Enabled engine_condition_pushdown (on) as default --- mysql-test/r/ndb_basic.result | 2 +- mysql-test/r/ndb_blob.result | 4 ++-- sql/mysqld.cc | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index b4a55641e80..7717e9fe72a 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -568,7 +568,7 @@ t1 insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1); explain select * from t1 where a12345678901234567890123456789a1234567890=2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref a12345678901234567890123456789a1234567890 a12345678901234567890123456789a1234567890 5 const # Using where +1 SIMPLE t1 ref a12345678901234567890123456789a1234567890 a12345678901234567890123456789a1234567890 5 const # Using where with pushed condition select * from t1 where a12345678901234567890123456789a1234567890=2; a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890 5 2 diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result index 04f2cea6250..ad33c7994d1 100644 --- a/mysql-test/r/ndb_blob.result +++ b/mysql-test/r/ndb_blob.result @@ -242,7 +242,7 @@ insert into t1 values(9,'b9',999,'dd9'); commit; explain select * from t1 where c >= 100 order by a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 4 NULL # Using where; Using filesort +1 SIMPLE t1 range c c 4 NULL # Using where with pushed condition; Using filesort select * from t1 where c >= 100 order by a; a b c d 1 b1 111 dd1 @@ -278,7 +278,7 @@ insert into t1 values(2,@b2,222,@d2); commit; explain select * from t1 where c >= 100 order by a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 4 NULL # Using where; Using filesort +1 SIMPLE t1 range c c 4 NULL # Using where with pushed condition; Using filesort select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3) from t1 where c >= 100 order by a; a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 26ab79ac207..4845c3ae765 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5077,7 +5077,7 @@ struct my_option my_long_options[] = "Push supported query conditions to the storage engine.", (gptr*) &global_system_variables.engine_condition_pushdown, (gptr*) &global_system_variables.engine_condition_pushdown, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, /* See how it's handled in get_one_option() */ {"event-scheduler", OPT_EVENT_SCHEDULER, "Enable/disable the event scheduler.", NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, From fc94ac8a10cbb9d5a1778e5f658feaeafd2a38a5 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Tue, 13 Feb 2007 02:38:54 +0100 Subject: [PATCH 06/21] ndb - new atrt 
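
This commit replaces the old d.txt/run-test.hpp based configuration with a
my.cnf-driven setup (atrt.hpp, setup.cpp, files.cpp and the conf-*.cnf files
below). A rough sketch of how the reworked atrt is typically driven, based on
the autotest-run.sh and the main.cpp option handling added in this patch; the
my.cnf and test-file names here are only illustrative:

  # Bootstrap only: C = (re)configure, d = deploy to the hosts,
  # q = quit before starting any tests.
  atrt Cdq my.cnf

  # Run a suite: start the configured processes, execute the test list,
  # and write report.txt and log.txt into the current run directory.
  atrt --report-file=report.txt --log-file=log.txt \
       --testcase-file=daily-basic-tests.txt my.cnf
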
--- storage/ndb/src/cw/cpcd/Makefile.am | 2 +- .../ndb/src/mgmsrv/InitConfigFileParser.cpp | 2 +- storage/ndb/test/run-test/Makefile.am | 22 +- .../ndb/test/run-test/atrt-gather-result.sh | 2 +- storage/ndb/test/run-test/atrt.hpp | 161 +++ storage/ndb/test/run-test/autotest-boot.sh | 165 +++ storage/ndb/test/run-test/autotest-run.sh | 269 ++++ .../test/run-test/conf-daily-basic-ndb08.txt | 19 - .../test/run-test/conf-daily-devel-ndb08.txt | 19 - .../run-test/conf-daily-devel-ndbmaster.txt | 22 - .../test/run-test/conf-daily-sql-ndb08.txt | 20 - .../run-test/conf-daily-sql-ndbmaster.txt | 20 - storage/ndb/test/run-test/conf-dl145a.cnf | 23 + storage/ndb/test/run-test/conf-dl145a.txt | 22 - storage/ndb/test/run-test/conf-ndbmaster.cnf | 23 + storage/ndb/test/run-test/conf-ndbmaster.txt | 22 - storage/ndb/test/run-test/conf-repl.cnf | 28 + storage/ndb/test/run-test/conf-shark.txt | 22 - storage/ndb/test/run-test/example-my.cnf | 116 ++ storage/ndb/test/run-test/files.cpp | 383 ++++++ storage/ndb/test/run-test/main.cpp | 1118 ++++++++++------- storage/ndb/test/run-test/run-test.hpp | 95 -- storage/ndb/test/run-test/setup.cpp | 965 ++++++++++++++ storage/ndb/test/run-test/test-tests.txt | 24 + storage/ndb/test/tools/Makefile.am | 1 + 25 files changed, 2856 insertions(+), 709 deletions(-) create mode 100644 storage/ndb/test/run-test/atrt.hpp create mode 100644 storage/ndb/test/run-test/autotest-boot.sh create mode 100644 storage/ndb/test/run-test/autotest-run.sh delete mode 100644 storage/ndb/test/run-test/conf-daily-basic-ndb08.txt delete mode 100644 storage/ndb/test/run-test/conf-daily-devel-ndb08.txt delete mode 100644 storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt delete mode 100644 storage/ndb/test/run-test/conf-daily-sql-ndb08.txt delete mode 100644 storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt create mode 100644 storage/ndb/test/run-test/conf-dl145a.cnf delete mode 100644 storage/ndb/test/run-test/conf-dl145a.txt create mode 100644 storage/ndb/test/run-test/conf-ndbmaster.cnf delete mode 100644 storage/ndb/test/run-test/conf-ndbmaster.txt create mode 100644 storage/ndb/test/run-test/conf-repl.cnf delete mode 100644 storage/ndb/test/run-test/conf-shark.txt create mode 100644 storage/ndb/test/run-test/example-my.cnf create mode 100644 storage/ndb/test/run-test/files.cpp delete mode 100644 storage/ndb/test/run-test/run-test.hpp create mode 100644 storage/ndb/test/run-test/setup.cpp create mode 100644 storage/ndb/test/run-test/test-tests.txt diff --git a/storage/ndb/src/cw/cpcd/Makefile.am b/storage/ndb/src/cw/cpcd/Makefile.am index dfd2e8c270b..efc828e21a9 100644 --- a/storage/ndb/src/cw/cpcd/Makefile.am +++ b/storage/ndb/src/cw/cpcd/Makefile.am @@ -26,7 +26,7 @@ LDADD_LOC = \ include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_util.mk.am -ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@ +ndb_cpcd_LDFLAGS = -static @ndb_bin_am_ldflags@ # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp index ca0471f97b4..c691951d584 100644 --- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp +++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp @@ -836,7 +836,7 @@ InitConfigFileParser::parse_mycnf() opt.arg_type = REQUIRED_ARG; options.push_back(opt); - opt.name = "api"; + opt.name = "ndbapi"; opt.id = 256; opt.value = (gptr*)malloc(sizeof(char*)); opt.var_type = GET_STR; diff --git a/storage/ndb/test/run-test/Makefile.am 
b/storage/ndb/test/run-test/Makefile.am index b5cb69d266e..d6c6536cfc8 100644 --- a/storage/ndb/test/run-test/Makefile.am +++ b/storage/ndb/test/run-test/Makefile.am @@ -18,20 +18,18 @@ testdir=$(prefix)/mysql-test/ndb include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_util.mk.am include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am +include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am test_PROGRAMS = atrt test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \ - conf-daily-basic-ndb08.txt \ - conf-daily-devel-ndb08.txt \ - conf-daily-sql-ndb08.txt \ - conf-ndbmaster.txt \ - conf-shark.txt \ - conf-dl145a.txt + conf-ndbmaster.cnf \ + conf-dl145a.cnf test-tests.txt test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \ - atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh + atrt-clear-result.sh autotest-run.sh + +atrt_SOURCES = main.cpp setup.cpp files.cpp -atrt_SOURCES = main.cpp run-test.hpp INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/test/include LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \ $(top_builddir)/storage/ndb/src/libndbclient.la \ @@ -39,6 +37,14 @@ LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ +atrt_CXXFLAGS = -I$(top_srcdir)/ndb/src/mgmapi \ + -I$(top_srcdir)/ndb/src/mgmsrv \ + -I$(top_srcdir)/ndb/include/mgmcommon \ + -DMYSQLCLUSTERDIR="\"\"" \ + -DDEFAULT_PREFIX="\"$(prefix)\"" + +atrt_LDFLAGS = -static @ndb_bin_am_ldflags@ + wrappersdir=$(prefix)/bin wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run diff --git a/storage/ndb/test/run-test/atrt-gather-result.sh b/storage/ndb/test/run-test/atrt-gather-result.sh index 93d4ae428d0..ca5f0a76736 100755 --- a/storage/ndb/test/run-test/atrt-gather-result.sh +++ b/storage/ndb/test/run-test/atrt-gather-result.sh @@ -8,7 +8,7 @@ rm -rf * while [ $# -gt 0 ] do - rsync -a "$1" . + rsync -a --exclude='ndb_*_fs/*' "$1" . shift done diff --git a/storage/ndb/test/run-test/atrt.hpp b/storage/ndb/test/run-test/atrt.hpp new file mode 100644 index 00000000000..14d2dccd245 --- /dev/null +++ b/storage/ndb/test/run-test/atrt.hpp @@ -0,0 +1,161 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef atrt_config_hpp +#define atrt_config_hpp + +#include +#include +#include +#include +#include +#include +#include + +enum ErrorCodes +{ + ERR_OK = 0, + ERR_NDB_FAILED = 101, + ERR_SERVERS_FAILED = 102, + ERR_MAX_TIME_ELAPSED = 103 +}; + +struct atrt_host +{ + size_t m_index; + BaseString m_user; + BaseString m_basedir; + BaseString m_hostname; + SimpleCpcClient * m_cpcd; + Vector m_processes; +}; + +struct atrt_options +{ + enum Feature { + AO_REPLICATION = 1, + AO_NDBCLUSTER = 2 + }; + + int m_features; + Properties m_loaded; + Properties m_generated; +}; + +struct atrt_process +{ + size_t m_index; + struct atrt_host * m_host; + struct atrt_cluster * m_cluster; + + enum Type { + AP_ALL = 255 + ,AP_NDBD = 1 + ,AP_NDB_API = 2 + ,AP_NDB_MGMD = 4 + ,AP_MYSQLD = 16 + ,AP_CLIENT = 32 + ,AP_CLUSTER = 256 // Used for options parsing for "cluster" options + } m_type; + + SimpleCpcClient::Process m_proc; + + NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm + atrt_process * m_mysqld; // if type == client + atrt_process * m_rep_src; // if type == mysqld + Vector m_rep_dst; // if type == mysqld + + atrt_options m_options; +}; + +struct atrt_cluster +{ + BaseString m_name; + BaseString m_dir; + Vector m_processes; + atrt_options m_options; +}; + +struct atrt_config +{ + bool m_generated; + BaseString m_key; + BaseString m_replication; + Vector m_hosts; + Vector m_clusters; + Vector m_processes; +}; + +struct atrt_testcase +{ + bool m_report; + bool m_run_all; + time_t m_max_time; + BaseString m_command; + BaseString m_args; + BaseString m_name; +}; + +extern Logger g_logger; + +void require(bool x); +bool parse_args(int argc, char** argv); +bool setup_config(atrt_config&); +bool configure(atrt_config&, int setup); +bool setup_directories(atrt_config&, int setup); +bool setup_files(atrt_config&, int setup, int sshx); + +bool deploy(atrt_config&); +bool sshx(atrt_config&, unsigned procmask); +bool start(atrt_config&, unsigned procmask); + +bool remove_dir(const char *, bool incl = true); +bool connect_hosts(atrt_config&); +bool connect_ndb_mgm(atrt_config&); +bool wait_ndb(atrt_config&, int ndb_mgm_node_status); +bool start_processes(atrt_config&, int); +bool stop_processes(atrt_config&, int); +bool update_status(atrt_config&, int); +int is_running(atrt_config&, int); +bool gather_result(atrt_config&, int * result); + +bool read_test_case(FILE *, atrt_testcase&, int& line); +bool setup_test_case(atrt_config&, const atrt_testcase&); + +bool setup_hosts(atrt_config&); + +/** + * Global variables... 
+ */ +extern Logger g_logger; +extern atrt_config g_config; + +extern const char * g_cwd; +extern const char * g_my_cnf; +extern const char * g_user; +extern const char * g_basedir; +extern const char * g_prefix; +extern int g_baseport; +extern int g_fqpn; +extern int g_default_ports; + +extern const char * g_clusters; + +extern const char *save_file; +extern const char *save_group_suffix; +extern char *save_extra_file; + +#endif diff --git a/storage/ndb/test/run-test/autotest-boot.sh b/storage/ndb/test/run-test/autotest-boot.sh new file mode 100644 index 00000000000..31f611460ec --- /dev/null +++ b/storage/ndb/test/run-test/autotest-boot.sh @@ -0,0 +1,165 @@ +#!/bin/sh +############################################################# +# This script created by Jonas does the following # +# Cleans up clones and pevious builds, pulls new clones, # +# builds, deploys, configures the tests and launches ATRT # +############################################################# + +############### +#Script setup # +############## + +save_args=$* +VERSION="autotest-boot.sh version 1.00" + +DATE=`date '+%Y-%m-%d'` +HOST=`hostname -s` +export DATE HOST + +set -e + +echo "`date` starting: $*" + +verbose=0 +do_clone=yes +build=yes + +conf= +LOCK=$HOME/.autotest-lock + +############################ +# Read command line entries# +############################ + +while [ "$1" ] +do + case "$1" in + --no-clone) do_clone="";; + --no-build) build="";; + --verbose) verbose=`expr $verbose + 1`;; + --clone=*) clone=`echo $1 | sed s/--clone=//`;; + --version) echo $VERSION; exit;; + --conf=*) conf=`echo $1 | sed s/--conf=//`;; + *) RUN=$*;; + esac + shift +done + +################################# +#Make sure the configfile exists# +#if it does not exit. if it does# +# (.) load it # +################################# +if [ -z "$conf" ] +then + conf=`pwd`/autotest.conf +fi + +if [ -f $conf ] +then + . $conf +else + echo "Can't find config file: $conf" + exit +fi + +############################### +# Validate that all interesting +# variables where set in conf +############################### +vars="src_clone_base install_dir build_dir" +for i in $vars +do + t=`echo echo \\$$i` + if [ -z "`eval $t`" ] + then + echo "Invalid config: $conf, variable $i is not set" + exit + fi +done + +############################### +#Print out the enviroment vars# +############################### + +if [ $verbose -gt 0 ] +then + env +fi + +#################################### +# Setup the lock file name and path# +# Setup the clone source location # +#################################### + +src_clone=$src_clone_base-$clone + +####################################### +# Check to see if the lock file exists# +# If it does exit. 
# +####################################### + +if [ -f $LOCK ] +then + echo "Lock file exists: $LOCK" + exit 1 +fi + +####################################### +# If the lock file does not exist then# +# create it with date and run info # +####################################### + +echo "$DATE $RUN" > $LOCK + +############################# +#If any errors here down, we# +# trap them, and remove the # +# Lock file before exit # +############################# +if [ `uname -s` != "SunOS" ] +then + trap "rm -f $LOCK" ERR +fi + +# You can add more to this path# +################################ + +dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$ + +######################################### +# Delete source and pull down the latest# +######################################### + +if [ "$do_clone" ] +then + rm -rf $dst_place + bk clone $src_clone $dst_place +fi + +########################################## +# Build the source, make installs, and # +# create the database to be rsynced # +########################################## + +if [ "$build" ] +then + cd $dst_place + rm -rf $install_dir + BUILD/compile-ndb-autotest --prefix=$install_dir + make install +fi + + +################################ +# Start run script # +################################ + +script=$install_dir/mysql-test/ndb/autotest-run.sh +$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock + +if [ "$build" ] +then + rm -rf $dst_place +fi +rm -f $LOCK diff --git a/storage/ndb/test/run-test/autotest-run.sh b/storage/ndb/test/run-test/autotest-run.sh new file mode 100644 index 00000000000..6136a3a1060 --- /dev/null +++ b/storage/ndb/test/run-test/autotest-run.sh @@ -0,0 +1,269 @@ +#!/bin/sh +############################################################# +# This script created by Jonas does the following # +# Cleans up clones and pevious builds, pulls new clones, # +# builds, deploys, configures the tests and launches ATRT # +############################################################# + +############### +#Script setup # +############## + +save_args=$* +VERSION="autotest-run.sh version 1.00" + +DATE=`date '+%Y-%m-%d'` +HOST=`hostname -s` +export DATE HOST + +set -e +ulimit -Sc unlimited + +echo "`date` starting: $*" + +RSYNC_RSH=ssh +export RSYNC_RSH + +verbose=0 +report=yes +nolock= +RUN="daily-basic" +conf=autotest.conf +LOCK=$HOME/.autotest-lock + +############################ +# Read command line entries# +############################ + +while [ "$1" ] +do + case "$1" in + --verbose) verbose=`expr $verbose + 1`;; + --conf=*) conf=`echo $1 | sed s/--conf=//`;; + --version) echo $VERSION; exit;; + --suite=*) RUN=`echo $1 | sed s/--suite=//`;; + --install-dir=*) install_dir=`echo $1 | sed s/--install-dir=//`;; + --clone=*) clone=`echo $1 | sed s/--clone=//`;; + --nolock) nolock=true;; + esac + shift +done + +################################# +#Make sure the configfile exists# +#if it does not exit. if it does# +# (.) load it # +################################# + +install_dir_save=$install_dir +if [ -f $conf ] +then + . 
$conf +else + echo "Can't find config file: $conf" + exit +fi +install_dir=$install_dir_save + +############################### +# Validate that all interesting +# variables where set in conf +############################### +vars="target base_dir install_dir hosts" +if [ "$report" ] +then + vars="$vars result_host result_path" +fi +for i in $vars +do + t=`echo echo \\$$i` + if [ -z "`eval $t`" ] + then + echo "Invalid config: $conf, variable $i is not set" + exit + fi +done + +############################### +#Print out the enviroment vars# +############################### + +if [ $verbose -gt 0 ] +then + env +fi + +####################################### +# Check to see if the lock file exists# +# If it does exit. # +####################################### + +if [ -z "$nolock" ] +then + if [ -f $LOCK ] + then + echo "Lock file exists: $LOCK" + exit 1 + fi + echo "$DATE $RUN" > $LOCK +fi + +############################# +#If any errors here down, we# +# trap them, and remove the # +# Lock file before exit # +############################# +if [ `uname -s` != "SunOS" ] +then + trap "rm -f $LOCK" ERR +fi + + +############################################### +# Check that all interesting files are present# +############################################### + +test_dir=$install_dir/mysql-test/ndb +atrt=$test_dir/atrt +test_file=$test_dir/$RUN-tests.txt + +if [ ! -f "$test_file" ] +then + echo "Cant find testfile: $test_file" + exit 1 +fi + +if [ ! -x "$atrt" ] +then + echo "Cant find atrt binary at $atrt" + exit 1 +fi + +############################ +# check ndb_cpcc fail hosts# +############################ +failed=`ndb_cpcc $hosts | awk '{ if($1=="Failed"){ print;}}'` +if [ "$failed" ] +then + echo "Cant contact cpcd on $failed, exiting" + exit 1 +fi + +############################# +# Function for replacing the# +# choose host with real host# +# names. 
Note $$ = PID # +############################# +choose(){ + SRC=$1 + TMP1=/tmp/choose.$$ + TMP2=/tmp/choose.$$.$$ + shift + + cp $SRC $TMP1 + i=1 + while [ $# -gt 0 ] + do + sed -e s,"CHOOSE_host$i",$1,g < $TMP1 > $TMP2 + mv $TMP2 $TMP1 + shift + i=`expr $i + 1` + done + cat $TMP1 + rm -f $TMP1 +} + +choose_conf(){ + if [ -f $test_dir/conf-$1-$HOST.cnf ] + then + echo "$test_dir/conf-$1-$HOST.cnf" + elif [ -f $test_dir/conf-$1.cnf ] + then + echo "$test_dir/conf-$1.cnf" + elif [ -f $test_dir/conf-$HOST.cnf ] + then + echo "$test_dir/conf-$HOST.cnf" + else + echo "Unable to find conf file looked for" 1>&2 + echo "$test_dir/conf-$1-$HOST.cnf and" 1>&2 + echo "$test_dir/conf-$HOST.cnf" 1>&2 + echo "$test_dir/conf-$1.cnf" 1>&2 + exit + fi +} + +######################################### +# Count how many computers we have ready# +######################################### + +count_hosts(){ + cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \ + if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l` + echo $cnt +} + +conf=`choose_conf $RUN` +count=`count_hosts $conf` +avail=`echo $hosts | wc -w` +if [ $count -gt $avail ] + then + echo "Not enough hosts" + echo "Needs: $count available: $avail ($avail_hosts)" + exit 1 +fi + +### +# Make directories needed + +p=`pwd` +run_dir=$install_dir/run-$RUN-mysql-$clone-$target +res_dir=$base_dir/result-$RUN-mysql-$clone-$target/$DATE +tar_dir=$base_dir/saved-results + +mkdir -p $run_dir $res_dir $tar_dir +rm -rf $res_dir/* $run_dir/* + + +### +# +# Do sed substitiutions +# +cd $run_dir +choose $conf $hosts > d.tmp.$$ +sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf + +# Setup configuration +$atrt Cdq my.cnf + +# Start... +$atrt --report-file=report.txt --log-file=log.txt --testcase-file=$test_dir/$RUN-tests.txt my.cnf + +# Make tar-ball +[ -f log.txt ] && mv log.txt $res_dir +[ -f report.txt ] && mv report.txt $res_dir +[ "`find . -name 'result*'`" ] && mv result* $res_dir +cd $res_dir + +echo "date=$DATE" > info.txt +echo "suite=$RUN" >> info.txt +echo "clone=$clone" >> info.txt +echo "arch=$target" >> info.txt + +cd .. +p2=`pwd` +cd .. +tarfile=res.$RUN.$clone.$target.$DATE.$HOST.$$.tgz +tar cfz $tar_dir/$tarfile `basename $p2`/$DATE + +if [ "$report" ] +then + scp $tar_dir/$tarfile $result_host:$result_path/ +fi + +cd $p +rm -rf $res_dir $run_dir + +if [ -z "$nolock" ] +then + rm -f $LOCK +fi diff --git a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt b/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt deleted file mode 100644 index bcd809593f3..00000000000 --- a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt +++ /dev/null @@ -1,19 +0,0 @@ -baseport: 14000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . 
diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt b/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt deleted file mode 100644 index 8b340e6a39d..00000000000 --- a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt +++ /dev/null @@ -1,19 +0,0 @@ -baseport: 16000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt deleted file mode 100644 index 45e6e25f030..00000000000 --- a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 16000 -basedir: CHOOSE_dir -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: CHOOSE_dir/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . - -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt b/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt deleted file mode 100644 index 0d6a99f8d48..00000000000 --- a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt +++ /dev/null @@ -1,20 +0,0 @@ -baseport: 16000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 -mysqld: CHOOSE_host1 CHOOSE_host4 -mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt deleted file mode 100644 index 0d6a99f8d48..00000000000 --- a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt +++ /dev/null @@ -1,20 +0,0 @@ -baseport: 16000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 -mysqld: CHOOSE_host1 CHOOSE_host4 -mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . 
diff --git a/storage/ndb/test/run-test/conf-dl145a.cnf b/storage/ndb/test/run-test/conf-dl145a.cnf new file mode 100644 index 00000000000..ea344f1a62a --- /dev/null +++ b/storage/ndb/test/run-test/conf-dl145a.cnf @@ -0,0 +1,23 @@ +[atrt] +basedir = CHOOSE_dir +baseport = 14000 +clusters = .2node + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config.2node] +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3 +ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1 + +NoOfReplicas = 2 +IndexMemory = 100M +DataMemory = 300M +BackupMemory = 64M +MaxNoOfConcurrentScans = 100 +MaxNoOfSavedMessages= 1000 +SendBufferMemory = 2M diff --git a/storage/ndb/test/run-test/conf-dl145a.txt b/storage/ndb/test/run-test/conf-dl145a.txt deleted file mode 100644 index d0a240f09d1..00000000000 --- a/storage/ndb/test/run-test/conf-dl145a.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 14000 -basedir: /home/ndbdev/autotest/run -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /home/ndbdev/autotest/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . - -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/conf-ndbmaster.cnf b/storage/ndb/test/run-test/conf-ndbmaster.cnf new file mode 100644 index 00000000000..417e2988d0d --- /dev/null +++ b/storage/ndb/test/run-test/conf-ndbmaster.cnf @@ -0,0 +1,23 @@ +[atrt] +basedir = CHOOSE_dir +baseport = 14000 +clusters = .4node + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config.4node] +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3 +ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1 + +NoOfReplicas = 2 +IndexMemory = 100M +DataMemory = 300M +BackupMemory = 64M +MaxNoOfConcurrentScans = 100 +MaxNoOfSavedMessages= 1000 +SendBufferMemory = 2M diff --git a/storage/ndb/test/run-test/conf-ndbmaster.txt b/storage/ndb/test/run-test/conf-ndbmaster.txt deleted file mode 100644 index 9f50432f5e3..00000000000 --- a/storage/ndb/test/run-test/conf-ndbmaster.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 14000 -basedir: CHOOSE_dir -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: CHOOSE_dir/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . 
- -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/conf-repl.cnf b/storage/ndb/test/run-test/conf-repl.cnf new file mode 100644 index 00000000000..57eb2ee413e --- /dev/null +++ b/storage/ndb/test/run-test/conf-repl.cnf @@ -0,0 +1,28 @@ +[atrt] +basedir=CHOOSE_dir +baseport=15000 +clusters= .master,.slave +replicate= 1.master:1.slave + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config] +MaxNoOfSavedMessages= 1000 +DataMemory = 100M + +[cluster_config.master] +NoOfReplicas = 2 +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3 +mysqld = CHOOSE_host1 +ndbapi= CHOOSE_host1 + +[cluster_config.slave] +NoOfReplicas = 1 +ndb_mgmd = CHOOSE_host4 +ndbd = CHOOSE_host4 +mysqld = CHOOSE_host4 diff --git a/storage/ndb/test/run-test/conf-shark.txt b/storage/ndb/test/run-test/conf-shark.txt deleted file mode 100644 index d66d0280d8a..00000000000 --- a/storage/ndb/test/run-test/conf-shark.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 14000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host1 CHOOSE_host1 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . - -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/example-my.cnf b/storage/ndb/test/run-test/example-my.cnf new file mode 100644 index 00000000000..99e1ce9f75b --- /dev/null +++ b/storage/ndb/test/run-test/example-my.cnf @@ -0,0 +1,116 @@ +[atrt] +basedir=/home/jonas/atrt +baseport=10000 +clusters = .master +clusters= .master,.slave +replicate = 1.master:1.slave +replicate = 2.master:2.slave + +[cluster_config] +NoOfReplicas= 2 +IndexMemory= 10M +DataMemory= 50M +MaxNoOfConcurrentScans= 100 +Diskless = 1 + +[cluster_config.master] +ndb_mgmd = local1 +ndbd = local1,local1 +mysqld = local1,local1 +ndbapi= local1 +NoOfReplicas= 2 + +[cluster_config.slave] +ndb_mgmd = local1 +ndbd = local1 +ndbapi= local1 +mysqld = local1,local1 +NoOfReplicas= 1 + +[mysqld] +skip-innodb +skip-bdb + +# +# Generated by atrt +# Mon May 29 23:27:49 2006 + +[mysql_cluster.master] +ndb-connectstring= local1:10000 + +[cluster_config.ndb_mgmd.1.master] +PortNumber= 10000 + +[cluster_config.ndbd.1.master] +FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.1 + +[cluster_config.ndbd.2.master] +FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.2 + +[mysqld.1.master] +datadir= /home/jonas/atrt/cluster.master/mysqld.1 +socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock +port= 10001 +server-id= 1 +log-bin +ndb-connectstring= local1:10000 +ndbcluster + +[client.1.master] +socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock +port= 10001 + +[mysqld.2.master] +datadir= /home/jonas/atrt/cluster.master/mysqld.2 +socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock +port= 10002 +server-id= 2 +log-bin +ndb-connectstring= local1:10000 +ndbcluster + +[client.2.master] +socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock +port= 10002 + +[mysql_cluster.slave] +ndb-connectstring= local1:10003 + +[cluster_config.ndb_mgmd.1.slave] +PortNumber= 10003 + +[cluster_config.ndbd.1.slave] +FileSystemPath= /home/jonas/atrt/cluster.slave/ndbd.1 + +[mysqld.1.slave] +datadir= /home/jonas/atrt/cluster.slave/mysqld.1 +socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock +port= 10004 +server-id= 3 +master-host= local1 +master-port= 10001 
+master-user= root +master-password= "" +ndb-connectstring= local1:10003 +ndbcluster + +[client.1.slave] +socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock +port= 10004 + +[mysqld.2.slave] +datadir= /home/jonas/atrt/cluster.slave/mysqld.2 +socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock +port= 10005 +server-id= 4 +master-host= local1 +master-port= 10002 +master-user= root +master-password= "" +ndb-connectstring= local1:10003 +ndbcluster + +[client.2.slave] +socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock +port= 10005 + diff --git a/storage/ndb/test/run-test/files.cpp b/storage/ndb/test/run-test/files.cpp new file mode 100644 index 00000000000..231f7c88abc --- /dev/null +++ b/storage/ndb/test/run-test/files.cpp @@ -0,0 +1,383 @@ +#include "atrt.hpp" +#include +#include + +static bool create_directory(const char * path); + +bool +setup_directories(atrt_config& config, int setup) +{ + /** + * 0 = validate + * 1 = setup + * 2 = setup+clean + */ + for (size_t i = 0; i < config.m_clusters.size(); i++) + { + atrt_cluster& cluster = *config.m_clusters[i]; + for (size_t j = 0; j /dev/null 2>&1", + g_prefix, val); + if (system(tmp.c_str()) != 0) + { + g_logger.error("Failed to mysql_install_db for %s", + proc.m_proc.m_cwd.c_str()); + } + else + { + g_logger.info("mysql_install_db for %s", + proc.m_proc.m_cwd.c_str()); + } + } + } + } + } + + FILE * out = NULL; + if (config.m_generated == false) + { + g_logger.info("Nothing configured..."); + } + else + { + out = fopen(mycnf.c_str(), "a+"); + if (out == 0) + { + g_logger.error("Failed to open %s for append", mycnf.c_str()); + return false; + } + time_t now = time(0); + fprintf(out, "#\n# Generated by atrt\n"); + fprintf(out, "# %s\n", ctime(&now)); + } + + for (size_t i = 0; i < config.m_clusters.size(); i++) + { + atrt_cluster& cluster = *config.m_clusters[i]; + if (out) + { + Properties::Iterator it(&cluster.m_options.m_generated); + printfile(out, cluster.m_options.m_generated, + "[mysql_cluster%s]", cluster.m_name.c_str()); + } + + for (size_t j = 0; jm_name.c_str()); + break; + case atrt_process::AP_NDBD: + printfile(out, proc.m_options.m_generated, + "[cluster_config.ndbd.%d%s]", + proc.m_index, proc.m_cluster->m_name.c_str()); + break; + case atrt_process::AP_MYSQLD: + printfile(out, proc.m_options.m_generated, + "[mysqld.%d%s]", + proc.m_index, proc.m_cluster->m_name.c_str()); + break; + case atrt_process::AP_NDB_API: + break; + case atrt_process::AP_CLIENT: + printfile(out, proc.m_options.m_generated, + "[client.%d%s]", + proc.m_index, proc.m_cluster->m_name.c_str()); + break; + case atrt_process::AP_ALL: + case atrt_process::AP_CLUSTER: + abort(); + } + } + + /** + * Create env.sh + */ + BaseString tmp; + tmp.assfmt("%s/env.sh", proc.m_proc.m_cwd.c_str()); + char **env = BaseString::argify(0, proc.m_proc.m_env.c_str()); + if (env[0]) + { + Vector keys; + FILE *fenv = fopen(tmp.c_str(), "w+"); + if (fenv == 0) + { + g_logger.error("Failed to open %s for writing", tmp.c_str()); + return false; + } + for (size_t k = 0; env[k]; k++) + { + tmp = env[k]; + int pos = tmp.indexOf('='); + require(pos > 0); + env[k][pos] = 0; + fprintf(fenv, "%s=\"%s\"\n", env[k], env[k]+pos+1); + keys.push_back(env[k]); + free(env[k]); + } + fprintf(fenv, "PATH=%s/bin:%s/libexec:$PATH\n", g_prefix, g_prefix); + keys.push_back("PATH"); + for (size_t k = 0; k list; + if (tmp.split(list, "/") == 0) + { + g_logger.error("Failed to create directory: %s", tmp.c_str()); + return false; + } + + BaseString cwd = "/"; + for (size_t i = 0; i < 
list.size(); i++) + { + cwd.append(list[i].c_str()); + cwd.append("/"); + mkdir(cwd.c_str(), S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP); + } + + struct stat sbuf; + if (lstat(path, &sbuf) != 0 || + !S_ISDIR(sbuf.st_mode)) + { + g_logger.error("Failed to create directory: %s (%s)", + tmp.c_str(), + cwd.c_str()); + return false; + } + + return true; +} + +bool +remove_dir(const char * path, bool inclusive) +{ + DIR* dirp = opendir(path); + + if (dirp == 0) + { + if(errno != ENOENT) + { + g_logger.error("Failed to remove >%s< errno: %d %s", + path, errno, strerror(errno)); + return false; + } + return true; + } + + struct dirent * dp; + BaseString name = path; + name.append("/"); + while ((dp = readdir(dirp)) != NULL) + { + if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0)) + { + BaseString tmp = name; + tmp.append(dp->d_name); + + if (remove(tmp.c_str()) == 0) + { + continue; + } + + if (!remove_dir(tmp.c_str())) + { + closedir(dirp); + return false; + } + } + } + + closedir(dirp); + if (inclusive) + { + if (rmdir(path) != 0) + { + g_logger.error("Failed to remove >%s< errno: %d %s", + path, errno, strerror(errno)); + return false; + } + } + return true; +} + diff --git a/storage/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp index aef041d24d6..3836ab39f59 100644 --- a/storage/ndb/test/run-test/main.cpp +++ b/storage/ndb/test/run-test/main.cpp @@ -14,20 +14,19 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include -#include -#include -#include +#include "atrt.hpp" +#include +#include + #include -#include #include -#include "run-test.hpp" #include #include -#include -#include "CpcClient.hpp" +#include + +#define PATH_SEPARATOR "/" /** Global variables */ static const char progname[] = "ndb_atrt"; @@ -36,76 +35,198 @@ static const char * g_analyze_progname = "atrt-analyze-result.sh"; static const char * g_clear_progname = "atrt-clear-result.sh"; static const char * g_setup_progname = "atrt-setup.sh"; -static const char * g_setup_path = 0; -static const char * g_process_config_filename = "d.txt"; static const char * g_log_filename = 0; static const char * g_test_case_filename = 0; static const char * g_report_filename = 0; -static const char * g_default_user = 0; -static const char * g_default_base_dir = 0; -static int g_default_base_port = 0; -static int g_mysqld_use_base = 1; +static int g_do_setup = 0; +static int g_do_deploy = 0; +static int g_do_sshx = 0; +static int g_do_start = 0; +static int g_do_quit = 0; -static int g_report = 0; -static int g_verbosity = 0; +static int g_help = 0; +static int g_verbosity = 1; static FILE * g_report_file = 0; static FILE * g_test_case_file = stdin; +static int g_mode = 0; Logger g_logger; atrt_config g_config; +const char * g_user = 0; +int g_baseport = 10000; +int g_fqpn = 0; +int g_default_ports = 0; -static int g_mode_bench = 0; -static int g_mode_regression = 0; -static int g_mode_interactive = 0; -static int g_mode = 0; +const char * g_cwd = 0; +const char * g_basedir = 0; +const char * g_my_cnf = 0; +const char * g_prefix = 0; +const char * g_clusters = 0; +BaseString g_replicate; +const char *save_file = 0; +char *save_extra_file = 0; +const char *save_group_suffix = 0; +const char * g_dummy; +char * g_env_path = 0; -static -struct getargs args[] = { - { "process-config", 0, arg_string, &g_process_config_filename, 0, 0 }, - { "setup-path", 0, arg_string, &g_setup_path, 0, 0 }, - { 0, 'v', arg_counter, &g_verbosity, 0, 0 }, - { "log-file", 0, arg_string, &g_log_filename, 
0, 0 }, - { "testcase-file", 'f', arg_string, &g_test_case_filename, 0, 0 }, - { 0, 'R', arg_flag, &g_report, 0, 0 }, - { "report-file", 0, arg_string, &g_report_filename, 0, 0 }, - { "interactive", 'i', arg_flag, &g_mode_interactive, 0, 0 }, - { "regression", 'r', arg_flag, &g_mode_regression, 0, 0 }, - { "bench", 'b', arg_flag, &g_mode_bench, 0, 0 }, +/** Dummy, extern declared in ndb_opts.h */ +int g_print_full_config = 0, opt_ndb_shm; +my_bool opt_core; + +static struct my_option g_options[] = +{ + { "help", '?', "Display this help and exit.", + (gptr*) &g_help, (gptr*) &g_help, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "version", 'V', "Output version information and exit.", 0, 0, 0, + GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "clusters", 256, "Cluster", + (gptr*) &g_clusters, (gptr*) &g_clusters, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "replicate", 1024, "replicate", + (gptr*) &g_dummy, (gptr*) &g_dummy, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "log-file", 256, "log-file", + (gptr*) &g_log_filename, (gptr*) &g_log_filename, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "testcase-file", 'f', "testcase-file", + (gptr*) &g_test_case_filename, (gptr*) &g_test_case_filename, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "report-file", 'r', "report-file", + (gptr*) &g_report_filename, (gptr*) &g_report_filename, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "basedir", 256, "Base path", + (gptr*) &g_basedir, (gptr*) &g_basedir, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "baseport", 256, "Base port", + (gptr*) &g_baseport, (gptr*) &g_baseport, + 0, GET_INT, REQUIRED_ARG, g_baseport, 0, 0, 0, 0, 0}, + { "prefix", 256, "mysql install dir", + (gptr*) &g_prefix, (gptr*) &g_prefix, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "verbose", 'v', "Verbosity", + (gptr*) &g_verbosity, (gptr*) &g_verbosity, + 0, GET_INT, REQUIRED_ARG, g_verbosity, 0, 0, 0, 0, 0}, + { "configure", 256, "configure", + (gptr*) &g_do_setup, (gptr*) &g_do_setup, + 0, GET_INT, REQUIRED_ARG, g_do_setup, 0, 0, 0, 0, 0 }, + { "deploy", 256, "deploy", + (gptr*) &g_do_deploy, (gptr*) &g_do_deploy, + 0, GET_INT, REQUIRED_ARG, g_do_deploy, 0, 0, 0, 0, 0 }, + { "sshx", 256, "sshx", + (gptr*) &g_do_sshx, (gptr*) &g_do_sshx, + 0, GET_INT, REQUIRED_ARG, g_do_sshx, 0, 0, 0, 0, 0 }, + { "start", 256, "start", + (gptr*) &g_do_start, (gptr*) &g_do_start, + 0, GET_INT, REQUIRED_ARG, g_do_start, 0, 0, 0, 0, 0 }, + { "fqpn", 256, "Fully qualified path-names ", + (gptr*) &g_fqpn, (gptr*) &g_fqpn, + 0, GET_INT, REQUIRED_ARG, g_fqpn, 0, 0, 0, 0, 0 }, + { "default-ports", 256, "Use default ports when possible", + (gptr*) &g_default_ports, (gptr*) &g_default_ports, + 0, GET_INT, REQUIRED_ARG, g_default_ports, 0, 0, 0, 0, 0 }, + { "mode", 256, "Mode 0=interactive 1=regression 2=bench", + (gptr*) &g_mode, (gptr*) &g_mode, + 0, GET_INT, REQUIRED_ARG, g_mode, 0, 0, 0, 0, 0 }, + { "quit", 256, "Quit before starting tests", + (gptr*) &g_mode, (gptr*) &g_do_quit, + 0, GET_BOOL, NO_ARG, g_do_quit, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; -const int arg_count = 10; +const int p_ndb = atrt_process::AP_NDB_MGMD | atrt_process::AP_NDBD; +const int p_servers = atrt_process::AP_MYSQLD; +const int p_clients = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API; int -main(int argc, const char ** argv){ +main(int argc, char ** argv) +{ ndb_init(); bool restart = true; int lineno = 1; int test_no = 1; + int return_code = 1; - const int p_ndb = atrt_process::NDB_MGM | 
atrt_process::NDB_DB; - const int p_servers = atrt_process::MYSQL_SERVER | atrt_process::NDB_REP; - const int p_clients = atrt_process::MYSQL_CLIENT | atrt_process::NDB_API; - g_logger.setCategory(progname); g_logger.enable(Logger::LL_ALL); g_logger.createConsoleHandler(); if(!parse_args(argc, argv)) goto end; - + g_logger.info("Starting..."); - if(!setup_config(g_config)) + g_config.m_generated = false; + g_config.m_replication = g_replicate; + if (!setup_config(g_config)) + goto end; + + if (!configure(g_config, g_do_setup)) goto end; - g_logger.info("Connecting to hosts"); - if(!connect_hosts(g_config)) + g_logger.info("Setting up directories"); + if (!setup_directories(g_config, g_do_setup)) goto end; + if (g_do_setup) + { + g_logger.info("Setting up files"); + if (!setup_files(g_config, g_do_setup, g_do_sshx)) + goto end; + } + + if (g_do_deploy) + { + if (!deploy(g_config)) + goto end; + } + + if (g_do_quit) + { + return_code = 0; + goto end; + } + if(!setup_hosts(g_config)) goto end; + if (g_do_sshx) + { + g_logger.info("Starting xterm-ssh"); + if (!sshx(g_config, g_do_sshx)) + goto end; + + g_logger.info("Done...sleeping"); + while(true) + { + NdbSleep_SecSleep(1); + } + return_code = 0; + goto end; + } + + g_logger.info("Connecting to hosts"); + if(!connect_hosts(g_config)) + goto end; + + if (g_do_start && !g_test_case_filename) + { + g_logger.info("Starting server processes: %x", g_do_start); + if (!start(g_config, g_do_start)) + goto end; + + g_logger.info("Done...sleeping"); + while(true) + { + NdbSleep_SecSleep(1); + } + return_code = 0; + goto end; + } + + return_code = 0; + /** * Main loop */ @@ -114,37 +235,22 @@ main(int argc, const char ** argv){ * Do we need to restart ndb */ if(restart){ - g_logger.info("(Re)starting ndb processes"); + g_logger.info("(Re)starting server processes processes"); if(!stop_processes(g_config, ~0)) goto end; - - if(!start_processes(g_config, atrt_process::NDB_MGM)) + + if (!setup_directories(g_config, 2)) goto end; - if(!connect_ndb_mgm(g_config)){ - goto end; - } - - if(!start_processes(g_config, atrt_process::NDB_DB)) + if (!setup_files(g_config, 2, 1)) goto end; - if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED)) - goto end; - - for(Uint32 i = 0; i<3; i++) - if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED)) - goto started; - - goto end; - - started: - if(!start_processes(g_config, p_servers)) - goto end; - - g_logger.info("Ndb start completed"); + if (!start(g_config, p_ndb | p_servers)) + goto end; + g_logger.info("All servers start completed"); } - const int start_line = lineno; + // const int start_line = lineno; atrt_testcase test_case; if(!read_test_case(g_test_case_file, test_case, lineno)) goto end; @@ -165,7 +271,7 @@ main(int argc, const char ** argv){ const time_t start = time(0); time_t now = start; do { - if(!update_status(g_config, atrt_process::ALL)) + if(!update_status(g_config, atrt_process::AP_ALL)) goto end; int count = 0; @@ -189,7 +295,7 @@ main(int argc, const char ** argv){ result = ERR_MAX_TIME_ELAPSED; break; } - sleep(1); + NdbSleep_SecSleep(1); } while(true); const time_t elapsed = time(0) - start; @@ -197,7 +303,8 @@ main(int argc, const char ** argv){ if(!stop_processes(g_config, p_clients)) goto end; - if(!gather_result(g_config, &result)) + int tmp, *rp = result ? &tmp : &result; + if(!gather_result(g_config, rp)) goto end; g_logger.info("#%d %s(%d)", @@ -205,29 +312,35 @@ main(int argc, const char ** argv){ (result == 0 ? 
"OK" : "FAILED"), result); if(g_report_file != 0){ - fprintf(g_report_file, "%s %s ; %d ; %d ; %ld\n", - test_case.m_command.c_str(), - test_case.m_args.c_str(), - test_no, result, elapsed); + fprintf(g_report_file, "%s ; %d ; %d ; %ld\n", + test_case.m_name.c_str(), test_no, result, elapsed); fflush(g_report_file); } - if(test_case.m_report || g_mode_bench || (g_mode_regression && result)){ - BaseString tmp; - tmp.assfmt("result.%d", test_no); - if(rename("result", tmp.c_str()) != 0){ - g_logger.critical("Failed to rename %s as %s", - "result", tmp.c_str()); - goto end; - } - } - - if(g_mode_interactive && result){ + if(g_mode == 0 && result){ g_logger.info ("Encountered failed test in interactive mode - terminating"); break; } + BaseString resdir; + resdir.assfmt("result.%d", test_no); + remove_dir(resdir.c_str(), true); + + if(test_case.m_report || g_mode == 2 || (g_mode && result)) + { + if(rename("result", resdir.c_str()) != 0) + { + g_logger.critical("Failed to rename %s as %s", + "result", resdir.c_str()); + goto end; + } + } + else + { + remove_dir("result", true); + } + if(result != 0){ restart = true; } else { @@ -247,23 +360,124 @@ main(int argc, const char ** argv){ g_test_case_file = 0; } - stop_processes(g_config, atrt_process::ALL); + stop_processes(g_config, atrt_process::AP_ALL); + return return_code; +} + +static +my_bool +get_one_option(int arg, const struct my_option * opt, char * value) +{ + if (arg == 1024) + { + if (g_replicate.length()) + g_replicate.append(";"); + g_replicate.append(value); + return 1; + } return 0; } bool -parse_args(int argc, const char** argv){ - int optind = 0; - if(getarg(args, arg_count, argc, argv, &optind)) { - arg_printusage(args, arg_count, progname, ""); +parse_args(int argc, char** argv) +{ + char buf[2048]; + if (getcwd(buf, sizeof(buf)) == 0) + { + g_logger.error("Unable to get current working directory"); + return false; + } + g_cwd = strdup(buf); + + struct stat sbuf; + BaseString mycnf; + if (argc > 1 && lstat(argv[argc-1], &sbuf) == 0) + { + mycnf.append(g_cwd); + mycnf.append(PATH_SEPARATOR); + mycnf.append(argv[argc-1]); + } + else + { + mycnf.append(g_cwd); + mycnf.append(PATH_SEPARATOR); + mycnf.append("my.cnf"); + if (lstat(mycnf.c_str(), &sbuf) != 0) + { + g_logger.error("Unable to stat %s", mycnf.c_str()); + return false; + } + } + + g_logger.info("Bootstrapping using %s", mycnf.c_str()); + + const char *groups[] = { "atrt", 0 }; + int ret = load_defaults(mycnf.c_str(), groups, &argc, &argv); + + save_file = defaults_file; + save_extra_file = defaults_extra_file; + save_group_suffix = defaults_group_suffix; + + if (save_extra_file) + { + g_logger.error("--defaults-extra-file(%s) is not supported...", + save_extra_file); + return false; + } + + if (ret || handle_options(&argc, &argv, g_options, get_one_option)) + { + g_logger.error("Failed to load defaults/handle_options"); return false; } - if(g_log_filename != 0){ + if (argc >= 2) + { + const char * arg = argv[argc-2]; + while(* arg) + { + switch(* arg){ + case 'c': + g_do_setup = (g_do_setup == 0) ? 
1 : g_do_setup; + break; + case 'C': + g_do_setup = 2; + break; + case 'd': + g_do_deploy = 1; + break; + case 'x': + g_do_sshx = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API; + break; + case 'X': + g_do_sshx = atrt_process::AP_ALL; + break; + case 's': + g_do_start = p_ndb; + break; + case 'S': + g_do_start = p_ndb | p_servers; + break; + case 'f': + g_fqpn = 1; + break; + case 'q': + g_do_quit = 1; + break; + default: + g_logger.error("Unknown switch '%c'", *arg); + return false; + } + arg++; + } + } + + if(g_log_filename != 0) + { g_logger.removeConsoleHandler(); g_logger.addHandler(new FileLogHandler(g_log_filename)); } - + { int tmp = Logger::LL_WARNING - g_verbosity; tmp = (tmp < Logger::LL_DEBUG ? Logger::LL_DEBUG : tmp); @@ -271,252 +485,129 @@ parse_args(int argc, const char** argv){ g_logger.enable(Logger::LL_ON); g_logger.enable((Logger::LoggerLevel)tmp, Logger::LL_ALERT); } + + if(!g_basedir) + { + g_basedir = g_cwd; + g_logger.info("basedir not specified, using %s", g_basedir); + } - - - if(!g_process_config_filename){ - g_logger.critical("Process config not specified!"); - return false; + if (!g_prefix) + { + g_prefix = DEFAULT_PREFIX; } - if(!g_setup_path){ - char buf[1024]; - if(getcwd(buf, sizeof(buf))){ - g_setup_path = strdup(buf); - g_logger.info("Setup path not specified, using %s", buf); - } else { - g_logger.critical("Setup path not specified!\n"); + /** + * Add path to atrt-*.sh + */ + { + BaseString tmp; + const char* env = getenv("PATH"); + if (env && strlen(env)) + { + tmp.assfmt("PATH=%s:%s/mysql-test/ndb", + env, g_prefix); + } + else + { + tmp.assfmt("PATH=%s/mysql-test/ndb", g_prefix); + } + g_env_path = strdup(tmp.c_str()); + putenv(g_env_path); + } + + if (g_help) + { + my_print_help(g_options); + my_print_variables(g_options); + return 0; + } + + if(g_test_case_filename) + { + g_test_case_file = fopen(g_test_case_filename, "r"); + if(g_test_case_file == 0) + { + g_logger.critical("Unable to open file: %s", g_test_case_filename); + return false; + } + if (g_do_setup == 0) + g_do_setup = 2; + + if (g_do_start == 0) + g_do_start = p_ndb | p_servers; + + if (g_mode == 0) + g_mode = 1; + + if (g_do_sshx) + { + g_logger.critical("ssx specified...not possible with testfile"); return false; } } - if(g_report & !g_report_filename){ - g_report_filename = "report.txt"; - } + if (g_do_setup == 0) + { + BaseString tmp; + tmp.append(g_basedir); + tmp.append(PATH_SEPARATOR); + tmp.append("my.cnf"); + if (lstat(tmp.c_str(), &sbuf) != 0) + { + g_logger.error("Unable to stat %s", tmp.c_str()); + return false; + } - if(g_report_filename){ + if (!S_ISREG(sbuf.st_mode)) + { + g_logger.error("%s is not a regular file", tmp.c_str()); + return false; + } + + g_my_cnf = strdup(tmp.c_str()); + g_logger.info("Using %s", tmp.c_str()); + } + else + { + g_my_cnf = strdup(mycnf.c_str()); + } + + g_logger.info("Using --prefix=\"%s\"", g_prefix); + + if(g_report_filename) + { g_report_file = fopen(g_report_filename, "w"); - if(g_report_file == 0){ + if(g_report_file == 0) + { g_logger.critical("Unable to create report file: %s", g_report_filename); return false; } } - - if(g_test_case_filename){ - g_test_case_file = fopen(g_test_case_filename, "r"); - if(g_test_case_file == 0){ - g_logger.critical("Unable to open file: %s", g_test_case_filename); - return false; - } - } - int sum = g_mode_interactive + g_mode_regression + g_mode_bench; - if(sum == 0){ - g_mode_interactive = 1; - } - - if(sum > 1){ - g_logger.critical - ("Only one of bench/regression/interactive can be specified"); + 
if (g_clusters == 0) + { + g_logger.critical("No clusters specified"); return false; } - g_default_user = strdup(getenv("LOGNAME")); - + g_user = strdup(getenv("LOGNAME")); + return true; } - -static -atrt_host * -find(const BaseString& host, Vector & hosts){ - for(size_t i = 0; i split1; - if(tmp.split(split1, ":", 2) != 2){ - g_logger.warning("Invalid line %d in %s - ignoring", - lineno, g_process_config_filename); - continue; - } - - if(split1[0].trim() == "basedir"){ - g_default_base_dir = strdup(split1[1].trim().c_str()); - continue; - } - - if(split1[0].trim() == "baseport"){ - g_default_base_port = atoi(split1[1].trim().c_str()); - continue; - } - - if(split1[0].trim() == "user"){ - g_default_user = strdup(split1[1].trim().c_str()); - continue; - } - - if(split1[0].trim() == "mysqld-use-base" && split1[1].trim() == "no"){ - g_mysqld_use_base = 0; - continue; - } - - Vector hosts; - if(split1[1].trim().split(hosts) <= 0){ - g_logger.warning("Invalid line %d in %s - ignoring", - lineno, g_process_config_filename); - } - - // 1 - Check hosts - for(size_t i = 0; i tmp; - hosts[i].split(tmp, ":"); - BaseString hostname = tmp[0].trim(); - BaseString base_dir; - if(tmp.size() >= 2) - base_dir = tmp[1]; - else if(g_default_base_dir == 0){ - g_logger.critical("Basedir not specified..."); - return false; - } - - atrt_host * host_ptr; - if((host_ptr = find(hostname, config.m_hosts)) == 0){ - atrt_host host; - host.m_index = config.m_hosts.size(); - host.m_cpcd = new SimpleCpcClient(hostname.c_str(), 1234); - host.m_base_dir = (base_dir.empty() ? g_default_base_dir : base_dir); - host.m_user = g_default_user; - host.m_hostname = hostname.c_str(); - config.m_hosts.push_back(host); - } else { - if(!base_dir.empty() && (base_dir == host_ptr->m_base_dir)){ - g_logger.critical("Inconsistent base dir definition for host %s" - ", \"%s\" != \"%s\"", hostname.c_str(), - base_dir.c_str(), host_ptr->m_base_dir.c_str()); - return false; - } - } - } - - for(size_t i = 0; im_base_dir; - - const int index = config.m_processes.size() + 1; - - atrt_process proc; - proc.m_index = index; - proc.m_host = host; - proc.m_proc.m_id = -1; - proc.m_proc.m_type = "temporary"; - proc.m_proc.m_owner = "atrt"; - proc.m_proc.m_group = "group"; - proc.m_proc.m_cwd.assign(dir).append("/run/"); - proc.m_proc.m_stdout = "log.out"; - proc.m_proc.m_stderr = "2>&1"; - proc.m_proc.m_runas = proc.m_host->m_user; - proc.m_proc.m_ulimit = "c:unlimited"; - proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str()); - proc.m_proc.m_shutdown_options = ""; - proc.m_hostname = proc.m_host->m_hostname; - proc.m_ndb_mgm_port = g_default_base_port; - if(split1[0] == "mgm"){ - proc.m_type = atrt_process::NDB_MGM; - proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_mgmd"); - proc.m_proc.m_path.assign(dir).append("/libexec/ndb_mgmd"); - proc.m_proc.m_args = "--nodaemon -f config.ini"; - proc.m_proc.m_cwd.appfmt("%d.ndb_mgmd", index); - connect_string.appfmt("host=%s:%d;", - proc.m_hostname.c_str(), proc.m_ndb_mgm_port); - } else if(split1[0] == "ndb"){ - proc.m_type = atrt_process::NDB_DB; - proc.m_proc.m_name.assfmt("%d-%s", index, "ndbd"); - proc.m_proc.m_path.assign(dir).append("/libexec/ndbd"); - proc.m_proc.m_args = "--initial --nodaemon -n"; - proc.m_proc.m_cwd.appfmt("%d.ndbd", index); - } else if(split1[0] == "mysqld"){ - proc.m_type = atrt_process::MYSQL_SERVER; - proc.m_proc.m_name.assfmt("%d-%s", index, "mysqld"); - proc.m_proc.m_path.assign(dir).append("/libexec/mysqld"); - proc.m_proc.m_args = "--core-file --ndbcluster"; - 
proc.m_proc.m_cwd.appfmt("%d.mysqld", index); - proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice - } else if(split1[0] == "api"){ - proc.m_type = atrt_process::NDB_API; - proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api"); - proc.m_proc.m_path = ""; - proc.m_proc.m_args = ""; - proc.m_proc.m_cwd.appfmt("%d.ndb_api", index); - } else if(split1[0] == "mysql"){ - proc.m_type = atrt_process::MYSQL_CLIENT; - proc.m_proc.m_name.assfmt("%d-%s", index, "mysql"); - proc.m_proc.m_path = ""; - proc.m_proc.m_args = ""; - proc.m_proc.m_cwd.appfmt("%d.mysql", index); - } else { - g_logger.critical("%s:%d: Unhandled process type: %s", - g_process_config_filename, lineno, - split1[0].c_str()); - result = false; - goto end; - } - config.m_processes.push_back(proc); - } - } - - // Setup connect string - for(size_t i = 0; iconnect() != 0){ + if(config.m_hosts[i]->m_cpcd->connect() != 0){ g_logger.error("Unable to connect to cpc %s:%d", - config.m_hosts[i].m_cpcd->getHost(), - config.m_hosts[i].m_cpcd->getPort()); + config.m_hosts[i]->m_cpcd->getHost(), + config.m_hosts[i]->m_cpcd->getPort()); return false; } g_logger.debug("Connected to %s:%d", - config.m_hosts[i].m_cpcd->getHost(), - config.m_hosts[i].m_cpcd->getPort()); + config.m_hosts[i]->m_cpcd->getHost(), + config.m_hosts[i]->m_cpcd->getPort()); } return true; @@ -529,8 +620,10 @@ connect_ndb_mgm(atrt_process & proc){ g_logger.critical("Unable to create mgm handle"); return false; } - BaseString tmp = proc.m_hostname; - tmp.appfmt(":%d", proc.m_ndb_mgm_port); + BaseString tmp = proc.m_host->m_hostname; + const char * val; + proc.m_options.m_loaded.get("--PortNumber=", &val); + tmp.appfmt(":%s", val); if (ndb_mgm_set_connectstring(handle,tmp.c_str())) { @@ -551,8 +644,8 @@ connect_ndb_mgm(atrt_process & proc){ bool connect_ndb_mgm(atrt_config& config){ for(size_t i = 0; im_processes.size(); j++){ + atrt_process & proc = *cluster->m_processes[j]; + if((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){ + handle = proc.m_ndb_mgm_handle; + break; } - } while(state == 0); - NdbAutoPtr tmp(state); + } + + if(handle == 0){ + g_logger.critical("Unable to find mgm handle"); + return false; + } - min2 = goal; - for(int i = 0; ino_of_nodes; i++){ - if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB){ - const int s = remap(state->node_states[i].node_status); - min2 = (min2 < s ? 
min2 : s ); - - if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) || - s > NDB_MGM_NODE_STATUS_STARTED){ - g_logger.critical("Strange DB status during start: %d %d", i, min2); + if(goal == NDB_MGM_NODE_STATUS_STARTED){ + /** + * 1) wait NOT_STARTED + * 2) send start + * 3) wait STARTED + */ + if(!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED)) + return false; + + ndb_mgm_start(handle, 0, 0); + } + + struct ndb_mgm_cluster_state * state; + + time_t now = time(0); + time_t end = now + 360; + int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT); + int min2 = goal; + + while(now < end){ + /** + * 1) retreive current state + */ + state = 0; + do { + state = ndb_mgm_get_status(handle); + if(state == 0){ + const int err = ndb_mgm_get_latest_error(handle); + g_logger.error("Unable to poll db state: %d %s %s", + ndb_mgm_get_latest_error(handle), + ndb_mgm_get_latest_error_msg(handle), + ndb_mgm_get_latest_error_desc(handle)); + if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){ + g_logger.error("Reconnected..."); + continue; + } return false; } - - if(min2 < min){ - g_logger.critical("wait ndb failed node: %d %d %d %d", - state->node_states[i].node_id, min, min2, goal); + } while(state == 0); + NdbAutoPtr tmp(state); + + min2 = goal; + for(int j = 0; jno_of_nodes; j++){ + if(state->node_states[j].node_type == NDB_MGM_NODE_TYPE_NDB){ + const int s = remap(state->node_states[j].node_status); + min2 = (min2 < s ? min2 : s ); + + if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) || + s > NDB_MGM_NODE_STATUS_STARTED){ + g_logger.critical("Strange DB status during start: %d %d", + j, min2); + return false; + } + + if(min2 < min){ + g_logger.critical("wait ndb failed node: %d %d %d %d", + state->node_states[j].node_id, min, min2, goal); + } } } + + if(min2 < min){ + g_logger.critical("wait ndb failed %d %d %d", min, min2, goal); + return false; + } + + if(min2 == goal){ + cnt++; + goto next; + } + + min = min2; + now = time(0); } - if(min2 < min){ - g_logger.critical("wait ndb failed %d %d %d", min, min2, goal); - return false; - } - - if(min2 == goal){ - return true; - break; - } - - min = min2; - now = time(0); + g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal); + break; + +next: + ; } - - g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal); - - return false; + + return cnt == config.m_clusters.size(); } bool @@ -676,21 +779,19 @@ start_process(atrt_process & proc){ return false; } - BaseString path = proc.m_proc.m_cwd.substr(proc.m_host->m_base_dir.length()+BaseString("/run").length()); - BaseString tmp = g_setup_progname; - tmp.appfmt(" %s %s/%s/ %s", + tmp.appfmt(" %s %s/ %s", proc.m_host->m_hostname.c_str(), - g_setup_path, - path.c_str(), + proc.m_proc.m_cwd.c_str(), proc.m_proc.m_cwd.c_str()); - + + g_logger.debug("system(%s)", tmp.c_str()); const int r1 = system(tmp.c_str()); if(r1 != 0){ g_logger.critical("Failed to setup process"); return false; } - + { Properties reply; if(proc.m_host->m_cpcd->define_process(proc.m_proc, reply) != 0){ @@ -715,7 +816,7 @@ start_process(atrt_process & proc){ bool start_processes(atrt_config& config, int types){ for(size_t i = 0; ilist_processes(m_procs[i], p); + config.m_hosts[i]->m_cpcd->list_processes(m_procs[i], p); } for(size_t i = 0; i &h_procs= m_procs[proc.m_host->m_index]; bool found = false; @@ -798,7 +899,7 @@ update_status(atrt_config& config, int){ g_logger.error("update_status: not found"); g_logger.error("id: %d host: %s cmd: %s", proc.m_proc.m_id, - proc.m_hostname.c_str(), + proc.m_host->m_hostname.c_str(), 
proc.m_proc.m_path.c_str()); for(size_t j = 0; jm_base_dir.c_str(), - tc.m_command.c_str()); + for(; im_hostname.c_str(), + config.m_hosts[i]->m_basedir.c_str()); + + g_logger.debug("system(%s)", tmp.c_str()); + const int r1 = system(tmp.c_str()); + if(r1 != 0) + { + g_logger.critical("Failed to gather result!"); + return false; } } - const int r1 = system(tmp.c_str()); - if(r1 != 0){ - g_logger.critical("Failed to gather result"); - return false; - } - + g_logger.debug("system(%s)", g_analyze_progname); const int r2 = system(g_analyze_progname); - - if(r2 == -1 || r2 == (127 << 8)){ + + if(r2 == -1 || r2 == (127 << 8)) + { g_logger.critical("Failed to analyze results"); return false; } @@ -974,6 +1095,7 @@ gather_result(atrt_config& config, int * result){ bool setup_hosts(atrt_config& config){ + g_logger.debug("system(%s)", g_clear_progname); const int r1 = system(g_clear_progname); if(r1 != 0){ g_logger.critical("Failed to clear result"); @@ -982,21 +1104,143 @@ setup_hosts(atrt_config& config){ for(size_t i = 0; im_hostname.c_str(), + g_basedir, + config.m_hosts[i]->m_basedir.c_str()); + g_logger.debug("system(%s)", tmp.c_str()); const int r1 = system(tmp.c_str()); if(r1 != 0){ g_logger.critical("Failed to setup %s", - config.m_hosts[i].m_hostname.c_str()); + config.m_hosts[i]->m_hostname.c_str()); return false; } } return true; } +bool +deploy(atrt_config & config) +{ + for (size_t i = 0; im_hostname.c_str(), + g_prefix, + g_prefix); + + g_logger.info("rsyncing %s to %s", g_prefix, + config.m_hosts[i]->m_hostname.c_str()); + g_logger.debug("system(%s)", tmp.c_str()); + const int r1 = system(tmp.c_str()); + if(r1 != 0) + { + g_logger.critical("Failed to rsync %s to %s", + g_prefix, + config.m_hosts[i]->m_hostname.c_str()); + return false; + } + } + + return true; +} + +bool +sshx(atrt_config & config, unsigned mask) +{ + for (size_t i = 0; im_name.c_str(), + proc.m_host->m_hostname.c_str(), + proc.m_host->m_hostname.c_str(), + proc.m_proc.m_cwd.c_str()); + + g_logger.debug("system(%s)", tmp.c_str()); + const int r1 = system(tmp.c_str()); + if(r1 != 0) + { + g_logger.critical("Failed sshx (%s)", + tmp.c_str()); + return false; + } + NdbSleep_MilliSleep(300); // To prevent xlock problem + } + + return true; +} + +bool +start(atrt_config & config, unsigned proc_mask) +{ + if (proc_mask & atrt_process::AP_NDB_MGMD) + if(!start_processes(g_config, atrt_process::AP_NDB_MGMD)) + return false; + + if (proc_mask & atrt_process::AP_NDBD) + { + if(!connect_ndb_mgm(g_config)){ + return false; + } + + if(!start_processes(g_config, atrt_process::AP_NDBD)) + return false; + + if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED)) + return false; + + for(Uint32 i = 0; i<3; i++) + if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED)) + goto started; + return false; + } + +started: + if(!start_processes(g_config, p_servers & proc_mask)) + return false; + + return true; +} + +void +require(bool x) +{ + if (!x) + abort(); +} + template class Vector >; -template class Vector; -template class Vector; +template class Vector; +template class Vector; +template class Vector; diff --git a/storage/ndb/test/run-test/run-test.hpp b/storage/ndb/test/run-test/run-test.hpp deleted file mode 100644 index 2b259e83a60..00000000000 --- a/storage/ndb/test/run-test/run-test.hpp +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of 
the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef atrt_config_hpp -#define atrt_config_hpp - -#include -#include -#include -#include -#include -#include - -#undef MYSQL_CLIENT - -enum ErrorCodes { - ERR_OK = 0, - ERR_NDB_FAILED = 101, - ERR_SERVERS_FAILED = 102, - ERR_MAX_TIME_ELAPSED = 103 -}; - -struct atrt_host { - size_t m_index; - BaseString m_user; - BaseString m_base_dir; - BaseString m_hostname; - SimpleCpcClient * m_cpcd; -}; - -struct atrt_process { - size_t m_index; - BaseString m_hostname; - struct atrt_host * m_host; - - enum Type { - ALL = 255, - NDB_DB = 1, - NDB_API = 2, - NDB_MGM = 4, - NDB_REP = 8, - MYSQL_SERVER = 16, - MYSQL_CLIENT = 32 - } m_type; - - SimpleCpcClient::Process m_proc; - short m_ndb_mgm_port; - NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm -}; - -struct atrt_config { - BaseString m_key; - Vector m_hosts; - Vector m_processes; -}; - -struct atrt_testcase { - bool m_report; - bool m_run_all; - time_t m_max_time; - BaseString m_command; - BaseString m_args; -}; - -extern Logger g_logger; - -bool parse_args(int argc, const char** argv); -bool setup_config(atrt_config&); -bool connect_hosts(atrt_config&); -bool connect_ndb_mgm(atrt_config&); -bool wait_ndb(atrt_config&, int ndb_mgm_node_status); -bool start_processes(atrt_config&, int); -bool stop_processes(atrt_config&, int); -bool update_status(atrt_config&, int); -int is_running(atrt_config&, int); -bool gather_result(atrt_config&, int * result); - -bool read_test_case(FILE *, atrt_testcase&, int& line); -bool setup_test_case(atrt_config&, const atrt_testcase&); - -bool setup_hosts(atrt_config&); - -#endif diff --git a/storage/ndb/test/run-test/setup.cpp b/storage/ndb/test/run-test/setup.cpp new file mode 100644 index 00000000000..cbb7a34f171 --- /dev/null +++ b/storage/ndb/test/run-test/setup.cpp @@ -0,0 +1,965 @@ +#include "atrt.hpp" +#include +#include +#include +#include + +static NdbOut& operator<<(NdbOut& out, const atrt_process& proc); +static atrt_host * find(const char * hostname, Vector&); +static bool load_process(atrt_config&, atrt_cluster&, atrt_process::Type, + size_t idx, const char * hostname); +static bool load_options(int argc, char** argv, int type, atrt_options&); + +enum { + PO_NDB = atrt_options::AO_NDBCLUSTER + + ,PO_REP_SLAVE = 256 + ,PO_REP_MASTER = 512 + ,PO_REP = (atrt_options::AO_REPLICATION | PO_REP_SLAVE | PO_REP_MASTER) +}; + +struct proc_option +{ + const char * name; + int type; + int options; +}; + +static +struct proc_option f_options[] = { + { "--FileSystemPath=", atrt_process::AP_NDBD, 0 } + ,{ "--PortNumber=", atrt_process::AP_NDB_MGMD, 0 } + ,{ "--datadir=", atrt_process::AP_MYSQLD, 0 } + ,{ "--socket=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 } + ,{ "--port=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 } + ,{ "--server-id=", atrt_process::AP_MYSQLD, PO_REP } + ,{ "--log-bin", atrt_process::AP_MYSQLD, PO_REP_MASTER } + ,{ "--master-host=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ "--master-port=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ "--master-user=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ 
"--master-password=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ "--ndb-connectstring=", atrt_process::AP_MYSQLD | atrt_process::AP_CLUSTER + ,PO_NDB } + ,{ "--ndbcluster", atrt_process::AP_MYSQLD, PO_NDB } + ,{ 0, 0, 0 } +}; +const char * ndbcs = "--ndb-connectstring="; + +bool +setup_config(atrt_config& config) +{ + BaseString tmp(g_clusters); + Vector clusters; + tmp.split(clusters, ","); + + bool fqpn = clusters.size() > 1 || g_fqpn; + + size_t j,k; + for (size_t i = 0; im_name = clusters[i]; + if (fqpn) + { + cluster->m_dir.assfmt("cluster%s/", cluster->m_name.c_str()); + } + else + { + cluster->m_dir = ""; + } + + int argc = 1; + const char * argv[] = { "atrt", 0, 0 }; + + BaseString buf; + buf.assfmt("--defaults-group-suffix=%s", clusters[i].c_str()); + argv[argc++] = buf.c_str(); + char ** tmp = (char**)argv; + const char *groups[] = { "cluster_config", 0 }; + int ret = load_defaults(g_my_cnf, groups, &argc, &tmp); + if (ret) + { + g_logger.error("Unable to load defaults for cluster: %s", + clusters[i].c_str()); + return false; + } + + struct + { + atrt_process::Type type; + const char * name; + const char * value; + } proc_args[] = { + { atrt_process::AP_NDB_MGMD, "--ndb_mgmd=", 0 }, + { atrt_process::AP_NDBD, "--ndbd=", 0 }, + { atrt_process::AP_NDB_API, "--ndbapi=", 0 }, + { atrt_process::AP_NDB_API, "--api=", 0 }, + { atrt_process::AP_MYSQLD, "--mysqld=", 0 }, + { atrt_process::AP_ALL, 0, 0} + }; + + /** + * Find all processes... + */ + for (j = 0; j<(size_t)argc; j++) + { + for (k = 0; proc_args[k].name; k++) + { + if (!strncmp(tmp[j], proc_args[k].name, strlen(proc_args[k].name))) + { + proc_args[k].value = tmp[j] + strlen(proc_args[k].name); + break; + } + } + } + + /** + * Load each process + */ + for (j = 0; proc_args[j].name; j++) + { + if (proc_args[j].value) + { + BaseString tmp(proc_args[j].value); + Vector list; + tmp.split(list, ","); + for (k = 0; km_options); + } + } + return true; +} + +static +atrt_host * +find(const char * hostname, Vector & hosts){ + for (size_t i = 0; im_hostname == hostname){ + return hosts[i]; + } + } + + atrt_host* host = new atrt_host; + host->m_index = hosts.size(); + host->m_cpcd = new SimpleCpcClient(hostname, 1234); + host->m_basedir = g_basedir; + host->m_user = g_user; + host->m_hostname = hostname; + hosts.push_back(host); + return host; +} + +static +bool +load_process(atrt_config& config, atrt_cluster& cluster, + atrt_process::Type type, + size_t idx, + const char * hostname) +{ + atrt_host * host_ptr = find(hostname, config.m_hosts); + atrt_process *proc_ptr = new atrt_process; + + config.m_processes.push_back(proc_ptr); + host_ptr->m_processes.push_back(proc_ptr); + cluster.m_processes.push_back(proc_ptr); + + atrt_process& proc = *proc_ptr; + + const size_t proc_no = config.m_processes.size(); + proc.m_index = idx; + proc.m_type = type; + proc.m_host = host_ptr; + proc.m_cluster = &cluster; + proc.m_options.m_features = 0; + proc.m_rep_src = 0; + proc.m_proc.m_id = -1; + proc.m_proc.m_type = "temporary"; + proc.m_proc.m_owner = "atrt"; + proc.m_proc.m_group = cluster.m_name.c_str(); + proc.m_proc.m_stdout = "log.out"; + proc.m_proc.m_stderr = "2>&1"; + proc.m_proc.m_runas = proc.m_host->m_user; + proc.m_proc.m_ulimit = "c:unlimited"; + proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", g_prefix); + proc.m_proc.m_env.appfmt(" MYSQL_HOME=%s", g_basedir); + proc.m_proc.m_shutdown_options = ""; + + int argc = 1; + const char * argv[] = { "atrt", 0, 0 }; + + BaseString buf[10]; + char ** tmp = (char**)argv; + const char *groups[] = { 
0, 0, 0, 0 }; + switch(type){ + case atrt_process::AP_NDB_MGMD: + groups[0] = "cluster_config"; + buf[1].assfmt("cluster_config.ndb_mgmd.%d", idx); + groups[1] = buf[1].c_str(); + buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str()); + argv[argc++] = buf[0].c_str(); + break; + case atrt_process::AP_NDBD: + groups[0] = "cluster_config"; + buf[1].assfmt("cluster_config.ndbd.%d", idx); + groups[1] = buf[1].c_str(); + buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str()); + argv[argc++] = buf[0].c_str(); + break; + case atrt_process::AP_MYSQLD: + groups[0] = "mysqld"; + groups[1] = "mysql_cluster"; + buf[0].assfmt("--defaults-group-suffix=.%d%s",idx,cluster.m_name.c_str()); + argv[argc++] = buf[0].c_str(); + break; + case atrt_process::AP_CLIENT: + buf[0].assfmt("client.%d%s", idx, cluster.m_name.c_str()); + groups[0] = buf[0].c_str(); + break; + case atrt_process::AP_NDB_API: + break; + default: + g_logger.critical("Unhandled process type: %d", type); + return false; + } + + int ret = load_defaults(g_my_cnf, groups, &argc, &tmp); + if (ret) + { + g_logger.error("Unable to load defaults for cluster: %s", + cluster.m_name.c_str()); + return false; + } + + load_options(argc, tmp, type, proc.m_options); + + BaseString dir; + dir.assfmt("%s/%s", + proc.m_host->m_basedir.c_str(), + cluster.m_dir.c_str()); + + switch(type){ + case atrt_process::AP_NDB_MGMD: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_mgmd"); + proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndb_mgmd"); + proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf", + proc.m_host->m_basedir.c_str()); + proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s", + cluster.m_name.c_str()); + proc.m_proc.m_args.append(" --nodaemon --mycnf"); + proc.m_proc.m_cwd.assfmt("%sndb_mgmd.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s", + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_NDBD: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndbd"); + proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndbd"); + proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf", + proc.m_host->m_basedir.c_str()); + proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s", + cluster.m_name.c_str()); + proc.m_proc.m_args.append(" --nodaemon -n"); + proc.m_proc.m_cwd.assfmt("%sndbd.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s", + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_MYSQLD: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysqld"); + proc.m_proc.m_path.assign(g_prefix).append("/libexec/mysqld"); + proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf", + proc.m_host->m_basedir.c_str()); + proc.m_proc.m_args.appfmt(" --defaults-group-suffix=.%d%s", + proc.m_index, + cluster.m_name.c_str()); + proc.m_proc.m_args.append(" --core-file"); + proc.m_proc.m_cwd.appfmt("%smysqld.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s", + proc.m_index, + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_NDB_API: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_api"); + proc.m_proc.m_path = ""; + proc.m_proc.m_args = ""; + proc.m_proc.m_cwd.appfmt("%sndb_api.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s", + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_CLIENT: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysql"); + proc.m_proc.m_path = ""; + proc.m_proc.m_args 
= ""; + proc.m_proc.m_cwd.appfmt("%s/client.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s", + proc.m_index, + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_ALL: + case atrt_process::AP_CLUSTER: + g_logger.critical("Unhandled process type: %d", proc.m_type); + return false; + } + + if (proc.m_proc.m_path.length()) + { + proc.m_proc.m_env.appfmt(" CMD=\"%s", proc.m_proc.m_path.c_str()); + if (proc.m_proc.m_args.length()) + proc.m_proc.m_env.append(" "); + proc.m_proc.m_env.append(proc.m_proc.m_args); + proc.m_proc.m_env.append("\" "); + } + + if (type == atrt_process::AP_MYSQLD) + { + /** + * Add a client for each mysqld + */ + if (!load_process(config, cluster, atrt_process::AP_CLIENT, idx, hostname)) + { + return false; + } + } + + if (type == atrt_process::AP_CLIENT) + { + proc.m_mysqld = cluster.m_processes[cluster.m_processes.size()-2]; + } + + return true; +} + +static +bool +load_options(int argc, char** argv, int type, atrt_options& opts) +{ + for (size_t i = 0; i<(size_t)argc; i++) + { + for (size_t j = 0; f_options[j].name; j++) + { + const char * name = f_options[j].name; + const size_t len = strlen(name); + + if ((f_options[j].type & type) && strncmp(argv[i], name, len) == 0) + { + opts.m_loaded.put(name, argv[i]+len, true); + break; + } + } + } + return true; +} + +struct proc_rule_ctx +{ + int m_setup; + atrt_config* m_config; + atrt_host * m_host; + atrt_cluster* m_cluster; + atrt_process* m_process; +}; + +struct proc_rule +{ + int type; + bool (* func)(Properties& prop, proc_rule_ctx&, int extra); + int extra; +}; + +static bool pr_check_replication(Properties&, proc_rule_ctx&, int); +static bool pr_check_features(Properties&, proc_rule_ctx&, int); +static bool pr_fix_client(Properties&, proc_rule_ctx&, int); +static bool pr_proc_options(Properties&, proc_rule_ctx&, int); +static bool pr_fix_ndb_connectstring(Properties&, proc_rule_ctx&, int); +static bool pr_set_ndb_connectstring(Properties&, proc_rule_ctx&, int); +static bool pr_check_proc(Properties&, proc_rule_ctx&, int); + +static +proc_rule f_rules[] = +{ + { atrt_process::AP_CLUSTER, pr_check_features, 0 } + ,{ atrt_process::AP_MYSQLD, pr_check_replication, 0 } + ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options, + ~(PO_REP | PO_NDB) } + ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options, PO_REP } + ,{ atrt_process::AP_CLIENT, pr_fix_client, 0 } + ,{ atrt_process::AP_CLUSTER, pr_fix_ndb_connectstring, 0 } + ,{ atrt_process::AP_MYSQLD, pr_set_ndb_connectstring, 0 } + ,{ atrt_process::AP_ALL, pr_check_proc, 0 } + ,{ 0, 0, 0 } +}; + +bool +configure(atrt_config& config, int setup) +{ + Properties props; + + for (size_t i = 0; f_rules[i].func; i++) + { + bool ok = true; + proc_rule_ctx ctx; + bzero(&ctx, sizeof(ctx)); + ctx.m_setup = setup; + ctx.m_config = &config; + + for (size_t j = 0; j < config.m_clusters.size(); j++) + { + ctx.m_cluster = config.m_clusters[j]; + + if (f_rules[i].type & atrt_process::AP_CLUSTER) + { + g_logger.debug("applying rule %d to cluster %s", i, + ctx.m_cluster->m_name.c_str()); + if (! 
(* f_rules[i].func)(props, ctx, f_rules[i].extra)) + ok = false; + } + else + { + atrt_cluster& cluster = *config.m_clusters[j]; + for (size_t k = 0; k src; + Vector dst; + tmp.split(src, "."); + + if (src.size() != 2) + { + return 0; + } + atrt_cluster* cluster = 0; + BaseString cl; + cl.appfmt(".%s", src[1].c_str()); + for (size_t i = 0; im_name == cl) + { + cluster = config.m_clusters[i]; + break; + } + } + + if (cluster == 0) + { + return 0; + } + + int idx = atoi(src[0].c_str()) - 1; + for (size_t i = 0; im_processes.size(); i++) + { + if (cluster->m_processes[i]->m_type & type) + { + if (idx == 0) + return cluster->m_processes[i]; + else + idx --; + } + } + + return 0; +} + +static +bool +pr_check_replication(Properties& props, proc_rule_ctx& ctx, int) +{ + if (! (ctx.m_config->m_replication == "")) + { + Vector list; + ctx.m_config->m_replication.split(list, ";"); + atrt_config& config = *ctx.m_config; + + ctx.m_config->m_replication = ""; + + const char * msg = "Invalid replication specification"; + for (size_t i = 0; i rep; + list[i].split(rep, ":"); + if (rep.size() != 2) + { + g_logger.error("%s: %s (split: %d)", msg, list[i].c_str(), rep.size()); + return false; + } + + atrt_process* src = find(config, atrt_process::AP_MYSQLD,rep[0].c_str()); + atrt_process* dst = find(config, atrt_process::AP_MYSQLD,rep[1].c_str()); + + if (src == 0 || dst == 0) + { + g_logger.error("%s: %s (%d %d)", + msg, list[i].c_str(), src != 0, dst != 0); + return false; + } + + + if (dst->m_rep_src != 0) + { + g_logger.error("%s: %s : %s already has replication src (%s)", + msg, + list[i].c_str(), + dst->m_proc.m_cwd.c_str(), + dst->m_rep_src->m_proc.m_cwd.c_str()); + return false; + } + + dst->m_rep_src = src; + src->m_rep_dst.push_back(dst); + + src->m_options.m_features |= PO_REP_MASTER; + dst->m_options.m_features |= PO_REP_SLAVE; + } + } + return true; +} + +static +bool +pr_check_features(Properties& props, proc_rule_ctx& ctx, int) +{ + int features = 0; + atrt_cluster& cluster = *ctx.m_cluster; + for (size_t i = 0; im_type == atrt_process::AP_NDB_MGMD || + cluster.m_processes[i]->m_type == atrt_process::AP_NDB_API || + cluster.m_processes[i]->m_type == atrt_process::AP_NDBD) + { + features |= atrt_options::AO_NDBCLUSTER; + break; + } + } + + if (features) + { + cluster.m_options.m_features |= features; + for (size_t i = 0; im_options.m_features |= features; + } + } + return true; +} + +static +bool +pr_fix_client(Properties& props, proc_rule_ctx& ctx, int) +{ + for (size_t i = 0; f_options[i].name; i++) + { + proc_option& opt = f_options[i]; + const char * name = opt.name; + if (opt.type & atrt_process::AP_CLIENT) + { + const char * val; + atrt_process& proc = *ctx.m_process; + if (!proc.m_options.m_loaded.get(name, &val)) + { + require(proc.m_mysqld->m_options.m_loaded.get(name, &val)); + proc.m_options.m_loaded.put(name, val); + proc.m_options.m_generated.put(name, val); + } + } + } + + return true; +} + +static +Uint32 +try_default_port(atrt_process& proc, const char * name) +{ + Uint32 port = + strcmp(name, "--port=") == 0 ? 3306 : + strcmp(name, "--PortNumber=") == 0 ? 
1186 : + 0; + + atrt_host * host = proc.m_host; + for (size_t i = 0; im_processes.size(); i++) + { + const char * val; + if (host->m_processes[i]->m_options.m_loaded.get(name, &val)) + { + if ((Uint32)atoi(val) == port) + return 0; + } + } + return port; +} + +static +bool +generate(atrt_process& proc, const char * name, Properties& props) +{ + atrt_options& opts = proc.m_options; + if (strcmp(name, "--port=") == 0 || + strcmp(name, "--PortNumber=") == 0) + { + Uint32 val; + if (g_default_ports == 0 || (val = try_default_port(proc, name)) == 0) + { + val = g_baseport; + props.get("--PortNumber=", &val); + props.put("--PortNumber=", (val + 1), true); + } + + char buf[255]; + snprintf(buf, sizeof(buf), "%u", val); + opts.m_loaded.put(name, buf); + opts.m_generated.put(name, buf); + return true; + } + else if (strcmp(name, "--datadir=") == 0) + { + opts.m_loaded.put(name, proc.m_proc.m_cwd.c_str()); + opts.m_generated.put(name, proc.m_proc.m_cwd.c_str()); + return true; + } + else if (strcmp(name, "--FileSystemPath=") == 0) + { + BaseString dir; + dir.append(proc.m_host->m_basedir); + dir.append("/"); + dir.append(proc.m_cluster->m_dir); + opts.m_loaded.put(name, dir.c_str()); + opts.m_generated.put(name, dir.c_str()); + return true; + } + else if (strcmp(name, "--socket=") == 0) + { + const char * sock = 0; + if (g_default_ports) + { + sock = "/tmp/mysql.sock"; + atrt_host * host = proc.m_host; + for (size_t i = 0; im_processes.size(); i++) + { + const char * val; + if (host->m_processes[i]->m_options.m_loaded.get(name, &val)) + { + if (strcmp(sock, val) == 0) + { + sock = 0; + break; + } + } + } + } + + BaseString tmp; + if (sock == 0) + { + tmp.assfmt("%s/mysql.sock", proc.m_proc.m_cwd.c_str()); + sock = tmp.c_str(); + } + + opts.m_loaded.put(name, sock); + opts.m_generated.put(name, sock); + return true; + } + else if (strcmp(name, "--server-id=") == 0) + { + Uint32 val = 1; + props.get(name, &val); + char buf[255]; + snprintf(buf, sizeof(buf), "%u", val); + opts.m_loaded.put(name, buf); + opts.m_generated.put(name, buf); + props.put(name, (val + 1), true); + return true; + } + else if (strcmp(name, "--log-bin") == 0) + { + opts.m_loaded.put(name, ""); + opts.m_generated.put(name, ""); + return true; + } + else if (strcmp(name, "--master-host=") == 0) + { + require(proc.m_rep_src != 0); + opts.m_loaded.put(name, proc.m_rep_src->m_host->m_hostname.c_str()); + opts.m_generated.put(name, proc.m_rep_src->m_host->m_hostname.c_str()); + return true; + } + else if (strcmp(name, "--master-port=") == 0) + { + const char* val; + require(proc.m_rep_src->m_options.m_loaded.get("--port=", &val)); + opts.m_loaded.put(name, val); + opts.m_generated.put(name, val); + return true; + } + else if (strcmp(name, "--master-user=") == 0) + { + opts.m_loaded.put(name, "root"); + opts.m_generated.put(name, "root"); + return true; + } + else if (strcmp(name, "--master-password=") == 0) + { + opts.m_loaded.put(name, "\"\""); + opts.m_generated.put(name, "\"\""); + return true; + } + + g_logger.warning("Unknown parameter: %s", name); + return true; +} + +static +bool +pr_proc_options(Properties& props, proc_rule_ctx& ctx, int extra) +{ + for (size_t i = 0; f_options[i].name; i++) + { + proc_option& opt = f_options[i]; + atrt_process& proc = *ctx.m_process; + const char * name = opt.name; + if (opt.type & proc.m_type) + { + if (opt.options == 0 || + (opt.options & extra & proc.m_options.m_features)) + { + const char * val; + if (!proc.m_options.m_loaded.get(name, &val)) + { + generate(proc, name, props); + } + } + } 
+ } + return true; +} + +static +bool +pr_fix_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int) +{ + const char * val; + atrt_cluster& cluster = *ctx.m_cluster; + + if (cluster.m_options.m_features & atrt_options::AO_NDBCLUSTER) + { + if (!cluster.m_options.m_loaded.get(ndbcs, &val)) + { + /** + * Construct connect string for this cluster + */ + BaseString str; + for (size_t i = 0; im_type == atrt_process::AP_NDB_MGMD) + { + if (str.length()) + { + str.append(";"); + } + const char * port; + require(tmp->m_options.m_loaded.get("--PortNumber=", &port)); + str.appfmt("%s:%s", tmp->m_host->m_hostname.c_str(), port); + } + } + cluster.m_options.m_loaded.put(ndbcs, str.c_str()); + cluster.m_options.m_generated.put(ndbcs, str.c_str()); + cluster.m_options.m_loaded.get(ndbcs, &val); + } + + for (size_t i = 0; im_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s", + val); + } + } + return true; +} + +static +bool +pr_set_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int) +{ + const char * val; + + atrt_process& proc = *ctx.m_process; + if (proc.m_options.m_features & atrt_options::AO_NDBCLUSTER) + { + if (!proc.m_options.m_loaded.get(ndbcs, &val)) + { + require(proc.m_cluster->m_options.m_loaded.get(ndbcs, &val)); + proc.m_options.m_loaded.put(ndbcs, val); + proc.m_options.m_generated.put(ndbcs, val); + } + + if (!proc.m_options.m_loaded.get("--ndbcluster", &val)) + { + proc.m_options.m_loaded.put("--ndbcluster", ""); + proc.m_options.m_generated.put("--ndbcluster", ""); + } + } + return true; +} + +static +bool +pr_check_proc(Properties& props, proc_rule_ctx& ctx, int) +{ + bool ok = true; + bool generated = false; + const int setup = ctx.m_setup; + atrt_process& proc = *ctx.m_process; + for (size_t i = 0; f_options[i].name; i++) + { + proc_option& opt = f_options[i]; + const char * name = opt.name; + if ((ctx.m_process->m_type & opt.type) && + (opt.options == 0 || (ctx.m_process->m_options.m_features & opt.options))) + { + const char * val; + if (!proc.m_options.m_loaded.get(name, &val)) + { + ok = false; + g_logger.warning("Missing paramter: %s for %s", + name, proc.m_proc.m_cwd.c_str()); + } + else if (proc.m_options.m_generated.get(name, &val)) + { + if (setup == 0) + { + ok = false; + g_logger.warning("Missing paramter: %s for %s", + name, proc.m_proc.m_cwd.c_str()); + } + else + { + generated = true; + } + } + } + } + + if (generated) + { + ctx.m_config->m_generated = true; + } + + //ndbout << proc << endl; + + return ok; +} + + +NdbOut& +operator<<(NdbOut& out, const atrt_process& proc) +{ + out << "[ atrt_process: "; + switch(proc.m_type){ + case atrt_process::AP_NDB_MGMD: + out << "ndb_mgmd"; + break; + case atrt_process::AP_NDBD: + out << "ndbd"; + break; + case atrt_process::AP_MYSQLD: + out << "mysqld"; + break; + case atrt_process::AP_NDB_API: + out << "ndbapi"; + break; + case atrt_process::AP_CLIENT: + out << "client"; + break; + default: + out << ""; + } + + out << " cluster: " << proc.m_cluster->m_name.c_str() + << " host: " << proc.m_host->m_hostname.c_str() + << endl << " cwd: " << proc.m_proc.m_cwd.c_str() + << endl << " path: " << proc.m_proc.m_path.c_str() + << endl << " args: " << proc.m_proc.m_args.c_str() + << endl << " env: " << proc.m_proc.m_env.c_str() << endl; + + proc.m_options.m_generated.print(stdout, "generated: "); + + out << " ]"; + +#if 0 + proc.m_index = 0; //idx; + proc.m_host = host_ptr; + proc.m_cluster = cluster; + proc.m_proc.m_id = -1; + proc.m_proc.m_type = "temporary"; + proc.m_proc.m_owner = "atrt"; + proc.m_proc.m_group = 
cluster->m_name.c_str(); + proc.m_proc.m_cwd.assign(dir).append("/atrt/").append(cluster->m_dir); + proc.m_proc.m_stdout = "log.out"; + proc.m_proc.m_stderr = "2>&1"; + proc.m_proc.m_runas = proc.m_host->m_user; + proc.m_proc.m_ulimit = "c:unlimited"; + proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir); + proc.m_proc.m_shutdown_options = ""; +#endif + + return out; +} + diff --git a/storage/ndb/test/run-test/test-tests.txt b/storage/ndb/test/run-test/test-tests.txt new file mode 100644 index 00000000000..b57023fc0c1 --- /dev/null +++ b/storage/ndb/test/run-test/test-tests.txt @@ -0,0 +1,24 @@ +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + +max-time: 1800 +cmd: testMgm +args: -n SingleUserMode T1 + +# +# +# SYSTEM RESTARTS +# +max-time: 1500 +cmd: testSystemRestart +args: -n SR3 T6 + +max-time: 1500 +cmd: testSystemRestart +args: -n SR4 T6 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + diff --git a/storage/ndb/test/tools/Makefile.am b/storage/ndb/test/tools/Makefile.am index 8c451c0b6a1..386a59f723f 100644 --- a/storage/ndb/test/tools/Makefile.am +++ b/storage/ndb/test/tools/Makefile.am @@ -38,6 +38,7 @@ include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am ndb_cpcc_LDADD = $(LDADD) +ndb_cpcc_LDFLAGS = -static # Don't update the files from bitkeeper %::SCCS/s.% From 368596042569dd2b8945474730591e51fdd4bfe3 Mon Sep 17 00:00:00 2001 From: "ndbdev@ndbmaster.mysql.com" <> Date: Tue, 13 Feb 2007 14:32:07 +0100 Subject: [PATCH 07/21] adopt to changes in load_defaults --- storage/ndb/test/run-test/main.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp index 3836ab39f59..b44694fb24e 100644 --- a/storage/ndb/test/run-test/main.cpp +++ b/storage/ndb/test/run-test/main.cpp @@ -414,9 +414,9 @@ parse_args(int argc, char** argv) const char *groups[] = { "atrt", 0 }; int ret = load_defaults(mycnf.c_str(), groups, &argc, &argv); - save_file = defaults_file; - save_extra_file = defaults_extra_file; - save_group_suffix = defaults_group_suffix; + save_file = my_defaults_file; + save_extra_file = my_defaults_extra_file; + save_group_suffix = my_defaults_group_suffix; if (save_extra_file) { From 0a4505199cbe0f70a3cb25721a61baed1cab0618 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Wed, 14 Feb 2007 11:05:38 +0700 Subject: [PATCH 08/21] Bug#26293 cluster mgmt node sometimes doesn't receive events from all nodes on restart - signals where sometimes sent too early when setting up subscriptions --- .../kernel/signaldata/DumpStateOrd.hpp | 4 + ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 20 +++- ndb/src/mgmsrv/MgmtSrvr.cpp | 94 +++++++++++++++---- ndb/src/ndbapi/ClusterMgr.cpp | 5 +- ndb/src/ndbapi/ClusterMgr.hpp | 1 + ndb/src/ndbapi/SignalSender.cpp | 2 + ndb/src/ndbapi/SignalSender.hpp | 2 +- 7 files changed, 105 insertions(+), 23 deletions(-) diff --git a/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/ndb/include/kernel/signaldata/DumpStateOrd.hpp index 8d0961d1c27..5a1d9ece9cf 100644 --- a/ndb/include/kernel/signaldata/DumpStateOrd.hpp +++ b/ndb/include/kernel/signaldata/DumpStateOrd.hpp @@ -107,6 +107,10 @@ public: CmvmiDumpLongSignalMemory = 2601, CmvmiSetRestartOnErrorInsert = 2602, CmvmiTestLongSigWithDelay = 2603, + CmvmiDumpSubscriptions = 2604, /* note: done to respective outfile + to be able to debug if events + for some reason does not end up + in clusterlog */ // 7000 DIH // 7001 DIH // 7002 DIH diff --git 
a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 5dd1e527dd2..a9d9c991ca3 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -897,7 +897,7 @@ void Cmvmi::execSET_VAR_REQ(Signal* signal) case TimeToWaitAlive: // QMGR - case HeartbeatIntervalDbDb: // TODO ev till Ndbcnt också + case HeartbeatIntervalDbDb: // TODO possibly Ndbcnt too case HeartbeatIntervalDbApi: case ArbitTimeout: sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB); @@ -1105,6 +1105,24 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal) } } + if (arg == DumpStateOrd::CmvmiDumpSubscriptions) + { + SubscriberPtr ptr; + subscribers.first(ptr); + g_eventLogger.info("List subscriptions:"); + while(ptr.i != RNIL) + { + g_eventLogger.info("Subscription: %u, nodeId: %u, ref: 0x%x", + ptr.i, refToNode(ptr.p->blockRef), ptr.p->blockRef); + for(Uint32 i = 0; i < LogLevel::LOGLEVEL_CATEGORIES; i++) + { + Uint32 level = ptr.p->logLevel.getLogLevel((LogLevel::EventCategory)i); + g_eventLogger.info("Category %u Level %u", i, level); + } + subscribers.next(ptr); + } + } + if (arg == DumpStateOrd::CmvmiDumpLongSignalMemory){ infoEvent("Cmvmi: g_sectionSegmentPool size: %d free: %d", g_sectionSegmentPool.getSize(), diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 0ee59f70885..5818e7fe3ae 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -704,7 +704,7 @@ int MgmtSrvr::okToSendTo(NodeId nodeId, bool unCond) return WRONG_PROCESS_TYPE; // Check if we have contact with it if(unCond){ - if(theFacade->theClusterMgr->getNodeInfo(nodeId).connected) + if(theFacade->theClusterMgr->getNodeInfo(nodeId).m_api_reg_conf) return 0; } else if (theFacade->get_node_alive(nodeId) == true) @@ -1562,32 +1562,85 @@ MgmtSrvr::status(int nodeId, } int -MgmtSrvr::setEventReportingLevelImpl(int nodeId, +MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg, const EventSubscribeReq& ll) { SignalSender ss(theFacade); - ss.lock(); - - SimpleSignal ssig; - EventSubscribeReq * dst = - CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend()); - ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ, - EventSubscribeReq::SignalLength); - *dst = ll; - - NodeBitmask nodes; + NdbNodeBitmask nodes; + int retries = 30; nodes.clear(); - Uint32 max = (nodeId == 0) ? 
(nodeId = 1, MAX_NDB_NODES) : nodeId; - for(; (Uint32) nodeId <= max; nodeId++) + while (1) { - if (nodeTypes[nodeId] != NODE_TYPE_DB) - continue; - if (okToSendTo(nodeId, true)) - continue; - if (ss.sendSignal(nodeId, &ssig) == SEND_OK) + Uint32 nodeId, max; + ss.lock(); + SimpleSignal ssig; + EventSubscribeReq * dst = + CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend()); + ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ, + EventSubscribeReq::SignalLength); + *dst = ll; + + if (nodeId_arg == 0) { - nodes.set(nodeId); + // all nodes + nodeId = 1; + max = MAX_NDB_NODES; } + else + { + // only one node + max = nodeId = nodeId_arg; + } + // first make sure nodes are sendable + for(; nodeId <= max; nodeId++) + { + if (nodeTypes[nodeId] != NODE_TYPE_DB) + continue; + if (okToSendTo(nodeId, true)) + { + if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false) + { + // node not connected we can safely skip this one + continue; + } + // api_reg_conf not recevied yet, need to retry + break; + } + } + if (nodeId <= max) + { + if (--retries) + { + ss.unlock(); + NdbSleep_MilliSleep(100); + continue; + } + return SEND_OR_RECEIVE_FAILED; + } + + if (nodeId_arg == 0) + { + // all nodes + nodeId = 1; + max = MAX_NDB_NODES; + } + else + { + // only one node + max = nodeId = nodeId_arg; + } + // now send to all sendable nodes nodes + // note, lock is held, so states have not changed + for(; (Uint32) nodeId <= max; nodeId++) + { + if (nodeTypes[nodeId] != NODE_TYPE_DB) + continue; + if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false) + continue; // node is not connected, skip + if (ss.sendSignal(nodeId, &ssig) == SEND_OK) + nodes.set(nodeId); + } + break; } if (nodes.isclear()) @@ -1598,6 +1651,7 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId, int error = 0; while (!nodes.isclear()) { + Uint32 nodeId; SimpleSignal *signal = ss.waitFor(); int gsn = signal->readSignalNumber(); nodeId = refToNode(signal->header.theSendersBlockRef); diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp index 2ff27ca893e..060e5f71b6c 100644 --- a/ndb/src/ndbapi/ClusterMgr.cpp +++ b/ndb/src/ndbapi/ClusterMgr.cpp @@ -327,7 +327,7 @@ ClusterMgr::showState(NodeId nodeId){ ClusterMgr::Node::Node() : m_state(NodeState::SL_NOTHING) { compatible = nfCompleteRep = true; - connected = defined = m_alive = false; + connected = defined = m_alive = m_api_reg_conf = false; m_state.m_connected_nodes.clear(); } @@ -401,6 +401,8 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){ node.m_info.m_version); } + node.m_api_reg_conf = true; + node.m_state = apiRegConf->nodeState; if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED || node.m_state.startLevel == NodeState::SL_SINGLEUSER)){ @@ -519,6 +521,7 @@ ClusterMgr::reportDisconnected(NodeId nodeId){ noOfConnectedNodes--; theNodes[nodeId].connected = false; + theNodes[nodeId].m_api_reg_conf = false; theNodes[nodeId].m_state.m_connected_nodes.clear(); reportNodeFailed(nodeId, true); diff --git a/ndb/src/ndbapi/ClusterMgr.hpp b/ndb/src/ndbapi/ClusterMgr.hpp index 32234a0b2f4..b05b73c8324 100644 --- a/ndb/src/ndbapi/ClusterMgr.hpp +++ b/ndb/src/ndbapi/ClusterMgr.hpp @@ -65,6 +65,7 @@ public: bool compatible; // Version is compatible bool nfCompleteRep; // NF Complete Rep has arrived bool m_alive; // Node is alive + bool m_api_reg_conf;// API_REGCONF has arrived NodeInfo m_info; NodeState m_state; diff --git a/ndb/src/ndbapi/SignalSender.cpp b/ndb/src/ndbapi/SignalSender.cpp index 804ea92877d..199c6d6e804 
100644 --- a/ndb/src/ndbapi/SignalSender.cpp +++ b/ndb/src/ndbapi/SignalSender.cpp @@ -140,6 +140,8 @@ SignalSender::getNoOfConnectedNodes() const { SendStatus SignalSender::sendSignal(Uint16 nodeId, const SimpleSignal * s){ + assert(getNodeInfo(nodeId).m_api_reg_conf == true || + s->readSignalNumber() == GSN_API_REGREQ); return theFacade->theTransporterRegistry->prepareSend(&s->header, 1, // JBB &s->theData[0], diff --git a/ndb/src/ndbapi/SignalSender.hpp b/ndb/src/ndbapi/SignalSender.hpp index ec874e63c52..4cad759a334 100644 --- a/ndb/src/ndbapi/SignalSender.hpp +++ b/ndb/src/ndbapi/SignalSender.hpp @@ -32,7 +32,7 @@ public: Uint32 theData[25]; LinearSectionPtr ptr[3]; - int readSignalNumber() {return header.theVerId_signalNumber; } + int readSignalNumber() const {return header.theVerId_signalNumber; } Uint32 *getDataPtrSend() { return theData; } const Uint32 *getDataPtr() const { return theData; } From 1254253640d14dde8684ca29b8cc627ee6d2deef Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Wed, 14 Feb 2007 11:16:10 +0700 Subject: [PATCH 09/21] make sure some printouts in ndbd out file is done with g_eventLogger --- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 130 +++++++++++----------- ndb/src/kernel/vm/WatchDog.cpp | 7 +- 2 files changed, 71 insertions(+), 66 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 5ee3ac8d67d..7810aa45d00 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1786,8 +1786,8 @@ void Dbdih::execSTART_PERMREQ(Signal* signal) return; }//if if (getNodeStatus(nodeId) != NodeRecord::DEAD){ - ndbout << "nodeStatus in START_PERMREQ = " - << (Uint32) getNodeStatus(nodeId) << endl; + g_eventLogger.error("nodeStatus in START_PERMREQ = %u", + (Uint32) getNodeStatus(nodeId)); ndbrequire(false); }//if @@ -4029,9 +4029,9 @@ void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr) jam(); break; default: - ndbout_c("outstanding gsn: %s(%d)", - getSignalName(c_nodeStartMaster.m_outstandingGsn), - c_nodeStartMaster.m_outstandingGsn); + g_eventLogger.error("outstanding gsn: %s(%d)", + getSignalName(c_nodeStartMaster.m_outstandingGsn), + c_nodeStartMaster.m_outstandingGsn); ndbrequire(false); } @@ -4472,9 +4472,10 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr) failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver; break; default: - ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus; - ndbout << " at failure after NODE_FAILREP of node = "; - ndbout << failedNodePtr.i << endl; + g_eventLogger.error("activeStatus = %u " + "at failure after NODE_FAILREP of node = %u", + (Uint32) failedNodePtr.p->activeStatus, + failedNodePtr.i); ndbrequire(false); break; }//switch @@ -4629,7 +4630,7 @@ Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){ /** * Node failure during master take over... 
*/ - ndbout_c("Nodefail during master take over"); + g_eventLogger.info("Nodefail during master take over"); } setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER); @@ -4869,7 +4870,8 @@ void Dbdih::execMASTER_GCPCONF(Signal* signal) if (latestLcpId > SYSFILE->latestLCP_ID) { jam(); #if 0 - ndbout_c("Dbdih: Setting SYSFILE->latestLCP_ID to %d", latestLcpId); + g_eventLogger.info("Dbdih: Setting SYSFILE->latestLCP_ID to %d", + latestLcpId); SYSFILE->latestLCP_ID = latestLcpId; #endif SYSFILE->keepGCI = oldestKeepGci; @@ -5528,7 +5530,7 @@ Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId, if (ERROR_INSERTED(7030)) { - ndbout_c("Reenable GCP_PREPARE"); + g_eventLogger.info("Reenable GCP_PREPARE"); CLEAR_ERROR_INSERT_VALUE; } @@ -5701,7 +5703,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); #if 0 if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){ - ndbout_c("Dbdih: Also resetting c_copyGCISlave"); + g_eventLogger.info("Dbdih: Also resetting c_copyGCISlave"); c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE; c_copyGCISlave.m_expectedNextWord = 0; } @@ -5790,7 +5792,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){ if(c_lcpState.lcpStatus == LCP_TAB_SAVED){ #ifdef VM_TRACE - ndbout_c("Sending extra GSN_LCP_COMPLETE_REP to new master"); + g_eventLogger.info("Sending extra GSN_LCP_COMPLETE_REP to new master"); #endif sendLCP_COMPLETE_REP(signal); } @@ -5946,7 +5948,7 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal) nodePtr.p->lcpStateAtTakeOver = lcpState; #ifdef VM_TRACE - ndbout_c("MASTER_LCPCONF"); + g_eventLogger.info("MASTER_LCPCONF"); printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0); #endif @@ -6023,7 +6025,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId) // protocol. /* --------------------------------------------------------------------- */ #ifdef VM_TRACE - ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart"); + g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart"); #endif checkLcpStart(signal, __LINE__); break; @@ -6034,7 +6036,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId) // protocol by calculating the keep gci and storing the new lcp id. /* --------------------------------------------------------------------- */ #ifdef VM_TRACE - ndbout_c("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId"); + g_eventLogger.info("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId"); #endif if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) { jam(); @@ -6045,7 +6047,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId) /*---------------------------------------------------------------------*/ Uint32 lcpId = SYSFILE->latestLCP_ID; #ifdef VM_TRACE - ndbout_c("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1); + g_eventLogger.info("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1); #endif SYSFILE->latestLCP_ID--; }//if @@ -6062,10 +6064,10 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId) * complete before finalising the LCP process. 
* ------------------------------------------------------------------ */ #ifdef VM_TRACE - ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> " - "startLcpRoundLoopLab(table=%u, fragment=%u)", - c_lcpMasterTakeOverState.minTableId, - c_lcpMasterTakeOverState.minFragId); + g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> " + "startLcpRoundLoopLab(table=%u, fragment=%u)", + c_lcpMasterTakeOverState.minTableId, + c_lcpMasterTakeOverState.minFragId); #endif c_lcpState.keepGci = SYSFILE->keepGCI; @@ -7376,8 +7378,8 @@ void Dbdih::checkGcpStopLab(Signal* signal) if (cgcpSameCounter == 1200) { jam(); #ifdef VM_TRACE - ndbout << "System crash due to GCP Stop in state = "; - ndbout << (Uint32) cgcpStatus << endl; + g_eventLogger.error("System crash due to GCP Stop in state = %u", + (Uint32) cgcpStatus); #endif crashSystemAtGcpStop(signal); return; @@ -7390,8 +7392,8 @@ void Dbdih::checkGcpStopLab(Signal* signal) if (cgcpSameCounter == 1200) { jam(); #ifdef VM_TRACE - ndbout << "System crash due to GCP Stop in state = "; - ndbout << (Uint32) cgcpStatus << endl; + g_eventLogger.error("System crash due to GCP Stop in state = %u", + (Uint32) cgcpStatus); #endif crashSystemAtGcpStop(signal); return; @@ -7582,7 +7584,7 @@ void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId) getNodeState().startLevel == NodeState::SL_STARTED){ jam(); #if 0 - ndbout_c("Dbdih: Clearing initial start ongoing"); + g_eventLogger.info("Dbdih: Clearing initial start ongoing"); #endif Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits); } @@ -7601,7 +7603,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal) if (ERROR_INSERTED(7030)) { cgckptflag = true; - ndbout_c("Delayed GCP_PREPARE 5s"); + g_eventLogger.info("Delayed GCP_PREPARE 5s"); sendSignalWithDelay(reference(), GSN_GCP_PREPARE, signal, 5000, signal->getLength()); return; @@ -7621,7 +7623,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal) if (ERROR_INSERTED(7031)) { - ndbout_c("Crashing delayed in GCP_PREPARE 3s"); + g_eventLogger.info("Crashing delayed in GCP_PREPARE 3s"); signal->theData[0] = 9999; sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 3000, 1); return; @@ -8136,7 +8138,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId) * This is LCP master takeover */ #ifdef VM_TRACE - ndbout_c("initLcpLab aborted due to LCP master takeover - 1"); + g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 1"); #endif c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); sendMASTER_LCPCONF(signal); @@ -8149,7 +8151,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId) * Master take over but has not yet received MASTER_LCPREQ */ #ifdef VM_TRACE - ndbout_c("initLcpLab aborted due to LCP master takeover - 2"); + g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 2"); #endif return; } @@ -9380,9 +9382,10 @@ void Dbdih::checkTcCounterLab(Signal* signal) { CRASH_INSERTION(7009); if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) { - ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus; - ndbout << "lcpStatusUpdatedPlace = " << - c_lcpState.lcpStatusUpdatedPlace << endl; + g_eventLogger.error("lcpStatus = %u" + "lcpStatusUpdatedPlace = %d", + (Uint32) c_lcpState.lcpStatus, + c_lcpState.lcpStatusUpdatedPlace); ndbrequire(false); return; }//if @@ -9935,9 +9938,8 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal) if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){ jam(); - ndbout_c("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ", - tableId, - fragId); + 
g_eventLogger.info("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ", + tableId, fragId); } else { jam(); /** @@ -10065,7 +10067,7 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr, }; #ifdef VM_TRACE - ndbout_c("Fragment Replica(node=%d) not found", nodeId); + g_eventLogger.info("Fragment Replica(node=%d) not found", nodeId); replicaPtr.i = fragPtrP->oldStoredReplicas; while(replicaPtr.i != RNIL){ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); @@ -10078,9 +10080,9 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr, }//if }; if(replicaPtr.i != RNIL){ - ndbout_c("...But was found in oldStoredReplicas"); + g_eventLogger.info("...But was found in oldStoredReplicas"); } else { - ndbout_c("...And wasn't found in oldStoredReplicas"); + g_eventLogger.info("...And wasn't found in oldStoredReplicas"); } #endif ndbrequire(false); @@ -10114,8 +10116,8 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport) ndbrequire(replicaPtr.p->lcpOngoingFlag == true); if(lcpNo != replicaPtr.p->nextLcp){ - ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d", - lcpNo, replicaPtr.p->nextLcp); + g_eventLogger.error("lcpNo = %d replicaPtr.p->nextLcp = %d", + lcpNo, replicaPtr.p->nextLcp); ndbrequire(false); } ndbrequire(lcpNo == replicaPtr.p->nextLcp); @@ -10150,7 +10152,7 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport) // Not all fragments in table have been checkpointed. /* ----------------------------------------------------------------- */ if(0) - ndbout_c("reportLcpCompletion: fragment %d not ready", fid); + g_eventLogger.info("reportLcpCompletion: fragment %d not ready", fid); return false; }//if }//for @@ -10267,7 +10269,7 @@ void Dbdih::execLCP_COMPLETE_REP(Signal* signal) jamEntry(); #if 0 - ndbout_c("LCP_COMPLETE_REP"); + g_eventLogger.info("LCP_COMPLETE_REP"); printLCP_COMPLETE_REP(stdout, signal->getDataPtr(), signal->length(), number()); @@ -10353,7 +10355,7 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal) if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){ jam(); #ifdef VM_TRACE - ndbout_c("Exiting from allNodesLcpCompletedLab"); + g_eventLogger.info("Exiting from allNodesLcpCompletedLab"); #endif return; } @@ -10582,14 +10584,14 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal) infoEvent("Detected GCP stop...sending kill to %s", c_GCP_SAVEREQ_Counter.getText()); - ndbout_c("Detected GCP stop...sending kill to %s", - c_GCP_SAVEREQ_Counter.getText()); + g_eventLogger.error("Detected GCP stop...sending kill to %s", + c_GCP_SAVEREQ_Counter.getText()); return; } case GCP_SAVE_LQH_FINISHED: - ndbout_c("m_copyReason: %d m_waiting: %d", - c_copyGCIMaster.m_copyReason, - c_copyGCIMaster.m_waiting); + g_eventLogger.error("m_copyReason: %d m_waiting: %d", + c_copyGCIMaster.m_copyReason, + c_copyGCIMaster.m_waiting); break; case GCP_READY: // shut up lint case GCP_PREPARE_SENT: @@ -10597,11 +10599,11 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal) break; } - ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d", - c_copyGCISlave.m_senderData, - c_copyGCISlave.m_senderRef, - c_copyGCISlave.m_copyReason, - c_copyGCISlave.m_expectedNextWord); + g_eventLogger.error("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d", + c_copyGCISlave.m_senderData, + c_copyGCISlave.m_senderRef, + c_copyGCISlave.m_copyReason, + c_copyGCISlave.m_expectedNextWord); FileRecordPtr file0Ptr; file0Ptr.i = crestartInfoFile[0]; @@ -12804,9 +12806,9 @@ void Dbdih::setLcpActiveStatusEnd() nodePtr.i = getOwnNodeId(); ptrAss(nodePtr, nodeRecord); 
ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active); - ndbout_c("NR: setLcpActiveStatusEnd - m_participatingLQH"); + g_eventLogger.info("NR: setLcpActiveStatusEnd - m_participatingLQH"); } else { - ndbout_c("NR: setLcpActiveStatusEnd - !m_participatingLQH"); + g_eventLogger.info("NR: setLcpActiveStatusEnd - !m_participatingLQH"); } } @@ -13637,8 +13639,8 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal) } if(arg == DumpStateOrd::EnableUndoDelayDataWrite){ - ndbout << "Dbdih:: delay write of datapages for table = " - << dumpState->args[1]<< endl; + g_eventLogger.info("Dbdih:: delay write of datapages for table = %s", + dumpState->args[1]); // Send this dump to ACC and TUP EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2); EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2); @@ -13655,13 +13657,13 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal) }//if if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) { // Set time between LCP to min value - ndbout << "Set time between LCP to min value" << endl; + g_eventLogger.info("Set time between LCP to min value"); c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min return; } if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) { // Set time between LCP to max value - ndbout << "Set time between LCP to max value" << endl; + g_eventLogger.info("Set time between LCP to max value"); c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max return; } @@ -13697,7 +13699,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal) { cgcpDelay = signal->theData[1]; } - ndbout_c("Setting time between gcp : %d", cgcpDelay); + g_eventLogger.info("Setting time between gcp : %d", cgcpDelay); } if (arg == 7021 && signal->getLength() == 2) @@ -13820,7 +13822,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){ while(index < count){ if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){ jam(); - // ndbout_c("Unqueuing %d", index); + // g_eventLogger.info("Unqueuing %d", index); count--; for(Uint32 i = index; i #include #include - +#include + +extern EventLogger g_eventLogger; + extern "C" void* runWatchDog(void* w){ @@ -125,7 +128,7 @@ WatchDog::run(){ last_stuck_action = "Unknown place"; break; }//switch - ndbout << "Ndb kernel is stuck in: " << last_stuck_action << endl; + g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action); if(alerts == 3){ shutdownSystem(last_stuck_action); } From 2805c8352ac5c74c329ec07cc8c368329e7fad5c Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" <> Date: Wed, 14 Feb 2007 15:22:03 +1100 Subject: [PATCH 10/21] WL#3704 mgmapi timeouts (and cleanups) indicate units in SocketOutputStream timeout rename timeout in SocketOutputStream to reflect units (ms) In 5.0 as well as is safe cleanup patch that will help with merging. 
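The rename in the hunks that follow is purely about making the unit visible wherever the value is used: a bare unsigned m_timeout gives no hint whether it holds seconds or milliseconds, while m_timeout_ms does. A minimal, self-contained sketch of the convention (plain C++; print_with_timeout is an invented stand-in, not the real SocketOutputStream API):

    #include <cstdio>

    // The _ms suffix documents the unit in the signature and at every call site.
    static int print_with_timeout(const char *msg, unsigned write_timeout_ms)
    {
        // A real implementation would hand write_timeout_ms to poll()/select();
        // this sketch only demonstrates the naming convention.
        std::printf("[timeout=%u ms] %s\n", write_timeout_ms, msg);
        return 0;
    }

    int main()
    {
        print_with_timeout("get status", 1000);   // unambiguously 1000 ms
        return 0;
    }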
--- ndb/include/util/OutputStream.hpp | 6 +++--- ndb/src/common/util/OutputStream.cpp | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ndb/include/util/OutputStream.hpp b/ndb/include/util/OutputStream.hpp index 460915e12e7..35ef3c5fed4 100644 --- a/ndb/include/util/OutputStream.hpp +++ b/ndb/include/util/OutputStream.hpp @@ -42,10 +42,10 @@ public: class SocketOutputStream : public OutputStream { NDB_SOCKET_TYPE m_socket; - unsigned m_timeout; + unsigned m_timeout_ms; public: - SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned writeTimeout = 1000); - + SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000); + int print(const char * fmt, ...); int println(const char * fmt, ...); }; diff --git a/ndb/src/common/util/OutputStream.cpp b/ndb/src/common/util/OutputStream.cpp index cccd76eac2c..eada1452f02 100644 --- a/ndb/src/common/util/OutputStream.cpp +++ b/ndb/src/common/util/OutputStream.cpp @@ -42,16 +42,16 @@ FileOutputStream::println(const char * fmt, ...){ } SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket, - unsigned timeout){ + unsigned write_timeout_ms){ m_socket = socket; - m_timeout = timeout; + m_timeout_ms = write_timeout_ms; } int SocketOutputStream::print(const char * fmt, ...){ va_list ap; va_start(ap, fmt); - const int ret = vprint_socket(m_socket, m_timeout, fmt, ap); + const int ret = vprint_socket(m_socket, m_timeout_ms, fmt, ap); va_end(ap); return ret; } @@ -59,7 +59,7 @@ int SocketOutputStream::println(const char * fmt, ...){ va_list ap; va_start(ap, fmt); - const int ret = vprintln_socket(m_socket, m_timeout, fmt, ap); + const int ret = vprintln_socket(m_socket, m_timeout_ms, fmt, ap); va_end(ap); return ret; } From faec3f4fb16c427d2fe1406b638733b824aa46de Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" <> Date: Wed, 14 Feb 2007 15:28:04 +1100 Subject: [PATCH 11/21] BUG#26352 unused ndb_mgm_rep_command in mgmapi.cpp --- ndb/src/mgmapi/mgmapi.cpp | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index fa7aed8b182..cacb55fe8fc 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -2187,43 +2187,6 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype, return nodeid; } -/***************************************************************************** - * Global Replication - ******************************************************************************/ -extern "C" -int -ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request, - unsigned int* replication_id, - struct ndb_mgm_reply* /*reply*/) -{ - SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_rep_command"); - const ParserRow replication_reply[] = { - MGM_CMD("global replication reply", NULL, ""), - MGM_ARG("result", String, Mandatory, "Error message"), - MGM_ARG("id", Int, Optional, "Id of global replication"), - MGM_END() - }; - CHECK_HANDLE(handle, -1); - CHECK_CONNECTED(handle, -1); - - Properties args; - args.put("request", request); - const Properties *reply; - reply = ndb_mgm_call(handle, replication_reply, "rep", &args); - CHECK_REPLY(reply, -1); - - const char * result; - reply->get("result", &result); - reply->get("id", replication_id); - if(strcmp(result,"Ok")!=0) { - delete reply; - return -1; - } - - delete reply; - return 0; -} - extern "C" int ndb_mgm_set_int_parameter(NdbMgmHandle handle, From 02c847eb0320acfb0111f3e0b7875c5163e25a92 Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" 
<> Date: Wed, 14 Feb 2007 15:35:29 +1100 Subject: [PATCH 12/21] BUG#26353 remove GET_CONFIG_BACKWARDS_COMPAT from ndb_mgmd there is backwards compatibility for GET CONFIG style configuration requests in ndb_mgmd. None of the ndbd versions that use this couldn't possibly connect to a 5.0 or 5.1 cluster. remove the backwards compat remove it --- ndb/src/mgmsrv/Services.cpp | 86 ++----------------------------------- ndb/src/mgmsrv/Services.hpp | 9 ---- 2 files changed, 3 insertions(+), 92 deletions(-) diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index a2dec949f67..aa35c826535 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -332,19 +332,6 @@ MgmApiSession::runSession() switch(ctx.m_status) { case Parser_t::UnknownCommand: -#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT - /* Backwards compatibility for old NDBs that still use - * the old "GET CONFIG" command. - */ - size_t i; - for(i=0; iprintln("Expected 2 arguments for GET CONFIG"); - return; - } - - /* Put arguments in properties object so we can call the real function */ - args.put("version", version); - args.put("node", node); - getConfig_common(ctx, args, true); -} -#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */ - -void -MgmApiSession::getConfig(Parser_t::Context &ctx, - const class Properties &args) { - getConfig_common(ctx, args); -} - static Properties * backward(const char * base, const Properties* reply){ Properties * ret = new Properties(); @@ -560,9 +521,9 @@ MgmApiSession::get_nodeid(Parser_t::Context &, } void -MgmApiSession::getConfig_common(Parser_t::Context &, - const class Properties &args, - bool compat) { +MgmApiSession::getConfig(Parser_t::Context &, + const class Properties &args) +{ Uint32 version, node = 0; args.get("version", &version); @@ -576,47 +537,6 @@ MgmApiSession::getConfig_common(Parser_t::Context &, return; } - if(version > 0 && version < makeVersion(3, 5, 0) && compat){ - Properties *reply = backward("", conf->m_oldConfig); - reply->put("Version", version); - reply->put("LocalNodeId", node); - - backward("", reply); - //reply->print(); - - const Uint32 size = reply->getPackedSize(); - Uint32 *buffer = new Uint32[size/4+1]; - - reply->pack(buffer); - delete reply; - - const int uurows = (size + 44)/45; - char * uubuf = new char[uurows * 62+5]; - - const int uusz = uuencode_mem(uubuf, (char *)buffer, size); - delete[] buffer; - - m_output->println("GET CONFIG %d %d %d %d %d", - 0, version, node, size, uusz); - - m_output->println("begin 664 Ndb_cfg.bin"); - - /* XXX Need to write directly to the socket, because the uubuf is not - * NUL-terminated. This could/should probably be done in a nicer way. - */ - write_socket(m_socket, MAX_WRITE_TIMEOUT, uubuf, uusz); - delete[] uubuf; - - m_output->println("end"); - m_output->println(""); - return; - } - - if(compat){ - m_output->println("GET CONFIG %d %d %d %d %d",1, version, 0, 0, 0); - return; - } - if(node != 0){ bool compatible; switch (m_mgmsrv.getNodeType(node)) { diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp index 4d904e8369e..48ba55a8b12 100644 --- a/ndb/src/mgmsrv/Services.hpp +++ b/ndb/src/mgmsrv/Services.hpp @@ -24,9 +24,6 @@ #include "MgmtSrvr.hpp" -/** Undefine this to remove backwards compatibility for "GET CONFIG". 
*/ -#define MGM_GET_CONFIG_BACKWARDS_COMPAT - class MgmApiSession : public SocketServer::Session { static void stop_session_if_timed_out(SocketServer::Session *_s, void *data); @@ -42,9 +39,6 @@ private: char m_err_str[1024]; int m_stopSelf; // -1 is restart, 0 do nothing, 1 stop - void getConfig_common(Parser_t::Context &ctx, - const class Properties &args, - bool compat = false); const char *get_error_text(int err_no) { return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); } @@ -55,9 +49,6 @@ public: void getStatPort(Parser_t::Context &ctx, const class Properties &args); void getConfig(Parser_t::Context &ctx, const class Properties &args); -#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT - void getConfig_old(Parser_t::Context &ctx); -#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */ void get_nodeid(Parser_t::Context &ctx, const class Properties &args); void getVersion(Parser_t::Context &ctx, const class Properties &args); From 2fb873f3716eef64543e353fcf8e5ca174aa932d Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" <> Date: Wed, 14 Feb 2007 15:47:34 +1100 Subject: [PATCH 13/21] BUG#26355 FIXME: use constant for max loglevel in ndb_mgmd --- ndb/include/mgmapi/mgmapi.h | 2 ++ ndb/src/mgmsrv/Services.cpp | 8 +++----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h index 2423048f98f..e5889a1ee6d 100644 --- a/ndb/include/mgmapi/mgmapi.h +++ b/ndb/include/mgmapi/mgmapi.h @@ -16,6 +16,8 @@ #ifndef MGMAPI_H #define MGMAPI_H +#define NDB_MGM_MAX_LOGLEVEL 15 + /** * @mainpage MySQL Cluster Management API * diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index aa35c826535..01f71aa24f5 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -764,8 +764,7 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, DBUG_PRINT("enter",("node=%d, category=%d, level=%d", node, cat, level)); - /* XXX should use constants for this value */ - if(level > 15) { + if(level > NDB_MGM_MAX_LOGLEVEL) { m_output->println(reply); m_output->println("result: Invalid loglevel %d", level); m_output->println(""); @@ -809,8 +808,7 @@ MgmApiSession::setLogLevel(Parser::Context &, args.get("category", &cat); args.get("level", &level); - /* XXX should use constants for this value */ - if(level > 15) { + if(level > NDB_MGM_MAX_LOGLEVEL) { m_output->println("set loglevel reply"); m_output->println("result: Invalid loglevel", errorString.c_str()); m_output->println(""); @@ -1510,7 +1508,7 @@ MgmApiSession::listen_event(Parser::Context & ctx, } int level = atoi(spec[1].c_str()); - if(level < 0 || level > 15){ + if(level < 0 || level > NDB_MGM_MAX_LOGLEVEL){ msg.appfmt("Invalid level: >%s<", spec[1].c_str()); result = -1; goto done; From 5c756c18223bf102e31a8d56f315db8b33aad5bd Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" <> Date: Wed, 14 Feb 2007 15:51:16 +1100 Subject: [PATCH 14/21] BUG#26356 uninitialised data sent from TAMPER_ORD to DIHNDBTAMPER Don't send uninit data in TAMPER_ORD to DIHNDBTAMPER Even though this data is unused from the CMVMI (mgmd) signal, we shouldn't be doing stuff with uninited data. 
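The one-line fix that follows does exactly what the message above describes: the TAMPER_ORD handler forwards a three-word signal to DBDIH but only ever filled in words 0 and 1, so word 2 went out uninitialised. Zeroing unused words before a send costs nothing and keeps the payload deterministic (and tools such as valgrind quiet). A stand-alone toy showing the idea; the ToySignal type and the values used are invented for the sketch and assume nothing about the real Signal class:

    #include <array>
    #include <cstdio>

    // Toy "signal": a fixed-size word buffer handed to some transport layer.
    using ToySignal = std::array<unsigned, 3>;

    static void send_signal(const ToySignal &s)
    {
        // The receiver may only read s[0] and s[1], but every word handed
        // to the transport should still have a defined value.
        std::printf("send: %u %u %u\n", s[0], s[1], s[2]);
    }

    int main()
    {
        ToySignal sig{};   // value-initialisation zeroes all three words
        sig[0] = 5;        // command word
        sig[1] = 9999;     // error number
        // sig[2] deliberately stays 0 instead of whatever was in memory
        send_signal(sig);
        return 0;
    }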
--- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 5dd1e527dd2..00343172cb0 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -839,7 +839,7 @@ void Cmvmi::execTAMPER_ORD(Signal* signal) // to be able to indicate if we really introduced an error. #ifdef ERROR_INSERT TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0]; - + signal->theData[2] = 0; signal->theData[1] = tamperOrd->errorNo; signal->theData[0] = 5; sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB); From e1b75c178414dd53145df35700df9a0bad903ba8 Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" <> Date: Wed, 14 Feb 2007 15:55:08 +1100 Subject: [PATCH 15/21] BUG#26357 remove unused GSN_STATISTICS_REQ and CONF remove GSN_STATISTICS_REQ and CONF These are unused and have been since BK import. not needed. --- ndb/include/kernel/GlobalSignalNumbers.h | 4 ++-- .../debugger/signaldata/SignalNames.cpp | 4 +--- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 19 ------------------- ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp | 1 - 4 files changed, 3 insertions(+), 25 deletions(-) diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index 08d35a0b0cb..78be268cbc7 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -540,13 +540,13 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_ABORT_ALL_REF 446 #define GSN_ABORT_ALL_CONF 447 -#define GSN_STATISTICS_REQ 448 +/* 448 unused - formerly GSN_STATISTICS_REQ */ #define GSN_STOP_ORD 449 #define GSN_TAMPER_ORD 450 #define GSN_SET_VAR_REQ 451 #define GSN_SET_VAR_CONF 452 #define GSN_SET_VAR_REF 453 -#define GSN_STATISTICS_CONF 454 +/* 454 unused - formerly GSN_STATISTICS_CONF */ #define GSN_START_ORD 455 /* 457 unused */ diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp index 9839fd32cf2..36185638b57 100644 --- a/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -380,15 +380,13 @@ const GsnName SignalNames [] = { ,{ GSN_TUP_WRITELOG_REQ, "TUP_WRITELOG_REQ" } ,{ GSN_LQH_WRITELOG_REQ, "LQH_WRITELOG_REQ" } - ,{ GSN_STATISTICS_REQ, "STATISTICS_REQ" } ,{ GSN_START_ORD, "START_ORD" } ,{ GSN_STOP_ORD, "STOP_ORD" } ,{ GSN_TAMPER_ORD, "TAMPER_ORD" } ,{ GSN_SET_VAR_REQ, "SET_VAR_REQ" } ,{ GSN_SET_VAR_CONF, "SET_VAR_CONF" } ,{ GSN_SET_VAR_REF, "SET_VAR_REF" } - ,{ GSN_STATISTICS_CONF, "STATISTICS_CONF" } - + ,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" } ,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" } ,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" } diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 00343172cb0..9e8a1df0138 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -77,7 +77,6 @@ Cmvmi::Cmvmi(const Configuration & conf) : addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ); addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD); - addRecSignal(GSN_STATISTICS_REQ, &Cmvmi::execSTATISTICS_REQ); addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD); addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ); addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF); @@ -703,24 +702,6 @@ Cmvmi::execTEST_ORD(Signal * signal){ #endif } -void Cmvmi::execSTATISTICS_REQ(Signal* signal) -{ 
- // TODO Note ! This is only a test implementation... - - static int stat1 = 0; - jamEntry(); - - //ndbout << "data 1: " << signal->theData[1]; - - int x = signal->theData[0]; - stat1++; - signal->theData[0] = stat1; - sendSignal(x, GSN_STATISTICS_CONF, signal, 7, JBB); - -}//execSTATISTICS_REQ() - - - void Cmvmi::execSTOP_ORD(Signal* signal) { jamEntry(); diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp index e3a20795701..b4ed5e45490 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp @@ -55,7 +55,6 @@ private: void execSIZEALT_ACK(Signal* signal); void execTEST_ORD(Signal* signal); - void execSTATISTICS_REQ(Signal* signal); void execSTOP_ORD(Signal* signal); void execSTART_ORD(Signal* signal); void execTAMPER_ORD(Signal* signal); From 7ed1f6ac01e87d46bcb33a3aa6d2d367be24841c Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" <> Date: Wed, 14 Feb 2007 16:03:25 +1100 Subject: [PATCH 16/21] BUG#26358 remove unused and non-working SET_VAR_REQ/CONF signals remove SET_VAR_REQ,REF,CONF - unused, #if 0 and confusing Remove the dead SET_VAR_REQ,REF,CONF signals. They: a) don't currently work b) will cause confusion with future mgmd updates for signal interface to configuration things (e.g. port numbers) --- ndb/include/kernel/GlobalSignalNumbers.h | 6 +- .../debugger/signaldata/SignalNames.cpp | 3 - ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 157 ------------------ ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp | 5 - ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 1 - ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 1 - ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 27 --- ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 1 - ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 1 - ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 24 --- ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 1 - ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 1 - ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 24 --- ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 1 - ndb/src/kernel/blocks/dbtc/DbtcInit.cpp | 1 - ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 30 ---- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 1 - ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 28 ---- ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp | 1 - ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp | 1 - ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 17 -- ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 1 - ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 1 - ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 28 ---- ndb/src/mgmsrv/MgmtSrvr.hpp | 1 - 25 files changed, 3 insertions(+), 360 deletions(-) diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index 78be268cbc7..1ffc198de41 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -543,9 +543,9 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; /* 448 unused - formerly GSN_STATISTICS_REQ */ #define GSN_STOP_ORD 449 #define GSN_TAMPER_ORD 450 -#define GSN_SET_VAR_REQ 451 -#define GSN_SET_VAR_CONF 452 -#define GSN_SET_VAR_REF 453 +/* 451 unused - formerly GSN_SET_VAR_REQ */ +/* 452 unused - formerly GSN_SET_VAR_CONF */ +/* 453 unused - formerly GSN_SET_VAR_REF */ /* 454 unused - formerly GSN_STATISTICS_CONF */ #define GSN_START_ORD 455 diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp index 36185638b57..66e7a10cdb7 100644 --- a/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -383,9 +383,6 @@ const GsnName 
SignalNames [] = { ,{ GSN_START_ORD, "START_ORD" } ,{ GSN_STOP_ORD, "STOP_ORD" } ,{ GSN_TAMPER_ORD, "TAMPER_ORD" } - ,{ GSN_SET_VAR_REQ, "SET_VAR_REQ" } - ,{ GSN_SET_VAR_CONF, "SET_VAR_CONF" } - ,{ GSN_SET_VAR_REF, "SET_VAR_REF" } ,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" } ,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" } diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 9e8a1df0138..b8b539e22c9 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -78,9 +78,6 @@ Cmvmi::Cmvmi(const Configuration & conf) : addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD); addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD); - addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ); - addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF); - addRecSignal(GSN_SET_VAR_REF, &Cmvmi::execSET_VAR_REF); addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD); addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD); addRecSignal(GSN_EVENT_SUBSCRIBE_REQ, @@ -828,160 +825,6 @@ void Cmvmi::execTAMPER_ORD(Signal* signal) }//execTAMPER_ORD() - - -void Cmvmi::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - jamEntry(); - switch (var) { - - // NDBCNTR_REF - - // DBTC - case TransactionDeadlockDetectionTimeout: - case TransactionInactiveTime: - case NoOfConcurrentProcessesHandleTakeover: - sendSignal(DBTC_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBDIH - case TimeBetweenLocalCheckpoints: - case TimeBetweenGlobalCheckpoints: - sendSignal(DBDIH_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBLQH - case NoOfConcurrentCheckpointsDuringRestart: - case NoOfConcurrentCheckpointsAfterRestart: - sendSignal(DBLQH_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBACC - case NoOfDiskPagesToDiskDuringRestartACC: - case NoOfDiskPagesToDiskAfterRestartACC: - sendSignal(DBACC_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBTUP - case NoOfDiskPagesToDiskDuringRestartTUP: - case NoOfDiskPagesToDiskAfterRestartTUP: - sendSignal(DBTUP_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBDICT - - // NDBCNTR - case TimeToWaitAlive: - - // QMGR - case HeartbeatIntervalDbDb: // TODO ev till Ndbcnt också - case HeartbeatIntervalDbApi: - case ArbitTimeout: - sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // NDBFS - - // CMVMI - case MaxNoOfSavedMessages: - case LockPagesInMainMemory: - case TimeBetweenWatchDogCheck: - case StopOnError: - handleSET_VAR_REQ(signal); - break; - - - // Not possible to update (this could of course be handled by each block - // instead but I havn't investigated where they belong) - case Id: - case ExecuteOnComputer: - case ShmKey: - case MaxNoOfConcurrentOperations: - case MaxNoOfConcurrentTransactions: - case MemorySpaceIndexes: - case MemorySpaceTuples: - case MemoryDiskPages: - case NoOfFreeDiskClusters: - case NoOfDiskClusters: - case NoOfFragmentLogFiles: - case NoOfDiskClustersPerDiskFile: - case NoOfDiskFiles: - case MaxNoOfSavedEvents: - default: - - int mgmtSrvr = setVarReq->mgmtSrvrBlockRef(); - sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB); - } // switch - -#endif -}//execSET_VAR_REQ() - - -void Cmvmi::execSET_VAR_CONF(Signal* signal) -{ - int mgmtSrvr = signal->theData[0]; - sendSignal(mgmtSrvr, GSN_SET_VAR_CONF, signal, 0, JBB); - -}//execSET_VAR_CONF() - - -void Cmvmi::execSET_VAR_REF(Signal* signal) -{ - int mgmtSrvr = signal->theData[0]; - 
sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB); - -}//execSET_VAR_REF() - - -void Cmvmi::handleSET_VAR_REQ(Signal* signal) { -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - switch (var) { - case MaxNoOfSavedMessages: - theConfig.maxNoOfErrorLogs(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case LockPagesInMainMemory: - int result; - if (val == 0) { - result = NdbMem_MemUnlockAll(); - } - else { - result = NdbMem_MemLockAll(); - } - if (result == 0) { - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - } - else { - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } - break; - - case TimeBetweenWatchDogCheck: - theConfig.timeBetweenWatchDogCheck(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case StopOnError: - theConfig.stopOnError(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - return; - } // switch -#endif -} - #ifdef VM_TRACE class RefSignalTest { public: diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp index b4ed5e45490..712e70039c9 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp @@ -58,16 +58,11 @@ private: void execSTOP_ORD(Signal* signal); void execSTART_ORD(Signal* signal); void execTAMPER_ORD(Signal* signal); - void execSET_VAR_REQ(Signal* signal); - void execSET_VAR_CONF(Signal* signal); - void execSET_VAR_REF(Signal* signal); void execDUMP_STATE_ORD(Signal* signal); void execEVENT_SUBSCRIBE_REQ(Signal *); void cancelSubscription(NodeId nodeId); - - void handleSET_VAR_REQ(Signal* signal); void execTESTSIG(Signal* signal); void execNODE_START_REP(Signal* signal); diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp index 43810a08ac7..02a7e69b684 100644 --- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp +++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp @@ -911,7 +911,6 @@ private: void execDROP_TAB_REQ(Signal* signal); void execFSREMOVECONF(Signal* signal); void execREAD_CONFIG_REQ(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execDUMP_STATE_ORD(Signal* signal); // Statement blocks diff --git a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp index 024a32ca95c..80664e8911a 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp @@ -178,7 +178,6 @@ Dbacc::Dbacc(const class Configuration & conf): addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ); addRecSignal(GSN_FSREMOVECONF, &Dbacc::execFSREMOVECONF); addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true); - addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ); initData(); diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 40af5a52c03..37f75d82710 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -11648,33 +11648,6 @@ Dbacc::execDUMP_STATE_ORD(Signal* signal) #endif }//Dbacc::execDUMP_STATE_ORD() -void Dbacc::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - - switch (var) { - - case NoOfDiskPagesToDiskAfterRestartACC: - clblPagesPerTick = val; - sendSignal(CMVMI_REF, 
GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfDiskPagesToDiskDuringRestartACC: - // Valid only during start so value not set. - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif - -}//execSET_VAR_REQ() - void Dbacc::execREAD_PSUEDO_REQ(Signal* signal){ jamEntry(); diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index e8f24876979..ba2d8a6522a 100644 --- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -693,7 +693,6 @@ private: void execFSREADREF(Signal *); void execFSWRITECONF(Signal *); void execFSWRITEREF(Signal *); - void execSET_VAR_REQ(Signal *); void execCHECKNODEGROUPSREQ(Signal *); void execSTART_INFOREQ(Signal*); void execSTART_INFOREF(Signal*); diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp index 360f320cb74..6e456c9c841 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp @@ -216,7 +216,6 @@ Dbdih::Dbdih(const class Configuration & config): addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF, true); addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF); addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF, true); - addRecSignal(GSN_SET_VAR_REQ, &Dbdih::execSET_VAR_REQ); addRecSignal(GSN_START_INFOREQ, &Dbdih::execSTART_INFOREQ); diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 5ee3ac8d67d..6839dca892f 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -14026,30 +14026,6 @@ Dbdih::execNDB_TAMPER(Signal* signal) return; }//Dbdih::execNDB_TAMPER() -void Dbdih::execSET_VAR_REQ(Signal* signal) { -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - - switch (var) { - case TimeBetweenLocalCheckpoints: - c_lcpState.clcpDelay = val; - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case TimeBetweenGlobalCheckpoints: - cgcpDelay = val; - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif -} - void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){ BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0]; diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 817832bdfcb..40eb6a9c668 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -2201,7 +2201,6 @@ private: void execFSREADCONF(Signal* signal); void execFSREADREF(Signal* signal); void execSCAN_HBREP(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execTIME_SIGNAL(Signal* signal); void execFSSYNCCONF(Signal* signal); diff --git a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp index 0b395e250c1..3452269be51 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp @@ -314,7 +314,6 @@ Dblqh::Dblqh(const class Configuration & conf): addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF); addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF, true); addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF); - addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ); addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL); 
addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF); addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD); diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 6a439b24c03..7e920b2e7e4 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -18902,30 +18902,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal) }//Dblqh::execDUMP_STATE_ORD() -void Dblqh::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - - switch (var) { - - case NoOfConcurrentCheckpointsAfterRestart: - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfConcurrentCheckpointsDuringRestart: - // Valid only during start so value not set. - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif -}//execSET_VAR_REQ() - - /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* ---------------------- TRIGGER HANDLING ------------------------ */ diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index d6c4529bb72..514cc2183aa 100644 --- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -1323,7 +1323,6 @@ private: void execTIME_SIGNAL(Signal* signal); void execAPI_FAILREQ(Signal* signal); void execSCAN_HBREP(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execABORT_ALL_REQ(Signal* signal); diff --git a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp index 0b46f598a89..73149f0b6fd 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp @@ -256,7 +256,6 @@ Dbtc::Dbtc(const class Configuration & conf): addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ); addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL); addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ); - addRecSignal(GSN_SET_VAR_REQ, &Dbtc::execSET_VAR_REQ); addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK); addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ); diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 2b2e0e649a4..2afa077b672 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -10999,36 +10999,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal) } }//Dbtc::execDUMP_STATE_ORD() -void Dbtc::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - - switch (var) { - - case TransactionInactiveTime: - jam(); - set_appl_timeout_value(val); - break; - case TransactionDeadlockDetectionTimeout: - set_timeout_value(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfConcurrentProcessesHandleTakeover: - set_no_parallel_takeover(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif -} - void Dbtc::execABORT_ALL_REQ(Signal* signal) { jamEntry(); diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 3079a530807..c78e7d9bced 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp 
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -1116,7 +1116,6 @@ private: void execFSREADCONF(Signal* signal); void execNDB_STTOR(Signal* signal); void execREAD_CONFIG_REQ(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execDROP_TAB_REQ(Signal* signal); void execALTER_TAB_REQ(Signal* signal); void execFSREMOVECONF(Signal* signal); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index f21f2eba9fc..0f83c45077b 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -103,7 +103,6 @@ Dbtup::Dbtup(const class Configuration & conf) addRecSignal(GSN_FSREADCONF, &Dbtup::execFSREADCONF); addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR); addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true); - addRecSignal(GSN_SET_VAR_REQ, &Dbtup::execSET_VAR_REQ); // Trigger Signals addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ); @@ -1315,32 +1314,5 @@ void Dbtup::seizePendingFileOpenInfoRecord(PendingFileOpenInfoPtr& pfoiPtr) pfoiPtr.p->pfoNextRec = RNIL; }//Dbtup::seizePendingFileOpenInfoRecord() -void Dbtup::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)signal->getDataPtrSend(); - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - switch (var) { - - case NoOfDiskPagesToDiskAfterRestartTUP: - clblPagesPerTick = val; - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfDiskPagesToDiskDuringRestartTUP: - // Valid only during start so value not set. - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif - -}//execSET_VAR_REQ() - - diff --git a/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp index ec9d4a0dc60..80b0b29ea69 100644 --- a/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp +++ b/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp @@ -190,7 +190,6 @@ private: void execNDB_STARTCONF(Signal* signal); void execREAD_NODESREQ(Signal* signal); void execNDB_STARTREF(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execSTOP_PERM_REF(Signal* signal); void execSTOP_PERM_CONF(Signal* signal); diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp index 6df52b6fbe7..fdd6e7677d3 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp @@ -80,7 +80,6 @@ Ndbcntr::Ndbcntr(const class Configuration & conf): addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF); addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ); addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF); - addRecSignal(GSN_SET_VAR_REQ, &Ndbcntr::execSET_VAR_REQ); addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF); addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF); diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 26e8f246293..87b71ba1c5f 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -2026,23 +2026,6 @@ Ndbcntr::execDUMP_STATE_ORD(Signal* signal) }//Ndbcntr::execDUMP_STATE_ORD() -void Ndbcntr::execSET_VAR_REQ(Signal* signal) { -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - - switch (var) { - case TimeToWaitAlive: - // Valid only during start so 
value not set. - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - }// switch -#endif -}//Ndbcntr::execSET_VAR_REQ() - void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{ NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0]; diff --git a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index dcca240eeb6..9d2ac80d905 100644 --- a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -242,7 +242,6 @@ private: void execAPI_REGREQ(Signal* signal); void execAPI_FAILCONF(Signal* signal); void execREAD_NODESREQ(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execREAD_NODESREF(Signal* signal); void execREAD_NODESCONF(Signal* signal); diff --git a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp index b8885569f0e..a087fe38c1c 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp @@ -82,7 +82,6 @@ Qmgr::Qmgr(const class Configuration & conf) addRecSignal(GSN_DISCONNECT_REP, &Qmgr::execDISCONNECT_REP); addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF); addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ); - addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ); addRecSignal(GSN_API_BROADCAST_REP, &Qmgr::execAPI_BROADCAST_REP); // Arbitration signals diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 0156f334051..c992ebe6cb9 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -4774,34 +4774,6 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal) #endif }//Qmgr::execDUMP_STATE_ORD() -void Qmgr::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - UintR val = setVarReq->value(); - - switch (var) { - case HeartbeatIntervalDbDb: - setHbDelay(val/10); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case HeartbeatIntervalDbApi: - setHbApiDelay(val/10); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case ArbitTimeout: - setArbitTimeout(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - }// switch -#endif -}//execSET_VAR_REQ() void Qmgr::execAPI_BROADCAST_REP(Signal* signal) diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 59f1487f7dc..62d2efd5b16 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -597,7 +597,6 @@ private: */ enum WaitSignalType { NO_WAIT, // We don't expect to receive any signal - WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF WAIT_SUBSCRIBE_CONF // Accept event subscription confirmation }; From 143f3e79644a517bdd8bcd4b0e4e3e3b63c21674 Mon Sep 17 00:00:00 2001 From: "stewart@willster.(none)" <> Date: Wed, 14 Feb 2007 16:49:40 +1100 Subject: [PATCH 17/21] BUG#20121 missing err msg for ENOSPC getting BACKUP_FRAGMENT_REF in LQH from BACKUP would bail on ndbrequire(false) instead of having good error message. Can re-use error code from BACKUP as it's a FsRef error code, which is NDBD_EXIT... except when it isn't. 
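The diff that follows replaces a bare ndbrequire(false) with a formatted fatal error: the error code carried in BACKUP_FRAGMENT_REF is folded into a readable message, and a generic filesystem exit code is substituted whenever the value is flagged as a filesystem error. The same shape in stand-alone C++ (a sketch only: kFsErrBit, kExitFsUnknown, prog_error and on_backup_fragment_ref are invented stand-ins, not the FsRef::FS_ERR_BIT, NDBD_EXIT_AFS_UNKNOWN or progError definitions from the NDB headers):

    #include <cstdio>
    #include <cstdlib>

    // Invented stand-in values; the real constants live in the NDB headers.
    const unsigned kFsErrBit      = 0x8000u;
    const unsigned kExitFsUnknown = 9999u;

    static void prog_error(int line, unsigned code, const char *msg)
    {
        std::fprintf(stderr, "fatal at line %d, exit code %u: %s\n", line, code, msg);
        std::exit(1);
    }

    static void on_backup_fragment_ref(unsigned error_code)
    {
        char buf[100];
        std::snprintf(buf, sizeof(buf),
                      "Unable to store fragment during LCP. NDBFS Error: %u",
                      error_code);
        // Filesystem-flagged codes map to one generic exit code; anything
        // else is passed through unchanged.
        prog_error(__LINE__,
                   (error_code & kFsErrBit) ? kExitFsUnknown : error_code,
                   buf);
    }

    int main() { on_backup_fragment_ref(kFsErrBit | 28u); return 0; }   // 28: an ENOSPC-style code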
--- storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 76c3905b3aa..464acea8c74 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -62,6 +62,7 @@ #include #include #include +#include // Use DEBUG to print messages that should be // seen only when we debug the product @@ -11434,7 +11435,17 @@ void Dblqh::execLCP_PREPARE_CONF(Signal* signal) void Dblqh::execBACKUP_FRAGMENT_REF(Signal* signal) { - ndbrequire(false); + BackupFragmentRef *ref= (BackupFragmentRef*)signal->getDataPtr(); + char buf[100]; + BaseString::snprintf(buf,sizeof(buf), + "Unable to store fragment during LCP. NDBFS Error: %u", + ref->errorCode); + + progError(__LINE__, + (ref->errorCode & FsRef::FS_ERR_BIT)? + NDBD_EXIT_AFS_UNKNOWN + : ref->errorCode, + buf); } void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal) From 120eaec17c53080984be3372dc392e54b4e9e6f2 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Wed, 14 Feb 2007 15:16:26 +0700 Subject: [PATCH 18/21] Bug #21033 Error 0 in readAutoIncrementValue() - do not call function if table has no autoincrement --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 30be53f1ddb..3d0d8a3f079 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3245,7 +3245,7 @@ int ha_ndbcluster::info(uint flag) if (flag & HA_STATUS_AUTO) { DBUG_PRINT("info", ("HA_STATUS_AUTO")); - if (m_table) + if (m_table && table->found_next_number_field) { Ndb *ndb= get_ndb(); From f92d3b3ab59531178c668cbe93c2fc777e5415c4 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Wed, 14 Feb 2007 15:26:06 +0700 Subject: [PATCH 19/21] backport some mysql-test ndbcluster things from 5.1 remove compiler warning EventLogger.cpp Bug#26176 NdbObjectIdMap::expand unable to expand!! 
mysqld got signal 11 - partial fix - object map to handle failed realloc --- mysql-test/ndb/ndb_config_2_node.ini | 10 +++ mysql-test/ndb/ndbcluster.sh | 97 +++++++++++++++++-------- ndb/src/common/debugger/EventLogger.cpp | 4 +- ndb/src/ndbapi/ObjectMap.hpp | 13 ++-- 4 files changed, 84 insertions(+), 40 deletions(-) diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini index a6a56376f33..aafed28dbc6 100644 --- a/mysql-test/ndb/ndb_config_2_node.ini +++ b/mysql-test/ndb/ndb_config_2_node.ini @@ -1,5 +1,6 @@ [ndbd default] NoOfReplicas= 2 +MaxNoOfConcurrentTransactions= 64 MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations DataMemory= CHOOSE_DataMemory IndexMemory= CHOOSE_IndexMemory @@ -7,6 +8,15 @@ Diskless= CHOOSE_Diskless TimeBetweenWatchDogCheck= 30000 DataDir= CHOOSE_FILESYSTEM MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes +MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes +TimeBetweenGlobalCheckpoints= 500 +NoOfFragmentLogFiles= 3 + +# +# Increase deadlock-timeout to cater for slow test-machines +# (possibly running several tests in parallell) +# +#TransactionDeadlockDetectionTimeout= 7500 [ndbd] HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index c53bf1306e6..be08781361e 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -63,12 +63,17 @@ stop_ndb= initial_ndb= status_ndb= ndb_diskless=0 +ndbd_nodes=2 +relative_config_data_dir= +opt_core= ndb_no_ord=512 +ndb_no_attr=2048 ndb_con_op=105000 ndb_dmem=80M ndb_imem=24M +VERBOSE=100 NDB_MGM_EXTRA_OPTS= NDB_MGMD_EXTRA_OPTS= NDBD_EXTRA_OPTS= @@ -89,6 +94,9 @@ while test $# -gt 0; do --debug*) flags_ndb="$flags_ndb $1" ;; + --ndbd-nodes=*) + ndbd_nodes=`echo "$1" | sed -e "s;--ndbd-nodes=;;"` + ;; --status) status_ndb=1 ;; @@ -104,6 +112,9 @@ while test $# -gt 0; do --data-dir=*) fsdir=`echo "$1" | sed -e "s;--data-dir=;;"` ;; + --relative-config-data-dir) + relative_config_data_dir=1 + ;; --port=*) port=`echo "$1" | sed -e "s;--port=;;"` ;; @@ -122,6 +133,12 @@ while test $# -gt 0; do --character-sets-dir=*) CHARSETSDIR=`echo "$1" | sed -e "s;--character-sets-dir=;;"` ;; + --core) + opt_core="--core" + ;; + --verbose=*) + VERBOSE=`echo "$1" | sed -e "s;--verbose=;;"` + ;; -- ) shift; break ;; --* ) $ECHO "Unrecognized option: $1"; exit 1 ;; * ) break ;; @@ -130,9 +147,10 @@ while test $# -gt 0; do done fs_ndb="$fsdir/ndbcluster-$port" +config_ini=ndb/ndb_config_${ndbd_nodes}_node.ini NDB_HOME= -if [ ! -x "$fsdir" ]; then +if [ ! -d "$fsdir" ]; then echo "$fsdir missing" exit 1 fi @@ -148,11 +166,15 @@ if [ ! -x "$exec_waiter" ]; then echo "$exec_waiter missing" exit 1 fi +if [ ! 
-f "$config_ini" ]; then + echo "$config_ini missing, unsupported number of nodes" + exit 1 +fi -exec_mgmtclient="$exec_mgmtclient --no-defaults $NDB_MGM_EXTRA_OPTS" -exec_mgmtsrvr="$exec_mgmtsrvr --no-defaults $NDB_MGMD_EXTRA_OPTS" -exec_ndb="$exec_ndb --no-defaults $NDBD_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR" -exec_waiter="$exec_waiter --no-defaults" +exec_mgmtclient="$exec_mgmtclient --no-defaults $opt_core $NDB_MGM_EXTRA_OPTS" +exec_mgmtsrvr="$exec_mgmtsrvr --no-defaults $opt_core $NDB_MGMD_EXTRA_OPTS" +exec_ndb="$exec_ndb --no-defaults $opt_core $NDBD_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR" +exec_waiter="$exec_waiter --no-defaults $opt_core" ndb_host="localhost" ndb_mgmd_port=$port @@ -196,18 +218,24 @@ fi # Start management server as deamon # Edit file system path and ports in config file +if [ $relative_config_data_dir ] ; then + config_fs_ndb="." +else + config_fs_ndb=$fs_ndb +fi if [ $initial_ndb ] ; then - rm -f $fs_ndb/ndb_* 2>&1 | cat > /dev/null + rm -rf $fs_ndb/ndb_* 2>&1 | cat > /dev/null sed \ + -e s,"CHOOSE_MaxNoOfAttributes","$ndb_no_attr",g \ -e s,"CHOOSE_MaxNoOfOrderedIndexes","$ndb_no_ord",g \ -e s,"CHOOSE_MaxNoOfConcurrentOperations","$ndb_con_op",g \ -e s,"CHOOSE_DataMemory","$ndb_dmem",g \ -e s,"CHOOSE_IndexMemory","$ndb_imem",g \ -e s,"CHOOSE_Diskless","$ndb_diskless",g \ -e s,"CHOOSE_HOSTNAME_".*,"$ndb_host",g \ - -e s,"CHOOSE_FILESYSTEM","$fs_ndb",g \ + -e s,"CHOOSE_FILESYSTEM","$config_fs_ndb",g \ -e s,"CHOOSE_PORT_MGM","$ndb_mgmd_port",g \ - < ndb/ndb_config_2_node.ini \ + < "$config_ini" \ > "$fs_ndb/config.ini" fi @@ -218,7 +246,7 @@ if ( cd "$fs_ndb" ; $exec_mgmtsrvr -f config.ini ) ; then :; else echo "Unable to start $exec_mgmtsrvr from `pwd`" exit 1 fi -if sleep_until_file_created $fs_ndb/ndb_3.pid 120 +if sleep_until_file_created $fs_ndb/ndb_`expr $ndbd_nodes + 1`.pid 120 then :; else exit 1 fi @@ -226,38 +254,43 @@ cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile" # Start database node -echo "Starting ndbd" -( cd "$fs_ndb" ; $exec_ndb $flags_ndb & ) -if sleep_until_file_created $fs_ndb/ndb_1.pid 120 -then :; else - stop_default_ndbcluster - exit 1 -fi -cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile" - -# Start database node - -echo "Starting ndbd" -( cd "$fs_ndb" ; $exec_ndb $flags_ndb & ) -if sleep_until_file_created $fs_ndb/ndb_2.pid 120 -then :; else - stop_default_ndbcluster - exit 1 -fi -cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile" +id=1 +while [ $id -le $ndbd_nodes ] +do + if [ `expr $VERBOSE \> 1` = 1 ] ; then + echo "Starting ndbd $id($ndbd_nodes)" + fi + ( cd "$fs_ndb" ; $exec_ndb $flags_ndb & ) + if sleep_until_file_created $fs_ndb/ndb_${id}.pid 120 + then :; else + stop_default_ndbcluster + exit 1 + fi + cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile" + id=`expr $id + 1` +done # test if Ndb Cluster starts properly -echo "Waiting for NDB data nodes to start..." -if ( $exec_waiter ) | grep "NDBT_ProgramExit: 0 - OK" > /dev/null 2>&1; then :; else - echo "Ndbcluster startup failed" +if [ `expr $VERBOSE \> 1` = 1 ] ; then + echo "Waiting for NDB data nodes to start..." 
+fi +if ( $exec_waiter ) | grep "NDBT_ProgramExit: 0 - OK" > /dev/null 2>&1 ; then :; else + if [ `expr $VERBOSE \> 0` = 1 ] ; then + echo "Ndbcluster startup failed" + fi stop_default_ndbcluster exit 1 fi +if [ `expr $VERBOSE \> 1` = 1 ] ; then + echo "Ok" +fi cat `find "$fs_ndb" -name 'ndb_*.pid'` > $fs_ndb/$pidfile -status_ndbcluster +if [ `expr $VERBOSE \> 2` = 1 ] ; then + status_ndbcluster +fi } status_ndbcluster() { diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index 39daa3effe8..535158fb7d1 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -530,8 +530,8 @@ void getTextUndoLogBlocked(QQQQ) { } void getTextTransporterError(QQQQ) { struct myTransporterError{ - int errorNum; - char errorString[256]; + Uint32 errorNum; + char errorString[256]; }; int i = 0; int lenth = 0; diff --git a/ndb/src/ndbapi/ObjectMap.hpp b/ndb/src/ndbapi/ObjectMap.hpp index 486ef08abb8..0e0c9668164 100644 --- a/ndb/src/ndbapi/ObjectMap.hpp +++ b/ndb/src/ndbapi/ObjectMap.hpp @@ -46,7 +46,7 @@ private: } * m_map; NdbMutex * m_mutex; - void expand(Uint32 newSize); + int expand(Uint32 newSize); }; inline @@ -73,9 +73,8 @@ NdbObjectIdMap::map(void * object){ // lock(); - if(m_firstFree == InvalidId){ - expand(m_expandSize); - } + if(m_firstFree == InvalidId && expand(m_expandSize)) + return InvalidId; Uint32 ff = m_firstFree; m_firstFree = m_map[ff].m_next; @@ -130,7 +129,7 @@ NdbObjectIdMap::getObject(Uint32 id){ return 0; } -inline void +inline int NdbObjectIdMap::expand(Uint32 incSize){ NdbMutex_Lock(m_mutex); Uint32 newSize = m_size + incSize; @@ -149,9 +148,11 @@ NdbObjectIdMap::expand(Uint32 incSize){ } else { - ndbout_c("NdbObjectIdMap::expand unable to expand!!"); + NdbMutex_Unlock(m_mutex); + return -1; } NdbMutex_Unlock(m_mutex); + return 0; } #endif From 92f9a37dc15fd33e4e016ef978d93a405e48fc98 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Wed, 14 Feb 2007 15:33:48 +0700 Subject: [PATCH 20/21] ndb_config_4_node.ini, ndb_config_1_node.ini: new file --- mysql-test/ndb/ndb_config_1_node.ini | 43 +++++++++++++++++++++++ mysql-test/ndb/ndb_config_4_node.ini | 52 ++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 mysql-test/ndb/ndb_config_1_node.ini create mode 100644 mysql-test/ndb/ndb_config_4_node.ini diff --git a/mysql-test/ndb/ndb_config_1_node.ini b/mysql-test/ndb/ndb_config_1_node.ini new file mode 100644 index 00000000000..7b0d2829530 --- /dev/null +++ b/mysql-test/ndb/ndb_config_1_node.ini @@ -0,0 +1,43 @@ +[ndbd default] +NoOfReplicas= 1 +MaxNoOfConcurrentTransactions= 64 +MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations +DataMemory= CHOOSE_DataMemory +IndexMemory= CHOOSE_IndexMemory +Diskless= CHOOSE_Diskless +TimeBetweenWatchDogCheck= 30000 +DataDir= CHOOSE_FILESYSTEM +MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes +MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes +TimeBetweenGlobalCheckpoints= 500 +NoOfFragmentLogFiles= 3 + +# +# Increase deadlock-timeout to cater for slow test-machines +# (possibly running several tests in parallell) +# +#TransactionDeadlockDetectionTimeout= 7500 + +[ndbd] +HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress + +[ndb_mgmd] +HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress +DataDir= CHOOSE_FILESYSTEM # +PortNumber= CHOOSE_PORT_MGM + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] diff --git 
a/mysql-test/ndb/ndb_config_4_node.ini b/mysql-test/ndb/ndb_config_4_node.ini new file mode 100644 index 00000000000..2154475aa37 --- /dev/null +++ b/mysql-test/ndb/ndb_config_4_node.ini @@ -0,0 +1,52 @@ +[ndbd default] +NoOfReplicas= 2 +MaxNoOfConcurrentTransactions= 64 +MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations +DataMemory= CHOOSE_DataMemory +IndexMemory= CHOOSE_IndexMemory +Diskless= CHOOSE_Diskless +TimeBetweenWatchDogCheck= 30000 +DataDir= CHOOSE_FILESYSTEM +MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes +MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes +TimeBetweenGlobalCheckpoints= 500 +NoOfFragmentLogFiles= 3 + +# +# Increase deadlock-timeout to cater for slow test-machines +# (possibly running several tests in parallell) +# +#TransactionDeadlockDetectionTimeout= 7500 + +[ndbd] +HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress + +[ndbd] +HostName= CHOOSE_HOSTNAME_2 # hostname is a valid network adress + +[ndbd] +HostName= CHOOSE_HOSTNAME_3 # hostname is a valid network adress + +[ndbd] +HostName= CHOOSE_HOSTNAME_4 # hostname is a valid network adress + +[ndb_mgmd] +HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress +DataDir= CHOOSE_FILESYSTEM # +PortNumber= CHOOSE_PORT_MGM + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] + +[mysqld] From 8a146f846122235d2f34b924c23474c74735e4b3 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.mysql.com" <> Date: Wed, 14 Feb 2007 20:17:14 +0700 Subject: [PATCH 21/21] corrected manual merge --- storage/ndb/src/ndbapi/TransporterFacade.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp index 2402c979620..eabfc6bc371 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.cpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp @@ -1527,7 +1527,8 @@ SignalSender::sendSignal(Uint16 nodeId, const SimpleSignal * s){ signalLogger.flushSignalLog(); } #endif - + assert(getNodeInfo(nodeId).m_api_reg_conf == true || + s->readSignalNumber() == GSN_API_REGREQ); return theFacade->theTransporterRegistry->prepareSend(&s->header, 1, // JBB &s->theData[0],