From 47c877bdf83d4ddf620be82751c797d33bfeb37e Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Sep 2004 11:49:18 +0200 Subject: [PATCH 01/55] ndb_init() to all ndb programs ndb/examples/ndbapi_async_example/ndbapi_async.cpp: ndb_init() ndb/examples/ndbapi_example1/ndbapi_example1.cpp: ndb_init() ndb/examples/ndbapi_example2/ndbapi_example2.cpp: ndb_init() ndb/examples/ndbapi_example3/ndbapi_example3.cpp: ndb_init() ndb/examples/ndbapi_example4/ndbapi_example4.cpp: ndb_init() ndb/examples/ndbapi_example5/ndbapi_example5.cpp: ndb_init() ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp: ndb_init() ndb/examples/select_all/select_all.cpp: ndb_init() ndb/include/ndb_global.h: ndb_init() ndb/src/common/util/Makefile.am: ndb_init() ndb/src/kernel/blocks/backup/read.cpp: ndb_init() ndb/src/kernel/blocks/backup/restore/main.cpp: ndb_init() ndb/src/kernel/main.cpp: ndb_init() ndb/src/kernel/vm/Configuration.cpp: ndb_init() ndb/src/mgmclient/main.cpp: ndb_init() ndb/src/mgmsrv/main.cpp: ndb_init() ndb/src/mgmsrv/mkconfig/mkconfig.cpp: ndb_init() ndb/src/ndbapi/Ndbinit.cpp: ndb_init() ndb/test/ndbapi/acid.cpp: ndb_init() ndb/test/ndbapi/acid2.cpp: ndb_init() ndb/test/ndbapi/benchronja.cpp: ndb_init() ndb/test/ndbapi/bulk_copy.cpp: ndb_init() ndb/test/ndbapi/cdrserver.cpp: ndb_init() ndb/test/ndbapi/celloDb.cpp: ndb_init() ndb/test/ndbapi/create_all_tabs.cpp: ndb_init() ndb/test/ndbapi/create_tab.cpp: ndb_init() ndb/test/ndbapi/drop_all_tabs.cpp: ndb_init() ndb/test/ndbapi/flexAsynch.cpp: ndb_init() ndb/test/ndbapi/flexBench.cpp: ndb_init() ndb/test/ndbapi/flexHammer.cpp: ndb_init() ndb/test/ndbapi/flexScan.cpp: ndb_init() ndb/test/ndbapi/flexTT.cpp: ndb_init() ndb/test/ndbapi/flexTimedAsynch.cpp: ndb_init() ndb/test/ndbapi/flex_bench_mysql.cpp: ndb_init() ndb/test/ndbapi/index.cpp: ndb_init() ndb/test/ndbapi/index2.cpp: ndb_init() ndb/test/ndbapi/initronja.cpp: ndb_init() ndb/test/ndbapi/interpreterInTup.cpp: ndb_init() ndb/test/ndbapi/mainAsyncGenerator.cpp: ndb_init() ndb/test/ndbapi/msa.cpp: ndb_init() ndb/test/ndbapi/restarter.cpp: ndb_init() ndb/test/ndbapi/restarter2.cpp: ndb_init() ndb/test/ndbapi/restarts.cpp: ndb_init() ndb/test/ndbapi/size.cpp: ndb_init() ndb/test/ndbapi/slow_select.cpp: ndb_init() ndb/test/ndbapi/testBackup.cpp: ndb_init() ndb/test/ndbapi/testBasic.cpp: ndb_init() ndb/test/ndbapi/testBasicAsynch.cpp: ndb_init() ndb/test/ndbapi/testBlobs.cpp: ndb_init() ndb/test/ndbapi/testDataBuffers.cpp: ndb_init() ndb/test/ndbapi/testDeadlock.cpp: ndb_init() ndb/test/ndbapi/testDict.cpp: ndb_init() ndb/test/ndbapi/testGrep.cpp: ndb_init() ndb/test/ndbapi/testGrepVerify.cpp: ndb_init() ndb/test/ndbapi/testIndex.cpp: ndb_init() ndb/test/ndbapi/testInterpreter.cpp: ndb_init() ndb/test/ndbapi/testMgm.cpp: ndb_init() ndb/test/ndbapi/bank/bankCreator.cpp: ndb_init() ndb/test/ndbapi/bank/bankMakeGL.cpp: ndb_init() ndb/test/ndbapi/bank/bankSumAccounts.cpp: ndb_init() ndb/test/ndbapi/bank/bankTimer.cpp: ndb_init() ndb/test/ndbapi/bank/bankTransactionMaker.cpp: ndb_init() ndb/test/ndbapi/bank/bankValidateAllGLs.cpp: ndb_init() ndb/test/ndbapi/bank/testBank.cpp: ndb_init() ndb/test/ndbapi/testNdbApi.cpp: ndb_init() ndb/test/ndbapi/testNodeRestart.cpp: ndb_init() ndb/test/ndbapi/testOIBasic.cpp: ndb_init() ndb/test/ndbapi/testOperations.cpp: ndb_init() ndb/test/ndbapi/testOrderedIndex.cpp: ndb_init() ndb/test/ndbapi/testReadPerf.cpp: ndb_init() ndb/test/ndbapi/testRestartGci.cpp: ndb_init() ndb/test/ndbapi/testScan.cpp: ndb_init() ndb/test/ndbapi/testScanInterpreter.cpp: ndb_init() 
ndb/test/ndbapi/testScanPerf.cpp: ndb_init() ndb/test/ndbapi/testSystemRestart.cpp: ndb_init() ndb/test/ndbapi/testTimeout.cpp: ndb_init() ndb/test/ndbapi/testTransactions.cpp: ndb_init() ndb/test/ndbapi/test_event.cpp: ndb_init() ndb/test/run-test/main.cpp: ndb_init() ndb/test/src/NDBT_Test.cpp: ndb_init() ndb/test/tools/copy_tab.cpp: ndb_init() ndb/test/tools/cpcc.cpp: ndb_init() ndb/test/tools/create_index.cpp: ndb_init() ndb/test/tools/hugoCalculator.cpp: ndb_init() ndb/test/tools/hugoFill.cpp: ndb_init() ndb/test/tools/hugoLoad.cpp: ndb_init() ndb/test/tools/hugoLockRecords.cpp: ndb_init() ndb/test/tools/hugoPkDelete.cpp: ndb_init() ndb/test/tools/hugoPkRead.cpp: ndb_init() ndb/test/tools/hugoPkReadRecord.cpp: ndb_init() ndb/test/tools/hugoPkUpdate.cpp: ndb_init() ndb/test/tools/hugoScanRead.cpp: ndb_init() ndb/test/tools/hugoScanUpdate.cpp: ndb_init() ndb/test/tools/restart.cpp: ndb_init() ndb/test/tools/transproxy.cpp: ndb_init() ndb/test/tools/verify_index.cpp: ndb_init() ndb/tools/delete_all.cpp: ndb_init() ndb/tools/desc.cpp: ndb_init() ndb/tools/drop_index.cpp: ndb_init() ndb/tools/drop_tab.cpp: ndb_init() ndb/tools/listTables.cpp: ndb_init() ndb/tools/ndbsql.cpp: ndb_init() ndb/tools/select_all.cpp: ndb_init() ndb/tools/select_count.cpp: ndb_init() ndb/tools/waiter.cpp: ndb_init() --- .../ndbapi_async_example/ndbapi_async.cpp | 1 + .../ndbapi_example1/ndbapi_example1.cpp | 1 + .../ndbapi_example2/ndbapi_example2.cpp | 1 + .../ndbapi_example3/ndbapi_example3.cpp | 1 + .../ndbapi_example4/ndbapi_example4.cpp | 1 + .../ndbapi_example5/ndbapi_example5.cpp | 1 + .../ndbapi_scan_example/ndbapi_scan.cpp | 1 + ndb/examples/select_all/select_all.cpp | 1 + ndb/include/ndb_global.h | 3 ++ ndb/src/common/util/Makefile.am | 2 +- ndb/src/common/util/ndb_init.c | 29 +++++++++++++++++++ ndb/src/kernel/blocks/backup/read.cpp | 1 + ndb/src/kernel/blocks/backup/restore/main.cpp | 1 + ndb/src/kernel/main.cpp | 1 + ndb/src/kernel/vm/Configuration.cpp | 1 - ndb/src/mgmclient/main.cpp | 1 + ndb/src/mgmsrv/main.cpp | 2 +- ndb/src/mgmsrv/mkconfig/mkconfig.cpp | 1 + ndb/src/ndbapi/Ndbinit.cpp | 1 - ndb/test/ndbapi/acid.cpp | 1 + ndb/test/ndbapi/acid2.cpp | 1 + ndb/test/ndbapi/bank/bankCreator.cpp | 1 + ndb/test/ndbapi/bank/bankMakeGL.cpp | 1 + ndb/test/ndbapi/bank/bankSumAccounts.cpp | 1 + ndb/test/ndbapi/bank/bankTimer.cpp | 1 + ndb/test/ndbapi/bank/bankTransactionMaker.cpp | 1 + ndb/test/ndbapi/bank/bankValidateAllGLs.cpp | 1 + ndb/test/ndbapi/bank/testBank.cpp | 1 + ndb/test/ndbapi/benchronja.cpp | 1 + ndb/test/ndbapi/bulk_copy.cpp | 1 + ndb/test/ndbapi/cdrserver.cpp | 1 + ndb/test/ndbapi/celloDb.cpp | 1 + ndb/test/ndbapi/create_all_tabs.cpp | 1 + ndb/test/ndbapi/create_tab.cpp | 1 + ndb/test/ndbapi/drop_all_tabs.cpp | 1 + ndb/test/ndbapi/flexAsynch.cpp | 1 + ndb/test/ndbapi/flexBench.cpp | 1 + ndb/test/ndbapi/flexHammer.cpp | 1 + ndb/test/ndbapi/flexScan.cpp | 1 + ndb/test/ndbapi/flexTT.cpp | 1 + ndb/test/ndbapi/flexTimedAsynch.cpp | 1 + ndb/test/ndbapi/flex_bench_mysql.cpp | 1 + ndb/test/ndbapi/index.cpp | 1 + ndb/test/ndbapi/index2.cpp | 1 + ndb/test/ndbapi/initronja.cpp | 1 + ndb/test/ndbapi/interpreterInTup.cpp | 1 + ndb/test/ndbapi/mainAsyncGenerator.cpp | 1 + ndb/test/ndbapi/msa.cpp | 1 + ndb/test/ndbapi/restarter.cpp | 1 + ndb/test/ndbapi/restarter2.cpp | 1 + ndb/test/ndbapi/restarts.cpp | 1 + ndb/test/ndbapi/size.cpp | 1 + ndb/test/ndbapi/slow_select.cpp | 1 + ndb/test/ndbapi/testBackup.cpp | 1 + ndb/test/ndbapi/testBasic.cpp | 1 + ndb/test/ndbapi/testBasicAsynch.cpp | 1 + 
ndb/test/ndbapi/testBlobs.cpp | 1 + ndb/test/ndbapi/testDataBuffers.cpp | 1 + ndb/test/ndbapi/testDeadlock.cpp | 1 + ndb/test/ndbapi/testDict.cpp | 1 + ndb/test/ndbapi/testGrep.cpp | 1 + ndb/test/ndbapi/testGrepVerify.cpp | 1 + ndb/test/ndbapi/testIndex.cpp | 1 + ndb/test/ndbapi/testInterpreter.cpp | 1 + ndb/test/ndbapi/testMgm.cpp | 1 + ndb/test/ndbapi/testNdbApi.cpp | 1 + ndb/test/ndbapi/testNodeRestart.cpp | 1 + ndb/test/ndbapi/testOIBasic.cpp | 1 + ndb/test/ndbapi/testOperations.cpp | 1 + ndb/test/ndbapi/testOrderedIndex.cpp | 1 + ndb/test/ndbapi/testReadPerf.cpp | 1 + ndb/test/ndbapi/testRestartGci.cpp | 1 + ndb/test/ndbapi/testScan.cpp | 1 + ndb/test/ndbapi/testScanInterpreter.cpp | 1 + ndb/test/ndbapi/testScanPerf.cpp | 1 + ndb/test/ndbapi/testSystemRestart.cpp | 1 + ndb/test/ndbapi/testTimeout.cpp | 1 + ndb/test/ndbapi/testTransactions.cpp | 1 + ndb/test/ndbapi/test_event.cpp | 1 + ndb/test/run-test/main.cpp | 1 + ndb/test/src/NDBT_Test.cpp | 1 - ndb/test/tools/copy_tab.cpp | 1 + ndb/test/tools/cpcc.cpp | 1 + ndb/test/tools/create_index.cpp | 1 + ndb/test/tools/hugoCalculator.cpp | 1 + ndb/test/tools/hugoFill.cpp | 1 + ndb/test/tools/hugoLoad.cpp | 1 + ndb/test/tools/hugoLockRecords.cpp | 1 + ndb/test/tools/hugoPkDelete.cpp | 1 + ndb/test/tools/hugoPkRead.cpp | 1 + ndb/test/tools/hugoPkReadRecord.cpp | 1 + ndb/test/tools/hugoPkUpdate.cpp | 1 + ndb/test/tools/hugoScanRead.cpp | 1 + ndb/test/tools/hugoScanUpdate.cpp | 1 + ndb/test/tools/restart.cpp | 1 + ndb/test/tools/transproxy.cpp | 1 + ndb/test/tools/verify_index.cpp | 1 + ndb/tools/delete_all.cpp | 1 + ndb/tools/desc.cpp | 1 + ndb/tools/drop_index.cpp | 1 + ndb/tools/drop_tab.cpp | 1 + ndb/tools/listTables.cpp | 2 +- ndb/tools/ndbsql.cpp | 1 + ndb/tools/select_all.cpp | 2 +- ndb/tools/select_count.cpp | 1 + ndb/tools/waiter.cpp | 1 + 106 files changed, 133 insertions(+), 7 deletions(-) create mode 100644 ndb/src/common/util/ndb_init.c diff --git a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp index 7abebcc832d..c34d8de9ec5 100644 --- a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp +++ b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp @@ -454,6 +454,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData) int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database /******************************************* diff --git a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp index 879d86de824..53d23dd7133 100644 --- a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp +++ b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp @@ -44,6 +44,7 @@ int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database NdbDictionary::Table myTable; NdbDictionary::Column myColumn; diff --git a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp b/ndb/examples/ndbapi_example2/ndbapi_example2.cpp index 1c61721c037..95a7bae66b8 100644 --- a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp +++ b/ndb/examples/ndbapi_example2/ndbapi_example2.cpp @@ -39,6 +39,7 @@ static void callback(int result, NdbConnection* NdbObject, void* aObject); int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB_2" ); // Object representing the database NdbConnection* myNdbConnection[2]; // For transactions diff --git a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp b/ndb/examples/ndbapi_example3/ndbapi_example3.cpp index 36d2cf1608c..91d9ff122ba 100644 --- 
a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp +++ b/ndb/examples/ndbapi_example3/ndbapi_example3.cpp @@ -176,6 +176,7 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) { int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database /******************************************* diff --git a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp index 520172b9b0c..a37822dbf0c 100644 --- a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp +++ b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp @@ -44,6 +44,7 @@ int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database NdbDictionary::Table myTable; NdbDictionary::Column myColumn; diff --git a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp b/ndb/examples/ndbapi_example5/ndbapi_example5.cpp index a9d3099883c..77f74e7bb63 100644 --- a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp +++ b/ndb/examples/ndbapi_example5/ndbapi_example5.cpp @@ -65,6 +65,7 @@ int myCreateEvent(Ndb* myNdb, int main() { + ndb_init(); Ndb* myNdb = myCreateNdb(); NdbDictionary::Dictionary *myDict; diff --git a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp index 7c3a66326c6..98cfd1ef0d5 100644 --- a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp +++ b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp @@ -761,6 +761,7 @@ int scan_print(Ndb * myNdb, int parallelism, int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database diff --git a/ndb/examples/select_all/select_all.cpp b/ndb/examples/select_all/select_all.cpp index 3cdbdc47e62..bd25fb60128 100644 --- a/ndb/examples/select_all/select_all.cpp +++ b/ndb/examples/select_all/select_all.cpp @@ -112,6 +112,7 @@ const char* ResultSetContainer::getAttrName(int i) const {return m_names[i];} int main(int argc, const char** argv) { + ndb_init(); Ndb* myNdb = new Ndb("ndbapi_example4"); // Object representing the database NdbConnection* myNdbConnection; // For transactions NdbOperation* myNdbOperation; // For operations diff --git a/ndb/include/ndb_global.h b/ndb/include/ndb_global.h index 0ae781ba5c2..19bd387c457 100644 --- a/ndb/include/ndb_global.h +++ b/ndb/include/ndb_global.h @@ -76,6 +76,9 @@ extern "C" { #include +/* call in main() - does not return on error */ +extern int ndb_init(void); + #ifndef HAVE_STRDUP extern char * strdup(const char *s); #endif diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am index 678added01e..efb249dd330 100644 --- a/ndb/src/common/util/Makefile.am +++ b/ndb/src/common/util/Makefile.am @@ -9,7 +9,7 @@ libgeneral_la_SOURCES = \ NdbSqlUtil.cpp new.cpp \ uucode.c random.c getarg.c version.c \ strdup.c strlcat.c strlcpy.c \ - ConfigValues.cpp + ConfigValues.cpp ndb_init.c include $(top_srcdir)/ndb/config/common.mk.am include $(top_srcdir)/ndb/config/type_util.mk.am diff --git a/ndb/src/common/util/ndb_init.c b/ndb/src/common/util/ndb_init.c new file mode 100644 index 00000000000..b160ed3636b --- /dev/null +++ b/ndb/src/common/util/ndb_init.c @@ -0,0 +1,29 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <my_sys.h>
+
+int
+ndb_init()
+{
+  if (my_init()) {
+    const char* err = "my_init() failed - exit\n";
+    write(2, err, strlen(err));
+    exit(1);
+  }
+  return 0;
+}
diff --git a/ndb/src/kernel/blocks/backup/read.cpp b/ndb/src/kernel/blocks/backup/read.cpp
index 921c352ea13..89cc08ee9de 100644
--- a/ndb/src/kernel/blocks/backup/read.cpp
+++ b/ndb/src/kernel/blocks/backup/read.cpp
@@ -48,6 +48,7 @@ static Uint32 logEntryNo;
 
 
 int main(int argc, const char * argv[]){
+  ndb_init();
   if(argc <= 1){
     printf("Usage: %s ", argv[0]);
     exit(1);
diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp
index a330aa51373..077e9f4340c 100644
--- a/ndb/src/kernel/blocks/backup/restore/main.cpp
+++ b/ndb/src/kernel/blocks/backup/restore/main.cpp
@@ -206,6 +206,7 @@ free_data_callback()
 int
 main(int argc, const char** argv)
 {
+  ndb_init();
   if (!readArguments(argc, argv))
   {
     return -1;
diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp
index 491733975a8..20844db75b6 100644
--- a/ndb/src/kernel/main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -56,6 +56,7 @@ const char programName[] = "NDB Kernel";
 
 
 NDB_MAIN(ndb_kernel){
+  ndb_init();
   // Print to stdout/console
   g_eventLogger.createConsoleHandler();
   g_eventLogger.setCategory("NDB");
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp
index 8907cb9f640..1dbd0477d54 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/ndb/src/kernel/vm/Configuration.cpp
@@ -105,7 +105,6 @@ Configuration::init(int argc, const char** argv){
   }
 
   // check for depricated flag '-i'
-  my_init();
 #ifndef DBUG_OFF
   if (debug_option)
     DBUG_PUSH(debug_option);
diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp
index df6659df0b1..69f968677cd 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/ndb/src/mgmclient/main.cpp
@@ -44,6 +44,7 @@ handler(int sig){
 }
 
 int main(int argc, const char** argv){
+  ndb_init();
   int optind = 0;
   const char *_host = 0;
   int _port = 0;
diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp
index cecf1c1e499..484ca941414 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/ndb/src/mgmsrv/main.cpp
@@ -129,6 +129,7 @@ int num_args = sizeof(args) / sizeof(args[0]);
  * MAIN
  */
 NDB_MAIN(mgmsrv){
+  ndb_init();
   /**
    * OSE specific. Enable shared ownership of file system resources.
* This is needed in order to use the cluster log since the events @@ -151,7 +152,6 @@ NDB_MAIN(mgmsrv){ glob.daemon= 0; } - my_init(); #ifndef DBUG_OFF if (debug_option) DBUG_PUSH(debug_option); diff --git a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp index 3b2046d7b49..28823aaa35e 100644 --- a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp +++ b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp @@ -32,6 +32,7 @@ void usage(const char * prg){ NDB_COMMAND(mkconfig, "mkconfig", "mkconfig", "Make a binary configuration from a config file", 16384){ + ndb_init(); if(argc < 3){ usage(argv[0]); return 0; diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp index 8e3bf69c7f5..885c6e49eda 100644 --- a/ndb/src/ndbapi/Ndbinit.cpp +++ b/ndb/src/ndbapi/Ndbinit.cpp @@ -62,7 +62,6 @@ Ndb::Ndb( const char* aDataBase , const char* aSchema) { abort(); // old and new Ndb constructor used mixed theNoOfNdbObjects++; if (global_ndb_cluster_connection == 0) { - my_init(); global_ndb_cluster_connection= new Ndb_cluster_connection(ndbConnectString); global_ndb_cluster_connection->connect(); } diff --git a/ndb/test/ndbapi/acid.cpp b/ndb/test/ndbapi/acid.cpp index 157b3c7b3ef..3eb1625be26 100644 --- a/ndb/test/ndbapi/acid.cpp +++ b/ndb/test/ndbapi/acid.cpp @@ -434,6 +434,7 @@ extern "C" void* NdbThreadFuncRead(void* pArg) NDB_COMMAND(acid, "acid", "acid", "acid", 65535) { + ndb_init(); long nSeconds = 60; int rc = NDBT_OK; diff --git a/ndb/test/ndbapi/acid2.cpp b/ndb/test/ndbapi/acid2.cpp index 434a0450daa..7bd7ec00ac5 100644 --- a/ndb/test/ndbapi/acid2.cpp +++ b/ndb/test/ndbapi/acid2.cpp @@ -610,6 +610,7 @@ extern "C" void* ThreadFunc(void*) int main(int argc, char* argv[]) { + ndb_init(); Uint32 nSeconds = 1; Uint32 nThread = 1; diff --git a/ndb/test/ndbapi/bank/bankCreator.cpp b/ndb/test/ndbapi/bank/bankCreator.cpp index 5331ec6ba69..301d8bda6d2 100644 --- a/ndb/test/ndbapi/bank/bankCreator.cpp +++ b/ndb/test/ndbapi/bank/bankCreator.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/bankMakeGL.cpp b/ndb/test/ndbapi/bank/bankMakeGL.cpp index 54bc559fbf9..9e2762ed8ae 100644 --- a/ndb/test/ndbapi/bank/bankMakeGL.cpp +++ b/ndb/test/ndbapi/bank/bankMakeGL.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/bankSumAccounts.cpp b/ndb/test/ndbapi/bank/bankSumAccounts.cpp index c0a903f9034..b576161b27b 100644 --- a/ndb/test/ndbapi/bank/bankSumAccounts.cpp +++ b/ndb/test/ndbapi/bank/bankSumAccounts.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/bankTimer.cpp b/ndb/test/ndbapi/bank/bankTimer.cpp index ba3165fccb4..874afd9c21e 100644 --- a/ndb/test/ndbapi/bank/bankTimer.cpp +++ b/ndb/test/ndbapi/bank/bankTimer.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; int _wait = 30; diff --git a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp index fe9b53e0c8d..e5ff9aeb918 100644 --- a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp +++ b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; int _wait = 20; diff --git a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp index f9d974bb5f7..cf298ecc8e3 100644 --- 
a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp +++ b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/testBank.cpp b/ndb/test/ndbapi/bank/testBank.cpp index 77ac1172d7c..3ef2799cd3c 100644 --- a/ndb/test/ndbapi/bank/testBank.cpp +++ b/ndb/test/ndbapi/bank/testBank.cpp @@ -141,6 +141,7 @@ TESTCASE("Bank", NDBT_TESTSUITE_END(testBank); int main(int argc, const char** argv){ + ndb_init(); // Tables should not be auto created testBank.setCreateTable(false); diff --git a/ndb/test/ndbapi/benchronja.cpp b/ndb/test/ndbapi/benchronja.cpp index ce0aee35e8f..91b2a041186 100644 --- a/ndb/test/ndbapi/benchronja.cpp +++ b/ndb/test/ndbapi/benchronja.cpp @@ -66,6 +66,7 @@ static int ThreadReady[MAXTHREADS]; static int ThreadStart[MAXTHREADS]; NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){ + ndb_init(); ThreadNdb tabThread[MAXTHREADS]; int i = 0 ; diff --git a/ndb/test/ndbapi/bulk_copy.cpp b/ndb/test/ndbapi/bulk_copy.cpp index 18881cae216..8821a92fb27 100644 --- a/ndb/test/ndbapi/bulk_copy.cpp +++ b/ndb/test/ndbapi/bulk_copy.cpp @@ -221,6 +221,7 @@ int insertFile(Ndb* pNdb, int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; int _help = 0; diff --git a/ndb/test/ndbapi/cdrserver.cpp b/ndb/test/ndbapi/cdrserver.cpp index 8354d28f53f..8d15061e94b 100644 --- a/ndb/test/ndbapi/cdrserver.cpp +++ b/ndb/test/ndbapi/cdrserver.cpp @@ -113,6 +113,7 @@ using namespace std; int main(int argc, const char** argv) { + ndb_init(); /******** NDB ***********/ /* Ndb MyNdb( "TEST_DB" ); diff --git a/ndb/test/ndbapi/celloDb.cpp b/ndb/test/ndbapi/celloDb.cpp index ec61e783585..2d6401c355a 100644 --- a/ndb/test/ndbapi/celloDb.cpp +++ b/ndb/test/ndbapi/celloDb.cpp @@ -73,6 +73,7 @@ static int failed = 0 ; NDB_COMMAND(celloDb, "celloDb", "celloDb", "celloDb", 65535) { + ndb_init(); int tTableId; int i; diff --git a/ndb/test/ndbapi/create_all_tabs.cpp b/ndb/test/ndbapi/create_all_tabs.cpp index 55d04888144..97236b98b36 100644 --- a/ndb/test/ndbapi/create_all_tabs.cpp +++ b/ndb/test/ndbapi/create_all_tabs.cpp @@ -25,6 +25,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _temp = false; int _help = 0; diff --git a/ndb/test/ndbapi/create_tab.cpp b/ndb/test/ndbapi/create_tab.cpp index c2e3b7f64ea..f3f18982ed0 100644 --- a/ndb/test/ndbapi/create_tab.cpp +++ b/ndb/test/ndbapi/create_tab.cpp @@ -25,6 +25,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _temp = false; int _help = 0; diff --git a/ndb/test/ndbapi/drop_all_tabs.cpp b/ndb/test/ndbapi/drop_all_tabs.cpp index 59c57396acd..c024a81a5e6 100644 --- a/ndb/test/ndbapi/drop_all_tabs.cpp +++ b/ndb/test/ndbapi/drop_all_tabs.cpp @@ -23,6 +23,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp index 9192ec21b93..8c0ba46130c 100644 --- a/ndb/test/ndbapi/flexAsynch.cpp +++ b/ndb/test/ndbapi/flexAsynch.cpp @@ -145,6 +145,7 @@ tellThreads(StartType what) NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535) { + ndb_init(); ThreadNdb* pThreadData; int tLoops=0, i; int returnValue = NDBT_OK; diff --git a/ndb/test/ndbapi/flexBench.cpp b/ndb/test/ndbapi/flexBench.cpp index 38c8f6e280f..b19944498f4 100644 --- a/ndb/test/ndbapi/flexBench.cpp +++ b/ndb/test/ndbapi/flexBench.cpp @@ -281,6 +281,7 @@ tellThreads(ThreadData* pt, 
StartType what) NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535) { + ndb_init(); ThreadData* pThreadsData; int tLoops = 0, i; int returnValue = NDBT_OK; diff --git a/ndb/test/ndbapi/flexHammer.cpp b/ndb/test/ndbapi/flexHammer.cpp index c1c47923de9..80cc7c5a53f 100644 --- a/ndb/test/ndbapi/flexHammer.cpp +++ b/ndb/test/ndbapi/flexHammer.cpp @@ -178,6 +178,7 @@ tellThreads(ThreadNdb* threadArrayP, const StartType what) NDB_COMMAND(flexHammer, "flexHammer", "flexHammer", "flexHammer", 65535) //main(int argc, const char** argv) { + ndb_init(); ThreadNdb* pThreads = NULL; // Pointer to thread data array Ndb* pMyNdb = NULL; // Pointer to Ndb object int tLoops = 0; diff --git a/ndb/test/ndbapi/flexScan.cpp b/ndb/test/ndbapi/flexScan.cpp index 5b5b4dde730..b09d71fb010 100644 --- a/ndb/test/ndbapi/flexScan.cpp +++ b/ndb/test/ndbapi/flexScan.cpp @@ -297,6 +297,7 @@ static int checkThreadResults(ThreadNdb *threadArrayP, char *action) NDB_COMMAND(flexScan, "flexScan", "flexScan", "flexScan", 65535) { + ndb_init(); ThreadNdb* pThreads = NULL; Ndb* pMyNdb = NULL; int tLoops = 0; diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp index c0ff31d1677..162fc080218 100644 --- a/ndb/test/ndbapi/flexTT.cpp +++ b/ndb/test/ndbapi/flexTT.cpp @@ -171,6 +171,7 @@ tellThreads(StartType what) NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535) { + ndb_init(); ThreadNdb* pThreadData; int returnValue = NDBT_OK; int i; diff --git a/ndb/test/ndbapi/flexTimedAsynch.cpp b/ndb/test/ndbapi/flexTimedAsynch.cpp index 761be53fdd3..27380cc79fd 100644 --- a/ndb/test/ndbapi/flexTimedAsynch.cpp +++ b/ndb/test/ndbapi/flexTimedAsynch.cpp @@ -174,6 +174,7 @@ void deleteAttributeSpace(){ NDB_COMMAND(flexTimedAsynch, "flexTimedAsynch", "flexTimedAsynch [-tpoilcas]", "flexTimedAsynch", 65535) { + ndb_init(); ThreadNdb tabThread[MAXTHREADS]; int tLoops=0; int returnValue; diff --git a/ndb/test/ndbapi/flex_bench_mysql.cpp b/ndb/test/ndbapi/flex_bench_mysql.cpp index 7cc883ab3e6..8e1fbcd9058 100644 --- a/ndb/test/ndbapi/flex_bench_mysql.cpp +++ b/ndb/test/ndbapi/flex_bench_mysql.cpp @@ -308,6 +308,7 @@ tellThreads(ThreadData* pt, StartType what) NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535) { + ndb_init(); ThreadData* pThreadsData; int tLoops = 0; int returnValue = NDBT_OK; diff --git a/ndb/test/ndbapi/index.cpp b/ndb/test/ndbapi/index.cpp index 508186de529..ed34fc19f43 100644 --- a/ndb/test/ndbapi/index.cpp +++ b/ndb/test/ndbapi/index.cpp @@ -771,6 +771,7 @@ static void dropIndex(Ndb &myNdb, unsigned int noOfIndexes) NDB_COMMAND(indexTest, "indexTest", "indexTest", "indexTest", 65535) { + ndb_init(); bool createTableOp, createIndexOp, dropIndexOp, insertOp, updateOp, deleteOp, readOp, readIndexOp, updateIndexOp, deleteIndexOp, twoKey, longKey; unsigned int noOfTuples = 1; unsigned int noOfOperations = 1; diff --git a/ndb/test/ndbapi/index2.cpp b/ndb/test/ndbapi/index2.cpp index e49113d2f1b..fb2275605d8 100644 --- a/ndb/test/ndbapi/index2.cpp +++ b/ndb/test/ndbapi/index2.cpp @@ -608,6 +608,7 @@ static void dropIndex(Ndb &myNdb, unsigned int noOfIndexes) NDB_COMMAND(indexTest, "indexTest", "indexTest", "indexTest", 65535) { + ndb_init(); bool createTableOp, createIndexOp, dropIndexOp, insertOp, updateOp, deleteOp, readOp, readIndexOp, updateIndexOp, deleteIndexOp, twoKey, longKey; unsigned int noOfTuples = 1; unsigned int noOfOperations = 1; diff --git a/ndb/test/ndbapi/initronja.cpp b/ndb/test/ndbapi/initronja.cpp index b3215104822..3ce274e4319 100644 --- 
a/ndb/test/ndbapi/initronja.cpp +++ b/ndb/test/ndbapi/initronja.cpp @@ -46,6 +46,7 @@ static char attrName[MAXATTR][MAXSTRLEN]; inline int InsertRecords(Ndb*, int) ; NDB_COMMAND(initronja, "initronja", "initronja", "initronja", 65535){ + ndb_init(); Ndb* pNdb = NULL ; NdbSchemaCon *MySchemaTransaction = NULL ; diff --git a/ndb/test/ndbapi/interpreterInTup.cpp b/ndb/test/ndbapi/interpreterInTup.cpp index 47960cd5d12..20d84e6e96d 100644 --- a/ndb/test/ndbapi/interpreterInTup.cpp +++ b/ndb/test/ndbapi/interpreterInTup.cpp @@ -105,6 +105,7 @@ int bTestPassed = 0; int main(int argc, const char** argv) { + ndb_init(); int operationType = 0; int tupTest = 0; diff --git a/ndb/test/ndbapi/mainAsyncGenerator.cpp b/ndb/test/ndbapi/mainAsyncGenerator.cpp index f613c66d07b..16cb50e160f 100644 --- a/ndb/test/ndbapi/mainAsyncGenerator.cpp +++ b/ndb/test/ndbapi/mainAsyncGenerator.cpp @@ -282,6 +282,7 @@ threadRoutine(void *arg) NDB_COMMAND(DbAsyncGenerator, "DbAsyncGenerator", "DbAsyncGenerator", "DbAsyncGenerator", 65535) { + ndb_init(); int i; int j; int k; diff --git a/ndb/test/ndbapi/msa.cpp b/ndb/test/ndbapi/msa.cpp index 7a734f9cb79..e39f7a8c64a 100644 --- a/ndb/test/ndbapi/msa.cpp +++ b/ndb/test/ndbapi/msa.cpp @@ -971,6 +971,7 @@ void ShowHelp(const char* szCmd) int main(int argc, char* argv[]) { + ndb_init(); int iRes = -1; g_nNumThreads = 0; g_nMaxCallsPerSecond = 0; diff --git a/ndb/test/ndbapi/restarter.cpp b/ndb/test/ndbapi/restarter.cpp index 9a522f5dcac..d6831494b48 100644 --- a/ndb/test/ndbapi/restarter.cpp +++ b/ndb/test/ndbapi/restarter.cpp @@ -28,6 +28,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _loops = 10; diff --git a/ndb/test/ndbapi/restarter2.cpp b/ndb/test/ndbapi/restarter2.cpp index f2bcf6f8e7b..846748a7bba 100644 --- a/ndb/test/ndbapi/restarter2.cpp +++ b/ndb/test/ndbapi/restarter2.cpp @@ -26,6 +26,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _loops = 10; diff --git a/ndb/test/ndbapi/restarts.cpp b/ndb/test/ndbapi/restarts.cpp index 0ec2883d53c..184e754de4a 100644 --- a/ndb/test/ndbapi/restarts.cpp +++ b/ndb/test/ndbapi/restarts.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _restartName = NULL; int _loops = 1; diff --git a/ndb/test/ndbapi/size.cpp b/ndb/test/ndbapi/size.cpp index c506771ebde..ff178b11d68 100644 --- a/ndb/test/ndbapi/size.cpp +++ b/ndb/test/ndbapi/size.cpp @@ -19,6 +19,7 @@ int main(void) { + ndb_init(); printf("cdrstruct=%d\n",sizeof(struct cdr_record)); printf("long int=%d\n",sizeof(long int)); printf("int=%d\n",sizeof(int)); diff --git a/ndb/test/ndbapi/slow_select.cpp b/ndb/test/ndbapi/slow_select.cpp index a953e1539d0..625dbc34457 100644 --- a/ndb/test/ndbapi/slow_select.cpp +++ b/ndb/test/ndbapi/slow_select.cpp @@ -36,6 +36,7 @@ static void lookup(); int main(void){ + ndb_init(); Ndb g_ndb("test"); g_ndb.init(1024); diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp index d328a7db292..a3fc0fdc5bf 100644 --- a/ndb/test/ndbapi/testBackup.cpp +++ b/ndb/test/ndbapi/testBackup.cpp @@ -473,6 +473,7 @@ TESTCASE("FailSlave", NDBT_TESTSUITE_END(testBackup); int main(int argc, const char** argv){ + ndb_init(); return testBackup.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testBasic.cpp b/ndb/test/ndbapi/testBasic.cpp index 26622f9b066..7d03016b87a 100644 --- a/ndb/test/ndbapi/testBasic.cpp +++ b/ndb/test/ndbapi/testBasic.cpp @@ -1278,6 +1278,7 @@ 
TESTCASE("MassiveTransaction", NDBT_TESTSUITE_END(testBasic); int main(int argc, const char** argv){ + ndb_init(); return testBasic.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testBasicAsynch.cpp b/ndb/test/ndbapi/testBasicAsynch.cpp index a97920e53da..6daa22fdc6a 100644 --- a/ndb/test/ndbapi/testBasicAsynch.cpp +++ b/ndb/test/ndbapi/testBasicAsynch.cpp @@ -181,6 +181,7 @@ TESTCASE("PkDeleteAsynch", NDBT_TESTSUITE_END(testBasicAsynch); int main(int argc, const char** argv){ + ndb_init(); return testBasicAsynch.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp index 64881ca39ab..e18f4a8bd1a 100644 --- a/ndb/test/ndbapi/testBlobs.cpp +++ b/ndb/test/ndbapi/testBlobs.cpp @@ -1338,6 +1338,7 @@ static struct { NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535) { + ndb_init(); while (++argv, --argc > 0) { const char* arg = argv[0]; if (strcmp(arg, "-batch") == 0) { diff --git a/ndb/test/ndbapi/testDataBuffers.cpp b/ndb/test/ndbapi/testDataBuffers.cpp index 2e29dbb0d7b..94658d5c6b9 100644 --- a/ndb/test/ndbapi/testDataBuffers.cpp +++ b/ndb/test/ndbapi/testDataBuffers.cpp @@ -545,6 +545,7 @@ testcase(int flag) NDB_COMMAND(testDataBuffers, "testDataBuffers", "testDataBuffers", "testDataBuffers", 65535) { + ndb_init(); while (++argv, --argc > 0) { char const* p = argv[0]; if (*p++ != '-' || strlen(p) != 1) diff --git a/ndb/test/ndbapi/testDeadlock.cpp b/ndb/test/ndbapi/testDeadlock.cpp index f51b3cea1e5..a445823b8a8 100644 --- a/ndb/test/ndbapi/testDeadlock.cpp +++ b/ndb/test/ndbapi/testDeadlock.cpp @@ -491,6 +491,7 @@ wl1822_main(char scantx) NDB_COMMAND(testOdbcDriver, "testDeadlock", "testDeadlock", "testDeadlock", 65535) { + ndb_init(); while (++argv, --argc > 0) { const char* arg = argv[0]; if (strcmp(arg, "-scan") == 0) { diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index a0c7bb1414b..f4dd266414b 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -1576,6 +1576,7 @@ TESTCASE("DictionaryPerf", NDBT_TESTSUITE_END(testDict); int main(int argc, const char** argv){ + ndb_init(); // Tables should not be auto created testDict.setCreateTable(false); myRandom48Init(NdbTick_CurrentMillisecond()); diff --git a/ndb/test/ndbapi/testGrep.cpp b/ndb/test/ndbapi/testGrep.cpp index 0bf84cb4ec8..713aefbeafa 100644 --- a/ndb/test/ndbapi/testGrep.cpp +++ b/ndb/test/ndbapi/testGrep.cpp @@ -533,6 +533,7 @@ TESTCASE("FailSlave", NDBT_TESTSUITE_END(testGrep); int main(int argc, const char** argv){ + ndb_init(); return testGrep.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testGrepVerify.cpp b/ndb/test/ndbapi/testGrepVerify.cpp index 05445c1ba1b..52dcda9a162 100644 --- a/ndb/test/ndbapi/testGrepVerify.cpp +++ b/ndb/test/ndbapi/testGrepVerify.cpp @@ -40,6 +40,7 @@ continue; } int main(int argc, const char** argv){ + ndb_init(); const char * connectString = NULL; diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp index 6ebbfd8b680..bef3b310c96 100644 --- a/ndb/test/ndbapi/testIndex.cpp +++ b/ndb/test/ndbapi/testIndex.cpp @@ -1528,6 +1528,7 @@ TESTCASE("UniqueNull", NDBT_TESTSUITE_END(testIndex); int main(int argc, const char** argv){ + ndb_init(); return testIndex.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testInterpreter.cpp b/ndb/test/ndbapi/testInterpreter.cpp index 9c584d6f581..0baba33d2b2 100644 --- a/ndb/test/ndbapi/testInterpreter.cpp +++ b/ndb/test/ndbapi/testInterpreter.cpp @@ -224,6 +224,7 @@ TESTCASE("NdbErrorOperation", 
NDBT_TESTSUITE_END(testInterpreter); int main(int argc, const char** argv){ + ndb_init(); // TABLE("T1"); return testInterpreter.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testMgm.cpp b/ndb/test/ndbapi/testMgm.cpp index d5b9372cc9b..ef653d3f972 100644 --- a/ndb/test/ndbapi/testMgm.cpp +++ b/ndb/test/ndbapi/testMgm.cpp @@ -178,6 +178,7 @@ TESTCASE("SingleUserMode", NDBT_TESTSUITE_END(testMgm); int main(int argc, const char** argv){ + ndb_init(); myRandom48Init(NdbTick_CurrentMillisecond()); return testMgm.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp index 5b171d42578..47987629fe3 100644 --- a/ndb/test/ndbapi/testNdbApi.cpp +++ b/ndb/test/ndbapi/testNdbApi.cpp @@ -1006,6 +1006,7 @@ TESTCASE("NdbErrorOperation", NDBT_TESTSUITE_END(testNdbApi); int main(int argc, const char** argv){ + ndb_init(); // TABLE("T1"); return testNdbApi.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index 89b38c78e71..6bfe59f8d3f 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -434,6 +434,7 @@ TESTCASE("StopOnError", NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ + ndb_init(); #if 0 // It might be interesting to have longer defaults for num // loops in this test diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index 29d03f0c33e..1bb8b251d01 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -3201,6 +3201,7 @@ runtest(Par par) NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) { + ndb_init(); while (++argv, --argc > 0) { const char* arg = argv[0]; if (*arg != '-') { diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp index bb58e69e898..ba41e1d1c40 100644 --- a/ndb/test/ndbapi/testOperations.cpp +++ b/ndb/test/ndbapi/testOperations.cpp @@ -230,6 +230,7 @@ runClearTable(NDBT_Context* ctx, NDBT_Step* step){ int main(int argc, const char** argv){ + ndb_init(); NDBT_TestSuite ts("testOperations"); for(Uint32 i = 0; i int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _to_tabname = NULL; diff --git a/ndb/test/tools/cpcc.cpp b/ndb/test/tools/cpcc.cpp index e30d458ffee..dd59e577f2c 100644 --- a/ndb/test/tools/cpcc.cpp +++ b/ndb/test/tools/cpcc.cpp @@ -173,6 +173,7 @@ add_hosts(Vector & hosts, BaseString list){ int main(int argc, const char** argv){ + ndb_init(); int help = 0; const char *cmd=0, *name=0, *group=0, *owner=0; int list = 0, start = 0, stop = 0, rm = 0; diff --git a/ndb/test/tools/create_index.cpp b/ndb/test/tools/create_index.cpp index f883755ea24..75a657522f6 100644 --- a/ndb/test/tools/create_index.cpp +++ b/ndb/test/tools/create_index.cpp @@ -26,6 +26,7 @@ int main(int argc, const char** argv){ + ndb_init(); const char* _dbname = "TEST_DB"; int _help = 0; diff --git a/ndb/test/tools/hugoCalculator.cpp b/ndb/test/tools/hugoCalculator.cpp index 7f2751be2ba..82c4bbff1a4 100644 --- a/ndb/test/tools/hugoCalculator.cpp +++ b/ndb/test/tools/hugoCalculator.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv) { + ndb_init(); int _row = 0; int _column = 0; int _updates = 0; diff --git a/ndb/test/tools/hugoFill.cpp b/ndb/test/tools/hugoFill.cpp index dee6ce2e6c8..6253bd1bb12 100644 --- a/ndb/test/tools/hugoFill.cpp +++ b/ndb/test/tools/hugoFill.cpp @@ -25,6 +25,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _records = 0; const char* 
_tabname = NULL; diff --git a/ndb/test/tools/hugoLoad.cpp b/ndb/test/tools/hugoLoad.cpp index be7f878d106..c697ad22aad 100644 --- a/ndb/test/tools/hugoLoad.cpp +++ b/ndb/test/tools/hugoLoad.cpp @@ -24,6 +24,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _records = 0; const char* _tabname = NULL; diff --git a/ndb/test/tools/hugoLockRecords.cpp b/ndb/test/tools/hugoLockRecords.cpp index e2c2cd13f00..629408d401d 100644 --- a/ndb/test/tools/hugoLockRecords.cpp +++ b/ndb/test/tools/hugoLockRecords.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoPkDelete.cpp b/ndb/test/tools/hugoPkDelete.cpp index 1855f19796f..78a90ebcb46 100644 --- a/ndb/test/tools/hugoPkDelete.cpp +++ b/ndb/test/tools/hugoPkDelete.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoPkRead.cpp b/ndb/test/tools/hugoPkRead.cpp index 50351f08195..cf08b137e8e 100644 --- a/ndb/test/tools/hugoPkRead.cpp +++ b/ndb/test/tools/hugoPkRead.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoPkReadRecord.cpp b/ndb/test/tools/hugoPkReadRecord.cpp index 85f20bd2060..38b7cae2bf4 100644 --- a/ndb/test/tools/hugoPkReadRecord.cpp +++ b/ndb/test/tools/hugoPkReadRecord.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv) { + ndb_init(); int _row = 0; int _hex = 0; int _primaryKey = 0; diff --git a/ndb/test/tools/hugoPkUpdate.cpp b/ndb/test/tools/hugoPkUpdate.cpp index e7edc3a991d..286be14a01c 100644 --- a/ndb/test/tools/hugoPkUpdate.cpp +++ b/ndb/test/tools/hugoPkUpdate.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoScanRead.cpp b/ndb/test/tools/hugoScanRead.cpp index 47ea8f4a8a7..cdfdcea4654 100644 --- a/ndb/test/tools/hugoScanRead.cpp +++ b/ndb/test/tools/hugoScanRead.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoScanUpdate.cpp b/ndb/test/tools/hugoScanUpdate.cpp index 3e2255ca0f3..96a487a02bf 100644 --- a/ndb/test/tools/hugoScanUpdate.cpp +++ b/ndb/test/tools/hugoScanUpdate.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/restart.cpp b/ndb/test/tools/restart.cpp index 88cfb231a72..9ad20801fd7 100644 --- a/ndb/test/tools/restart.cpp +++ b/ndb/test/tools/restart.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _initial = 0; diff --git a/ndb/test/tools/transproxy.cpp b/ndb/test/tools/transproxy.cpp index 384a8a34f03..90e216ec785 100644 --- a/ndb/test/tools/transproxy.cpp +++ b/ndb/test/tools/transproxy.cpp @@ -346,6 +346,7 @@ start() int main(int av, char** ac) { + ndb_init(); debug("start"); hostname = "ndb-srv7"; if (Ndb_getInAddr(&hostaddr.sin_addr, hostname) != 0) { diff --git a/ndb/test/tools/verify_index.cpp b/ndb/test/tools/verify_index.cpp index 1295b657e9b..6c8e304e1a1 100644 --- a/ndb/test/tools/verify_index.cpp +++ b/ndb/test/tools/verify_index.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _parallelism = 240; const char* _tabname = NULL; const char* _indexname = NULL; diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp 
index 5110947c6a2..aa5798376ae 100644 --- a/ndb/tools/delete_all.cpp +++ b/ndb/tools/delete_all.cpp @@ -26,6 +26,7 @@ static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism=240); int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp index 859a9544a79..0ab11a0fdd2 100644 --- a/ndb/tools/desc.cpp +++ b/ndb/tools/desc.cpp @@ -22,6 +22,7 @@ int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; int _unqualified = 0; diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp index 327f15741c9..70c29461c23 100644 --- a/ndb/tools/drop_index.cpp +++ b/ndb/tools/drop_index.cpp @@ -23,6 +23,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp index 70e5d85aabe..15c229cb0fb 100644 --- a/ndb/tools/drop_tab.cpp +++ b/ndb/tools/drop_tab.cpp @@ -23,6 +23,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp index d6465f3214f..8cea9f7a566 100644 --- a/ndb/tools/listTables.cpp +++ b/ndb/tools/listTables.cpp @@ -167,6 +167,7 @@ const char *debug_option= 0; #endif int main(int argc, const char** argv){ + ndb_init(); int _loops = 1; const char* _tabname = NULL; const char* _dbname = "TEST_DB"; @@ -209,7 +210,6 @@ int main(int argc, const char** argv){ _tabname = argv[optind]; #ifndef DBUG_OFF - my_init(); if (debug_option) DBUG_PUSH(debug_option); #endif diff --git a/ndb/tools/ndbsql.cpp b/ndb/tools/ndbsql.cpp index 6af5f47f6f4..1997e4abebd 100644 --- a/ndb/tools/ndbsql.cpp +++ b/ndb/tools/ndbsql.cpp @@ -671,6 +671,7 @@ void print_help_virtual() { int main(int argc, const char** argv) { + ndb_init(); const char* usage = "Usage: ndbsql [-h] [-d dsn] [-f file] [stmt]\n-h help\n-d \n-f batch mode\nstmt single SQL statement\n"; const char* dsn = "TEST_DB"; bool helpFlg = false, batchMode = false; diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp index eb95947fc0f..b6b9437e573 100644 --- a/ndb/tools/select_all.cpp +++ b/ndb/tools/select_all.cpp @@ -42,6 +42,7 @@ int scanReadRecords(Ndb*, bool orderby); int main(int argc, const char** argv){ + ndb_init(); int _parallelism = 240; const char* _delimiter = "\t"; int _header = true; @@ -89,7 +90,6 @@ int main(int argc, const char** argv){ _tabname = argv[optind]; #ifndef DBUG_OFF - my_init(); if (debug_option) DBUG_PUSH(debug_option); #endif diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp index bb7c9dea49b..6650421e637 100644 --- a/ndb/tools/select_count.cpp +++ b/ndb/tools/select_count.cpp @@ -33,6 +33,7 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab, UtilTransactions::ScanLock lock); int main(int argc, const char** argv){ + ndb_init(); const char* _dbname = "TEST_DB"; int _parallelism = 240; int _help = 0; diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp index 63469c6d746..c27b46c9356 100644 --- a/ndb/tools/waiter.cpp +++ b/ndb/tools/waiter.cpp @@ -30,6 +30,7 @@ int waitClusterStatus(const char* _addr, ndb_mgm_node_status _status, unsigned int _timeout); int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _no_contact = 0; From d3f031e97b9815e0401a23626289c4e894e7d1fd Mon Sep 17 00:00:00 2001 From: 
unknown Date: Wed, 15 Sep 2004 12:36:21 +0200 Subject: [PATCH 02/55] delete unnecessary include my_sys.h --- ndb/src/kernel/vm/Configuration.cpp | 1 - ndb/src/mgmsrv/main.cpp | 1 - ndb/src/ndbapi/Ndbinit.cpp | 1 - ndb/test/src/NDBT_Test.cpp | 1 - ndb/tools/listTables.cpp | 1 - ndb/tools/select_all.cpp | 1 - 6 files changed, 6 deletions(-) diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 1dbd0477d54..c0015172512 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -15,7 +15,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include #include "Configuration.hpp" #include diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 484ca941414..40385219faf 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -15,7 +15,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include #include "MgmtSrvr.hpp" #include "EventLogger.hpp" diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp index 885c6e49eda..9495c05cef2 100644 --- a/ndb/src/ndbapi/Ndbinit.cpp +++ b/ndb/src/ndbapi/Ndbinit.cpp @@ -16,7 +16,6 @@ #include -#include #include "NdbApiSignal.hpp" #include "NdbImpl.hpp" diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index 851c9e6c80a..2d70e221cfd 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -15,7 +15,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include #include "NDBT.hpp" #include "NDBT_Test.hpp" diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp index 8cea9f7a566..4b24929ee4b 100644 --- a/ndb/tools/listTables.cpp +++ b/ndb/tools/listTables.cpp @@ -22,7 +22,6 @@ */ #include -#include #include #include diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp index b6b9437e573..8fb8437ba5f 100644 --- a/ndb/tools/select_all.cpp +++ b/ndb/tools/select_all.cpp @@ -16,7 +16,6 @@ #include -#include #include From f0f29d338396aa6687d52fdc3a88cda0774cac81 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Sep 2004 14:52:39 +0200 Subject: [PATCH 03/55] ndb: set column type sets column defaults --- .../ndbapi_async_example/ndbapi_async.cpp | 10 ++- .../ndbapi_example1/ndbapi_example1.cpp | 4 +- .../ndbapi_example4/ndbapi_example4.cpp | 4 +- .../ndbapi_scan_example/ndbapi_scan.cpp | 9 ++- ndb/include/ndbapi/NdbDictionary.hpp | 16 +++-- ndb/src/ndbapi/NdbDictionary.cpp | 38 ++++++++++- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 63 ++++++++++++++++--- ndb/src/ndbapi/NdbDictionaryImpl.hpp | 2 +- ndb/test/include/NDBT_Table.hpp | 4 +- ndb/test/ndbapi/index.cpp | 16 ++--- ndb/test/ndbapi/index2.cpp | 4 +- ndb/test/ndbapi/testDict.cpp | 2 +- ndb/test/ndbapi/testOIBasic.cpp | 5 +- 13 files changed, 131 insertions(+), 46 deletions(-) diff --git a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp index c34d8de9ec5..76ce1a8efe3 100644 --- a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp +++ b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp @@ -46,9 +46,9 @@ * * NdbDictionary::Column * setName() - * setPrimaryKey() * setType() * setLength() + * setPrimaryKey() * setNullable() * * NdbDictionary::Table @@ -234,9 +234,9 @@ int create_table(Ndb * myNdb) * Column REG_NO */ myColumn.setName("REG_NO"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); 
myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -244,9 +244,9 @@ int create_table(Ndb * myNdb) * Column BRAND */ myColumn.setName("BRAND"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -254,9 +254,9 @@ int create_table(Ndb * myNdb) * Column COLOR */ myColumn.setName("COLOR"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -494,5 +494,3 @@ int main() std::cout << "Number of temporary errors: " << tempErrors << std::endl; delete myNdb; } - - diff --git a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp index 53d23dd7133..03a84aa249b 100644 --- a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp +++ b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp @@ -79,16 +79,16 @@ int main() myTable.setName("MYTABLENAME"); myColumn.setName("ATTR1"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("ATTR2"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); diff --git a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp index a37822dbf0c..fcb770d49e9 100644 --- a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp +++ b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp @@ -80,16 +80,16 @@ int main() myTable.setName("MYTABLENAME"); myColumn.setName("ATTR1"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("ATTR2"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); diff --git a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp index 98cfd1ef0d5..22641bc5b57 100644 --- a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp +++ b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp @@ -47,9 +47,9 @@ * * NdbDictionary::Column * setName() - * setPrimaryKey() * setType() * setLength() + * setPrimaryKey() * setNullable() * * NdbDictionary::Table @@ -165,24 +165,24 @@ int create_table(Ndb * myNdb) myTable.setName("GARAGE"); myColumn.setName("REG_NO"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("BRAND"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("COLOR"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -814,4 +814,3 @@ int main() delete myNdb; } - diff --git 
a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 5c470c1d25f..3257133bd82 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -257,6 +257,10 @@ public: /** * Set type of column * @param type Type of column + * + * @note setType resets all column attributes + * to (type dependent) defaults and should be the first + * method to call. Default type is Unsigned. */ void setType(Type type); @@ -306,23 +310,23 @@ public: * to store in table's blob attribute. This part is normally in * main memory and can be indexed and interpreted. */ - void setInlineSize(int size) { setPrecision(size); } - int getInlineSize() const { return getPrecision(); } + void setInlineSize(int size); + int getInlineSize() const; /** * For blob, set or get "part size" i.e. number of bytes to store in * each tuple of the "blob table". Can be set to zero to omit parts * and to allow only inline bytes ("tinyblob"). */ - void setPartSize(int size) { setScale(size); } - int getPartSize() const { return getScale(); } + void setPartSize(int size); + int getPartSize() const; /** * For blob, set or get "stripe size" i.e. number of consecutive * parts to store in each node group. */ - void setStripeSize(int size) { setLength(size); } - int getStripeSize() const { return getLength(); } + void setStripeSize(int size); + int getStripeSize() const; /** * Get size of element diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index 8000b53d3be..add1fa4cc91 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -65,7 +65,7 @@ NdbDictionary::Column::getName() const { void NdbDictionary::Column::setType(Type t){ - m_impl.m_type = t; + m_impl.init(t); } NdbDictionary::Column::Type @@ -103,6 +103,42 @@ NdbDictionary::Column::getLength() const{ return m_impl.m_length; } +void +NdbDictionary::Column::setInlineSize(int size) +{ + m_impl.m_precision = size; +} + +int +NdbDictionary::Column::getInlineSize() const +{ + return m_impl.m_precision; +} + +void +NdbDictionary::Column::setPartSize(int size) +{ + m_impl.m_scale = size; +} + +int +NdbDictionary::Column::getPartSize() const +{ + return m_impl.m_scale; +} + +void +NdbDictionary::Column::setStripeSize(int size) +{ + m_impl.m_length = size; +} + +int +NdbDictionary::Column::getStripeSize() const +{ + return m_impl.m_length; +} + int NdbDictionary::Column::getSize() const{ return m_impl.m_attrSize; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 78a363c1a5f..64f349be53e 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -87,10 +87,57 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col) } void -NdbColumnImpl::init() +NdbColumnImpl::init(Type t) { m_attrId = -1; - m_type = NdbDictionary::Column::Unsigned; + m_type = t; + switch (m_type) { + case Tinyint: + case Tinyunsigned: + case Smallint: + case Smallunsigned: + case Mediumint: + case Mediumunsigned: + case Int: + case Unsigned: + case Bigint: + case Bigunsigned: + case Float: + case Double: + m_precision = 0; + m_scale = 0; + m_length = 1; + break; + case Decimal: + m_precision = 10; + m_scale = 0; + m_length = 1; + break; + case Char: + case Varchar: + m_precision = 0; + m_scale = 0; + m_length = 1; + break; + case Binary: + case Varbinary: + case Datetime: + case Timespec: + m_precision = 0; + m_scale = 0; + m_length = 1; + break; + case Blob: + m_precision = 256; + m_scale = 8000; + m_length = 4; + break; + 
case Text: + m_precision = 256; + m_scale = 8000; + m_length = 4; + break; + } m_pk = false; m_nullable = false; m_tupleKey = false; @@ -98,12 +145,10 @@ NdbColumnImpl::init() m_distributionKey = false; m_distributionGroup = false; m_distributionGroupBits = 8; - m_length = 1; - m_scale = 5; - m_precision = 5; m_keyInfoPos = 0; - m_attrSize = 4, - m_arraySize = 1, + // next 2 are set at run time + m_attrSize = 0; + m_arraySize = 0; m_autoIncrement = false; m_autoIncrementInitialValue = 1; m_blobTable = NULL; @@ -209,14 +254,18 @@ NdbColumnImpl::create_psuedo(const char * name){ if(!strcmp(name, "NDB$FRAGMENT")){ col->setType(NdbDictionary::Column::Unsigned); col->m_impl.m_attrId = AttributeHeader::FRAGMENT; + col->m_impl.m_attrSize = 4; + col->m_impl.m_arraySize = 1; } else if(!strcmp(name, "NDB$ROW_COUNT")){ col->setType(NdbDictionary::Column::Bigunsigned); col->m_impl.m_attrId = AttributeHeader::ROW_COUNT; col->m_impl.m_attrSize = 8; + col->m_impl.m_arraySize = 1; } else if(!strcmp(name, "NDB$COMMIT_COUNT")){ col->setType(NdbDictionary::Column::Bigunsigned); col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT; col->m_impl.m_attrSize = 8; + col->m_impl.m_arraySize = 1; } else { abort(); } diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index da5e7e45c36..d77cc4d44bc 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -52,7 +52,7 @@ public: NdbColumnImpl(NdbDictionary::Column &); // This is not a copy constructor ~NdbColumnImpl(); NdbColumnImpl& operator=(const NdbColumnImpl&); - void init(); + void init(Type t = Unsigned); int m_attrId; BaseString m_name; diff --git a/ndb/test/include/NDBT_Table.hpp b/ndb/test/include/NDBT_Table.hpp index 59db3ed1092..d2f99b85187 100644 --- a/ndb/test/include/NDBT_Table.hpp +++ b/ndb/test/include/NDBT_Table.hpp @@ -33,10 +33,10 @@ public: { assert(_name != 0); + setType(_type); + setLength(_length); setNullable(_nullable); setPrimaryKey(_pk); - setLength(_length); - setType(_type); } }; diff --git a/ndb/test/ndbapi/index.cpp b/ndb/test/ndbapi/index.cpp index ed34fc19f43..c22da594164 100644 --- a/ndb/test/ndbapi/index.cpp +++ b/ndb/test/ndbapi/index.cpp @@ -81,63 +81,63 @@ static void createTable(Ndb &myNdb, bool storeInACC, bool twoKey, bool longKey) int res; column.setName("NAME"); - column.setPrimaryKey(true); column.setType(NdbDictionary::Column::Char); column.setLength((longKey)? 
1024 // 1KB => long key :12); + column.setPrimaryKey(true); column.setNullable(false); table.addColumn(column); if (twoKey) { column.setName("KEY2"); - column.setPrimaryKey(true); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(true); column.setNullable(false); table.addColumn(column); } column.setName("PNUM1"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("PNUM2"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("PNUM3"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("PNUM4"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("AGE"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("STRING_AGE"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Char); column.setLength(1); column.setLength(256); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); diff --git a/ndb/test/ndbapi/index2.cpp b/ndb/test/ndbapi/index2.cpp index fb2275605d8..f739468d7df 100644 --- a/ndb/test/ndbapi/index2.cpp +++ b/ndb/test/ndbapi/index2.cpp @@ -81,16 +81,16 @@ static void createTable(Ndb &myNdb, bool storeInACC, bool twoKey, bool longKey) int res; column.setName("X"); - column.setPrimaryKey(true); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(true); column.setNullable(false); table.addColumn(column); column.setName("Y"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index f4dd266414b..7cba5ce4cc8 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -1128,9 +1128,9 @@ runCreateAutoincrementTable(NDBT_Context* ctx, NDBT_Step* step){ myTable.setName(tabname); myColumn.setName("ATTR1"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myColumn.setAutoIncrement(true); if (startvalue != ~0) // check that default value starts with 1 diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index 1bb8b251d01..ac28b96af80 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -979,9 +979,9 @@ createtable(Par par) for (unsigned k = 0; k < tab.m_cols; k++) { const Col& col = tab.m_col[k]; NdbDictionary::Column c(col.m_name); - c.setPrimaryKey(col.m_pk); c.setType(col.m_type); c.setLength(col.m_length); + c.setPrimaryKey(col.m_pk); c.setNullable(col.m_nullable); t.addColumn(c); } @@ -2236,9 +2236,8 @@ pkreadfast(Par par, unsigned count) keyrow.calc(par, i); CHK(keyrow.selrow(par) == 0); NdbRecAttr* rec; - CHK(con.getValue((Uint32)0, rec) == 
0); - CHK(con.executeScan() == 0); // get 1st column + CHK(con.getValue((Uint32)0, rec) == 0); CHK(con.execute(Commit) == 0); con.closeTransaction(); } From 39c2f42168f0cba3bda903c80b2bd48804725a15 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Sep 2004 17:44:13 +0200 Subject: [PATCH 04/55] ndb charsets: DICT --- ndb/include/kernel/signaldata/DictTabInfo.hpp | 4 +- ndb/include/ndbapi/NdbDictionary.hpp | 10 ++ ndb/include/util/NdbSqlUtil.hpp | 7 ++ ndb/src/common/util/NdbSqlUtil.cpp | 77 ++++++++++++++ ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 2 + ndb/src/ndbapi/NdbDictionary.cpp | 20 +++- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 100 ++++++++++-------- ndb/src/ndbapi/NdbDictionaryImpl.hpp | 10 ++ ndb/src/ndbapi/ndberror.c | 3 + sql/ha_ndbcluster.cc | 26 +++-- 10 files changed, 202 insertions(+), 57 deletions(-) diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp index dec7145c897..a9a50f19fbc 100644 --- a/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -438,8 +438,8 @@ public: case DictTabInfo::ExtText: AttributeType = DictTabInfo::StringType; AttributeSize = DictTabInfo::an8Bit; - // head + inline part [ attr precision ] - AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + AttributeExtPrecision; + // head + inline part [ attr precision lower half ] + AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF); return true; }; return false; diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 3257133bd82..51a6895648f 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -32,6 +32,8 @@ #include class Ndb; +struct charset_info_st; +typedef struct charset_info_st CHARSET_INFO; /** * @class NdbDictionary @@ -305,6 +307,14 @@ public: */ int getLength() const; + /** + * For Char or Varchar or Text, set or get MySQL CHARSET_INFO. This + * specifies both character set and collation. See get_charset() + * etc in MySQL. (The cs is not "const" in MySQL). + */ + void setCharset(CHARSET_INFO* cs); + CHARSET_INFO* getCharset() const; + /** * For blob, set or get "inline size" i.e. number of initial bytes * to store in table's blob attribute. This part is normally in diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp index 1d3e96d5c7e..df1cb716f93 100644 --- a/ndb/include/util/NdbSqlUtil.hpp +++ b/ndb/include/util/NdbSqlUtil.hpp @@ -90,6 +90,13 @@ public: */ static const Type& getType(Uint32 typeId); + /** + * Check character set. + */ + static bool usable_in_pk(Uint32 typeId, const void* cs); + static bool usable_in_hash_index(Uint32 typeId, const void* cs); + static bool usable_in_ordered_index(Uint32 typeId, const void* cs); + private: /** * List of all types. Must match Type::Enum. 
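For illustration only (this sketch is not part of the patch): the new Column::setCharset()/getCharset() accessors let a client attach a MySQL CHARSET_INFO to a Char, Varchar, or Text column, and the charset-usability checks declared above are what DICT applies when such a column is used in a primary key or index. The header names and the choice of my_charset_latin1_bin below are assumptions; the packing of the collation number into the upper 16 bits of AttributeExtPrecision (precision stays in the lower 16 bits) is done later in this patch in NdbDictionaryImpl.cpp.

    // Sketch: define a CHAR primary-key column with an explicit charset,
    // assuming the standard NDB API header and the MySQL charset header.
    #include <NdbApi.hpp>
    #include <m_ctype.h>   // CHARSET_INFO, my_charset_latin1_bin (assumed header)

    static void add_name_column(NdbDictionary::Table &tab)
    {
      NdbDictionary::Column col("NAME");
      col.setType(NdbDictionary::Column::Char);   // setType first: it resets type-dependent defaults
      col.setLength(12);
      col.setCharset(&my_charset_latin1_bin);     // accessor added by this patch
      col.setPrimaryKey(true);                    // PK requires a collation accepted by usable_in_pk()
      col.setNullable(false);
      tab.addColumn(col);
      // On table create, DICT encodes the collation number in the upper half of
      // AttributeExtPrecision: (precision & 0xFFFF) | (cs->number << 16).
    }
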
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index 84a6f6e6c21..afb9bcfff62 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -529,6 +529,83 @@ NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size return CmpUnknown; } +// check charset + +bool +NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info) +{ + const Type& type = getType(typeId); + switch (type.m_typeId) { + case Type::Undefined: + break; + case Type::Char: + { + const CHARSET_INFO *cs = (const CHARSET_INFO*)info; + return + cs != 0 && + cs->cset != 0 && + cs->coll != 0 && + cs->coll->strnxfrm != 0 && + cs->strxfrm_multiply == 1; // current limitation + } + break; + case Type::Varchar: + return true; // Varchar not used via MySQL + case Type::Blob: + case Type::Text: + break; + default: + return true; + } + return false; +} + +bool +NdbSqlUtil::usable_in_hash_index(Uint32 typeId, const void* info) +{ + return usable_in_pk(typeId, info); +} + +bool +NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info) +{ + const Type& type = getType(typeId); + switch (type.m_typeId) { + case Type::Undefined: + break; + case Type::Char: + { + const CHARSET_INFO *cs = (const CHARSET_INFO*)info; + return + cs != 0 && + cs->cset != 0 && + cs->coll != 0 && + cs->coll->strnxfrm != 0 && + cs->coll->strnncollsp != 0 && + cs->strxfrm_multiply == 1; // current limitation + } + break; + case Type::Varchar: + return true; // Varchar not used via MySQL + case Type::Text: + { + const CHARSET_INFO *cs = (const CHARSET_INFO*)info; + return + cs != 0 && + cs->mbmaxlen == 1 && // extra limitation + cs->cset != 0 && + cs->coll != 0 && + cs->coll->strnxfrm != 0 && + cs->coll->strnncollsp != 0 && + cs->strxfrm_multiply == 1; // current limitation + } + break; + default: + return true; + } + return false; +} + #ifdef NDB_SQL_UTIL_TEST #include diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 7126842459e..d82083684b7 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -6317,6 +6317,8 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored); // ext type overrides w.add(DictTabInfo::AttributeExtType, aRec->extType); + w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision); + w.add(DictTabInfo::AttributeExtScale, aRec->extScale); w.add(DictTabInfo::AttributeExtLength, aRec->extLength); w.add(DictTabInfo::AttributeEnd, (Uint32)true); } diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index add1fa4cc91..6396cb6bb1d 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -109,6 +109,18 @@ NdbDictionary::Column::setInlineSize(int size) m_impl.m_precision = size; } +void +NdbDictionary::Column::setCharset(CHARSET_INFO* cs) +{ + m_impl.m_cs = cs; +} + +CHARSET_INFO* +NdbDictionary::Column::getCharset() const +{ + return m_impl.m_cs; +} + int NdbDictionary::Column::getInlineSize() const { @@ -856,6 +868,8 @@ NdbDictionary::Dictionary::getNdbError() const { NdbOut& operator<<(NdbOut& out, const NdbDictionary::Column& col) { + const CHARSET_INFO *cs = col.getCharset(); + const char *csname = cs ? 
cs->name : "?"; out << col.getName() << " "; switch (col.getType()) { case NdbDictionary::Column::Tinyint: @@ -898,10 +912,10 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) out << "Decimal(" << col.getScale() << "," << col.getPrecision() << ")"; break; case NdbDictionary::Column::Char: - out << "Char(" << col.getLength() << ")"; + out << "Char(" << col.getLength() << ";" << csname << ")"; break; case NdbDictionary::Column::Varchar: - out << "Varchar(" << col.getLength() << ")"; + out << "Varchar(" << col.getLength() << ";" << csname << ")"; break; case NdbDictionary::Column::Binary: out << "Binary(" << col.getLength() << ")"; @@ -921,7 +935,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) break; case NdbDictionary::Column::Text: out << "Text(" << col.getInlineSize() << "," << col.getPartSize() - << ";" << col.getStripeSize() << ")"; + << ";" << col.getStripeSize() << ";" << csname << ")"; break; case NdbDictionary::Column::Undefined: out << "Undefined"; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 64f349be53e..c2c987f3bdb 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -36,6 +36,7 @@ #include "NdbEventOperationImpl.hpp" #include "NdbBlob.hpp" #include +#include #define DEBUG_PRINT 0 #define INCOMPATIBLE_VERSION -2 @@ -64,6 +65,7 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col) m_name = col.m_name; m_type = col.m_type; m_precision = col.m_precision; + m_cs = col.m_cs; m_scale = col.m_scale; m_length = col.m_length; m_pk = col.m_pk; @@ -89,6 +91,9 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col) void NdbColumnImpl::init(Type t) { + // do not use default_charset_info as it may not be initialized yet + // use binary collation until NDB tests can handle charsets + CHARSET_INFO* default_cs = &my_charset_latin1_bin; m_attrId = -1; m_type = t; switch (m_type) { @@ -107,17 +112,20 @@ NdbColumnImpl::init(Type t) m_precision = 0; m_scale = 0; m_length = 1; + m_cs = NULL; break; case Decimal: m_precision = 10; m_scale = 0; m_length = 1; + m_cs = NULL; break; case Char: case Varchar: m_precision = 0; m_scale = 0; m_length = 1; + m_cs = default_cs; break; case Binary: case Varbinary: @@ -126,16 +134,19 @@ NdbColumnImpl::init(Type t) m_precision = 0; m_scale = 0; m_length = 1; + m_cs = NULL; break; case Blob: m_precision = 256; m_scale = 8000; m_length = 4; + m_cs = NULL; break; case Text: m_precision = 256; m_scale = 8000; m_length = 4; + m_cs = default_cs; break; } m_pk = false; @@ -191,52 +202,12 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const return false; } } - if(m_length != col.m_length){ + if (m_precision != col.m_precision || + m_scale != col.m_scale || + m_length != col.m_length || + m_cs != col.m_cs) { return false; } - - switch(m_type){ - case NdbDictionary::Column::Undefined: - break; - case NdbDictionary::Column::Tinyint: - case NdbDictionary::Column::Tinyunsigned: - case NdbDictionary::Column::Smallint: - case NdbDictionary::Column::Smallunsigned: - case NdbDictionary::Column::Mediumint: - case NdbDictionary::Column::Mediumunsigned: - case NdbDictionary::Column::Int: - case NdbDictionary::Column::Unsigned: - case NdbDictionary::Column::Float: - break; - case NdbDictionary::Column::Decimal: - if(m_scale != col.m_scale || - m_precision != col.m_precision){ - return false; - } - break; - case NdbDictionary::Column::Char: - case NdbDictionary::Column::Varchar: - case NdbDictionary::Column::Binary: - case NdbDictionary::Column::Varbinary: - if(m_length 
!= col.m_length){ - return false; - } - break; - case NdbDictionary::Column::Bigint: - case NdbDictionary::Column::Bigunsigned: - case NdbDictionary::Column::Double: - case NdbDictionary::Column::Datetime: - case NdbDictionary::Column::Timespec: - break; - case NdbDictionary::Column::Blob: - case NdbDictionary::Column::Text: - if (m_precision != col.m_precision || - m_scale != col.m_scale || - m_length != col.m_length) { - return false; - } - break; - } if (m_autoIncrement != col.m_autoIncrement){ return false; } @@ -1176,6 +1147,7 @@ indexTypeMapping[] = { { -1, -1 } }; +// TODO: remove, api-kernel type codes must match now static const ApiKernelMapping columnTypeMapping[] = { @@ -1282,9 +1254,23 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, return 703; } col->m_extType = attrDesc.AttributeExtType; - col->m_precision = attrDesc.AttributeExtPrecision; + col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF); col->m_scale = attrDesc.AttributeExtScale; col->m_length = attrDesc.AttributeExtLength; + // charset in upper half of precision + unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16); + // charset is defined exactly for char types + if (col->getCharType() != (cs_number != 0)) { + delete impl; + return 703; + } + if (col->getCharType()) { + col->m_cs = get_charset(cs_number, MYF(0)); + if (col->m_cs == NULL) { + delete impl; + return 743; + } + } // translate to old kernel types and sizes if (! attrDesc.translateExtType()) { @@ -1535,9 +1521,23 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, getKernelConstant(col->m_type, columnTypeMapping, DictTabInfo::ExtUndefined); - tmpAttr.AttributeExtPrecision = col->m_precision; + tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF); tmpAttr.AttributeExtScale = col->m_scale; tmpAttr.AttributeExtLength = col->m_length; + // charset is defined exactly for char types + if (col->getCharType() != (col->m_cs != NULL)) { + m_error.code = 703; + return -1; + } + // primary key type check + if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) { + m_error.code = 743; + return -1; + } + // charset in upper half of precision + if (col->getCharType()) { + tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16); + } // DICT will ignore and recompute this (void)tmpAttr.translateExtType(); @@ -1999,6 +1999,14 @@ NdbDictInterface::createIndex(Ndb & ndb, m_error.code = 4245; return -1; } + // index key type check + if (it == DictTabInfo::UniqueHashIndex && + ! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs) || + it == DictTabInfo::OrderedIndex && + ! 
NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs)) { + m_error.code = 743; + return -1; + } attributeList.id[i] = col->m_attrId; } if (it == DictTabInfo::UniqueHashIndex) { diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index d77cc4d44bc..cf659c71397 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -60,6 +60,7 @@ public: int m_precision; int m_scale; int m_length; + CHARSET_INFO * m_cs; // not const in MySQL bool m_pk; bool m_tupleKey; @@ -82,6 +83,7 @@ public: Uint32 m_keyInfoPos; Uint32 m_extType; // used by restore (kernel type in versin v2x) bool getInterpretableType() const ; + bool getCharType() const; bool getBlobType() const; /** @@ -446,6 +448,14 @@ NdbColumnImpl::getInterpretableType() const { m_type == NdbDictionary::Column::Bigunsigned); } +inline +bool +NdbColumnImpl::getCharType() const { + return (m_type == NdbDictionary::Column::Char || + m_type == NdbDictionary::Column::Varchar || + m_type == NdbDictionary::Column::Text); +} + inline bool NdbColumnImpl::getBlobType() const { diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 7991004e3d0..2ebcf4be444 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -280,6 +280,9 @@ ErrorBundle ErrorCodes[] = { { 739, SE, "Unsupported primary key length" }, { 740, SE, "Nullable primary key not supported" }, { 741, SE, "Unsupported alter table" }, + { 742, SE, "Unsupported attribute type in index" }, + { 743, SE, "Unsupported character set in table or index" }, + { 744, SE, "Character conversion error" }, { 241, SE, "Invalid schema object version" }, { 283, SE, "Table is being dropped" }, { 284, SE, "Table not defined in transaction coordinator" }, diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index bb3e54e74d5..95247063e31 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2897,6 +2897,8 @@ static int create_ndb_column(NDBCOL &col, { // Set name col.setName(field->field_name); + // Get char set + CHARSET_INFO *cs= field->charset(); // Set type and sizes const enum enum_field_types mysql_type= field->real_type(); switch (mysql_type) { @@ -2968,15 +2970,19 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_STRING: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Binary); - else + else { col.setType(NDBCOL::Char); + col.setCharset(cs); + } col.setLength(field->pack_length()); break; case MYSQL_TYPE_VAR_STRING: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Varbinary); - else + else { col.setType(NDBCOL::Varchar); + col.setCharset(cs); + } col.setLength(field->pack_length()); break; // Blob types (all come in as MYSQL_TYPE_BLOB) @@ -2984,8 +2990,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_TINY_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } col.setInlineSize(256); // No parts col.setPartSize(0); @@ -2995,8 +3003,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } // Use "<=" even if "<" is the exact condition if (field->max_length() <= (1 << 8)) goto mysql_type_tiny_blob; @@ -3015,8 +3025,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_MEDIUM_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } col.setInlineSize(256); 
col.setPartSize(4000); col.setStripeSize(8); @@ -3025,8 +3037,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_LONG_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } col.setInlineSize(256); col.setPartSize(8000); col.setStripeSize(4); From 16bd53d4b35b61e75f694c3694e1b6438b30ec0b Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Sep 2004 15:47:04 +0000 Subject: [PATCH 05/55] fixed return code for ndb_backup added possibility to set number of accounts in createAndLoadBank bumbed up number of threads in tastBackup BackupBank ndb/src/kernel/blocks/backup/restore/main.cpp: fixed return code ndb/test/ndbapi/bank/Bank.hpp: added possibility to set number of accounts in createAndLoadBank ndb/test/ndbapi/bank/BankLoad.cpp: added possibility to set number of accounts in createAndLoadBank ndb/test/ndbapi/testBackup.cpp: bumbed up number of threads in tastBackup BackupBank ndb/test/src/NdbBackup.cpp: some small optimizations --- ndb/src/kernel/blocks/backup/restore/main.cpp | 4 +-- ndb/test/ndbapi/bank/Bank.hpp | 2 +- ndb/test/ndbapi/bank/BankLoad.cpp | 4 +-- ndb/test/ndbapi/testBackup.cpp | 11 ++++++- ndb/test/src/NdbBackup.cpp | 29 ++++++++----------- 5 files changed, 27 insertions(+), 23 deletions(-) diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp index a330aa51373..db0b0405b4c 100644 --- a/ndb/src/kernel/blocks/backup/restore/main.cpp +++ b/ndb/src/kernel/blocks/backup/restore/main.cpp @@ -331,7 +331,7 @@ main(int argc, const char** argv) for (i= 0; i < g_consumers.size(); i++) g_consumers[i]->endOfTuples(); - + RestoreLogIterator logIter(metaData); if (!logIter.readHeader()) { @@ -357,7 +357,7 @@ main(int argc, const char** argv) } } clearConsumers(); - return 1; + return 0; } // main template class Vector; diff --git a/ndb/test/ndbapi/bank/Bank.hpp b/ndb/test/ndbapi/bank/Bank.hpp index e6816fd7111..2a8e22931a8 100644 --- a/ndb/test/ndbapi/bank/Bank.hpp +++ b/ndb/test/ndbapi/bank/Bank.hpp @@ -29,7 +29,7 @@ public: Bank(); - int createAndLoadBank(bool overWrite); + int createAndLoadBank(bool overWrite, int num_accounts=10); int dropBank(); int performTransactions(int maxSleepBetweenTrans = 20, int yield=0); diff --git a/ndb/test/ndbapi/bank/BankLoad.cpp b/ndb/test/ndbapi/bank/BankLoad.cpp index bbaac27735b..39dc8097115 100644 --- a/ndb/test/ndbapi/bank/BankLoad.cpp +++ b/ndb/test/ndbapi/bank/BankLoad.cpp @@ -53,7 +53,7 @@ int Bank::getNumAccountTypes(){ return accountTypesSize; } -int Bank::createAndLoadBank(bool ovrWrt){ +int Bank::createAndLoadBank(bool ovrWrt, int num_accounts){ m_ndb.init(); if (m_ndb.waitUntilReady() != 0) @@ -78,7 +78,7 @@ int Bank::createAndLoadBank(bool ovrWrt){ if (loadAccountType() != NDBT_OK) return NDBT_FAILED; - if (loadAccount(10) != NDBT_OK) + if (loadAccount(num_accounts) != NDBT_OK) return NDBT_FAILED; if (loadSystemValues() != NDBT_OK) diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp index d328a7db292..31a85245025 100644 --- a/ndb/test/ndbapi/testBackup.cpp +++ b/ndb/test/ndbapi/testBackup.cpp @@ -215,7 +215,7 @@ int runDropTable(NDBT_Context* ctx, NDBT_Step* step){ int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){ Bank bank; int overWriteExisting = true; - if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK) + if (bank.createAndLoadBank(overWriteExisting, 10) != NDBT_OK) return NDBT_FAILED; return NDBT_OK; } @@ -428,6 +428,15 @@ TESTCASE("BackupBank", 
INITIALIZER(runCreateBank); STEP(runBankTimer); STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); STEP(runBankGL); // TODO STEP(runBankSum); STEP(runBackupBank); diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index f33c5d8c313..71b4b49b3a6 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -140,14 +140,16 @@ NdbBackup::execRestore(bool _restore_data, */ snprintf(buf, buf_len, - "scp %s:%s/BACKUP/BACKUP-%d/* .", + "scp %s:%s/BACKUP/BACKUP-%d/BACKUP-%d*.%d.* .", host, path, - _backup_id); + _backup_id, + _backup_id, + _node_id); ndbout << "buf: "<< buf < 1); - // restore metadata first - res = execRestore(false, true, ndbNodes[0].node_id, _backup_id); - + // restore metadata first and data for first node + res = execRestore(true, true, ndbNodes[0].node_id, _backup_id); - // Restore data once for each node - for(size_t i = 0; i < ndbNodes.size(); i++){ - res = execRestore(true, false, ndbNodes[i].node_id, _backup_id); - } + // Restore data once for each node + for(size_t i = 1; i < ndbNodes.size(); i++){ + res = execRestore(true, false, ndbNodes[i].node_id, _backup_id); } return 0; From 83bf02510ceb595f5210b65a02456e44710791fd Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Sep 2004 17:09:25 +0000 Subject: [PATCH 06/55] testBank optimized for fewer timeouts changed for consistency in "time" tests moved SR_UNDO tests to basic added testBackup -n BackupBank ndb/test/ndbapi/bank/Bank.cpp: optimized for fewer timeouts changed for consistency in "time" ndb/test/ndbapi/bank/Bank.hpp: optimized for fewer timeouts changed for consistency in "time" ndb/test/run-test/daily-basic-tests.txt: moved SR_UNDO tests to basic ndb/test/run-test/daily-devel-tests.txt: moved SR_UNDO tests to basic added testBackup -n BackupBank --- ndb/test/ndbapi/bank/Bank.cpp | 121 ++++++++++++------------ ndb/test/ndbapi/bank/Bank.hpp | 3 + ndb/test/run-test/daily-basic-tests.txt | 15 +++ ndb/test/run-test/daily-devel-tests.txt | 23 +---- 4 files changed, 82 insertions(+), 80 deletions(-) diff --git a/ndb/test/ndbapi/bank/Bank.cpp b/ndb/test/ndbapi/bank/Bank.cpp index 4581d1a9842..c6029259357 100644 --- a/ndb/test/ndbapi/bank/Bank.cpp +++ b/ndb/test/ndbapi/bank/Bank.cpp @@ -156,7 +156,14 @@ int Bank::performTransactionImpl1(int fromAccountId, int check; + // Ok, all clear to do the transaction + Uint64 transId; + if (getNextTransactionId(transId) != NDBT_OK){ + return NDBT_FAILED; + } + NdbConnection* pTrans = m_ndb.startTransaction(); + if( pTrans == NULL ) { const NdbError err = m_ndb.getNdbError(); if (err.status == NdbError::TemporaryError){ @@ -167,6 +174,13 @@ int Bank::performTransactionImpl1(int fromAccountId, return NDBT_FAILED; } + Uint64 currTime; + if (prepareGetCurrTimeOp(pTrans, currTime) != NDBT_OK){ + ERR(pTrans->getNdbError()); + m_ndb.closeTransaction(pTrans); + return NDBT_FAILED; + } + /** * Check balance on from account */ @@ -205,29 +219,6 @@ int Bank::performTransactionImpl1(int fromAccountId, return NDBT_FAILED; } - check = pTrans->execute(NoCommit); - if( check == -1 ) { - const NdbError err = pTrans->getNdbError(); - m_ndb.closeTransaction(pTrans); - if (err.status == NdbError::TemporaryError){ - ERR(err); - return NDBT_TEMPORARY; - } - ERR(err); - return NDBT_FAILED; - } - - Uint32 balanceFrom = 
balanceFromRec->u_32_value(); - // ndbout << "balanceFrom: " << balanceFrom << endl; - - if (((Int64)balanceFrom - amount) < 0){ - m_ndb.closeTransaction(pTrans); - //ndbout << "Not enough funds" << endl; - return NOT_ENOUGH_FUNDS; - } - - Uint32 fromAccountType = fromAccountTypeRec->u_32_value(); - /** * Read balance on to account */ @@ -278,21 +269,22 @@ int Bank::performTransactionImpl1(int fromAccountId, return NDBT_FAILED; } + + Uint32 balanceFrom = balanceFromRec->u_32_value(); + // ndbout << "balanceFrom: " << balanceFrom << endl; + + if (((Int64)balanceFrom - amount) < 0){ + m_ndb.closeTransaction(pTrans); + //ndbout << "Not enough funds" << endl; + return NOT_ENOUGH_FUNDS; + } + + Uint32 fromAccountType = fromAccountTypeRec->u_32_value(); + Uint32 balanceTo = balanceToRec->u_32_value(); // ndbout << "balanceTo: " << balanceTo << endl; Uint32 toAccountType = toAccountTypeRec->u_32_value(); - // Ok, all clear to do the transaction - Uint64 transId; - if (getNextTransactionId(transId) != NDBT_OK){ - return NDBT_FAILED; - } - - Uint64 currTime; - if (getCurrTime(currTime) != NDBT_OK){ - return NDBT_FAILED; - } - /** * Update balance on from account */ @@ -1988,35 +1980,13 @@ int Bank::readSystemValue(SystemValueId sysValId, Uint64 & value){ ERR(m_ndb.getNdbError()); return NDBT_FAILED; } - - NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES"); - if (pOp == NULL) { + + if (prepareReadSystemValueOp(pTrans, sysValId, value) != NDBT_OK) { ERR(pTrans->getNdbError()); m_ndb.closeTransaction(pTrans); return NDBT_FAILED; } - - check = pOp->readTuple(); - if( check == -1 ) { - ERR(pTrans->getNdbError()); - m_ndb.closeTransaction(pTrans); - return NDBT_FAILED; - } - - check = pOp->equal("SYSTEM_VALUES_ID", sysValId); - if( check == -1 ) { - ERR(pTrans->getNdbError()); - m_ndb.closeTransaction(pTrans); - return NDBT_FAILED; - } - - NdbRecAttr* valueRec = pOp->getValue("VALUE"); - if( valueRec ==NULL ) { - ERR(pTrans->getNdbError()); - m_ndb.closeTransaction(pTrans); - return NDBT_FAILED; - } - + check = pTrans->execute(Commit); if( check == -1 ) { ERR(pTrans->getNdbError()); @@ -2024,13 +1994,38 @@ int Bank::readSystemValue(SystemValueId sysValId, Uint64 & value){ return NDBT_FAILED; } - value = valueRec->u_64_value(); - m_ndb.closeTransaction(pTrans); return NDBT_OK; } +int Bank::prepareReadSystemValueOp(NdbConnection* pTrans, SystemValueId sysValId, Uint64 & value){ + + int check; + + NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES"); + if (pOp == NULL) { + return NDBT_FAILED; + } + + check = pOp->readTuple(); + if( check == -1 ) { + return NDBT_FAILED; + } + + check = pOp->equal("SYSTEM_VALUES_ID", sysValId); + if( check == -1 ) { + return NDBT_FAILED; + } + + NdbRecAttr* valueRec = pOp->getValue("VALUE", (char *)&value); + if( valueRec == NULL ) { + return NDBT_FAILED; + } + + return NDBT_OK; +} + int Bank::writeSystemValue(SystemValueId sysValId, Uint64 value){ int check; @@ -2307,6 +2302,10 @@ int Bank::getCurrTime(Uint64 &time){ return readSystemValue(CurrentTime, time); } +int Bank::prepareGetCurrTimeOp(NdbConnection *pTrans, Uint64 &time){ + return prepareReadSystemValueOp(pTrans, CurrentTime, time); +} + int Bank::performSumAccounts(int maxSleepBetweenSums, int yield){ if (init() != NDBT_OK) diff --git a/ndb/test/ndbapi/bank/Bank.hpp b/ndb/test/ndbapi/bank/Bank.hpp index 2a8e22931a8..34c5ff51cc2 100644 --- a/ndb/test/ndbapi/bank/Bank.hpp +++ b/ndb/test/ndbapi/bank/Bank.hpp @@ -118,6 +118,9 @@ private: int incCurrTime(Uint64 &value); int getCurrTime(Uint64 &time); + 
int prepareReadSystemValueOp(NdbConnection*, SystemValueId sysValId, Uint64 &time); + int prepareGetCurrTimeOp(NdbConnection*, Uint64 &time); + int createTables(); int createTable(const char* tabName); diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 631378cb636..8d7e8a06c72 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -1006,3 +1006,18 @@ max-time: 1500 cmd: testSystemRestart args: -n SR2 T7 +max-time: 1500 +cmd: testSystemRestart +args: -n SR_UNDO T1 + +max-time: 1500 +cmd: testSystemRestart +args: -n SR_UNDO T6 + +max-time: 1500 +cmd: testSystemRestart +args: -n SR_UNDO T7 + +max-time: 1500 +cmd: testSystemRestart +args: -n SR_UNDO T8 diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt index 35e0d2e9a46..723d241aa46 100644 --- a/ndb/test/run-test/daily-devel-tests.txt +++ b/ndb/test/run-test/daily-devel-tests.txt @@ -26,10 +26,10 @@ max-time: 600 cmd: atrt-testBackup args: -n BackupOne T1 T6 T3 I3 -#max-time: 600 -#cmd: testBackup -#args: -n BackupBank T6 -# +max-time: 1000 +cmd: testBackup +args: -n BackupBank T6 + # # MGMAPI AND MGSRV # @@ -41,21 +41,6 @@ args: -n SingleUserMode T1 # # SYSTEM RESTARTS # -max-time: 1500 -cmd: testSystemRestart -args: -n SR_UNDO T1 - -max-time: 1500 -cmd: testSystemRestart -args: -n SR_UNDO T6 - -max-time: 1500 -cmd: testSystemRestart -args: -n SR_UNDO T7 - -max-time: 1500 -cmd: testSystemRestart -args: -n SR_UNDO T8 max-time: 1500 cmd: testSystemRestart From 497f8063098e6a03f27be60bd4d3c66cf4efec6f Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Sep 2004 17:25:59 +0000 Subject: [PATCH 07/55] HugoTransactions.cpp changed to do execute(commit);restart() transaction instead of closeTransaction();start new select_all.cpp moved my_init outside DBUG_OFF ndb/test/src/HugoTransactions.cpp: changed to do execute(commit);restart() transaction instead of closeTransaction();start new ndb/tools/select_all.cpp: moved my_init outside DBUG_OFF --- ndb/test/src/HugoTransactions.cpp | 7 +++++-- ndb/tools/select_all.cpp | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 05039562c76..53809ecc851 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -728,7 +728,8 @@ HugoTransactions::loadTable(Ndb* pNdb, if (doSleep > 0) NdbSleep_MilliSleep(doSleep); - if (first_batch || !oneTrans) { + // if (first_batch || !oneTrans) { + if (first_batch) { first_batch = false; pTrans = pNdb->startTransaction(); @@ -774,8 +775,10 @@ HugoTransactions::loadTable(Ndb* pNdb, // Execute the transaction and insert the record if (!oneTrans || (c + batch) >= records) { - closeTrans = true; + // closeTrans = true; + closeTrans = false; check = pTrans->execute( Commit ); + pTrans->restart(); } else { closeTrans = false; check = pTrans->execute( NoCommit ); diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp index eb95947fc0f..a99bad48f1f 100644 --- a/ndb/tools/select_all.cpp +++ b/ndb/tools/select_all.cpp @@ -88,8 +88,8 @@ int main(int argc, const char** argv){ } _tabname = argv[optind]; -#ifndef DBUG_OFF my_init(); +#ifndef DBUG_OFF if (debug_option) DBUG_PUSH(debug_option); #endif From 49dc3a53011fde94b900762084cd0631cb4fb24a Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Sep 2004 20:41:49 +0200 Subject: [PATCH 08/55] logging_ok: Logging to logging@openlogging.org accepted SCI_Transporter.hpp, 
SCI_Transporter.cpp: Major fix-up of SCI Transporter, fixed so that it works with single card, fixed wrap around, added lots of DBUG statements, merged with new transporter handling TransporterRegistry.cpp: Some fixes for wrap around needed plus DBUG handling TCP_Transporter.hpp, TCP_Transporter.cpp: Added DBUG statements SHM_Transporter.hpp, SHM_Transporter.cpp: Fixed SHM Transporter SHM_Buffer.hpp: Fixed SHM Buffer to handle wrap around properly IPCConfig.cpp: Fixed up config of SCI SocketServer.cpp: Added DBUG support for SocketServer threads ConfigInfo.cpp: Config changes for SCI TransporterDefinitions.hpp, mgmapi_config_parameters.h: SCI fixes Makefile.am, type_ndbapitools.mk.am, type_ndbapitest.mk.am: Added SCI library path to Makefiles configure.in: Fixed small bug with shared mem and sci together in configure acinclude.m4: Added possibility of providing SCI library path in confgure acinclude.m4: Added possibility of providing SCI library path in confgure configure.in: Fixed small bug with shared mem and sci together in configure ndb/config/type_ndbapitest.mk.am: Added SCI library path to Makefiles ndb/config/type_ndbapitools.mk.am: Added SCI library path to Makefiles ndb/src/cw/cpcd/Makefile.am: Added SCI library path to Makefiles ndb/src/kernel/Makefile.am: Added SCI library path to Makefiles ndb/src/kernel/blocks/backup/restore/Makefile.am: Added SCI library path to Makefiles ndb/src/mgmsrv/Makefile.am: Added SCI library path to Makefiles sql/Makefile.am: Added SCI library path to Makefiles ndb/src/common/transporter/Makefile.am: Added SCI library path to Makefiles ndb/include/mgmapi/mgmapi_config_parameters.h: SCI fixes ndb/include/transporter/TransporterDefinitions.hpp: SCI fixes ndb/src/mgmsrv/ConfigInfo.cpp: Config changes for SCI ndb/src/common/util/SocketServer.cpp: Added DBUG support for SocketServer threads ndb/src/common/mgmcommon/IPCConfig.cpp: Fixed up config of SCI ndb/src/common/transporter/SHM_Buffer.hpp: Fixed SHM Buffer to handle wrap around properly ndb/src/common/transporter/SHM_Transporter.cpp: Fixed SHM Transporter ndb/src/common/transporter/SHM_Transporter.hpp: Fixed SHM Transporter ndb/src/common/transporter/TCP_Transporter.cpp: Added DBUG statements ndb/src/common/transporter/TCP_Transporter.hpp: Added DBUG statements ndb/src/common/transporter/TransporterRegistry.cpp: Some fixes for wrap around needed plus DBUG handling ndb/src/common/transporter/SCI_Transporter.cpp: Major fix-up of SCI Transporter, fixed so that it works with single card, fixed wrap around, added lots of DBUG statements, merged with new transporter handling ndb/src/common/transporter/SCI_Transporter.hpp: Major fix-up of SCI Transporter, fixed so that it works with single card, fixed wrap around, added lots of DBUG statements, merged with new transporter handling BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 2 + acinclude.m4 | 50 +- configure.in | 4 +- ndb/config/type_ndbapitest.mk.am | 2 +- ndb/config/type_ndbapitools.mk.am | 2 +- ndb/include/mgmapi/mgmapi_config_parameters.h | 18 +- .../transporter/TransporterDefinitions.hpp | 12 +- ndb/src/common/mgmcommon/IPCConfig.cpp | 81 +- ndb/src/common/transporter/Makefile.am | 2 +- .../common/transporter/SCI_Transporter.cpp | 744 +++++++++--------- .../common/transporter/SCI_Transporter.hpp | 34 +- ndb/src/common/transporter/SHM_Buffer.hpp | 38 +- .../common/transporter/SHM_Transporter.cpp | 61 +- .../common/transporter/SHM_Transporter.hpp | 10 +- .../common/transporter/TCP_Transporter.cpp | 
13 +- .../common/transporter/TCP_Transporter.hpp | 3 +- .../transporter/TransporterRegistry.cpp | 60 +- ndb/src/common/util/SocketServer.cpp | 8 +- ndb/src/cw/cpcd/Makefile.am | 2 +- ndb/src/kernel/Makefile.am | 2 +- .../kernel/blocks/backup/restore/Makefile.am | 2 +- ndb/src/mgmsrv/ConfigInfo.cpp | 87 +- ndb/src/mgmsrv/Makefile.am | 2 +- sql/Makefile.am | 2 +- 24 files changed, 702 insertions(+), 539 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index c3ca14ab929..c6fbf5d23f5 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -100,6 +100,7 @@ miguel@hegel.txg.br miguel@light. miguel@light.local miguel@sartre.local +mikael@mc04.(none) mikron@c-fb0ae253.1238-1-64736c10.cust.bredbandsbolaget.se mikron@mikael-ronstr-ms-dator.local mmatthew@markslaptop. @@ -158,6 +159,7 @@ ram@ram.(none) ranger@regul.home.lan rburnett@build.mysql.com root@home.(none) +root@mc04.(none) root@x3.internalnet salle@banica.(none) salle@geopard.(none) diff --git a/acinclude.m4 b/acinclude.m4 index dff3b22ecec..c73f14b638c 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -1551,16 +1551,43 @@ dnl Sets HAVE_NDBCLUSTER_DB if --with-ndbcluster is used dnl --------------------------------------------------------------------------- AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ + AC_ARG_WITH([ndb-sci], + AC_HELP_STRING([--with-ndb-sci=DIR], + [Provide MySQL with a custom location of + sci library. Given DIR, sci library is + assumed to be in $DIR/lib and header files + in $DIR/include.]), + [mysql_sci_dir=${withval}], + [mysql_sci_dir=""]) + + case "$mysql_sci_dir" in + "no" ) + have_ndb_sci=no + AC_MSG_RESULT([-- not including sci transporter]) + ;; + * ) + if test -f "$mysql_sci_dir/lib/libsisci.a" -a \ + -f "$mysql_sci_dir/include/sisci_api.h"; then + NDB_SCI_INCLUDES="-I$mysql_sci_dir/include" + NDB_SCI_LIBS="-L$mysql_sci_dir/lib -lsisci" + AC_MSG_RESULT([-- including sci transporter]) + AC_DEFINE([NDB_SCI_TRANSPORTER], [1], + [Including Ndb Cluster DB sci transporter]) + AC_SUBST(NDB_SCI_INCLUDES) + AC_SUBST(NDB_SCI_LIBS) + have_ndb_sci="yes" + AC_MSG_RESULT([found sci transporter in $mysql_sci_dir/{include, lib}]) + else + AC_MSG_RESULT([could not find sci transporter in $mysql_sci_dir/{include, lib}]) + fi + ;; + esac + AC_ARG_WITH([ndb-shm], [ --with-ndb-shm Include the NDB Cluster shared memory transporter], [ndb_shm="$withval"], [ndb_shm=no]) - AC_ARG_WITH([ndb-sci], - [ - --with-ndb-sci Include the NDB Cluster sci transporter], - [ndb_sci="$withval"], - [ndb_sci=no]) AC_ARG_WITH([ndb-test], [ --with-ndb-test Include the NDB Cluster ndbapi test programs], @@ -1593,19 +1620,6 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ ;; esac - have_ndb_sci=no - case "$ndb_sci" in - yes ) - AC_MSG_RESULT([-- including sci transporter]) - AC_DEFINE([NDB_SCI_TRANSPORTER], [1], - [Including Ndb Cluster DB sci transporter]) - have_ndb_sci="yes" - ;; - * ) - AC_MSG_RESULT([-- not including sci transporter]) - ;; - esac - have_ndb_test=no case "$ndb_test" in yes ) diff --git a/configure.in b/configure.in index 9e23b6cf61c..bc05940b018 100644 --- a/configure.in +++ b/configure.in @@ -3024,11 +3024,11 @@ AC_SUBST([ndb_port_base]) ndb_transporter_opt_objs="" if test X"$have_ndb_shm" = Xyes then - ndb_transporter_opt_objs="$(ndb_transporter_opt_objs) SHM_Transporter.lo SHM_Transporter.unix.lo" + ndb_transporter_opt_objs="$ndb_transporter_opt_objs SHM_Transporter.lo SHM_Transporter.unix.lo" fi if test X"$have_ndb_sci" = Xyes then - ndb_transporter_opt_objs="$(ndb_transporter_opt_objs) 
SCI_Transporter.lo" + ndb_transporter_opt_objs="$ndb_transporter_opt_objs SCI_Transporter.lo" fi AC_SUBST([ndb_transporter_opt_objs]) diff --git a/ndb/config/type_ndbapitest.mk.am b/ndb/config/type_ndbapitest.mk.am index 8ac39aec8cf..f1fd8286337 100644 --- a/ndb/config/type_ndbapitest.mk.am +++ b/ndb/config/type_ndbapitest.mk.am @@ -3,7 +3,7 @@ LDADD += $(top_builddir)/ndb/test/src/libNDBT.a \ $(top_builddir)/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \ -I$(top_srcdir)/ndb/include \ diff --git a/ndb/config/type_ndbapitools.mk.am b/ndb/config/type_ndbapitools.mk.am index 3b5d40874b2..ed6d8699e05 100644 --- a/ndb/config/type_ndbapitools.mk.am +++ b/ndb/config/type_ndbapitools.mk.am @@ -3,7 +3,7 @@ LDADD += \ $(top_builddir)/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \ -I$(top_srcdir)/ndb/include \ diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h index 4a4863298dd..68eff84dd03 100644 --- a/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -117,16 +117,14 @@ #define CFG_SHM_KEY 502 #define CFG_SHM_BUFFER_MEM 503 -#define CFG_SCI_ID_0 550 -#define CFG_SCI_ID_1 551 -#define CFG_SCI_SEND_LIMIT 552 -#define CFG_SCI_BUFFER_MEM 553 -#define CFG_SCI_NODE1_ADAPTERS 554 -#define CFG_SCI_NODE1_ADAPTER0 555 -#define CFG_SCI_NODE1_ADAPTER1 556 -#define CFG_SCI_NODE2_ADAPTERS 554 -#define CFG_SCI_NODE2_ADAPTER0 555 -#define CFG_SCI_NODE2_ADAPTER1 556 +#define CFG_SCI_HOST1_ID_0 550 +#define CFG_SCI_HOST1_ID_1 551 +#define CFG_SCI_HOST2_ID_0 552 +#define CFG_SCI_HOST2_ID_1 553 +#define CFG_SCI_HOSTNAME_1 554 +#define CFG_SCI_HOSTNAME_2 555 +#define CFG_SCI_SEND_LIMIT 556 +#define CFG_SCI_BUFFER_MEM 557 #define CFG_OSE_HOSTNAME_1 600 #define CFG_OSE_HOSTNAME_2 601 diff --git a/ndb/include/transporter/TransporterDefinitions.hpp b/ndb/include/transporter/TransporterDefinitions.hpp index 445e8b889d2..a8da8068552 100644 --- a/ndb/include/transporter/TransporterDefinitions.hpp +++ b/ndb/include/transporter/TransporterDefinitions.hpp @@ -59,8 +59,6 @@ struct TCP_TransporterConfiguration { NodeId localNodeId; Uint32 sendBufferSize; // Size of SendBuffer of priority B Uint32 maxReceiveSize; // Maximum no of bytes to receive - Uint32 byteOrder; - bool compression; bool checksum; bool signalId; }; @@ -72,10 +70,8 @@ struct SHM_TransporterConfiguration { Uint32 port; NodeId remoteNodeId; NodeId localNodeId; - bool compression; bool checksum; bool signalId; - int byteOrder; Uint32 shmKey; Uint32 shmSize; @@ -89,10 +85,8 @@ struct OSE_TransporterConfiguration { const char *localHostName; NodeId remoteNodeId; NodeId localNodeId; - bool compression; bool checksum; bool signalId; - int byteOrder; Uint32 prioASignalSize; Uint32 prioBSignalSize; @@ -103,20 +97,20 @@ struct OSE_TransporterConfiguration { * SCI Transporter Configuration */ struct SCI_TransporterConfiguration { + const char *remoteHostName; + const char *localHostName; + Uint32 port; Uint32 sendLimit; // Packet size Uint32 bufferSize; // Buffer size Uint32 nLocalAdapters; // 1 or 2, the number of adapters on local host - Uint32 nRemoteAdapters; Uint32 remoteSciNodeId0; 
// SCInodeId for adapter 1 Uint32 remoteSciNodeId1; // SCInodeId for adapter 2 NodeId localNodeId; // Local node Id NodeId remoteNodeId; // Remote node Id - Uint32 byteOrder; - bool compression; bool checksum; bool signalId; diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp index a76c541f3f6..83aa3e88b41 100644 --- a/ndb/src/common/mgmcommon/IPCConfig.cpp +++ b/ndb/src/common/mgmcommon/IPCConfig.cpp @@ -133,7 +133,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){ Uint32 compression; Uint32 checksum; if(!tmp->get("SendSignalId", &sendSignalId)) continue; - if(!tmp->get("Compression", &compression)) continue; if(!tmp->get("Checksum", &checksum)) continue; const char * type; @@ -143,8 +142,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){ SHM_TransporterConfiguration conf; conf.localNodeId = the_ownId; conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2); - conf.byteOrder = 0; - conf.compression = compression; conf.checksum = checksum; conf.signalId = sendSignalId; @@ -164,8 +161,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){ SCI_TransporterConfiguration conf; conf.localNodeId = the_ownId; conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2); - conf.byteOrder = 0; - conf.compression = compression; conf.checksum = checksum; conf.signalId = sendSignalId; @@ -174,18 +169,16 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){ if(the_ownId == nodeId1){ if(!tmp->get("Node1_NoOfAdapters", &conf.nLocalAdapters)) continue; - if(!tmp->get("Node2_NoOfAdapters", &conf.nRemoteAdapters)) continue; if(!tmp->get("Node2_Adapter", 0, &conf.remoteSciNodeId0)) continue; - if(conf.nRemoteAdapters > 1){ + if(conf.nLocalAdapters > 1){ if(!tmp->get("Node2_Adapter", 1, &conf.remoteSciNodeId1)) continue; } } else { if(!tmp->get("Node2_NoOfAdapters", &conf.nLocalAdapters)) continue; - if(!tmp->get("Node1_NoOfAdapters", &conf.nRemoteAdapters)) continue; if(!tmp->get("Node1_Adapter", 0, &conf.remoteSciNodeId0)) continue; - if(conf.nRemoteAdapters > 1){ + if(conf.nLocalAdapters > 1){ if(!tmp->get("Node1_Adapter", 1, &conf.remoteSciNodeId1)) continue; } } @@ -243,8 +236,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){ conf.localHostName = ownHostName; conf.remoteNodeId = remoteNodeId; conf.localNodeId = ownNodeId; - conf.byteOrder = 0; - conf.compression = compression; conf.checksum = checksum; conf.signalId = sendSignalId; @@ -270,8 +261,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){ conf.localHostName = ownHostName; conf.remoteNodeId = remoteNodeId; conf.localNodeId = ownNodeId; - conf.byteOrder = 0; - conf.compression = compression; conf.checksum = checksum; conf.signalId = sendSignalId; @@ -344,6 +333,7 @@ Uint32 IPCConfig::configureTransporters(Uint32 nodeId, const class ndb_mgm_configuration & config, class TransporterRegistry & tr){ + DBUG_ENTER("IPCConfig::configureTransporters"); Uint32 noOfTransportersCreated= 0, server_port= 0; ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION); @@ -374,14 +364,13 @@ IPCConfig::configureTransporters(Uint32 nodeId, } server_port= tmp_server_port; } - + DBUG_PRINT("info", ("Transporter between this node %d and node %d using port %d, signalId %d, checksum %d", + nodeId, remoteNodeId, tmp_server_port, sendSignalId, checksum)); switch(type){ case CONNECTION_TYPE_SHM:{ SHM_TransporterConfiguration 
conf; conf.localNodeId = nodeId; conf.remoteNodeId = remoteNodeId; - conf.byteOrder = 0; - conf.compression = 0; conf.checksum = checksum; conf.signalId = sendSignalId; @@ -391,45 +380,60 @@ IPCConfig::configureTransporters(Uint32 nodeId, conf.port= tmp_server_port; if(!tr.createTransporter(&conf)){ + DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d", + conf.localNodeId, conf.remoteNodeId)); ndbout << "Failed to create SHM Transporter from: " << conf.localNodeId << " to: " << conf.remoteNodeId << endl; } else { noOfTransportersCreated++; } + DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, buf size = %d", + conf.shmKey, conf.shmSize)); break; } case CONNECTION_TYPE_SCI:{ SCI_TransporterConfiguration conf; + const char * host1, * host2; conf.localNodeId = nodeId; conf.remoteNodeId = remoteNodeId; - conf.byteOrder = 0; - conf.compression = 0; conf.checksum = checksum; conf.signalId = sendSignalId; + conf.port= tmp_server_port; + if(iter.get(CFG_SCI_HOSTNAME_1, &host1)) break; + if(iter.get(CFG_SCI_HOSTNAME_2, &host2)) break; + + conf.localHostName = (nodeId == nodeId1 ? host1 : host2); + conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1); + if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sendLimit)) break; if(iter.get(CFG_SCI_BUFFER_MEM, &conf.bufferSize)) break; - - if(nodeId == nodeId1){ - if(iter.get(CFG_SCI_NODE1_ADAPTERS, &conf.nLocalAdapters)) break; - if(iter.get(CFG_SCI_NODE2_ADAPTERS, &conf.nRemoteAdapters)) break; - if(iter.get(CFG_SCI_NODE2_ADAPTER0, &conf.remoteSciNodeId0)) break; - if(conf.nRemoteAdapters > 1){ - if(iter.get(CFG_SCI_NODE2_ADAPTER1, &conf.remoteSciNodeId1)) break; - } + if (nodeId == nodeId1) { + if(iter.get(CFG_SCI_HOST2_ID_0, &conf.remoteSciNodeId0)) break; + if(iter.get(CFG_SCI_HOST2_ID_1, &conf.remoteSciNodeId1)) break; } else { - if(iter.get(CFG_SCI_NODE2_ADAPTERS, &conf.nLocalAdapters)) break; - if(iter.get(CFG_SCI_NODE1_ADAPTERS, &conf.nRemoteAdapters)) break; - if(iter.get(CFG_SCI_NODE1_ADAPTER0, &conf.remoteSciNodeId0)) break; - if(conf.nRemoteAdapters > 1){ - if(iter.get(CFG_SCI_NODE1_ADAPTER1, &conf.remoteSciNodeId1)) break; - } + if(iter.get(CFG_SCI_HOST1_ID_0, &conf.remoteSciNodeId0)) break; + if(iter.get(CFG_SCI_HOST1_ID_1, &conf.remoteSciNodeId1)) break; } - - if(!tr.createTransporter(&conf)){ + if (conf.remoteSciNodeId1 == 0) { + conf.nLocalAdapters = 1; + } else { + conf.nLocalAdapters = 2; + } + if(!tr.createTransporter(&conf)){ + DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d", + conf.localNodeId, conf.remoteNodeId)); ndbout << "Failed to create SCI Transporter from: " << conf.localNodeId << " to: " << conf.remoteNodeId << endl; } else { + DBUG_PRINT("info", ("Created SCI Transporter: Adapters = %d, remote SCI node id %d", + conf.nLocalAdapters, conf.remoteSciNodeId0)); + DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, buf size = %d", + conf.localHostName, conf.remoteHostName, conf.sendLimit, conf.bufferSize)); + if (conf.nLocalAdapters > 1) { + DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, second remote SCI node id = %d", + conf.remoteSciNodeId1)); + } noOfTransportersCreated++; continue; } @@ -457,8 +461,6 @@ IPCConfig::configureTransporters(Uint32 nodeId, conf.remoteNodeId = remoteNodeId; conf.localHostName = (nodeId == nodeId1 ? host1 : host2); conf.remoteHostName = (nodeId == nodeId1 ? 
host2 : host1); - conf.byteOrder = 0; - conf.compression = 0; conf.checksum = checksum; conf.signalId = sendSignalId; @@ -468,6 +470,9 @@ IPCConfig::configureTransporters(Uint32 nodeId, } else { noOfTransportersCreated++; } + DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, maxReceiveSize = %d", + conf.sendBufferSize, conf.maxReceiveSize)); + break; case CONNECTION_TYPE_OSE:{ OSE_TransporterConfiguration conf; @@ -483,8 +488,6 @@ IPCConfig::configureTransporters(Uint32 nodeId, conf.remoteNodeId = remoteNodeId; conf.localHostName = (nodeId == nodeId1 ? host1 : host2); conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1); - conf.byteOrder = 0; - conf.compression = 0; conf.checksum = checksum; conf.signalId = sendSignalId; @@ -505,6 +508,6 @@ IPCConfig::configureTransporters(Uint32 nodeId, tr.m_service_port= server_port; - return noOfTransportersCreated; + DBUG_RETURN(noOfTransportersCreated); } diff --git a/ndb/src/common/transporter/Makefile.am b/ndb/src/common/transporter/Makefile.am index 218b261606d..9d91a210d46 100644 --- a/ndb/src/common/transporter/Makefile.am +++ b/ndb/src/common/transporter/Makefile.am @@ -13,7 +13,7 @@ EXTRA_libtransporter_la_SOURCES = SHM_Transporter.cpp SHM_Transporter.unix.cpp S libtransporter_la_LIBADD = @ndb_transporter_opt_objs@ libtransporter_la_DEPENDENCIES = @ndb_transporter_opt_objs@ -INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter +INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter @NDB_SCI_INCLUDES@ include $(top_srcdir)/ndb/config/common.mk.am include $(top_srcdir)/ndb/config/type_util.mk.am diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/ndb/src/common/transporter/SCI_Transporter.cpp index c52c8a9d8c0..465d7827069 100644 --- a/ndb/src/common/transporter/SCI_Transporter.cpp +++ b/ndb/src/common/transporter/SCI_Transporter.cpp @@ -24,23 +24,30 @@ #include "TransporterInternalDefinitions.hpp" #include - + +#include +#include + #define FLAGS 0 - -SCI_Transporter::SCI_Transporter(Uint32 packetSize, +#define DEBUG_TRANSPORTER +SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg, + const char *lHostName, + const char *rHostName, + int r_port, + Uint32 packetSize, Uint32 bufferSize, Uint32 nAdapters, Uint16 remoteSciNodeId0, Uint16 remoteSciNodeId1, NodeId _localNodeId, NodeId _remoteNodeId, - int byte_order, - bool compr, bool chksm, bool signalId, Uint32 reportFreq) : - Transporter(_localNodeId, _remoteNodeId, byte_order, compr, chksm, signalId) -{ + Transporter(t_reg, lHostName, rHostName, r_port, _localNodeId, + _remoteNodeId, 0, false, chksm, signalId) +{ + DBUG_ENTER("SCI_Transporter::SCI_Transporter"); m_PacketSize = (packetSize + 3)/4 ; m_BufferSize = bufferSize; m_sendBuffer.m_buffer = NULL; @@ -56,10 +63,6 @@ SCI_Transporter::SCI_Transporter(Uint32 packetSize, m_initLocal=false; - m_remoteNodes= new Uint16[m_numberOfRemoteNodes]; - if(m_remoteNodes == NULL) { - //DO WHAT?? 
- } m_swapCounter=0; m_failCounter=0; m_remoteNodes[0]=remoteSciNodeId0; @@ -94,20 +97,19 @@ SCI_Transporter::SCI_Transporter(Uint32 packetSize, i4096=0; i4097=0; #endif - + DBUG_VOID_RETURN; } void SCI_Transporter::disconnectImpl() { + DBUG_ENTER("SCI_Transporter::disconnectImpl"); sci_error_t err; if(m_mapped){ setDisconnect(); -#ifdef DEBUG_TRANSPORTER - ndbout << "DisconnectImpl " << getConnectionStatus() << endl; - ndbout << "remote node " << remoteNodeId << endl; -#endif + DBUG_PRINT("info", ("connect status = %d, remote node = %d", + (int)getConnectionStatus(), remoteNodeId)); disconnectRemote(); disconnectLocal(); } @@ -124,65 +126,56 @@ void SCI_Transporter::disconnectImpl() SCIClose(sciAdapters[i].scidesc, FLAGS, &err); if(err != SCI_ERR_OK) { - reportError(callbackObj, localNodeId, TE_SCI_UNABLE_TO_CLOSE_CHANNEL); -#ifdef DEBUG_TRANSPORTER - fprintf(stderr, - "\nCannot close channel to the driver. Error code 0x%x", - err); -#endif - } + report_error(TE_SCI_UNABLE_TO_CLOSE_CHANNEL); + DBUG_PRINT("error", ("Cannot close channel to the driver. Error code 0x%x", + err)); + } } } m_sciinit=false; #ifdef DEBUG_TRANSPORTER - ndbout << "total: " << i1024+ i10242048 + i2048+i2049 << endl; + ndbout << "total: " << i1024+ i10242048 + i2048+i2049 << endl; ndbout << "<1024: " << i1024 << endl; ndbout << "1024-2047: " << i10242048 << endl; ndbout << "==2048: " << i2048 << endl; ndbout << "2049-4096: " << i20484096 << endl; ndbout << "==4096: " << i4096 << endl; ndbout << ">4096: " << i4097 << endl; - #endif - + DBUG_VOID_RETURN; } bool SCI_Transporter::initTransporter() { - if(m_BufferSize < (2*MAX_MESSAGE_SIZE)){ - m_BufferSize = 2 * MAX_MESSAGE_SIZE; + DBUG_ENTER("SCI_Transporter::initTransporter"); + if(m_BufferSize < (2*MAX_MESSAGE_SIZE + 4096)){ + m_BufferSize = 2 * MAX_MESSAGE_SIZE + 4096; } - // Allocate buffers for sending - Uint32 sz = 0; - if(m_BufferSize < (m_PacketSize * 4)){ - sz = m_BufferSize + MAX_MESSAGE_SIZE; - } else { - /** - * 3 packages - */ - sz = (m_PacketSize * 4) * 3 + MAX_MESSAGE_SIZE; - } + // Allocate buffers for sending, send buffer size plus 2048 bytes for avoiding + // the need to send twice when a large message comes around. Send buffer size is + // measured in words. + Uint32 sz = 4 * m_PacketSize + MAX_MESSAGE_SIZE;; - m_sendBuffer.m_bufferSize = 4 * ((sz + 3) / 4); - m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_bufferSize / 4]; + m_sendBuffer.m_sendBufferSize = 4 * ((sz + 3) / 4); + m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4]; m_sendBuffer.m_dataSize = 0; - + + DBUG_PRINT("info", ("Created SCI Send Buffer with buffer size %d and packet size %d", + m_sendBuffer.m_sendBufferSize, m_PacketSize * 4)); if(!getLinkStatus(m_ActiveAdapterId) || - !getLinkStatus(m_StandbyAdapterId)) { -#ifdef DEBUG_TRANSPORTER - ndbout << "The link is not fully operational. " << endl; - ndbout << "Check the cables and the switches" << endl; -#endif + (m_adapters > 1 && + !getLinkStatus(m_StandbyAdapterId))) { + DBUG_PRINT("error", ("The link is not fully operational. 
Check the cables and the switches")); //reportDisconnect(remoteNodeId, 0); //doDisconnect(); //NDB should terminate - reportError(callbackObj, localNodeId, TE_SCI_LINK_ERROR); - return false; + report_error(TE_SCI_LINK_ERROR); + DBUG_RETURN(false); } - return true; + DBUG_RETURN(true); } // initTransporter() @@ -218,10 +211,8 @@ bool SCI_Transporter::getLinkStatus(Uint32 adapterNo) SCIQuery(SCI_Q_ADAPTER,(void*)(&queryAdapter),(Uint32)NULL,&error); if(error != SCI_ERR_OK) { -#ifdef DEBUG_TRANSPORTER - ndbout << "error querying adapter " << endl; -#endif - return false; + DBUG_PRINT("error", ("error %d querying adapter", error)); + return false; } if(linkstatus<=0) return false; @@ -231,6 +222,7 @@ bool SCI_Transporter::getLinkStatus(Uint32 adapterNo) sci_error_t SCI_Transporter::initLocalSegment() { + DBUG_ENTER("SCI_Transporter::initLocalSegment"); Uint32 segmentSize = m_BufferSize; Uint32 offset = 0; sci_error_t err; @@ -238,16 +230,12 @@ sci_error_t SCI_Transporter::initLocalSegment() { for(Uint32 i=0; i 0){ #ifdef DEBUG_TRANSPORTER @@ -363,15 +342,19 @@ bool SCI_Transporter::doSend() { i4097++; #endif if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { -#ifdef DEBUG_TRANSPORTER - ndbout << "Start sequence failed" << endl; -#endif - reportError(callbackObj, remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE); + DBUG_PRINT("error", ("Start sequence failed")); + report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); return false; } - tryagain: + tryagain: + retry++; + if (retry > 3) { + DBUG_PRINT("error", ("SCI Transfer failed")); + report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + return false; + } Uint32 * insertPtr = (Uint32 *) (m_TargetSegm[m_ActiveAdapterId].writer)->getWritePtr(sizeToSend); @@ -390,44 +373,37 @@ bool SCI_Transporter::doSend() { &err); + if (err != SCI_ERR_OK) { if(err == SCI_ERR_OUT_OF_RANGE) { -#ifdef DEBUG_TRANSPORTER - ndbout << "Data transfer : out of range error \n" << endl; -#endif + DBUG_PRINT("error", ("Data transfer : out of range error")); goto tryagain; } if(err == SCI_ERR_SIZE_ALIGNMENT) { -#ifdef DEBUG_TRANSPORTER - ndbout << "Data transfer : aligne\n" << endl; -#endif + DBUG_PRINT("error", ("Data transfer : alignment error")); + DBUG_PRINT("info", ("sendPtr 0x%x, sizeToSend = %d", sendPtr, sizeToSend)); goto tryagain; } if(err == SCI_ERR_OFFSET_ALIGNMENT) { -#ifdef DEBUG_TRANSPORTER - ndbout << "Data transfer : offset alignment\n" << endl; -#endif + DBUG_PRINT("error", ("Data transfer : offset alignment")); goto tryagain; - } + } if(err == SCI_ERR_TRANSFER_FAILED) { //(m_TargetSegm[m_StandbyAdapterId].writer)->heavyLock(); if(getLinkStatus(m_ActiveAdapterId)) { - retry++; - if(retry>3) { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; - } goto tryagain; } + if (m_adapters == 1) { + DBUG_PRINT("error", ("SCI Transfer failed")); + report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + return false; + } m_failCounter++; Uint32 temp=m_ActiveAdapterId; switch(m_swapCounter) { case 0: /**swap from active (0) to standby (1)*/ if(getLinkStatus(m_StandbyAdapterId)) { -#ifdef DEBUG_TRANSPORTER - ndbout << "Swapping from 0 to 1 " << endl; -#endif + DBUG_PRINT("error", ("Swapping from adapter 0 to 1")); failoverShmWriter(); SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0); m_ActiveAdapterId=m_StandbyAdapterId; @@ -436,26 +412,21 @@ bool SCI_Transporter::doSend() { FLAGS, &err); if(err!=SCI_ERR_OK) { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); + 
report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); + DBUG_PRINT("error", ("Unable to remove sequence")); return false; } if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { -#ifdef DEBUG_TRANSPORTER - ndbout << "Start sequence failed" << endl; -#endif - reportError(callbackObj, - remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE); + DBUG_PRINT("error", ("Start sequence failed")); + report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); return false; } m_swapCounter++; -#ifdef DEBUG_TRANSPORTER - ndbout << "failover complete.." << endl; -#endif + DBUG_PRINT("info", ("failover complete")); goto tryagain; } else { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + DBUG_PRINT("error", ("SCI Transfer failed")); return false; } return false; @@ -468,20 +439,15 @@ bool SCI_Transporter::doSend() { failoverShmWriter(); m_ActiveAdapterId=m_StandbyAdapterId; m_StandbyAdapterId=temp; -#ifdef DEBUG_TRANSPORTER - ndbout << "Swapping from 1 to 0 " << endl; -#endif + DBUG_PRINT("info", ("Swapping from 1 to 0")); if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE); + DBUG_PRINT("error", ("Unable to create sequence")); + report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); return false; } if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { -#ifdef DEBUG_TRANSPORTER - ndbout << "startSequence failed... disconnecting" << endl; -#endif - reportError(callbackObj, - remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE); + DBUG_PRINT("error", ("startSequence failed... disconnecting")); + report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); return false; } @@ -489,37 +455,36 @@ bool SCI_Transporter::doSend() { , FLAGS, &err); if(err!=SCI_ERR_OK) { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); + DBUG_PRINT("error", ("Unable to remove sequence")); + report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); return false; } if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE); + DBUG_PRINT("error", ("Unable to create sequence on standby")); + report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); return false; } m_swapCounter=0; -#ifdef DEBUG_TRANSPORTER - ndbout << "failover complete.." << endl; -#endif + DBUG_PRINT("info", ("failover complete..")); goto tryagain; } else { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + DBUG_PRINT("error", ("Unrecoverable data transfer error")); + report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); return false; } break; default: - reportError(callbackObj, - remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + DBUG_PRINT("error", ("Unrecoverable data transfer error")); + report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); return false; break; } + } } else { SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer); writer->updateWritePtr(sizeToSend); @@ -535,13 +500,10 @@ bool SCI_Transporter::doSend() { /** * If we end up here, the SCI segment is full. 
*/ -#ifdef DEBUG_TRANSPORTER - ndbout << "the segment is full for some reason" << endl; -#endif + DBUG_PRINT("error", ("the segment is full for some reason")); return false; } //if } - return true; } // doSend() @@ -557,11 +519,8 @@ void SCI_Transporter::failoverShmWriter() { void SCI_Transporter::setupLocalSegment() { - + DBUG_ENTER("SCI_Transporter::setupLocalSegment"); Uint32 sharedSize = 0; - sharedSize += 16; //SHM_Reader::getSharedSize(); - sharedSize += 16; //SHM_Writer::getSharedSize(); - sharedSize += 32; //SHM_Writer::getSharedSize(); sharedSize =4096; //start of the buffer is page aligend Uint32 sizeOfBuffer = m_BufferSize; @@ -570,207 +529,265 @@ void SCI_Transporter::setupLocalSegment() Uint32 * localReadIndex = (Uint32*)m_SourceSegm[m_ActiveAdapterId].mappedMemory; - Uint32 * localWriteIndex = - (Uint32*)(localReadIndex+ 1); - - Uint32 * localEndOfDataIndex = (Uint32*) - (localReadIndex + 2); - + Uint32 * localWriteIndex = (Uint32*)(localReadIndex+ 1); + Uint32 * localEndWriteIndex = (Uint32*)(localReadIndex + 2); m_localStatusFlag = (Uint32*)(localReadIndex + 3); - Uint32 * sharedLockIndex = (Uint32*) - (localReadIndex + 4); - - Uint32 * sharedHeavyLock = (Uint32*) - (localReadIndex + 5); - char * localStartOfBuf = (char*) ((char*)m_SourceSegm[m_ActiveAdapterId].mappedMemory+sharedSize); - - * localReadIndex = * localWriteIndex = 0; - * localEndOfDataIndex = sizeOfBuffer -1; - + * localReadIndex = 0; + * localWriteIndex = 0; + * localEndWriteIndex = 0; + const Uint32 slack = MAX_MESSAGE_SIZE; reader = new SHM_Reader(localStartOfBuf, sizeOfBuffer, slack, localReadIndex, + localEndWriteIndex, localWriteIndex); - * localReadIndex = 0; - * localWriteIndex = 0; - reader->clear(); + DBUG_VOID_RETURN; } //setupLocalSegment void SCI_Transporter::setupRemoteSegment() { + DBUG_ENTER("SCI_Transporter::setupRemoteSegment"); Uint32 sharedSize = 0; - sharedSize += 16; //SHM_Reader::getSharedSize(); - sharedSize += 16; //SHM_Writer::getSharedSize(); - sharedSize += 32; - sharedSize =4096; //start of the buffer is page aligend + sharedSize =4096; //start of the buffer is page aligned Uint32 sizeOfBuffer = m_BufferSize; + const Uint32 slack = MAX_MESSAGE_SIZE; sizeOfBuffer -= sharedSize; - Uint32 * segPtr = (Uint32*) m_TargetSegm[m_StandbyAdapterId].mappedMemory ; - - Uint32 * remoteReadIndex2 = (Uint32*)segPtr; - Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1); - Uint32 * remoteEndOfDataIndex2 = (Uint32*) (segPtr + 2); - Uint32 * sharedLockIndex2 = (Uint32*) (segPtr + 3); - m_remoteStatusFlag2 = (Uint32*)(segPtr + 4); - Uint32 * sharedHeavyLock2 = (Uint32*) (segPtr + 5); - - - char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize); - - segPtr = (Uint32*) m_TargetSegm[m_ActiveAdapterId].mappedMemory ; + + Uint32 *segPtr = (Uint32*) m_TargetSegm[m_ActiveAdapterId].mappedMemory ; Uint32 * remoteReadIndex = (Uint32*)segPtr; - Uint32 * remoteWriteIndex = (Uint32*) (segPtr + 1); - Uint32 * remoteEndOfDataIndex = (Uint32*) (segPtr + 2); - Uint32 * sharedLockIndex = (Uint32*) (segPtr + 3); - m_remoteStatusFlag = (Uint32*)(segPtr + 4); - Uint32 * sharedHeavyLock = (Uint32*) (segPtr + 5); + Uint32 * remoteWriteIndex = (Uint32*)(segPtr + 1); + Uint32 * remoteEndWriteIndex = (Uint32*) (segPtr + 2); + m_remoteStatusFlag = (Uint32*)(segPtr + 3); char * remoteStartOfBuf = ( char*)((char*)segPtr+(sharedSize)); - * remoteReadIndex = * remoteWriteIndex = 0; - * remoteReadIndex2 = * remoteWriteIndex2 = 0; - * remoteEndOfDataIndex = sizeOfBuffer - 1; - * remoteEndOfDataIndex2 = sizeOfBuffer - 1; - 
- /** - * setup two writers. writer2 is used to mirror the changes of - * writer on the standby - * segment, so that in the case of a failover, we can switch - * to the stdby seg. quickly.* - */ - const Uint32 slack = MAX_MESSAGE_SIZE; - writer = new SHM_Writer(remoteStartOfBuf, sizeOfBuffer, slack, remoteReadIndex, + remoteEndWriteIndex, remoteWriteIndex); - writer2 = new SHM_Writer(remoteStartOfBuf2, - sizeOfBuffer, - slack, - remoteReadIndex2, - remoteWriteIndex2); - - * remoteReadIndex = 0; - * remoteWriteIndex = 0; - writer->clear(); - writer2->clear(); m_TargetSegm[0].writer=writer; - m_TargetSegm[1].writer=writer2; m_sendBuffer.m_forceSendLimit = writer->getBufferSize(); if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - reportThreadError(remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE); + report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); + DBUG_PRINT("error", ("Unable to create sequence on active")); doDisconnect(); } - if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) { - reportThreadError(remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE); - doDisconnect(); - } - - + if (m_adapters > 1) { + segPtr = (Uint32*) m_TargetSegm[m_StandbyAdapterId].mappedMemory ; + + Uint32 * remoteReadIndex2 = (Uint32*)segPtr; + Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1); + Uint32 * remoteEndWriteIndex2 = (Uint32*) (segPtr + 2); + m_remoteStatusFlag2 = (Uint32*)(segPtr + 3); + + char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize); + + /** + * setup a writer. writer2 is used to mirror the changes of + * writer on the standby + * segment, so that in the case of a failover, we can switch + * to the stdby seg. quickly.* + */ + writer2 = new SHM_Writer(remoteStartOfBuf2, + sizeOfBuffer, + slack, + remoteReadIndex2, + remoteEndWriteIndex2, + remoteWriteIndex2); + + * remoteReadIndex = 0; + * remoteWriteIndex = 0; + * remoteEndWriteIndex = 0; + writer2->clear(); + m_TargetSegm[1].writer=writer2; + if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) { + report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); + DBUG_PRINT("error", ("Unable to create sequence on standby")); + doDisconnect(); + } + } + DBUG_VOID_RETURN; } //setupRemoteSegment - - -bool SCI_Transporter::connectImpl(Uint32 timeout) { - - sci_error_t err; - Uint32 offset = 0; - + +bool +SCI_Transporter::init_local() +{ + DBUG_ENTER("SCI_Transporter::init_local"); if(!m_initLocal) { if(initLocalSegment()!=SCI_ERR_OK){ - NdbSleep_MilliSleep(timeout); + NdbSleep_MilliSleep(10); //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! 
- reportThreadError(localNodeId, TE_SCI_CANNOT_INIT_LOCALSEGMENT); - return false; + report_error(TE_SCI_CANNOT_INIT_LOCALSEGMENT); + DBUG_RETURN(false); } - m_initLocal=true; + m_initLocal=true; } - - if(!m_mapped ) { - - for(Uint32 i=0; i < m_adapters ; i++) { - m_TargetSegm[i].rhm[i].remoteHandle=0; - SCIConnectSegment(sciAdapters[i].scidesc, - &(m_TargetSegm[i].rhm[i].remoteHandle), - m_remoteNodes[i], - remoteSegmentId(localNodeId, remoteNodeId), - i, - 0, - 0, - 0, - 0, - &err); - - if(err != SCI_ERR_OK) { - NdbSleep_MilliSleep(timeout); - return false; - } - - } - - - // Map the remote memory segment into program space - for(Uint32 i=0; i < m_adapters ; i++) { - m_TargetSegm[i].mappedMemory = - SCIMapRemoteSegment((m_TargetSegm[i].rhm[i].remoteHandle), - &(m_TargetSegm[i].rhm[i].map), - offset, - m_BufferSize, - NULL, - FLAGS, - &err); - - - if(err!= SCI_ERR_OK) { -#ifdef DEBUG_TRANSPORTER - ndbout_c("\nCannot map a segment to the remote node %d."); - ndbout_c("Error code 0x%x",m_RemoteSciNodeId, err); -#endif - //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! - reportThreadError(remoteNodeId, TE_SCI_CANNOT_MAP_REMOTESEGMENT); - return false; - } - - - } - m_mapped=true; - setupRemoteSegment(); - setConnected(); -#ifdef DEBUG_TRANSPORTER - ndbout << "connected and mapped to segment : " << endl; - ndbout << "remoteNode: " << m_remoteNodes[0] << endl; - ndbout << "remoteNode: " << m_remotenodes[1] << endl; - ndbout << "remoteSegId: " - << remoteSegmentId(localNodeId, remoteNodeId) - << endl; -#endif - return true; - } - else { - return getConnectionStatus(); - } -} // connectImpl() - + DBUG_RETURN(true); +} +bool +SCI_Transporter::init_remote() +{ + DBUG_ENTER("SCI_Transporter::init_remote"); + sci_error_t err; + Uint32 offset = 0; + if(!m_mapped ) { + DBUG_PRINT("info", ("Map remote segments")); + for(Uint32 i=0; i < m_adapters ; i++) { + m_TargetSegm[i].rhm[i].remoteHandle=0; + SCIConnectSegment(sciAdapters[i].scidesc, + &(m_TargetSegm[i].rhm[i].remoteHandle), + m_remoteNodes[i], + remoteSegmentId(localNodeId, remoteNodeId), + i, + 0, + 0, + 0, + 0, + &err); + + if(err != SCI_ERR_OK) { + NdbSleep_MilliSleep(10); + DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err)); + DBUG_RETURN(false); + } + + } + // Map the remote memory segment into program space + for(Uint32 i=0; i < m_adapters ; i++) { + m_TargetSegm[i].mappedMemory = + SCIMapRemoteSegment((m_TargetSegm[i].rhm[i].remoteHandle), + &(m_TargetSegm[i].rhm[i].map), + offset, + m_BufferSize, + NULL, + FLAGS, + &err); + + + if(err!= SCI_ERR_OK) { + DBUG_PRINT("error", ("Cannot map a segment to the remote node %d. Error code 0x%x",m_RemoteSciNodeId, err)); + //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! 
+ report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT); + DBUG_RETURN(false); + } + } + m_mapped=true; + setupRemoteSegment(); + setConnected(); + DBUG_PRINT("info", ("connected and mapped to segment, remoteNode: %d", + remoteNodeId)); + DBUG_PRINT("info", ("remoteSegId: %d", + remoteSegmentId(localNodeId, remoteNodeId))); + DBUG_RETURN(true); + } else { + DBUG_RETURN(getConnectionStatus()); + } +} + +bool +SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) +{ + SocketInputStream s_input(sockfd); + SocketOutputStream s_output(sockfd); + char buf[256]; + DBUG_ENTER("SCI_Transporter::connect_client_impl"); + // Wait for server to create and attach + if (s_input.gets(buf, 256) == 0) { + DBUG_PRINT("error", ("No initial response from server in SCI")); + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + + if (!init_local()) { + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + + // Send ok to server + s_output.println("sci client 1 ok"); + + if (!init_remote()) { + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + // Wait for ok from server + if (s_input.gets(buf, 256) == 0) { + DBUG_PRINT("error", ("No second response from server in SCI")); + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + // Send ok to server + s_output.println("sci client 2 ok"); + + NDB_CLOSE_SOCKET(sockfd); + DBUG_PRINT("info", ("Successfully connected client to node %d", + remoteNodeId)); + DBUG_RETURN(true); +} + +bool +SCI_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) +{ + SocketOutputStream s_output(sockfd); + SocketInputStream s_input(sockfd); + char buf[256]; + DBUG_ENTER("SCI_Transporter::connect_server_impl"); + + if (!init_local()) { + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + // Send ok to client + s_output.println("sci server 1 ok"); + + // Wait for ok from client + if (s_input.gets(buf, 256) == 0) { + DBUG_PRINT("error", ("No response from client in SCI")); + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + + if (!init_remote()) { + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + // Send ok to client + s_output.println("sci server 2 ok"); + // Wait for ok from client + if (s_input.gets(buf, 256) == 0) { + DBUG_PRINT("error", ("No second response from client in SCI")); + NDB_CLOSE_SOCKET(sockfd); + DBUG_RETURN(false); + } + + NDB_CLOSE_SOCKET(sockfd); + DBUG_PRINT("info", ("Successfully connected server to node %d", + remoteNodeId)); + DBUG_RETURN(true); +} + sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) { sci_error_t err; SCICreateMapSequence((m_TargetSegm[adapterid].rhm[adapterid].map), @@ -795,13 +812,14 @@ sci_error_t SCI_Transporter::startSequence(Uint32 adapterid) { // If there still is an error then data cannot be safely send - return err; + return err; } // startSequence() bool SCI_Transporter::disconnectLocal() -{ +{ + DBUG_ENTER("SCI_Transporter::disconnectLocal"); sci_error_t err; m_ActiveAdapterId=0; @@ -809,31 +827,28 @@ bool SCI_Transporter::disconnectLocal() */ SCIUnmapSegment(m_SourceSegm[0].lhm[0].map,0,&err); - if(err!=SCI_ERR_OK) { - reportError(callbackObj, - remoteNodeId, TE_SCI_UNABLE_TO_UNMAP_SEGMENT); - return false; - } + if(err!=SCI_ERR_OK) { + report_error(TE_SCI_UNABLE_TO_UNMAP_SEGMENT); + DBUG_PRINT("error", ("Unable to unmap segment")); + DBUG_RETURN(false); + } SCIRemoveSegment((m_SourceSegm[m_ActiveAdapterId].localHandle), FLAGS, &err); if(err!=SCI_ERR_OK) { - reportError(callbackObj, remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEGMENT); - return false; + report_error(TE_SCI_UNABLE_TO_REMOVE_SEGMENT); + 
DBUG_PRINT("error", ("Unable to remove segment")); + DBUG_RETURN(false); } - - if(err == SCI_ERR_OK) { -#ifdef DEBUG_TRANSPORTER - printf("Local memory segment is unmapped and removed\n" ); -#endif - } - return true; + DBUG_PRINT("info", ("Local memory segment is unmapped and removed")); + DBUG_RETURN(true); } // disconnectLocal() bool SCI_Transporter::disconnectRemote() { + DBUG_ENTER("SCI_Transporter::disconnectRemote"); sci_error_t err; for(Uint32 i=0; i= send_buf_size) || + (curr_data_size >= sci_buffer_remaining)) { + /** + * The new message will not fit in the send buffer. We need to + * send the send buffer before filling it up with the new + * signal data. If current data size will spill over buffer edge + * we will also send to avoid writing larger than possible in + * buffer. + */ + if (!doSend()) { + /** + * We were not successfull sending, report 0 as meaning buffer full and + * upper levels handle retries and other recovery matters. + */ return 0; } } - + /** + * New signal fits, simply fill it up with more data. + */ Uint32 sz = m_sendBuffer.m_dataSize; return &m_sendBuffer.m_buffer[sz]; } @@ -918,10 +946,11 @@ void SCI_Transporter::updateWritePtr(Uint32 lenBytes, Uint32 prio){ Uint32 sz = m_sendBuffer.m_dataSize; - sz += (lenBytes / 4); + Uint32 packet_size = m_PacketSize; + sz += ((lenBytes + 3) >> 2); m_sendBuffer.m_dataSize = sz; - if(sz > m_PacketSize) { + if(sz > packet_size) { /**------------------------------------------------- * Buffer is full and we are ready to send. We will * not wait since the signal is already in the buffer. @@ -944,7 +973,8 @@ bool SCI_Transporter::getConnectionStatus() { if(*m_localStatusFlag == SCICONNECTED && (*m_remoteStatusFlag == SCICONNECTED || - *m_remoteStatusFlag2 == SCICONNECTED)) + ((m_adapters > 1) && + *m_remoteStatusFlag2 == SCICONNECTED))) return true; else return false; @@ -954,7 +984,9 @@ SCI_Transporter::getConnectionStatus() { void SCI_Transporter::setConnected() { *m_remoteStatusFlag = SCICONNECTED; - *m_remoteStatusFlag2 = SCICONNECTED; + if (m_adapters > 1) { + *m_remoteStatusFlag2 = SCICONNECTED; + } *m_localStatusFlag = SCICONNECTED; } @@ -963,8 +995,10 @@ void SCI_Transporter::setDisconnect() { if(getLinkStatus(m_ActiveAdapterId)) *m_remoteStatusFlag = SCIDISCONNECT; - if(getLinkStatus(m_StandbyAdapterId)) - *m_remoteStatusFlag2 = SCIDISCONNECT; + if (m_adapters > 1) { + if(getLinkStatus(m_StandbyAdapterId)) + *m_remoteStatusFlag2 = SCIDISCONNECT; + } } @@ -981,20 +1015,20 @@ static bool init = false; bool SCI_Transporter::initSCI() { + DBUG_ENTER("SCI_Transporter::initSCI"); if(!init){ sci_error_t error; // Initialize SISCI library SCIInitialize(0, &error); if(error != SCI_ERR_OK) { -#ifdef DEBUG_TRANSPORTER - ndbout_c("\nCannot initialize SISCI library."); - ndbout_c("\nInconsistency between SISCI library and SISCI driver.Error code 0x%x", error); -#endif - return false; + DBUG_PRINT("error", ("Cannot initialize SISCI library.")); + DBUG_PRINT("error", ("Inconsistency between SISCI library and SISCI driver. 
Error code 0x%x", + error)); + DBUG_RETURN(false); } init = true; } - return true; + DBUG_RETURN(true); } diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp index 03496c2ce21..adc94f8bb4b 100644 --- a/ndb/src/common/transporter/SCI_Transporter.hpp +++ b/ndb/src/common/transporter/SCI_Transporter.hpp @@ -26,7 +26,7 @@ #include - /** +/** * The SCI Transporter * * The design goal of the SCI transporter is to deliver high performance @@ -135,15 +135,17 @@ public: bool getConnectionStatus(); private: - SCI_Transporter(Uint32 packetSize, + SCI_Transporter(TransporterRegistry &t_reg, + const char *local_host, + const char *remote_host, + int port, + Uint32 packetSize, Uint32 bufferSize, Uint32 nAdapters, Uint16 remoteSciNodeId0, Uint16 remoteSciNodeId1, NodeId localNodeID, NodeId remoteNodeID, - int byteorder, - bool compression, bool checksum, bool signalId, Uint32 reportFreq = 4096); @@ -160,7 +162,8 @@ private: /** * For statistics on transferred packets */ -#ifdef DEBUG_TRANSPORTER +//#ifdef DEBUG_TRANSPORTER +#if 1 Uint32 i1024; Uint32 i2048; Uint32 i2049; @@ -177,10 +180,8 @@ private: struct { Uint32 * m_buffer; // The buffer Uint32 m_dataSize; // No of words in buffer - Uint32 m_bufferSize; // Buffer size + Uint32 m_sendBufferSize; // Buffer size Uint32 m_forceSendLimit; // Send when buffer is this full - - bool full() const { return (m_dataSize * 4) > m_forceSendLimit ;} } m_sendBuffer; SHM_Reader * reader; @@ -196,7 +197,7 @@ private: Uint32 m_adapters; Uint32 m_numberOfRemoteNodes; - Uint16* m_remoteNodes; + Uint16 m_remoteNodes[2]; typedef struct SciAdapter { sci_desc_t scidesc; @@ -297,12 +298,12 @@ private: bool sendIsPossible(struct timeval * timeout); - void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){ - reader->getReadPtr(* ptr, * eod); + void getReceivePtr(Uint32 ** ptr, Uint32 &size){ + size = reader->getReadPtr(* ptr); } - void updateReceivePtr(Uint32 * ptr){ - reader->updateReadPtr(ptr); + void updateReceivePtr(Uint32 size){ + reader->updateReadPtr(size); } /** @@ -341,7 +342,9 @@ private: */ void failoverShmWriter(); - + bool init_local(); + bool init_remote(); + protected: /** Perform a connection between segment @@ -350,7 +353,8 @@ * retrying.
* @return Returns true on success, otherwize falser */ - bool connectImpl(Uint32 timeOutMillis); + bool connect_server_impl(NDB_SOCKET_TYPE sockfd); + bool connect_client_impl(NDB_SOCKET_TYPE sockfd); /** * We will disconnect if: diff --git a/ndb/src/common/transporter/SHM_Buffer.hpp b/ndb/src/common/transporter/SHM_Buffer.hpp index 32e59dd57a2..b0dbd3362a8 100644 --- a/ndb/src/common/transporter/SHM_Buffer.hpp +++ b/ndb/src/common/transporter/SHM_Buffer.hpp @@ -42,17 +42,19 @@ public: Uint32 _sizeOfBuffer, Uint32 _slack, Uint32 * _readIndex, + Uint32 * _endWriteIndex, Uint32 * _writeIndex) : m_startOfBuffer(_startOfBuffer), m_totalBufferSize(_sizeOfBuffer), m_bufferSize(_sizeOfBuffer - _slack), m_sharedReadIndex(_readIndex), + m_sharedEndWriteIndex(_endWriteIndex), m_sharedWriteIndex(_writeIndex) { } void clear() { - m_readIndex = * m_sharedReadIndex; + m_readIndex = 0; } /** @@ -66,12 +68,12 @@ public: * returns ptr - where to start reading * sz - how much can I read */ - inline void getReadPtr(Uint32 * & ptr, Uint32 * & eod); + inline Uint32 getReadPtr(Uint32 * & ptr); /** * Update read ptr */ - inline void updateReadPtr(Uint32 * readPtr); + inline void updateReadPtr(Uint32 size); private: char * const m_startOfBuffer; @@ -80,6 +82,7 @@ private: Uint32 m_readIndex; Uint32 * m_sharedReadIndex; + Uint32 * m_sharedEndWriteIndex; Uint32 * m_sharedWriteIndex; }; @@ -97,19 +100,22 @@ SHM_Reader::empty() const{ * sz - how much can I read */ inline -void -SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod){ - +Uint32 +SHM_Reader::getReadPtr(Uint32 * & ptr) +{ + Uint32 *eod; Uint32 tReadIndex = m_readIndex; Uint32 tWriteIndex = * m_sharedWriteIndex; + Uint32 tEndWriteIndex = * m_sharedEndWriteIndex; ptr = (Uint32*)&m_startOfBuffer[tReadIndex]; if(tReadIndex <= tWriteIndex){ eod = (Uint32*)&m_startOfBuffer[tWriteIndex]; } else { - eod = (Uint32*)&m_startOfBuffer[m_bufferSize]; + eod = (Uint32*)&m_startOfBuffer[tEndWriteIndex]; } + return (Uint32)((char*)eod - (char*)ptr); } /** @@ -117,14 +123,14 @@ SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod){ */ inline void -SHM_Reader::updateReadPtr(Uint32 * ptr){ - - Uint32 tReadIndex = ((char *)ptr) - m_startOfBuffer; - +SHM_Reader::updateReadPtr(Uint32 size) +{ + Uint32 tReadIndex = m_readIndex; + tReadIndex += size; assert(tReadIndex < m_totalBufferSize); if(tReadIndex >= m_bufferSize){ - tReadIndex = 0; //-= m_bufferSize; + tReadIndex = 0; } m_readIndex = tReadIndex; @@ -139,17 +145,19 @@ public: Uint32 _sizeOfBuffer, Uint32 _slack, Uint32 * _readIndex, + Uint32 * _endWriteIndex, Uint32 * _writeIndex) : m_startOfBuffer(_startOfBuffer), m_totalBufferSize(_sizeOfBuffer), m_bufferSize(_sizeOfBuffer - _slack), m_sharedReadIndex(_readIndex), + m_sharedEndWriteIndex(_endWriteIndex), m_sharedWriteIndex(_writeIndex) { } void clear() { - m_writeIndex = * m_sharedWriteIndex; + m_writeIndex = 0; } inline char * getWritePtr(Uint32 sz); @@ -168,6 +176,7 @@ private: Uint32 m_writeIndex; Uint32 * m_sharedReadIndex; + Uint32 * m_sharedEndWriteIndex; Uint32 * m_sharedWriteIndex; }; @@ -206,7 +215,8 @@ SHM_Writer::updateWritePtr(Uint32 sz){ assert(tWriteIndex < m_totalBufferSize); if(tWriteIndex >= m_bufferSize){ - tWriteIndex = 0; //-= m_bufferSize; + * m_sharedEndWriteIndex = tWriteIndex; + tWriteIndex = 0; } m_writeIndex = tWriteIndex; diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp index aa6b650afa8..7c801658dbd 100644 --- a/ndb/src/common/transporter/SHM_Transporter.cpp +++ 
b/ndb/src/common/transporter/SHM_Transporter.cpp @@ -32,13 +32,12 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, int r_port, NodeId lNodeId, NodeId rNodeId, - bool compression, bool checksum, bool signalId, key_t _shmKey, Uint32 _shmSize) : Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId, - 0, compression, checksum, signalId), + 0, false, checksum, signalId), shmKey(_shmKey), shmSize(_shmSize) { @@ -48,7 +47,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, shmBuf = 0; reader = 0; writer = 0; - + setupBuffersDone=false; #ifdef DEBUG_TRANSPORTER printf("shm key (%d - %d) = %d\n", lNodeId, rNodeId, shmKey); @@ -83,36 +82,40 @@ SHM_Transporter::setupBuffers(){ Uint32 * sharedReadIndex1 = base1; Uint32 * sharedWriteIndex1 = base1 + 1; + Uint32 * sharedEndWriteIndex1 = base1 + 2; serverStatusFlag = base1 + 4; char * startOfBuf1 = shmBuf+sharedSize; Uint32 * base2 = (Uint32*)(shmBuf + sizeOfBuffer + sharedSize); Uint32 * sharedReadIndex2 = base2; Uint32 * sharedWriteIndex2 = base2 + 1; + Uint32 * sharedEndWriteIndex2 = base2 + 2; clientStatusFlag = base2 + 4; char * startOfBuf2 = ((char *)base2)+sharedSize; - * sharedReadIndex2 = * sharedWriteIndex2 = 0; - if(isServer){ * serverStatusFlag = 0; reader = new SHM_Reader(startOfBuf1, sizeOfBuffer, slack, sharedReadIndex1, + sharedEndWriteIndex1, sharedWriteIndex1); writer = new SHM_Writer(startOfBuf2, sizeOfBuffer, slack, sharedReadIndex2, + sharedEndWriteIndex2, sharedWriteIndex2); * sharedReadIndex1 = 0; - * sharedWriteIndex2 = 0; + * sharedWriteIndex1 = 0; + * sharedEndWriteIndex1 = 0; * sharedReadIndex2 = 0; - * sharedWriteIndex1 = 0; + * sharedWriteIndex2 = 0; + * sharedEndWriteIndex2 = 0; reader->clear(); writer->clear(); @@ -145,16 +148,19 @@ SHM_Transporter::setupBuffers(){ sizeOfBuffer, slack, sharedReadIndex2, + sharedEndWriteIndex2, sharedWriteIndex2); writer = new SHM_Writer(startOfBuf1, sizeOfBuffer, slack, sharedReadIndex1, + sharedEndWriteIndex1, sharedWriteIndex1); * sharedReadIndex2 = 0; * sharedWriteIndex1 = 0; + * sharedEndWriteIndex1 = 0; reader->clear(); writer->clear(); @@ -224,6 +230,7 @@ SHM_Transporter::prepareSend(const SignalHeader * const signalHeader, bool SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) { + DBUG_ENTER("SHM_Transporter::connect_server_impl"); SocketOutputStream s_output(sockfd); SocketInputStream s_input(sockfd); char buf[256]; @@ -233,7 +240,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) if (!ndb_shm_create()) { report_error(TE_SHM_UNABLE_TO_CREATE_SEGMENT); NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_RETURN(false); } _shmSegCreated = true; } @@ -243,7 +250,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) if (!ndb_shm_attach()) { report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT); NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_RETURN(false); } _attached = true; } @@ -254,7 +261,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) // Wait for ok from client if (s_input.gets(buf, 256) == 0) { NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_RETURN(false); } int r= connect_common(sockfd); @@ -265,17 +272,20 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) // Wait for ok from client if (s_input.gets(buf, 256) == 0) { NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_RETURN(false); } + DBUG_PRINT("info", ("Successfully connected server to node %d", + remoteNodeId)); } NDB_CLOSE_SOCKET(sockfd); - return r; + DBUG_RETURN(r); } bool SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE 
sockfd) { + DBUG_ENTER("SHM_Transporter::connect_client_impl"); SocketInputStream s_input(sockfd); SocketOutputStream s_output(sockfd); char buf[256]; @@ -283,14 +293,18 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) // Wait for server to create and attach if (s_input.gets(buf, 256) == 0) { NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_PRINT("error", ("Server id %d did not attach", + remoteNodeId)); + DBUG_RETURN(false); } // Create if(!_shmSegCreated){ if (!ndb_shm_get()) { NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_PRINT("error", ("Failed create of shm seg to node %d", + remoteNodeId)); + DBUG_RETURN(false); } _shmSegCreated = true; } @@ -300,7 +314,9 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) if (!ndb_shm_attach()) { report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT); NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_PRINT("error", ("Failed attach of shm seg to node %d", + remoteNodeId)); + DBUG_RETURN(false); } _attached = true; } @@ -314,21 +330,28 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) // Wait for ok from server if (s_input.gets(buf, 256) == 0) { NDB_CLOSE_SOCKET(sockfd); - return false; + DBUG_PRINT("error", ("No ok from server node %d", + remoteNodeId)); + DBUG_RETURN(false); } // Send ok to server s_output.println("shm client 2 ok"); + DBUG_PRINT("info", ("Successfully connected client to node %d", + remoteNodeId)); } NDB_CLOSE_SOCKET(sockfd); - return r; + DBUG_RETURN(r); } bool SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd) { - if (!checkConnected()) + if (!checkConnected()) { + DBUG_PRINT("error", ("Already connected to node %d", + remoteNodeId)); return false; + } if(!setupBuffersDone) { setupBuffers(); @@ -341,5 +364,7 @@ SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd) return true; } + DBUG_PRINT("error", ("Failed to set up buffers to node %d", + remoteNodeId)); return false; } diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/ndb/src/common/transporter/SHM_Transporter.hpp index be54d0daa2a..892acbb7ac4 100644 --- a/ndb/src/common/transporter/SHM_Transporter.hpp +++ b/ndb/src/common/transporter/SHM_Transporter.hpp @@ -38,7 +38,6 @@ public: int r_port, NodeId lNodeId, NodeId rNodeId, - bool compression, bool checksum, bool signalId, key_t shmKey, @@ -62,12 +61,12 @@ public: writer->updateWritePtr(lenBytes); } - void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){ - reader->getReadPtr(* ptr, * eod); + void getReceivePtr(Uint32 ** ptr, Uint32 & sz){ + sz = reader->getReadPtr(* ptr); } - void updateReceivePtr(Uint32 * ptr){ - reader->updateReadPtr(ptr); + void updateReceivePtr(Uint32 sz){ + reader->updateReadPtr(sz); } protected: @@ -127,6 +126,7 @@ protected: private: bool _shmSegCreated; bool _attached; + bool m_connected; key_t shmKey; volatile Uint32 * serverStatusFlag; diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp index 8833b51e236..b44afc7c136 100644 --- a/ndb/src/common/transporter/TCP_Transporter.cpp +++ b/ndb/src/common/transporter/TCP_Transporter.cpp @@ -70,11 +70,10 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg, int r_port, NodeId lNodeId, NodeId rNodeId, - int byte_order, - bool compr, bool chksm, bool signalId, + bool chksm, bool signalId, Uint32 _reportFreq) : Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId, - byte_order, compr, chksm, signalId), + 0, false, chksm, signalId), m_sendBuffer(sendBufSize) { maxReceiveSize = maxRecvSize; @@ -106,12 +105,14 @@
TCP_Transporter::~TCP_Transporter() { bool TCP_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) { - return connect_common(sockfd); + DBUG_ENTER("TCP_Transporter::connect_server_impl"); + DBUG_RETURN(connect_common(sockfd)); } bool TCP_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) { - return connect_common(sockfd); + DBUG_ENTER("TCP_Transporter::connect_client_impl"); + DBUG_RETURN(connect_common(sockfd)); } bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd) @@ -119,6 +120,8 @@ bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd) theSocket = sockfd; setSocketOptions(); setSocketNonBlocking(theSocket); + DBUG_PRINT("info", ("Successfully set up TCP transporter to node %d", + remoteNodeId)); return true; } diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp index 958cfde03a1..48046310bf8 100644 --- a/ndb/src/common/transporter/TCP_Transporter.hpp +++ b/ndb/src/common/transporter/TCP_Transporter.hpp @@ -52,8 +52,7 @@ private: int r_port, NodeId lHostId, NodeId rHostId, - int byteorder, - bool compression, bool checksum, bool signalId, + bool checksum, bool signalId, Uint32 reportFreq = 4096); // Disconnect, delete send buffers and receive buffer diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index 01f1f74f053..ad8a2729c26 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -15,6 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include +#include #include #include "TransporterInternalDefinitions.hpp" @@ -48,9 +49,10 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd) { + DBUG_ENTER("SocketServer::Session * TransporterService::newSession"); if (m_auth && !m_auth->server_authenticate(sockfd)){ NDB_CLOSE_SOCKET(sockfd); - return 0; + DBUG_RETURN(0); } { @@ -60,27 +62,32 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd) char buf[256]; if (s_input.gets(buf, 256) == 0) { NDB_CLOSE_SOCKET(sockfd); - return 0; + DBUG_PRINT("error", ("Could not get node id from client")); + DBUG_RETURN(0); } if (sscanf(buf, "%d", &nodeId) != 1) { NDB_CLOSE_SOCKET(sockfd); - return 0; + DBUG_PRINT("error", ("Error in node id from client")); + DBUG_RETURN(0); } //check that nodeid is valid and that there is an allocated transporter - if ( nodeId < 0 || nodeId >= m_transporter_registry->maxTransporters) { - NDB_CLOSE_SOCKET(sockfd); - return 0; + if ( nodeId < 0 || nodeId >= (int)m_transporter_registry->maxTransporters) { + NDB_CLOSE_SOCKET(sockfd); + DBUG_PRINT("error", ("Node id out of range from client")); + DBUG_RETURN(0); } if (m_transporter_registry->theTransporters[nodeId] == 0) { NDB_CLOSE_SOCKET(sockfd); - return 0; + DBUG_PRINT("error", ("No transporter for this node id from client")); + DBUG_RETURN(0); } //check that the transporter should be connected if (m_transporter_registry->performStates[nodeId] != TransporterRegistry::CONNECTING) { NDB_CLOSE_SOCKET(sockfd); - return 0; + DBUG_PRINT("error", ("Transporter in wrong state for this node id from client")); + DBUG_RETURN(0); } Transporter *t= m_transporter_registry->theTransporters[nodeId]; @@ -93,7 +100,7 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd) t->connect_server(sockfd); } - return 0; + DBUG_RETURN(0); } TransporterRegistry::TransporterRegistry(void * callback, @@ -209,8 +216,6 @@
TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) { config->port, localNodeId, config->remoteNodeId, - config->byteOrder, - config->compression, config->checksum, config->signalId); if (t == NULL) @@ -264,8 +269,6 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) { conf->localHostName, conf->remoteNodeId, conf->remoteHostName, - conf->byteOrder, - conf->compression, conf->checksum, conf->signalId); if (t == NULL) @@ -306,15 +309,17 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) { if(theTransporters[config->remoteNodeId] != NULL) return false; - SCI_Transporter * t = new SCI_Transporter(config->sendLimit, + SCI_Transporter * t = new SCI_Transporter(*this, + config->localHostName, + config->remoteHostName, + config->port, + config->sendLimit, config->bufferSize, config->nLocalAdapters, config->remoteSciNodeId0, config->remoteSciNodeId1, localNodeId, config->remoteNodeId, - config->byteOrder, - config->compression, config->checksum, config->signalId); @@ -357,7 +362,6 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) { config->port, localNodeId, config->remoteNodeId, - config->compression, config->checksum, config->signalId, config->shmKey, @@ -853,10 +857,11 @@ TransporterRegistry::performReceive(){ const NodeId nodeId = t->getRemoteNodeId(); if(is_connected(nodeId)){ if(t->isConnected() && t->checkConnected()){ - Uint32 * readPtr, * eodPtr; - t->getReceivePtr(&readPtr, &eodPtr); - readPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]); - t->updateReceivePtr(readPtr); + Uint32 * readPtr; + Uint32 sz = 0; + t->getReceivePtr(&readPtr, sz); + Uint32 szUsed = unpack(readPtr, sz, nodeId, ioStates[nodeId]); + t->updateReceivePtr(szUsed); } } } @@ -868,10 +873,11 @@ TransporterRegistry::performReceive(){ const NodeId nodeId = t->getRemoteNodeId(); if(is_connected(nodeId)){ if(t->isConnected() && t->checkConnected()){ - Uint32 * readPtr, * eodPtr; - t->getReceivePtr(&readPtr, &eodPtr); - readPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]); - t->updateReceivePtr(readPtr); + Uint32 * readPtr; + Uint32 sz = 0; + t->getReceivePtr(&readPtr, sz); + Uint32 szUsed = unpack(readPtr, sz, nodeId, ioStates[nodeId]); + t->updateReceivePtr(szUsed); } } } @@ -1023,7 +1029,9 @@ TransporterRegistry::setIOState(NodeId nodeId, IOState state) { static void * run_start_clients_C(void * me) { + my_thread_init(); ((TransporterRegistry*) me)->start_clients_thread(); + my_thread_end(); NdbThread_Exit(0); return me; } @@ -1106,6 +1114,7 @@ TransporterRegistry::update_connections() void TransporterRegistry::start_clients_thread() { + DBUG_ENTER("TransporterRegistry::start_clients_thread"); while (m_run_start_clients_thread) { NdbSleep_MilliSleep(100); for (int i= 0, n= 0; n < nTransporters && m_run_start_clients_thread; i++){ @@ -1129,6 +1138,7 @@ TransporterRegistry::start_clients_thread() } } } + DBUG_VOID_RETURN; } bool diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index 0cc06a54496..c432d686462 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ b/ndb/src/common/util/SocketServer.cpp @@ -16,6 +16,7 @@ #include +#include #include @@ -176,9 +177,9 @@ extern "C" void* socketServerThread_C(void* _ss){ SocketServer * ss = (SocketServer *)_ss; - + my_thread_init(); ss->doRun(); - + my_thread_end(); NdbThread_Exit(0); return 0; } @@ -287,8 +288,10 @@ void* sessionThread_C(void* _sc){ SocketServer::Session * si = (SocketServer::Session *)_sc; + 
my_thread_init(); if(!transfer(si->m_socket)){ si->m_stopped = true; + my_thread_end(); NdbThread_Exit(0); return 0; } @@ -301,6 +304,7 @@ sessionThread_C(void* _sc){ } si->m_stopped = true; + my_thread_end(); NdbThread_Exit(0); return 0; } diff --git a/ndb/src/cw/cpcd/Makefile.am b/ndb/src/cw/cpcd/Makefile.am index e276d1a766d..6af44a359fc 100644 --- a/ndb/src/cw/cpcd/Makefile.am +++ b/ndb/src/cw/cpcd/Makefile.am @@ -7,7 +7,7 @@ LDADD_LOC = \ $(top_builddir)/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ include $(top_srcdir)/ndb/config/common.mk.am include $(top_srcdir)/ndb/config/type_util.mk.am diff --git a/ndb/src/kernel/Makefile.am b/ndb/src/kernel/Makefile.am index a6be3244b41..493ab4f9982 100644 --- a/ndb/src/kernel/Makefile.am +++ b/ndb/src/kernel/Makefile.am @@ -55,7 +55,7 @@ LDADD += \ $(top_builddir)/ndb/src/common/util/libgeneral.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/ndb/src/kernel/blocks/backup/restore/Makefile.am b/ndb/src/kernel/blocks/backup/restore/Makefile.am index eef5bc5a203..16550f13546 100644 --- a/ndb/src/kernel/blocks/backup/restore/Makefile.am +++ b/ndb/src/kernel/blocks/backup/restore/Makefile.am @@ -7,7 +7,7 @@ LDADD_LOC = \ $(top_builddir)/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ include $(top_srcdir)/ndb/config/common.mk.am diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index 948423f0109..b3afd57f6cd 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -125,11 +125,14 @@ ConfigInfo::m_SectionRules[] = { { "TCP", fixHostname, "HostName1" }, { "TCP", fixHostname, "HostName2" }, + { "SCI", fixHostname, "HostName1" }, + { "SCI", fixHostname, "HostName2" }, { "OSE", fixHostname, "HostName1" }, { "OSE", fixHostname, "HostName2" }, { "TCP", fixPortNumber, 0 }, // has to come after fixHostName { "SHM", fixPortNumber, 0 }, // has to come after fixHostName + { "SCI", fixPortNumber, 0 }, // has to come after fixHostName //{ "SHM", fixShmKey, 0 }, /** @@ -159,6 +162,8 @@ ConfigInfo::m_SectionRules[] = { { "TCP", checkTCPConstraints, "HostName1" }, { "TCP", checkTCPConstraints, "HostName2" }, + { "SCI", checkTCPConstraints, "HostName1" }, + { "SCI", checkTCPConstraints, "HostName2" }, { "*", checkMandatory, 0 }, @@ -1788,7 +1793,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", ConfigInfo::USED, false, - ConfigInfo::INT, + ConfigInfo::STRING, MANDATORY, "0", STR_VALUE(MAX_INT_RNIL) }, @@ -1800,16 +1805,50 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", ConfigInfo::USED, false, - ConfigInfo::INT, + ConfigInfo::STRING, MANDATORY, "0", STR_VALUE(MAX_INT_RNIL) }, { - CFG_SCI_ID_0, - "SciId0", + CFG_SCI_HOSTNAME_1, + "HostName1", "SCI", - "Local SCI-node id for adapter 0 (a computer can have two adapters)", + "Name/IP of computer on one side of the connection", + ConfigInfo::INTERNAL, + false, + ConfigInfo::STRING, + UNDEFINED, + 0, 0 }, + + { + 
CFG_SCI_HOSTNAME_2, + "HostName2", + "SCI", + "Name/IP of computer on one side of the connection", + ConfigInfo::INTERNAL, + false, + ConfigInfo::STRING, + UNDEFINED, + 0, 0 }, + + { + CFG_CONNECTION_SERVER_PORT, + "PortNumber", + "SCI", + "Port used for this transporter", + ConfigInfo::USED, + false, + ConfigInfo::INT, + MANDATORY, + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_SCI_HOST1_ID_0, + "Host1SciId0", + "SCI", + "SCI-node id for adapter 0 on Host1 (a computer can have two adapters)", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1818,10 +1857,22 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { STR_VALUE(MAX_INT_RNIL) }, { - CFG_SCI_ID_1, - "SciId1", + CFG_SCI_HOST1_ID_1, + "Host1SciId1", "SCI", - "Local SCI-node id for adapter 1 (a computer can have two adapters)", + "SCI-node id for adapter 1 on Host1 (a computer can have two adapters)", + ConfigInfo::USED, + false, + ConfigInfo::INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + + { + CFG_SCI_HOST2_ID_0, + "Host2SciId0", + "SCI", + "SCI-node id for adapter 0 on Host2 (a computer can have two adapters)", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1829,6 +1880,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "0", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_SCI_HOST2_ID_1, + "Host2SciId1", + "SCI", + "SCI-node id for adapter 1 on Host2 (a computer can have two adapters)", + ConfigInfo::USED, + false, + ConfigInfo::INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + { CFG_CONNECTION_SEND_SIGNAL_ID, "SendSignalId", @@ -1862,8 +1925,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { false, ConfigInfo::INT, "2K", - "512", - STR_VALUE(MAX_INT_RNIL) }, + "128", + "32K" }, { CFG_SCI_BUFFER_MEM, @@ -1873,8 +1936,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { ConfigInfo::USED, false, ConfigInfo::INT, - "1M", - "256K", + "192K", + "64K", STR_VALUE(MAX_INT_RNIL) }, { diff --git a/ndb/src/mgmsrv/Makefile.am b/ndb/src/mgmsrv/Makefile.am index 8fa9ec5f63e..5e048eb1418 100644 --- a/ndb/src/mgmsrv/Makefile.am +++ b/ndb/src/mgmsrv/Makefile.am @@ -29,7 +29,7 @@ LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \ $(top_builddir)/ndb/src/common/editline/libeditline.a \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ @TERMCAP_LIB@ DEFS_LOC = -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \ diff --git a/sql/Makefile.am b/sql/Makefile.am index d951aae91e1..19bdf8055f3 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -37,7 +37,7 @@ LDADD = @isam_libs@ \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/regex/libregex.a \ - $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ + $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ @NDB_SCI_LIBS@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \ @bdb_libs@ @innodb_libs@ @pstack_libs@ \ From 69b9bbdb20a79583a51d2f3bccfea506d488363e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Sep 2004 12:57:15 +0200 Subject: [PATCH 09/55] ndb charsets: metadata in TUP, TUX --- ndb/include/kernel/signaldata/CreateTable.hpp | 3 +- ndb/include/kernel/signaldata/LqhFrag.hpp | 2 +- ndb/include/kernel/signaldata/TupFrag.hpp | 10 ++- ndb/include/util/NdbSqlUtil.hpp | 5 ++ ndb/src/common/util/NdbSqlUtil.cpp | 19 +++++ ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 33 ++++++++ ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 2 +- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 1 + .../kernel/blocks/dbtup/AttributeOffset.hpp | 61 ++++++++++++-- 
ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 9 +- ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 1 + ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 82 +++++++++++++++---- .../kernel/blocks/dbtup/DbtupTabDesMan.cpp | 29 ++++++- ndb/src/kernel/blocks/dbtux/Dbtux.hpp | 6 +- ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp | 16 +++- ndb/src/kernel/vm/MetaData.hpp | 3 + 16 files changed, 243 insertions(+), 39 deletions(-) diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/ndb/include/kernel/signaldata/CreateTable.hpp index 424367f28d5..67e510d2ed0 100644 --- a/ndb/include/kernel/signaldata/CreateTable.hpp +++ b/ndb/include/kernel/signaldata/CreateTable.hpp @@ -89,7 +89,8 @@ public: ArraySizeTooBig = 737, RecordTooBig = 738, InvalidPrimaryKeySize = 739, - NullablePrimaryKey = 740 + NullablePrimaryKey = 740, + InvalidCharset = 743 }; private: diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp index 116e9c01ca0..13dfafcc653 100644 --- a/ndb/include/kernel/signaldata/LqhFrag.hpp +++ b/ndb/include/kernel/signaldata/LqhFrag.hpp @@ -130,7 +130,7 @@ private: Uint32 keyLength; Uint32 nextLCP; Uint32 noOfKeyAttr; - Uint32 noOfNewAttr; + Uint32 noOfNewAttr; // noOfCharsets in upper half Uint32 checksumIndicator; Uint32 noOfAttributeGroups; Uint32 GCPIndicator; diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp index c0ce22651aa..c1e861c5dff 100644 --- a/ndb/include/kernel/signaldata/TupFrag.hpp +++ b/ndb/include/kernel/signaldata/TupFrag.hpp @@ -119,12 +119,13 @@ class TupAddAttrReq { friend class Dblqh; friend class Dbtux; public: - STATIC_CONST( SignalLength = 4 ); + STATIC_CONST( SignalLength = 5 ); private: Uint32 tupConnectPtr; Uint32 notused1; Uint32 attrId; Uint32 attrDescriptor; + Uint32 extTypeInfo; }; class TupAddAttrConf { @@ -141,6 +142,10 @@ class TupAddAttrRef { friend class Dbtup; public: STATIC_CONST( SignalLength = 2 ); + enum ErrorCode { + NoError = 0, + InvalidCharset = 743 + }; private: Uint32 userPtr; Uint32 errorCode; @@ -178,7 +183,8 @@ public: STATIC_CONST( SignalLength = 2 ); enum ErrorCode { NoError = 0, - InvalidAttributeType = 831, + InvalidAttributeType = 742, + InvalidCharset = 743, InvalidNodeSize = 832 }; private: diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp index df1cb716f93..00216057d58 100644 --- a/ndb/include/util/NdbSqlUtil.hpp +++ b/ndb/include/util/NdbSqlUtil.hpp @@ -90,6 +90,11 @@ public: */ static const Type& getType(Uint32 typeId); + /** + * Get type by id but replace char type by corresponding binary type. + */ + static const Type& getTypeBinary(Uint32 typeId); + /** * Check character set. 
*/ diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index afb9bcfff62..ffcf29a7242 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -176,6 +176,25 @@ NdbSqlUtil::getType(Uint32 typeId) return m_typeList[Type::Undefined]; } +const NdbSqlUtil::Type& +NdbSqlUtil::getTypeBinary(Uint32 typeId) +{ + switch (typeId) { + case Type::Char: + typeId = Type::Binary; + break; + case Type::Varchar: + typeId = Type::Varbinary; + break; + case Type::Text: + typeId = Type::Blob; + break; + default: + break; + } + return getType(typeId); +} + // compare int diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index d82083684b7..4757f1d2bf3 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -15,6 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include +#include #define DBDICT_C #include "Dbdict.hpp" @@ -4100,6 +4101,8 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->noOfKeyAttr = tabPtr.p->noOfPrimkey; req->noOfNewAttr = 0; + // noOfCharsets passed to TUP in upper half + req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16); req->checksumIndicator = 1; req->noOfAttributeGroups = 1; req->GCPIndicator = 0; @@ -4161,6 +4164,8 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal, entry.attrId = attrPtr.p->attributeId; entry.attrDescriptor = attrPtr.p->attributeDescriptor; entry.extTypeInfo = attrPtr.p->extType; + // charset number passed to TUP, TUX in upper half + entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF); if (tabPtr.p->isIndex()) { Uint32 primaryAttrId; if (attrPtr.p->nextAttrInTable != RNIL) { @@ -4697,6 +4702,8 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, Uint32 keyLength = 0; Uint32 attrCount = tablePtr.p->noOfAttributes; Uint32 nullCount = 0; + Uint32 noOfCharsets = 0; + Uint16 charsets[128]; Uint32 recordLength = 0; AttributeRecordPtr attrPtr; c_attributeRecordHash.removeAll(); @@ -4751,6 +4758,31 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision; attrPtr.p->extScale = attrDesc.AttributeExtScale; attrPtr.p->extLength = attrDesc.AttributeExtLength; + // charset in upper half of precision + unsigned csNumber = (attrPtr.p->extPrecision >> 16); + if (csNumber != 0) { + CHARSET_INFO* cs = get_charset(csNumber, MYF(0)); + if (cs == NULL) { + parseP->errorCode = CreateTableRef::InvalidCharset; + parseP->errorLine = __LINE__; + return; + } + unsigned i = 0; + while (i < noOfCharsets) { + if (charsets[i] == csNumber) + break; + i++; + } + if (i == noOfCharsets) { + noOfCharsets++; + if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) { + parseP->errorCode = CreateTableRef::InvalidFormat; + parseP->errorLine = __LINE__; + return; + } + charsets[i] = csNumber; + } + } /** * Ignore incoming old-style type and recompute it. 
@@ -4814,6 +4846,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, tablePtr.p->noOfPrimkey = keyCount; tablePtr.p->noOfNullAttr = nullCount; + tablePtr.p->noOfCharsets = noOfCharsets; tablePtr.p->tupKeyLength = keyLength; tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS, diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 5ddaa67a7d6..a94af7b59c8 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -455,7 +455,7 @@ public: Uint16 totalAttrReceived; Uint16 fragCopyCreation; Uint16 noOfKeyAttr; - Uint16 noOfNewAttr; + Uint32 noOfNewAttr; // noOfCharsets in upper half Uint16 noOfAttributeGroups; Uint16 lh3DistrBits; Uint16 tableType; diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 3b05a133bbb..467df4aca3d 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -1444,6 +1444,7 @@ Dblqh::sendAddAttrReq(Signal* signal) tupreq->notused1 = 0; tupreq->attrId = attrId; tupreq->attrDescriptor = entry.attrDescriptor; + tupreq->extTypeInfo = entry.extTypeInfo; sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ, signal, TupAddAttrReq::SignalLength, JBB); return; diff --git a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp index 0f3881e9024..2c62adab3e5 100644 --- a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp +++ b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp @@ -22,26 +22,59 @@ class AttributeOffset { private: static void setOffset(Uint32 & desc, Uint32 offset); + static void setCharsetPos(Uint32 & desc, Uint32 offset); static void setNullFlagPos(Uint32 & desc, Uint32 offset); static Uint32 getOffset(const Uint32 &); + static bool getCharsetFlag(const Uint32 &); + static Uint32 getCharsetPos(const Uint32 &); static Uint32 getNullFlagPos(const Uint32 &); static Uint32 getNullFlagOffset(const Uint32 &); static Uint32 getNullFlagBitOffset(const Uint32 &); static bool isNULL(const Uint32 &, const Uint32 &); }; -#define AO_ATTRIBUTE_OFFSET_MASK (0xffff) -#define AO_NULL_FLAG_POS_MASK (0x7ff) -#define AO_NULL_FLAG_POS_SHIFT (21) -#define AO_NULL_FLAG_WORD_MASK (31) -#define AO_NULL_FLAG_OFFSET_SHIFT (5) +/** + * Allow for 4096 attributes, all nullable, and for 128 different + * character sets. 
+ * + * a = Attribute offset - 11 bits 0-10 ( addr word in 8 kb ) + * c = Has charset flag 1 bits 11-11 + * s = Charset pointer position - 7 bits 12-18 ( in table descriptor ) + * f = Null flag offset in word - 5 bits 20-24 ( address 32 bits ) + * w = Null word offset - 7 bits 25-32 ( f+w addr 4096 attrs ) + * + * 1111111111222222222233 + * 01234567890123456789012345678901 + * aaaaaaaaaaacsssssss fffffwwwwwww + */ + +#define AO_ATTRIBUTE_OFFSET_SHIFT 0 +#define AO_ATTRIBUTE_OFFSET_MASK 0x7ff + +#define AO_CHARSET_FLAG_SHIFT 11 +#define AO_CHARSET_POS_SHIFT 12 +#define AO_CHARSET_POS_MASK 127 + +#define AO_NULL_FLAG_POS_MASK 0xfff // f+w +#define AO_NULL_FLAG_POS_SHIFT 20 + +#define AO_NULL_FLAG_WORD_MASK 31 // f +#define AO_NULL_FLAG_OFFSET_SHIFT 5 inline void AttributeOffset::setOffset(Uint32 & desc, Uint32 offset){ ASSERT_MAX(offset, AO_ATTRIBUTE_OFFSET_MASK, "AttributeOffset::setOffset"); - desc |= offset; + desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT); +} + +inline +void +AttributeOffset::setCharsetPos(Uint32 & desc, Uint32 offset) { + ASSERT_MAX(offset, AO_CHARSET_POS_MASK, "AttributeOffset::setCharsetPos"); + desc |= (1 << AO_CHARSET_FLAG_SHIFT); + desc |= (offset << AO_CHARSET_POS_SHIFT); } inline @@ -55,7 +88,21 @@ inline Uint32 AttributeOffset::getOffset(const Uint32 & desc) { - return desc & AO_ATTRIBUTE_OFFSET_MASK; + return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK; +} + +inline +bool +AttributeOffset::getCharsetFlag(const Uint32 & desc) +{ + return (desc >> AO_CHARSET_FLAG_SHIFT) & 1; +} + +inline +Uint32 +AttributeOffset::getCharsetPos(const Uint32 & desc) +{ + return (desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK; } inline diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index cb7e35ea73e..a36c73ec09a 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -502,6 +502,7 @@ struct Fragoperrec { Uint32 attributeCount; Uint32 freeNullBit; Uint32 noOfNewAttrCount; + Uint32 charsetIndex; BlockReference lqhBlockrefFrag; }; typedef Ptr FragoperrecPtr; @@ -785,6 +786,7 @@ struct Tablerec { ReadFunction* readFunctionArray; UpdateFunction* updateFunctionArray; + CHARSET_INFO** charsetArray; Uint32 readKeyArray; Uint32 tabDescriptor; @@ -796,6 +798,7 @@ struct Tablerec { Uint16 tupheadsize; Uint16 noOfAttr; Uint16 noOfKeyAttr; + Uint16 noOfCharsets; Uint16 noOfNewAttr; Uint16 noOfNullAttr; Uint16 noOfAttributeGroups; @@ -1909,7 +1912,8 @@ private: void updatePackedList(Signal* signal, Uint16 ahostIndex); void setUpDescriptorReferences(Uint32 descriptorReference, - Tablerec* const regTabPtr); + Tablerec* const regTabPtr, + const Uint32* offset); void setUpKeyArray(Tablerec* const regTabPtr); bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex); void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId); @@ -2098,7 +2102,8 @@ private: //----------------------------------------------------------------------------- // Public methods - Uint32 allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups); + Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset); + Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset); void freeTabDescr(Uint32 retRef, Uint32 retNo); Uint32 getTabDescrWord(Uint32 index); void setTabDescrWord(Uint32 index, Uint32 word); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index 1e57f127fbc..f3391ff7b59 100644 --- 
a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -1067,6 +1067,7 @@ Dbtup::initTab(Tablerec* const regTabPtr) }//for regTabPtr->readFunctionArray = NULL; regTabPtr->updateFunctionArray = NULL; + regTabPtr->charsetArray = NULL; regTabPtr->tabDescriptor = RNIL; regTabPtr->attributeGroupDescriptor = RNIL; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index 09889a51fa3..dc04650cd1b 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -20,12 +20,14 @@ #include #include #include +#include #include #include #include #include #include #include "AttributeOffset.hpp" +#include #define ljam() { jamLine(20000 + __LINE__); } #define ljamEntry() { jamEntryLine(20000 + __LINE__); } @@ -52,7 +54,10 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) /* Uint32 schemaVersion = signal->theData[8];*/ Uint32 noOfKeyAttr = signal->theData[9]; - Uint32 noOfNewAttr = signal->theData[10]; + Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF); + /* DICT sends number of character sets in upper half */ + Uint32 noOfCharsets = (signal->theData[10] >> 16); + Uint32 checksumIndicator = signal->theData[11]; Uint32 noOfAttributeGroups = signal->theData[12]; Uint32 globalCheckpointIdIndicator = signal->theData[13]; @@ -75,6 +80,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) fragOperPtr.p->attributeCount = noOfAttributes; fragOperPtr.p->freeNullBit = noOfNullAttr; fragOperPtr.p->noOfNewAttrCount = noOfNewAttr; + fragOperPtr.p->charsetIndex = 0; ndbrequire(reqinfo == ZADDFRAG); @@ -156,6 +162,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regTabPtr.p->tupheadsize = regTabPtr.p->tupGCPIndex; regTabPtr.p->noOfKeyAttr = noOfKeyAttr; + regTabPtr.p->noOfCharsets = noOfCharsets; regTabPtr.p->noOfAttr = noOfAttributes; regTabPtr.p->noOfNewAttr = noOfNewAttr; regTabPtr.p->noOfNullAttr = noOfNullAttr; @@ -163,13 +170,14 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regTabPtr.p->notNullAttributeMask.clear(); - Uint32 tableDescriptorRef = allocTabDescr(noOfAttributes, noOfKeyAttr, noOfAttributeGroups); + Uint32 offset[10]; + Uint32 tableDescriptorRef = allocTabDescr(regTabPtr.p, offset); if (tableDescriptorRef == RNIL) { ljam(); fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); return; }//if - setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p); + setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset); } else { ljam(); fragOperPtr.p->definingFragment = false; @@ -251,6 +259,9 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); Uint32 attrId = signal->theData[2]; Uint32 attrDescriptor = signal->theData[3]; + // DICT sends extended type (ignored) and charset number + Uint32 extType = (signal->theData[4] & 0xFF); + Uint32 csNumber = (signal->theData[4] >> 16); regTabPtr.i = fragOperPtr.p->tableidFrag; ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); @@ -304,6 +315,29 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) } else { ndbrequire(false); }//if + if (csNumber != 0) { + CHARSET_INFO* cs = get_charset(csNumber, MYF(0)); + if (cs == NULL) { + ljam(); + terrorCode = TupAddAttrRef::InvalidCharset; + addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); + return; + } + Uint32 i = 0; + while (i < fragOperPtr.p->charsetIndex) { + ljam(); + if (regTabPtr.p->charsetArray[i] == cs) + break; + i++; + } + if (i == fragOperPtr.p->charsetIndex) { + ljam(); + ndbrequire(i < 
regTabPtr.p->noOfCharsets); + regTabPtr.p->charsetArray[i] = cs; + AttributeOffset::setCharsetPos(attrDes2, i); + fragOperPtr.p->charsetIndex++; + } + } setTabDescrWord(firstTabDesIndex + 1, attrDes2); if (regTabPtr.p->tupheadsize > MAX_TUPLE_SIZE_IN_WORDS) { @@ -340,20 +374,28 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) return; }//Dbtup::execTUP_ADD_ATTRREQ() -void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference, - Tablerec* const regTabPtr) -{ - Uint32 noOfAttributes = regTabPtr->noOfAttr; - descriptorReference += ZTD_SIZE; - ReadFunction * tmp = (ReadFunction*)&tableDescriptor[descriptorReference].tabDescr; - regTabPtr->readFunctionArray = tmp; - regTabPtr->updateFunctionArray = (UpdateFunction*)(tmp + noOfAttributes); +/* + * Descriptor has these parts: + * + * 0 readFunctionArray ( one for each attribute ) + * 1 updateFunctionArray ( ditto ) + * 2 charsetArray ( pointers to distinct CHARSET_INFO ) + * 3 readKeyArray ( attribute ids of keys ) + * 4 attributeGroupDescriptor ( currently size 1 but unused ) + * 5 tabDescriptor ( attribute descriptors, each ZAD_SIZE ) + */ - TableDescriptor * start = &tableDescriptor[descriptorReference]; - TableDescriptor * end = (TableDescriptor*)(tmp + 2 * noOfAttributes); - regTabPtr->readKeyArray = descriptorReference + (end - start); - regTabPtr->attributeGroupDescriptor = regTabPtr->readKeyArray + regTabPtr->noOfKeyAttr; - regTabPtr->tabDescriptor = regTabPtr->attributeGroupDescriptor + regTabPtr->noOfAttributeGroups; +void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference, + Tablerec* const regTabPtr, + const Uint32* offset) +{ + Uint32* desc = &tableDescriptor[descriptorReference].tabDescr; + regTabPtr->readFunctionArray = (ReadFunction*)(desc + offset[0]); + regTabPtr->updateFunctionArray = (UpdateFunction*)(desc + offset[1]); + regTabPtr->charsetArray = (CHARSET_INFO**)(desc + offset[2]); + regTabPtr->readKeyArray = descriptorReference + offset[3]; + regTabPtr->attributeGroupDescriptor = descriptorReference + offset[4]; + regTabPtr->tabDescriptor = descriptorReference + offset[5]; }//Dbtup::setUpDescriptorReferences() Uint32 @@ -491,14 +533,18 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr) Uint32 descriptor = regTabPtr->readKeyArray; if (descriptor != RNIL) { ljam(); + Uint32 offset[10]; + getTabDescrOffsets(regTabPtr, offset); + regTabPtr->tabDescriptor = RNIL; regTabPtr->readKeyArray = RNIL; regTabPtr->readFunctionArray = NULL; regTabPtr->updateFunctionArray = NULL; + regTabPtr->charsetArray = NULL; regTabPtr->attributeGroupDescriptor= RNIL; - Uint32 sizeFunctionArrays = 2 * (regTabPtr->noOfAttr * sizeOfReadFunction()); - descriptor -= (sizeFunctionArrays + ZTD_SIZE); + // move to start of descriptor + descriptor -= offset[3]; Uint32 retNo = getTabDescrWord(descriptor + ZTD_DATASIZE); ndbrequire(getTabDescrWord(descriptor + ZTD_HEADER) == ZTD_TYPE_NORMAL); ndbrequire(retNo == getTabDescrWord((descriptor + retNo) - ZTD_TR_SIZE)); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp index d31ab43f108..642ba270760 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp @@ -31,12 +31,33 @@ /* memory attached to fragments (could be allocated per table */ /* instead. Performs its task by a buddy algorithm. 
*/ /* **************************************************************** */ -Uint32 Dbtup::allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups) + +Uint32 +Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset) +{ + // belongs to configure.in + unsigned sizeOfPointer = sizeof(CHARSET_INFO*); + ndbrequire((sizeOfPointer & 0x3) == 0); + sizeOfPointer = (sizeOfPointer >> 2); + // do in layout order and return offsets (see DbtupMeta.cpp) + Uint32 allocSize = 0; + // magically aligned to 8 bytes + offset[0] = allocSize += ZTD_SIZE; + offset[1] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction(); + offset[2] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction(); + offset[3] = allocSize += regTabPtr->noOfCharsets * sizeOfPointer; + offset[4] = allocSize += regTabPtr->noOfKeyAttr; + offset[5] = allocSize += regTabPtr->noOfAttributeGroups; + allocSize += regTabPtr->noOfAttr * ZAD_SIZE; + allocSize += ZTD_TRAILER_SIZE; + // return number of words + return allocSize; +} + +Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset) { Uint32 reference = RNIL; - Uint32 allocSize = (ZTD_SIZE + ZTD_TRAILER_SIZE) + (noOfAttributes * ZAD_SIZE); - allocSize += noOfAttributeGroups; - allocSize += ((2 * noOfAttributes * sizeOfReadFunction()) + noOfKeyAttr); + Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset); /* ---------------------------------------------------------------- */ /* ALWAYS ALLOCATE A MULTIPLE OF 16 BYTES */ /* ---------------------------------------------------------------- */ diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp index 36ac20611bb..66f9717a478 100644 --- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp +++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp @@ -330,11 +330,15 @@ private: /* * Attribute metadata. Size must be multiple of word size. + * + * Prefix comparison of char data must use strxfrm and binary + * comparison. The charset is currently unused. */ struct DescAttr { Uint32 m_attrDesc; // standard AttributeDescriptor Uint16 m_primaryAttrId; - Uint16 m_typeId; + unsigned m_typeId : 6; + unsigned m_charset : 10; }; static const unsigned DescAttrSize = sizeof(DescAttr) >> 2; diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp index 4bb3b940d91..3c0af3ca79d 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp @@ -178,19 +178,31 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal) descAttr.m_attrDesc = req->attrDescriptor; descAttr.m_primaryAttrId = req->primaryAttrId; descAttr.m_typeId = req->extTypeInfo & 0xFF; + descAttr.m_charset = (req->extTypeInfo >> 16); #ifdef VM_TRACE if (debugFlags & DebugMeta) { debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl; } #endif - // check if type is valid and has a comparison method - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); + // check that type is valid and has a binary comparison method + const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId); if (type.m_typeId == NdbSqlUtil::Type::Undefined || type.m_cmp == 0) { jam(); errorCode = TuxAddAttrRef::InvalidAttributeType; break; } +#ifdef dbtux_uses_charset + if (descAttr.m_charset != 0) { + CHARSET_INFO *cs = get_charset(descAttr.m_charset, MYF(0)); + // here use the non-binary type + if (! 
NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) { + jam(); + errorCode = TuxAddAttrRef::InvalidCharset; + break; + } + } +#endif if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) { jam(); // initialize tree header diff --git a/ndb/src/kernel/vm/MetaData.hpp b/ndb/src/kernel/vm/MetaData.hpp index f6a941e8f9f..11e262664c1 100644 --- a/ndb/src/kernel/vm/MetaData.hpp +++ b/ndb/src/kernel/vm/MetaData.hpp @@ -107,6 +107,9 @@ public: /* Number of primary key attributes (should be computed) */ Uint16 noOfPrimkey; + /* Number of distinct character sets (computed) */ + Uint16 noOfCharsets; + /* Length of primary key in words (should be computed) */ /* For ordered index this is tree node size in words */ Uint16 tupKeyLength; From b5d77b3a26d0a930591e0451877828dcc6be55f2 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Sep 2004 14:38:01 +0200 Subject: [PATCH 10/55] ndb charsets: TUX must use Dbtup::readAttributes --- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 17 +- ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp | 103 +++++++----- ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 2 +- ndb/src/kernel/blocks/dbtux/Dbtux.hpp | 43 ++--- ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp | 157 ++---------------- ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp | 8 +- ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp | 84 +++++----- ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 4 +- ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp | 4 +- 9 files changed, 161 insertions(+), 261 deletions(-) diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index a36c73ec09a..1444066a76c 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -1004,17 +1004,20 @@ public: void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node); /* - * TUX reads primary table attributes for index keys. Input is - * attribute ids in AttributeHeader format. Output is pointers to - * attribute data within tuple or 0 for NULL value. + * TUX reads primary table attributes for index keys. Tuple is + * specified by location of original tuple and version number. Input + * is attribute ids in AttributeHeader format. Output is attribute + * data with headers. Uses readAttributes with xfrm option set. + * Returns number of words or negative (-terrorCode) on error. */ - void tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData); + int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut); /* * TUX reads primary key without headers into an array of words. Used - * for md5 summing and when returning keyinfo. + * for md5 summing and when returning keyinfo. Returns number of + * words or negative (-terrorCode) on error. */ - void tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData); + int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut); /* * TUX checks if tuple is visible to scan. 
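The comment block above documents the new calling convention for the TUX-to-TUP read hooks: tuxReadAttrs and tuxReadPk now return the number of words written to dataOut, or a negative value carrying -terrorCode on failure. Below is a minimal sketch of a caller following that convention; the stand-in function readWords and the buffer are invented for the example, while the real callers appear in the DbtuxGen.cpp hunks further down, which currently just ndbrequire a positive result.

#include <cstdio>

typedef unsigned Uint32;

// stand-in with the same convention: > 0 means words produced,
// < 0 means -(NDB error code)
static int readWords(Uint32* dataOut, Uint32 maxWords) {
  if (maxWords < 2)
    return -1;             // a real implementation returns -terrorCode
  dataOut[0] = 0x00010001; // header word (opaque for this sketch)
  dataOut[1] = 42;         // one word of attribute data
  return 2;
}

int main() {
  Uint32 buf[8];
  const int ret = readWords(buf, 8);
  if (ret < 0)
    printf("read failed, NDB error %d\n", -ret);
  else
    printf("read %d words\n", ret);
  return 0;
}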
@@ -1368,7 +1371,7 @@ private: //------------------------------------------------------------------ int readAttributes(Page* const pagePtr, Uint32 TupHeadOffset, - Uint32* inBuffer, + const Uint32* inBuffer, Uint32 inBufLen, Uint32* outBuffer, Uint32 TmaxRead); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp index ec2c63c736e..d864bac8b59 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp @@ -112,10 +112,11 @@ Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& no node = &pagePtr.p->pageWord[pageOffset] + attrDataOffset; } -void -Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData) +int +Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut) { ljamEntry(); + // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i = fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -134,6 +135,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu while (true) { ptrCheckGuard(opPtr, cnoOfOprec, operationrec); if (opPtr.p->realPageIdC != RNIL) { + // update page and offset pagePtr.i = opPtr.p->realPageIdC; pageOffset = opPtr.p->pageOffsetC; ptrCheckGuard(pagePtr, cnoOfPage, page); @@ -147,33 +149,34 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS)); } } - const Uint32 tabDescriptor = tablePtr.p->tabDescriptor; - const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset]; - for (Uint32 i = 0; i < numAttrs; i++) { - AttributeHeader ah(attrIds[i]); - const Uint32 attrId = ah.getAttributeId(); - const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE); - const Uint32 desc1 = tableDescriptor[index].tabDescr; - const Uint32 desc2 = tableDescriptor[index + 1].tabDescr; - if (AttributeDescriptor::getNullable(desc1)) { - Uint32 offset = AttributeOffset::getNullFlagOffset(desc2); - ndbrequire(offset < tablePtr.p->tupNullWords); - offset += tablePtr.p->tupNullIndex; - ndbrequire(offset < tablePtr.p->tupheadsize); - if (AttributeOffset::isNULL(tupleHeader[offset], desc2)) { - ljam(); - attrData[i] = 0; - continue; - } - } - attrData[i] = tupleHeader + AttributeOffset::getOffset(desc2); + // read key attributes from found tuple version + // save globals + TablerecPtr tabptr_old = tabptr; + FragrecordPtr fragptr_old = fragptr; + OperationrecPtr operPtr_old = operPtr; + // new globals + tabptr = tablePtr; + fragptr = fragPtr; + operPtr.i = RNIL; + operPtr.p = NULL; + // do it + int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL); + // restore globals + tabptr = tabptr_old; + fragptr = fragptr_old; + operPtr = operPtr_old; + // done + if (ret == (Uint32)-1) { + ret = terrorCode ? 
(-(int)terrorCode) : -1; } + return ret; } -void -Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData) +int +Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut) { ljamEntry(); + // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i = fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -184,25 +187,45 @@ Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pk pagePtr.i = pageId; ptrCheckGuard(pagePtr, cnoOfPage, page); const Uint32 tabDescriptor = tablePtr.p->tabDescriptor; - const Uint32 numAttrs = tablePtr.p->noOfKeyAttr; const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr; - const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset]; - Uint32 size = 0; - for (Uint32 i = 0; i < numAttrs; i++) { - AttributeHeader ah(attrIds[i]); - const Uint32 attrId = ah.getAttributeId(); - const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE); - const Uint32 desc1 = tableDescriptor[index].tabDescr; - const Uint32 desc2 = tableDescriptor[index + 1].tabDescr; - ndbrequire(! AttributeDescriptor::getNullable(desc1)); - const Uint32 attrSize = AttributeDescriptor::getSizeInWords(desc1); - const Uint32* attrData = tupleHeader + AttributeOffset::getOffset(desc2); - for (Uint32 j = 0; j < attrSize; j++) { - pkData[size + j] = attrData[j]; + const Uint32 numAttrs = tablePtr.p->noOfKeyAttr; + // read pk attributes from original tuple + // save globals + TablerecPtr tabptr_old = tabptr; + FragrecordPtr fragptr_old = fragptr; + OperationrecPtr operPtr_old = operPtr; + // new globals + tabptr = tablePtr; + fragptr = fragPtr; + operPtr.i = RNIL; + operPtr.p = NULL; + // do it + int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL); + // restore globals + tabptr = tabptr_old; + fragptr = fragptr_old; + operPtr = operPtr_old; + // done + if (ret != (Uint32)-1) { + // remove headers + Uint32 n = 0; + Uint32 i = 0; + while (n < numAttrs) { + const AttributeHeader ah(dataOut[i]); + Uint32 size = ah.getDataSize(); + ndbrequire(size != 0); + for (Uint32 j = 0; j < size; j++) { + dataOut[i + j - n] = dataOut[i + j + 1]; + } + n += 1; + i += 1 + size; } - size += attrSize; + ndbrequire(i == ret); + ret -= numAttrs; + } else { + ret = terrorCode ? (-(int)terrorCode) : -1; } - *pkSize = size; + return ret; } bool diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index cc47ef7e78f..49ca52b2b9c 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -146,7 +146,7 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) /* ---------------------------------------------------------------- */ int Dbtup::readAttributes(Page* const pagePtr, Uint32 tupHeadOffset, - Uint32* inBuffer, + const Uint32* inBuffer, Uint32 inBufLen, Uint32* outBuffer, Uint32 maxRead) diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp index 66f9717a478..8dca52cec04 100644 --- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp +++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp @@ -162,11 +162,6 @@ private: // AttributeHeader size is assumed to be 1 word static const unsigned AttributeHeaderSize = 1; - /* - * Array of pointers to TUP table attributes. Always read-on|y. - */ - typedef const Uint32** TableData; - /* * Logical tuple address, "local key". Identifies table tuples. 
*/ @@ -557,9 +552,9 @@ private: void execREAD_CONFIG_REQ(Signal* signal); // utils void setKeyAttrs(const Frag& frag); - void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData); - void readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData); - void copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize); + void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData); + void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize); + void copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize); /* * DbtuxMeta.cpp @@ -626,17 +621,15 @@ private: /* * DbtuxSearch.cpp */ - void searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos); - void searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos); + void searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos); + void searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos); void searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos); /* * DbtuxCmp.cpp */ - int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize); - int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey); + int cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize); int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize); - int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey); /* * DbtuxDebug.cpp @@ -683,17 +676,27 @@ private: Uint32 c_typeOfStart; /* - * Array of index key attribute ids in AttributeHeader format. - * Includes fixed attribute sizes. This is global data set at - * operation start and is not passed as a parameter. + * Global data set at operation start. Unpacked from index metadata. + * Not passed as parameter to methods. Invalid across timeslices. + * + * TODO inline all into index metadata */ + + // index key attr ids with sizes in AttributeHeader format Data c_keyAttrs; - // buffer for search key data as pointers to TUP storage - TableData c_searchKey; + // pointers to index key comparison functions + NdbSqlUtil::Cmp** c_sqlCmp; - // buffer for current entry key data as pointers to TUP storage - TableData c_entryKey; + /* + * Other buffers used during the operation. + */ + + // buffer for search key data with headers + Data c_searchKey; + + // buffer for current entry key data with headers + Data c_entryKey; // buffer for scan bounds and keyinfo (primary key) Data c_dataBuffer; diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp index debb5252386..4ce413af138 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp @@ -18,21 +18,24 @@ #include "Dbtux.hpp" /* - * Search key vs node prefix. + * Search key vs node prefix or entry * - * The comparison starts at given attribute position (in fact 0). The - * position is updated by number of equal initial attributes found. The - * prefix may be partial in which case CmpUnknown may be returned. 
+ * The comparison starts at given attribute position. The position is + * updated by number of equal initial attributes found. The entry data + * may be partial in which case CmpUnknown may be returned. */ int -Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen) +Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen) { const unsigned numAttrs = frag.m_numAttrs; const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); // number of words of attribute data left unsigned len2 = maxlen; - // skip to right position in search key - searchKey += start; + // skip to right position in search key only + for (unsigned i = 0; i < start; i++) { + jam(); + searchKey += AttributeHeaderSize + searchKey.ah().getDataSize(); + } int ret = 0; while (start < numAttrs) { if (len2 <= AttributeHeaderSize) { @@ -41,22 +44,21 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons break; } len2 -= AttributeHeaderSize; - if (*searchKey != 0) { + if (! searchKey.ah().isNULL()) { if (! entryData.ah().isNULL()) { jam(); // current attribute const DescAttr& descAttr = descEnt.m_descAttr[start]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); // full data size const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc); ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize()); const unsigned size2 = min(size1, len2); len2 -= size2; // compare - const Uint32* const p1 = *searchKey; + NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start]; + const Uint32* const p1 = &searchKey[AttributeHeaderSize]; const Uint32* const p2 = &entryData[AttributeHeaderSize]; - ret = (*type.m_cmp)(p1, p2, size1, size2); + ret = (*cmp)(p1, p2, size1, size2); if (ret != 0) { jam(); break; @@ -75,7 +77,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons break; } } - searchKey += 1; + searchKey += AttributeHeaderSize + searchKey.ah().getDataSize(); entryData += AttributeHeaderSize + entryData.ah().getDataSize(); start++; } @@ -83,60 +85,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons } /* - * Search key vs tree entry. - * - * Start position is updated as in previous routine. - */ -int -Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey) -{ - const unsigned numAttrs = frag.m_numAttrs; - const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); - // skip to right position - searchKey += start; - entryKey += start; - int ret = 0; - while (start < numAttrs) { - if (*searchKey != 0) { - if (*entryKey != 0) { - jam(); - // current attribute - const DescAttr& descAttr = descEnt.m_descAttr[start]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); - // full data size - const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc); - // compare - const Uint32* const p1 = *searchKey; - const Uint32* const p2 = *entryKey; - ret = (*type.m_cmp)(p1, p2, size1, size1); - if (ret != 0) { - jam(); - break; - } - } else { - jam(); - // not NULL > NULL - ret = +1; - break; - } - } else { - if (*entryKey != 0) { - jam(); - // NULL < not NULL - ret = -1; - break; - } - } - searchKey += 1; - entryKey += 1; - start++; - } - return ret; -} - -/* - * Scan bound vs node prefix. 
+ * Scan bound vs node prefix or entry. * * Compare lower or upper bound and index attribute data. The attribute * data may be partial in which case CmpUnknown may be returned. @@ -184,8 +133,6 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne // current attribute const unsigned index = boundInfo.ah().getAttributeId(); const DescAttr& descAttr = descEnt.m_descAttr[index]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId); // full data size const unsigned size1 = boundInfo.ah().getDataSize(); @@ -193,9 +140,10 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne const unsigned size2 = min(size1, len2); len2 -= size2; // compare + NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index]; const Uint32* const p1 = &boundInfo[AttributeHeaderSize]; const Uint32* const p2 = &entryData[AttributeHeaderSize]; - int ret = (*type.m_cmp)(p1, p2, size1, size2); + int ret = (*cmp)(p1, p2, size1, size2); if (ret != 0) { jam(); return ret; @@ -244,72 +192,3 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne return +1; } } - -/* - * Scan bound vs tree entry. - */ -int -Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey) -{ - const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); - // direction 0-lower 1-upper - ndbrequire(dir <= 1); - // initialize type to equality - unsigned type = 4; - while (boundCount != 0) { - // get and skip bound type - type = boundInfo[0]; - boundInfo += 1; - if (! boundInfo.ah().isNULL()) { - if (*entryKey != 0) { - jam(); - // current attribute - const unsigned index = boundInfo.ah().getAttributeId(); - const DescAttr& descAttr = descEnt.m_descAttr[index]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); - // full data size - const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc); - // compare - const Uint32* const p1 = &boundInfo[AttributeHeaderSize]; - const Uint32* const p2 = *entryKey; - int ret = (*type.m_cmp)(p1, p2, size1, size1); - if (ret != 0) { - jam(); - return ret; - } - } else { - jam(); - // not NULL > NULL - return +1; - } - } else { - jam(); - if (*entryKey != 0) { - jam(); - // NULL < not NULL - return -1; - } - } - boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize(); - entryKey += 1; - boundCount -= 1; - } - if (dir == 0) { - // lower bound - jam(); - if (type == 1) { - jam(); - return +1; - } - return -1; - } else { - // upper bound - jam(); - if (type == 3) { - jam(); - return -1; - } - return +1; - } -} diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp index 11f4f12b7f6..8d31d2c6a55 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp @@ -207,14 +207,10 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar& } // check ordering within node for (unsigned j = 1; j < node.getOccup(); j++) { - unsigned start = 0; const TreeEnt ent1 = node.getEnt(j - 1); const TreeEnt ent2 = node.getEnt(j); - if (j == 1) { - readKeyAttrs(frag, ent1, start, c_searchKey); - } else { - memcpy(c_searchKey, c_entryKey, frag.m_numAttrs << 2); - } + unsigned start = 0; + readKeyAttrs(frag, ent1, start, c_searchKey); 
readKeyAttrs(frag, ent2, start, c_entryKey); int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey); if (ret == 0) diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp index f6f1610c8c1..958ba4b0839 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp @@ -16,8 +16,6 @@ #define DBTUX_GEN_CPP #include "Dbtux.hpp" -#include -#include Dbtux::Dbtux(const Configuration& conf) : SimulatedBlock(DBTUX, conf), @@ -202,8 +200,9 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal) } // allocate buffers c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes); - c_searchKey = (TableData)allocRecord("c_searchKey", sizeof(Uint32*), MaxIndexAttributes); - c_entryKey = (TableData)allocRecord("c_entryKey", sizeof(Uint32*), MaxIndexAttributes); + c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes); + c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32*), MaxIndexAttributes); + c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32*), MaxIndexAttributes); c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1); // ack ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); @@ -218,7 +217,8 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal) void Dbtux::setKeyAttrs(const Frag& frag) { - Data keyAttrs = c_keyAttrs; // global + Data keyAttrs = c_keyAttrs; // global + NdbSqlUtil::Cmp** sqlCmp = c_sqlCmp; // global const unsigned numAttrs = frag.m_numAttrs; const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); for (unsigned i = 0; i < numAttrs; i++) { @@ -227,75 +227,71 @@ Dbtux::setKeyAttrs(const Frag& frag) // set attr id and fixed size keyAttrs.ah() = AttributeHeader(descAttr.m_primaryAttrId, size); keyAttrs += 1; + // set comparison method pointer + const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getTypeBinary(descAttr.m_typeId); + ndbrequire(sqlType.m_cmp != 0); + *(sqlCmp++) = sqlType.m_cmp; } } void -Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData) +Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData) { ConstData keyAttrs = c_keyAttrs; // global const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit]; const TupLoc tupLoc = ent.m_tupLoc; const Uint32 tupVersion = ent.m_tupVersion; ndbrequire(start < frag.m_numAttrs); - const unsigned numAttrs = frag.m_numAttrs - start; - // start applies to both keys and output data + const Uint32 numAttrs = frag.m_numAttrs - start; + // skip to start position in keyAttrs only keyAttrs += start; - keyData += start; - c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, numAttrs, keyAttrs, keyData); + int ret = c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, keyAttrs, numAttrs, keyData); jamEntry(); + // TODO handle error + ndbrequire(ret > 0); } void -Dbtux::readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData) +Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize) { const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit]; const TupLoc tupLoc = ent.m_tupLoc; - Uint32 size = 0; - c_tup->tuxReadKeys(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, &size, pkData); - ndbrequire(size != 0); - pkSize = size; + int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, pkData); + jamEntry(); + // TODO handle error + 
ndbrequire(ret > 0); + pkSize = ret; } /* - * Input is pointers to table attributes. Output is array of attribute - * data with headers. Copies whatever fits. + * Copy attribute data with headers. Input is all index key data. + * Copies whatever fits. */ void -Dbtux::copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2) +Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2) { - ConstData keyAttrs = c_keyAttrs; // global - const unsigned numAttrs = frag.m_numAttrs; + unsigned n = frag.m_numAttrs; unsigned len2 = maxlen2; - for (unsigned n = 0; n < numAttrs; n++) { + while (n != 0) { jam(); - const unsigned attrId = keyAttrs.ah().getAttributeId(); - const unsigned dataSize = keyAttrs.ah().getDataSize(); - const Uint32* const p1 = *data1; - if (p1 != 0) { + const unsigned dataSize = data1.ah().getDataSize(); + // copy header + if (len2 == 0) + return; + data2[0] = data1[0]; + data1 += 1; + data2 += 1; + len2 -= 1; + // copy data + for (unsigned i = 0; i < dataSize; i++) { if (len2 == 0) return; - data2.ah() = AttributeHeader(attrId, dataSize); - data2 += 1; - len2 -= 1; - unsigned n = dataSize; - for (unsigned i = 0; i < dataSize; i++) { - if (len2 == 0) - return; - *data2 = p1[i]; - data2 += 1; - len2 -= 1; - } - } else { - if (len2 == 0) - return; - data2.ah() = AttributeHeader(attrId, 0); - data2.ah().setNULL(); - data2 += 1; + data2[i] = data1[i]; len2 -= 1; } - keyAttrs += 1; - data1 += 1; + data1 += dataSize; + data2 += dataSize; + n -= 1; } #ifdef VM_TRACE memset(data2, DataFillByte, len2 << 2); diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp index c4c33ff931f..9d7d4b06bf7 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp @@ -389,7 +389,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal) jam(); const TreeEnt ent = scan.m_scanPos.m_ent; // read tuple key - readTablePk(frag, ent, pkSize, pkData); + readTablePk(frag, ent, pkData, pkSize); // get read lock or exclusive lock AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); lockReq->returnCode = RNIL; @@ -480,7 +480,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal) jam(); if (pkSize == 0) { jam(); - readTablePk(frag, ent, pkSize, pkData); + readTablePk(frag, ent, pkData, pkSize); } } // conf signal diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp index 84048b308bc..bffbb8f5594 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp @@ -25,7 +25,7 @@ * TODO optimize for initial equal attrs in node min/max */ void -Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos) +Dbtux::searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos) { const TreeHead& tree = frag.m_tree; const unsigned numAttrs = frag.m_numAttrs; @@ -144,7 +144,7 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear * to it. 
*/ void -Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos) +Dbtux::searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos) { const TreeHead& tree = frag.m_tree; const unsigned numAttrs = frag.m_numAttrs; From 2468e5ba46dd94ffbf75298d01141b56e7783589 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Sep 2004 18:02:41 +0200 Subject: [PATCH 11/55] wl1668 - Add support for getting events via mgmapi removed all traces of stat port BitKeeper/deleted/.del-NodeLogLevel.cpp~324045b93af7123b: Delete: ndb/src/mgmsrv/NodeLogLevel.cpp BitKeeper/deleted/.del-NodeLogLevel.hpp~4f4ab0fe52fb497c: Delete: ndb/src/mgmsrv/NodeLogLevel.hpp BitKeeper/deleted/.del-NodeLogLevelList.cpp~97dc9c909e3e92bf: Delete: ndb/src/mgmsrv/NodeLogLevelList.cpp BitKeeper/deleted/.del-NodeLogLevelList.hpp~ef567dd850abddc7: Delete: ndb/src/mgmsrv/NodeLogLevelList.hpp ndb/include/debugger/EventLogger.hpp: Split EventLogger into two classes ndb/include/kernel/LogLevel.hpp: Add some nice to have methods + use uint8 for storage ndb/include/kernel/signaldata/EventSubscribeReq.hpp: operator= ndb/include/kernel/signaldata/SetLogLevelOrd.hpp: operator= ndb/include/mgmapi/mgmapi.h: remove deprecated vars ndb/src/common/debugger/EventLogger.cpp: Split into 2 ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: Remove hardcoded mgm stuff ndb/src/mgmsrv/CommandInterpreter.cpp: #if 0 some deprecated code ndb/src/mgmsrv/Makefile.am: remove NodeLogLevel* ndb/src/mgmsrv/MgmtSrvr.cpp: Remove dead code Handle multiple event listeners in mgmapi ndb/src/mgmsrv/MgmtSrvr.hpp: Remove dead code Handle multiple event listeners in mgmapi ndb/src/mgmsrv/Services.cpp: listen event method in mgmapi ndb/src/mgmsrv/Services.hpp: listen event method in mgmapi ndb/src/mgmsrv/main.cpp: remove stat port --- ndb/include/debugger/EventLogger.hpp | 235 +++++----- ndb/include/kernel/LogLevel.hpp | 24 +- .../kernel/signaldata/EventSubscribeReq.hpp | 14 +- .../kernel/signaldata/SetLogLevelOrd.hpp | 19 + ndb/include/mgmapi/mgmapi.h | 18 - ndb/src/common/debugger/EventLogger.cpp | 423 ++++++++--------- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 33 +- ndb/src/mgmsrv/CommandInterpreter.cpp | 22 +- ndb/src/mgmsrv/Makefile.am | 2 - ndb/src/mgmsrv/MgmtSrvr.cpp | 439 +++++------------- ndb/src/mgmsrv/MgmtSrvr.hpp | 118 ++--- ndb/src/mgmsrv/NodeLogLevel.cpp | 70 --- ndb/src/mgmsrv/NodeLogLevel.hpp | 54 --- ndb/src/mgmsrv/NodeLogLevelList.cpp | 182 -------- ndb/src/mgmsrv/NodeLogLevelList.hpp | 93 ---- ndb/src/mgmsrv/Services.cpp | 205 ++++++-- ndb/src/mgmsrv/Services.hpp | 27 +- ndb/src/mgmsrv/main.cpp | 18 +- 18 files changed, 688 insertions(+), 1308 deletions(-) delete mode 100644 ndb/src/mgmsrv/NodeLogLevel.cpp delete mode 100644 ndb/src/mgmsrv/NodeLogLevel.hpp delete mode 100644 ndb/src/mgmsrv/NodeLogLevelList.cpp delete mode 100644 ndb/src/mgmsrv/NodeLogLevelList.hpp diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp index 6cd6a83e68d..36cece6f22a 100644 --- a/ndb/include/debugger/EventLogger.hpp +++ b/ndb/include/debugger/EventLogger.hpp @@ -24,128 +24,15 @@ #include #include -/** - * The EventLogger is primarily used for logging NDB events - * in the Management Server. It inherits all logging functionality of Logger. - * - * HOW TO USE - * - * 1) Create an EventLogger - * - * EventLogger myEventLogger = new EventLogger(); - * - * 2) Log NDB events and other log messages. 
- * - * myEventLogger->info("Changing log levels."); - * - * EventReport* report = (EventReport*)&theSignalData[0]; - * myEventLogger->log(eventReport->getEventType(), theSignalData, aNodeId); - * - * - * The following NDB event categories and log levels are enabled as default: - * - * EVENT-CATEGORY LOG-LEVEL - * - * Startup 4 - * Shutdown 1 - * Statistic 2 - * Checkpoint 5 - * NodeRestart 8 - * Connection 2 - * Error 15 - * Info 10 - * - * @see Logger - * @version #@ $Id: EventLogger.hpp,v 1.3 2003/09/01 10:15:52 innpeno Exp $ - */ -class EventLogger : public Logger -{ +class EventLoggerBase { public: - /** - * Default constructor. Enables default log levels and - * sets the log category to 'EventLogger'. - */ - EventLogger(); + virtual ~EventLoggerBase(); /** - * Destructor. + * LogLevel settings */ - ~EventLogger(); - - /** - * Opens/creates the eventlog with the specified filename. - * - * @param aFileName the eventlog filename. - * @param maxNoFiles the maximum no of archived eventlog files. - * @param maxFileSize the maximum eventlog file size. - * @param maxLogEntries the maximum number of log entries before - * checking time to archive. - * @return true if successful. - */ - bool open(const char* logFileName, - int maxNoFiles = FileLogHandler::MAX_NO_FILES, - long int maxFileSize = FileLogHandler::MAX_FILE_SIZE, - unsigned int maxLogEntries = FileLogHandler::MAX_LOG_ENTRIES); - - /** - * Closes the eventlog. - */ - void close(); - - /** - * Logs the NDB event. - * - * @param nodeId the node id of event origin. - * @param eventType the type of event. - * @param theData the event data. - * @deprecated use log(int eventType, const Uint32* theData, NodeId nodeId) - */ - void log(NodeId nodeId, int eventType, const Uint32* theData); - - /** - * Logs the NDB event. - * - * @param eventType the type of event. - * @param theData the event data. - * @param nodeId the node id of event origin. - */ - void log(int eventType, const Uint32* theData, NodeId nodeId = 0); - - /** - * Returns the current log levels. - * Enable, disable log levels to filter the events that are sent to the - * eventlog. - * - * @return the log level. - */ - LogLevel& getLoglevel(); + LogLevel m_logLevel; - /** - * Returns the log level that is used to filter an event. The event will not - * be logged unless its event category's log level is <= levelFilter. - * - * @return the log level filter that is used for all event categories. - */ - int getFilterLevel() const; - /** - * Sets log level filter. The event will be logged if - * the event category's log level is <= 'filterLevel'. - * - * @param level the log level to filter. - */ - void setFilterLevel(int filterLevel); - - /** - * Returns the event text for the specified event report type. - * - * @param type the event type. - * @param theData the event data. - * @param nodeId a node id. - * @return the event report text. - */ - static const char* getText(int type, - const Uint32* theData, NodeId nodeId = 0); - /** * Find a category matching the string * @@ -193,22 +80,113 @@ public: }; static const EventRepLogLevelMatrix matrix[]; + static const Uint32 matrixSize; +}; + +/** + * The EventLogger is primarily used for logging NDB events + * in the Management Server. It inherits all logging functionality of Logger. + * + * HOW TO USE + * + * 1) Create an EventLogger + * + * EventLogger myEventLogger = new EventLogger(); + * + * 2) Log NDB events and other log messages. 
+ * + * myEventLogger->info("Changing log levels."); + * + * EventReport* report = (EventReport*)&theSignalData[0]; + * myEventLogger->log(eventReport->getEventType(), theSignalData, aNodeId); + * + * + * The following NDB event categories and log levels are enabled as default: + * + * EVENT-CATEGORY LOG-LEVEL + * + * Startup 4 + * Shutdown 1 + * Statistic 2 + * Checkpoint 5 + * NodeRestart 8 + * Connection 2 + * Error 15 + * Info 10 + * + * @see Logger + * @version #@ $Id: EventLogger.hpp,v 1.3 2003/09/01 10:15:52 innpeno Exp $ + */ +class EventLogger : public EventLoggerBase, public Logger +{ +public: + /** + * Default constructor. Enables default log levels and + * sets the log category to 'EventLogger'. + */ + EventLogger(); /** - * Default log levels for management nodes. - * - * threshold - is in range [0-15] + * Destructor. */ - struct EventLogMatrix { - LogLevel::EventCategory eventCategory; - Uint32 threshold; - }; + virtual ~EventLogger(); - static const EventLogMatrix defEventLogMatrix[]; + /** + * Opens/creates the eventlog with the specified filename. + * + * @param aFileName the eventlog filename. + * @param maxNoFiles the maximum no of archived eventlog files. + * @param maxFileSize the maximum eventlog file size. + * @param maxLogEntries the maximum number of log entries before + * checking time to archive. + * @return true if successful. + */ + bool open(const char* logFileName, + int maxNoFiles = FileLogHandler::MAX_NO_FILES, + long int maxFileSize = FileLogHandler::MAX_FILE_SIZE, + unsigned int maxLogEntries = FileLogHandler::MAX_LOG_ENTRIES); + /** + * Closes the eventlog. + */ + void close(); + + /** + * Logs the NDB event. + * + * @param eventType the type of event. + * @param theData the event data. + * @param nodeId the node id of event origin. + */ + virtual void log(int eventType, const Uint32* theData, NodeId nodeId = 0); + + /** + * Returns the event text for the specified event report type. + * + * @param type the event type. + * @param theData the event data. + * @param nodeId a node id. + * @return the event report text. + */ + static const char* getText(char * dst, size_t dst_len, + int type, + const Uint32* theData, NodeId nodeId = 0); - static const Uint32 matrixSize; - static const Uint32 defEventLogMatrixSize; + /** + * Returns the log level that is used to filter an event. The event will not + * be logged unless its event category's log level is <= levelFilter. + * + * @return the log level filter that is used for all event categories. + */ + int getFilterLevel() const; + + /** + * Sets log level filter. The event will be logged if + * the event category's log level is <= 'filterLevel'. + * + * @param level the log level to filter. 
+ */ + void setFilterLevel(int filterLevel); private: /** Prohibit */ @@ -216,11 +194,10 @@ private: EventLogger operator = (const EventLogger&); bool operator == (const EventLogger&); - LogLevel m_logLevel; Uint32 m_filterLevel; STATIC_CONST(MAX_TEXT_LENGTH = 256); - static char m_text[MAX_TEXT_LENGTH]; + char m_text[MAX_TEXT_LENGTH]; }; diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp index 10cd0d43bee..e3a81263dcb 100644 --- a/ndb/include/kernel/LogLevel.hpp +++ b/ndb/include/kernel/LogLevel.hpp @@ -130,18 +130,25 @@ public: */ Uint32 getLogLevel(EventCategory ec) const; + /** + * Set this= max(this, ll) per category + */ + LogLevel& set_max(const LogLevel& ll); + + bool operator==(const LogLevel& l) const { + return memcmp(this, &l, sizeof(* this)) == 0; + } + private: /** * The actual data */ - Uint32 logLevelData[LOGLEVEL_CATEGORIES]; - - LogLevel(const LogLevel &); + Uint8 logLevelData[LOGLEVEL_CATEGORIES]; }; inline LogLevel::LogLevel(){ - clear(); + clear(); } inline @@ -176,5 +183,14 @@ LogLevel::getLogLevel(EventCategory ec) const{ return logLevelData[ec]; } +inline +LogLevel & +LogLevel::set_max(const LogLevel & org){ + for(Uint32 i = 0; i +#include "EventSubscribeReq.hpp" #include "SignalData.hpp" /** @@ -51,6 +52,24 @@ private: * Note level is valid as 0-15 */ void setLogLevel(LogLevel::EventCategory ec, int level = 7); + + SetLogLevelOrd& operator= (const LogLevel& ll){ + noOfEntries = _LOGLEVEL_CATEGORIES; + for(size_t i = 0; icode >> 16; switch (code) { case ArbitCode::ThreadStart: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sPresident restarts arbitration thread [state=%u]", theNodeId, state); break; case ArbitCode::PrepPart2: sd->ticket.getText(ticketText, sizeof(ticketText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sPrepare arbitrator node %u [ticket=%s]", theNodeId, sd->node, ticketText); break; case ArbitCode::PrepAtrun: sd->ticket.getText(ticketText, sizeof(ticketText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sReceive arbitrator node %u [ticket=%s]", theNodeId, sd->node, ticketText); break; case ArbitCode::ApiStart: sd->ticket.getText(ticketText, sizeof(ticketText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sStarted arbitrator node %u [ticket=%s]", theNodeId, sd->node, ticketText); break; case ArbitCode::ApiFail: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLost arbitrator node %u - process failure [state=%u]", theNodeId, sd->node, state); break; case ArbitCode::ApiExit: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLost arbitrator node %u - process exit [state=%u]", theNodeId, sd->node, state); break; default: ArbitCode::getErrText(code, errText, sizeof(errText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLost arbitrator node %u - %s [state=%u]", theNodeId, sd->node, errText, state); break; @@ -446,48 +431,48 @@ EventLogger::getText(int type, const unsigned state = sd->code >> 16; switch (code) { case ArbitCode::LoseNodes: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration check lost - less than 1/2 nodes left", theNodeId); break; case ArbitCode::WinGroups: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration check won - node group majority", theNodeId); break; case ArbitCode::LoseGroups: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, 
"%sArbitration check lost - missing node group", theNodeId); break; case ArbitCode::Partitioning: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNetwork partitioning - arbitration required", theNodeId); break; case ArbitCode::WinChoose: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration won - positive reply from node %u", theNodeId, sd->node); break; case ArbitCode::LoseChoose: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration lost - negative reply from node %u", theNodeId, sd->node); break; case ArbitCode::LoseNorun: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNetwork partitioning - no arbitrator available", theNodeId); break; case ArbitCode::LoseNocfg: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNetwork partitioning - no arbitrator configured", theNodeId); break; default: ArbitCode::getErrText(code, errText, sizeof(errText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration failure - %s [state=%u]", theNodeId, errText, state); break; @@ -500,7 +485,7 @@ EventLogger::getText(int type, // node is the master of this global checkpoint. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sGlobal checkpoint %u started", theNodeId, theData[1]); @@ -510,7 +495,7 @@ EventLogger::getText(int type, // This event reports that a global checkpoint has been completed on this // node and the node is the master of this global checkpoint. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sGlobal checkpoint %u completed", theNodeId, theData[1]); @@ -521,7 +506,7 @@ EventLogger::getText(int type, // node is the master of this local checkpoint. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLocal checkpoint %u started. " "Keep GCI = %u oldest restorable GCI = %u", theNodeId, @@ -535,7 +520,7 @@ EventLogger::getText(int type, // node and the node is the master of this local checkpoint. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLocal checkpoint %u completed", theNodeId, theData[1]); @@ -544,14 +529,14 @@ EventLogger::getText(int type, //----------------------------------------------------------------------- // This event reports that a table has been created. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sTable with ID = %u created", theNodeId, theData[1]); break; case EventReport::LCPStoppedInCalcKeepGci: if (theData[1] == 0) - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLocal Checkpoint stopped in CALCULATED_KEEP_GCI", theNodeId); break; @@ -560,7 +545,7 @@ EventLogger::getText(int type, // REPORT Node Restart completed copy of dictionary information. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart completed copy of dictionary information", theNodeId); break; @@ -569,7 +554,7 @@ EventLogger::getText(int type, // REPORT Node Restart completed copy of distribution information. 
//----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart completed copy of distribution information", theNodeId); break; @@ -578,7 +563,7 @@ EventLogger::getText(int type, // REPORT Node Restart is starting to copy the fragments. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart starting to copy the fragments " "to Node %u", theNodeId, @@ -589,7 +574,7 @@ EventLogger::getText(int type, // REPORT Node Restart copied a fragment. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTable ID = %u, fragment ID = %u have been copied " "to Node %u", theNodeId, @@ -599,7 +584,7 @@ EventLogger::getText(int type, break; case EventReport::NR_CopyFragsCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart completed copying the fragments " "to Node %u", theNodeId, @@ -607,7 +592,7 @@ EventLogger::getText(int type, break; case EventReport::LCPFragmentCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTable ID = %u, fragment ID = %u has completed LCP " "on Node %u", theNodeId, @@ -620,7 +605,7 @@ EventLogger::getText(int type, // Report information about transaction activity once per 10 seconds. // ------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTrans. Count = %u, Commit Count = %u, " "Read Count = %u, Simple Read Count = %u,\n" "Write Count = %u, AttrInfo Count = %u, " @@ -639,7 +624,7 @@ EventLogger::getText(int type, theData[10]); break; case EventReport::OperationReportCounters: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sOperations=%u", theNodeId, theData[1]); @@ -649,7 +634,7 @@ EventLogger::getText(int type, // REPORT Undo Logging blocked due to buffer near to overflow. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sACC Blocked %u and TUP Blocked %u times last second", theNodeId, theData[1], @@ -658,7 +643,7 @@ EventLogger::getText(int type, case EventReport::TransporterError: case EventReport::TransporterWarning: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTransporter to node %d reported error 0x%x", theNodeId, theData[1], @@ -669,7 +654,7 @@ EventLogger::getText(int type, // REPORT Undo Logging blocked due to buffer near to overflow. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode %d missed heartbeat %d", theNodeId, theData[1], @@ -680,21 +665,21 @@ EventLogger::getText(int type, // REPORT Undo Logging blocked due to buffer near to overflow. 
//----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode %d declared dead due to missed heartbeat", theNodeId, theData[1]); break; case EventReport::JobStatistic: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sMean loop Counter in doJob last 8192 times = %u", theNodeId, theData[1]); break; case EventReport::SendBytesStatistic: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sMean send size to Node = %d last 4096 sends = %u bytes", theNodeId, theData[1], @@ -702,7 +687,7 @@ EventLogger::getText(int type, break; case EventReport::ReceiveBytesStatistic: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sMean receive size to Node = %d last 4096 sends = %u bytes", theNodeId, theData[1], @@ -710,14 +695,14 @@ EventLogger::getText(int type, break; case EventReport::SentHeartbeat: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode Sent Heartbeat to node = %d", theNodeId, theData[1]); break; case EventReport::CreateLogBytes: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLog part %u, log file %u, MB %u", theNodeId, theData[1], @@ -726,7 +711,7 @@ EventLogger::getText(int type, break; case EventReport::StartLog: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLog part %u, start MB %u, stop MB %u, last GCI, log exec %u", theNodeId, theData[1], @@ -736,7 +721,7 @@ EventLogger::getText(int type, break; case EventReport::StartREDOLog: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]", theNodeId, theData[1], @@ -753,7 +738,7 @@ EventLogger::getText(int type, } ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%s UNDO %s %d [%d %d %d %d %d %d %d %d %d]", theNodeId, line, @@ -771,36 +756,36 @@ EventLogger::getText(int type, break; case EventReport::InfoEvent: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%s%s", theNodeId, (char *)&theData[1]); break; case EventReport::WarningEvent: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%s%s", theNodeId, (char *)&theData[1]); break; case EventReport::GCP_TakeoverStarted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sGCP Take over started", theNodeId); break; case EventReport::GCP_TakeoverCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sGCP Take over completed", theNodeId); break; case EventReport::LCP_TakeoverStarted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLCP Take over started", theNodeId); break; case EventReport::LCP_TakeoverCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLCP Take over completed (state = %d)", theNodeId, theData[1]); break; @@ -812,7 +797,7 @@ EventLogger::getText(int type, const int block = theData[5]; const int percent = (used*100)/total; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%s%s usage %s %d%s(%d %dK pages of total %d)", theNodeId, (block==DBACC ? 
"Index" : (block == DBTUP ?"Data":"")), @@ -833,7 +818,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Created subscription id" " (subId=%d,SubKey=%d)" " Return code: %d.", @@ -847,7 +832,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: Created subscription id" " (subId=%d,SubKey=%d)" " Return code: %d.", @@ -862,7 +847,7 @@ EventLogger::getText(int type, const int subKey = theData[3]; const int err = theData[4]; const int nodegrp = theData[5]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Created subscription using" " (subId=%d,SubKey=%d)" " in primary system. Primary system has %d nodegroup(s)." @@ -878,7 +863,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: All participants have created " "subscriptions" " using (subId=%d,SubKey=%d)." @@ -893,7 +878,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Logging started on meta data changes." " using (subId=%d,SubKey=%d)" " Return code: %d", @@ -907,7 +892,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: All participants have started " "logging meta data" " changes on the subscription subId=%d,SubKey=%d) " @@ -922,7 +907,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Logging started on table data changes " " using (subId=%d,SubKey=%d)" " Return code: %d", @@ -936,7 +921,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: All participants have started logging " "table data changes on the subscription " "subId=%d,SubKey=%d)." @@ -951,7 +936,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: All participants have started " " synchronization on meta data (META SCAN) using " "(subId=%d,SubKey=%d)." 
@@ -966,7 +951,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Synchronization started (META SCAN) on " " meta data using (subId=%d,SubKey=%d)" " Return code: %d", @@ -980,7 +965,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: All participants have started " "synchronization " " on table data (DATA SCAN) using (subId=%d,SubKey=%d)." @@ -996,7 +981,7 @@ EventLogger::getText(int type, const int subKey = theData[3]; const int err = theData[4]; const int gci = theData[5]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Synchronization started (DATA SCAN) on " "table data using (subId=%d,SubKey=%d). GCI = %d" " Return code: %d", @@ -1011,7 +996,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: All participants have removed " "subscription (subId=%d,SubKey=%d). I have cleaned " "up resources I've used." @@ -1026,7 +1011,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Removed subscription " "(subId=%d,SubKey=%d)" " Return code: %d", @@ -1037,7 +1022,7 @@ EventLogger::getText(int type, } default: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sUnknown GrepSubscriptonInfo event: %d", theNodeId, theData[1]); @@ -1055,7 +1040,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord:Error code: %d Error message: %s" " (subId=%d,SubKey=%d)", err, @@ -1069,7 +1054,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: FAILED to Created subscription using" " (subId=%d,SubKey=%d)in primary system." " Error code: %d Error Message: %s", @@ -1084,7 +1069,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Logging failed to start on meta " "data changes." 
" using (subId=%d,SubKey=%d)" @@ -1100,7 +1085,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Logging FAILED to start on table data " " changes using (subId=%d,SubKey=%d)" " Error code: %d Error Message: %s", @@ -1115,7 +1100,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Synchronization FAILED (META SCAN) on " " meta data using (subId=%d,SubKey=%d)" " Error code: %d Error Message: %s", @@ -1131,7 +1116,7 @@ EventLogger::getText(int type, const int subKey = theData[3]; const int err = theData[4]; const int gci = theData[5]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on " "table data using (subId=%d,SubKey=%d). GCI = %d" " Error code: %d Error Message: %s", @@ -1147,7 +1132,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::SSCoord: Failed to remove subscription " "(subId=%d,SubKey=%d). " " Error code: %d Error Message: %s", @@ -1164,7 +1149,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: Error code: %d Error Message: %s" " (subId=%d,SubKey=%d)", err, @@ -1178,7 +1163,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: FAILED to Created subscription using" " (subId=%d,SubKey=%d)in primary system." " Error code: %d Error Message: %s", @@ -1193,7 +1178,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: Logging failed to start on meta " "data changes." " using (subId=%d,SubKey=%d)" @@ -1209,7 +1194,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: Logging FAILED to start on table data " " changes using (subId=%d,SubKey=%d)" " Error code: %d Error Message: %s", @@ -1224,7 +1209,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: Synchronization FAILED (META SCAN) on " " meta data using (subId=%d,SubKey=%d)" " Error code: %d Error Message: %s", @@ -1240,7 +1225,7 @@ EventLogger::getText(int type, const int subKey = theData[3]; const int err = theData[4]; const int gci = theData[5]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on " "table data using (subId=%d,SubKey=%d). GCI = %d. 
" " Error code: %d Error Message: %s", @@ -1256,7 +1241,7 @@ EventLogger::getText(int type, const int subId = theData[2]; const int subKey = theData[3]; const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Grep::PSCoord: Failed to remove subscription " "(subId=%d,SubKey=%d)." " Error code: %d Error Message: %s", @@ -1270,7 +1255,7 @@ EventLogger::getText(int type, { const int err = theData[4]; const int nodeId = theData[5]; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "Rep: Node %d." " Error code: %d Error Message: %s", nodeId, @@ -1282,7 +1267,7 @@ EventLogger::getText(int type, default: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sUnknown GrepSubscriptionAlert event: %d", theNodeId, theData[1]); @@ -1293,7 +1278,7 @@ EventLogger::getText(int type, default: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sUnknown event: %d", theNodeId, theData[0]); @@ -1303,9 +1288,9 @@ EventLogger::getText(int type, } bool -EventLogger::matchEventCategory(const char * str, - LogLevel::EventCategory * cat, - bool exactMatch){ +EventLoggerBase::matchEventCategory(const char * str, + LogLevel::EventCategory * cat, + bool exactMatch){ unsigned i; if(cat == 0 || str == 0) return false; @@ -1326,7 +1311,7 @@ EventLogger::matchEventCategory(const char * str, } const char * -EventLogger::getEventCategoryName(LogLevel::EventCategory cat){ +EventLoggerBase::getEventCategoryName(LogLevel::EventCategory cat){ for(unsigned i = 0; iblockRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB); return; } - /** - * If it's a new subscription, clear the loglevel - * - * Clear only if noOfEntries is 0, this is needed beacuse we set - * the default loglevels for the MGMT nodes during the inital connect phase. - * See reportConnected(). 
- */ - if (subReq->noOfEntries == 0){ - ptr.p->logLevel.clear(); - } - + ptr.p->logLevel.clear(); ptr.p->blockRef = subReq->blockRef; } @@ -384,11 +374,6 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal) globalTransporterRegistry.setIOState(i, HaltIO); globalTransporterRegistry.do_disconnect(i); - - /** - * Cancel possible event subscription - */ - cancelSubscription(i); } } if (failNo != 0) { @@ -494,6 +479,8 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal) globalTransporterRegistry.do_connect(hostId); } + cancelSubscription(hostId); + signal->theData[0] = EventReport::Disconnected; signal->theData[1] = hostId; sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); @@ -539,20 +526,6 @@ void Cmvmi::execCONNECT_REP(Signal *signal){ if(type == NodeInfo::MGM){ jam(); globalTransporterRegistry.setIOState(hostId, NoHalt); - - EventSubscribeReq* dst = (EventSubscribeReq *)&signal->theData[0]; - - for (Uint32 i = 0; i < EventLogger::defEventLogMatrixSize; i++) { - dst->theCategories[i] = EventLogger::defEventLogMatrix[i].eventCategory; - dst->theLevels[i] = EventLogger::defEventLogMatrix[i].threshold; - } - - dst->noOfEntries = EventLogger::defEventLogMatrixSize; - /* The BlockNumber is hardcoded as 1 in MgmtSrvr */ - dst->blockRef = numberToRef(MIN_API_BLOCK_NO, hostId); - - execEVENT_SUBSCRIBE_REQ(signal); - } //------------------------------------------ diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp index 316b6d5795e..8388c012e55 100644 --- a/ndb/src/mgmsrv/CommandInterpreter.cpp +++ b/ndb/src/mgmsrv/CommandInterpreter.cpp @@ -389,9 +389,9 @@ void CommandInterpreter::executeHelp(char* parameters) { << endl; ndbout << " = "; - for(i = 0; i #include -#include "NodeLogLevel.hpp" #include #include @@ -191,41 +190,49 @@ EventLogger g_EventLogger; void MgmtSrvr::logLevelThreadRun() { - NdbMutex* threadMutex = NdbMutex_Create(); - while (!_isStopThread) { - if (_startedNodeId != 0) { - NdbMutex_Lock(threadMutex); + /** + * Handle started nodes + */ + EventSubscribeReq req; + req = m_statisticsListner.m_clients[0].m_logLevel; + req.blockRef = _ownReference; - // Local node - NodeLogLevel* n = NULL; - while ((n = _nodeLogLevelList->next()) != NULL) { - if (n->getNodeId() == _startedNodeId) { - setNodeLogLevel(_startedNodeId, n->getLogLevelOrd(), true); - } + SetLogLevelOrd ord; + + m_started_nodes.lock(); + while(m_started_nodes.size() > 0){ + Uint32 node = m_started_nodes[0]; + m_started_nodes.erase(0, false); + m_started_nodes.unlock(); + + setEventReportingLevelImpl(node, req); + + ord = m_nodeLogLevel[node]; + setNodeLogLevelImpl(node, ord); + + m_started_nodes.lock(); + } + m_started_nodes.unlock(); + + m_log_level_requests.lock(); + while(m_log_level_requests.size() > 0){ + req = m_log_level_requests[0]; + m_log_level_requests.erase(0, false); + m_log_level_requests.unlock(); + + if(req.blockRef == 0){ + req.blockRef = _ownReference; + setEventReportingLevelImpl(0, req); + } else { + ord = req; + setNodeLogLevelImpl(req.blockRef, ord); } - // Cluster log - while ((n = _clusterLogLevelList->next()) != NULL) { - if (n->getNodeId() == _startedNodeId) { - setEventReportingLevel(_startedNodeId, n->getLogLevelOrd(), true); - } - } - _startedNodeId = 0; - - NdbMutex_Unlock(threadMutex); - - } // if (_startedNodeId != 0) { - + m_log_level_requests.lock(); + } + m_log_level_requests.unlock(); NdbSleep_MilliSleep(_logLevelThreadSleep); - } // while (!_isStopThread) - - NdbMutex_Destroy(threadMutex); -} - -void -MgmtSrvr::setStatisticsListner(StatisticsListner* 
listner) -{ - m_statisticsListner = listner; + } } void @@ -272,7 +279,7 @@ class ErrorItem { public: int _errorCode; - const BaseString _errorText; + const char * _errorText; }; bool @@ -485,23 +492,6 @@ MgmtSrvr::getPort() const { return port; } -int -MgmtSrvr::getStatPort() const { -#if 0 - const Properties *mgmProps; - if(!getConfig()->get("Node", _ownNodeId, &mgmProps)) - return -1; - - int tmp = -1; - if(!mgmProps->get("PortNumberStats", (Uint32 *)&tmp)) - return -1; - - return tmp; -#else - return -1; -#endif -} - /* Constructor */ MgmtSrvr::MgmtSrvr(NodeId nodeId, const BaseString &configFilename, @@ -510,22 +500,19 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, _blockNumber(1), // Hard coded block number since it makes it easy to send // signals to other management servers. _ownReference(0), + m_allocated_resources(*this), theSignalIdleList(NULL), theWaitState(WAIT_SUBSCRIBE_CONF), - theConfCount(0), - m_allocated_resources(*this) { - + m_statisticsListner(this){ + DBUG_ENTER("MgmtSrvr::MgmtSrvr"); _config = NULL; - _isStatPortActive = false; - _isClusterLogStatActive = false; _isStopThread = false; _logLevelThread = NULL; _logLevelThreadSleep = 500; m_signalRecvThread = NULL; - _startedNodeId = 0; theFacade = 0; @@ -583,13 +570,7 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, ndb_mgm_destroy_iterator(iter); } - m_statisticsListner = NULL; - - _nodeLogLevelList = new NodeLogLevelList(); - _clusterLogLevelList = new NodeLogLevelList(); - _props = NULL; - _ownNodeId= 0; NodeId tmp= nodeId; BaseString error_string; @@ -610,6 +591,16 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, } } + { + MgmStatService::StatListener se; + se.m_socket = -1; + for(size_t t = 0; t<_LOGLEVEL_CATEGORIES; t++) + se.m_logLevel.setLogLevel((LogLevel::EventCategory)t, 7); + se.m_logLevel.setLogLevel(LogLevel::llError, 15); + m_statisticsListner.m_clients.push_back(se); + m_statisticsListner.m_logLevel = se.m_logLevel; + } + DBUG_VOID_RETURN; } @@ -671,8 +662,6 @@ MgmtSrvr::start(BaseString &error_string) // Set the initial confirmation count for subscribe requests confirm // from NDB nodes in the cluster. 
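The rewritten logLevelThreadRun() above drains m_started_nodes and m_log_level_requests one entry at a time, releasing the MutexVector lock while each entry is handled so that reporting threads can keep appending. A condensed sketch of that drain pattern, using standard containers in place of MutexVector:

#include <stdio.h>
#include <mutex>
#include <vector>

/* Simplified stand-in for the drain loop: pop the head, unlock while
 * handling it, then re-lock before testing the queue again. */
static void drainQueue(std::vector<unsigned>& queue, std::mutex& guard)
{
  guard.lock();
  while (!queue.empty()) {
    unsigned node = queue.front();
    queue.erase(queue.begin());
    guard.unlock();

    printf("handling node %u\n", node);  /* work done without the lock */

    guard.lock();
  }
  guard.unlock();
}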
// - theConfCount = getNodeCount(NDB_MGM_NODE_TYPE_NDB); - // Loglevel thread _logLevelThread = NdbThread_Create(logLevelThread_C, (void**)this, @@ -713,9 +702,6 @@ MgmtSrvr::~MgmtSrvr() if(_config != NULL) delete _config; - delete _nodeLogLevelList; - delete _clusterLogLevelList; - // End set log level thread void* res = 0; _isStopThread = true; @@ -736,6 +722,9 @@ MgmtSrvr::~MgmtSrvr() int MgmtSrvr::okToSendTo(NodeId processId, bool unCond) { + if(processId == 0) + return 0; + if (getNodeType(processId) != NDB_MGM_NODE_TYPE_NDB) return WRONG_PROCESS_TYPE; @@ -1540,175 +1529,72 @@ MgmtSrvr::status(int processId, return -1; } - - -//**************************************************************************** -//**************************************************************************** -int -MgmtSrvr::startStatisticEventReporting(int level) -{ - SetLogLevelOrd ll; - NodeId nodeId = 0; - - ll.clear(); - ll.setLogLevel(LogLevel::llStatistic, level); - - if (level > 0) { - _isStatPortActive = true; - } else { - _isStatPortActive = false; - - if (_isClusterLogStatActive) { - return 0; - } - } - - while (getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB)) { - setEventReportingLevelImpl(nodeId, ll); - } - - return 0; -} - -int -MgmtSrvr::setEventReportingLevel(int processId, const SetLogLevelOrd & ll, - bool isResend) -{ - for (Uint32 i = 0; i < ll.noOfEntries; i++) { - if (ll.theCategories[i] == LogLevel::llStatistic) { - if (ll.theLevels[i] > 0) { - _isClusterLogStatActive = true; - break; - } else { - _isClusterLogStatActive = false; - - if (_isStatPortActive) { - return 0; - } - break; - } - } // if (ll.theCategories - } // for (int i = 0 - - return setEventReportingLevelImpl(processId, ll, isResend); -} int MgmtSrvr::setEventReportingLevelImpl(int processId, - const SetLogLevelOrd & ll, - bool isResend) + const EventSubscribeReq& ll) { - Uint32 i; - for(i = 0; inext()) != NULL) { - if (n->getNodeId() == processId && - n->getCategory() == ll.theCategories[i]) { - - n->setLevel(ll.theLevels[i]); - found = true; - } - } - if (!found) { - _clusterLogLevelList->add(new NodeLogLevel(processId, ll)); - } - } - } - + int result = okToSendTo(processId, true); if (result != 0) { return result; } - NdbApiSignal* signal = getSignal(); - if (signal == NULL) { - return COULD_NOT_ALLOCATE_MEMORY; - } + NdbApiSignal signal(_ownReference); EventSubscribeReq * dst = - CAST_PTR(EventSubscribeReq, signal->getDataPtrSend()); - for(i = 0; itheCategories[i] = ll.theCategories[i]; - dst->theLevels[i] = ll.theLevels[i]; - } - - dst->noOfEntries = ll.noOfEntries; - dst->blockRef = _ownReference; + CAST_PTR(EventSubscribeReq, signal.getDataPtrSend()); - signal->set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ, - EventSubscribeReq::SignalLength); + * dst = ll; + + signal.set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ, + EventSubscribeReq::SignalLength); + + theFacade->lock_mutex(); + send(&signal, processId, NODE_TYPE_DB); + theFacade->unlock_mutex(); - result = sendSignal(processId, WAIT_SUBSCRIBE_CONF, signal, true); - if (result == -1) { - return SEND_OR_RECEIVE_FAILED; - } - else { - // Increment the conf counter - theConfCount++; - } - return 0; } //**************************************************************************** //**************************************************************************** int -MgmtSrvr::setNodeLogLevel(int processId, const SetLogLevelOrd & ll, - bool isResend) +MgmtSrvr::setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll) { - Uint32 i; - for(i = 0; inext()) != 
NULL) { - if (n->getNodeId() == processId && - n->getCategory() == ll.theCategories[i]) { - - n->setLevel(ll.theLevels[i]); - found = true; - } - } - if (!found) { - _clusterLogLevelList->add(new NodeLogLevel(processId, ll)); - } - } - } - int result = okToSendTo(processId, true); if (result != 0) { return result; } - NdbApiSignal* signal = getSignal(); - if (signal == NULL) { - return COULD_NOT_ALLOCATE_MEMORY; - } - - SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal->getDataPtrSend()); - - for(i = 0; itheCategories[i] = ll.theCategories[i]; - dst->theLevels[i] = ll.theLevels[i]; - } + NdbApiSignal signal(_ownReference); - dst->noOfEntries = ll.noOfEntries; + SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal.getDataPtrSend()); - signal->set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD, - SetLogLevelOrd::SignalLength); - - result = sendSignal(processId, NO_WAIT, signal, true); - if (result == -1) { - return SEND_OR_RECEIVE_FAILED; - } + * dst = ll; + + signal.set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD, + SetLogLevelOrd::SignalLength); + + theFacade->lock_mutex(); + theFacade->sendSignalUnCond(&signal, processId); + theFacade->unlock_mutex(); return 0; } +int +MgmtSrvr::send(NdbApiSignal* signal, Uint32 node, Uint32 node_type){ + Uint32 max = (node == 0) ? MAX_NODES : node + 1; + + for(; node < max; node++){ + while(nodeTypes[node] != node_type && node < max) node++; + if(nodeTypes[node] != node_type) + break; + theFacade->sendSignalUnCond(signal, node); + } + return 0; +} //**************************************************************************** //**************************************************************************** @@ -2003,7 +1889,7 @@ const char* MgmtSrvr::getErrorText(int errorCode) for (int i = 0; i < noOfErrorCodes; ++i) { if (errorCode == errorTable[i]._errorCode) { - return errorTable[i]._errorText.c_str(); + return errorTable[i]._errorText; } } @@ -2011,21 +1897,6 @@ const char* MgmtSrvr::getErrorText(int errorCode) return text; } -/***************************************************************************** - * Handle reception of various signals - *****************************************************************************/ - -int -MgmtSrvr::handleSTATISTICS_CONF(NdbApiSignal* signal) -{ - //ndbout << "MgmtSrvr::handleSTATISTICS_CONF" << endl; - - int x = signal->readData(1); - //ndbout << "MgmtSrvr::handleSTATISTICS_CONF, x: " << x << endl; - _statistics._test1 = x; - return 0; -} - void MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) { @@ -2049,51 +1920,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) } break; - case GSN_STATISTICS_CONF: - if (theWaitState != WAIT_STATISTICS) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - - return; - } - returnCode = handleSTATISTICS_CONF(signal); - if (returnCode != -1) { - theWaitState = NO_WAIT; - } - break; - - - case GSN_SET_VAR_CONF: - if (theWaitState != WAIT_SET_VAR) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - return; - } - theWaitState = NO_WAIT; - _setVarReqResult = 0; - break; - - case GSN_SET_VAR_REF: - if (theWaitState != WAIT_SET_VAR) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - return; - } - theWaitState = NO_WAIT; - _setVarReqResult = -1; - break; - case GSN_EVENT_SUBSCRIBE_CONF: - 
theConfCount--; // OK, we've received a conf message - if (theConfCount < 0) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - theConfCount = 0; - } break; case GSN_EVENT_REP: @@ -2276,20 +2103,19 @@ void MgmtSrvr::handleStatus(NodeId nodeId, bool alive) { if (alive) { - _startedNodeId = nodeId; // Used by logLevelThreadRun() + m_started_nodes.push_back(nodeId); Uint32 theData[25]; theData[0] = EventReport::Connected; theData[1] = nodeId; + eventReport(_ownNodeId, theData); } else { handleStopReply(nodeId, 0); - theConfCount++; // Increment the event subscr conf count because - + Uint32 theData[25]; theData[0] = EventReport::Disconnected; theData[1] = nodeId; - + eventReport(_ownNodeId, theData); - g_EventLogger.info("Lost connection to node %d", nodeId); } } @@ -2370,7 +2196,7 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, continue; found_matching_id= true; if(iter.get(CFG_TYPE_OF_SECTION, &type_c)) abort(); - if(type_c != type) + if(type_c != (unsigned)type) continue; found_matching_type= true; if (connected_nodes.get(tmp)) @@ -2483,77 +2309,18 @@ MgmtSrvr::getNextNodeId(NodeId * nodeId, enum ndb_mgm_node_type type) const return true; } +#include "Services.hpp" + void MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData) { const EventReport * const eventReport = (EventReport *)&theData[0]; - + EventReport::EventType type = eventReport->getEventType(); - if (type == EventReport::TransReportCounters || - type == EventReport::OperationReportCounters) { - - if (_isClusterLogStatActive) { - g_EventLogger.log(type, theData, nodeId); - } - - if (_isStatPortActive) { - char theTime[128]; - struct tm* tm_now; - time_t now; - now = time((time_t*)NULL); -#ifdef NDB_WIN32 - tm_now = localtime(&now); -#else - tm_now = gmtime(&now); -#endif - - snprintf(theTime, sizeof(theTime), - STATISTIC_DATE, - tm_now->tm_year + 1900, - tm_now->tm_mon, - tm_now->tm_mday, - tm_now->tm_hour, - tm_now->tm_min, - tm_now->tm_sec); - - char str[255]; - - if (type == EventReport::TransReportCounters) { - snprintf(str, sizeof(str), - STATISTIC_LINE, - theTime, - (int)now, - nodeId, - theData[1], - theData[2], - theData[3], - // theData[4], simple reads - theData[5], - theData[6], - theData[7], - theData[8]); - } else if (type == EventReport::OperationReportCounters) { - snprintf(str, sizeof(str), - OP_STATISTIC_LINE, - theTime, - (int)now, - nodeId, - theData[1]); - } - - if(m_statisticsListner != 0){ - m_statisticsListner->println_statistics(str); - } - } - - return; - - } // if (type == - // Log event g_EventLogger.log(type, theData, nodeId); - + m_statisticsListner.log(type, theData, nodeId); } /*************************************************************************** @@ -2981,3 +2748,7 @@ template class Vector; #if __SUNPRO_CC != 0x560 template bool SignalQueue::waitFor(Vector&, SigMatch*&, NdbApiSignal*&, unsigned); #endif + +template class MutexVector; +template class MutexVector; +template class MutexVector; diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index d7f9f7a1af3..5de39932bf4 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -28,8 +28,8 @@ #include #include "SignalQueue.hpp" #include - -#include "NodeLogLevelList.hpp" +#include +#include /** * @desc Block number for Management server. 
@@ -43,6 +43,29 @@ class Config; class SetLogLevelOrd; class SocketServer; +class MgmStatService : public EventLoggerBase +{ + friend class MgmtSrvr; +public: + struct StatListener : public EventLoggerBase { + NDB_SOCKET_TYPE m_socket; + }; + +private: + class MgmtSrvr * m_mgmsrv; + MutexVector m_clients; +public: + MgmStatService(class MgmtSrvr * m) : m_clients(5) { + m_mgmsrv = m; + } + + void add_listener(const StatListener&); + + void log(int eventType, const Uint32* theData, NodeId nodeId); + + void stopSessions(); +}; + /** * @class MgmtSrvr * @brief Main class for the management server. @@ -63,11 +86,6 @@ class SocketServer; class MgmtSrvr { public: - class StatisticsListner { - public: - virtual void println_statistics(const BaseString &s) = 0; - }; - // some compilers need all of this class Allocated_resources; friend class Allocated_resources; @@ -84,11 +102,6 @@ public: NodeBitmask m_reserved_nodes; }; - /** - * Set a reference to the socket server. - */ - void setStatisticsListner(StatisticsListner* listner); - /** * Start/initate the event log. */ @@ -150,15 +163,6 @@ public: STATIC_CONST( OPERATION_IN_PROGRESS = 6667 ); STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 ); - /** - * This class holds all statistical variables fetched with - * the getStatistics methods. - */ - class Statistics { // TODO, Real statistic data to be added - public: - int _test1; - }; - /** * This enum specifies the different signal loggig modes possible to set * with the setSignalLoggingMode method. @@ -206,7 +210,7 @@ public: typedef void (* EnterSingleCallback)(int nodeId, void * anyData, int errorCode); typedef void (* ExitSingleCallback)(int nodeId, void * anyData, - int errorCode); + int errorCode); /** * Lock configuration @@ -313,13 +317,6 @@ public: bool abort = false, int * stopCount = 0, StopCallback = 0, void * anyData = 0); - int setEventReportingLevel(int processId, - const class SetLogLevelOrd & logLevel, - bool isResend = false); - - int startStatisticEventReporting(int level = 5); - - struct BackupEvent { enum Event { BackupStarted = 1, @@ -377,22 +374,8 @@ public: // INVALID_LEVEL //************************************************************************** - /** - * Sets the Node's log level, i.e., its local event reporting. - * - * @param processId the DB node id. - * @param logLevel the log level. - * @param isResend Flag to indicate for resending log levels - * during node restart - - * @return 0 if successful or NO_CONTACT_WITH_PROCESS, - * SEND_OR_RECEIVE_FAILED, - * COULD_NOT_ALLOCATE_MEMORY - */ - int setNodeLogLevel(int processId, - const class SetLogLevelOrd & logLevel, - bool isResend = false); - + int setEventReportingLevelImpl(int processId, const EventSubscribeReq& ll); + int setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll); /** * Insert an error in a DB process. @@ -508,11 +491,6 @@ public: */ NodeId getPrimaryNode() const; - /** - * Returns the statistics port number. - * @return statistic port number. - */ - int getStatPort() const; /** * Returns the port number. * @return port number. 
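The MgmStatService declared above keeps one LogLevel per connected listener: an event is forwarded only to listeners whose level for that event's category reaches the event's threshold, and the subscription pushed to the data nodes is the per-category maximum over all listeners (LogLevel::set_max, as used in add_listener further down in Services.cpp). A rough sketch of the filtering step, with plain containers standing in for MutexVector and LogLevel:

#include <stdio.h>
#include <vector>

/* Simplified stand-ins: one threshold per event category per client. */
struct Client { unsigned level[12]; };

static void forwardEvent(const std::vector<Client>& clients,
                         unsigned category, unsigned threshold,
                         const char* text)
{
  for (size_t i = 0; i < clients.size(); i++) {
    /* Deliver only if this client asked for at least this level. */
    if (threshold <= clients[i].level[category])
      printf("client %u <- %s\n", (unsigned)i, text);
  }
}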
@@ -526,10 +504,7 @@ public: private: //************************************************************************** - int setEventReportingLevelImpl(int processId, - const class SetLogLevelOrd & logLevel, - bool isResend = false); - + int setEventReportingLevel(int processId, LogLevel::EventCategory, Uint32); /** * Check if it is possible to send a signal to a (DB) process @@ -563,10 +538,6 @@ private: Allocated_resources m_allocated_resources; struct in_addr m_connect_address[MAX_NODES]; - int _setVarReqResult; // The result of the SET_VAR_REQ response - Statistics _statistics; // handleSTATISTICS_CONF store the result here, - // and getStatistics reads it. - //************************************************************************** // Specific signal handling methods //************************************************************************** @@ -598,14 +569,6 @@ private: // Returns: - //************************************************************************** - int handleSTATISTICS_CONF(NdbApiSignal* signal); - //************************************************************************** - // Description: Handle reception of signal STATISTICS_CONF - // Parameters: - // signal: The recieved signal - // Returns: TODO, to be defined - //************************************************************************** - void handle_MGM_LOCK_CONFIG_REQ(NdbApiSignal *signal); void handle_MGM_UNLOCK_CONFIG_REQ(NdbApiSignal *signal); @@ -631,7 +594,6 @@ private: */ enum WaitSignalType { NO_WAIT, // We don't expect to receive any signal - WAIT_STATISTICS, // Accept STATISTICS_CONF WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF WAIT_SUBSCRIBE_CONF, // Accept event subscription confirmation WAIT_STOP, @@ -733,14 +695,6 @@ private: class SignalQueue m_signalRecvQueue; - enum ndb_mgm_node_type nodeTypes[MAX_NODES]; - - int theConfCount; // The number of expected conf signals - - StatisticsListner * m_statisticsListner; // Used for sending statistics info - bool _isStatPortActive; - bool _isClusterLogStatActive; - struct StopRecord { StopRecord(){ inUse = false; callback = 0; singleUserMode = false;} bool inUse; @@ -765,10 +719,16 @@ private: void handleStopReply(NodeId nodeId, Uint32 errCode); int translateStopRef(Uint32 errCode); - + bool _isStopThread; int _logLevelThreadSleep; - int _startedNodeId; + MutexVector m_started_nodes; + MutexVector m_log_level_requests; + LogLevel m_nodeLogLevel[MAX_NODES]; + enum ndb_mgm_node_type nodeTypes[MAX_NODES]; + friend class MgmApiSession; + friend class MgmStatService; + MgmStatService m_statisticsListner; /** * Handles the thread wich upon a 'Node is started' event will @@ -782,15 +742,13 @@ private: static void *signalRecvThread_C(void *); void signalRecvThreadRun(); - NodeLogLevelList* _nodeLogLevelList; - NodeLogLevelList* _clusterLogLevelList; - void backupCallback(BackupEvent &); BackupCallback m_backupCallback; BackupEvent m_lastBackupEvent; Config *_props; + int send(class NdbApiSignal* signal, Uint32 node, Uint32 node_type); public: /** * This method does not exist diff --git a/ndb/src/mgmsrv/NodeLogLevel.cpp b/ndb/src/mgmsrv/NodeLogLevel.cpp deleted file mode 100644 index 5271cdb0f2b..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevel.cpp +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include "NodeLogLevel.hpp" -// TODO_RONM: Clearly getCategory and getLevel is not correctly coded. Must be taken care of. - -NodeLogLevel::NodeLogLevel(int nodeId, const SetLogLevelOrd& ll) -{ - m_nodeId = nodeId; - m_logLevel = ll; -} - -NodeLogLevel::~NodeLogLevel() -{ -} - -int -NodeLogLevel::getNodeId() const -{ - return m_nodeId; -} - -Uint32 -NodeLogLevel::getCategory() const -{ - for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++) - { - return m_logLevel.theCategories[i]; - } - return 0; -} - -int -NodeLogLevel::getLevel() const -{ - for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++) - { - return m_logLevel.theLevels[i]; - } - return 0; -} - -void -NodeLogLevel::setLevel(int level) -{ - for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++) - { - m_logLevel.theLevels[i] = level; - } - -} - -SetLogLevelOrd -NodeLogLevel::getLogLevelOrd() const -{ - return m_logLevel; -} diff --git a/ndb/src/mgmsrv/NodeLogLevel.hpp b/ndb/src/mgmsrv/NodeLogLevel.hpp deleted file mode 100644 index 3ad758cde99..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevel.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef NODELOGLEVEL_H -#define NODELOGLEVEL_H - -#include - -#include - -/** - * Holds a DB node's log level settings for both local and event log levels. - * It only holds one log level setting even though SetLogLevelOrd can handle - * multiple log levels at once, it is not used in that way in the managment - * server. 
- * - * @version #@ $Id: NodeLogLevel.hpp,v 1.2 2003/07/05 17:40:22 elathal Exp $ - */ -class NodeLogLevel -{ -public: - NodeLogLevel(int nodeId, const SetLogLevelOrd& ll); - ~NodeLogLevel(); - - int getNodeId() const; - Uint32 getCategory() const; - int getLevel() const; - void setLevel(int level); - SetLogLevelOrd getLogLevelOrd() const; - -private: - NodeLogLevel(); - NodeLogLevel(const NodeLogLevel&); - bool operator == (const NodeLogLevel&); - NodeLogLevel operator = (const NodeLogLevel&); - - int m_nodeId; - SetLogLevelOrd m_logLevel; -}; - -#endif diff --git a/ndb/src/mgmsrv/NodeLogLevelList.cpp b/ndb/src/mgmsrv/NodeLogLevelList.cpp deleted file mode 100644 index 6c7c091c1a8..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevelList.cpp +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include - -#include "NodeLogLevelList.hpp" -#include "NodeLogLevel.hpp" - -// -// PUBLIC -// - -NodeLogLevelList::NodeLogLevelList() : - m_size(0), - m_pHeadNode(NULL), - m_pTailNode(NULL), - m_pCurrNode(NULL) -{ -} - -NodeLogLevelList::~NodeLogLevelList() -{ - removeAll(); -} - -void -NodeLogLevelList::add(NodeLogLevel* pNewNode) -{ - NodeLogLevelNode* pNode = new NodeLogLevelNode(); - - if (m_pHeadNode == NULL) - { - m_pHeadNode = pNode; - pNode->pPrev = NULL; - } - else - { - m_pTailNode->pNext = pNode; - pNode->pPrev = m_pTailNode; - } - m_pTailNode = pNode; - pNode->pNext = NULL; - pNode->pHandler = pNewNode; - - m_size++; -} - -bool -NodeLogLevelList::remove(NodeLogLevel* pRemoveNode) -{ - NodeLogLevelNode* pNode = m_pHeadNode; - bool removed = false; - do - { - if (pNode->pHandler == pRemoveNode) - { - removeNode(pNode); - removed = true; - break; - } - } while ( (pNode = next(pNode)) != NULL); - - return removed; -} - -void -NodeLogLevelList::removeAll() -{ - while (m_pHeadNode != NULL) - { - removeNode(m_pHeadNode); - } -} - -NodeLogLevel* -NodeLogLevelList::next() -{ - NodeLogLevel* pHandler = NULL; - if (m_pCurrNode == NULL) - { - m_pCurrNode = m_pHeadNode; - if (m_pCurrNode != NULL) - { - pHandler = m_pCurrNode->pHandler; - } - } - else - { - m_pCurrNode = next(m_pCurrNode); // Next node - if (m_pCurrNode != NULL) - { - pHandler = m_pCurrNode->pHandler; - } - } - - return pHandler; -} - -int -NodeLogLevelList::size() const -{ - return m_size; -} - -// -// PRIVATE -// - -NodeLogLevelList::NodeLogLevelNode* -NodeLogLevelList::next(NodeLogLevelNode* pNode) -{ - NodeLogLevelNode* pCurr = pNode; - if (pNode->pNext != NULL) - { - pCurr = pNode->pNext; - } - else - { - // Tail - pCurr = NULL; - } - return pCurr; -} - -NodeLogLevelList::NodeLogLevelNode* -NodeLogLevelList::prev(NodeLogLevelNode* pNode) -{ - NodeLogLevelNode* pCurr = pNode; - if (pNode->pPrev != NULL) // head - { - pCurr = pNode->pPrev; - } - else - { - // Head - pCurr = NULL; - } - - return pCurr; -} - -void 
-NodeLogLevelList::removeNode(NodeLogLevelNode* pNode) -{ - if (pNode->pPrev == NULL) // If head - { - m_pHeadNode = pNode->pNext; - } - else - { - pNode->pPrev->pNext = pNode->pNext; - } - - if (pNode->pNext == NULL) // if tail - { - m_pTailNode = pNode->pPrev; - } - else - { - pNode->pNext->pPrev = pNode->pPrev; - } - - pNode->pNext = NULL; - pNode->pPrev = NULL; - delete pNode->pHandler; // Delete log handler - delete pNode; - - m_size--; -} diff --git a/ndb/src/mgmsrv/NodeLogLevelList.hpp b/ndb/src/mgmsrv/NodeLogLevelList.hpp deleted file mode 100644 index 4a55ee211e2..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevelList.hpp +++ /dev/null @@ -1,93 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef NODELOGLEVELLIST_H -#define NODELOGLEVELLIST_H - -class NodeLogLevel; - -/** - * Provides a simple linked list of NodeLogLevel. - * - * @see NodeLogLevel - * @version #@ $Id: NodeLogLevelList.hpp,v 1.1 2002/08/09 12:53:50 eyualex Exp $ - */ -class NodeLogLevelList -{ -public: - /** - * Default Constructor. - */ - NodeLogLevelList(); - - /** - * Destructor. - */ - ~NodeLogLevelList(); - - /** - * Adds a new node. - * - * @param pNewHandler a new NodeLogLevel. - */ - void add(NodeLogLevel* pNewNode); - - /** - * Removes a NodeLogLevel from the list and call its destructor. - * - * @param pRemoveHandler the NodeLogLevel to remove - */ - bool remove(NodeLogLevel* pRemoveNode); - - /** - * Removes all items. - */ - void removeAll(); - - /** - * Returns the next node in the list. - * returns a node or NULL. - */ - NodeLogLevel* next(); - - /** - * Returns the size of the list. 
- */ - int size() const; -private: - /** List node */ - struct NodeLogLevelNode - { - NodeLogLevelNode* pPrev; - NodeLogLevelNode* pNext; - NodeLogLevel* pHandler; - }; - - NodeLogLevelNode* next(NodeLogLevelNode* pNode); - NodeLogLevelNode* prev(NodeLogLevelNode* pNode); - - void removeNode(NodeLogLevelNode* pNode); - - int m_size; - - NodeLogLevelNode* m_pHeadNode; - NodeLogLevelNode* m_pTailNode; - NodeLogLevelNode* m_pCurrNode; -}; - -#endif - - diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index c529e277e0e..8ce9367825f 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -133,7 +134,7 @@ ParserRow commands[] = { MGM_ARG("public key", String, Mandatory, "Public key"), MGM_CMD("get version", &MgmApiSession::getVersion, ""), - + MGM_CMD("get status", &MgmApiSession::getStatus, ""), MGM_CMD("get info clusterlog", &MgmApiSession::getInfoClusterLog, ""), @@ -236,7 +237,11 @@ ParserRow commands[] = { MGM_ARG("node", String, Mandatory, "Node"), MGM_ARG("parameter", String, Mandatory, "Parameter"), MGM_ARG("value", String, Mandatory, "Value"), - + + MGM_CMD("listen event", &MgmApiSession::listen_event, ""), + MGM_ARG("node", Int, Optional, "Node"), + MGM_ARG("filter", String, Mandatory, "Event category"), + MGM_END() }; @@ -289,7 +294,8 @@ MgmApiSession::runSession() { break; } } - NDB_CLOSE_SOCKET(m_socket); + if(m_socket >= 0) + NDB_CLOSE_SOCKET(m_socket); } #ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT @@ -554,7 +560,7 @@ MgmApiSession::getStatPort(Parser_t::Context &, const class Properties &) { m_output->println("get statport reply"); - m_output->println("tcpport: %d", m_mgmsrv.getStatPort()); + m_output->println("tcpport: %d", 0); m_output->println(""); } @@ -760,7 +766,6 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, BaseString categoryName, errorString; SetLogLevelOrd logLevel; int result; - logLevel.clear(); args.get("node", &node); args.get("category", categoryName); args.get("level", &level); @@ -779,14 +784,15 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, goto error; } - logLevel.setLogLevel(category, level); - result = m_mgmsrv.setEventReportingLevel(node, logLevel); + EventSubscribeReq req; + req.blockRef = 0; + req.noOfEntries = 1; + req.theCategories[0] = category; + req.theLevels[0] = level; + m_mgmsrv.m_log_level_requests.push_back(req); m_output->println("set cluster loglevel reply"); - if(result != 0) - m_output->println("result: %s", m_mgmsrv.getErrorText(result)); - else - m_output->println("result: Ok"); + m_output->println("result: Ok"); m_output->println(""); return; error: @@ -821,15 +827,15 @@ MgmApiSession::setLogLevel(Parser::Context &, goto error; } - logLevel.setLogLevel(category, level); - - result = m_mgmsrv.setNodeLogLevel(node, logLevel); - + EventSubscribeReq req; + req.blockRef = node; + req.noOfEntries = 1; + req.theCategories[0] = category; + req.theLevels[0] = level; + m_mgmsrv.m_log_level_requests.push_back(req); + m_output->println("set loglevel reply"); - if(result != 0) - m_output->println("result: %s", m_mgmsrv.getErrorText(result)); - else - m_output->println("result: Ok"); + m_output->println("result: Ok"); m_output->println(""); return; error: @@ -1248,33 +1254,91 @@ MgmApiSession::configChange(Parser_t::Context &, m_output->println(""); } -void -MgmStatService::println_statistics(const BaseString &line){ - MutexVector copy(m_sockets.size()); - m_sockets.lock(); - int i; - for(i = m_sockets.size() - 1; i >= 0; 
i--){ - if(println_socket(m_sockets[i], MAX_WRITE_TIMEOUT, line.c_str()) == -1){ - copy.push_back(m_sockets[i]); - m_sockets.erase(i, false); +NdbOut& +operator<<(NdbOut& out, const LogLevel & ll) +{ + out << "[LogLevel: "; + for(size_t i = 0; i<_LOGLEVEL_CATEGORIES; i++) + out << ll.getLogLevel((LogLevel::EventCategory)i) << " "; + out << "]"; +} + +void +MgmStatService::log(int eventType, const Uint32* theData, NodeId nodeId){ + + Uint32 threshold = 0; + LogLevel::EventCategory cat; + + for(unsigned i = 0; i= 0; i--){ - NDB_CLOSE_SOCKET(copy[i]); - copy.erase(i); + + char m_text[256]; + EventLogger::getText(m_text, sizeof(m_text), eventType, theData, nodeId); + + Vector copy; + m_clients.lock(); + int i; + for(i = m_clients.size() - 1; i >= 0; i--){ + if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)){ + if(m_clients[i].m_socket >= 0 && + println_socket(m_clients[i].m_socket, + MAX_WRITE_TIMEOUT, m_text) == -1){ + copy.push_back(m_clients[i].m_socket); + m_clients.erase(i, false); + } + } } - if(m_sockets.size() == 0 || false){ - m_mgmsrv->startStatisticEventReporting(0); + m_clients.unlock(); + + for(i = 0; (unsigned)im_log_level_requests.push_back(req); + } + } +} + +void +MgmStatService::add_listener(const StatListener& client){ + m_clients.push_back(client); + LogLevel tmp = m_logLevel; + tmp.set_max(client.m_logLevel); + + if(!(tmp == m_logLevel)){ + m_logLevel = tmp; + EventSubscribeReq req; + req = tmp; + req.blockRef = 0; + m_mgmsrv->m_log_level_requests.push_back(req); } } void MgmStatService::stopSessions(){ - for(int i = m_sockets.size() - 1; i >= 0; i--){ - NDB_CLOSE_SOCKET(m_sockets[i]); - m_sockets.erase(i); + for(int i = m_clients.size() - 1; i >= 0; i--){ + if(m_clients[i].m_socket >= 0){ + NDB_CLOSE_SOCKET(m_clients[i].m_socket); + m_clients.erase(i); + } } } @@ -1298,6 +1362,71 @@ MgmApiSession::setParameter(Parser_t::Context &, m_output->println(""); } +void +MgmApiSession::listen_event(Parser::Context & ctx, + Properties const & args) { + + BaseString node, param, value; + args.get("node", node); + args.get("filter", param); + + int result = 0; + BaseString msg; + + MgmStatService::StatListener le; + le.m_socket = m_socket; + + Vector list; + param.trim(); + param.split(list, " ,"); + for(size_t i = 0; i spec; + list[i].trim(); + list[i].split(spec, "=:"); + if(spec.size() != 2){ + msg.appfmt("Invalid filter specification: >%s< >%s< %d", + param.c_str(), list[i].c_str(), spec.size()); + result = -1; + goto done; + } + + spec[0].trim(); + spec[0].ndb_toupper(); + + LogLevel::EventCategory category; + if(!EventLogger::matchEventCategory(spec[0].c_str(), &category)) { + msg.appfmt("Unknown category: >%s<", spec[0].c_str()); + result = -1; + goto done; + } + + int level = atoi(spec[1].c_str()); + if(level < 0 || level > 15){ + msg.appfmt("Invalid level: >%s<", spec[1].c_str()); + result = -1; + goto done; + } + le.m_logLevel.setLogLevel(category, level); + } + + if(list.size() == 0){ + msg.appfmt("Empty filter specification"); + result = -1; + goto done; + } + + m_mgmsrv.m_statisticsListner.add_listener(le); + + m_stop = true; + m_socket = -1; + +done: + m_output->println("listen event"); + m_output->println("result: %d", result); + if(result != 0) + m_output->println("msg: %s", msg.c_str()); +} + template class MutexVector; template class Vector const*>; template class Vector; diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp index 9cf8b59be8f..e47820826b6 100644 --- a/ndb/src/mgmsrv/Services.hpp +++ b/ndb/src/mgmsrv/Services.hpp @@ -83,7 
+83,8 @@ public: void configChange(Parser_t::Context &ctx, const class Properties &args); void setParameter(Parser_t::Context &ctx, const class Properties &args); - + void listen_event(Parser_t::Context &ctx, const class Properties &args); + void repCommand(Parser_t::Context &ctx, const class Properties &args); }; @@ -103,28 +104,4 @@ public: } }; -class MgmStatService : public SocketServer::Service, - public MgmtSrvr::StatisticsListner -{ - class MgmtSrvr * m_mgmsrv; - MutexVector m_sockets; -public: - MgmStatService() : m_sockets(5) { - m_mgmsrv = 0; - } - - void setMgm(class MgmtSrvr * mgmsrv){ - m_mgmsrv = mgmsrv; - } - - SocketServer::Session * newSession(NDB_SOCKET_TYPE socket){ - m_sockets.push_back(socket); - m_mgmsrv->startStatisticEventReporting(5); - return 0; - } - - void stopSessions(); - - void println_statistics(const BaseString &line); -}; #endif diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index cecf1c1e499..6e45edb20b1 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -70,7 +70,6 @@ struct MgmGlobals { bool use_specific_ip; char * interface_name; int port; - int port_stats; /** The configuration of the cluster */ Config * cluster_config; @@ -169,8 +168,6 @@ NDB_MAIN(mgmsrv){ MgmApiService * mapi = new MgmApiService(); - MgmStatService * mstat = new MgmStatService(); - /**************************** * Read configuration files * ****************************/ @@ -230,13 +227,6 @@ NDB_MAIN(mgmsrv){ goto error_end; } - if(!glob.socketServer->setup(mstat, glob.port_stats, glob.interface_name)){ - ndbout_c("Unable to setup statistic port: %d!\nPlease check if the port" - " is already used.", glob.port_stats); - delete mstat; - goto error_end; - } - if(!glob.mgmObject->check_start()){ ndbout_c("Unable to check start management server."); ndbout_c("Probably caused by illegal initial configuration file."); @@ -267,10 +257,7 @@ NDB_MAIN(mgmsrv){ } //glob.mgmObject->saveConfig(); - - mstat->setMgm(glob.mgmObject); mapi->setMgm(glob.mgmObject); - glob.mgmObject->setStatisticsListner(mstat); char msg[256]; snprintf(msg, sizeof(msg), @@ -278,8 +265,8 @@ NDB_MAIN(mgmsrv){ ndbout_c(msg); g_EventLogger.info(msg); - snprintf(msg, 256, "Id: %d, Command port: %d, Statistics port: %d", - glob.localNodeId, glob.port, glob.port_stats); + snprintf(msg, 256, "Id: %d, Command port: %d", + glob.localNodeId, glob.port); ndbout_c(msg); g_EventLogger.info(msg); @@ -309,7 +296,6 @@ NDB_MAIN(mgmsrv){ MgmGlobals::MgmGlobals(){ // Default values port = 0; - port_stats = 0; config_filename = NULL; local_config_filename = NULL; interface_name = 0; From 454062deba8a1d58bd654055fba6a056beb06852 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Sep 2004 22:06:17 +0200 Subject: [PATCH 12/55] removed extra newline --- ndb/test/run-test/daily-devel-tests.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt index 723d241aa46..522bbde32bc 100644 --- a/ndb/test/run-test/daily-devel-tests.txt +++ b/ndb/test/run-test/daily-devel-tests.txt @@ -41,7 +41,6 @@ args: -n SingleUserMode T1 # # SYSTEM RESTARTS # - max-time: 1500 cmd: testSystemRestart args: -n SR3 T6 From 1de03e7b54374b151ac254e30ad592d808044285 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Sep 2004 22:15:51 +0200 Subject: [PATCH 13/55] Fix error handling in atrt-mysqltest-run --- ndb/test/run-test/atrt-mysql-test-run | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git 
a/ndb/test/run-test/atrt-mysql-test-run b/ndb/test/run-test/atrt-mysql-test-run index 3a044e43288..e36fe1f3882 100755 --- a/ndb/test/run-test/atrt-mysql-test-run +++ b/ndb/test/run-test/atrt-mysql-test-run @@ -1,5 +1,20 @@ #!/bin/sh set -e -x +p=`pwd` cd $MYSQL_BASE_DIR/mysql-test -./mysql-test-run --with-ndbcluster --ndbconnectstring=$NDB_CONNECTSTRING $* +./mysql-test-run --with-ndbcluster --ndbconnectstring=$NDB_CONNECTSTRING $* | tee $p/output.txt + +f=`grep -c fail $p/output.txt` +o=`grep -c pass $p/output.txt` + +if [ $o -gt 0 -a $f -eq 0 ] +then + echo "NDBT_ProgramExit: OK" + exit 0 +fi + +echo "NDBT_ProgramExit: Failed" +exit 1 + + From 9956b20790d88db6639096ef823b5950643b176c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Sep 2004 23:36:13 +0000 Subject: [PATCH 14/55] removed different CFG_ defined for connection host names added support for setting up multiple interfaces correct connect address for mgmt server bug, -l flag would be interpreted as connectstring added flag to remove all node id checks changed automatic allocation of ports somewhat ndb/include/mgmapi/mgmapi_config_parameters.h: removed different CFG_ defined for connection host names ndb/include/transporter/TransporterRegistry.hpp: added support for setting up multiple interfaces ndb/src/common/mgmcommon/ConfigRetriever.cpp: removed different CFG_ defined for connection host names ndb/src/common/mgmcommon/IPCConfig.cpp: added support for setting up multiple interfaces ndb/src/common/portlib/NdbTCP.cpp: added debug printouts ndb/src/common/transporter/TransporterRegistry.cpp: added support for setting up multiple interfaces ndb/src/common/util/SocketServer.cpp: added my_thread init to enable debug printouts ndb/src/mgmclient/CommandInterpreter.cpp: shortened lines ndb/src/mgmsrv/ConfigInfo.cpp: removed different CFG_ defined for connection host names changed automatic allocation of ports somewhat ndb/src/mgmsrv/MgmtSrvr.cpp: correct connect address for mgmt server ndb/src/mgmsrv/Services.cpp: shoretened lines ndb/src/mgmsrv/main.cpp: bug, -l flag would be interpreted as connectstring added flag to remove all node id checks --- ndb/include/mgmapi/mgmapi_config_parameters.h | 6 +- .../transporter/TransporterRegistry.hpp | 11 +- ndb/src/common/mgmcommon/ConfigRetriever.cpp | 4 +- ndb/src/common/mgmcommon/IPCConfig.cpp | 46 ++--- ndb/src/common/portlib/NdbTCP.cpp | 10 +- .../transporter/TransporterRegistry.cpp | 104 +++++----- ndb/src/common/util/SocketServer.cpp | 30 ++- ndb/src/mgmclient/CommandInterpreter.cpp | 17 +- ndb/src/mgmsrv/ConfigInfo.cpp | 38 +++- ndb/src/mgmsrv/MgmtSrvr.cpp | 193 ++++++++++++------ ndb/src/mgmsrv/Services.cpp | 3 +- ndb/src/mgmsrv/main.cpp | 16 +- 12 files changed, 296 insertions(+), 182 deletions(-) diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h index 4a4863298dd..6b157720f2b 100644 --- a/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -104,9 +104,9 @@ #define CFG_CONNECTION_NODE_1_SYSTEM 404 #define CFG_CONNECTION_NODE_2_SYSTEM 405 #define CFG_CONNECTION_SERVER_PORT 406 +#define CFG_CONNECTION_HOSTNAME_1 407 +#define CFG_CONNECTION_HOSTNAME_2 408 -#define CFG_TCP_HOSTNAME_1 450 -#define CFG_TCP_HOSTNAME_2 451 #define CFG_TCP_SERVER 452 #define CFG_TCP_SEND_BUFFER_SIZE 454 #define CFG_TCP_RECEIVE_BUFFER_SIZE 455 @@ -128,8 +128,6 @@ #define CFG_SCI_NODE2_ADAPTER0 555 #define CFG_SCI_NODE2_ADAPTER1 556 -#define CFG_OSE_HOSTNAME_1 600 -#define CFG_OSE_HOSTNAME_2 601 #define 
CFG_OSE_PRIO_A_SIZE 602 #define CFG_OSE_PRIO_B_SIZE 603 #define CFG_OSE_RECEIVE_ARRAY_SIZE 604 diff --git a/ndb/include/transporter/TransporterRegistry.hpp b/ndb/include/transporter/TransporterRegistry.hpp index 3c6c307406c..ac6291f9e57 100644 --- a/ndb/include/transporter/TransporterRegistry.hpp +++ b/ndb/include/transporter/TransporterRegistry.hpp @@ -218,15 +218,18 @@ public: void printState(); #endif - unsigned short m_service_port; - + class Transporter_interface { + public: + unsigned short m_service_port; + const char *m_interface; + }; + Vector m_transporter_interface; + void add_transporter_interface(const char *interface, unsigned short port); protected: private: void * callbackObj; - TransporterService *m_transporter_service; - char *m_interface_name; struct NdbThread *m_start_clients_thread; bool m_run_start_clients_thread; diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index 109c999852b..a2d6f6a3cea 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -344,7 +344,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 const char * name; struct in_addr addr; BaseString tmp; - if(!iter.get(CFG_TCP_HOSTNAME_1, &name) && strlen(name)){ + if(!iter.get(CFG_CONNECTION_HOSTNAME_1, &name) && strlen(name)){ if(Ndb_getInAddr(&addr, name) != 0){ tmp.assfmt("Unable to lookup/illegal hostname %s, " "connection from node %d to node %d", @@ -354,7 +354,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 } } - if(!iter.get(CFG_TCP_HOSTNAME_2, &name) && strlen(name)){ + if(!iter.get(CFG_CONNECTION_HOSTNAME_2, &name) && strlen(name)){ if(Ndb_getInAddr(&addr, name) != 0){ tmp.assfmt("Unable to lookup/illegal hostname %s, " "connection from node %d to node %d", diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp index a76c541f3f6..d7ad993c2af 100644 --- a/ndb/src/common/mgmcommon/IPCConfig.cpp +++ b/ndb/src/common/mgmcommon/IPCConfig.cpp @@ -345,18 +345,27 @@ IPCConfig::configureTransporters(Uint32 nodeId, const class ndb_mgm_configuration & config, class TransporterRegistry & tr){ - Uint32 noOfTransportersCreated= 0, server_port= 0; + Uint32 noOfTransportersCreated= 0; ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION); for(iter.first(); iter.valid(); iter.next()){ Uint32 nodeId1, nodeId2, remoteNodeId; + const char * remoteHostName= 0, * localHostName= 0; if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue; if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue; if(nodeId1 != nodeId && nodeId2 != nodeId) continue; remoteNodeId = (nodeId == nodeId1 ? nodeId2 : nodeId1); + { + const char * host1= 0, * host2= 0; + iter.get(CFG_CONNECTION_HOSTNAME_1, &host1); + iter.get(CFG_CONNECTION_HOSTNAME_2, &host2); + localHostName = (nodeId == nodeId1 ? host1 : host2); + remoteHostName = (nodeId == nodeId1 ? 
host2 : host1); + } + Uint32 sendSignalId = 1; Uint32 checksum = 1; if(iter.get(CFG_CONNECTION_SEND_SIGNAL_ID, &sendSignalId)) continue; @@ -365,14 +374,10 @@ IPCConfig::configureTransporters(Uint32 nodeId, Uint32 type = ~0; if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue; - Uint32 tmp_server_port= 0; - if(iter.get(CFG_CONNECTION_SERVER_PORT, &tmp_server_port)) break; + Uint32 server_port= 0; + if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break; if (nodeId <= nodeId1 && nodeId <= nodeId2) { - if (server_port && server_port != tmp_server_port) { - ndbout << "internal error in config setup of server ports line= " << __LINE__ << endl; - exit(-1); - } - server_port= tmp_server_port; + tr.add_transporter_interface(localHostName, server_port); } switch(type){ @@ -388,7 +393,7 @@ IPCConfig::configureTransporters(Uint32 nodeId, if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break; if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break; - conf.port= tmp_server_port; + conf.port= server_port; if(!tr.createTransporter(&conf)){ ndbout << "Failed to create SHM Transporter from: " @@ -437,14 +442,10 @@ IPCConfig::configureTransporters(Uint32 nodeId, case CONNECTION_TYPE_TCP:{ TCP_TransporterConfiguration conf; - const char * host1, * host2; - if(iter.get(CFG_TCP_HOSTNAME_1, &host1)) break; - if(iter.get(CFG_TCP_HOSTNAME_2, &host2)) break; - if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break; if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break; - conf.port= tmp_server_port; + conf.port= server_port; const char * proxy; if (!iter.get(CFG_TCP_PROXY, &proxy)) { if (strlen(proxy) > 0 && nodeId2 == nodeId) { @@ -455,8 +456,8 @@ IPCConfig::configureTransporters(Uint32 nodeId, conf.localNodeId = nodeId; conf.remoteNodeId = remoteNodeId; - conf.localHostName = (nodeId == nodeId1 ? host1 : host2); - conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1); + conf.localHostName = localHostName; + conf.remoteHostName = remoteHostName; conf.byteOrder = 0; conf.compression = 0; conf.checksum = checksum; @@ -470,19 +471,15 @@ IPCConfig::configureTransporters(Uint32 nodeId, } case CONNECTION_TYPE_OSE:{ OSE_TransporterConfiguration conf; - - const char * host1, * host2; - if(iter.get(CFG_OSE_HOSTNAME_1, &host1)) break; - if(iter.get(CFG_OSE_HOSTNAME_2, &host2)) break; - + if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.prioASignalSize)) break; if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.prioBSignalSize)) break; if(iter.get(CFG_OSE_RECEIVE_ARRAY_SIZE, &conf.receiveBufferSize)) break; conf.localNodeId = nodeId; conf.remoteNodeId = remoteNodeId; - conf.localHostName = (nodeId == nodeId1 ? host1 : host2); - conf.remoteHostName = (nodeId == nodeId1 ? 
host2 : host1); + conf.localHostName = localHostName; + conf.remoteHostName = remoteHostName; conf.byteOrder = 0; conf.compression = 0; conf.checksum = checksum; @@ -502,9 +499,6 @@ IPCConfig::configureTransporters(Uint32 nodeId, } } } - - tr.m_service_port= server_port; - return noOfTransportersCreated; } diff --git a/ndb/src/common/portlib/NdbTCP.cpp b/ndb/src/common/portlib/NdbTCP.cpp index a3d91f9c7b1..daa94c0c97c 100644 --- a/ndb/src/common/portlib/NdbTCP.cpp +++ b/ndb/src/common/portlib/NdbTCP.cpp @@ -15,6 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include @@ -27,22 +28,25 @@ static NdbMutex LOCK_gethostbyname = NDB_MUTEX_INITIALIZER; extern "C" int Ndb_getInAddr(struct in_addr * dst, const char *address) { + DBUG_ENTER("Ndb_getInAddr"); struct hostent * hostPtr; NdbMutex_Lock(&LOCK_gethostbyname); hostPtr = gethostbyname(address); if (hostPtr != NULL) { dst->s_addr = ((struct in_addr *) *hostPtr->h_addr_list)->s_addr; NdbMutex_Unlock(&LOCK_gethostbyname); - return 0; + DBUG_RETURN(0); } NdbMutex_Unlock(&LOCK_gethostbyname); /* Try it as aaa.bbb.ccc.ddd. */ dst->s_addr = inet_addr(address); if (dst->s_addr != INADDR_NONE) { - return 0; + DBUG_RETURN(0); } - return -1; + DBUG_PRINT("error",("inet_addr(%s) - %d - %s", + address, errno, strerror(errno))); + DBUG_RETURN(-1); } #if 0 diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index fe1d4b04786..5679c3c5834 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -98,9 +98,8 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd) TransporterRegistry::TransporterRegistry(void * callback, unsigned _maxTransporters, - unsigned sizeOfLongSignalMemory) { - - m_transporter_service= 0; + unsigned sizeOfLongSignalMemory) +{ nodeIdSpecified = false; maxTransporters = _maxTransporters; sendCounter = 1; @@ -150,7 +149,6 @@ TransporterRegistry::~TransporterRegistry() { delete[] theTransporters; delete[] performStates; delete[] ioStates; - #ifdef NDB_OSE_TRANSPORTER if(theOSEReceiver != NULL){ theOSEReceiver->destroyPhantom(); @@ -1159,55 +1157,67 @@ TransporterRegistry::stop_clients() return true; } +void +TransporterRegistry::add_transporter_interface(const char *interface, unsigned short port) +{ + DBUG_ENTER("TransporterRegistry::add_transporter_interface"); + DBUG_PRINT("enter",("interface=%s, port= %d", interface, port)); + if (interface && strlen(interface) == 0) + interface= 0; + + for (unsigned i= 0; i < m_transporter_interface.size(); i++) + { + Transporter_interface &tmp= m_transporter_interface[i]; + if (port != tmp.m_service_port) + continue; + if (interface != 0 && tmp.m_interface != 0 && + strcmp(interface, tmp.m_interface) == 0) + { + DBUG_VOID_RETURN; // found match, no need to insert + } + if (interface == 0 && tmp.m_interface == 0) + { + DBUG_VOID_RETURN; // found match, no need to insert + } + } + Transporter_interface t; + t.m_service_port= port; + t.m_interface= interface; + m_transporter_interface.push_back(t); + DBUG_PRINT("exit",("interface and port added")); + DBUG_VOID_RETURN; +} + bool TransporterRegistry::start_service(SocketServer& socket_server) { -#if 0 - for (int i= 0, n= 0; n < nTransporters; i++){ - Transporter * t = theTransporters[i]; - if (!t) - continue; - n++; - if (t->isServer) { - t->m_service = new TransporterService(new SocketAuthSimple("ndbd passwd")); - 
if(!socket_server.setup(t->m_service, t->m_r_port, 0)) - { - ndbout_c("Unable to setup transporter service port: %d!\n" - "Please check if the port is already used,\n" - "(perhaps a mgmt server is already running)", - m_service_port); - delete t->m_service; - return false; - } - } + if (m_transporter_interface.size() > 0 && nodeIdSpecified != true) + { + ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified"); + return false; } -#endif - if (m_service_port != 0) { - - m_transporter_service = new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd")); - - if (nodeIdSpecified != true) { - ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified"); + for (unsigned i= 0; i < m_transporter_interface.size(); i++) + { + Transporter_interface &t= m_transporter_interface[i]; + if (t.m_service_port == 0) + { + continue; + } + TransporterService *transporter_service = + new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd")); + if(!socket_server.setup(transporter_service, + t.m_service_port, t.m_interface)) + { + ndbout_c("Unable to setup transporter service port: %s:%d!\n" + "Please check if the port is already used,\n" + "(perhaps the node is already running)", + t.m_interface ? t.m_interface : "*", t.m_service_port); + delete transporter_service; return false; } - - //m_interface_name = "ndbd"; - m_interface_name = 0; - - if(!socket_server.setup(m_transporter_service, m_service_port, m_interface_name)) - { - ndbout_c("Unable to setup transporter service port: %d!\n" - "Please check if the port is already used,\n" - "(perhaps a mgmt server is already running)", - m_service_port); - delete m_transporter_service; - return false; - } - m_transporter_service->setTransporterRegistry(this); - } else - m_transporter_service= 0; - + transporter_service->setTransporterRegistry(this); + } return true; } @@ -1281,3 +1291,5 @@ NdbOut & operator <<(NdbOut & out, SignalHeader & sh){ out << "trace: " << (int)sh.theTrace << endl; return out; } + +template class Vector; diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index 0cc06a54496..380a8073a2c 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ b/ndb/src/common/util/SocketServer.cpp @@ -16,6 +16,7 @@ #include +#include #include @@ -83,7 +84,8 @@ bool SocketServer::setup(SocketServer::Service * service, unsigned short port, const char * intface){ - + DBUG_ENTER("SocketServer::setup"); + DBUG_PRINT("enter",("interface=%s, port=%d", intface, port)); struct sockaddr_in servaddr; memset(&servaddr, 0, sizeof(servaddr)); servaddr.sin_family = AF_INET; @@ -92,36 +94,44 @@ SocketServer::setup(SocketServer::Service * service, if(intface != 0){ if(Ndb_getInAddr(&servaddr.sin_addr, intface)) - return false; + DBUG_RETURN(false); } const NDB_SOCKET_TYPE sock = socket(AF_INET, SOCK_STREAM, 0); if (sock == NDB_INVALID_SOCKET) { - return false; + DBUG_PRINT("error",("socket() - %d - %s", + errno, strerror(errno))); + DBUG_RETURN(false); } const int on = 1; if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on)) == -1) { + DBUG_PRINT("error",("getsockopt() - %d - %s", + errno, strerror(errno))); NDB_CLOSE_SOCKET(sock); - return false; + DBUG_RETURN(false); } if (bind(sock, (struct sockaddr*) &servaddr, sizeof(servaddr)) == -1) { + DBUG_PRINT("error",("bind() - %d - %s", + errno, strerror(errno))); NDB_CLOSE_SOCKET(sock); - return false; + DBUG_RETURN(false); } if (listen(sock, m_maxSessions) == -1){ + DBUG_PRINT("error",("listen() - %d - %s", + errno, 
strerror(errno))); NDB_CLOSE_SOCKET(sock); - return false; + DBUG_RETURN(false); } ServiceInstance i; i.m_socket = sock; i.m_service = service; m_services.push_back(i); - return true; + DBUG_RETURN(true); } void @@ -177,8 +187,9 @@ void* socketServerThread_C(void* _ss){ SocketServer * ss = (SocketServer *)_ss; + my_thread_init(); ss->doRun(); - + my_thread_end(); NdbThread_Exit(0); return 0; } @@ -287,8 +298,10 @@ void* sessionThread_C(void* _sc){ SocketServer::Session * si = (SocketServer::Session *)_sc; + my_thread_init(); if(!transfer(si->m_socket)){ si->m_stopped = true; + my_thread_end(); NdbThread_Exit(0); return 0; } @@ -301,6 +314,7 @@ sessionThread_C(void* _sc){ } si->m_stopped = true; + my_thread_end(); NdbThread_Exit(0); return 0; } diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index 91d057f8c30..ba8e93edde9 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -673,8 +673,10 @@ CommandInterpreter::executeShutdown(char* parameters) if (mgm_id == 0) mgm_id= state->node_states[i].node_id; else { - ndbout << "Unable to locate management server, shutdown manually with #STOP" + ndbout << "Unable to locate management server, " + << "shutdown manually with STOP" << endl; + return; } } } @@ -721,11 +723,13 @@ const char *status_string(ndb_mgm_node_status status) static void print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it, - const char *proc_name, int no_proc, ndb_mgm_node_type type, int master_id) + const char *proc_name, int no_proc, ndb_mgm_node_type type, + int master_id) { int i; ndbout << "[" << proc_name - << "(" << ndb_mgm_get_node_type_string(type) << ")]\t" << no_proc << " node(s)" << endl; + << "(" << ndb_mgm_get_node_type_string(type) << ")]\t" + << no_proc << " node(s)" << endl; for(i=0; i < state->no_of_nodes; i++) { struct ndb_mgm_node_state *node_state= &(state->node_states[i]); if(node_state->node_type == type) { @@ -733,7 +737,9 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it, ndbout << "id=" << node_id; if(node_state->version != 0) { const char *hostname= node_state->connect_address; - if (hostname == 0 || strlen(hostname) == 0 || strcmp(hostname,"0.0.0.0") == 0) + if (hostname == 0 + || strlen(hostname) == 0 + || strcmp(hostname,"0.0.0.0") == 0) ndbout << " "; else ndbout << "\t@" << hostname; @@ -761,7 +767,8 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it, ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &config_hostname); if (config_hostname == 0 || config_hostname[0] == 0) config_hostname= "any host"; - ndbout << " (not connected, accepting connect from " << config_hostname << ")" << endl; + ndbout << " (not connected, accepting connect from " + << config_hostname << ")" << endl; } } } diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index ea19bc76d0e..1de9e0aa165 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -1529,7 +1529,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { }, { - CFG_TCP_HOSTNAME_1, + CFG_CONNECTION_HOSTNAME_1, "HostName1", "TCP", "Name/IP of computer on one side of the connection", @@ -1540,7 +1540,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { 0, 0 }, { - CFG_TCP_HOSTNAME_2, + CFG_CONNECTION_HOSTNAME_2, "HostName2", "TCP", "Name/IP of computer on one side of the connection", @@ -1935,7 +1935,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { }, { - 
CFG_OSE_HOSTNAME_1, + CFG_CONNECTION_HOSTNAME_1, "HostName1", "OSE", "Name of computer on one side of the connection", @@ -1946,7 +1946,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { 0, 0 }, { - CFG_OSE_HOSTNAME_2, + CFG_CONNECTION_HOSTNAME_2, "HostName2", "OSE", "Name of computer on one side of the connection", @@ -2902,26 +2902,38 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ DBUG_ENTER("fixPortNumber"); Uint32 id1= 0, id2= 0; + const char *hostName1; + const char *hostName2; require(ctx.m_currentSection->get("NodeId1", &id1)); require(ctx.m_currentSection->get("NodeId2", &id2)); + require(ctx.m_currentSection->get("HostName1", &hostName1)); + require(ctx.m_currentSection->get("HostName2", &hostName2)); + DBUG_PRINT("info",("NodeId1=%d HostName1=\"%s\"",id1,hostName1)); + DBUG_PRINT("info",("NodeId2=%d HostName2=\"%s\"",id2,hostName2)); + if (id1 > id2) { Uint32 tmp= id1; + const char *tmp_name= hostName1; + hostName1= hostName2; id1= id2; + hostName2= tmp_name; id2= tmp; } const Properties * node; require(ctx.m_config->get("Node", id1, &node)); - BaseString hostname; - require(node->get("HostName", hostname)); + BaseString hostname(hostName1); + // require(node->get("HostName", hostname)); if (hostname.c_str()[0] == 0) { - ctx.reportError("Hostname required on nodeid %d since it will act as server.", id1); + ctx.reportError("Hostname required on nodeid %d since it will " + "act as server.", id1); DBUG_RETURN(false); } Uint32 port= 0; - if (!node->get("ServerPort", &port) && !ctx.m_userProperties.get("ServerPort_", id1, &port)) { + if (!node->get("ServerPort", &port) && + !ctx.m_userProperties.get("ServerPort_", id1, &port)) { Uint32 adder= 0; { BaseString server_port_adder(hostname); @@ -2932,7 +2944,8 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ Uint32 base= 0; if (!ctx.m_userProperties.get("ServerPortBase", &base)){ - if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) && + if(!(ctx.m_userDefaults && + ctx.m_userDefaults->get("PortNumber", &base)) && !ctx.m_systemDefaults->get("PortNumber", &base)) { base= strtoll(NDB_BASE_PORT,0,0)+2; // ctx.reportError("Cannot retrieve base port number"); @@ -2945,12 +2958,15 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ } if(ctx.m_currentSection->contains("PortNumber")) { - ndbout << "PortNumber should no longer be specificied per connection, please remove from config. Will be changed to " << port << endl; + ndbout << "PortNumber should no longer be specificied " + << "per connection, please remove from config. 
" + << "Will be changed to " << port << endl; ctx.m_currentSection->put("PortNumber", port, true); } else ctx.m_currentSection->put("PortNumber", port); - DBUG_PRINT("info", ("connection %d-%d port %d host %s", id1, id2, port, hostname.c_str())); + DBUG_PRINT("info", ("connection %d-%d port %d host %s", + id1, id2, port, hostname.c_str())); DBUG_RETURN(true); } diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 8380f3fd86a..57039a791c3 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -62,6 +62,7 @@ #endif extern int global_flag_send_heartbeat_now; +extern int g_no_nodeid_checks; static void @@ -591,9 +592,25 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, _props = NULL; _ownNodeId= 0; + char my_hostname[256]; + struct sockaddr_in tmp_addr; + SOCKET_SIZE_TYPE addrlen= sizeof(tmp_addr); + if (!g_no_nodeid_checks) { + if (gethostname(my_hostname, sizeof(my_hostname))) { + ndbout << "error: gethostname() - " << strerror(errno) << endl; + exit(-1); + } + if (Ndb_getInAddr(&(((sockaddr_in*)&tmp_addr)->sin_addr),my_hostname)) { + ndbout << "error: Ndb_getInAddr(" << my_hostname << ") - " + << strerror(errno) << endl; + exit(-1); + } + } NodeId tmp= nodeId; BaseString error_string; - if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, 0, 0, error_string)){ + if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, + (struct sockaddr *)&tmp_addr, + &addrlen, error_string)){ ndbout << "Unable to obtain requested nodeid: " << error_string.c_str() << endl; exit(-1); @@ -1020,36 +1037,38 @@ int MgmtSrvr::versionNode(int processId, bool abort, VersionCallback callback, void * anyData) { + int version; + if(m_versionRec.inUse) return OPERATION_IN_PROGRESS; m_versionRec.callback = callback; m_versionRec.inUse = true ; - ClusterMgr::Node node; - int version; - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) { - if(m_versionRec.callback != 0) - m_versionRec.callback(processId, NDB_VERSION, this,0); - } - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_NDB) { - node = theFacade->theClusterMgr->getNodeInfo(processId); - version = node.m_info.m_version; - if(theFacade->theClusterMgr->getNodeInfo(processId).connected) - if(m_versionRec.callback != 0) - m_versionRec.callback(processId, version, this,0); - else - if(m_versionRec.callback != 0) - m_versionRec.callback(processId, 0, this,0); - + if (getOwnNodeId() == processId) + { + version= NDB_VERSION; } - - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) { + else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_NDB) + { + ClusterMgr::Node node= theFacade->theClusterMgr->getNodeInfo(processId); + if(node.connected) + version= node.m_info.m_version; + else + version= 0; + } + else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API || + getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) + { return sendVersionReq(processId); } + if(m_versionRec.callback != 0) + m_versionRec.callback(processId, version, this,0); m_versionRec.inUse = false ; - return 0; + m_versionRec.version[processId]= version; + + return 0; } int @@ -1460,17 +1479,14 @@ MgmtSrvr::status(int processId, Uint32 * nodegroup, Uint32 * connectCount) { - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) { + if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API || + getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) { if(versionNode(processId, false,0,0) ==0) * version = m_versionRec.version[processId]; else * version = 0; } - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) { - * version = NDB_VERSION; - } - const ClusterMgr::Node node = 
theFacade->theClusterMgr->getNodeInfo(processId); @@ -2337,12 +2353,19 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, SOCKET_SIZE_TYPE *client_addr_len, BaseString &error_string) { + DBUG_ENTER("MgmtSrvr::alloc_node_id"); + DBUG_PRINT("enter", ("nodeid=%d, type=%d, client_addr=%d", + *nodeId, type, client_addr)); + if (g_no_nodeid_checks) { + if (*nodeId == 0) { + error_string.appfmt("no-nodeid-ckecks set in manegment server.\n" + "node id must be set explicitly in connectstring"); + DBUG_RETURN(false); + } + DBUG_RETURN(true); + } Guard g(&f_node_id_mutex); -#if 0 - ndbout << "MgmtSrvr::getFreeNodeId type=" << type - << " *nodeid=" << *nodeId << endl; -#endif - + int no_mgm= 0; NodeBitmask connected_nodes(m_reserved_nodes); if (theFacade && theFacade->theClusterMgr) { for(Uint32 i = 0; i < MAX_NODES; i++) @@ -2350,19 +2373,21 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i); if (node.connected) connected_nodes.bitOR(node.m_state.m_connected_nodes); - } + } else if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM) + no_mgm++; } bool found_matching_id= false; bool found_matching_type= false; bool found_free_node= false; - const char *config_hostname = 0; + unsigned id_found= 0; + const char *config_hostname= 0; struct in_addr config_addr= {0}; int r_config_addr= -1; unsigned type_c= 0; - ndb_mgm_configuration_iterator iter(*(ndb_mgm_configuration *)_config->m_configValues, - CFG_SECTION_NODE); + ndb_mgm_configuration_iterator + iter(*(ndb_mgm_configuration *)_config->m_configValues, CFG_SECTION_NODE); for(iter.first(); iter.valid(); iter.next()) { unsigned tmp= 0; if(iter.get(CFG_NODE_ID, &tmp)) abort(); @@ -2377,8 +2402,9 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, continue; found_free_node= true; if(iter.get(CFG_NODE_HOST, &config_hostname)) abort(); - - if (config_hostname && config_hostname[0] != 0 && client_addr) { + if (config_hostname && config_hostname[0] == 0) + config_hostname= 0; + else if (client_addr) { // check hostname compatability const void *tmp_in= &(((sockaddr_in*)client_addr)->sin_addr); if((r_config_addr= Ndb_getInAddr(&config_addr, config_hostname)) != 0 @@ -2388,8 +2414,9 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, || memcmp(&tmp_addr, tmp_in, sizeof(config_addr)) != 0) { // not localhost #if 0 - ndbout << "MgmtSrvr::getFreeNodeId compare failed for \"" << config_hostname - << "\" id=" << tmp << endl; + ndbout << "MgmtSrvr::getFreeNodeId compare failed for \"" + << config_hostname + << "\" id=" << tmp << endl; #endif continue; } @@ -2405,22 +2432,59 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, } } } - *nodeId= tmp; - if (client_addr) - m_connect_address[tmp]= ((struct sockaddr_in *)client_addr)->sin_addr; - else - Ndb_getInAddr(&(m_connect_address[tmp]), "localhost"); - m_reserved_nodes.set(tmp); -#if 0 - ndbout << "MgmtSrvr::getFreeNodeId found type=" << type - << " *nodeid=" << *nodeId << endl; -#endif - return true; + if (*nodeId != 0 || + type != NDB_MGM_NODE_TYPE_MGM || + no_mgm == 1) { // any match is ok + id_found= tmp; + break; + } + if (id_found) { // mgmt server may only have one match + error_string.appfmt("Ambiguous node id's %d and %d.\n" + "Suggest specifying node id in connectstring,\n" + "or specifying unique host names in config file.", + id_found, tmp); + DBUG_RETURN(false); + } + if (config_hostname == 0) { + error_string.appfmt("Ambiguity for node id %d.\n" + "Suggest specifying node id in connectstring,\n" + "or specifying unique host names in config file,\n", + "or specifying just one mgmt 
server in config file.", + tmp); + DBUG_RETURN(false); + } + id_found= tmp; // mgmt server matched, check for more matches + } + + if (id_found) + { + *nodeId= id_found; + DBUG_PRINT("info", ("allocating node id %d",*nodeId)); + { + int r= 0; + if (client_addr) + m_connect_address[id_found]= + ((struct sockaddr_in *)client_addr)->sin_addr; + else if (config_hostname) + r= Ndb_getInAddr(&(m_connect_address[id_found]), config_hostname); + else { + char name[256]; + r= gethostname(name, sizeof(name)); + if (r == 0) { + name[sizeof(name)-1]= 0; + r= Ndb_getInAddr(&(m_connect_address[id_found]), name); + } + } + if (r) + m_connect_address[id_found].s_addr= 0; + } + m_reserved_nodes.set(id_found); + DBUG_RETURN(true); } if (found_matching_type && !found_free_node) { - // we have a temporary error which might be due to that we have got the latest - // connect status from db-nodes. Force update. + // we have a temporary error which might be due to that + // we have got the latest connect status from db-nodes. Force update. global_flag_send_heartbeat_now= 1; } @@ -2429,7 +2493,8 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, const char *alias, *str; alias= ndb_mgm_get_node_type_alias_string(type, &str); type_string.assfmt("%s(%s)", alias, str); - alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c, &str); + alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c, + &str); type_c_string.assfmt("%s(%s)", alias, str); } @@ -2440,9 +2505,11 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, error_string.appfmt("Connection done from wrong host ip %s.", inet_ntoa(((struct sockaddr_in *)(client_addr))->sin_addr)); else - error_string.appfmt("No free node id found for %s.", type_string.c_str()); + error_string.appfmt("No free node id found for %s.", + type_string.c_str()); else - error_string.appfmt("No %s node defined in config file.", type_string.c_str()); + error_string.appfmt("No %s node defined in config file.", + type_string.c_str()); else error_string.append("No nodes defined in config file."); } else { @@ -2451,19 +2518,23 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, if (found_free_node) { // have to split these into two since inet_ntoa overwrites itself error_string.appfmt("Connection with id %d done from wrong host ip %s,", - *nodeId, inet_ntoa(((struct sockaddr_in *)(client_addr))->sin_addr)); + *nodeId, inet_ntoa(((struct sockaddr_in *) + (client_addr))->sin_addr)); error_string.appfmt(" expected %s(%s).", config_hostname, - r_config_addr ? "lookup failed" : inet_ntoa(config_addr)); + r_config_addr ? 
+ "lookup failed" : inet_ntoa(config_addr)); } else - error_string.appfmt("Id %d already allocated by another node.", *nodeId); + error_string.appfmt("Id %d already allocated by another node.", + *nodeId); else error_string.appfmt("Id %d configured as %s, connect attempted as %s.", - *nodeId, type_c_string.c_str(), type_string.c_str()); + *nodeId, type_c_string.c_str(), + type_string.c_str()); else - error_string.appfmt("No node defined with id=%d in config file.", *nodeId); + error_string.appfmt("No node defined with id=%d in config file.", + *nodeId); } - - return false; + DBUG_RETURN(false); } bool diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index c529e277e0e..8e9aff85824 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -418,7 +418,8 @@ MgmApiSession::get_nodeid(Parser_t::Context &, &addr, &addrlen, error_string)){ const char *alias; const char *str; - alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)nodetype, &str); + alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type) + nodetype, &str); m_output->println(cmd); m_output->println("result: %s", error_string.c_str()); m_output->println(""); diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 323a836cdd4..fbee81ccf36 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -82,6 +82,7 @@ struct MgmGlobals { SocketServer * socketServer; }; +int g_no_nodeid_checks= 0; static MgmGlobals glob; @@ -118,7 +119,9 @@ struct getargs args[] = { "Specify configuration file connect string (will default use Ndb.cfg if available)", "filename" }, { "interactive", 0, arg_flag, &glob.interactive, - "Run interactive. Not supported but provided for testing purposes", "" }, + "Run interactive. Not supported but provided for testing purposes", "" }, + { "no-nodeid-checks", 0, arg_flag, &g_no_nodeid_checks, + "Do not provide any node id checks", "" }, { "nodaemon", 0, arg_flag, &glob.non_interactive, "Don't run as daemon, but don't read from stdin", "non-interactive" } }; @@ -336,17 +339,12 @@ MgmGlobals::~MgmGlobals(){ * @fn readLocalConfig * @param glob : Global variables * @return true if success, false otherwise. - * - * How to get LOCAL CONFIGURATION FILE: - * 1. Use local config file name (-l) - * 2. Use environment NDB_HOME + Ndb.cfg - * If NDB_HOME is not set this results in reading from local dir */ static bool readLocalConfig(){ // Read local config file LocalConfig lc; - if(!lc.init(glob.local_config_filename)){ + if(!lc.init(0,glob.local_config_filename)){ lc.printError(); return false; } @@ -360,10 +358,6 @@ readLocalConfig(){ * @fn readGlobalConfig * @param glob : Global variables * @return true if success, false otherwise. - * - * How to get the GLOBAL CONFIGURATION: - * 1. Use config file name (this is a text file)(-c) - * 2. 
Use name from line 2 of local config file, ex: file:///c/ndb/Ndb_cfg.bin */ static bool readGlobalConfig() { From 254d4e87d04fd0d37183489504b31ee90b05d5f0 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 00:12:03 +0000 Subject: [PATCH 15/55] corrected mistake in counting --- ndb/src/mgmsrv/MgmtSrvr.cpp | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 4d96b629b26..7c2298d4773 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2193,16 +2193,17 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, Guard g(&f_node_id_mutex); int no_mgm= 0; NodeBitmask connected_nodes(m_reserved_nodes); - if (theFacade && theFacade->theClusterMgr) { - for(Uint32 i = 0; i < MAX_NODES; i++) - if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) { - const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i); - if (node.connected) - connected_nodes.bitOR(node.m_state.m_connected_nodes); - } else if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM) - no_mgm++; + for(Uint32 i = 0; i < MAX_NODES; i++) + { + if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB && + theFacade && theFacade->theClusterMgr) { + const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i); + if (node.connected) { + connected_nodes.bitOR(node.m_state.m_connected_nodes); + } + } else if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM) + no_mgm++; } - bool found_matching_id= false; bool found_matching_type= false; bool found_free_node= false; @@ -2274,7 +2275,7 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, if (config_hostname == 0) { error_string.appfmt("Ambiguity for node id %d.\n" "Suggest specifying node id in connectstring,\n" - "or specifying unique host names in config file,\n", + "or specifying unique host names in config file,\n" "or specifying just one mgmt server in config file.", tmp); DBUG_RETURN(false); @@ -2795,15 +2796,15 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, switch(p_type){ case 0: res = i2.set(param, val_32); - ndbout_c("Updateing node %d param: %d to %d", node, param, val_32); + ndbout_c("Updating node %d param: %d to %d", node, param, val_32); break; case 1: res = i2.set(param, val_64); - ndbout_c("Updateing node %d param: %d to %Ld", node, param, val_32); + ndbout_c("Updating node %d param: %d to %Ld", node, param, val_32); break; case 2: res = i2.set(param, val_char); - ndbout_c("Updateing node %d param: %d to %s", node, param, val_char); + ndbout_c("Updating node %d param: %d to %s", node, param, val_char); break; default: abort(); From 08ffa2988de9cb8e672cc5e9977755ed791e1ea3 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 08:28:22 +0200 Subject: [PATCH 16/55] Moved event category match functionality into mgmapi Removed grep introduced bogus mgm call BitKeeper/deleted/.del-LogLevel.cpp~314a6bada2df40a8: Delete: ndb/src/common/debugger/LogLevel.cpp ndb/include/debugger/EventLogger.hpp: Removed match functionality and put into mgmapi ndb/include/kernel/LogLevel.hpp: Update LogLevel ndb/include/mgmapi/mgmapi_config_parameters.h: fix parameter ndb/src/common/debugger/EventLogger.cpp: Removed match functionality and put into mgmapi ndb/src/common/debugger/Makefile.am: removed LogLevel.cpp ndb/src/cw/cpcd/APIService.cpp: compiler warning ndb/src/cw/cpcd/CPCD.cpp: compiler warning ndb/src/cw/cpcd/CPCD.hpp: compiler warning ndb/src/cw/cpcd/main.cpp: compiler warning ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: Changed name of config parameter 
ndb/src/kernel/vm/Configuration.cpp: Changed name of config parameter --- ndb/include/debugger/EventLogger.hpp | 33 ---- ndb/include/kernel/LogLevel.hpp | 82 ++-------- ndb/include/mgmapi/mgmapi.h | 48 ++++-- ndb/include/mgmapi/mgmapi_config_parameters.h | 25 +-- ndb/src/common/debugger/EventLogger.cpp | 55 ------- ndb/src/common/debugger/LogLevel.cpp | 30 ---- ndb/src/common/debugger/Makefile.am | 2 +- ndb/src/cw/cpcd/APIService.cpp | 12 +- ndb/src/cw/cpcd/CPCD.cpp | 2 +- ndb/src/cw/cpcd/CPCD.hpp | 2 +- ndb/src/cw/cpcd/main.cpp | 8 +- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 2 +- ndb/src/kernel/vm/Configuration.cpp | 2 +- ndb/src/mgmapi/mgmapi.cpp | 52 +++++- ndb/src/mgmclient/CommandInterpreter.cpp | 148 +++++++----------- ndb/src/mgmsrv/CommandInterpreter.cpp | 10 +- ndb/src/mgmsrv/MgmtSrvr.cpp | 2 +- ndb/src/mgmsrv/Services.cpp | 58 +++---- ndb/test/run-test/atrt-mysql-test-run | 2 - 19 files changed, 209 insertions(+), 366 deletions(-) delete mode 100644 ndb/src/common/debugger/LogLevel.cpp diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp index 36cece6f22a..9a624559d16 100644 --- a/ndb/include/debugger/EventLogger.hpp +++ b/ndb/include/debugger/EventLogger.hpp @@ -33,39 +33,6 @@ public: */ LogLevel m_logLevel; - /** - * Find a category matching the string - * - * @param str string to match. - * @param cat the event category. - * @param exactMatch only do exact matching. - * - * @return TRUE if match is found, then cat is modified - * FALSE if match is not found - */ - static bool matchEventCategory(const char * str, - LogLevel::EventCategory * cat, - bool exactMatch = false); - - /** - * Returns category name or NULL if not found. - * - * @param cat the event category. - * @return category name. - */ - static const char * getEventCategoryName(LogLevel::EventCategory cat); - - /** - * Specifies allowed event categories/log levels. 
- */ - struct EventCategoryName { - LogLevel::EventCategory category; - const char * name; - }; - - static const EventCategoryName eventCategoryNames[]; - static const Uint32 noOfEventCategoryNames; - /** * This matrix defines which event should be printed when * diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp index e3a81263dcb..21450917f03 100644 --- a/ndb/include/kernel/LogLevel.hpp +++ b/ndb/include/kernel/LogLevel.hpp @@ -45,81 +45,29 @@ public: * Copy operator */ LogLevel & operator= (const LogLevel &); - - static const Uint32 MIN_LOGLEVEL_ID = CFG_LOGLEVEL_STARTUP; - + enum EventCategory { - /** - * Events during all kind of startups - */ - llStartUp = CFG_LOGLEVEL_STARTUP - MIN_LOGLEVEL_ID, - - /** - * Events during shutdown - */ - llShutdown = CFG_LOGLEVEL_SHUTDOWN - MIN_LOGLEVEL_ID, - - /** - * Transaction statistics - * Job level - * TCP/IP speed - */ - llStatistic = CFG_LOGLEVEL_STATISTICS - MIN_LOGLEVEL_ID, - - /** - * Checkpoints - */ - llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - MIN_LOGLEVEL_ID, - - /** - * Events during node restart - */ - llNodeRestart = CFG_LOGLEVEL_NODERESTART - MIN_LOGLEVEL_ID, - - /** - * Events related to connection / communication - */ - llConnection = CFG_LOGLEVEL_CONNECTION - MIN_LOGLEVEL_ID, - - /** - * Assorted event w.r.t unexpected happenings - */ - llError = CFG_LOGLEVEL_ERROR - MIN_LOGLEVEL_ID, - - /** - * Assorted event w.r.t warning - */ - llWarning = CFG_LOGLEVEL_WARNING - MIN_LOGLEVEL_ID, - - /** - * Assorted event w.r.t information - */ - llInfo = CFG_LOGLEVEL_INFO - MIN_LOGLEVEL_ID, - - /** - * Events related to global replication - */ - llGrep = CFG_LOGLEVEL_GREP - MIN_LOGLEVEL_ID + llStartUp = CFG_LOGLEVEL_STARTUP - CFG_MIN_LOGLEVEL, + llShutdown = CFG_LOGLEVEL_SHUTDOWN - CFG_MIN_LOGLEVEL, + llStatistic = CFG_LOGLEVEL_STATISTICS - CFG_MIN_LOGLEVEL, + llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - CFG_MIN_LOGLEVEL, + llNodeRestart = CFG_LOGLEVEL_NODERESTART - CFG_MIN_LOGLEVEL, + llConnection = CFG_LOGLEVEL_CONNECTION - CFG_MIN_LOGLEVEL, + llInfo = CFG_LOGLEVEL_INFO - CFG_MIN_LOGLEVEL, + llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL, + llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL, + llGrep = CFG_LOGLEVEL_GREP - CFG_MIN_LOGLEVEL, + llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL, }; - struct LogLevelCategoryName { - const char* name; - }; - - /** - * Log/event level category names. Remember to update the names whenever - * a new category is added. 
- */ - static const LogLevelCategoryName LOGLEVEL_CATEGORY_NAME[]; - /** * No of categories */ -#define _LOGLEVEL_CATEGORIES 10 +#define _LOGLEVEL_CATEGORIES (CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1); static const Uint32 LOGLEVEL_CATEGORIES = _LOGLEVEL_CATEGORIES; - + void clear(); - + /** * Note level is valid as 0-15 */ diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h index 9caf1c86c4d..53f2c118156 100644 --- a/ndb/include/mgmapi/mgmapi.h +++ b/ndb/include/mgmapi/mgmapi.h @@ -254,19 +254,34 @@ extern "C" { * Log categories */ enum ndb_mgm_event_category { - NDB_MGM_EVENT_CATEGORY_STARTUP, ///< Events during all kinds - ///< of startups - NDB_MGM_EVENT_CATEGORY_SHUTDOWN, ///< Events during shutdown - NDB_MGM_EVENT_CATEGORY_STATISTIC, ///< Transaction statistics - ///< (Job level, TCP/IP speed) - NDB_MGM_EVENT_CATEGORY_CHECKPOINT, ///< Checkpoints - NDB_MGM_EVENT_CATEGORY_NODE_RESTART, ///< Events during node restart - NDB_MGM_EVENT_CATEGORY_CONNECTION, ///< Events related to connection - ///< and communication - NDB_MGM_EVENT_CATEGORY_ERROR ///< Assorted event w.r.t. - ///< unexpected happenings - }; + NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1, ///< Invalid + /** + * Events during all kinds of startups + */ + NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP, + + /** + * Events during shutdown + */ + NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN, + /** + * Transaction statistics (Job level, TCP/IP speed) + */ + NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS, + NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT, + NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART, + NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION, + NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG, + NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO, + NDB_MGM_EVENT_CATEGORY_WARNING = CFG_LOGLEVEL_WARNING, + NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR, + NDB_MGM_EVENT_CATEGORY_GREP = CFG_LOGLEVEL_GREP, + + NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL, + NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL + }; + /***************************************************************************/ /** * @name Functions: Error Handling @@ -402,6 +417,9 @@ extern "C" { */ const char * ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status); + ndb_mgm_event_category ndb_mgm_match_event_category(const char *); + const char * ndb_mgm_get_event_category_string(enum ndb_mgm_event_category); + /** @} *********************************************************************/ /** * @name Functions: State of cluster @@ -562,8 +580,7 @@ extern "C" { */ int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId, - /*enum ndb_mgm_event_category category*/ - char * category, + enum ndb_mgm_event_category category, int level, struct ndb_mgm_reply* reply); @@ -579,8 +596,7 @@ extern "C" { */ int ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, - /*enum ndb_mgm_event_category category*/ - char * category, + enum ndb_mgm_event_category category, int level, struct ndb_mgm_reply* reply); diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h index 6b157720f2b..8bd7d1f69f8 100644 --- a/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -1,7 +1,6 @@ #ifndef MGMAPI_CONFIG_PARAMTERS_H #define MGMAPI_CONFIG_PARAMTERS_H - #define CFG_SYS_NAME 3 #define CFG_SYS_PRIMARY_MGM_NODE 1 #define CFG_SYS_CONFIG_GENERATION 2 @@ -64,16 +63,6 @@ #define 
CFG_DB_BACKUP_LOG_BUFFER_MEM 135 #define CFG_DB_BACKUP_WRITE_SIZE 136 -#define CFG_LOGLEVEL_STARTUP 137 -#define CFG_LOGLEVEL_SHUTDOWN 138 -#define CFG_LOGLEVEL_STATISTICS 139 -#define CFG_LOGLEVEL_CHECKPOINT 140 -#define CFG_LOGLEVEL_NODERESTART 141 -#define CFG_LOGLEVEL_CONNECTION 142 -#define CFG_LOGLEVEL_INFO 143 -#define CFG_LOGLEVEL_WARNING 144 -#define CFG_LOGLEVEL_ERROR 145 -#define CFG_LOGLEVEL_GREP 146 #define CFG_LOG_DESTINATION 147 #define CFG_DB_DISCLESS 148 @@ -95,6 +84,20 @@ #define CFG_NODE_ARBIT_RANK 200 #define CFG_NODE_ARBIT_DELAY 201 +#define CFG_MIN_LOGLEVEL 250 +#define CFG_LOGLEVEL_STARTUP 250 +#define CFG_LOGLEVEL_SHUTDOWN 251 +#define CFG_LOGLEVEL_STATISTICS 252 +#define CFG_LOGLEVEL_CHECKPOINT 253 +#define CFG_LOGLEVEL_NODERESTART 254 +#define CFG_LOGLEVEL_CONNECTION 255 +#define CFG_LOGLEVEL_INFO 256 +#define CFG_LOGLEVEL_WARNING 257 +#define CFG_LOGLEVEL_ERROR 258 +#define CFG_LOGLEVEL_GREP 259 +#define CFG_LOGLEVEL_DEBUG 260 +#define CFG_MAX_LOGLEVEL 260 + #define CFG_MGM_PORT 300 #define CFG_CONNECTION_NODE_1 400 diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index 2e818c04e02..bd066e65c94 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -113,26 +113,6 @@ const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = { const Uint32 EventLoggerBase::matrixSize = sizeof(EventLoggerBase::matrix)/ sizeof(EventRepLogLevelMatrix); -/** - * Specifies allowed event categories/log levels that can be set from - * the Management API/interactive shell. - */ -const EventLoggerBase::EventCategoryName -EventLoggerBase::eventCategoryNames[] = { - { LogLevel::llStartUp, "STARTUP" }, - { LogLevel::llStatistic, "STATISTICS" }, - { LogLevel::llCheckpoint, "CHECKPOINT" }, - { LogLevel::llNodeRestart, "NODERESTART" }, - { LogLevel::llConnection, "CONNECTION" }, - { LogLevel::llInfo, "INFO" }, - { LogLevel::llGrep, "GREP" } -}; - -const Uint32 -EventLoggerBase::noOfEventCategoryNames = - sizeof(EventLoggerBase::eventCategoryNames)/ - sizeof(EventLoggerBase::EventCategoryName); - const char* EventLogger::getText(char * m_text, size_t m_text_len, int type, @@ -1287,41 +1267,6 @@ EventLogger::getText(char * m_text, size_t m_text_len, return m_text; } -bool -EventLoggerBase::matchEventCategory(const char * str, - LogLevel::EventCategory * cat, - bool exactMatch){ - unsigned i; - if(cat == 0 || str == 0) - return false; - - char * tmp = strdup(str); - for(i = 0; i - -const LogLevel::LogLevelCategoryName LogLevel::LOGLEVEL_CATEGORY_NAME[] = { - { "LogLevelStartup" }, - { "LogLevelShutdown" }, - { "LogLevelStatistic" }, - { "LogLevelCheckpoint" }, - { "LogLevelNodeRestart" }, - { "LogLevelConnection" }, - { "LogLevelError" }, - { "LogLevelWarning" }, - { "LogLevelInfo" }, - { "LogLevelGrep" } -}; diff --git a/ndb/src/common/debugger/Makefile.am b/ndb/src/common/debugger/Makefile.am index 0278d0d2ba0..d0fb30717cd 100644 --- a/ndb/src/common/debugger/Makefile.am +++ b/ndb/src/common/debugger/Makefile.am @@ -2,7 +2,7 @@ SUBDIRS = signaldata noinst_LTLIBRARIES = libtrace.la -libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp LogLevel.cpp EventLogger.cpp GrepError.cpp +libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp include $(top_srcdir)/ndb/config/common.mk.am include $(top_srcdir)/ndb/config/type_kernel.mk.am diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp index 
46b043c7004..de0e40cebfc 100644 --- a/ndb/src/cw/cpcd/APIService.cpp +++ b/ndb/src/cw/cpcd/APIService.cpp @@ -47,7 +47,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ fun, \ - desc } + desc, 0 } #define CPCD_ARG(name, type, opt, desc) \ { name, \ @@ -58,7 +58,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - desc } + desc, 0 } #define CPCD_ARG2(name, type, opt, min, max, desc) \ { name, \ @@ -69,7 +69,7 @@ ParserRow::IgnoreMinMax, \ min, max, \ 0, \ - desc } + desc, 0 } #define CPCD_END() \ { 0, \ @@ -80,7 +80,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - 0 } + 0, 0 } #define CPCD_CMD_ALIAS(name, realName, fun) \ { name, \ @@ -91,7 +91,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - 0 } + 0, 0 } #define CPCD_ARG_ALIAS(name, realName, fun) \ { name, \ @@ -102,7 +102,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - 0 } + 0, 0 } const ParserRow commands[] = diff --git a/ndb/src/cw/cpcd/CPCD.cpp b/ndb/src/cw/cpcd/CPCD.cpp index 44db10422b9..bc9f350755f 100644 --- a/ndb/src/cw/cpcd/CPCD.cpp +++ b/ndb/src/cw/cpcd/CPCD.cpp @@ -378,7 +378,7 @@ CPCD::getProcessList() { } void -CPCD::RequestStatus::err(enum RequestStatusCode status, char *msg) { +CPCD::RequestStatus::err(enum RequestStatusCode status, const char *msg) { m_status = status; snprintf(m_errorstring, sizeof(m_errorstring), "%s", msg); } diff --git a/ndb/src/cw/cpcd/CPCD.hpp b/ndb/src/cw/cpcd/CPCD.hpp index 4a7cab23bab..a5c0bef1dac 100644 --- a/ndb/src/cw/cpcd/CPCD.hpp +++ b/ndb/src/cw/cpcd/CPCD.hpp @@ -91,7 +91,7 @@ public: RequestStatus() { m_status = OK; m_errorstring[0] = '\0'; }; /** @brief Sets an errorcode and a printable message */ - void err(enum RequestStatusCode, char *); + void err(enum RequestStatusCode, const char *); /** @brief Returns the error message */ char *getErrMsg() { return m_errorstring; }; diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp index 913c31de1f7..207b81bfa89 100644 --- a/ndb/src/cw/cpcd/main.cpp +++ b/ndb/src/cw/cpcd/main.cpp @@ -28,12 +28,12 @@ #include "common.hpp" -static char *work_dir = CPCD_DEFAULT_WORK_DIR; +static const char *work_dir = CPCD_DEFAULT_WORK_DIR; static int port = CPCD_DEFAULT_TCP_PORT; static int use_syslog = 0; -static char *logfile = NULL; -static char *config_file = CPCD_DEFAULT_CONFIG_FILE; -static char *user = 0; +static const char *logfile = NULL; +static const char *config_file = CPCD_DEFAULT_CONFIG_FILE; +static const char *user = 0; static struct getargs args[] = { { "work-dir", 'w', arg_string, &work_dir, diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index fff6c734bd3..4c8d82c9e2e 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -97,7 +97,7 @@ Cmvmi::Cmvmi(const Configuration & conf) : const ndb_mgm_configuration_iterator * db = theConfig.getOwnConfigIterator(); for(unsigned j = 0; jsetLogLevel((LogLevel::EventCategory)j, tmp); } } diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 08b83a8d750..c036089526d 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -954,13 +954,51 @@ struct ndb_mgm_event_categories { const char* name; enum ndb_mgm_event_category category; +} categories[] = { + { "STARTUP", NDB_MGM_EVENT_CATEGORY_STARTUP }, + { "SHUTDOWN", NDB_MGM_EVENT_CATEGORY_SHUTDOWN }, + { "STATISTICS", NDB_MGM_EVENT_CATEGORY_STATISTIC }, + { "NODERESTART", NDB_MGM_EVENT_CATEGORY_NODE_RESTART }, + { "CONNECTION", NDB_MGM_EVENT_CATEGORY_CONNECTION }, + { "CHECKPOINT", NDB_MGM_EVENT_CATEGORY_CHECKPOINT }, + { "DEBUG", 
NDB_MGM_EVENT_CATEGORY_DEBUG }, + { "INFO", NDB_MGM_EVENT_CATEGORY_INFO }, + { "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR }, + { "GREP", NDB_MGM_EVENT_CATEGORY_GREP }, + { 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY } }; +extern "C" +ndb_mgm_event_category +ndb_mgm_match_event_category(const char * status) +{ + if(status == 0) + return NDB_MGM_ILLEGAL_EVENT_CATEGORY; + + for(int i = 0; categories[i].name !=0 ; i++) + if(strcmp(status, categories[i].name) == 0) + return categories[i].category; + + return NDB_MGM_ILLEGAL_EVENT_CATEGORY; +} + +extern "C" +const char * +ndb_mgm_get_event_category_string(enum ndb_mgm_event_category status) +{ + int i; + for(i = 0; categories[i].name != 0; i++) + if(categories[i].category == status) + return categories[i].name; + + return 0; +} + extern "C" int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId, - /*enum ndb_mgm_event_category*/ - char * category, int level, + enum ndb_mgm_event_category cat, + int level, struct ndb_mgm_reply* /*reply*/) { SET_ERROR(handle, NDB_MGM_NO_ERROR, @@ -975,14 +1013,14 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId, Properties args; args.put("node", nodeId); - args.put("category", category); + args.put("category", cat); args.put("level", level); - + const Properties *reply; reply = ndb_mgm_call(handle, clusterlog_reply, "set cluster loglevel", &args); CHECK_REPLY(reply, -1); - + BaseString result; reply->get("result", result); if(strcmp(result.c_str(), "Ok") != 0) { @@ -997,8 +1035,8 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId, extern "C" int ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, - /*enum ndb_mgm_event_category category*/ - char * category, int level, + enum ndb_mgm_event_category category, + int level, struct ndb_mgm_reply* /*reply*/) { SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_set_loglevel_node"); diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index ba8e93edde9..816a84375f1 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -611,9 +611,9 @@ CommandInterpreter::executeHelp(char* parameters) << endl; ndbout << " = "; - for(Uint32 i = 0; i 15){ - ndbout << "Invalid loglevel specification row, level 0-15" << endl; - free(tmpString); - return ; - } - logLevel.setLogLevel(cat, level); - - item = strtok_r(NULL, ", ", &tmpPtr); - } - free(tmpString); + BaseString tmp(parameters); + Vector spec; + tmp.split(spec, "="); + if(spec.size() != 2){ + ndbout << "Invalid loglevel specification: " << parameters << endl; + return; } + spec[0].trim().ndb_toupper(); + int category = ndb_mgm_match_event_category(spec[0].c_str()); + if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){ + category = atoi(spec[0].c_str()); + if(category < NDB_MGM_MIN_EVENT_CATEGORY || + category > NDB_MGM_MAX_EVENT_CATEGORY){ + ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl; + return; + } + } + + int level = atoi(spec[1].c_str()); + if(level < 0 || level > 15){ + ndbout << "Invalid level: " << spec[1].c_str() << endl; + return; + } + struct ndb_mgm_reply reply; int result; result = ndb_mgm_set_loglevel_node(m_mgmsrv, - processId, // fast fix - pekka - (char*)EventLogger::getEventCategoryName(cat), + processId, + (ndb_mgm_event_category)category, level, &reply); - + if (result < 0) { ndbout_c("Executing LOGLEVEL on node %d failed.", processId); printError(); @@ -1303,7 +1288,7 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters, ndbout << 
"Executing LOGLEVEL on node " << processId << " OK!" << endl; } - + } //***************************************************************************** @@ -1633,54 +1618,41 @@ CommandInterpreter::executeEventReporting(int processId, bool all) { connect(); - SetLogLevelOrd logLevel; logLevel.clear(); - char categoryTxt[255]; - int level; - LogLevel::EventCategory cat; - if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) { - for(Uint32 i = 0; i 15){ - ndbout << "Invalid loglevel specification row, level 0-15" << endl; - free(tmpString); - return ; - } - logLevel.setLogLevel(cat, level); - - item = strtok_r(NULL, ", ", &tmpPtr); - } - free(tmpString); + BaseString tmp(parameters); + Vector spec; + tmp.split(spec, "="); + if(spec.size() != 2){ + ndbout << "Invalid loglevel specification: " << parameters << endl; + return; } + + spec[0].trim().ndb_toupper(); + int category = ndb_mgm_match_event_category(spec[0].c_str()); + if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){ + category = atoi(spec[0].c_str()); + if(category < NDB_MGM_MIN_EVENT_CATEGORY || + category > NDB_MGM_MAX_EVENT_CATEGORY){ + ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl; + return; + } + } + + int level = atoi(spec[1].c_str()); + if(level < 0 || level > 15){ + ndbout << "Invalid level: " << spec[1].c_str() << endl; + return; + } + + struct ndb_mgm_reply reply; int result; - result = - ndb_mgm_set_loglevel_clusterlog(m_mgmsrv, - processId, // fast fix - pekka - (char*) - EventLogger::getEventCategoryName(cat), - level, - &reply); + result = ndb_mgm_set_loglevel_clusterlog(m_mgmsrv, + processId, // fast fix - pekka + (ndb_mgm_event_category)category, + level, + &reply); if (result != 0) { ndbout_c("Executing CLUSTERLOG on node %d failed", processId); diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp index 8388c012e55..2c2aeda21ed 100644 --- a/ndb/src/mgmsrv/CommandInterpreter.cpp +++ b/ndb/src/mgmsrv/CommandInterpreter.cpp @@ -52,7 +52,7 @@ static const char* helpTexts[] = { "{|ALL} CLUSTERLOG {=}+ Set log level for cluster log", "QUIT Quit management server", }; -static const int noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*); +static const unsigned noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*); static const char* helpTextShow = "SHOW prints NDB Cluster information\n\n" @@ -389,14 +389,14 @@ void CommandInterpreter::executeHelp(char* parameters) { << endl; ndbout << " = "; - for(i = 0; i = " << "0 - 15" << endl; diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 7c2298d4773..232c0c7bb78 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -611,7 +611,7 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, { MgmStatService::StatListener se; se.m_socket = -1; - for(size_t t = 0; t<_LOGLEVEL_CATEGORIES; t++) + for(size_t t = 0; t::Context &, void MgmApiSession::setClusterLogLevel(Parser::Context &, Properties const &args) { - Uint32 node, level; - BaseString categoryName, errorString; + Uint32 node, level, category; + BaseString errorString; SetLogLevelOrd logLevel; int result; args.get("node", &node); - args.get("category", categoryName); + args.get("category", &category); args.get("level", &level); /* XXX should use constants for this value */ @@ -777,26 +777,18 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, goto error; } - categoryName.ndb_toupper(); - - LogLevel::EventCategory category; - if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) { - errorString.assign("Unknown 
category"); - goto error; - } - EventSubscribeReq req; req.blockRef = 0; req.noOfEntries = 1; req.theCategories[0] = category; req.theLevels[0] = level; m_mgmsrv.m_log_level_requests.push_back(req); - + m_output->println("set cluster loglevel reply"); m_output->println("result: Ok"); m_output->println(""); return; - error: +error: m_output->println("set cluster loglevel reply"); m_output->println("result: %s", errorString.c_str()); m_output->println(""); @@ -805,13 +797,13 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, void MgmApiSession::setLogLevel(Parser::Context &, Properties const &args) { - Uint32 node = 0, level = 0; - BaseString categoryName, errorString; + Uint32 node = 0, level = 0, category; + BaseString errorString; SetLogLevelOrd logLevel; int result; logLevel.clear(); args.get("node", &node); - args.get("category", categoryName); + args.get("category", &category); args.get("level", &level); /* XXX should use constants for this value */ @@ -820,14 +812,6 @@ MgmApiSession::setLogLevel(Parser::Context &, goto error; } - categoryName.ndb_toupper(); - - LogLevel::EventCategory category; - if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) { - errorString.assign("Unknown category"); - goto error; - } - EventSubscribeReq req; req.blockRef = node; req.noOfEntries = 1; @@ -1259,7 +1243,7 @@ NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { out << "[LogLevel: "; - for(size_t i = 0; i<_LOGLEVEL_CATEGORIES; i++) + for(size_t i = 0; i::Context & ctx, result = -1; goto done; } - - spec[0].trim(); - spec[0].ndb_toupper(); - - LogLevel::EventCategory category; - if(!EventLogger::matchEventCategory(spec[0].c_str(), &category)) { - msg.appfmt("Unknown category: >%s<", spec[0].c_str()); - result = -1; - goto done; - } + spec[0].trim().ndb_toupper(); + int category = ndb_mgm_match_event_category(spec[0].c_str()); + if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){ + category = atoi(spec[0].c_str()); + if(category < NDB_MGM_MIN_EVENT_CATEGORY || + category > NDB_MGM_MAX_EVENT_CATEGORY){ + msg.appfmt("Unknown category: >%s<", spec[0].c_str()); + result = -1; + goto done; + } + } + int level = atoi(spec[1].c_str()); if(level < 0 || level > 15){ msg.appfmt("Invalid level: >%s<", spec[1].c_str()); result = -1; goto done; } - le.m_logLevel.setLogLevel(category, level); + le.m_logLevel.setLogLevel((LogLevel::EventCategory)category, level); } if(list.size() == 0){ diff --git a/ndb/test/run-test/atrt-mysql-test-run b/ndb/test/run-test/atrt-mysql-test-run index e36fe1f3882..75482f4b4a7 100755 --- a/ndb/test/run-test/atrt-mysql-test-run +++ b/ndb/test/run-test/atrt-mysql-test-run @@ -16,5 +16,3 @@ fi echo "NDBT_ProgramExit: Failed" exit 1 - - From f53bc339ff10e3c0e645b636211a54486ed4acc1 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 10:32:22 +0200 Subject: [PATCH 17/55] Added EventReport for backup ndb/src/mgmsrv/MgmtSrvr.cpp: removed useless code ndb/src/mgmsrv/MgmtSrvr.hpp: removed useless code --- ndb/include/kernel/LogLevel.hpp | 3 +- ndb/include/kernel/signaldata/EventReport.hpp | 11 +- ndb/include/mgmapi/mgmapi.h | 1 + ndb/include/mgmapi/mgmapi_config_parameters.h | 3 +- ndb/src/common/debugger/EventLogger.cpp | 956 +++++++++--------- ndb/src/kernel/blocks/backup/Backup.cpp | 32 +- ndb/src/mgmapi/mgmapi.cpp | 1 + ndb/src/mgmsrv/MgmtSrvr.cpp | 71 -- ndb/src/mgmsrv/MgmtSrvr.hpp | 3 - 9 files changed, 542 insertions(+), 539 deletions(-) diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp index 21450917f03..4db34b4d14c 100644 --- 
a/ndb/include/kernel/LogLevel.hpp +++ b/ndb/include/kernel/LogLevel.hpp @@ -57,7 +57,8 @@ public: llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL, llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL, llGrep = CFG_LOGLEVEL_GREP - CFG_MIN_LOGLEVEL, - llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL, + llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL + ,llBackup = CFG_LOGLEVEL_BACKUP - CFG_MIN_LOGLEVEL }; /** diff --git a/ndb/include/kernel/signaldata/EventReport.hpp b/ndb/include/kernel/signaldata/EventReport.hpp index b6106bb0ca4..1ad6e1bf7ac 100644 --- a/ndb/include/kernel/signaldata/EventReport.hpp +++ b/ndb/include/kernel/signaldata/EventReport.hpp @@ -135,12 +135,17 @@ public: //GREP GrepSubscriptionInfo = 52, - GrepSubscriptionAlert = 53 - }; + GrepSubscriptionAlert = 53, + //BACKUP + BackupStarted = 54, + BackupFailedToStart = 55, + BackupCompleted = 56, + BackupAborted = 57 + }; + void setEventType(EventType type); EventType getEventType() const; -private: UintR eventType; // DATA 0 }; diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h index 53f2c118156..28be268d6d0 100644 --- a/ndb/include/mgmapi/mgmapi.h +++ b/ndb/include/mgmapi/mgmapi.h @@ -277,6 +277,7 @@ extern "C" { NDB_MGM_EVENT_CATEGORY_WARNING = CFG_LOGLEVEL_WARNING, NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR, NDB_MGM_EVENT_CATEGORY_GREP = CFG_LOGLEVEL_GREP, + NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP, NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL, NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h index 8bd7d1f69f8..e0ad06c322f 100644 --- a/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -96,7 +96,8 @@ #define CFG_LOGLEVEL_ERROR 258 #define CFG_LOGLEVEL_GREP 259 #define CFG_LOGLEVEL_DEBUG 260 -#define CFG_MAX_LOGLEVEL 260 +#define CFG_LOGLEVEL_BACKUP 261 +#define CFG_MAX_LOGLEVEL 261 #define CFG_MGM_PORT 300 diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index bd066e65c94..ca5eade629c 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -107,7 +107,13 @@ const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = { //Global replication { EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, Logger::LL_INFO}, - { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, Logger::LL_ALERT} + { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, Logger::LL_ALERT}, + + // Backup + { EventReport::BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO }, + { EventReport::BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO }, + { EventReport::BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT}, + { EventReport::BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT } }; const Uint32 EventLoggerBase::matrixSize = sizeof(EventLoggerBase::matrix)/ @@ -790,472 +796,504 @@ EventLogger::getText(char * m_text, size_t m_text_len, case EventReport::GrepSubscriptionInfo : - { - GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; - switch(event) { - case GrepEvent::GrepSS_CreateSubIdConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Created subscription id" - " (subId=%d,SubKey=%d)" - " Return code: %d.", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_CreateSubIdConf: - { - const int subId = 
theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: Created subscription id" - " (subId=%d,SubKey=%d)" - " Return code: %d.", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubCreateConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int nodegrp = theData[5]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Created subscription using" - " (subId=%d,SubKey=%d)" - " in primary system. Primary system has %d nodegroup(s)." - " Return code: %d", - subId, - subKey, - nodegrp, - err); - break; - } - case GrepEvent::GrepPS_SubCreateConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have created " - "subscriptions" - " using (subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubStartMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging started on meta data changes." - " using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubStartMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started " - "logging meta data" - " changes on the subscription subId=%d,SubKey=%d) " - "(N.I yet)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubStartDataConf: { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging started on table data changes " - " using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubStartDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started logging " - "table data changes on the subscription " - "subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubSyncMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started " - " synchronization on meta data (META SCAN) using " - "(subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubSyncMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization started (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubSyncDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started " - "synchronization " - " on table data (DATA SCAN) using (subId=%d,SubKey=%d)." 
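
The loglevel rework earlier in this series replaces the old string-based category argument with enum ndb_mgm_event_category: a client maps the name through ndb_mgm_match_event_category(), falls back to a numeric category id, range-checks the level, and only then calls ndb_mgm_set_loglevel_node() or ndb_mgm_set_loglevel_clusterlog(). A minimal caller-side sketch, assuming an already-connected NdbMgmHandle; the helper name apply_loglevel_spec, the plain C string handling (used here instead of BaseString::split) and the include names are illustrative, not taken from the patch:

  #include <ctype.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <mgmapi.h>

  /* Parse "CATEGORY=level" and apply it to one node, mirroring the new
   * CommandInterpreter::executeLogLevel() logic.  Returns 0 on success. */
  static int
  apply_loglevel_spec(NdbMgmHandle handle, int nodeId, const char* spec)
  {
    const char* eq = strchr(spec, '=');
    if (eq == 0)
      return -1;                                 /* not of the form CATEGORY=level */

    char name[64];
    size_t n = (size_t)(eq - spec);
    if (n == 0 || n >= sizeof(name))
      return -1;
    for (size_t i = 0; i < n; i++)               /* upper-case, like ndb_toupper() */
      name[i] = (char)toupper((unsigned char)spec[i]);
    name[n] = 0;

    /* Symbolic name first, numeric category id as fallback. */
    int category = ndb_mgm_match_event_category(name);
    if (category == NDB_MGM_ILLEGAL_EVENT_CATEGORY) {
      category = atoi(name);
      if (category < NDB_MGM_MIN_EVENT_CATEGORY ||
          category > NDB_MGM_MAX_EVENT_CATEGORY)
        return -1;                               /* unknown category */
    }

    int level = atoi(eq + 1);
    if (level < 0 || level > 15)                 /* levels are 0-15 */
      return -1;

    struct ndb_mgm_reply reply;
    return ndb_mgm_set_loglevel_node(handle, nodeId,
                                     (ndb_mgm_event_category)category,
                                     level, &reply);
  }

The same parse-and-validate shape is reused for the CLUSTERLOG case, where ndb_mgm_set_loglevel_clusterlog() takes the identical enum-plus-level pair.
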
- " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubSyncDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization started (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d" - " Return code: %d", - subId, - subKey, - gci, - err); - break; - } - case GrepEvent::GrepPS_SubRemoveConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have removed " - "subscription (subId=%d,SubKey=%d). I have cleaned " - "up resources I've used." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubRemoveConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Removed subscription " - "(subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } + { + GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; + switch(event) { + case GrepEvent::GrepSS_CreateSubIdConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Created subscription id" + " (subId=%d,SubKey=%d)" + " Return code: %d.", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_CreateSubIdConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Created subscription id" + " (subId=%d,SubKey=%d)" + " Return code: %d.", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubCreateConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int nodegrp = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Created subscription using" + " (subId=%d,SubKey=%d)" + " in primary system. Primary system has %d nodegroup(s)." + " Return code: %d", + subId, + subKey, + nodegrp, + err); + break; + } + case GrepEvent::GrepPS_SubCreateConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have created " + "subscriptions" + " using (subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubStartMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging started on meta data changes." + " using (subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubStartMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started " + "logging meta data" + " changes on the subscription subId=%d,SubKey=%d) " + "(N.I yet)." 
+ " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubStartDataConf: { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging started on table data changes " + " using (subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubStartDataConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started logging " + "table data changes on the subscription " + "subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubSyncMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started " + " synchronization on meta data (META SCAN) using " + "(subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubSyncMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization started (META SCAN) on " + " meta data using (subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubSyncDataConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started " + "synchronization " + " on table data (DATA SCAN) using (subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubSyncDataConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int gci = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization started (DATA SCAN) on " + "table data using (subId=%d,SubKey=%d). GCI = %d" + " Return code: %d", + subId, + subKey, + gci, + err); + break; + } + case GrepEvent::GrepPS_SubRemoveConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have removed " + "subscription (subId=%d,SubKey=%d). I have cleaned " + "up resources I've used." 
+ " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubRemoveConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Removed subscription " + "(subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } default: ::snprintf(m_text, m_text_len, "%sUnknown GrepSubscriptonInfo event: %d", theNodeId, theData[1]); - } - break; - } - - case EventReport::GrepSubscriptionAlert : - { - GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; - switch(event) - { - case GrepEvent::GrepSS_CreateSubIdRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord:Error code: %d Error message: %s" - " (subId=%d,SubKey=%d)", - err, - GrepError::getErrorDesc((GrepError::Code)err), - subId, - subKey); - break; - } - case GrepEvent::GrepSS_SubCreateRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: FAILED to Created subscription using" - " (subId=%d,SubKey=%d)in primary system." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubStartMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging failed to start on meta " - "data changes." - " using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubStartDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging FAILED to start on table data " - " changes using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubSyncMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization FAILED (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubSyncDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d" - " Error code: %d Error Message: %s", - subId, - subKey, - gci, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubRemoveRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::SSCoord: Failed to remove subscription " - "(subId=%d,SubKey=%d). 
" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err) - ); - break; - } - - case GrepEvent::GrepPS_CreateSubIdRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: Error code: %d Error Message: %s" - " (subId=%d,SubKey=%d)", - err, - GrepError::getErrorDesc((GrepError::Code)err), - subId, - subKey); - break; - } - case GrepEvent::GrepPS_SubCreateRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: FAILED to Created subscription using" - " (subId=%d,SubKey=%d)in primary system." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubStartMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: Logging failed to start on meta " - "data changes." - " using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubStartDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: Logging FAILED to start on table data " - " changes using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubSyncMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: Synchronization FAILED (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubSyncDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d. " - " Error code: %d Error Message: %s", - subId, - subKey, - gci, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubRemoveRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, m_text_len, - "Grep::PSCoord: Failed to remove subscription " - "(subId=%d,SubKey=%d)." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::Rep_Disconnect: - { - const int err = theData[4]; - const int nodeId = theData[5]; - ::snprintf(m_text, m_text_len, - "Rep: Node %d." 
- " Error code: %d Error Message: %s", - nodeId, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - - - default: - ::snprintf(m_text, - m_text_len, - "%sUnknown GrepSubscriptionAlert event: %d", - theNodeId, - theData[1]); - break; - } - break; } + break; + } + case EventReport::GrepSubscriptionAlert : + { + GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; + switch(event) + { + case GrepEvent::GrepSS_CreateSubIdRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord:Error code: %d Error message: %s" + " (subId=%d,SubKey=%d)", + err, + GrepError::getErrorDesc((GrepError::Code)err), + subId, + subKey); + break; + } + case GrepEvent::GrepSS_SubCreateRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: FAILED to Created subscription using" + " (subId=%d,SubKey=%d)in primary system." + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubStartMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging failed to start on meta " + "data changes." + " using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubStartDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging FAILED to start on table data " + " changes using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubSyncMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization FAILED (META SCAN) on " + " meta data using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubSyncDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int gci = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on " + "table data using (subId=%d,SubKey=%d). GCI = %d" + " Error code: %d Error Message: %s", + subId, + subKey, + gci, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubRemoveRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Failed to remove subscription " + "(subId=%d,SubKey=%d). 
" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err) + ); + break; + } + + case GrepEvent::GrepPS_CreateSubIdRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Error code: %d Error Message: %s" + " (subId=%d,SubKey=%d)", + err, + GrepError::getErrorDesc((GrepError::Code)err), + subId, + subKey); + break; + } + case GrepEvent::GrepPS_SubCreateRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: FAILED to Created subscription using" + " (subId=%d,SubKey=%d)in primary system." + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubStartMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Logging failed to start on meta " + "data changes." + " using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubStartDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Logging FAILED to start on table data " + " changes using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubSyncMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Synchronization FAILED (META SCAN) on " + " meta data using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubSyncDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int gci = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on " + "table data using (subId=%d,SubKey=%d). GCI = %d. " + " Error code: %d Error Message: %s", + subId, + subKey, + gci, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubRemoveRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Failed to remove subscription " + "(subId=%d,SubKey=%d)." + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::Rep_Disconnect: + { + const int err = theData[4]; + const int nodeId = theData[5]; + ::snprintf(m_text, m_text_len, + "Rep: Node %d." 
+ " Error code: %d Error Message: %s", + nodeId, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + + + default: + ::snprintf(m_text, + m_text_len, + "%sUnknown GrepSubscriptionAlert event: %d", + theNodeId, + theData[1]); + break; + } + break; + } + + case EventReport::BackupStarted: + ::snprintf(m_text, + m_text_len, + "%sBackup %d started from node %d", + theNodeId, theData[2], refToNode(theData[1])); + break; + case EventReport::BackupFailedToStart: + ::snprintf(m_text, + m_text_len, + "%sBackup request from %d failed to start. Error: %d", + theNodeId, refToNode(theData[1]), theData[2]); + break; + case EventReport::BackupCompleted: + ::snprintf(m_text, + m_text_len, + "%sBackup %d started from node %d completed\n" + " StartGCP: %d StopGCP: %d\n" + " #Records: %d #LogRecords: %d\n" + " Data: %d bytes Log: %d bytes", + theNodeId, theData[2], refToNode(theData[1]), + theData[3], theData[4], theData[6], theData[8], + theData[5], theData[7]); + break; + case EventReport::BackupAborted: + ::snprintf(m_text, + m_text_len, + "%sBackup %d started from %d has been aborted. Error: %d", + theNodeId, + theData[2], + refToNode(theData[1]), + theData[3]); + break; default: ::snprintf(m_text, m_text_len, diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index b3e9ff735ac..6c7a3c977da 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -944,6 +945,11 @@ Backup::sendBackupRef(BlockReference senderRef, Signal *signal, ref->errorCode = errorCode; ref->masterRef = numberToRef(BACKUP, getMasterNodeId()); sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB); + + signal->theData[0] = EventReport::BackupFailedToStart; + signal->theData[1] = senderRef; + signal->theData[2] = errorCode; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); } void @@ -1226,7 +1232,13 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) conf->nodes = ptr.p->nodes; sendSignal(ptr.p->clientRef, GSN_BACKUP_CONF, signal, BackupConf::SignalLength, JBB); - + + signal->theData[0] = EventReport::BackupStarted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB); + ptr.p->masterData.state.setState(DEFINED); /** * Prepare Trig @@ -2069,6 +2081,18 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) rep->nodes = ptr.p->nodes; sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal, BackupCompleteRep::SignalLength, JBB); + + signal->theData[0] = EventReport::BackupCompleted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->startGCP; + signal->theData[4] = ptr.p->stopGCP; + signal->theData[5] = ptr.p->noOfBytes; + signal->theData[6] = ptr.p->noOfRecords; + signal->theData[7] = ptr.p->noOfLogBytes; + signal->theData[8] = ptr.p->noOfLogRecords; + ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); } /***************************************************************************** @@ -2259,6 +2283,12 @@ Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr) rep->reason = ptr.p->errorCode; sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal, 
BackupAbortRep::SignalLength, JBB); + + signal->theData[0] = EventReport::BackupAborted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->errorCode; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); }//if // ptr.p->masterData.state.setState(INITIAL); diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index c036089526d..09090f7f1af 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -965,6 +965,7 @@ struct ndb_mgm_event_categories { "INFO", NDB_MGM_EVENT_CATEGORY_INFO }, { "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR }, { "GREP", NDB_MGM_EVENT_CATEGORY_GREP }, + { "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP }, { 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY } }; diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 232c0c7bb78..adc47b7cb39 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -63,63 +63,6 @@ extern int global_flag_send_heartbeat_now; extern int g_no_nodeid_checks; -static -void -CmdBackupCallback(const MgmtSrvr::BackupEvent & event) -{ - char str[255]; - - ndbout << endl; - - bool ok = false; - switch(event.Event){ - case MgmtSrvr::BackupEvent::BackupStarted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d started", event.Started.BackupId); - break; - case MgmtSrvr::BackupEvent::BackupFailedToStart: - ok = true; - snprintf(str, sizeof(str), - "Backup failed to start (Error %d)", - event.FailedToStart.ErrorCode); - break; - case MgmtSrvr::BackupEvent::BackupCompleted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d completed", - event.Completed.BackupId); - ndbout << str << endl; - - snprintf(str, sizeof(str), - " StartGCP: %d StopGCP: %d", - event.Completed.startGCP, event.Completed.stopGCP); - ndbout << str << endl; - - snprintf(str, sizeof(str), - " #Records: %d #LogRecords: %d", - event.Completed.NoOfRecords, event.Completed.NoOfLogRecords); - ndbout << str << endl; - - snprintf(str, sizeof(str), - " Data: %d bytes Log: %d bytes", - event.Completed.NoOfBytes, event.Completed.NoOfLogBytes); - break; - case MgmtSrvr::BackupEvent::BackupAborted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d has been aborted reason %d", - event.Aborted.BackupId, - event.Aborted.Reason); - break; - } - if(!ok){ - snprintf(str, sizeof(str), "Unknown backup event: %d", event.Event); - } - ndbout << str << endl; -} - - void * MgmtSrvr::logLevelThread_C(void* m) { @@ -519,7 +462,6 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, m_newConfig = NULL; m_configFilename = configFilename; - setCallback(CmdBackupCallback); m_localNdbConfigFilename = ndb_config_filename; m_nextConfigGenerationNumber = 0; @@ -2398,15 +2340,6 @@ MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData) /*************************************************************************** * Backup ***************************************************************************/ - -MgmtSrvr::BackupCallback -MgmtSrvr::setCallback(BackupCallback aCall) -{ - BackupCallback ret = m_backupCallback; - m_backupCallback = aCall; - return ret; -} - int MgmtSrvr::startBackup(Uint32& backupId, bool waitCompleted) { @@ -2605,10 +2538,6 @@ MgmtSrvr::backupCallback(BackupEvent & event) return; } - - if(m_backupCallback != 0){ - (* m_backupCallback)(event); - } } diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 5de39932bf4..3f3e98dbcc1 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -353,8 +353,6 @@ public: /** * Backup functionallity */ - typedef void (* 
BackupCallback)(const BackupEvent& Event); - BackupCallback setCallback(BackupCallback); int startBackup(Uint32& backupId, bool waitCompleted = false); int abortBackup(Uint32 backupId); int performBackup(Uint32* backupId); @@ -743,7 +741,6 @@ private: void signalRecvThreadRun(); void backupCallback(BackupEvent &); - BackupCallback m_backupCallback; BackupEvent m_lastBackupEvent; Config *_props; From 034bbee322bb4a97118c9bb4bcb2f9d967433c41 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 10:05:42 +0000 Subject: [PATCH 18/55] made tryBind static using tryBind instead to determine if we're "config host" is "local" ndb/include/util/SocketServer.hpp: made tryBind static ndb/src/common/util/SocketServer.cpp: made tryBind static ndb/src/mgmsrv/MgmtSrvr.cpp: using tryBind instead to determine if we're "config host" is "local" --- ndb/include/util/SocketServer.hpp | 2 +- ndb/src/common/util/SocketServer.cpp | 2 +- ndb/src/mgmsrv/MgmtSrvr.cpp | 33 +++++++++++++++++++++++----- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/ndb/include/util/SocketServer.hpp b/ndb/include/util/SocketServer.hpp index 334fa575e47..3860b9ca84b 100644 --- a/ndb/include/util/SocketServer.hpp +++ b/ndb/include/util/SocketServer.hpp @@ -76,7 +76,7 @@ public: * then close the socket * Returns true if succeding in binding */ - bool tryBind(unsigned short port, const char * intface = 0) const; + static bool tryBind(unsigned short port, const char * intface = 0); /** * Setup socket diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index 380a8073a2c..c3cffa1399b 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ b/ndb/src/common/util/SocketServer.cpp @@ -47,7 +47,7 @@ SocketServer::~SocketServer() { } bool -SocketServer::tryBind(unsigned short port, const char * intface) const { +SocketServer::tryBind(unsigned short port, const char * intface) { struct sockaddr_in servaddr; memset(&servaddr, 0, sizeof(servaddr)); servaddr.sin_family = AF_INET; diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 7c2298d4773..5ba2dde7fe6 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include +#include #include "MgmtSrvr.hpp" #include "MgmtErrorReporter.hpp" @@ -124,9 +124,10 @@ void * MgmtSrvr::logLevelThread_C(void* m) { MgmtSrvr *mgm = (MgmtSrvr*)m; - + my_thread_init(); mgm->logLevelThreadRun(); + my_thread_end(); NdbThread_Exit(0); /* NOTREACHED */ return 0; @@ -136,9 +137,10 @@ void * MgmtSrvr::signalRecvThread_C(void *m) { MgmtSrvr *mgm = (MgmtSrvr*)m; - + my_thread_init(); mgm->signalRecvThreadRun(); + my_thread_end(); NdbThread_Exit(0); /* NOTREACHED */ return 0; @@ -573,6 +575,9 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, _props = NULL; _ownNodeId= 0; + NodeId tmp= nodeId; + BaseString error_string; +#if 0 char my_hostname[256]; struct sockaddr_in tmp_addr; SOCKET_SIZE_TYPE addrlen= sizeof(tmp_addr); @@ -587,8 +592,6 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, exit(-1); } } - NodeId tmp= nodeId; - BaseString error_string; if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, (struct sockaddr *)&tmp_addr, &addrlen, error_string)){ @@ -596,6 +599,14 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, << error_string.c_str() << endl; exit(-1); } +#else + if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, + 0, 0, error_string)){ + ndbout << "Unable to obtain requested nodeid: " + << error_string.c_str() << endl; + exit(-1); + } +#endif _ownNodeId = tmp; @@ 
-2248,7 +2259,12 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, continue; } // connecting through localhost - // check if config_hostname match hostname + // check if config_hostname is local +#if 1 + if (!SocketServer::tryBind(0,config_hostname)) { + continue; + } +#else char my_hostname[256]; if (gethostname(my_hostname, sizeof(my_hostname)) != 0) continue; @@ -2257,6 +2273,11 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, // no match continue; } +#endif + } + } else { // client_addr == 0 + if (!SocketServer::tryBind(0,config_hostname)) { + continue; } } if (*nodeId != 0 || From 72fedd949429c8bbe7e7bce8461e4582f27c683e Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 14:09:21 +0200 Subject: [PATCH 19/55] ndb charsets (wl-1732) final part: use strxfrm + strcoll ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp: oops ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp: jamEntry ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: jamEntry mysql-test/r/ndb_index.result: ndb charsets: minimal fix to make test pass mysql-test/t/ndb_index.test: ndb charsets: minimal fix to make test pass --- mysql-test/r/ndb_charset.result | 191 ++++++++++++++++++ mysql-test/r/ndb_index.result | 2 +- mysql-test/t/ndb_charset.test | 159 +++++++++++++++ mysql-test/t/ndb_index.test | 2 +- ndb/include/util/NdbSqlUtil.hpp | 6 +- ndb/src/common/util/NdbSqlUtil.cpp | 88 ++++---- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 1 + ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 18 +- .../kernel/blocks/dbtup/DbtupExecQuery.cpp | 21 +- ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp | 4 +- ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 6 +- ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 83 +++++++- ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 9 +- ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp | 5 +- ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp | 4 +- ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 1 + ndb/src/kernel/blocks/dbtux/Times.txt | 18 +- ndb/src/ndbapi/NdbIndexOperation.cpp | 32 ++- ndb/src/ndbapi/NdbOperationDefine.cpp | 11 + ndb/src/ndbapi/NdbOperationSearch.cpp | 31 ++- ndb/src/ndbapi/NdbScanOperation.cpp | 35 +++- ndb/src/ndbapi/ndberror.c | 2 +- ndb/test/ndbapi/testOIBasic.cpp | 20 ++ 23 files changed, 672 insertions(+), 77 deletions(-) create mode 100644 mysql-test/r/ndb_charset.result create mode 100644 mysql-test/t/ndb_charset.test diff --git a/mysql-test/r/ndb_charset.result b/mysql-test/r/ndb_charset.result new file mode 100644 index 00000000000..93429a1fcb0 --- /dev/null +++ b/mysql-test/r/ndb_charset.result @@ -0,0 +1,191 @@ +drop table if exists t1; +create table t1 ( +a char(3) character set latin1 collate latin1_bin primary key +) engine=ndb; +insert into t1 values('aAa'); +insert into t1 values('aaa'); +insert into t1 values('AAA'); +select * from t1 order by a; +a +AAA +aAa +aaa +select * from t1 where a = 'aAa'; +a +aAa +select * from t1 where a = 'aaa'; +a +aaa +select * from t1 where a = 'AaA'; +a +select * from t1 where a = 'AAA'; +a +AAA +drop table t1; +create table t1 ( +a char(3) character set latin1 collate latin1_swedish_ci primary key +) engine=ndb; +insert into t1 values('aAa'); +insert into t1 values('aaa'); +ERROR 23000: Duplicate entry 'aaa' for key 1 +insert into t1 values('AAA'); +ERROR 23000: Duplicate entry 'AAA' for key 1 +select * from t1 order by a; +a +aAa +select * from t1 where a = 'aAa'; +a +aAa +select * from t1 where a = 'aaa'; +a +aAa +select * from t1 where a = 'AaA'; +a +aAa +select * from t1 where a = 'AAA'; +a +aAa +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate 
latin1_bin not null, +unique key(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +select * from t1 order by p; +p a +1 aAa +2 aaa +3 AAA +select * from t1 where a = 'aAa'; +p a +1 aAa +select * from t1 where a = 'aaa'; +p a +2 aaa +select * from t1 where a = 'AaA'; +p a +select * from t1 where a = 'AAA'; +p a +3 AAA +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate latin1_swedish_ci not null, +unique key(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +ERROR 23000: Can't write, because of unique constraint, to table 't1' +insert into t1 values(3, 'AAA'); +ERROR 23000: Can't write, because of unique constraint, to table 't1' +select * from t1 order by p; +p a +1 aAa +select * from t1 where a = 'aAa'; +p a +1 aAa +select * from t1 where a = 'aaa'; +p a +1 aAa +select * from t1 where a = 'AaA'; +p a +1 aAa +select * from t1 where a = 'AAA'; +p a +1 aAa +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate latin1_bin not null, +index(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +select * from t1 order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +explain select * from t1 where a = 'zZz' order by p; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort +select * from t1 where a = 'aAa' order by p; +p a +1 aAa +4 aAa +select * from t1 where a = 'aaa' order by p; +p a +2 aaa +5 aaa +select * from t1 where a = 'AaA' order by p; +p a +select * from t1 where a = 'AAA' order by p; +p a +3 AAA +6 AAA +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate latin1_swedish_ci not null, +index(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +select * from t1 order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +explain select * from t1 where a = 'zZz' order by p; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort +select * from t1 where a = 'aAa' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +select * from t1 where a = 'aaa' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +select * from t1 where a = 'AaA' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +select * from t1 where a = 'AAA' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +drop table t1; diff --git a/mysql-test/r/ndb_index.result b/mysql-test/r/ndb_index.result index dd92c237ace..5702552b0b5 100644 --- a/mysql-test/r/ndb_index.result +++ b/mysql-test/r/ndb_index.result @@ -4,7 +4,7 @@ PORT varchar(16) NOT NULL, ACCESSNODE varchar(16) NOT NULL, POP varchar(48) NOT NULL, ACCESSTYPE int unsigned NOT NULL, -CUSTOMER_ID varchar(20) NOT NULL, +CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL, PROVIDER varchar(16), TEXPIRE int unsigned, NUM_IP int unsigned, diff --git a/mysql-test/t/ndb_charset.test b/mysql-test/t/ndb_charset.test new file mode 100644 index 00000000000..b9f28ed0faf --- /dev/null +++ b/mysql-test/t/ndb_charset.test @@ -0,0 +1,159 @@ +--source 
include/have_ndb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# Minimal NDB charset test. +# + +# pk - binary + +create table t1 ( + a char(3) character set latin1 collate latin1_bin primary key +) engine=ndb; +# ok +insert into t1 values('aAa'); +insert into t1 values('aaa'); +insert into t1 values('AAA'); +# 3 +select * from t1 order by a; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 0 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# pk - case insensitive + +create table t1 ( + a char(3) character set latin1 collate latin1_swedish_ci primary key +) engine=ndb; +# ok +insert into t1 values('aAa'); +# fail +--error 1062 +insert into t1 values('aaa'); +--error 1062 +insert into t1 values('AAA'); +# 1 +select * from t1 order by a; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 1 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# unique hash index - binary + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_bin not null, + unique key(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +# 3 +select * from t1 order by p; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 0 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# unique hash index - case insensitive + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_swedish_ci not null, + unique key(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +# fail +--error 1169 +insert into t1 values(2, 'aaa'); +--error 1169 +insert into t1 values(3, 'AAA'); +# 1 +select * from t1 order by p; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 1 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# ordered index - binary + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_bin not null, + index(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +# 6 +select * from t1 order by p; +# plan +explain select * from t1 where a = 'zZz' order by p; +# 2 +select * from t1 where a = 'aAa' order by p; +# 2 +select * from t1 where a = 'aaa' order by p; +# 0 +select * from t1 where a = 'AaA' order by p; +# 2 +select * from t1 where a = 'AAA' order by p; +drop table t1; + +# ordered index - case insensitive + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_swedish_ci not null, + index(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +# 6 +select * from t1 order by p; +# plan +explain select * from t1 where a = 'zZz' order by p; +# 6 +select * from t1 where a = 'aAa' order by p; +# 6 +select * from t1 where a = 'aaa' order by p; +# 6 +select * from t1 where a = 'AaA' order by p; +# 6 +select * from t1 where a = 'AAA' order by p; +drop table t1; diff --git a/mysql-test/t/ndb_index.test b/mysql-test/t/ndb_index.test index d3977dc3ea4..e65b24a9b20 100644 
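
The expectations in ndb_charset.test above hinge entirely on the collation: under latin1_swedish_ci the values 'aAa', 'aaa' and 'AAA' compare equal (hence the duplicate-key errors, and every lookup matching all spellings), while latin1_bin keeps them distinct. A standalone sketch of that comparison using the MySQL collation handler the kernel now calls (see the strnncollsp() call in the NdbSqlUtil.cpp hunk further below); the header names and the my_charset_latin1 / my_charset_latin1_bin globals are taken from the surrounding MySQL 4.1 tree, not from this patch:

  #include <my_global.h>
  #include <m_ctype.h>
  #include <stdio.h>

  int main()
  {
    const uchar* a = (const uchar*) "aAa";
    const uchar* b = (const uchar*) "AAA";

    CHARSET_INFO* ci  = &my_charset_latin1;      /* latin1_swedish_ci */
    CHARSET_INFO* bin = &my_charset_latin1_bin;  /* latin1_bin */

    /* 0 means equal under the collation: the duplicate-key case above. */
    printf("ci : %d\n", (*ci->coll->strnncollsp)(ci, a, 3, b, 3));
    /* non-zero: the binary collation keeps the three spellings distinct. */
    printf("bin: %d\n", (*bin->coll->strnncollsp)(bin, a, 3, b, 3));
    return 0;
  }
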
--- a/mysql-test/t/ndb_index.test +++ b/mysql-test/t/ndb_index.test @@ -9,7 +9,7 @@ CREATE TABLE t1 ( ACCESSNODE varchar(16) NOT NULL, POP varchar(48) NOT NULL, ACCESSTYPE int unsigned NOT NULL, - CUSTOMER_ID varchar(20) NOT NULL, + CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL, PROVIDER varchar(16), TEXPIRE int unsigned, NUM_IP int unsigned, diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp index 00216057d58..3062d1e4e1b 100644 --- a/ndb/include/util/NdbSqlUtil.hpp +++ b/ndb/include/util/NdbSqlUtil.hpp @@ -40,11 +40,14 @@ public: * Compare kernel attribute values. Returns -1, 0, +1 for less, * equal, greater, respectively. Parameters are pointers to values, * full attribute size in words, and size of available data in words. + * There is also pointer to type specific extra info. Char types + * receive CHARSET_INFO in it. + * * If available size is less than full size, CmpUnknown may be * returned. If a value cannot be parsed, it compares like NULL i.e. * less than any valid value. */ - typedef int Cmp(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size); + typedef int Cmp(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size); enum CmpResult { CmpLess = -1, @@ -55,6 +58,7 @@ public: /** * Kernel data types. Must match m_typeList in NdbSqlUtil.cpp. + * Now also must match types in NdbDictionary. */ struct Type { enum Enum { diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index ffcf29a7242..6e4e5919e43 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -198,7 +198,7 @@ NdbSqlUtil::getTypeBinary(Uint32 typeId) // compare int -NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpTinyint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Int8 v; } u1, u2; @@ -212,7 +212,7 @@ NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s } int -NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpTinyunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Uint8 v; } u1, u2; @@ -226,7 +226,7 @@ NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uin } int -NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpSmallint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Int16 v; } u1, u2; @@ -240,7 +240,7 @@ NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpSmallunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Uint16 v; } u1, u2; @@ -254,7 +254,7 @@ NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Ui } int -NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpMediumint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { const Uint32* p; const unsigned char* v; } u1, u2; @@ -270,7 +270,7 @@ NdbSqlUtil::cmpMediumint(const 
Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpMediumunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { const Uint32* p; const unsigned char* v; } u1, u2; @@ -286,7 +286,7 @@ NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, U } int -NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpInt(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Int32 v; } u1, u2; @@ -300,7 +300,7 @@ NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) } int -NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpUnsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Uint32 v; } u1, u2; @@ -314,7 +314,7 @@ NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBigint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); if (size >= 2) { @@ -333,7 +333,7 @@ NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si } int -NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBigunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); if (size >= 2) { @@ -352,7 +352,7 @@ NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint } int -NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpFloat(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; float v; } u1, u2; @@ -367,7 +367,7 @@ NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 siz } int -NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpDouble(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); if (size >= 2) { @@ -387,7 +387,7 @@ NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si } int -NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpDecimal(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); // not used by MySQL or NDB @@ -396,27 +396,34 @@ NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s } int -NdbSqlUtil::cmpChar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpChar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { - assert(full >= size && size > 0); + // collation does not work on prefix for some charsets + assert(full == size && size > 0); /* - * Char is blank-padded to length and null-padded to word size. There - * is no terminator so we compare the full values. + * Char is blank-padded to length and null-padded to word size. 
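The hunks above and below change every NdbSqlUtil comparator to the new Cmp signature, which carries an opaque type-specific pointer ahead of the two values. The numeric comparators ignore it; the char and text comparators (cmpChar just below) cast it to CHARSET_INFO* and collate with it. Callers pass the column's charset for raw char data, or 0 when the data is numeric or already collation-normalized (as the index code does further down). A small illustrative caller, not taken from the NDB sources:

    #include <NdbSqlUtil.hpp>   // declares the Cmp typedef shown above

    // csInfo is the column's CHARSET_INFO for raw char data, 0 otherwise;
    // numeric comparators never look at it.
    static int compare_values(NdbSqlUtil::Cmp* cmp, const void* csInfo,
                              const Uint32* v1, const Uint32* v2,
                              Uint32 sizeInWords)
    {
      return (*cmp)(csInfo, v1, v2, sizeInWords, sizeInWords);
    }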
*/ - union { const Uint32* p; const char* v; } u1, u2; + union { const Uint32* p; const uchar* v; } u1, u2; u1.p = p1; u2.p = p2; - int k = memcmp(u1.v, u2.v, size << 2); - return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown; + // not const in MySQL + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + // length in bytes including null padding to Uint32 + uint l1 = (full << 2); + int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1); + return k < 0 ? -1 : k > 0 ? +1 : 0; } int -NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpVarchar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Varchar is not allowed to contain a null byte and the value is * null-padded. Therefore comparison does not need to use the length. + * + * Not used before MySQL 5.0. Format is likely to change. Handle + * only binary collation for now. */ union { const Uint32* p; const char* v; } u1, u2; u1.p = p1; @@ -427,7 +434,7 @@ NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s } int -NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* @@ -441,12 +448,14 @@ NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si } int -NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpVarbinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Binary data of variable length padded with nulls. The comparison * does not need to use the length. + * + * Not used before MySQL 5.0. Format is likely to change. */ union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; @@ -457,11 +466,13 @@ NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpDatetime(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Datetime is CC YY MM DD hh mm ss \0 + * + * Not used via MySQL. */ union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; @@ -478,11 +489,13 @@ NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpTimespec(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Timespec is CC YY MM DD hh mm ss \0 NN NN NN NN + * + * Not used via MySQL. */ union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; @@ -509,12 +522,11 @@ NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBlob(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* - * Blob comparison is on the inline bytes. Except for larger header - * the format is like Varbinary. + * Blob comparison is on the inline bytes (null padded). 
*/ const unsigned head = NDB_BLOB_HEAD_SIZE; // skip blob head @@ -529,21 +541,26 @@ NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size } int -NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpText(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { - assert(full >= size && size > 0); + // collation does not work on prefix for some charsets + assert(full == size && size > 0); /* - * Text comparison is on the inline bytes. Except for larger header - * the format is like Varchar. + * Text comparison is on the inline bytes (blank padded). Currently + * not supported for multi-byte charsets. */ const unsigned head = NDB_BLOB_HEAD_SIZE; // skip blob head if (size >= head + 1) { - union { const Uint32* p; const char* v; } u1, u2; + union { const Uint32* p; const uchar* v; } u1, u2; u1.p = p1 + head; u2.p = p2 + head; - int k = memcmp(u1.v, u2.v, (size - head) << 2); - return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown; + // not const in MySQL + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + // length in bytes including null padding to Uint32 + uint l1 = (full << 2); + int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1); + return k < 0 ? -1 : k > 0 ? +1 : 0; } return CmpUnknown; } @@ -652,6 +669,7 @@ const Testcase testcase[] = { int main(int argc, char** argv) { + ndb_init(); // for charsets unsigned count = argc > 1 ? atoi(argv[1]) : 1000000; ndbout_c("count = %u", count); assert(count != 0); diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 467df4aca3d..8342870d69c 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -7700,6 +7700,7 @@ void Dblqh::accScanConfScanLab(Signal* signal) ndbrequire(sz == boundAiLength); EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO, signal, TuxBoundInfo::SignalLength + boundAiLength); + jamEntry(); if (req->errorCode != 0) { jam(); /* diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 1444066a76c..ce81c1c9bc8 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -1374,7 +1374,8 @@ private: const Uint32* inBuffer, Uint32 inBufLen, Uint32* outBuffer, - Uint32 TmaxRead); + Uint32 TmaxRead, + bool xfrmFlag); //------------------------------------------------------------------ //------------------------------------------------------------------ @@ -1620,6 +1621,20 @@ private: Uint32 attrDescriptor, Uint32 attrDes2); +// ***************************************************************** +// Read char routines optionally (tXfrmFlag) apply strxfrm +// ***************************************************************** + + bool readCharNotNULL(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2); + + bool readCharNULLable(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2); + //------------------------------------------------------------------ //------------------------------------------------------------------ bool nullFlagCheck(Uint32 attrDes2); @@ -2225,6 +2240,7 @@ private: Uint32 tMaxRead; Uint32 tOutBufIndex; Uint32* tTupleHeader; + bool tXfrmFlag; // updateAttributes module Uint32 tInBufIndex; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index 0a47778f7c1..dfd1e37d4f5 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp 
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -903,7 +903,8 @@ int Dbtup::handleReadReq(Signal* signal, &cinBuffer[0], regOperPtr->attrinbufLen, dst, - dstLen); + dstLen, + false); if (TnoOfDataRead != (Uint32)-1) { /* ------------------------------------------------------------------------- */ // We have read all data into coutBuffer. Now send it to the API. @@ -1274,7 +1275,8 @@ int Dbtup::interpreterStartLab(Signal* signal, &cinBuffer[5], RinitReadLen, &dst[0], - dstLen); + dstLen, + false); if (TnoDataRW != (Uint32)-1) { RattroutCounter = TnoDataRW; RinstructionCounter += RinitReadLen; @@ -1347,7 +1349,8 @@ int Dbtup::interpreterStartLab(Signal* signal, &cinBuffer[RinstructionCounter], RfinalRLen, &dst[RattroutCounter], - (dstLen - RattroutCounter)); + (dstLen - RattroutCounter), + false); if (TnoDataRW != (Uint32)-1) { RattroutCounter += TnoDataRW; } else { @@ -1487,7 +1490,8 @@ int Dbtup::interpreterNextLab(Signal* signal, &theAttrinfo, (Uint32)1, &TregMemBuffer[theRegister], - (Uint32)3); + (Uint32)3, + false); if (TnoDataRW == 2) { /* ------------------------------------------------------------- */ // Two words read means that we get the instruction plus one 32 @@ -1833,7 +1837,8 @@ int Dbtup::interpreterNextLab(Signal* signal, Int32 TnoDataR = readAttributes(pagePtr, TupHeadOffset, &attrId, 1, - tmpArea, tmpAreaSz); + tmpArea, tmpAreaSz, + false); if (TnoDataR == -1) { jam(); @@ -1929,7 +1934,8 @@ int Dbtup::interpreterNextLab(Signal* signal, Int32 TnoDataR = readAttributes(pagePtr, TupHeadOffset, &attrId, 1, - tmpArea, tmpAreaSz); + tmpArea, tmpAreaSz, + false); if (TnoDataR == -1) { jam(); @@ -1957,7 +1963,8 @@ int Dbtup::interpreterNextLab(Signal* signal, Int32 TnoDataR = readAttributes(pagePtr, TupHeadOffset, &attrId, 1, - tmpArea, tmpAreaSz); + tmpArea, tmpAreaSz, + false); if (TnoDataR == -1) { jam(); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp index d864bac8b59..2dd707ebafc 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp @@ -160,7 +160,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu operPtr.i = RNIL; operPtr.p = NULL; // do it - int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL); + int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true); // restore globals tabptr = tabptr_old; fragptr = fragptr_old; @@ -200,7 +200,7 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* data operPtr.i = RNIL; operPtr.p = NULL; // do it - int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL); + int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true); // restore globals tabptr = tabptr_old; fragptr = fragptr_old; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index dc04650cd1b..efea312b865 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -332,11 +332,11 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) } if (i == fragOperPtr.p->charsetIndex) { ljam(); - ndbrequire(i < regTabPtr.p->noOfCharsets); - regTabPtr.p->charsetArray[i] = cs; - AttributeOffset::setCharsetPos(attrDes2, i); fragOperPtr.p->charsetIndex++; } + ndbrequire(i < regTabPtr.p->noOfCharsets); + regTabPtr.p->charsetArray[i] = cs; + AttributeOffset::setCharsetPos(attrDes2, i); } setTabDescrWord(firstTabDesIndex + 1, 
attrDes2); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index 49ca52b2b9c..a4e7cb47249 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -35,6 +35,7 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) { Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE); Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr; + Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr; if (!AttributeDescriptor::getDynamic(attrDescriptor)) { if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) || (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) { @@ -54,6 +55,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) } else { ndbrequire(false); }//if + // replace read function of char attribute + if (AttributeOffset::getCharsetFlag(attrOffset)) { + ljam(); + regTabPtr->readFunctionArray[i] = &Dbtup::readCharNotNULL; + } } else { if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1) { ljam(); @@ -72,6 +78,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; }//if + // replace read function of char attribute + if (AttributeOffset::getCharsetFlag(attrOffset)) { + ljam(); + regTabPtr->readFunctionArray[i] = &Dbtup::readCharNULLable; + } }//if } else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) { if (!AttributeDescriptor::getNullable(attrDescriptor)) { @@ -149,7 +160,8 @@ int Dbtup::readAttributes(Page* const pagePtr, const Uint32* inBuffer, Uint32 inBufLen, Uint32* outBuffer, - Uint32 maxRead) + Uint32 maxRead, + bool xfrmFlag) { Tablerec* const regTabPtr = tabptr.p; Uint32 numAttributes = regTabPtr->noOfAttr; @@ -162,6 +174,7 @@ int Dbtup::readAttributes(Page* const pagePtr, tCheckOffset = regTabPtr->tupheadsize; tMaxRead = maxRead; tTupleHeader = &pagePtr->pageWord[tupHeadOffset]; + tXfrmFlag = xfrmFlag; ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE); while (inBufIndex < inBufLen) { @@ -542,6 +555,74 @@ Dbtup::readDynSmallVarSize(Uint32* outBuffer, return false; }//Dbtup::readDynSmallVarSize() + +bool +Dbtup::readCharNotNULL(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2) +{ + Uint32 indexBuf = tOutBufIndex; + Uint32 readOffset = AttributeOffset::getOffset(attrDes2); + Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor); + Uint32 newIndexBuf = indexBuf + attrNoOfWords; + Uint32 maxRead = tMaxRead; + + ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset); + if (newIndexBuf <= maxRead) { + ljam(); + ahOut->setDataSize(attrNoOfWords); + if (! 
tXfrmFlag) { + MEMCOPY_NO_WORDS(&outBuffer[indexBuf], + &tTupleHeader[readOffset], + attrNoOfWords); + } else { + ljam(); + Tablerec* regTabPtr = tabptr.p; + Uint32 i = AttributeOffset::getCharsetPos(attrDes2); + ndbrequire(i < tabptr.p->noOfCharsets); + // not const in MySQL + CHARSET_INFO* cs = tabptr.p->charsetArray[i]; + // XXX should strip Uint32 null padding + const unsigned nBytes = attrNoOfWords << 2; + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)&outBuffer[indexBuf], + nBytes, + (const uchar*)&tTupleHeader[readOffset], + nBytes); + // pad with ascii spaces + while (n < nBytes) + ((uchar*)&outBuffer[indexBuf])[n++] = 0x20; + } + tOutBufIndex = newIndexBuf; + return true; + } else { + ljam(); + terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; + return false; + } +} + +bool +Dbtup::readCharNULLable(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2) +{ + if (!nullFlagCheck(attrDes2)) { + ljam(); + return readCharNotNULL(outBuffer, + ahOut, + attrDescriptor, + attrDes2); + } else { + ljam(); + ahOut->setNULL(); + return true; + } +} + /* ---------------------------------------------------------------------- */ /* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */ /* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */ diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index a93ff4566e7..c0b49364ee6 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -751,7 +751,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, &tableDescriptor[regTabPtr->readKeyArray].tabDescr, regTabPtr->noOfKeyAttr, keyBuffer, - ZATTR_BUFFER_SIZE); + ZATTR_BUFFER_SIZE, + true); ndbrequire(noPrimKey != (Uint32)-1); Uint32 numAttrsToRead; @@ -792,7 +793,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, &readBuffer[0], numAttrsToRead, mainBuffer, - ZATTR_BUFFER_SIZE); + ZATTR_BUFFER_SIZE, + true); ndbrequire(noMainWords != (Uint32)-1); } else { ljam(); @@ -816,7 +818,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, &readBuffer[0], numAttrsToRead, copyBuffer, - ZATTR_BUFFER_SIZE); + ZATTR_BUFFER_SIZE, + true); ndbrequire(noCopyWords != (Uint32)-1); if ((noMainWords == noCopyWords) && diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp index 4ce413af138..549720cc17c 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp @@ -58,7 +58,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, Cons NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start]; const Uint32* const p1 = &searchKey[AttributeHeaderSize]; const Uint32* const p2 = &entryData[AttributeHeaderSize]; - ret = (*cmp)(p1, p2, size1, size2); + ret = (*cmp)(0, p1, p2, size1, size2); if (ret != 0) { jam(); break; @@ -132,6 +132,7 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne jam(); // current attribute const unsigned index = boundInfo.ah().getAttributeId(); + ndbrequire(index < frag.m_numAttrs); const DescAttr& descAttr = descEnt.m_descAttr[index]; ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId); // full data size @@ -143,7 +144,7 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index]; const Uint32* const p1 = &boundInfo[AttributeHeaderSize]; const Uint32* const p2 = &entryData[AttributeHeaderSize]; - int ret = 
(*cmp)(p1, p2, size1, size2); + int ret = (*cmp)(0, p1, p2, size1, size2); if (ret != 0) { jam(); return ret; diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp index 958ba4b0839..39cd8e25184 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp @@ -201,8 +201,8 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal) // allocate buffers c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes); c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes); - c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32*), MaxIndexAttributes); - c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32*), MaxIndexAttributes); + c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize); + c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize); c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1); // ack ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp index 9d7d4b06bf7..5b161d3c4ce 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp @@ -112,6 +112,7 @@ Dbtux::execACC_SCANREQ(Signal* signal) void Dbtux::execTUX_BOUND_INFO(Signal* signal) { + jamEntry(); struct BoundInfo { unsigned offset; unsigned size; diff --git a/ndb/src/kernel/blocks/dbtux/Times.txt b/ndb/src/kernel/blocks/dbtux/Times.txt index 84819ddcf97..03473353a52 100644 --- a/ndb/src/kernel/blocks/dbtux/Times.txt +++ b/ndb/src/kernel/blocks/dbtux/Times.txt @@ -83,7 +83,7 @@ optim 13 mc02/a 39 ms 59 ms 50 pct mc02/c 9 ms 12 ms 44 pct mc02/d 246 ms 289 ms 17 pct -[ case d: what happened to PK read performance? ] +[ case d: bug in testOIBasic killed PK read performance ] optim 14 mc02/a 41 ms 60 ms 44 pct mc02/b 46 ms 81 ms 73 pct @@ -91,5 +91,21 @@ optim 14 mc02/a 41 ms 60 ms 44 pct mc02/d 242 ms 285 ms 17 pct [ case b: do long keys suffer from many subroutine calls? ] +[ case d: bug in testOIBasic killed PK read performance ] + +none mc02/a 35 ms 60 ms 71 pct + mc02/b 42 ms 75 ms 76 pct + mc02/c 5 ms 12 ms 106 pct + mc02/d 165 ms 238 ms 44 pct + +[ johan re-installed mc02 as fedora gcc-3.3.2 ] +[ case c: table scan has improved... 
] + +charsets mc02/a 35 ms 60 ms 71 pct + mc02/b 42 ms 84 ms 97 pct + mc02/c 5 ms 12 ms 109 pct + mc02/d 190 ms 236 ms 23 pct + +[ case b: TUX can no longer use pointers to TUP data ] vim: set et: diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/ndb/src/ndbapi/NdbIndexOperation.cpp index 0742f8d911c..bf4b07842f6 100644 --- a/ndb/src/ndbapi/NdbIndexOperation.cpp +++ b/ndb/src/ndbapi/NdbIndexOperation.cpp @@ -164,6 +164,7 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, Uint32 tData; Uint32 tKeyInfoPosition; const char* aValue = aValuePassed; + Uint32 xfrmData[1024]; Uint32 tempData[1024]; if ((theStatus == OperationDefined) && @@ -224,6 +225,21 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, m_theIndexDefined[i][2] = true; Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + const char* aValueToWrite = aValue; + + CHARSET_INFO* cs = tAttrInfo->m_cs; + if (cs != 0) { + // current limitation: strxfrm does not increase length + assert(cs->strxfrm_multiply == 1); + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)xfrmData, sizeof(xfrmData), + (const uchar*)aValue, sizeInBytes); + while (n < sizeInBytes) + ((uchar*)xfrmData)[n++] = 0x20; + aValue = (char*)xfrmData; + } + Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ; Uint32 totalSizeInWords = (sizeInBytes + 3)/4;// Inc. bits in last word Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word @@ -314,13 +330,20 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, if ((tOpType == InsertRequest) || (tOpType == WriteRequest)) { if (!tAttrInfo->m_indexOnly){ + // invalid data can crash kernel + if (cs != NULL && + (*cs->cset->well_formed_len)(cs, + aValueToWrite, + aValueToWrite + sizeInBytes, + sizeInBytes) != sizeInBytes) + goto equal_error4; Uint32 ahValue; Uint32 sz = totalSizeInWords; AttributeHeader::init(&ahValue, tAttrId, sz); insertATTRINFO( ahValue ); - insertATTRINFOloop((Uint32*)aValue, sizeInWords); + insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords); if (bitsInLastWord != 0) { - tData = *(Uint32*)(aValue + (sizeInWords << 2)); + tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2)); tData = convertEndian(tData); tData = tData & ((1 << bitsInLastWord) - 1); tData = convertEndian(tData); @@ -411,7 +434,10 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, equal_error3: setErrorCodeAbort(4209); - + return -1; + + equal_error4: + setErrorCodeAbort(744); return -1; } diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp index 6d995e06582..ad838ddd601 100644 --- a/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -492,6 +492,17 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, // Insert Attribute Id into ATTRINFO part. 
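The API-side key handling here follows the same pattern as the kernel readChar* routines above: run the CHAR value through the collation's strnxfrm so it hashes and compares by collation rather than by raw bytes, pad the result with 0x20 up to the fixed length, and reject values that are not well-formed for the character set (error 744) before they can reach the kernel. The same steps appear again in setBound further down. Condensed into one hedged sketch (the helper name and buffer handling are illustrative; the code assumes strxfrm_multiply == 1, as the current implementation does):

    // Returns 0 on success, -1 if the value is invalid for the charset
    // (caller maps this to NDB error 744).
    static int normalize_char_key(CHARSET_INFO* cs,
                                  const char* value, unsigned sizeInBytes,
                                  uchar* xfrmBuf, unsigned bufSize)
    {
      // invalid data can crash the kernel, so validate first
      if ((*cs->cset->well_formed_len)(cs, value, value + sizeInBytes,
                                       sizeInBytes) != sizeInBytes)
        return -1;
      // current limitation: strxfrm must not grow the value
      assert(cs->strxfrm_multiply == 1 && sizeInBytes <= bufSize);
      unsigned n = (*cs->coll->strnxfrm)(cs, xfrmBuf, bufSize,
                                         (const uchar*)value, sizeInBytes);
      while (n < sizeInBytes)
        xfrmBuf[n++] = 0x20;    // pad with spaces to the fixed length
      return 0;
    }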
const Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + + CHARSET_INFO* cs = tAttrInfo->m_cs; + // invalid data can crash kernel + if (cs != NULL && + (*cs->cset->well_formed_len)(cs, + aValue, + aValue + sizeInBytes, + sizeInBytes) != sizeInBytes) { + setErrorCodeAbort(744); + return -1; + } #if 0 tAttrSize = tAttrInfo->theAttrSize; tArraySize = tAttrInfo->theArraySize; diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp index 19cb133dbf7..e5166fc4a82 100644 --- a/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -60,6 +60,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, Uint32 tData; Uint32 tKeyInfoPosition; const char* aValue = aValuePassed; + Uint32 xfrmData[1024]; Uint32 tempData[1024]; if ((theStatus == OperationDefined) && @@ -117,6 +118,21 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, theTupleKeyDefined[i][2] = true; Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + const char* aValueToWrite = aValue; + + CHARSET_INFO* cs = tAttrInfo->m_cs; + if (cs != 0) { + // current limitation: strxfrm does not increase length + assert(cs->strxfrm_multiply == 1); + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)xfrmData, sizeof(xfrmData), + (const uchar*)aValue, sizeInBytes); + while (n < sizeInBytes) + ((uchar*)xfrmData)[n++] = 0x20; + aValue = (char*)xfrmData; + } + Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ; Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. bits in last word Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word @@ -206,13 +222,20 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, if ((tOpType == InsertRequest) || (tOpType == WriteRequest)) { if (!tAttrInfo->m_indexOnly){ + // invalid data can crash kernel + if (cs != NULL && + (*cs->cset->well_formed_len)(cs, + aValueToWrite, + aValueToWrite + sizeInBytes, + sizeInBytes) != sizeInBytes) + goto equal_error4; Uint32 ahValue; const Uint32 sz = totalSizeInWords; AttributeHeader::init(&ahValue, tAttrId, sz); insertATTRINFO( ahValue ); - insertATTRINFOloop((Uint32*)aValue, sizeInWords); + insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords); if (bitsInLastWord != 0) { - tData = *(Uint32*)(aValue + (sizeInWords << 2)); + tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2)); tData = convertEndian(tData); tData = tData & ((1 << bitsInLastWord) - 1); tData = convertEndian(tData); @@ -311,6 +334,10 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, equal_error3: setErrorCodeAbort(4209); return -1; + + equal_error4: + setErrorCodeAbort(744); + return -1; } /****************************************************************************** diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp index 86c174c4545..ac5f4268386 100644 --- a/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/ndb/src/ndbapi/NdbScanOperation.cpp @@ -1096,30 +1096,43 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo, theStatus == SetBound && (0 <= type && type <= 4) && len <= 8000) { - // bound type - + // insert bound type insertATTRINFO(type); - // attribute header Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + // normalize char bound + CHARSET_INFO* cs = tAttrInfo->m_cs; + Uint32 xfrmData[2000]; + if (cs != NULL && aValue != NULL) { + // current limitation: strxfrm does not increase length + assert(cs->strxfrm_multiply == 1); + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)xfrmData, sizeof(xfrmData), + (const 
uchar*)aValue, sizeInBytes); + while (n < sizeInBytes) + ((uchar*)xfrmData)[n++] = 0x20; + aValue = (char*)xfrmData; + } if (len != sizeInBytes && (len != 0)) { setErrorCodeAbort(4209); return -1; } + // insert attribute header len = aValue != NULL ? sizeInBytes : 0; Uint32 tIndexAttrId = tAttrInfo->m_attrId; Uint32 sizeInWords = (len + 3) / 4; AttributeHeader ah(tIndexAttrId, sizeInWords); insertATTRINFO(ah.m_value); if (len != 0) { - // attribute data + // insert attribute data if ((UintPtr(aValue) & 0x3) == 0 && (len & 0x3) == 0) insertATTRINFOloop((const Uint32*)aValue, sizeInWords); else { - Uint32 temp[2000]; - memcpy(temp, aValue, len); + Uint32 tempData[2000]; + memcpy(tempData, aValue, len); while ((len & 0x3) != 0) - ((char*)temp)[len++] = 0; - insertATTRINFOloop(temp, sizeInWords); + ((char*)tempData)[len++] = 0; + insertATTRINFOloop(tempData, sizeInWords); } } @@ -1206,11 +1219,11 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols, if((r1_null ^ (unsigned)r2->isNULL())){ return (r1_null ? -1 : 1); } - Uint32 type = NdbColumnImpl::getImpl(* r1->m_column).m_extType; + const NdbColumnImpl & col = NdbColumnImpl::getImpl(* r1->m_column); Uint32 size = (r1->theAttrSize * r1->theArraySize + 3) / 4; if(!r1_null){ - const NdbSqlUtil::Type& t = NdbSqlUtil::getType(type); - int r = (*t.m_cmp)(d1, d2, size, size); + const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_extType); + int r = (*sqlType.m_cmp)(col.m_cs, d1, d2, size, size); if(r){ assert(r != NdbSqlUtil::CmpUnknown); return r; diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 2ebcf4be444..037c441cc38 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -282,7 +282,7 @@ ErrorBundle ErrorCodes[] = { { 741, SE, "Unsupported alter table" }, { 742, SE, "Unsupported attribute type in index" }, { 743, SE, "Unsupported character set in table or index" }, - { 744, SE, "Character conversion error" }, + { 744, SE, "Character string is invalid for given character set" }, { 241, SE, "Invalid schema object version" }, { 283, SE, "Table is being dropped" }, { 284, SE, "Table not defined in transaction coordinator" }, diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index ac28b96af80..f9eb3514926 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -28,6 +28,7 @@ #include #include #include +#include // options @@ -37,6 +38,8 @@ struct Opt { const char* m_bound; const char* m_case; bool m_core; + const char* m_csname; + CHARSET_INFO* m_cs; bool m_dups; NdbDictionary::Object::FragmentType m_fragtype; unsigned m_idxloop; @@ -59,6 +62,8 @@ struct Opt { m_bound("01234"), m_case(0), m_core(false), + m_csname("latin1_bin"), + m_cs(0), m_dups(false), m_fragtype(NdbDictionary::Object::FragUndefined), m_idxloop(4), @@ -94,6 +99,7 @@ printhelp() << " -bound xyz use only these bound types 0-4 [" << d.m_bound << "]" << endl << " -case abc only given test cases (letters a-z)" << endl << " -core core dump on error [" << d.m_core << "]" << endl + << " -csname S charset (collation) of non-pk char column [" << d.m_csname << "]" << endl << " -dups allow duplicate tuples from index scan [" << d.m_dups << "]" << endl << " -fragtype T fragment type single/small/medium/large" << endl << " -index xyz only given index numbers (digits 1-9)" << endl @@ -983,6 +989,10 @@ createtable(Par par) c.setLength(col.m_length); c.setPrimaryKey(col.m_pk); c.setNullable(col.m_nullable); + if (c.getCharset()) { // test if char type + if (! 
col.m_pk) + c.setCharset(par.m_cs); + } t.addColumn(c); } con.m_dic = con.m_ndb->getDictionary(); @@ -3149,6 +3159,10 @@ runtest(Par par) LL1("start"); if (par.m_seed != 0) srandom(par.m_seed); + assert(par.m_csname != 0); + CHARSET_INFO* cs; + CHK((cs = get_charset_by_name(par.m_csname, MYF(0))) != 0 || (cs = get_charset_by_csname(par.m_csname, MY_CS_PRIMARY, MYF(0))) != 0); + par.m_cs = cs; Con con; CHK(con.connect() == 0); par.m_con = &con; @@ -3232,6 +3246,12 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) g_opt.m_core = true; continue; } + if (strcmp(arg, "-csname") == 0) { + if (++argv, --argc > 0) { + g_opt.m_csname = strdup(argv[0]); + continue; + } + } if (strcmp(arg, "-dups") == 0) { g_opt.m_dups = true; continue; From dbbe2e18755e86c00ec25a9ec1eaf1ae9ee3adcd Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 12:16:34 +0000 Subject: [PATCH 20/55] replaced some "localhost" checks with try bind removed some #id 0 code ndb/src/common/mgmcommon/ConfigRetriever.cpp: replaced some "localhost" checks with try bind ndb/src/mgmsrv/MgmtSrvr.cpp: removed some #id 0 code --- ndb/src/common/mgmcommon/ConfigRetriever.cpp | 39 ++---------- ndb/src/mgmsrv/MgmtSrvr.cpp | 67 +------------------- 2 files changed, 8 insertions(+), 98 deletions(-) diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index a2d6f6a3cea..40325fbae99 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -18,6 +18,7 @@ #include #include +#include #include "LocalConfig.hpp" #include @@ -272,43 +273,15 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 NdbConfig_SetPath(datadir); } - char localhost[MAXHOSTNAMELEN]; - if(NdbHost_GetHostName(localhost) != 0){ - snprintf(buf, 255, "Unable to get own hostname"); + if (hostname && hostname[0] != 0 && + !SocketServer::tryBind(0,hostname)) { + snprintf(buf, 255, "Config hostname(%s) don't match a local interface," + " tried to bind, error = %d - %s", + hostname, errno, strerror(errno)); setError(CR_ERROR, buf); return false; } - do { - if(strlen(hostname) == 0) - break; - - if(strcasecmp(hostname, localhost) == 0) - break; - - if(strcasecmp(hostname, "localhost") == 0) - break; - - struct in_addr local, config; - bool b1 = false, b2 = false, b3 = false; - b1 = Ndb_getInAddr(&local, localhost) == 0; - b2 = Ndb_getInAddr(&config, hostname) == 0; - b3 = memcmp(&local, &config, sizeof(local)) == 0; - - if(b1 && b2 && b3) - break; - - b1 = Ndb_getInAddr(&local, "localhost") == 0; - b3 = memcmp(&local, &config, sizeof(local)) == 0; - if(b1 && b2 && b3) - break; - - snprintf(buf, 255, "Local hostname(%s) and config hostname(%s) dont match", - localhost, hostname); - setError(CR_ERROR, buf); - return false; - } while(false); - unsigned int _type; if(ndb_mgm_get_int_parameter(it, CFG_TYPE_OF_SECTION, &_type)){ snprintf(buf, 255, "Unable to get type of node(%d) from config", diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index e95fc0263c8..b55b6a0e233 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -439,59 +439,6 @@ MgmtSrvr::getPort() const { ndb_mgm_destroy_iterator(iter); - /***************** - * Set Stat Port * - *****************/ -#if 0 - if (!mgmProps->get("PortNumberStats", &tmp)){ - ndbout << "Could not find PortNumberStats in the configuration file." 
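ConfigRetriever::verifyConfig above (and MgmtSrvr::alloc_node_id below) stop enumerating and comparing local hostnames; they simply probe whether the configured hostname can be bound on this machine. SocketServer::tryBind(0, hostname) with port 0 asks the OS for an ephemeral port on that address, so it should succeed exactly when the address belongs to a local interface. For readers unfamiliar with the NDB helper, the same probe in plain BSD sockets looks roughly like this (illustrative only, not the SocketServer implementation):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netdb.h>
    #include <string.h>
    #include <unistd.h>

    // Returns true if 'hostname' resolves to an address we can bind locally.
    static bool is_local_interface(const char* hostname)
    {
      struct hostent* he = gethostbyname(hostname);
      if (he == 0)
        return false;
      int fd = socket(AF_INET, SOCK_STREAM, 0);
      if (fd < 0)
        return false;
      struct sockaddr_in addr;
      memset(&addr, 0, sizeof(addr));
      addr.sin_family = AF_INET;
      addr.sin_port = 0;                       // port 0: any free port
      memcpy(&addr.sin_addr, he->h_addr, sizeof(addr.sin_addr));
      bool ok = bind(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0;
      close(fd);
      return ok;
    }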
- << endl; - return false; - } - glob.port_stats = tmp; -#endif - -#if 0 - const char * host; - if(ndb_mgm_get_string_parameter(iter, mgmProps->get("ExecuteOnComputer", host)){ - ndbout << "Failed to find \"ExecuteOnComputer\" for my node" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - - const char * hostname; - { - const Properties * p; - char buf[255]; - snprintf(buf, sizeof(buf), "Computer_%s", host.c_str()); - if(!glob.cluster_config->get(buf, &p)){ - ndbout << "Failed to find computer " << host << " in config" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - if(!p->get("HostName", &hostname)){ - ndbout << "Failed to find \"HostName\" for computer " << host - << " in config" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - if(NdbHost_GetHostName(buf) != 0){ - ndbout << "Unable to get own hostname" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - } - - const char * ip_address; - if(mgmProps->get("IpAddress", &ip_address)){ - glob.use_specific_ip = true; - glob.interface_name = strdup(ip_address); - return true; - } - - glob.interface_name = strdup(hostname); -#endif - return port; } @@ -2260,20 +2207,9 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, } // connecting through localhost // check if config_hostname is local -#if 1 if (!SocketServer::tryBind(0,config_hostname)) { continue; } -#else - char my_hostname[256]; - if (gethostname(my_hostname, sizeof(my_hostname)) != 0) - continue; - if(Ndb_getInAddr(&tmp_addr, my_hostname) != 0 - || memcmp(&tmp_addr, &config_addr, sizeof(config_addr)) != 0) { - // no match - continue; - } -#endif } } else { // client_addr == 0 if (!SocketServer::tryBind(0,config_hostname)) { @@ -2351,7 +2287,8 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, if (found_matching_type) if (found_free_node) error_string.appfmt("Connection done from wrong host ip %s.", - inet_ntoa(((struct sockaddr_in *)(client_addr))->sin_addr)); + inet_ntoa(((struct sockaddr_in *) + (client_addr))->sin_addr)); else error_string.appfmt("No free node id found for %s.", type_string.c_str()); From e8c9a137699965fcfb3e8cdc281b031d7061b6ca Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 14:18:28 +0200 Subject: [PATCH 21/55] Fix backup event --- ndb/include/debugger/EventLogger.hpp | 4 +- ndb/include/kernel/LogLevel.hpp | 23 +++- .../kernel/signaldata/EventSubscribeReq.hpp | 14 +- .../kernel/signaldata/SetLogLevelOrd.hpp | 21 ++- ndb/src/common/debugger/EventLogger.cpp | 41 +++--- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 21 ++- ndb/src/mgmsrv/MgmtSrvr.cpp | 121 ++++-------------- ndb/src/mgmsrv/Services.cpp | 8 +- 8 files changed, 90 insertions(+), 163 deletions(-) diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp index 9a624559d16..686989089ae 100644 --- a/ndb/include/debugger/EventLogger.hpp +++ b/ndb/include/debugger/EventLogger.hpp @@ -125,8 +125,8 @@ public: * @param theData the event data. * @param nodeId the node id of event origin. */ - virtual void log(int eventType, const Uint32* theData, NodeId nodeId = 0); - + virtual void log(int, const Uint32*, NodeId = 0,const class LogLevel * = 0); + /** * Returns the event text for the specified event report type. 
* diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp index 4db34b4d14c..52c2f70cda8 100644 --- a/ndb/include/kernel/LogLevel.hpp +++ b/ndb/include/kernel/LogLevel.hpp @@ -87,6 +87,8 @@ public: bool operator==(const LogLevel& l) const { return memcmp(this, &l, sizeof(* this)) == 0; } + + LogLevel& operator=(const class EventSubscribeReq & req); private: /** @@ -103,9 +105,7 @@ LogLevel::LogLevel(){ inline LogLevel & LogLevel::operator= (const LogLevel & org){ - for(Uint32 i = 0; i= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES); - logLevelData[ec] = level; + logLevelData[ec] = (Uint8)level; } inline @@ -129,7 +129,7 @@ Uint32 LogLevel::getLogLevel(EventCategory ec) const{ assert(ec >= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES); - return logLevelData[ec]; + return (Uint32)logLevelData[ec]; } inline @@ -142,4 +142,17 @@ LogLevel::set_max(const LogLevel & org){ return * this; } +#include + +inline +LogLevel& +LogLevel::operator=(const EventSubscribeReq& req) +{ + clear(); + for(size_t i = 0; i> 16)] = req.theData[i] & 0xFFFF; + } + return * this; +} + #endif diff --git a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp b/ndb/include/kernel/signaldata/EventSubscribeReq.hpp index 522f8d9b8d3..84a1717b1de 100644 --- a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp +++ b/ndb/include/kernel/signaldata/EventSubscribeReq.hpp @@ -38,8 +38,8 @@ struct EventSubscribeReq { */ friend class MgmtSrvr; - STATIC_CONST( SignalLength = 22 ); - + STATIC_CONST( SignalLength = 2 + LogLevel::LOGLEVEL_CATEGORIES ); + /** * Note: If you use the same blockRef as you have used earlier, * you update your ongoing subscription @@ -52,14 +52,12 @@ struct EventSubscribeReq { */ Uint32 noOfEntries; - Uint32 theCategories[10]; - Uint32 theLevels[10]; - + Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES]; + EventSubscribeReq& operator= (const LogLevel& ll){ - noOfEntries = _LOGLEVEL_CATEGORIES; + noOfEntries = LogLevel::LOGLEVEL_CATEGORIES; for(size_t i = 0; igetLogLevel(cat) : m_logLevel.getLogLevel(cat); + if (threshold <= set){ switch (severity){ case Logger::LL_ALERT: alert(EventLogger::getText(m_text, sizeof(m_text), diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 4c8d82c9e2e..234d832655c 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -169,9 +169,9 @@ void Cmvmi::execSET_LOGLEVELORD(Signal* signal) jamEntry(); for(unsigned int i = 0; inoOfEntries; i++){ - category = (LogLevel::EventCategory)llOrd->theCategories[i]; - level = llOrd->theLevels[i]; - + category = (LogLevel::EventCategory)(llOrd->theData[i] >> 16); + level = llOrd->theData[i] & 0xFFFF; + clogLevel.setLogLevel(category, level); } }//execSET_LOGLEVELORD() @@ -196,10 +196,10 @@ void Cmvmi::execEVENT_REP(Signal* signal) Uint32 threshold = 16; LogLevel::EventCategory eventCategory = (LogLevel::EventCategory)0; - for(unsigned int i = 0; i< EventLogger::matrixSize; i++){ - if(EventLogger::matrix[i].eventType == eventType){ - eventCategory = EventLogger::matrix[i].eventCategory; - threshold = EventLogger::matrix[i].threshold; + for(unsigned int i = 0; i< EventLoggerBase::matrixSize; i++){ + if(EventLoggerBase::matrix[i].eventType == eventType){ + eventCategory = EventLoggerBase::matrix[i].eventCategory; + threshold = EventLoggerBase::matrix[i].threshold; break; } } @@ -266,10 +266,9 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){ LogLevel::EventCategory category; Uint32 level = 0; for(Uint32 i = 0; inoOfEntries; i++){ - category = 
(LogLevel::EventCategory)subReq->theCategories[i]; - level = subReq->theLevels[i]; - ptr.p->logLevel.setLogLevel(category, - level); + category = (LogLevel::EventCategory)(subReq->theData[i] >> 16); + level = subReq->theData[i] & 0xFFFF; + ptr.p->logLevel.setLogLevel(category, level); } } diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 6a68b426149..ab93c8c8fcc 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -133,6 +133,16 @@ MgmtSrvr::signalRecvThreadRun() EventLogger g_EventLogger; +static NdbOut& +operator<<(NdbOut& out, const LogLevel & ll) +{ + out << "[LogLevel: "; + for(size_t i = 0; isendSignalUnCond(signal, node); } @@ -1969,7 +1985,6 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) event.Completed.NoOfLogBytes = rep->noOfLogBytes; event.Completed.NoOfRecords = rep->noOfRecords; event.Completed.NoOfLogRecords = rep->noOfLogRecords; - event.Completed.stopGCP = rep->stopGCP; event.Completed.startGCP = rep->startGCP; event.Nodes = rep->nodes; @@ -2352,9 +2367,9 @@ MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData) const EventReport * const eventReport = (EventReport *)&theData[0]; EventReport::EventType type = eventReport->getEventType(); - // Log event - g_EventLogger.log(type, theData, nodeId); + g_EventLogger.log(type, theData, nodeId, + &m_statisticsListner.m_clients[0].m_logLevel); m_statisticsListner.log(type, theData, nodeId); } @@ -2467,98 +2482,6 @@ MgmtSrvr::abortBackup(Uint32 backupId) void MgmtSrvr::backupCallback(BackupEvent & event) { - char str[255]; - - bool ok = false; - switch(event.Event){ - case BackupEvent::BackupStarted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d started", event.Started.BackupId); - break; - case BackupEvent::BackupFailedToStart: - ok = true; - snprintf(str, sizeof(str), - "Backup failed to start (Backup error %d)", - event.FailedToStart.ErrorCode); - break; - case BackupEvent::BackupCompleted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d completed", - event.Completed.BackupId); - g_EventLogger.info(str); - - snprintf(str, sizeof(str), - " StartGCP: %d StopGCP: %d", - event.Completed.startGCP, event.Completed.stopGCP); - g_EventLogger.info(str); - - snprintf(str, sizeof(str), - " #Records: %d #LogRecords: %d", - event.Completed.NoOfRecords, event.Completed.NoOfLogRecords); - g_EventLogger.info(str); - - snprintf(str, sizeof(str), - " Data: %d bytes Log: %d bytes", - event.Completed.NoOfBytes, event.Completed.NoOfLogBytes); - break; - case BackupEvent::BackupAborted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d has been aborted reason %d", - event.Aborted.BackupId, - event.Aborted.Reason); - break; - } - if(!ok){ - snprintf(str, sizeof(str), - "Unknown backup event: %d", - event.Event); - - } - g_EventLogger.info(str); - - switch (theWaitState){ - case WAIT_BACKUP_STARTED: - switch(event.Event){ - case BackupEvent::BackupStarted: - case BackupEvent::BackupFailedToStart: - m_lastBackupEvent = event; - theWaitState = NO_WAIT; - break; - default: - snprintf(str, sizeof(str), - "Received event %d in unexpected state WAIT_BACKUP_STARTED", - event.Event); - g_EventLogger.info(str); - return; - } - - break; - case WAIT_BACKUP_COMPLETED: - switch(event.Event){ - case BackupEvent::BackupCompleted: - case BackupEvent::BackupAborted: - case BackupEvent::BackupFailedToStart: - m_lastBackupEvent = event; - theWaitState = NO_WAIT; - break; - default: - snprintf(str, sizeof(str), - "Received event %d in unexpected state WAIT_BACKUP_COMPLETED", - 
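The EventSubscribeReq and SET_LOGLEVELORD changes above replace the parallel theCategories[]/theLevels[] arrays with a single theData[] array: each entry packs one (category, level) pair into one 32-bit word, category in the high 16 bits and level in the low 16 bits. Cmvmi and LogLevel::operator= unpack it as shown above, and the Services.cpp producers below build it the same way. In isolation the encoding is just:

    typedef unsigned int Uint32;   // stand-in for NDB's Uint32

    // One subscription entry: category in the upper half-word, level in the lower.
    static Uint32 packEntry(Uint32 category, Uint32 level)
    {
      return (category << 16) | (level & 0xFFFF);
    }

    static void unpackEntry(Uint32 data, Uint32& category, Uint32& level)
    {
      category = data >> 16;
      level    = data & 0xFFFF;
    }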
event.Event); - g_EventLogger.info(str); - return; - } - break; - default: - snprintf(str, sizeof(str), "Received event %d in unexpected state = %d", - event.Event, theWaitState); - g_EventLogger.info(str); - return; - - } } diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index 7634a68e3df..3bdd4eaaa57 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -780,8 +780,7 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, EventSubscribeReq req; req.blockRef = 0; req.noOfEntries = 1; - req.theCategories[0] = category; - req.theLevels[0] = level; + req.theData[0] = (category << 16) | level; m_mgmsrv.m_log_level_requests.push_back(req); m_output->println("set cluster loglevel reply"); @@ -815,8 +814,7 @@ MgmApiSession::setLogLevel(Parser::Context &, EventSubscribeReq req; req.blockRef = node; req.noOfEntries = 1; - req.theCategories[0] = category; - req.theLevels[0] = level; + req.theData[0] = (category << 16) | level; m_mgmsrv.m_log_level_requests.push_back(req); m_output->println("set loglevel reply"); @@ -1239,7 +1237,7 @@ MgmApiSession::configChange(Parser_t::Context &, m_output->println(""); } -NdbOut& +static NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { out << "[LogLevel: "; From 32e8f2ac9560f9c563fa43ba1b6e0954444e2da0 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 15:11:57 +0200 Subject: [PATCH 22/55] New mgmapi call - ndb_mtm_listen_event --- ndb/include/mgmapi/mgmapi.h | 9 ++++++++ ndb/src/mgmapi/mgmapi.cpp | 42 +++++++++++++++++++++++++++++++++++++ ndb/src/mgmsrv/MgmtSrvr.cpp | 2 ++ ndb/src/mgmsrv/Services.cpp | 1 + 4 files changed, 54 insertions(+) diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h index 28be268d6d0..44307c3e73c 100644 --- a/ndb/include/mgmapi/mgmapi.h +++ b/ndb/include/mgmapi/mgmapi.h @@ -668,6 +668,15 @@ extern "C" { int ndb_mgm_exit_single_user(NdbMgmHandle handle, struct ndb_mgm_reply* reply); + /** + * Listen event + * + * @param filter pairs of { level, category } that will be + * pushed to fd, level=0 ends lists + * @return fd which events will be pushed to + */ + int ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]); + /** * Get configuration * @param handle NDB management handle. 
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 09090f7f1af..fccd5c7983b 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -1069,6 +1069,48 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, return 0; } +extern "C" +int +ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]) +{ + SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event"); + const ParserRow stat_reply[] = { + MGM_CMD("listen event", NULL, ""), + MGM_ARG("result", Int, Mandatory, "Error message"), + MGM_ARG("msg", String, Optional, "Error message"), + MGM_END() + }; + CHECK_HANDLE(handle, -1); + + SocketClient s(handle->hostname, handle->port); + const NDB_SOCKET_TYPE sockfd = s.connect(); + if (sockfd < 0) { + setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, + "Unable to connect to"); + return -1; + } + + Properties args; + { + BaseString tmp; + for(int i = 0; filter[i] != 0; i += 2){ + tmp.appfmt("%d=%d ", filter[i+1], filter[i]); + } + args.put("filter", tmp.c_str()); + } + + int tmp = handle->socket; + handle->socket = sockfd; + + const Properties *reply; + reply = ndb_mgm_call(handle, stat_reply, "listen event", &args); + + handle->socket = tmp; + + CHECK_REPLY(reply, -1); + return sockfd; +} + extern "C" int ndb_mgm_get_stat_port(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/) diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index bd58d1330de..944eb47c618 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2419,6 +2419,8 @@ MgmtSrvr::abortBackup(Uint32 backupId) void MgmtSrvr::backupCallback(BackupEvent & event) { + m_lastBackupEvent = event; + theWaitState = NO_WAIT; } diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index 3bdd4eaaa57..f7f4da4930a 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -1391,6 +1391,7 @@ MgmApiSession::listen_event(Parser::Context & ctx, result = -1; goto done; } + category -= CFG_MIN_LOGLEVEL; le.m_logLevel.setLogLevel((LogLevel::EventCategory)category, level); } From 095e10b01194f236acc3b5cd0fe4864a27edd992 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 17 Sep 2004 14:58:08 +0000 Subject: [PATCH 23/55] Preparation for batching --- sql/ha_ndbcluster.cc | 31 +++++++++++++++++++++---------- sql/ha_ndbcluster.h | 2 ++ 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 7df69c8e4c7..3b332d44d53 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -138,6 +138,16 @@ static int ndb_to_mysql_error(const NdbError *err) } + +inline +int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans) +{ + int m_batch_execute= 0; + if (false && m_batch_execute) + return 0; + return trans->execute(NoCommit); +} + /* Place holder for ha_ndbcluster thread specific data */ @@ -217,7 +227,8 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) void ha_ndbcluster::no_uncommitted_rows_update(int c) { DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); - struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; + struct Ndb_table_local_info *info= + (struct Ndb_table_local_info *)m_table_info; info->no_uncommitted_rows_count+= c; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", ((const NDBTAB *)m_table)->getTableId(), @@ -1023,7 +1034,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) } } - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) { 
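The execute_no_commit() wrapper above is only a seam for the coming batching work: today it forwards straight to NdbConnection::execute(NoCommit) (the m_batch_execute branch is dead code), but routing every NoCommit execute in ha_ndbcluster through one function is what will later allow the round trip to be deferred until a batch is complete. A speculative sketch of where this is heading; the batching members are hypothetical and not part of this patch:

    // Hypothetical future shape of the wrapper (execute_no_commit stays a
    // friend of ha_ndbcluster, as in the header change below).
    inline int execute_no_commit(ha_ndbcluster* h, NdbConnection* trans)
    {
      if (h->m_batching_active)          // hypothetical member, not in this patch
      {
        h->m_batched_ops++;              // remember pending work, skip the round trip
        return 0;
      }
      return trans->execute(NoCommit);   // current behaviour
    }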
table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1135,7 +1146,7 @@ inline int ha_ndbcluster::next_result(byte *buf) */ if (ops_pending && blobs_pending) { - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) DBUG_RETURN(ndb_err(trans)); ops_pending= 0; blobs_pending= false; @@ -1163,7 +1174,7 @@ inline int ha_ndbcluster::next_result(byte *buf) DBUG_PRINT("info", ("ops_pending: %d", ops_pending)); if (current_thd->transaction.on) { - if (ops_pending && (trans->execute(NoCommit) != 0)) + if (ops_pending && (execute_no_commit(this,trans) != 0)) DBUG_RETURN(ndb_err(trans)); } else @@ -1503,7 +1514,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) ERR_RETURN(op->getNdbError()); } - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); @@ -1591,7 +1602,7 @@ int ha_ndbcluster::write_row(byte *record) bulk_insert_not_flushed= false; if (thd->transaction.on) { - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) { skip_auto_increment= true; no_uncommitted_rows_execute_failure(); @@ -1766,7 +1777,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) } // Execute update operation - if (!cursor && trans->execute(NoCommit) != 0) { + if (!cursor && execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -1836,7 +1847,7 @@ int ha_ndbcluster::delete_row(const byte *record) } // Execute delete operation - if (trans->execute(NoCommit) != 0) { + if (execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -2266,7 +2277,7 @@ int ha_ndbcluster::close_scan() deleteing/updating transaction before closing the scan */ DBUG_PRINT("info", ("ops_pending: %d", ops_pending)); - if (trans->execute(NoCommit) != 0) { + if (execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -2573,7 +2584,7 @@ int ha_ndbcluster::end_bulk_insert() "rows_inserted:%d, bulk_insert_rows: %d", rows_inserted, bulk_insert_rows)); bulk_insert_not_flushed= false; - if (trans->execute(NoCommit) != 0) { + if (execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); my_errno= error= ndb_err(trans); } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index a25d3e18310..5d8aa0e76db 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -263,6 +263,8 @@ class ha_ndbcluster: public handler void no_uncommitted_rows_update(int); void no_uncommitted_rows_init(THD *); void no_uncommitted_rows_reset(THD *); + + friend int execute_no_commit(ha_ndbcluster*, NdbConnection*); }; bool ndbcluster_init(void); From 162bd361de7b1242b0e54e55e20c9ab8eeb4d445 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Sep 2004 09:24:25 +0200 Subject: [PATCH 24/55] bug#4590 Use ndb_mgm_listen_event to wait for backup messages ndb/src/mgmclient/CommandInterpreter.cpp: Listen to backup event start backup wait until event related to backup arrives ndb/src/mgmsrv/Services.cpp: Fix newline at end of reply --- ndb/src/mgmclient/CommandInterpreter.cpp | 36 ++++++++++++++++++++++-- ndb/src/mgmsrv/Services.cpp | 1 + 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index 816a84375f1..fbb74d7c151 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ 
b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -1672,13 +1672,45 @@ CommandInterpreter::executeStartBackup(char* /*parameters*/) connect(); struct ndb_mgm_reply reply; unsigned int backupId; + + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; + int fd = ndb_mgm_listen_event(m_mgmsrv, filter); int result = ndb_mgm_start_backup(m_mgmsrv, &backupId, &reply); if (result != 0) { ndbout << "Start of backup failed" << endl; printError(); - } else { - ndbout << "Backup started. Backup id " << backupId << "." << endl; + close(fd); + return; } + + char *tmp; + char buf[1024]; + { + SocketInputStream in(fd); + int count = 0; + do { + tmp = in.gets(buf, 1024); + if(tmp) + { + ndbout << tmp; + int id; + if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){ + count++; + } + } + } while(count < 2); + } + + SocketInputStream in(fd, 10); + do { + tmp = in.gets(buf, 1024); + if(tmp && tmp[0] != 0) + { + ndbout << tmp; + } + } while(tmp && tmp[0] != 0); + + close(fd); } void diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index f7f4da4930a..684c10dbd4d 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -1411,6 +1411,7 @@ done: m_output->println("result: %d", result); if(result != 0) m_output->println("msg: %s", msg.c_str()); + m_output->println(""); } template class MutexVector; From 7f1fcf6648c6c4a29b94360fd47f53e90812cb98 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Sep 2004 11:47:27 +0200 Subject: [PATCH 25/55] Dont log IAmNotMaster errors --- ndb/src/kernel/blocks/backup/Backup.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index 6c7a3c977da..08a8bf83e20 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -946,10 +946,12 @@ Backup::sendBackupRef(BlockReference senderRef, Signal *signal, ref->masterRef = numberToRef(BACKUP, getMasterNodeId()); sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB); - signal->theData[0] = EventReport::BackupFailedToStart; - signal->theData[1] = senderRef; - signal->theData[2] = errorCode; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); + if(errorCode != BackupRef::IAmNotMaster){ + signal->theData[0] = EventReport::BackupFailedToStart; + signal->theData[1] = senderRef; + signal->theData[2] = errorCode; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); + } } void From 48e56f47a62829480803a7bdfe46d6ebbea17c47 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Sep 2004 12:03:18 +0200 Subject: [PATCH 26/55] Restored old shared memory buffer implementation (used by SCI and SHM). Improved Default SCI config params Added missing SCI libraries in ndb_mgm and atrt Added max of 1024 signals per receive on transporter (to improve real-time bahaviour and to ensure no job buffer explosion, still some more work left on avoiding job buffer explosion in the general case) ndb/src/common/transporter/Packer.cpp: Fix for job buffer explosion and real-time behaviour also in high load scenarios. ndb/src/common/transporter/SCI_Transporter.cpp: Restored old Shared memory buffer implementation. Changed condition slightly on when to send SCI buffer. 
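ndb_mgm_listen_event (patch 22 above) and its first consumer in the mgm client (patch 24 above) fit together like this: the caller passes a 0-terminated array of { level, category } pairs, the call opens a second socket to the management server, issues the "listen event" command, and returns a file descriptor on which matching cluster log lines are pushed as plain text; executeStartBackup then reads that descriptor until it sees the messages for the backup it started. A minimal usage sketch along the lines of the CommandInterpreter code, assuming a connected NdbMgmHandle and the NDB utility classes that file already uses (SocketInputStream, ndbout); error handling is trimmed:

    // Subscribe to BACKUP events at level 15; the filter list is 0-terminated.
    int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
    int fd = ndb_mgm_listen_event(handle, filter);
    if (fd < 0)
      return;                            // could not subscribe

    char buf[1024];
    SocketInputStream in(fd);            // line-oriented stream over the event socket
    while (char* line = in.gets(buf, sizeof(buf)))
    {
      if (line[0] != 0)
        ndbout << line;                  // each event arrives as one text line
      // stop once the line reports the backup id we started (omitted here)
    }
    close(fd);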
ndb/src/common/transporter/SCI_Transporter.hpp: Changed back to old shared memory implementation ndb/src/common/transporter/SHM_Buffer.hpp: Changed back to old shared memory implementation ndb/src/common/transporter/SHM_Transporter.cpp: Changed back to old shared memory implementation ndb/src/common/transporter/SHM_Transporter.hpp: Changed back to old shared memory implementation ndb/src/common/transporter/TransporterRegistry.cpp: Changed back to old shared memory implementation ndb/src/kernel/vm/FastScheduler.hpp: Spelling error ndb/src/mgmclient/Makefile.am: Missing SCI library ndb/src/mgmsrv/ConfigInfo.cpp: Changed to more proper config parameters ndb/test/run-test/Makefile.am: Added missing SCI library --- ndb/src/common/transporter/Packer.cpp | 21 ++++++++++----- .../common/transporter/SCI_Transporter.cpp | 13 ++-------- .../common/transporter/SCI_Transporter.hpp | 9 +++---- ndb/src/common/transporter/SHM_Buffer.hpp | 26 ++++++------------- .../common/transporter/SHM_Transporter.cpp | 9 ------- .../common/transporter/SHM_Transporter.hpp | 8 +++--- .../transporter/TransporterRegistry.cpp | 16 ++++++------ ndb/src/kernel/vm/FastScheduler.hpp | 2 +- ndb/src/mgmclient/Makefile.am | 2 +- ndb/src/mgmsrv/ConfigInfo.cpp | 4 +-- ndb/test/run-test/Makefile.am | 2 +- 11 files changed, 45 insertions(+), 67 deletions(-) diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp index 645517a4b1a..9eba335330d 100644 --- a/ndb/src/common/transporter/Packer.cpp +++ b/ndb/src/common/transporter/Packer.cpp @@ -21,6 +21,7 @@ #include #include +#define MAX_RECEIVED_SIGNALS 1024 Uint32 TransporterRegistry::unpack(Uint32 * readPtr, Uint32 sizeOfData, @@ -30,12 +31,15 @@ TransporterRegistry::unpack(Uint32 * readPtr, LinearSectionPtr ptr[3]; Uint32 usedData = 0; - + Uint32 loop_count = 0; + if(state == NoHalt || state == HaltOutput){ - while(sizeOfData >= 4 + sizeof(Protocol6)){ + while ((sizeOfData >= 4 + sizeof(Protocol6)) && + (loop_count < MAX_RECEIVED_SIGNALS)) { Uint32 word1 = readPtr[0]; Uint32 word2 = readPtr[1]; Uint32 word3 = readPtr[2]; + loop_count++; #if 0 if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){ @@ -112,10 +116,12 @@ TransporterRegistry::unpack(Uint32 * readPtr, } else { /** state = HaltIO || state == HaltInput */ - while(sizeOfData >= 4 + sizeof(Protocol6)){ + while ((sizeOfData >= 4 + sizeof(Protocol6)) && + (loop_count < MAX_RECEIVED_SIGNALS)) { Uint32 word1 = readPtr[0]; Uint32 word2 = readPtr[1]; Uint32 word3 = readPtr[2]; + loop_count++; #if 0 if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){ @@ -208,12 +214,13 @@ TransporterRegistry::unpack(Uint32 * readPtr, IOState state) { static SignalHeader signalHeader; static LinearSectionPtr ptr[3]; + Uint32 loop_count = 0; if(state == NoHalt || state == HaltOutput){ - while(readPtr < eodPtr){ + while ((readPtr < eodPtr) && (loop_count < MAX_RECEIVED_SIGNALS)) { Uint32 word1 = readPtr[0]; Uint32 word2 = readPtr[1]; Uint32 word3 = readPtr[2]; - + loop_count++; #if 0 if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){ //Do funky stuff @@ -280,11 +287,11 @@ TransporterRegistry::unpack(Uint32 * readPtr, } else { /** state = HaltIO || state == HaltInput */ - while(readPtr < eodPtr){ + while ((readPtr < eodPtr) && (loop_count < MAX_RECEIVED_SIGNALS)) { Uint32 word1 = readPtr[0]; Uint32 word2 = readPtr[1]; Uint32 word3 = readPtr[2]; - + loop_count++; #if 0 if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){ //Do funky stuff diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp 
b/ndb/src/common/transporter/SCI_Transporter.cpp index 465d7827069..73fbb064599 100644 --- a/ndb/src/common/transporter/SCI_Transporter.cpp +++ b/ndb/src/common/transporter/SCI_Transporter.cpp @@ -530,7 +530,6 @@ void SCI_Transporter::setupLocalSegment() Uint32 * localReadIndex = (Uint32*)m_SourceSegm[m_ActiveAdapterId].mappedMemory; Uint32 * localWriteIndex = (Uint32*)(localReadIndex+ 1); - Uint32 * localEndWriteIndex = (Uint32*)(localReadIndex + 2); m_localStatusFlag = (Uint32*)(localReadIndex + 3); char * localStartOfBuf = (char*) @@ -538,7 +537,6 @@ void SCI_Transporter::setupLocalSegment() * localReadIndex = 0; * localWriteIndex = 0; - * localEndWriteIndex = 0; const Uint32 slack = MAX_MESSAGE_SIZE; @@ -546,7 +544,6 @@ void SCI_Transporter::setupLocalSegment() sizeOfBuffer, slack, localReadIndex, - localEndWriteIndex, localWriteIndex); reader->clear(); @@ -570,7 +567,6 @@ void SCI_Transporter::setupRemoteSegment() Uint32 * remoteReadIndex = (Uint32*)segPtr; Uint32 * remoteWriteIndex = (Uint32*)(segPtr + 1); - Uint32 * remoteEndWriteIndex = (Uint32*) (segPtr + 2); m_remoteStatusFlag = (Uint32*)(segPtr + 3); char * remoteStartOfBuf = ( char*)((char*)segPtr+(sharedSize)); @@ -579,7 +575,6 @@ void SCI_Transporter::setupRemoteSegment() sizeOfBuffer, slack, remoteReadIndex, - remoteEndWriteIndex, remoteWriteIndex); writer->clear(); @@ -598,7 +593,6 @@ void SCI_Transporter::setupRemoteSegment() Uint32 * remoteReadIndex2 = (Uint32*)segPtr; Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1); - Uint32 * remoteEndWriteIndex2 = (Uint32*) (segPtr + 2); m_remoteStatusFlag2 = (Uint32*)(segPtr + 3); char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize); @@ -613,12 +607,10 @@ void SCI_Transporter::setupRemoteSegment() sizeOfBuffer, slack, remoteReadIndex2, - remoteEndWriteIndex2, remoteWriteIndex2); * remoteReadIndex = 0; * remoteWriteIndex = 0; - * remoteEndWriteIndex = 0; writer2->clear(); m_TargetSegm[1].writer=writer2; if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) { @@ -918,14 +910,13 @@ SCI_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio) Uint32 send_buf_size = m_sendBuffer.m_sendBufferSize; Uint32 curr_data_size = m_sendBuffer.m_dataSize << 2; Uint32 new_curr_data_size = curr_data_size + lenBytes; - if ((new_curr_data_size >= send_buf_size) || + if ((curr_data_size >= send_buf_size) || (curr_data_size >= sci_buffer_remaining)) { /** * The new message will not fit in the send buffer. We need to * send the send buffer before filling it up with the new * signal data. If current data size will spill over buffer edge - * we will also send to avoid writing larger than possible in - * buffer. + * we will also send to ensure correct operation. 
*/ if (!doSend()) { /** diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp index adc94f8bb4b..e2f2dfcaf99 100644 --- a/ndb/src/common/transporter/SCI_Transporter.hpp +++ b/ndb/src/common/transporter/SCI_Transporter.hpp @@ -297,13 +297,12 @@ private: */ bool sendIsPossible(struct timeval * timeout); - - void getReceivePtr(Uint32 ** ptr, Uint32 &size){ - size = reader->getReadPtr(* ptr); + void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){ + reader->getReadPtr(* ptr, * eod); } - void updateReceivePtr(Uint32 size){ - reader->updateReadPtr(size); + void updateReceivePtr(Uint32 *ptr){ + reader->updateReadPtr(ptr); } /** diff --git a/ndb/src/common/transporter/SHM_Buffer.hpp b/ndb/src/common/transporter/SHM_Buffer.hpp index b0dbd3362a8..f49b4fe73cb 100644 --- a/ndb/src/common/transporter/SHM_Buffer.hpp +++ b/ndb/src/common/transporter/SHM_Buffer.hpp @@ -42,13 +42,11 @@ public: Uint32 _sizeOfBuffer, Uint32 _slack, Uint32 * _readIndex, - Uint32 * _endWriteIndex, Uint32 * _writeIndex) : m_startOfBuffer(_startOfBuffer), m_totalBufferSize(_sizeOfBuffer), m_bufferSize(_sizeOfBuffer - _slack), m_sharedReadIndex(_readIndex), - m_sharedEndWriteIndex(_endWriteIndex), m_sharedWriteIndex(_writeIndex) { } @@ -68,12 +66,12 @@ public: * returns ptr - where to start reading * sz - how much can I read */ - inline Uint32 getReadPtr(Uint32 * & ptr); + inline void getReadPtr(Uint32 * & ptr, Uint32 * & eod); /** * Update read ptr */ - inline void updateReadPtr(Uint32 size); + inline void updateReadPtr(Uint32 *ptr); private: char * const m_startOfBuffer; @@ -82,7 +80,6 @@ private: Uint32 m_readIndex; Uint32 * m_sharedReadIndex; - Uint32 * m_sharedEndWriteIndex; Uint32 * m_sharedWriteIndex; }; @@ -100,22 +97,19 @@ SHM_Reader::empty() const{ * sz - how much can I read */ inline -Uint32 -SHM_Reader::getReadPtr(Uint32 * & ptr) +void +SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod) { - Uint32 *eod; Uint32 tReadIndex = m_readIndex; Uint32 tWriteIndex = * m_sharedWriteIndex; - Uint32 tEndWriteIndex = * m_sharedEndWriteIndex; ptr = (Uint32*)&m_startOfBuffer[tReadIndex]; if(tReadIndex <= tWriteIndex){ eod = (Uint32*)&m_startOfBuffer[tWriteIndex]; } else { - eod = (Uint32*)&m_startOfBuffer[tEndWriteIndex]; + eod = (Uint32*)&m_startOfBuffer[m_bufferSize]; } - return (Uint32)((char*)eod - (char*)ptr); } /** @@ -123,10 +117,10 @@ SHM_Reader::getReadPtr(Uint32 * & ptr) */ inline void -SHM_Reader::updateReadPtr(Uint32 size) +SHM_Reader::updateReadPtr(Uint32 *ptr) { - Uint32 tReadIndex = m_readIndex; - tReadIndex += size; + Uint32 tReadIndex = ((char*)ptr) - m_startOfBuffer; + assert(tReadIndex < m_totalBufferSize); if(tReadIndex >= m_bufferSize){ @@ -145,13 +139,11 @@ public: Uint32 _sizeOfBuffer, Uint32 _slack, Uint32 * _readIndex, - Uint32 * _endWriteIndex, Uint32 * _writeIndex) : m_startOfBuffer(_startOfBuffer), m_totalBufferSize(_sizeOfBuffer), m_bufferSize(_sizeOfBuffer - _slack), m_sharedReadIndex(_readIndex), - m_sharedEndWriteIndex(_endWriteIndex), m_sharedWriteIndex(_writeIndex) { } @@ -176,7 +168,6 @@ private: Uint32 m_writeIndex; Uint32 * m_sharedReadIndex; - Uint32 * m_sharedEndWriteIndex; Uint32 * m_sharedWriteIndex; }; @@ -215,7 +206,6 @@ SHM_Writer::updateWritePtr(Uint32 sz){ assert(tWriteIndex < m_totalBufferSize); if(tWriteIndex >= m_bufferSize){ - * m_sharedEndWriteIndex = tWriteIndex; tWriteIndex = 0; } diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp index 7c801658dbd..ab161d8c18c 100644 
--- a/ndb/src/common/transporter/SHM_Transporter.cpp +++ b/ndb/src/common/transporter/SHM_Transporter.cpp @@ -82,14 +82,12 @@ SHM_Transporter::setupBuffers(){ Uint32 * sharedReadIndex1 = base1; Uint32 * sharedWriteIndex1 = base1 + 1; - Uint32 * sharedEndWriteIndex1 = base1 + 2; serverStatusFlag = base1 + 4; char * startOfBuf1 = shmBuf+sharedSize; Uint32 * base2 = (Uint32*)(shmBuf + sizeOfBuffer + sharedSize); Uint32 * sharedReadIndex2 = base2; Uint32 * sharedWriteIndex2 = base2 + 1; - Uint32 * sharedEndWriteIndex2 = base2 + 2; clientStatusFlag = base2 + 4; char * startOfBuf2 = ((char *)base2)+sharedSize; @@ -99,23 +97,19 @@ SHM_Transporter::setupBuffers(){ sizeOfBuffer, slack, sharedReadIndex1, - sharedEndWriteIndex1, sharedWriteIndex1); writer = new SHM_Writer(startOfBuf2, sizeOfBuffer, slack, sharedReadIndex2, - sharedEndWriteIndex2, sharedWriteIndex2); * sharedReadIndex1 = 0; * sharedWriteIndex1 = 0; - * sharedEndWriteIndex1 = 0; * sharedReadIndex2 = 0; * sharedWriteIndex2 = 0; - * sharedEndWriteIndex2 = 0; reader->clear(); writer->clear(); @@ -148,19 +142,16 @@ SHM_Transporter::setupBuffers(){ sizeOfBuffer, slack, sharedReadIndex2, - sharedEndWriteIndex2, sharedWriteIndex2); writer = new SHM_Writer(startOfBuf1, sizeOfBuffer, slack, sharedReadIndex1, - sharedEndWriteIndex1, sharedWriteIndex1); * sharedReadIndex2 = 0; * sharedWriteIndex1 = 0; - * sharedEndWriteIndex1 = 0; reader->clear(); writer->clear(); diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/ndb/src/common/transporter/SHM_Transporter.hpp index 892acbb7ac4..27692209ffe 100644 --- a/ndb/src/common/transporter/SHM_Transporter.hpp +++ b/ndb/src/common/transporter/SHM_Transporter.hpp @@ -61,12 +61,12 @@ public: writer->updateWritePtr(lenBytes); } - void getReceivePtr(Uint32 ** ptr, Uint32 sz){ - sz = reader->getReadPtr(* ptr); + void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){ + reader->getReadPtr(* ptr, * eod); } - void updateReceivePtr(Uint32 sz){ - reader->updateReadPtr(sz); + void updateReceivePtr(Uint32 * ptr){ + reader->updateReadPtr(ptr); } protected: diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index ca574b19dbc..5ca88211f54 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -857,11 +857,11 @@ TransporterRegistry::performReceive(){ const NodeId nodeId = t->getRemoteNodeId(); if(is_connected(nodeId)){ if(t->isConnected() && t->checkConnected()){ - Uint32 * readPtr; + Uint32 * readPtr, * eodPtr; Uint32 sz = 0; - t->getReceivePtr(&readPtr, sz); - Uint32 szUsed = unpack(readPtr, sz, nodeId, ioStates[nodeId]); - t->updateReceivePtr(szUsed); + t->getReceivePtr(&readPtr, &eodPtr); + Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]); + t->updateReceivePtr(newPtr); } } } @@ -873,11 +873,11 @@ TransporterRegistry::performReceive(){ const NodeId nodeId = t->getRemoteNodeId(); if(is_connected(nodeId)){ if(t->isConnected() && t->checkConnected()){ - Uint32 * readPtr; + Uint32 * readPtr, * eodPtr; Uint32 sz = 0; - t->getReceivePtr(&readPtr, sz); - Uint32 szUsed = unpack(readPtr, sz, nodeId, ioStates[nodeId]); - t->updateReceivePtr(szUsed); + t->getReceivePtr(&readPtr, &eodPtr); + Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]); + t->updateReceivePtr(newPtr); } } } diff --git a/ndb/src/kernel/vm/FastScheduler.hpp b/ndb/src/kernel/vm/FastScheduler.hpp index 9749dab5d85..dc707e47eef 100644 --- a/ndb/src/kernel/vm/FastScheduler.hpp +++ 
b/ndb/src/kernel/vm/FastScheduler.hpp @@ -141,7 +141,7 @@ int FastScheduler::checkDoJob() { /* - * Joob buffer overload protetction + * Job buffer overload protetction * If the job buffer B is filled over a certain limit start * to execute the signals in the job buffer's */ diff --git a/ndb/src/mgmclient/Makefile.am b/ndb/src/mgmclient/Makefile.am index 72ddc9d098b..e271c7bed53 100644 --- a/ndb/src/mgmclient/Makefile.am +++ b/ndb/src/mgmclient/Makefile.am @@ -16,7 +16,7 @@ LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/strings/libmystrings.a \ - @TERMCAP_LIB@ + @TERMCAP_LIB@ @NDB_SCI_LIBS@ ndb_mgm_LDFLAGS = @ndb_bin_am_ldflags@ diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index 7cb438bd2dd..3bbceb0113e 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -1944,7 +1944,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { ConfigInfo::USED, false, ConfigInfo::INT, - "2K", + "8K", "128", "32K" }, @@ -1956,7 +1956,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { ConfigInfo::USED, false, ConfigInfo::INT, - "192K", + "1M", "64K", STR_VALUE(MAX_INT_RNIL) }, diff --git a/ndb/test/run-test/Makefile.am b/ndb/test/run-test/Makefile.am index 03b53509f05..3bf2edde47a 100644 --- a/ndb/test/run-test/Makefile.am +++ b/ndb/test/run-test/Makefile.am @@ -16,7 +16,7 @@ LDADD_LOC = $(top_builddir)/ndb/src/mgmclient/CpcClient.o \ $(top_builddir)/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ wrappersdir=$(prefix)/bin wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run From fd4761dfaccaf3268fc6ed477e40d16ca1a6c688 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Sep 2004 16:30:23 +0200 Subject: [PATCH 27/55] Patches to make it compile --- ndb/src/common/mgmcommon/IPCConfig.cpp | 6 +++--- ndb/src/common/transporter/TransporterRegistry.cpp | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp index 285df61f8a2..b70881dd830 100644 --- a/ndb/src/common/mgmcommon/IPCConfig.cpp +++ b/ndb/src/common/mgmcommon/IPCConfig.cpp @@ -370,7 +370,7 @@ IPCConfig::configureTransporters(Uint32 nodeId, tr.add_transporter_interface(localHostName, server_port); } DBUG_PRINT("info", ("Transporter between this node %d and node %d using port %d, signalId %d, checksum %d", - nodeId, remoteNodeId, tmp_server_port, sendSignalId, checksum)); + nodeId, remoteNodeId, server_port, sendSignalId, checksum)); switch(type){ case CONNECTION_TYPE_SHM:{ SHM_TransporterConfiguration conf; @@ -385,7 +385,7 @@ IPCConfig::configureTransporters(Uint32 nodeId, conf.port= server_port; if(!tr.createTransporter(&conf)){ - DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d", + DBUG_PRINT("error", ("Failed to create SHM Transporter from %d to %d", conf.localNodeId, conf.remoteNodeId)); ndbout << "Failed to create SHM Transporter from: " << conf.localNodeId << " to: " << conf.remoteNodeId << endl; @@ -403,7 +403,7 @@ IPCConfig::configureTransporters(Uint32 nodeId, conf.remoteNodeId = remoteNodeId; conf.checksum = checksum; conf.signalId = sendSignalId; - conf.port= tmp_server_port; + conf.port= server_port; if(iter.get(CFG_SCI_HOSTNAME_1, &host1)) break; if(iter.get(CFG_SCI_HOSTNAME_2, &host2)) break; diff --git 
a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index 1608444e739..cacbbed00f1 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -107,7 +107,6 @@ TransporterRegistry::TransporterRegistry(void * callback, unsigned _maxTransporters, unsigned sizeOfLongSignalMemory) { - m_transporter_service= 0; nodeIdSpecified = false; maxTransporters = _maxTransporters; sendCounter = 1; From 40880be36ac61b5ded7ca0ff555325c0fdf7eceb Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Sep 2004 17:25:12 +0200 Subject: [PATCH 28/55] Fix event systable collation (binary) --- ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp index 40e6aa2dcd7..2a65271a32a 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp @@ -46,7 +46,7 @@ Ndbcntr::g_sysTable_SYSTAB_0 = { static const Ndbcntr::SysColumn column_NDBEVENTS_0[] = { { 0, "NAME", - DictTabInfo::ExtChar, MAX_TAB_NAME_SIZE, + DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE, true, false }, { 1, "EVENT_TYPE", @@ -54,7 +54,7 @@ column_NDBEVENTS_0[] = { false, false }, { 2, "TABLE_NAME", - DictTabInfo::ExtChar, MAX_TAB_NAME_SIZE, + DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE, false, false }, { 3, "ATTRIBUTE_MASK", From 091e238cd300046e67b8b7f8e22849895e124401 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Sep 2004 15:52:19 +0000 Subject: [PATCH 29/55] suggested fix for bug 5591 and correct select count() with several clients --- sql/ha_ndbcluster.cc | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 3b332d44d53..f8ffa20de9d 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -183,7 +183,7 @@ void ha_ndbcluster::records_update() DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", ((const NDBTAB *)m_table)->getTableId(), info->no_uncommitted_rows_count)); - if (info->records == ~(ha_rows)0) + // if (info->records == ~(ha_rows)0) { Uint64 rows; if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){ @@ -614,7 +614,7 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_ENTER("get_metadata"); DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path)); - if (!(tab= dict->getTable(m_tabname, &m_table_info))) + if (!(tab= dict->getTable(m_tabname))) ERR_RETURN(dict->getNdbError()); DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); @@ -663,7 +663,9 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_RETURN(error); // All checks OK, lets use the table - m_table= (void*)tab; + // m_table= (void*)tab; + m_table= 0; + m_table_info= 0; DBUG_RETURN(build_index_list(table, ILBP_OPEN)); } @@ -2396,7 +2398,17 @@ void ha_ndbcluster::info(uint flag) if (flag & HA_STATUS_VARIABLE) { DBUG_PRINT("info", ("HA_STATUS_VARIABLE")); - records_update(); + if (m_table_info) + { + records_update(); + } + else + { + Uint64 rows; + if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){ + records= rows; + } + } } if (flag & HA_STATUS_ERRKEY) { @@ -2777,6 +2789,16 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) // Start of transaction retrieve_all_fields= FALSE; ops_pending= 0; + { + NDBDICT *dict= m_ndb->getDictionary(); + const NDBTAB *tab; + void *tab_info; 
+ if (!(tab= dict->getTable(m_tabname, &tab_info))) + ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); + m_table= (void *)tab; + m_table_info= tab_info; + } no_uncommitted_rows_init(thd); } else @@ -2799,6 +2821,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) thd->transaction.stmt.ndb_tid= 0; } } + m_table= (void *)0; + m_table_info= 0; if (m_active_trans) DBUG_PRINT("warning", ("m_active_trans != NULL")); if (m_active_cursor) From ef1e278a20c30e4c8a582bf44ce0814bb230e82a Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Sep 2004 06:33:29 +0200 Subject: [PATCH 30/55] wl1292 remove -e from atrt-mysql-test-run --- ndb/test/run-test/atrt-mysql-test-run | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/test/run-test/atrt-mysql-test-run b/ndb/test/run-test/atrt-mysql-test-run index 75482f4b4a7..7657140d0fa 100755 --- a/ndb/test/run-test/atrt-mysql-test-run +++ b/ndb/test/run-test/atrt-mysql-test-run @@ -1,6 +1,6 @@ #!/bin/sh -set -e -x +set -x p=`pwd` cd $MYSQL_BASE_DIR/mysql-test ./mysql-test-run --with-ndbcluster --ndbconnectstring=$NDB_CONNECTSTRING $* | tee $p/output.txt From 5ebab4843777f466b0b62b9abda47677d81b87fe Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Sep 2004 08:13:35 +0200 Subject: [PATCH 31/55] Removed debug printout --- ndb/src/mgmsrv/MgmtSrvr.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 944eb47c618..d45953503ee 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -179,7 +179,6 @@ MgmtSrvr::logLevelThreadRun() LogLevel tmp; tmp = req; - ndbout << "req3: " << tmp << endl; if(req.blockRef == 0){ req.blockRef = _ownReference; From b61bfd8c9786da2c33530699245eacc2b96d768b Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Sep 2004 14:58:23 +0200 Subject: [PATCH 32/55] Make operation list on fragment fifo so that uncommitted operations are undo-logged in correct order Add a bunch of testcases to autotest ndb/src/kernel/blocks/dbtup/Dbtup.hpp: Send fragrecord ptr to initOpConnection so that it does not have to look it up every time (if needed) Make operation list on fragment fifo so that uncommitted operations are undo-logged in correct order ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp: Send fragptr to initOpConnection ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp: dllist -> dlfifolist ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp: dllist -> dlfifolist ndb/src/kernel/blocks/dbtup/DbtupGen.cpp: Make operation list on fragment fifo so that uncommitted operations are undo-logged in correct order ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp: Fix uninitialized variable c_start.m_startTime which made startPartial and startPartitioned fail ndb/test/ndbapi/testSystemRestart.cpp: Added testSystemRestart -n SR9 which demonstrates that two prepared operations on the same record _used to_ be undo-logged in the wrong order, making system restart fail ndb/test/run-test/daily-devel-tests.txt: Add * testSystemRestart -n SR6 Restart while some nodes have fs and others not * testSystemRestart -n SR7 Restart in partition win * testSystemRestart -n SR8 Restart in partition win, others starting during restart * testSystemRestart -n SR9 Multiple ops on same record prepared before --- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 3 +- ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp | 6 +- ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 15 ++--- .../kernel/blocks/dbtup/DbtupExecQuery.cpp | 20 +++--- 
ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 3 +- ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 8 ++- ndb/test/ndbapi/testSystemRestart.cpp | 62 ++++++++++++++++++- ndb/test/run-test/daily-devel-tests.txt | 16 +++++ 8 files changed, 103 insertions(+), 30 deletions(-) diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index ce81c1c9bc8..0e8dd5fbbe8 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -515,6 +515,7 @@ struct Fragrecord { Uint32 emptyPrimPage; Uint32 firstusedOprec; + Uint32 lastusedOprec; Uint32 thFreeFirst; Uint32 thFreeCopyFirst; @@ -1653,7 +1654,7 @@ private: //------------------------------------------------------------------ //------------------------------------------------------------------ - void initOpConnection(Operationrec* const regOperPtr); + void initOpConnection(Operationrec* regOperPtr, Fragrecord*); //------------------------------------------------------------------ //------------------------------------------------------------------ diff --git a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp index 1ffc5f06754..e9043a8b52d 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp @@ -77,7 +77,7 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal) if (regOperPtr.p->optype == ZREAD) { ljam(); freeAllAttrBuffers(regOperPtr.p); - initOpConnection(regOperPtr.p); + initOpConnection(regOperPtr.p, 0); return; }//if @@ -134,7 +134,7 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal) ndbrequire(regOperPtr.p->tupleState == ALREADY_ABORTED); commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p); }//if - initOpConnection(regOperPtr.p); + initOpConnection(regOperPtr.p, regFragPtr.p); }//execTUP_ABORTREQ() void Dbtup::setTupleStateOnPreviousOps(Uint32 prevOpIndex) @@ -459,7 +459,7 @@ void Dbtup::tupkeyErrorLab(Signal* signal) freeAllAttrBuffers(regOperPtr); abortUpdate(signal, regOperPtr, fragptr.p, tabptr.p); removeActiveOpList(regOperPtr); - initOpConnection(regOperPtr); + initOpConnection(regOperPtr, fragptr.p); regOperPtr->transstate = IDLE; regOperPtr->tupleState = NO_OTHER_OP; TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtrSend(); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp index fa3667b221e..cbd56c3281f 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp @@ -224,7 +224,8 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr) /* ---------------------------------------------------------------- */ /* INITIALIZATION OF ONE CONNECTION RECORD TO PREPARE FOR NEXT OP. 
*/ /* ---------------------------------------------------------------- */ -void Dbtup::initOpConnection(Operationrec* const regOperPtr) +void Dbtup::initOpConnection(Operationrec* regOperPtr, + Fragrecord * fragPtrP) { Uint32 RinFragList = regOperPtr->inFragList; regOperPtr->transstate = IDLE; @@ -244,22 +245,18 @@ void Dbtup::initOpConnection(Operationrec* const regOperPtr) regOperPtr->inFragList = ZFALSE; if (tropPrevLinkPtr.i == RNIL) { ljam(); - FragrecordPtr regFragPtr; - regFragPtr.i = regOperPtr->fragmentPtr; - ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); - regFragPtr.p->firstusedOprec = tropNextLinkPtr.i; + fragPtrP->firstusedOprec = tropNextLinkPtr.i; } else { ljam(); ptrCheckGuard(tropPrevLinkPtr, cnoOfOprec, operationrec); tropPrevLinkPtr.p->nextOprecInList = tropNextLinkPtr.i; }//if if (tropNextLinkPtr.i == RNIL) { - ; + fragPtrP->lastusedOprec = tropPrevLinkPtr.i; } else { - ljam(); ptrCheckGuard(tropNextLinkPtr, cnoOfOprec, operationrec); tropNextLinkPtr.p->prevOprecInList = tropPrevLinkPtr.i; - }//if + } regOperPtr->prevOprecInList = RNIL; regOperPtr->nextOprecInList = RNIL; }//if @@ -336,7 +333,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal) commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p); removeActiveOpList(regOperPtr.p); }//if - initOpConnection(regOperPtr.p); + initOpConnection(regOperPtr.p, regFragPtr.p); }//execTUP_COMMITREQ() void diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index dfd1e37d4f5..0061ebe812d 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -319,24 +319,20 @@ void Dbtup::linkOpIntoFragList(OperationrecPtr regOperPtr, Fragrecord* const regFragPtr) { OperationrecPtr sopTmpOperPtr; -/* ----------------------------------------------------------------- */ -/* LINK THE OPERATION INTO A DOUBLY LINKED LIST ON THE FRAGMENT*/ -/* PUT IT FIRST IN THIS LIST SINCE IT DOESN'T MATTER WHERE IT */ -/* IS PUT. 
*/ -/* ----------------------------------------------------------------- */ + Uint32 tail = regFragPtr->lastusedOprec; ndbrequire(regOperPtr.p->inFragList == ZFALSE); regOperPtr.p->inFragList = ZTRUE; - regOperPtr.p->prevOprecInList = RNIL; - sopTmpOperPtr.i = regFragPtr->firstusedOprec; - regFragPtr->firstusedOprec = regOperPtr.i; - regOperPtr.p->nextOprecInList = sopTmpOperPtr.i; - if (sopTmpOperPtr.i == RNIL) { - return; + regOperPtr.p->prevOprecInList = tail; + regOperPtr.p->nextOprecInList = RNIL; + sopTmpOperPtr.i = tail; + if (tail == RNIL) { + regFragPtr->firstusedOprec = regOperPtr.i; } else { jam(); ptrCheckGuard(sopTmpOperPtr, cnoOfOprec, operationrec); - sopTmpOperPtr.p->prevOprecInList = regOperPtr.i; + sopTmpOperPtr.p->nextOprecInList = regOperPtr.i; }//if + regFragPtr->lastusedOprec = regOperPtr.i; }//Dbtup::linkOpIntoFragList() /* diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index f3391ff7b59..d33adcd08e1 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -963,6 +963,7 @@ void Dbtup::initializeFragrecord() regFragPtr.p->nextfreefrag = regFragPtr.i + 1; regFragPtr.p->checkpointVersion = RNIL; regFragPtr.p->firstusedOprec = RNIL; + regFragPtr.p->lastusedOprec = RNIL; regFragPtr.p->fragStatus = IDLE; }//for regFragPtr.i = cnoOfFragrec - 1; @@ -1164,7 +1165,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal) return; }//if regOperPtr.p->optype = ZREAD; - initOpConnection(regOperPtr.p); + initOpConnection(regOperPtr.p, 0); regOperPtr.p->userpointer = userPtr; regOperPtr.p->userblockref = userRef; signal->theData[0] = regOperPtr.p->userpointer; diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index ff4876b1506..568ed6c6566 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -103,7 +103,7 @@ void Ndbcntr::execCONTINUEB(Signal* signal) } Uint64 now = NdbTick_CurrentMillisecond(); - if(c_start.m_startFailureTimeout > now){ + if(now > c_start.m_startFailureTimeout){ ndbrequire(false); } @@ -446,13 +446,17 @@ void Ndbcntr::execREAD_NODESCONF(Signal* signal) ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTITION_TIMEOUT, &to_2); ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3); + c_start.m_startTime = NdbTick_CurrentMillisecond(); c_start.m_startPartialTimeout = setTimeout(c_start.m_startTime, to_1); c_start.m_startPartitionedTimeout = setTimeout(c_start.m_startTime, to_2); c_start.m_startFailureTimeout = setTimeout(c_start.m_startTime, to_3); - + UpgradeStartup::sendCmAppChg(* this, signal, 0); // ADD sendCntrStartReq(signal); + + signal->theData[0] = ZSTARTUP; + sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1); return; } diff --git a/ndb/test/ndbapi/testSystemRestart.cpp b/ndb/test/ndbapi/testSystemRestart.cpp index 68e5eacc631..f8f2b84acc4 100644 --- a/ndb/test/ndbapi/testSystemRestart.cpp +++ b/ndb/test/ndbapi/testSystemRestart.cpp @@ -872,7 +872,7 @@ int runSystemRestart7(NDBT_Context* ctx, NDBT_Step* step){ const Uint32 nodeCount = restarter.getNumDbNodes(); if(nodeCount < 2){ - g_info << "SR8 - Needs atleast 2 nodes to test" << endl; + g_info << "SR7 - Needs atleast 2 nodes to test" << endl; return NDBT_OK; } @@ -1001,7 +1001,52 @@ int runSystemRestart8(NDBT_Context* ctx, NDBT_Step* step){ i++; } - g_info << "runSystemRestart7 finished" << endl; + g_info << "runSystemRestart8 finished" << endl; + + return result; +} + +int 
runSystemRestart9(NDBT_Context* ctx, NDBT_Step* step){ + Ndb* pNdb = GETNDB(step); + int result = NDBT_OK; + int timeout = 300; + Uint32 loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter restarter; + Uint32 i = 1; + + Uint32 currentRestartNodeIndex = 1; + UtilTransactions utilTrans(*ctx->getTab()); + HugoTransactions hugoTrans(*ctx->getTab()); + + int args[] = { DumpStateOrd::DihMaxTimeBetweenLCP }; + int dump[] = { DumpStateOrd::DihStartLcpImmediately }; + + do { + CHECK(restarter.dumpStateAllNodes(args, 1) == 0); + + HugoOperations ops(* ctx->getTab()); + CHECK(ops.startTransaction(pNdb) == 0); + for(i = 0; i<10; i++){ + CHECK(ops.pkInsertRecord(pNdb, i, 1, 1) == 0); + CHECK(ops.execute_NoCommit(pNdb) == 0); + } + for(i = 0; i<10; i++){ + CHECK(ops.pkUpdateRecord(pNdb, i, 1) == 0); + CHECK(ops.execute_NoCommit(pNdb) == 0); + } + NdbSleep_SecSleep(10); + CHECK(restarter.dumpStateAllNodes(dump, 1) == 0); + NdbSleep_SecSleep(10); + CHECK(ops.execute_Commit(pNdb) == 0); + + CHECK(restarter.restartAll() == 0); + CHECK(restarter.waitClusterStarted(timeout) == 0); + CHECK(pNdb->waitUntilReady(timeout) == 0); + ops.closeTransaction(pNdb); + } while(0); + + g_info << "runSystemRestart9 finished" << endl; return result; } @@ -1176,6 +1221,19 @@ TESTCASE("SR8", STEP(runSystemRestart8); FINALIZER(runClearTable); } +TESTCASE("SR9", + "Perform partition win system restart with other nodes delayed\n" + "* 1. Start transaction\n" + "* 2. insert (1,1)\n" + "* 3. update (1,2)\n" + "* 4. start lcp\n" + "* 5. commit\n" + "* 6. restart\n"){ + INITIALIZER(runWaitStarted); + INITIALIZER(runClearTable); + STEP(runSystemRestart9); + FINALIZER(runClearTable); +} NDBT_TESTSUITE_END(testSystemRestart); int main(int argc, const char** argv){ diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt index 522bbde32bc..64d11bc7701 100644 --- a/ndb/test/run-test/daily-devel-tests.txt +++ b/ndb/test/run-test/daily-devel-tests.txt @@ -190,3 +190,19 @@ max-time: 2500 cmd: test_event args: -n BasicEventOperation T1 T6 +# +max-time: 1500 +cmd: testSystemRestart +args: -n SR6 -l 1 T1 + +max-time: 1500 +cmd: testSystemRestart +args: -n SR7 -l 1 T1 + +max-time: 1500 +cmd: testSystemRestart +args: -n S8 -l 1 T1 + +max-time: 1500 +cmd: testSystemRestart +args: -n S9 -l 1 T1 From 0094f7e4ec1c1c5da01cbe1e916dbe8034c33a0b Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Sep 2004 17:31:01 +0000 Subject: [PATCH 33/55] config parameter changed --- ndb/include/mgmapi/mgmapi_config_parameters.h | 6 ++---- ndb/src/common/mgmcommon/IPCConfig.cpp | 8 ++------ ndb/src/mgmsrv/ConfigInfo.cpp | 4 ++-- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h index 3110d3bc830..6a0cd376355 100644 --- a/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -125,10 +125,8 @@ #define CFG_SCI_HOST1_ID_1 551 #define CFG_SCI_HOST2_ID_0 552 #define CFG_SCI_HOST2_ID_1 553 -#define CFG_SCI_HOSTNAME_1 554 -#define CFG_SCI_HOSTNAME_2 555 -#define CFG_SCI_SEND_LIMIT 556 -#define CFG_SCI_BUFFER_MEM 557 +#define CFG_SCI_SEND_LIMIT 554 +#define CFG_SCI_BUFFER_MEM 555 #define CFG_OSE_PRIO_A_SIZE 602 #define CFG_OSE_PRIO_B_SIZE 603 diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp index b70881dd830..780504d2c62 100644 --- a/ndb/src/common/mgmcommon/IPCConfig.cpp +++ 
b/ndb/src/common/mgmcommon/IPCConfig.cpp @@ -398,18 +398,14 @@ IPCConfig::configureTransporters(Uint32 nodeId, } case CONNECTION_TYPE_SCI:{ SCI_TransporterConfiguration conf; - const char * host1, * host2; conf.localNodeId = nodeId; conf.remoteNodeId = remoteNodeId; conf.checksum = checksum; conf.signalId = sendSignalId; conf.port= server_port; - if(iter.get(CFG_SCI_HOSTNAME_1, &host1)) break; - if(iter.get(CFG_SCI_HOSTNAME_2, &host2)) break; - - conf.localHostName = (nodeId == nodeId1 ? host1 : host2); - conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1); + conf.localHostName = localHostName; + conf.remoteHostName = remoteHostName; if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sendLimit)) break; if(iter.get(CFG_SCI_BUFFER_MEM, &conf.bufferSize)) break; diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index a7c20a55eb7..0b7c664dfd3 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -1831,7 +1831,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { STR_VALUE(MAX_INT_RNIL) }, { - CFG_SCI_HOSTNAME_1, + CFG_CONNECTION_HOSTNAME_1, "HostName1", "SCI", "Name/IP of computer on one side of the connection", @@ -1842,7 +1842,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { 0, 0 }, { - CFG_SCI_HOSTNAME_2, + CFG_CONNECTION_HOSTNAME_2, "HostName2", "SCI", "Name/IP of computer on one side of the connection", From aa55c00558880bbccd5fae54a1a41da81d4e0281 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 22 Sep 2004 05:48:52 +0200 Subject: [PATCH 34/55] misspelled testcase name --- ndb/test/run-test/daily-devel-tests.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt index 64d11bc7701..2497fa7d038 100644 --- a/ndb/test/run-test/daily-devel-tests.txt +++ b/ndb/test/run-test/daily-devel-tests.txt @@ -193,16 +193,16 @@ args: -n BasicEventOperation T1 T6 # max-time: 1500 cmd: testSystemRestart -args: -n SR6 -l 1 T1 +args: -l 1 -n SR6 T1 max-time: 1500 cmd: testSystemRestart -args: -n SR7 -l 1 T1 +args: -l 1 -n SR7 T1 max-time: 1500 cmd: testSystemRestart -args: -n S8 -l 1 T1 +args: -l 1 -n SR8 T1 max-time: 1500 cmd: testSystemRestart -args: -n S9 -l 1 T1 +args: -l 1 -n SR9 T1 From 71ead013168a1bf15af84037a7cd3da723323be7 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 22 Sep 2004 09:13:13 +0200 Subject: [PATCH 35/55] testScan - ReadWithLocksAndInserts testBasic LocksAndInserts ndb/test/src/HugoTransactions.cpp: Fix HugoTransaction::load so that transaction is closed and started if error is found --- ndb/test/src/HugoTransactions.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 53809ecc851..994a45de3dc 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -729,10 +729,9 @@ HugoTransactions::loadTable(Ndb* pNdb, NdbSleep_MilliSleep(doSleep); // if (first_batch || !oneTrans) { - if (first_batch) { + if (first_batch || !pTrans) { first_batch = false; pTrans = pNdb->startTransaction(); - if (pTrans == NULL) { const NdbError err = pNdb->getNdbError(); @@ -786,7 +785,7 @@ HugoTransactions::loadTable(Ndb* pNdb, if(check == -1 ) { const NdbError err = pTrans->getNdbError(); pNdb->closeTransaction(pTrans); - + pTrans= 0; switch(err.status){ case NdbError::Success: ERR(err); @@ -828,6 +827,7 @@ HugoTransactions::loadTable(Ndb* pNdb, else{ if (closeTrans) { pNdb->closeTransaction(pTrans); + pTrans= 
0; } } From 22db858b5057e90ba80f49f9503c9b7884d478cb Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 22 Sep 2004 09:27:34 +0200 Subject: [PATCH 36/55] Fix so that testBackup waits until start has finished before starting to restore --- ndb/test/ndbapi/testBackup.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp index 509cd4780bf..77b9d0a4baa 100644 --- a/ndb/test/ndbapi/testBackup.cpp +++ b/ndb/test/ndbapi/testBackup.cpp @@ -149,6 +149,9 @@ int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){ if (restarter.restartAll(true) != 0) return NDBT_FAILED; + if (restarter.waitClusterStarted() != 0) + return NDBT_FAILED; + return NDBT_OK; } @@ -413,7 +416,6 @@ TESTCASE("BackupOne", INITIALIZER(runRestoreOne); VERIFIER(runVerifyOne); FINALIZER(runClearTable); - FINALIZER(runDropTable); } TESTCASE("BackupBank", "Test that backup and restore works during transaction load\n" From 99e809cbd944a58417a851b37db4848d6661bbb0 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 22 Sep 2004 18:27:03 +0000 Subject: [PATCH 37/55] debug printout --- ndb/src/ndbapi/DictCache.cpp | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index 12300ce216f..d395e3c9847 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -70,6 +70,27 @@ LocalDictCache::put(const char * name, Ndb_local_table_info * tab_info){ void LocalDictCache::drop(const char * name){ Ndb_local_table_info *info= m_tableHash.deleteKey(name, strlen(name)); + +#ifndef DBUG_OFF + if (info == 0) { + ndbout_c("LocalDictCache::drop(%s) info==0", name); + ndbout_c("dump begin"); + NdbElement_t * curr = m_tableHash.getNext(0); + while(curr != 0){ + Ndb_local_table_info *tmp = curr->theData; + if (tmp) { + ndbout_c("m_table_impl=0x%x, id=%d, name=%s", + tmp->m_table_impl, + tmp->m_table_impl->m_tableId, + tmp->m_table_impl->getName()); + } else { + ndbout_c("NULL"); + } + curr = m_tableHash.getNext(curr); + } + ndbout_c("dump end"); + } +#endif DBUG_ASSERT(info != 0); Ndb_local_table_info::destroy(info); } From 8090516c5250f1d1873adf5f312fc16e70e3f6be Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 23 Sep 2004 09:25:05 +0200 Subject: [PATCH 38/55] bug#5702 Check index version already in master to prevent "non-atomic" failing drop index ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Check index version already in master to prevent "non-atomic" failing drop index --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 4757f1d2bf3..b214538bce0 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -6545,6 +6545,8 @@ Dbdict::execDROP_INDX_REQ(Signal* signal) jamEntry(); DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend(); OpDropIndexPtr opPtr; + + int err = DropIndxRef::BadRequestType; const Uint32 senderRef = signal->senderBlockRef(); const DropIndxReq::RequestType requestType = req->getRequestType(); if (requestType == DropIndxReq::RT_USER) { @@ -6559,6 +6561,20 @@ Dbdict::execDROP_INDX_REQ(Signal* signal) return; } // forward initial request plus operation key to all + Uint32 indexId= req->getIndexId(); + Uint32 indexVersion= req->getIndexVersion(); + TableRecordPtr tmp; + int res = getMetaTablePtr(tmp, indexId, indexVersion); + switch(res){ + case 
MetaData::InvalidArgument: + case MetaData::TableNotFound: + err = DropTableRef::NoSuchTable; + goto error; + case MetaData::InvalidTableVersion: + err = DropIndxRef::InvalidIndexVersion; + goto error; + } + req->setOpKey(++c_opRecordSequence); NodeReceiverGroup rg(DBDICT, c_aliveNodes); sendSignal(rg, GSN_DROP_INDX_REQ, @@ -6608,12 +6624,13 @@ Dbdict::execDROP_INDX_REQ(Signal* signal) return; } } +error: jam(); // return to sender OpDropIndex opBad; opPtr.p = &opBad; opPtr.p->save(req); - opPtr.p->m_errorCode = DropIndxRef::BadRequestType; + opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err; opPtr.p->m_errorLine = __LINE__; dropIndex_sendReply(signal, opPtr, true); } From a366d6eefc1a52dc3c4097faff18787c91363d5e Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 24 Sep 2004 08:47:46 +0000 Subject: [PATCH 39/55] bug, releaseTableObject called twice bug, whole bucket removed if deleting first element in bucket ndb/src/kernel/blocks/dbdict/Dbdict.cpp: bug, releaseTableObject called twice ndb/src/ndbapi/NdbLinHash.hpp: bug, whole bucket removed if deleting first element in bucket --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 2 -- ndb/src/ndbapi/NdbLinHash.hpp | 11 ++++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index b214538bce0..1de5cd08a01 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -2868,7 +2868,6 @@ Dbdict::execALTER_TABLE_REQ(Signal* signal) jam(); c_opCreateTable.release(alterTabPtr); parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED; - releaseTableObject(parseRecord.tablePtr.i, false); alterTableRef(signal, req, (AlterTableRef::ErrorCode) parseRecord.errorCode, aParseRecord); @@ -3054,7 +3053,6 @@ Dbdict::execALTER_TAB_REQ(Signal * signal) jam(); c_opCreateTable.release(alterTabPtr); parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED; - releaseTableObject(parseRecord.tablePtr.i, false); alterTabRef(signal, req, (AlterTableRef::ErrorCode) parseRecord.errorCode, aParseRecord); diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/ndb/src/ndbapi/NdbLinHash.hpp index f786600607f..f245a261a04 100644 --- a/ndb/src/ndbapi/NdbLinHash.hpp +++ b/ndb/src/ndbapi/NdbLinHash.hpp @@ -287,17 +287,14 @@ NdbLinHash::deleteKey ( const char* str, Uint32 len){ NdbElement_t **chainp = &directory[dir]->elements[seg]; for(NdbElement_t * chain = *chainp; chain != 0; chain = chain->next){ if(chain->len == len && !memcmp(chain->str, str, len)){ + C *data= chain->theData; if (oldChain == 0) { - C *data= chain->theData; - delete chain; - * chainp = 0; - return data; + * chainp = chain->next; } else { - C *data= chain->theData; oldChain->next = chain->next; - delete chain; - return data; } + delete chain; + return data; } else { oldChain = chain; } From 2917d9fdbbe24969002098f38605f9c67be8cff7 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 24 Sep 2004 12:38:38 +0200 Subject: [PATCH 40/55] Dbdict - set table state in releaseTableObject - make handleTabInfoInit release object if parsing fails - make sure table is not accessed if parsing fails --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 1de5cd08a01..115a94a2b5c 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -2867,7 +2867,6 @@ Dbdict::execALTER_TABLE_REQ(Signal* signal) 
if(parseRecord.errorCode != 0){ jam(); c_opCreateTable.release(alterTabPtr); - parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED; alterTableRef(signal, req, (AlterTableRef::ErrorCode) parseRecord.errorCode, aParseRecord); @@ -3052,7 +3051,6 @@ Dbdict::execALTER_TAB_REQ(Signal * signal) if(parseRecord.errorCode != 0){ jam(); c_opCreateTable.release(alterTabPtr); - parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED; alterTabRef(signal, req, (AlterTableRef::ErrorCode) parseRecord.errorCode, aParseRecord); @@ -3437,7 +3435,6 @@ Dbdict::execALTER_TAB_CONF(Signal * signal){ // Release resources TableRecordPtr tabPtr; c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI); - tabPtr.p->tabState = TableRecord::NOT_DEFINED; releaseTableObject(tabPtr.i, false); c_opCreateTable.release(alterTabPtr); c_blockState = BS_IDLE; @@ -3571,7 +3568,6 @@ Dbdict::alterTab_writeTableConf(Signal* signal, jam(); // Release resources c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI); - tabPtr.p->tabState = TableRecord::NOT_DEFINED; releaseTableObject(tabPtr.i, false); c_opCreateTable.release(alterTabPtr); c_blockState = BS_IDLE; @@ -4459,7 +4455,6 @@ Dbdict::createTab_dropComplete(Signal* signal, TableRecordPtr tabPtr; c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - tabPtr.p->tabState = TableRecord::NOT_DEFINED; releaseTableObject(tabPtr.i); PageRecordPtr pagePtr; @@ -5497,6 +5492,8 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash) c_tableRecordPool.getPtr(tablePtr, tableId); if (removeFromHash) c_tableRecordHash.remove(tablePtr); + + tablePtr.p->tabState = TableRecord::NOT_DEFINED; Uint32 nextAttrRecord = tablePtr.p->firstAttribute; while (nextAttrRecord != RNIL) { From 99a020db026e0739d3281a3639261a28f6862654 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 24 Sep 2004 13:40:38 +0200 Subject: [PATCH 41/55] Extra jam broken if --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 115a94a2b5c..fa263760b7c 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -4553,13 +4553,14 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, TableRecordPtr tablePtr; c_tableRecordHash.find(tablePtr, keyRecord); - if (checkExist) + if (checkExist){ jam(); /* ---------------------------------------------------------------- */ // Check if table already existed. 
/* ---------------------------------------------------------------- */ tabRequire(tablePtr.i == RNIL, CreateTableRef::TableAlreadyExist); - + } + switch (parseP->requestType) { case DictTabInfo::CreateTableFromAPI: { jam(); From d6c1a39a1523999ec0bc170a54257104f2ad5528 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 24 Sep 2004 16:58:25 +0000 Subject: [PATCH 42/55] fix for invalidating table if mismatch with frm removed debug printout new test in alter table for dictionay update test with multiple connections added coice of setting MaxNoOfOrderedIndexes added option to run "--small-bench" mysql-test/mysql-test-run.sh: added option to run "--small-bench" mysql-test/ndb/ndb_config_2_node.ini: added coice of setting MaxNoOfOrderedIndexes mysql-test/ndb/ndbcluster.sh: added coice of setting MaxNoOfOrderedIndexes mysql-test/r/ndb_alter_table.result: new test in alter table for dictionay update test with multiple connections mysql-test/t/ndb_alter_table.test: new test in alter table for dictionay update test with multiple connections ndb/src/ndbapi/DictCache.cpp: removed debug printout sql/ha_ndbcluster.cc: fix for invalidating table if mismatch with frm --- mysql-test/mysql-test-run.sh | 22 ++++++- mysql-test/ndb/ndb_config_2_node.ini | 1 + mysql-test/ndb/ndbcluster.sh | 5 +- mysql-test/r/ndb_alter_table.result | 19 ++++++ mysql-test/t/ndb_alter_table.test | 31 +++++++++ ndb/src/ndbapi/DictCache.cpp | 21 ------ sql/ha_ndbcluster.cc | 96 ++++++++++++++-------------- 7 files changed, 123 insertions(+), 72 deletions(-) diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 41dc3c419f0..1765bf4504b 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -295,6 +295,11 @@ while test $# -gt 0; do --record) RECORD=1; EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1" ;; + --small-bench) + DO_SMALL_BENCH=1 + DO_BENCH=1 + NO_SLAVE=1 + ;; --bench) DO_BENCH=1 NO_SLAVE=1 @@ -1451,7 +1456,13 @@ then if [ -z "$USE_RUNNING_NDBCLUSTER" ] then echo "Starting ndbcluster" - ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --small --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1 + if [ "$DO_BENCH" = 1 ] + then + NDBCLUSTER_OPTS="" + else + NDBCLUSTER_OPTS="--small" + fi + ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT $NDBCLUSTER_OPTS --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1 USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"host=localhost:$NDBCLUSTER_PORT\"" else USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"$USE_RUNNING_NDBCLUSTER\"" @@ -1485,9 +1496,14 @@ if [ "$DO_BENCH" = 1 ] then start_master + if [ "$DO_SMALL_BENCH" = 1 ] + then + EXTRA_BENCH_ARGS="--small-test --small-tables" + fi + if [ ! 
-z "$USE_NDBCLUSTER" ] then - EXTRA_BENCH_ARGS="--create-options=TYPE=ndb" + EXTRA_BENCH_ARGS="--create-options=TYPE=ndb $EXTRA_BENCH_ARGS" fi BENCHDIR=$BASEDIR/sql-bench/ @@ -1495,7 +1511,7 @@ then cd $BENCHDIR if [ -z "$1" ] then - ./run-all-tests --socket=$MASTER_MYSOCK --user=root $EXTRA_BENCH_ARGS + ./run-all-tests --socket=$MASTER_MYSOCK --user=root $EXTRA_BENCH_ARGS --log else if [ -x "./$1" ] then diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini index cc0f940efe3..8c89d2aa2cc 100644 --- a/mysql-test/ndb/ndb_config_2_node.ini +++ b/mysql-test/ndb/ndb_config_2_node.ini @@ -6,6 +6,7 @@ IndexMemory= CHOOSE_IndexMemory Diskless= CHOOSE_Diskless TimeBetweenWatchDogCheck= 30000 DataDir= CHOOSE_FILESYSTEM +MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes [ndbd] HostName= CHOOSE_HOSTNAME_1 diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index f143242371f..a1b6400d753 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -44,7 +44,8 @@ initial_ndb= status_ndb= ndb_diskless=0 -ndb_con_op=100000 +ndb_no_ord=512 +ndb_con_op=10000 ndb_dmem=80M ndb_imem=24M @@ -65,6 +66,7 @@ while test $# -gt 0; do status_ndb=1 ;; --small) + ndb_no_ord=128 ndb_con_op=10000 ndb_dmem=40M ndb_imem=12M @@ -128,6 +130,7 @@ port_transporter=`expr $ndb_mgmd_port + 2` if [ $initial_ndb ] ; then sed \ + -e s,"CHOOSE_MaxNoOfOrderedIndexes","$ndb_no_ord",g \ -e s,"CHOOSE_MaxNoOfConcurrentOperations","$ndb_con_op",g \ -e s,"CHOOSE_DataMemory","$ndb_dmem",g \ -e s,"CHOOSE_IndexMemory","$ndb_imem",g \ diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result index 8143e34ecc2..a36536b878d 100644 --- a/mysql-test/r/ndb_alter_table.result +++ b/mysql-test/r/ndb_alter_table.result @@ -73,3 +73,22 @@ col6 col1 col3 fourth col4 col4_5 col5 col7 col8 1 101 3 4 5 PENDING 0000-00-00 00:00:00 2 102 4 3 5 99 PENDING EXTRA 2004-01-01 00:00:00 drop table t1; +CREATE TABLE t1 ( +a INT NOT NULL, +b INT NOT NULL +) ENGINE=ndbcluster; +INSERT INTO t1 VALUES (9410,9412); +ALTER TABLE t1 ADD COLUMN c int not null; +select * from t1; +a b c +9410 9412 0 +select * from t1; +a b c +9410 9412 0 +alter table t1 drop c; +select * from t1; +a b +9410 9412 +drop table t1; +select * from t1; +ERROR 42S02: Table 'test.t1' doesn't exist diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test index 3cdddfa8dce..96270d94dcb 100644 --- a/mysql-test/t/ndb_alter_table.test +++ b/mysql-test/t/ndb_alter_table.test @@ -49,6 +49,37 @@ show table status; select * from t1 order by col1; drop table t1; + +# +# Check that invalidating dictionary cache works +# + +CREATE TABLE t1 ( + a INT NOT NULL, + b INT NOT NULL +) ENGINE=ndbcluster; + +INSERT INTO t1 VALUES (9410,9412); + +connect (con1,localhost,,,test); +connect (con2,localhost,,,test); + +connection con1; +ALTER TABLE t1 ADD COLUMN c int not null; +select * from t1; + +connection con2; +select * from t1; +alter table t1 drop c; + +connection con1; +select * from t1; +drop table t1; + +connection con2; +--error 1146 +select * from t1; + #--disable_warnings #DROP TABLE IF EXISTS t2; #--enable_warnings diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index d395e3c9847..12300ce216f 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -70,27 +70,6 @@ LocalDictCache::put(const char * name, Ndb_local_table_info * tab_info){ void LocalDictCache::drop(const char * name){ Ndb_local_table_info *info= 
m_tableHash.deleteKey(name, strlen(name)); - -#ifndef DBUG_OFF - if (info == 0) { - ndbout_c("LocalDictCache::drop(%s) info==0", name); - ndbout_c("dump begin"); - NdbElement_t * curr = m_tableHash.getNext(0); - while(curr != 0){ - Ndb_local_table_info *tmp = curr->theData; - if (tmp) { - ndbout_c("m_table_impl=0x%x, id=%d, name=%s", - tmp->m_table_impl, - tmp->m_table_impl->m_tableId, - tmp->m_table_impl->getName()); - } else { - ndbout_c("NULL"); - } - curr = m_tableHash.getNext(curr); - } - ndbout_c("dump end"); - } -#endif DBUG_ASSERT(info != 0); Ndb_local_table_info::destroy(info); } diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d5ba6d725eb..2c330fae84d 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -611,58 +611,60 @@ int ha_ndbcluster::get_metadata(const char *path) { NDBDICT *dict= m_ndb->getDictionary(); const NDBTAB *tab; - const void *data, *pack_data; - const char **key_name; - uint ndb_columns, mysql_columns, length, pack_length; int error; + bool invalidating_ndb_table= false; + DBUG_ENTER("get_metadata"); DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path)); - if (!(tab= dict->getTable(m_tabname))) - ERR_RETURN(dict->getNdbError()); - DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); - - /* - This is the place to check that the table we got from NDB - is equal to the one on local disk - */ - ndb_columns= (uint) tab->getNoOfColumns(); - mysql_columns= table->fields; - if (table->primary_key == MAX_KEY) - ndb_columns--; - if (ndb_columns != mysql_columns) - { - DBUG_PRINT("error", - ("Wrong number of columns, ndb: %d mysql: %d", - ndb_columns, mysql_columns)); - DBUG_RETURN(HA_ERR_OLD_METADATA); - } - - /* - Compare FrmData in NDB with frm file from disk. - */ - error= 0; - if (readfrm(path, &data, &length) || - packfrm(data, length, &pack_data, &pack_length)) - { - my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); - my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR)); - DBUG_RETURN(1); - } + do { + const void *data, *pack_data; + uint length, pack_length; + + if (!(tab= dict->getTable(m_tabname))) + ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); + /* + Compare FrmData in NDB with frm file from disk. 
+ */ + error= 0; + if (readfrm(path, &data, &length) || + packfrm(data, length, &pack_data, &pack_length)) + { + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(1); + } - if ((pack_length != tab->getFrmLength()) || - (memcmp(pack_data, tab->getFrmData(), pack_length))) - { - DBUG_PRINT("error", - ("metadata, pack_length: %d getFrmLength: %d memcmp: %d", - pack_length, tab->getFrmLength(), - memcmp(pack_data, tab->getFrmData(), pack_length))); - DBUG_DUMP("pack_data", (char*)pack_data, pack_length); - DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength()); - error= HA_ERR_OLD_METADATA; - } - my_free((char*)data, MYF(0)); - my_free((char*)pack_data, MYF(0)); + if ((pack_length != tab->getFrmLength()) || + (memcmp(pack_data, tab->getFrmData(), pack_length))) + { + if (!invalidating_ndb_table) + { + DBUG_PRINT("info", ("Invalidating table")); + dict->invalidateTable(m_tabname); + invalidating_ndb_table= true; + } + else + { + DBUG_PRINT("error", + ("metadata, pack_length: %d getFrmLength: %d memcmp: %d", + pack_length, tab->getFrmLength(), + memcmp(pack_data, tab->getFrmData(), pack_length))); + DBUG_DUMP("pack_data", (char*)pack_data, pack_length); + DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength()); + error= HA_ERR_OLD_METADATA; + invalidating_ndb_table= false; + } + } + else + { + invalidating_ndb_table= false; + } + my_free((char*)data, MYF(0)); + my_free((char*)pack_data, MYF(0)); + } while (invalidating_ndb_table); + if (error) DBUG_RETURN(error); From 892b5c9bcacd1507e1b6c1090103f3dd6a6438a9 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 25 Sep 2004 10:16:37 +0200 Subject: [PATCH 43/55] Fix memory leak BitKeeper/deleted/.del-NdbScanReceiver.hpp~5c295814a47e7bb4: Delete: ndb/src/ndbapi/NdbScanReceiver.hpp BitKeeper/deleted/.del-NdbScanReceiver.cpp~b8a7472e4a7c424f: Delete: ndb/src/ndbapi/NdbScanReceiver.cpp ndb/include/ndb_global.h: Added ndb_end (corresponding to ndb_init) ndb/src/common/util/ndb_init.c: Added ndb_end (corresponding to ndb_init) ndb/src/ndbapi/NdbScanOperation.cpp: Release SCAN_TABREQ and make sure to call NdbOperation::release (that releases other stuff) ndb/src/ndbapi/Ndbinit.cpp: Reorder free-ing so that signal are release last (so that operations holding signals can release the first) --- ndb/include/ndb_global.h | 1 + ndb/src/common/util/ndb_init.c | 6 + ndb/src/ndbapi/NdbScanOperation.cpp | 9 +- ndb/src/ndbapi/NdbScanReceiver.cpp | 187 ------------------------- ndb/src/ndbapi/NdbScanReceiver.hpp | 210 ---------------------------- ndb/src/ndbapi/Ndbinit.cpp | 8 +- 6 files changed, 19 insertions(+), 402 deletions(-) delete mode 100644 ndb/src/ndbapi/NdbScanReceiver.cpp delete mode 100644 ndb/src/ndbapi/NdbScanReceiver.hpp diff --git a/ndb/include/ndb_global.h b/ndb/include/ndb_global.h index 19bd387c457..3ce37a2edee 100644 --- a/ndb/include/ndb_global.h +++ b/ndb/include/ndb_global.h @@ -78,6 +78,7 @@ extern "C" { /* call in main() - does not return on error */ extern int ndb_init(void); +extern void ndb_end(int); #ifndef HAVE_STRDUP extern char * strdup(const char *s); diff --git a/ndb/src/common/util/ndb_init.c b/ndb/src/common/util/ndb_init.c index b160ed3636b..f3aa734d7f9 100644 --- a/ndb/src/common/util/ndb_init.c +++ b/ndb/src/common/util/ndb_init.c @@ -27,3 +27,9 @@ ndb_init() } return 0; } + +void +ndb_end(int flags) +{ + my_end(flags); +} diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp index ac5f4268386..8db4778f2b9 100644 --- 
a/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/ndb/src/ndbapi/NdbScanOperation.cpp @@ -52,6 +52,7 @@ NdbScanOperation::NdbScanOperation(Ndb* aNdb) : NdbScanOperation::~NdbScanOperation() { for(Uint32 i = 0; irelease(); theNdb->releaseNdbScanRec(m_receivers[i]); } delete[] m_array; @@ -191,7 +192,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm, return 0; } - theSCAN_TABREQ = theNdb->getSignal(); + theSCAN_TABREQ = (!theSCAN_TABREQ ? theNdb->getSignal() : theSCAN_TABREQ); if (theSCAN_TABREQ == NULL) { setErrorCodeAbort(4000); return 0; @@ -719,6 +720,12 @@ void NdbScanOperation::release() for(Uint32 i = 0; irelease(); } + if(theSCAN_TABREQ) + { + theNdb->releaseSignal(theSCAN_TABREQ); + theSCAN_TABREQ = 0; + } + NdbOperation::release(); } /*************************************************************************** diff --git a/ndb/src/ndbapi/NdbScanReceiver.cpp b/ndb/src/ndbapi/NdbScanReceiver.cpp deleted file mode 100644 index 6c8c16c3ecf..00000000000 --- a/ndb/src/ndbapi/NdbScanReceiver.cpp +++ /dev/null @@ -1,187 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include "NdbScanReceiver.hpp" -#include - -#include - -#include - - -/*************************************************************************** - * int receiveKEYINFO20( NdbApiSignal* aSignal) - * - * Remark: Handles the reception of the KEYINFO20 signal. 
- * Save a copy of the signal in list - * - ***************************************************************************/ -int -NdbScanReceiver::receiveKEYINFO20( NdbApiSignal* aSignal){ - const KeyInfo20 * const keyInfo = CAST_CONSTPTR(KeyInfo20, aSignal->getDataPtr()); - if (theStatus != Waiting){ - //ndbout << "Dropping KEYINFO20, theStatus="<getLength() < 5){ - //ndbout << "Dropping KEYINFO20, length="<getLength() << endl; - } - Uint64 tCurrTransId = theNdbOp->theNdbCon->getTransactionId(); - Uint64 tRecTransId = (Uint64)keyInfo->transId1 + ((Uint64)keyInfo->transId2 << 32); - if ((tRecTransId - tCurrTransId) != (Uint64)0){ - //ndbout << "Dropping KEYINFO20 wrong transid" << endl; - return -1; - } - - NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal(); - if (tCopy == NULL) { - theNdbOp->setErrorCode(4000); - return 2; // theWaitState = NO_WAIT - } - // Put copy last in list of KEYINFO20 signals - tCopy->copyFrom(aSignal); - tCopy->next(NULL); - if (theFirstKEYINFO20_Recv == NULL) - theFirstKEYINFO20_Recv = tCopy; - else - theLastKEYINFO20_Recv->next(tCopy); - theLastKEYINFO20_Recv = tCopy; - - theTotalKI_Len = keyInfo->keyLen; // This is the total length of all signals - theTotalRecKI_Len += aSignal->getLength() - 5; - return theNdbOp->theNdbCon->checkNextScanResultComplete(); -} - -/*************************************************************************** - * int receiveTRANSID_AI_SCAN( NdbApiSignal* aSignal) - * - * Remark: Handles the reception of the TRANSID_AI_signal with - * 22 signal data words. - * Save a copy of the signal in list and check if all - * signals belonging to this resultset is receieved. - * - ***************************************************************************/ -int -NdbScanReceiver::receiveTRANSID_AI_SCAN( NdbApiSignal* aSignal) -{ - const Uint32* aDataPtr = aSignal->getDataPtr(); - if (theStatus != Waiting){ - //ndbout << "Dropping TRANSID_AI, theStatus="<getLength() < 3){ - //ndbout << "Dropping TRANSID_AI, length="<getLength() << endl; - return -1; - } - if (theNdbOp == NULL){ - //ndbout << "Dropping TRANSID_AI, theNdbOp == NULL" << endl; - return -1; - } - if (theNdbOp->theNdbCon == NULL){ - //ndbout << "Dropping TRANSID_AI, theNdbOp->theNdbCon == NULL" << endl; - return -1; - } - Uint64 tCurrTransId = theNdbOp->theNdbCon->getTransactionId(); - Uint64 tRecTransId = (Uint64)aDataPtr[1] + ((Uint64)aDataPtr[2] << 32); - if ((tRecTransId - tCurrTransId) != (Uint64)0){ - //ndbout << "Dropping TRANSID_AI wrong transid" << endl; - return -1; - } - - NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal(); - if (tCopy == NULL){ - theNdbOp->setErrorCode(4000); - return 2; // theWaitState = NO_WAIT - } - tCopy->copyFrom(aSignal); - tCopy->next(NULL); - if (theFirstTRANSID_AI_Recv == NULL) - theFirstTRANSID_AI_Recv = tCopy; - else - theLastTRANSID_AI_Recv->next(tCopy); - theLastTRANSID_AI_Recv = tCopy; - theTotalRecAI_Len += aSignal->getLength() - 3; - - return theNdbOp->theNdbCon->checkNextScanResultComplete(); -} - -/*************************************************************************** - * int executeSavedSignals() - * - * Remark: Execute all saved TRANSID_AI signals into the parent NdbOperation - * - * - ***************************************************************************/ -int -NdbScanReceiver::executeSavedSignals(){ - - NdbApiSignal* tSignal = theFirstTRANSID_AI_Recv; - while (tSignal != NULL) { - const Uint32* tDataPtr = tSignal->getDataPtr(); - - int tRet = theNdbOp->receiveREAD_AI((Uint32*)&tDataPtr[3], - tSignal->getLength() - 3); - 
if (tRet != -1){ - // -1 means that more signals are wanted ? - // Make sure there are no more signals in the list - assert(tSignal->next() == NULL); - } - tSignal = tSignal->next(); - } - // receiveREAD_AI may not copy to application buffers - NdbRecAttr* tRecAttr = theNdbOp->theFirstRecAttr; - while (tRecAttr != NULL) { - if (tRecAttr->copyoutRequired()) // copy to application buffer - tRecAttr->copyout(); - tRecAttr = tRecAttr->next(); - } - // Release TRANSID_AI signals for this receiver - while(theFirstTRANSID_AI_Recv != NULL){ - NdbApiSignal* tmp = theFirstTRANSID_AI_Recv; - theFirstTRANSID_AI_Recv = tmp->next(); - delete tmp; - } - - // theNdbOp->theNdb->releaseSignalsInList(&theFirstTRANSID_AI_Recv); - theFirstTRANSID_AI_Recv = NULL; - theLastTRANSID_AI_Recv = NULL; - theStatus = Executed; - - return 0; -} - - -void -NdbScanReceiver::prepareNextScanResult(){ - if(theStatus == Executed){ - - // theNdbOp->theNdb->releaseSignalsInList(&theFirstKEYINFO20_Recv); - while(theFirstKEYINFO20_Recv != NULL){ - NdbApiSignal* tmp = theFirstKEYINFO20_Recv; - theFirstKEYINFO20_Recv = tmp->next(); - delete tmp; - } - theFirstKEYINFO20_Recv = NULL; - theLastKEYINFO20_Recv = NULL; - theTotalRecAI_Len = 0; - theTotalRecKI_Len = 0; - if (theLockMode == true) - theTotalKI_Len = 0xFFFFFFFF; - else - theTotalKI_Len = 0; - theStatus = Waiting; - } -} diff --git a/ndb/src/ndbapi/NdbScanReceiver.hpp b/ndb/src/ndbapi/NdbScanReceiver.hpp deleted file mode 100644 index 72f9e48f02c..00000000000 --- a/ndb/src/ndbapi/NdbScanReceiver.hpp +++ /dev/null @@ -1,210 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef NdbScanReceiver_H -#define NdbScanReceiver_H - -#include "Ndb.hpp" -#include "NdbConnection.hpp" -#include "NdbOperation.hpp" -#include "NdbApiSignal.hpp" -#include "NdbReceiver.hpp" -#include - - -class NdbScanReceiver -{ - enum ReceiverStatus { Init, - Waiting, - Completed, - Executed, - Released }; - - friend class Ndb; - friend class NdbOperation; -public: - NdbScanReceiver(Ndb *aNdb) : - theReceiver(aNdb), - theNdbOp(NULL), - theFirstTRANSID_AI_Recv(NULL), - theLastTRANSID_AI_Recv(NULL), - theFirstKEYINFO20_Recv(NULL), - theLastKEYINFO20_Recv(NULL), - theTotalRecAI_Len(0), - theTotalKI_Len(0xFFFFFFFF), - theTotalRecKI_Len(0), - theStatus(Init), - theNextScanRec(NULL) - { - theReceiver.init(NdbReceiver::NDB_SCANRECEIVER, this); - } - - int checkMagicNumber(); - int receiveTRANSID_AI_SCAN(NdbApiSignal*); - int receiveKEYINFO20(NdbApiSignal*); - int executeSavedSignals(); - void prepareNextScanResult(); - - NdbScanReceiver* next(); - void next(NdbScanReceiver*); - - bool isCompleted(Uint32 aiLenToReceive); - void setCompleted(); - - void init(NdbOperation* aNdbOp, bool lockMode); - - Uint32 ptr2int() { return theReceiver.getId(); }; -private: - NdbScanReceiver(); - void release(); - - NdbReceiver theReceiver; - - NdbOperation* theNdbOp; - NdbApiSignal* theFirstTRANSID_AI_Recv; - NdbApiSignal* theLastTRANSID_AI_Recv; - NdbApiSignal* theFirstKEYINFO20_Recv; - NdbApiSignal* theLastKEYINFO20_Recv; - - Uint32 theTotalRecAI_Len; - Uint32 theTotalKI_Len; - Uint32 theTotalRecKI_Len; - ReceiverStatus theStatus; - Uint32 theMagicNumber; - NdbScanReceiver* theNextScanRec; - bool theLockMode; - -}; - -inline -void -NdbScanReceiver::init(NdbOperation* aNdbOp, bool lockMode){ - assert(theStatus == Init || theStatus == Released); - theNdbOp = aNdbOp; - theMagicNumber = 0xA0B1C2D3; - theTotalRecAI_Len = 0; - - /* If we are locking the records for take over - * KI_len to receive is at least 1, since we don't know yet - * how much KI we are expecting(this is written in the first KI signal) - * set theTotalKI_Len to FFFFFFFF, this will make the ScanReciever wait for - * at least the first KI, and when that is received we will know if - * we are expecting another one - */ - theLockMode = lockMode; - if (theLockMode == true) - theTotalKI_Len = 0xFFFFFFFF; - else - theTotalKI_Len = 0; - theTotalRecKI_Len = 0; - - assert(theNextScanRec == NULL); - theNextScanRec = NULL; - assert(theFirstTRANSID_AI_Recv == NULL); - theFirstTRANSID_AI_Recv = NULL; - assert(theLastTRANSID_AI_Recv == NULL); - theLastTRANSID_AI_Recv = NULL; - assert(theFirstKEYINFO20_Recv == NULL); - theFirstKEYINFO20_Recv = NULL; - theLastKEYINFO20_Recv = NULL; - - theStatus = Waiting; -}; - - -inline -void -NdbScanReceiver::release(){ - theStatus = Released; - // theNdbOp->theNdb->releaseSignalsInList(&theFirstTRANSID_AI_Recv); - while(theFirstTRANSID_AI_Recv != NULL){ - NdbApiSignal* tmp = theFirstTRANSID_AI_Recv; - theFirstTRANSID_AI_Recv = tmp->next(); - delete tmp; - } - theFirstTRANSID_AI_Recv = NULL; - theLastTRANSID_AI_Recv = NULL; - // theNdbOp->theNdb->releaseSignalsInList(&theFirstKEYINFO20_Recv); - while(theFirstKEYINFO20_Recv != NULL){ - NdbApiSignal* tmp = theFirstKEYINFO20_Recv; - theFirstKEYINFO20_Recv = tmp->next(); - delete tmp; - } - theFirstKEYINFO20_Recv = NULL; - theLastKEYINFO20_Recv = NULL; - theNdbOp = 
NULL; - theTotalRecAI_Len = 0; - theTotalRecKI_Len = 0; - theTotalKI_Len = 0xFFFFFFFF; -}; - -inline -int -NdbScanReceiver::checkMagicNumber() -{ - if (theMagicNumber != 0xA0B1C2D3) - return -1; - return 0; -} - -inline -NdbScanReceiver* -NdbScanReceiver::next(){ - return theNextScanRec; -} - -inline -void -NdbScanReceiver::next(NdbScanReceiver* aScanRec){ - theNextScanRec = aScanRec; -} - -inline -bool -NdbScanReceiver::isCompleted(Uint32 aiLenToReceive){ - assert(theStatus == Waiting || theStatus == Completed); -#if 0 - ndbout << "NdbScanReceiver::isCompleted"< Date: Sat, 25 Sep 2004 14:10:06 +0000 Subject: [PATCH 46/55] moved LocalConfig out of config retriver --- ndb/include/mgmcommon/ConfigRetriever.hpp | 17 ++--------- ndb/include/ndbapi/ndb_cluster_connection.hpp | 2 ++ ndb/src/common/mgmcommon/ConfigRetriever.cpp | 25 +++------------- ndb/src/kernel/main.cpp | 14 +++++++-- ndb/src/kernel/vm/Configuration.cpp | 11 +++++-- ndb/src/kernel/vm/Configuration.hpp | 4 ++- ndb/src/mgmsrv/MgmtSrvr.cpp | 9 +++--- ndb/src/mgmsrv/MgmtSrvr.hpp | 4 +-- ndb/src/mgmsrv/MgmtSrvrConfig.cpp | 3 +- ndb/src/mgmsrv/main.cpp | 29 +++++-------------- ndb/src/ndbapi/TransporterFacade.hpp | 1 - ndb/src/ndbapi/ndb_cluster_connection.cpp | 27 +++++++++++++---- 12 files changed, 67 insertions(+), 79 deletions(-) diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp index f9f8904b65c..a584c394f45 100644 --- a/ndb/include/mgmcommon/ConfigRetriever.hpp +++ b/ndb/include/mgmcommon/ConfigRetriever.hpp @@ -28,7 +28,7 @@ */ class ConfigRetriever { public: - ConfigRetriever(Uint32 version, Uint32 nodeType); + ConfigRetriever(LocalConfig &local_config, Uint32 version, Uint32 nodeType); ~ConfigRetriever(); /** @@ -54,16 +54,6 @@ public: const char * getErrorString(); - /** - * Sets connectstring which can be used instead of local config file - */ - void setConnectString(const char * connectString); - - /** - * Sets name of local config file (usually not needed) - */ - void setLocalConfigFileName(const char * connectString); - /** * @return Node id of this node (as stated in local config or connectString) */ @@ -93,12 +83,9 @@ private: void setError(ErrorType, const char * errorMsg); - BaseString _localConfigFileName; - struct LocalConfig _localConfig; + struct LocalConfig& _localConfig; Uint32 _ownNodeId; - BaseString m_connectString; - Uint32 m_version; Uint32 m_node_type; NdbMgmHandle m_handle; diff --git a/ndb/include/ndbapi/ndb_cluster_connection.hpp b/ndb/include/ndbapi/ndb_cluster_connection.hpp index 59d5a038844..f8e6f25ce73 100644 --- a/ndb/include/ndbapi/ndb_cluster_connection.hpp +++ b/ndb/include/ndbapi/ndb_cluster_connection.hpp @@ -19,6 +19,7 @@ #define CLUSTER_CONNECTION_HPP class TransporterFacade; +class LocalConfig; class ConfigRetriever; class NdbThread; @@ -37,6 +38,7 @@ private: void connect_thread(); char *m_connect_string; TransporterFacade *m_facade; + LocalConfig *m_local_config; ConfigRetriever *m_config_retriever; NdbThread *m_connect_thread; int (*m_connect_callback)(void); diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index 40325fbae99..44b41956d33 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -45,8 +45,10 @@ //**************************************************************************** //**************************************************************************** -ConfigRetriever::ConfigRetriever(Uint32 version, Uint32 
node_type) { - +ConfigRetriever::ConfigRetriever(LocalConfig &local_config, + Uint32 version, Uint32 node_type) + : _localConfig(local_config) +{ m_handle= 0; m_version = version; m_node_type = node_type; @@ -66,15 +68,6 @@ ConfigRetriever::~ConfigRetriever(){ int ConfigRetriever::init() { - if (!_localConfig.init(m_connectString.c_str(), - _localConfigFileName.c_str())){ - - setError(CR_ERROR, "error in retrieving contact info for mgmtsrvr"); - _localConfig.printError(); - _localConfig.printUsage(); - return -1; - } - return _ownNodeId = _localConfig._ownNodeId; } @@ -230,16 +223,6 @@ ConfigRetriever::getErrorString(){ return errorString.c_str(); } -void -ConfigRetriever::setLocalConfigFileName(const char * localConfigFileName) { - _localConfigFileName.assign(localConfigFileName ? localConfigFileName : ""); -} - -void -ConfigRetriever::setConnectString(const char * connectString) { - m_connectString.assign(connectString ? connectString : ""); -} - bool ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 nodeid){ diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index 20844db75b6..035dfff8d01 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -19,6 +19,7 @@ #include #include "Configuration.hpp" +#include #include #include "vm/SimBlockList.hpp" @@ -67,12 +68,19 @@ NDB_MAIN(ndb_kernel){ // Parse command line options Configuration* theConfig = globalEmulatorData.theConfiguration; if(!theConfig->init(argc, argv)){ - return 0; + return NRT_Default; } + LocalConfig local_config; + if (!local_config.init(theConfig->getConnectString(),0)){ + local_config.printError(); + local_config.printUsage(); + return NRT_Default; + } + { // Do configuration signal(SIGPIPE, SIG_IGN); - theConfig->fetch_configuration(); + theConfig->fetch_configuration(local_config); } chdir(NdbConfig_get_path(0)); @@ -135,7 +143,7 @@ NDB_MAIN(ndb_kernel){ exit(0); } g_eventLogger.info("Ndb has terminated (pid %d) restarting", child); - theConfig->fetch_configuration(); + theConfig->fetch_configuration(local_config); } g_eventLogger.info("Angel pid: %d ndb pid: %d", getppid(), getpid()); diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index b7054a1bf22..0c4e24129df 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -16,6 +16,7 @@ #include +#include #include "Configuration.hpp" #include #include "GlobalData.hpp" @@ -184,7 +185,7 @@ Configuration::closeConfiguration(){ } void -Configuration::fetch_configuration(){ +Configuration::fetch_configuration(LocalConfig &local_config){ /** * Fetch configuration from management server */ @@ -192,8 +193,7 @@ Configuration::fetch_configuration(){ delete m_config_retriever; } - m_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_DB); - m_config_retriever->setConnectString(_connectString ? 
_connectString : ""); + m_config_retriever= new ConfigRetriever(local_config, NDB_VERSION, NODE_TYPE_DB); if(m_config_retriever->init() == -1 || m_config_retriever->do_connect() == -1){ @@ -416,6 +416,11 @@ Configuration::setRestartOnErrorInsert(int i){ m_restartOnErrorInsert = i; } +const char * +Configuration::getConnectString() const { + return _connectString; +} + char * Configuration::getConnectStringCopy() const { if(_connectString != 0) diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp index e84ff8d9193..7ac171c4a70 100644 --- a/ndb/src/kernel/vm/Configuration.hpp +++ b/ndb/src/kernel/vm/Configuration.hpp @@ -21,6 +21,7 @@ #include class ConfigRetriever; +class LocalConfig; class Configuration { public: @@ -32,7 +33,7 @@ public: */ bool init(int argc, const char** argv); - void fetch_configuration(); + void fetch_configuration(LocalConfig &local_config); void setupConfiguration(); void closeConfiguration(); @@ -54,6 +55,7 @@ public: const char * programName() const; const char * fileSystemPath() const; const char * backupFilePath() const; + const char * getConnectString() const; char * getConnectStringCopy() const; /** diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index d45953503ee..4c09805ba12 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -401,7 +401,7 @@ MgmtSrvr::getPort() const { /* Constructor */ MgmtSrvr::MgmtSrvr(NodeId nodeId, const BaseString &configFilename, - const BaseString &ndb_config_filename, + LocalConfig &local_config, Config * config): _blockNumber(1), // Hard coded block number since it makes it easy to send // signals to other management servers. @@ -409,7 +409,9 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, m_allocated_resources(*this), theSignalIdleList(NULL), theWaitState(WAIT_SUBSCRIBE_CONF), - m_statisticsListner(this){ + m_statisticsListner(this), + m_local_config(local_config) +{ DBUG_ENTER("MgmtSrvr::MgmtSrvr"); @@ -424,7 +426,6 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, m_newConfig = NULL; m_configFilename = configFilename; - m_localNdbConfigFilename = ndb_config_filename; m_nextConfigGenerationNumber = 0; @@ -514,7 +515,7 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, { DBUG_PRINT("info", ("verifyConfig")); - ConfigRetriever cr(NDB_VERSION, NDB_MGM_NODE_TYPE_MGM); + ConfigRetriever cr(m_local_config, NDB_VERSION, NDB_MGM_NODE_TYPE_MGM); if (!cr.verifyConfig(config->m_configValues, _ownNodeId)) { ndbout << cr.getErrorString() << endl; exit(-1); diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 3f3e98dbcc1..c6157db489a 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -173,7 +173,7 @@ public: MgmtSrvr(NodeId nodeId, /* Local nodeid */ const BaseString &config_filename, /* Where to save config */ - const BaseString &ndb_config_filename, /* Ndb.cfg filename */ + LocalConfig &local_config, /* Ndb.cfg filename */ Config * config); NodeId getOwnNodeId() const {return _ownNodeId;}; @@ -528,8 +528,8 @@ private: NdbMutex *m_configMutex; const Config * _config; Config * m_newConfig; + LocalConfig &m_local_config; BaseString m_configFilename; - BaseString m_localNdbConfigFilename; Uint32 m_nextConfigGenerationNumber; NodeBitmask m_reserved_nodes; diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp index 44c2aadd1e2..1d51061e909 100644 --- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp +++ b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp @@ -288,8 +288,7 @@ MgmtSrvr::readConfig() { Config * MgmtSrvr::fetchConfig() { - ConfigRetriever 
cr(NDB_VERSION, NODE_TYPE_MGM); - cr.setLocalConfigFileName(m_localNdbConfigFilename.c_str()); + ConfigRetriever cr(m_local_config, NDB_VERSION, NODE_TYPE_MGM); struct ndb_mgm_configuration * tmp = cr.getConfig(); if(tmp != 0){ Config * conf = new Config(); diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index a582d082312..1a2b95391a9 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -173,15 +173,19 @@ NDB_MAIN(mgmsrv){ /**************************** * Read configuration files * ****************************/ - if (!readLocalConfig()) + LocalConfig local_config; + if(!local_config.init(0,glob.local_config_filename)){ + local_config.printError(); goto error_end; + } + glob.localNodeId = local_config._ownNodeId; + if (!readGlobalConfig()) goto error_end; glob.mgmObject = new MgmtSrvr(glob.localNodeId, BaseString(glob.config_filename), - BaseString(glob.local_config_filename == 0 ? - "" : glob.local_config_filename), + local_config, glob.cluster_config); chdir(NdbConfig_get_path(0)); @@ -320,25 +324,6 @@ MgmGlobals::~MgmGlobals(){ free(interface_name); } -/** - * @fn readLocalConfig - * @param glob : Global variables - * @return true if success, false otherwise. - */ -static bool -readLocalConfig(){ - // Read local config file - LocalConfig lc; - if(!lc.init(0,glob.local_config_filename)){ - lc.printError(); - return false; - } - - glob.localNodeId = lc._ownNodeId; - return true; -} - - /** * @fn readGlobalConfig * @param glob : Global variables diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp index 76beaa708f1..8b6e38a0611 100644 --- a/ndb/src/ndbapi/TransporterFacade.hpp +++ b/ndb/src/ndbapi/TransporterFacade.hpp @@ -236,7 +236,6 @@ public: NdbMutex* theMutexPtr; private: static TransporterFacade* theFacadeInstance; - static ConfigRetriever *s_config_retriever; public: GlobalDictCache m_globalDictCache; diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp index 27695cec187..174d2c314af 100644 --- a/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -30,14 +31,18 @@ static int g_run_connect_thread= 0; Ndb_cluster_connection::Ndb_cluster_connection(const char *connect_string) { + DBUG_ENTER("Ndb_cluster_connection"); + DBUG_PRINT("enter",("Ndb_cluster_connection this=0x%x", this)); m_facade= TransporterFacade::theFacadeInstance= new TransporterFacade(); if (connect_string) - m_connect_string= strdup(connect_string); + m_connect_string= my_strdup(connect_string,MYF(MY_WME)); else m_connect_string= 0; m_config_retriever= 0; + m_local_config= 0; m_connect_thread= 0; m_connect_callback= 0; + DBUG_VOID_RETURN; } extern "C" pthread_handler_decl(run_ndb_cluster_connection_connect_thread, me) @@ -99,8 +104,16 @@ int Ndb_cluster_connection::connect(int reconnect) do { if (m_config_retriever == 0) { - m_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_API); - m_config_retriever->setConnectString(m_connect_string); + if (m_local_config == 0) { + m_local_config= new LocalConfig(); + if (m_local_config->init(m_connect_string,0)) { + ndbout << "Configuration error: Unable to retrieve local config" << endl; + m_local_config->printError(); + m_local_config->printUsage(); + DBUG_RETURN(-1); + } + } + m_config_retriever= new ConfigRetriever(*m_local_config, NDB_VERSION, NODE_TYPE_API); if(m_config_retriever->init() == -1) break; } @@ -145,6 +158,8 @@ int 
Ndb_cluster_connection::connect(int reconnect) Ndb_cluster_connection::~Ndb_cluster_connection() { + DBUG_ENTER("~Ndb_cluster_connection"); + DBUG_PRINT("enter",("~Ndb_cluster_connection this=0x%x", this)); TransporterFacade::stop_instance(); if (m_connect_thread) { @@ -161,10 +176,12 @@ Ndb_cluster_connection::~Ndb_cluster_connection() abort(); TransporterFacade::theFacadeInstance= 0; } - if (m_connect_string) - free(m_connect_string); + my_free(m_connect_string,MYF(MY_ALLOW_ZERO_PTR)); if (m_config_retriever) delete m_config_retriever; + if (m_local_config == 0) + delete m_local_config; + DBUG_VOID_RETURN; } From 339d362dd7c3284aa6537a5df04c80790381741b Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 25 Sep 2004 16:47:51 +0200 Subject: [PATCH 47/55] bug#5702 more bug fixes. ndb/src/kernel/blocks/dbdict/Dbdict.cpp: DropIndex - 1) return Invalid version 2) Mark as IS_DROPPING so that 2 simulatainious threads can't drop it ndb/src/ndbapi/NdbDictionary.cpp: Changed listIndex from taking table name to taking table id (should be index version aswell) ndb/src/ndbapi/NdbDictionaryImpl.cpp: List indexes using id Fix log towards m_globalHash ndb/src/ndbapi/NdbDictionaryImpl.hpp: List indexes using tableid (indexid) --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 38 +++++++++++++++++++------ ndb/src/ndbapi/NdbDictionary.cpp | 7 ++++- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 35 ++++++++++++----------- ndb/src/ndbapi/NdbDictionaryImpl.hpp | 2 +- 4 files changed, 56 insertions(+), 26 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index fa263760b7c..821c847d5b9 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -4538,6 +4538,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, parseP->errorLine = __LINE__; return; } + + if(parseP->requestType == DictTabInfo::AlterTableFromAPI) + { + ndbrequire(!checkExist); + } + if(!checkExist) + { + ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI); + } /* ---------------------------------------------------------------- */ // Verify that table name is an allowed table name. @@ -4633,12 +4642,10 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, strcpy(tablePtr.p->tableName, keyRecord.tableName); if (parseP->requestType != DictTabInfo::AlterTableFromAPI) { jam(); - c_tableRecordHash.add(tablePtr); - } - #ifdef VM_TRACE - ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i); + ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i); #endif + } //tablePtr.p->noOfPrimkey = tableDesc.NoOfKeyAttr; //tablePtr.p->noOfNullAttr = tableDesc.NoOfNullable; @@ -4677,11 +4684,12 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, handleTabInfo(it, parseP); - if(parseP->errorCode != 0){ + if(parseP->errorCode != 0) + { /** * Release table */ - releaseTableObject(tablePtr.i); + releaseTableObject(tablePtr.i, !checkExist); } }//handleTabInfoInit() @@ -6563,14 +6571,28 @@ Dbdict::execDROP_INDX_REQ(Signal* signal) int res = getMetaTablePtr(tmp, indexId, indexVersion); switch(res){ case MetaData::InvalidArgument: - case MetaData::TableNotFound: - err = DropTableRef::NoSuchTable; + err = DropIndxRef::IndexNotFound; goto error; + case MetaData::TableNotFound: case MetaData::InvalidTableVersion: err = DropIndxRef::InvalidIndexVersion; goto error; } + if (! 
tmp.p->isIndex()) { + jam(); + err = DropIndxRef::NotAnIndex; + goto error; + } + + if (tmp.p->indexState == TableRecord::IS_DROPPING){ + jam(); + err = DropIndxRef::IndexNotFound; + goto error; + } + + tmp.p->indexState = TableRecord::IS_DROPPING; + req->setOpKey(++c_opRecordSequence); NodeReceiverGroup rg(DBDICT, c_aliveNodes); sendSignal(rg, GSN_DROP_INDX_REQ, diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index 6cfacc2c340..c8414ec16a3 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -856,7 +856,12 @@ NdbDictionary::Dictionary::listObjects(List& list, Object::Type type) int NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName) { - return m_impl.listIndexes(list, tableName); + const NdbDictionary::Table* tab= getTable(tableName); + if(tab == 0) + { + return -1; + } + return m_impl.listIndexes(list, tab->getTableId()); } const struct NdbError & diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 0b2a0386a6b..9abe52fb030 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -1407,16 +1407,15 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl) // Remove cached information and let it be refreshed at next access if (m_localHash.get(originalInternalName) != NULL) { m_localHash.drop(originalInternalName); + m_globalHash->lock(); NdbTableImpl * cachedImpl = m_globalHash->get(originalInternalName); // If in local cache it must be in global if (!cachedImpl) abort(); - m_globalHash->lock(); m_globalHash->drop(cachedImpl); m_globalHash->unlock(); } } - return ret; } @@ -1714,6 +1713,7 @@ NdbDictionaryImpl::dropTable(const char * name) int NdbDictionaryImpl::dropTable(NdbTableImpl & impl) { + int res; const char * name = impl.getName(); if(impl.m_status == NdbDictionary::Object::New){ return dropTable(name); @@ -1725,28 +1725,34 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl) } List list; - if (listIndexes(list, name) == -1) + if ((res = listIndexes(list, impl.m_tableId)) == -1){ return -1; + } for (unsigned i = 0; i < list.count; i++) { const List::Element& element = list.elements[i]; - if (dropIndex(element.name, name) == -1) + if ((res = dropIndex(element.name, name)) == -1) + { return -1; + } } - + if (impl.m_noOfBlobs != 0) { - if (dropBlobTables(impl) != 0) + if (dropBlobTables(impl) != 0){ return -1; + } } - + int ret = m_receiver.dropTable(impl); - if(ret == 0){ + if(ret == 0 || m_error.code == 709){ const char * internalTableName = impl.m_internalName.c_str(); - + m_localHash.drop(internalTableName); m_globalHash->lock(); m_globalHash->drop(&impl); m_globalHash->unlock(); + + return 0; } return ret; @@ -1762,8 +1768,9 @@ NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t) char btname[NdbBlob::BlobTableNameSize]; NdbBlob::getBlobTableName(btname, &t, &c); if (dropTable(btname) != 0) { - if (m_error.code != 709) + if (m_error.code != 709){ return -1; + } } } return 0; @@ -2132,7 +2139,6 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName) m_globalHash->drop(impl.m_table); m_globalHash->unlock(); } - return ret; } @@ -2816,14 +2822,11 @@ NdbDictionaryImpl::listObjects(List& list, NdbDictionary::Object::Type type) } int -NdbDictionaryImpl::listIndexes(List& list, const char * tableName) +NdbDictionaryImpl::listIndexes(List& list, Uint32 indexId) { ListTablesReq req; - NdbTableImpl* impl = getTable(tableName); - if (impl == 0) - return -1; req.requestData = 0; - 
req.setTableId(impl->m_tableId); + req.setTableId(indexId); req.setListNames(true); req.setListIndexes(true); return m_receiver.listObjects(list, req.requestData, m_ndb.usingFullyQualifiedNames()); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index cf659c71397..1fe92db94ed 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -390,7 +390,7 @@ public: int stopSubscribeEvent(NdbEventImpl &); int listObjects(List& list, NdbDictionary::Object::Type type); - int listIndexes(List& list, const char * tableName); + int listIndexes(List& list, Uint32 indexId); NdbTableImpl * getTable(const char * tableName, void **data= 0); Ndb_local_table_info * get_local_table_info(const char * internalName); From 7a2cb204ed0427d3e13c1fa4633cd62d97487f71 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 25 Sep 2004 16:06:30 +0000 Subject: [PATCH 48/55] added printout for where configuration is fetched debug printouts mysql-test/ndb/ndbcluster.sh: . ndb/include/mgmcommon/ConfigRetriever.hpp: added printout for where configuration is fetched ndb/src/common/mgmcommon/ConfigRetriever.cpp: added printout for where configuration is fetched ndb/src/kernel/main.cpp: added printout for where configuration is fetched ndb/src/kernel/vm/Configuration.cpp: added printout for where configuration is fetched ndb/src/kernel/vm/Configuration.hpp: added printout for where configuration is fetched ndb/src/ndbapi/Ndb.cpp: debug printouts --- mysql-test/ndb/ndbcluster.sh | 2 +- ndb/include/mgmcommon/ConfigRetriever.hpp | 7 ++++++- ndb/src/common/mgmcommon/ConfigRetriever.cpp | 7 +++++++ ndb/src/kernel/main.cpp | 3 +++ ndb/src/kernel/vm/Configuration.cpp | 5 +++++ ndb/src/kernel/vm/Configuration.hpp | 5 +++++ ndb/src/ndbapi/Ndb.cpp | 10 ++++++++-- 7 files changed, 35 insertions(+), 4 deletions(-) diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index a1b6400d753..7485e42923e 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -45,7 +45,7 @@ status_ndb= ndb_diskless=0 ndb_no_ord=512 -ndb_con_op=10000 +ndb_con_op=105000 ndb_dmem=80M ndb_imem=24M diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp index a584c394f45..938f4c19071 100644 --- a/ndb/include/mgmcommon/ConfigRetriever.hpp +++ b/ndb/include/mgmcommon/ConfigRetriever.hpp @@ -73,6 +73,9 @@ public: * Verify config */ bool verifyConfig(const struct ndb_mgm_configuration *, Uint32 nodeid); + + Uint32 get_mgmd_port() const {return m_mgmd_port;}; + const char *get_mgmd_host() const {return m_mgmd_host;}; private: BaseString errorString; enum ErrorType { @@ -85,7 +88,9 @@ private: struct LocalConfig& _localConfig; Uint32 _ownNodeId; - + Uint32 m_mgmd_port; + const char *m_mgmd_host; + Uint32 m_version; Uint32 m_node_type; NdbMgmHandle m_handle; diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index 44b41956d33..c4957ffdbf1 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -74,6 +74,9 @@ ConfigRetriever::init() { int ConfigRetriever::do_connect(int exit_on_connect_failure){ + m_mgmd_port= 0; + m_mgmd_host= 0; + if(!m_handle) m_handle= ndb_mgm_create_handle(); @@ -94,6 +97,8 @@ ConfigRetriever::do_connect(int exit_on_connect_failure){ case MgmId_TCP: tmp.assfmt("%s:%d", m->name.c_str(), m->port); if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) { + m_mgmd_port= m->port; + m_mgmd_host= 
m->name.c_str(); return 0; } setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle)); @@ -118,6 +123,8 @@ ConfigRetriever::do_connect(int exit_on_connect_failure){ ndb_mgm_destroy_handle(&m_handle); m_handle= 0; + m_mgmd_port= 0; + m_mgmd_host= 0; return -1; } diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index 035dfff8d01..9c25da79065 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -252,6 +252,9 @@ systemInfo(const Configuration & config, const LogLevel & logLevel){ if(logLevel.getLogLevel(LogLevel::llStartUp) > 0){ g_eventLogger.info("NDB Cluster -- DB node %d", globalData.ownId); g_eventLogger.info("%s --", NDB_VERSION_STRING); + if (config.get_mgmd_host()) + g_eventLogger.info("Configuration fetched at %s port %d", + config.get_mgmd_host(), config.get_mgmd_port()); #ifdef NDB_SOLARIS // ok g_eventLogger.info("NDB is running on a machine with %d processor(s) at %d MHz", processor, speed); diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 0c4e24129df..600e6f67910 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -193,6 +193,8 @@ Configuration::fetch_configuration(LocalConfig &local_config){ delete m_config_retriever; } + m_mgmd_port= 0; + m_mgmd_host= 0; m_config_retriever= new ConfigRetriever(local_config, NDB_VERSION, NODE_TYPE_DB); if(m_config_retriever->init() == -1 || m_config_retriever->do_connect() == -1){ @@ -207,6 +209,9 @@ Configuration::fetch_configuration(LocalConfig &local_config){ ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could connect to ndb_mgmd", s); } + m_mgmd_port= m_config_retriever->get_mgmd_port(); + m_mgmd_host= m_config_retriever->get_mgmd_host(); + ConfigRetriever &cr= *m_config_retriever; if((globalData.ownId = cr.allocNodeId()) == 0){ diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp index 7ac171c4a70..2ea32ffea37 100644 --- a/ndb/src/kernel/vm/Configuration.hpp +++ b/ndb/src/kernel/vm/Configuration.hpp @@ -67,6 +67,9 @@ public: const ndb_mgm_configuration_iterator * getOwnConfigIterator() const; + Uint32 get_mgmd_port() const {return m_mgmd_port;}; + const char *get_mgmd_host() const {return m_mgmd_host;}; + class LogLevel * m_logLevel; private: friend class Cmvmi; @@ -95,6 +98,8 @@ private: char * _backupPath; bool _initialStart; char * _connectString; + Uint32 m_mgmd_port; + const char *m_mgmd_host; bool _daemonMode; void calcSizeAlt(class ConfigValues * ); diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index 7312eafb2f5..cb126a221a8 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -327,7 +327,11 @@ Ndb::startTransaction(Uint32 aPriority, const char * keyData, Uint32 keyLen) } else { nodeId = 0; }//if - DBUG_RETURN(startTransactionLocal(aPriority, nodeId)); + { + NdbConnection *trans= startTransactionLocal(aPriority, nodeId); + DBUG_PRINT("exit",("start trans= 0x%x", trans)); + DBUG_RETURN(trans); + } } else { DBUG_RETURN(NULL); }//if @@ -451,7 +455,7 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId) abort(); } #endif - DBUG_PRINT("exit", ("transaction id: %d", tConnection->getTransactionId())); + DBUG_PRINT("exit", ("transid= %lld", tConnection->getTransactionId())); DBUG_RETURN(tConnection); }//Ndb::startTransactionLocal() @@ -465,6 +469,8 @@ void Ndb::closeTransaction(NdbConnection* aConnection) { DBUG_ENTER("Ndb::closeTransaction"); + DBUG_PRINT("enter",("close trans= 0x%x, transid= %lld", + aConnection, aConnection->getTransactionId())); 
NdbConnection* tCon; NdbConnection* tPreviousCon; From 45a7efa3e8726cc4bcabc7d3abbd74868c5921e8 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 25 Sep 2004 16:36:54 +0000 Subject: [PATCH 49/55] corrected small :) mistake --- ndb/src/ndbapi/ndb_cluster_connection.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp index 174d2c314af..688445125f3 100644 --- a/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -106,7 +106,7 @@ int Ndb_cluster_connection::connect(int reconnect) { if (m_local_config == 0) { m_local_config= new LocalConfig(); - if (m_local_config->init(m_connect_string,0)) { + if (!m_local_config->init(m_connect_string,0)) { ndbout << "Configuration error: Unable to retrieve local config" << endl; m_local_config->printError(); m_local_config->printUsage(); @@ -179,7 +179,7 @@ Ndb_cluster_connection::~Ndb_cluster_connection() my_free(m_connect_string,MYF(MY_ALLOW_ZERO_PTR)); if (m_config_retriever) delete m_config_retriever; - if (m_local_config == 0) + if (m_local_config) delete m_local_config; DBUG_VOID_RETURN; } From ea29015d6fb6e70e8ce7a310450581241643c1c3 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 25 Sep 2004 18:36:56 +0200 Subject: [PATCH 50/55] Ops forgott to put table in hash --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 821c847d5b9..97270b65132 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -4561,7 +4561,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, TableRecordPtr tablePtr; c_tableRecordHash.find(tablePtr, keyRecord); - + if (checkExist){ jam(); /* ---------------------------------------------------------------- */ @@ -4645,6 +4645,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, #ifdef VM_TRACE ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i); #endif + c_tableRecordHash.add(tablePtr); } //tablePtr.p->noOfPrimkey = tableDesc.NoOfKeyAttr; From 01eca8f8307bffb4db15de0ed10aa1c5c3cbcfa0 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 25 Sep 2004 17:30:51 +0000 Subject: [PATCH 51/55] local config now outside config retriever --- ndb/test/src/NdbBackup.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index 71b4b49b3a6..ad26dbeab16 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -69,7 +69,11 @@ NdbBackup::getBackupDataDirForNode(int _node_id){ /** * Fetch configuration from management server */ - ConfigRetriever cr(0, NODE_TYPE_API); + LocalConfig lc; + if (!lc.init(0,0)) { + abort(); + } + ConfigRetriever cr(lc, 0, NODE_TYPE_API); ndb_mgm_configuration * p = 0; BaseString tmp; tmp.assfmt("%s:%d", host.c_str(), port); From 0ef58d54886429896a6146803a7cf21822ad075b Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 25 Sep 2004 19:31:36 +0200 Subject: [PATCH 52/55] testBackup ndb/src/mgmsrv/MgmtSrvr.cpp: Fix so that start backup can be blocking ndb/test/run-test/daily-devel-tests.txt: Use atrt-testBackup (which setups env) --- ndb/src/mgmsrv/MgmtSrvr.cpp | 12 +++++++++++- ndb/test/run-test/daily-devel-tests.txt | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 
4c09805ba12..92a8025295f 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2420,7 +2420,17 @@ void MgmtSrvr::backupCallback(BackupEvent & event) { m_lastBackupEvent = event; - theWaitState = NO_WAIT; + switch(event.Event){ + case BackupEvent::BackupFailedToStart: + case BackupEvent::BackupAborted: + case BackupEvent::BackupCompleted: + theWaitState = NO_WAIT; + break; + case BackupEvent::BackupStarted: + if(theWaitState == WAIT_BACKUP_STARTED) + theWaitState = NO_WAIT; + } + return; } diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt index 2497fa7d038..f2abc961807 100644 --- a/ndb/test/run-test/daily-devel-tests.txt +++ b/ndb/test/run-test/daily-devel-tests.txt @@ -27,7 +27,7 @@ cmd: atrt-testBackup args: -n BackupOne T1 T6 T3 I3 max-time: 1000 -cmd: testBackup +cmd: atrt-testBackup args: -n BackupBank T6 # From 2de9ef17262a87c2b4d28c301387fcb26bba9b05 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 26 Sep 2004 10:03:57 +0200 Subject: [PATCH 53/55] testDict -n InvalidTables ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Add more require's in VM_TRACE Fix release of invalid tables testDict -n InvalidTables --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 97270b65132..d1a8128ea7f 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -3475,12 +3475,19 @@ int Dbdict::handleAlterTab(AlterTabReq * req, jam(); // Table rename // Remove from hashtable +#ifdef VM_TRACE + TableRecordPtr tmp; + ndbrequire(c_tableRecordHash.find(tmp, *origTablePtr.p)); +#endif c_tableRecordHash.remove(origTablePtr); strcpy(regAlterTabPtr->previousTableName, origTablePtr.p->tableName); strcpy(origTablePtr.p->tableName, newTablePtr.p->tableName); // Set new schema version origTablePtr.p->tableVersion = newTablePtr.p->tableVersion; // Put it back +#ifdef VM_TRACE + ndbrequire(!c_tableRecordHash.find(tmp, *origTablePtr.p)); +#endif c_tableRecordHash.add(origTablePtr); return 0; @@ -3501,12 +3508,19 @@ void Dbdict::revertAlterTable(Signal * signal, TableRecordPtr tablePtr; c_tableRecordPool.getPtr(tablePtr, tableId); // Remove from hashtable +#ifdef VM_TRACE + TableRecordPtr tmp; + ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p)); +#endif c_tableRecordHash.remove(tablePtr); // Restore name strcpy(tablePtr.p->tableName, regAlterTabPtr->previousTableName); // Revert schema version tablePtr.p->tableVersion = tablePtr.p->tableVersion - 1; // Put it back +#ifdef VM_TRACE + ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p)); +#endif c_tableRecordHash.add(tablePtr); return; @@ -4644,6 +4658,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, jam(); #ifdef VM_TRACE ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i); + TableRecordPtr tmp; + ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p)); #endif c_tableRecordHash.add(tablePtr); } @@ -4690,7 +4706,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, /** * Release table */ - releaseTableObject(tablePtr.i, !checkExist); + releaseTableObject(tablePtr.i, checkExist); } }//handleTabInfoInit() @@ -5501,8 +5517,13 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash) AttributeRecordPtr attrPtr; c_tableRecordPool.getPtr(tablePtr, tableId); if (removeFromHash) + { +#ifdef VM_TRACE + TableRecordPtr tmp; + 
ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p)); +#endif c_tableRecordHash.remove(tablePtr); - + } tablePtr.p->tabState = TableRecord::NOT_DEFINED; Uint32 nextAttrRecord = tablePtr.p->firstAttribute; From a64953ed3f469e3efdff4dd6462a1756675459b6 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 26 Sep 2004 10:51:14 +0200 Subject: [PATCH 54/55] testBasic -n MassiveRollback Make sure to not receive more that buffer can fit currently ndb/src/common/transporter/TCP_Transporter.cpp: Make sure to not receive more that buffer can fit currently --- .../common/transporter/TCP_Transporter.cpp | 80 ++++++++++--------- 1 file changed, 43 insertions(+), 37 deletions(-) diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp index b44afc7c136..7cfdc224b34 100644 --- a/ndb/src/common/transporter/TCP_Transporter.cpp +++ b/ndb/src/common/transporter/TCP_Transporter.cpp @@ -362,50 +362,56 @@ TCP_Transporter::doReceive() { // Select-function must return the socket for read // before this method is called // It reads the external TCP/IP interface once - - const int nBytesRead = recv(theSocket, - receiveBuffer.insertPtr, maxReceiveSize, 0); - - if (nBytesRead > 0) { - receiveBuffer.sizeOfData += nBytesRead; - receiveBuffer.insertPtr += nBytesRead; + int size = receiveBuffer.sizeOfBuffer - receiveBuffer.sizeOfData; + if(size > 0){ + const int nBytesRead = recv(theSocket, + receiveBuffer.insertPtr, + size < maxReceiveSize ? size : maxReceiveSize, + 0); - if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){ + if (nBytesRead > 0) { + receiveBuffer.sizeOfData += nBytesRead; + receiveBuffer.insertPtr += nBytesRead; + + if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){ #ifdef DEBUG_TRANSPORTER - ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", - receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); - ndbout_c("nBytesRead = %d", nBytesRead); + ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", + receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); + ndbout_c("nBytesRead = %d", nBytesRead); #endif - ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", - receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); - report_error(TE_INVALID_MESSAGE_LENGTH); - return 0; - } - - receiveCount ++; - receiveSize += nBytesRead; - - if(receiveCount == reportFreq){ - reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize); - receiveCount = 0; - receiveSize = 0; + ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", + receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); + report_error(TE_INVALID_MESSAGE_LENGTH); + return 0; + } + + receiveCount ++; + receiveSize += nBytesRead; + + if(receiveCount == reportFreq){ + reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize); + receiveCount = 0; + receiveSize = 0; + } + return nBytesRead; + } else { +#if defined DEBUG_TRANSPORTER + ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d " + "errno = %d strerror = %s", + DISCONNECT_ERRNO(InetErrno, nBytesRead), + remoteNodeId, nBytesRead, InetErrno, + (char*)ndbstrerror(InetErrno)); +#endif + if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){ + // The remote node has closed down + doDisconnect(); + report_disconnect(InetErrno); + } } return nBytesRead; } else { -#if defined DEBUG_TRANSPORTER - ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d " - "errno = %d strerror = %s", - DISCONNECT_ERRNO(InetErrno, 
nBytesRead), - remoteNodeId, nBytesRead, InetErrno, - (char*)ndbstrerror(InetErrno)); -#endif - if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){ - // The remote node has closed down - doDisconnect(); - report_disconnect(InetErrno); - } + return 0; } - return nBytesRead; } void From a6b64d28984a6c20b632a1beb1f2580cce33ae4a Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 27 Sep 2004 00:24:23 +0000 Subject: [PATCH 55/55] removed init on ConfigRetriever added some debug printouts some changes in ndbcluster_init to make start of mysqld first work ndb/include/mgmcommon/ConfigRetriever.hpp: removed init on ConfigRetriever ndb/src/common/mgmcommon/ConfigRetriever.cpp: removed init on ConfigRetriever added some debug printouts ndb/src/kernel/vm/Configuration.cpp: removed init on ConfigRetriever ndb/src/ndbapi/ndb_cluster_connection.cpp: removed init on ConfigRetriever added sleep in retry sql/ha_ndbcluster.cc: some changes in ndbcluster_init to make start of mysqld first work --- ndb/include/mgmcommon/ConfigRetriever.hpp | 8 +------- ndb/src/common/mgmcommon/ConfigRetriever.cpp | 17 ++++++++++------- ndb/src/kernel/vm/Configuration.cpp | 5 +---- ndb/src/ndbapi/ndb_cluster_connection.cpp | 5 +++-- sql/ha_ndbcluster.cc | 12 +++++++----- 5 files changed, 22 insertions(+), 25 deletions(-) diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp index 938f4c19071..6c32255e921 100644 --- a/ndb/include/mgmcommon/ConfigRetriever.hpp +++ b/ndb/include/mgmcommon/ConfigRetriever.hpp @@ -31,16 +31,10 @@ public: ConfigRetriever(LocalConfig &local_config, Uint32 version, Uint32 nodeType); ~ConfigRetriever(); - /** - * Read local config - * @return Own node id, -1 means fail - */ - int init(); - int do_connect(int exit_on_connect_failure= false); /** - * Get configuration for current (nodeId given in local config file) node. + * Get configuration for current node. * * Configuration is fetched from one MGM server configured in local config * file. 
The method loops over all the configured MGM servers and tries diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index c4957ffdbf1..b4f2d0b9897 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -52,6 +52,7 @@ ConfigRetriever::ConfigRetriever(LocalConfig &local_config, m_handle= 0; m_version = version; m_node_type = node_type; + _ownNodeId = _localConfig._ownNodeId; } ConfigRetriever::~ConfigRetriever(){ @@ -66,11 +67,6 @@ ConfigRetriever::~ConfigRetriever(){ //**************************************************************************** //**************************************************************************** -int -ConfigRetriever::init() { - return _ownNodeId = _localConfig._ownNodeId; -} - int ConfigRetriever::do_connect(int exit_on_connect_failure){ @@ -93,12 +89,18 @@ ConfigRetriever::do_connect(int exit_on_connect_failure){ BaseString tmp; for (unsigned int i = 0; i<_localConfig.ids.size(); i++){ MgmtSrvrId * m = &_localConfig.ids[i]; + DBUG_PRINT("info",("trying %s:%d", + m->name.c_str(), + m->port)); switch(m->type){ case MgmId_TCP: tmp.assfmt("%s:%d", m->name.c_str(), m->port); if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) { m_mgmd_port= m->port; m_mgmd_host= m->name.c_str(); + DBUG_PRINT("info",("connected to ndb_mgmd at %s:%d", + m_mgmd_host, + m_mgmd_port)); return 0; } setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle)); @@ -106,9 +108,10 @@ ConfigRetriever::do_connect(int exit_on_connect_failure){ break; } } - if (exit_on_connect_failure) - return 1; if(latestErrorType == CR_RETRY){ + DBUG_PRINT("info",("CR_RETRY")); + if (exit_on_connect_failure) + return 1; REPORT_WARNING("Failed to retrieve cluster configuration"); ndbout << "(Cause of failure: " << getErrorString() << ")" << endl; ndbout << "Attempt " << retry << " of " << retry_max << ". " diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 600e6f67910..fd5d79b92e7 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -196,13 +196,10 @@ Configuration::fetch_configuration(LocalConfig &local_config){ m_mgmd_port= 0; m_mgmd_host= 0; m_config_retriever= new ConfigRetriever(local_config, NDB_VERSION, NODE_TYPE_DB); - if(m_config_retriever->init() == -1 || - m_config_retriever->do_connect() == -1){ - + if(m_config_retriever->do_connect() == -1){ const char * s = m_config_retriever->getErrorString(); if(s == 0) s = "No error given!"; - /* Set stop on error to true otherwise NDB will go into an restart loop... 
*/ diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp index 688445125f3..5be4f0f9f91 100644 --- a/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -60,6 +60,7 @@ void Ndb_cluster_connection::connect_thread() DBUG_ENTER("Ndb_cluster_connection::connect_thread"); int r; do { + NdbSleep_SecSleep(1); if ((r = connect(1)) == 0) break; if (r == -1) { @@ -80,6 +81,7 @@ int Ndb_cluster_connection::start_connect_thread(int (*connect_callback)(void)) m_connect_callback= connect_callback; if ((r = connect(1)) == 1) { + DBUG_PRINT("info",("starting thread")); m_connect_thread= NdbThread_Create(run_ndb_cluster_connection_connect_thread, (void**)this, 32768, @@ -114,8 +116,6 @@ int Ndb_cluster_connection::connect(int reconnect) } } m_config_retriever= new ConfigRetriever(*m_local_config, NDB_VERSION, NODE_TYPE_API); - if(m_config_retriever->init() == -1) - break; } else if (reconnect == 0) @@ -131,6 +131,7 @@ int Ndb_cluster_connection::connect(int reconnect) else if(m_config_retriever->do_connect() == -1) break; + Uint32 nodeId = m_config_retriever->allocNodeId(); for(Uint32 i = 0; nodeId == 0 && i<5; i++){ NdbSleep_SecSleep(3); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 2c330fae84d..07a1ade912a 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3746,12 +3746,14 @@ bool ndbcluster_init() { g_ndb->waitUntilReady(10); } - else if(res == 1 && g_ndb_cluster_connection->start_connect_thread()) + else if(res == 1) { - DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()")); - DBUG_RETURN(TRUE); + if (g_ndb_cluster_connection->start_connect_thread()) { + DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()")); + DBUG_RETURN(TRUE); + } } - else + else { DBUG_ASSERT(res == -1); DBUG_PRINT("error", ("permanent error")); @@ -3764,7 +3766,7 @@ bool ndbcluster_init() ndbcluster_inited= 1; #ifdef USE_DISCOVER_ON_STARTUP - if (ndb_discover_tables() != 0) + if (res == 0 && ndb_discover_tables() != 0) DBUG_RETURN(TRUE); #endif DBUG_RETURN(false);
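Taken together, patches 43 through 55 above keep reworking the same configuration-bootstrap path: LocalConfig is now created and initialised by the caller, ConfigRetriever borrows it by reference, loses setConnectString()/setLocalConfigFileName() and finally init(), picks up _ownNodeId in its constructor, and leaves the actual fetch to do_connect() and getConfig(). The sketch below is a minimal illustration of the calling pattern the series converges on, pieced together from the NdbBackup.cpp, kernel main.cpp and ndb_cluster_connection.cpp hunks; the helper name, the include lines and the error handling are illustrative assumptions, not code from the tree.

    // Sketch only: mirrors the LocalConfig/ConfigRetriever usage shown in the
    // hunks above. Header names and the NODE_TYPE_API constant are assumed
    // from the diffs, not verified against the build.
    #include <ndb_global.h>
    #include <LocalConfig.hpp>
    #include <ConfigRetriever.hpp>

    static struct ndb_mgm_configuration *
    fetch_node_config(const char * connect_string)   // hypothetical helper name
    {
      LocalConfig local_config;                      // caller owns the LocalConfig now
      if (!local_config.init(connect_string, 0)) {   // connect string or Ndb.cfg file
        local_config.printError();
        local_config.printUsage();
        return 0;
      }

      // ConfigRetriever no longer has an init() step; it reads _ownNodeId from
      // the LocalConfig it is handed and connects to a mgmd with do_connect().
      ConfigRetriever cr(local_config, NDB_VERSION, NODE_TYPE_API);
      if (cr.do_connect() == -1)
        return 0;                                    // cr.getErrorString() says why

      return cr.getConfig();                         // configuration for this node
    }

Used this way, the LocalConfig object has to outlive the ConfigRetriever, since the retriever only keeps a reference to it; that is why the patches above give Ndb_cluster_connection and MgmtSrvr a LocalConfig member of their own instead of a temporary.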