diff --git a/mysql-test/r/ndb_charset.result b/mysql-test/r/ndb_charset.result new file mode 100644 index 00000000000..93429a1fcb0 --- /dev/null +++ b/mysql-test/r/ndb_charset.result @@ -0,0 +1,191 @@ +drop table if exists t1; +create table t1 ( +a char(3) character set latin1 collate latin1_bin primary key +) engine=ndb; +insert into t1 values('aAa'); +insert into t1 values('aaa'); +insert into t1 values('AAA'); +select * from t1 order by a; +a +AAA +aAa +aaa +select * from t1 where a = 'aAa'; +a +aAa +select * from t1 where a = 'aaa'; +a +aaa +select * from t1 where a = 'AaA'; +a +select * from t1 where a = 'AAA'; +a +AAA +drop table t1; +create table t1 ( +a char(3) character set latin1 collate latin1_swedish_ci primary key +) engine=ndb; +insert into t1 values('aAa'); +insert into t1 values('aaa'); +ERROR 23000: Duplicate entry 'aaa' for key 1 +insert into t1 values('AAA'); +ERROR 23000: Duplicate entry 'AAA' for key 1 +select * from t1 order by a; +a +aAa +select * from t1 where a = 'aAa'; +a +aAa +select * from t1 where a = 'aaa'; +a +aAa +select * from t1 where a = 'AaA'; +a +aAa +select * from t1 where a = 'AAA'; +a +aAa +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate latin1_bin not null, +unique key(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +select * from t1 order by p; +p a +1 aAa +2 aaa +3 AAA +select * from t1 where a = 'aAa'; +p a +1 aAa +select * from t1 where a = 'aaa'; +p a +2 aaa +select * from t1 where a = 'AaA'; +p a +select * from t1 where a = 'AAA'; +p a +3 AAA +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate latin1_swedish_ci not null, +unique key(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +ERROR 23000: Can't write, because of unique constraint, to table 't1' +insert into t1 values(3, 'AAA'); +ERROR 23000: Can't write, because of unique constraint, to table 't1' +select * from t1 order by p; +p a +1 aAa +select * from t1 where a = 'aAa'; +p a +1 aAa +select * from t1 where a = 'aaa'; +p a +1 aAa +select * from t1 where a = 'AaA'; +p a +1 aAa +select * from t1 where a = 'AAA'; +p a +1 aAa +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate latin1_bin not null, +index(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +select * from t1 order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +explain select * from t1 where a = 'zZz' order by p; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort +select * from t1 where a = 'aAa' order by p; +p a +1 aAa +4 aAa +select * from t1 where a = 'aaa' order by p; +p a +2 aaa +5 aaa +select * from t1 where a = 'AaA' order by p; +p a +select * from t1 where a = 'AAA' order by p; +p a +3 AAA +6 AAA +drop table t1; +create table t1 ( +p int primary key, +a char(3) character set latin1 collate latin1_swedish_ci not null, +index(a) +) engine=ndb; +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +select * from t1 order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +explain select 
* from t1 where a = 'zZz' order by p; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort +select * from t1 where a = 'aAa' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +select * from t1 where a = 'aaa' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +select * from t1 where a = 'AaA' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +select * from t1 where a = 'AAA' order by p; +p a +1 aAa +2 aaa +3 AAA +4 aAa +5 aaa +6 AAA +drop table t1; diff --git a/mysql-test/r/ndb_index.result b/mysql-test/r/ndb_index.result index dd92c237ace..5702552b0b5 100644 --- a/mysql-test/r/ndb_index.result +++ b/mysql-test/r/ndb_index.result @@ -4,7 +4,7 @@ PORT varchar(16) NOT NULL, ACCESSNODE varchar(16) NOT NULL, POP varchar(48) NOT NULL, ACCESSTYPE int unsigned NOT NULL, -CUSTOMER_ID varchar(20) NOT NULL, +CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL, PROVIDER varchar(16), TEXPIRE int unsigned, NUM_IP int unsigned, diff --git a/mysql-test/t/ndb_charset.test b/mysql-test/t/ndb_charset.test new file mode 100644 index 00000000000..b9f28ed0faf --- /dev/null +++ b/mysql-test/t/ndb_charset.test @@ -0,0 +1,159 @@ +--source include/have_ndb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# Minimal NDB charset test. +# + +# pk - binary + +create table t1 ( + a char(3) character set latin1 collate latin1_bin primary key +) engine=ndb; +# ok +insert into t1 values('aAa'); +insert into t1 values('aaa'); +insert into t1 values('AAA'); +# 3 +select * from t1 order by a; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 0 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# pk - case insensitive + +create table t1 ( + a char(3) character set latin1 collate latin1_swedish_ci primary key +) engine=ndb; +# ok +insert into t1 values('aAa'); +# fail +--error 1062 +insert into t1 values('aaa'); +--error 1062 +insert into t1 values('AAA'); +# 1 +select * from t1 order by a; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 1 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# unique hash index - binary + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_bin not null, + unique key(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +# 3 +select * from t1 order by p; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 0 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# unique hash index - case insensitive + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_swedish_ci not null, + unique key(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +# fail +--error 1169 +insert into t1 values(2, 'aaa'); +--error 1169 +insert into t1 values(3, 'AAA'); +# 1 +select * from t1 order by p; +# 1 +select * from t1 where a = 'aAa'; +# 1 +select * from t1 where a = 'aaa'; +# 1 +select * from t1 where a = 'AaA'; +# 1 +select * from t1 where a = 'AAA'; +drop table t1; + +# ordered index - binary + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_bin not null, + index(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 
'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +# 6 +select * from t1 order by p; +# plan +explain select * from t1 where a = 'zZz' order by p; +# 2 +select * from t1 where a = 'aAa' order by p; +# 2 +select * from t1 where a = 'aaa' order by p; +# 0 +select * from t1 where a = 'AaA' order by p; +# 2 +select * from t1 where a = 'AAA' order by p; +drop table t1; + +# ordered index - case insensitive + +create table t1 ( + p int primary key, + a char(3) character set latin1 collate latin1_swedish_ci not null, + index(a) +) engine=ndb; +# ok +insert into t1 values(1, 'aAa'); +insert into t1 values(2, 'aaa'); +insert into t1 values(3, 'AAA'); +insert into t1 values(4, 'aAa'); +insert into t1 values(5, 'aaa'); +insert into t1 values(6, 'AAA'); +# 6 +select * from t1 order by p; +# plan +explain select * from t1 where a = 'zZz' order by p; +# 6 +select * from t1 where a = 'aAa' order by p; +# 6 +select * from t1 where a = 'aaa' order by p; +# 6 +select * from t1 where a = 'AaA' order by p; +# 6 +select * from t1 where a = 'AAA' order by p; +drop table t1; diff --git a/mysql-test/t/ndb_index.test b/mysql-test/t/ndb_index.test index d3977dc3ea4..e65b24a9b20 100644 --- a/mysql-test/t/ndb_index.test +++ b/mysql-test/t/ndb_index.test @@ -9,7 +9,7 @@ CREATE TABLE t1 ( ACCESSNODE varchar(16) NOT NULL, POP varchar(48) NOT NULL, ACCESSTYPE int unsigned NOT NULL, - CUSTOMER_ID varchar(20) NOT NULL, + CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL, PROVIDER varchar(16), TEXPIRE int unsigned, NUM_IP int unsigned, diff --git a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp index 7abebcc832d..76ce1a8efe3 100644 --- a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp +++ b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp @@ -46,9 +46,9 @@ * * NdbDictionary::Column * setName() - * setPrimaryKey() * setType() * setLength() + * setPrimaryKey() * setNullable() * * NdbDictionary::Table @@ -234,9 +234,9 @@ int create_table(Ndb * myNdb) * Column REG_NO */ myColumn.setName("REG_NO"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -244,9 +244,9 @@ int create_table(Ndb * myNdb) * Column BRAND */ myColumn.setName("BRAND"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -254,9 +254,9 @@ int create_table(Ndb * myNdb) * Column COLOR */ myColumn.setName("COLOR"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -454,6 +454,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData) int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database /******************************************* @@ -493,5 +494,3 @@ int main() std::cout << "Number of temporary errors: " << tempErrors << std::endl; delete myNdb; } - - diff --git a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp index 879d86de824..03a84aa249b 100644 --- a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp +++ b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp @@ -44,6 +44,7 @@ int main() { + ndb_init(); Ndb* 
myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database NdbDictionary::Table myTable; NdbDictionary::Column myColumn; @@ -78,16 +79,16 @@ int main() myTable.setName("MYTABLENAME"); myColumn.setName("ATTR1"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("ATTR2"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); diff --git a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp b/ndb/examples/ndbapi_example2/ndbapi_example2.cpp index 1c61721c037..95a7bae66b8 100644 --- a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp +++ b/ndb/examples/ndbapi_example2/ndbapi_example2.cpp @@ -39,6 +39,7 @@ static void callback(int result, NdbConnection* NdbObject, void* aObject); int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB_2" ); // Object representing the database NdbConnection* myNdbConnection[2]; // For transactions diff --git a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp b/ndb/examples/ndbapi_example3/ndbapi_example3.cpp index 36d2cf1608c..91d9ff122ba 100644 --- a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp +++ b/ndb/examples/ndbapi_example3/ndbapi_example3.cpp @@ -176,6 +176,7 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) { int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database /******************************************* diff --git a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp index 520172b9b0c..fcb770d49e9 100644 --- a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp +++ b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp @@ -44,6 +44,7 @@ int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database NdbDictionary::Table myTable; NdbDictionary::Column myColumn; @@ -79,16 +80,16 @@ int main() myTable.setName("MYTABLENAME"); myColumn.setName("ATTR1"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("ATTR2"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); diff --git a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp b/ndb/examples/ndbapi_example5/ndbapi_example5.cpp index a9d3099883c..77f74e7bb63 100644 --- a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp +++ b/ndb/examples/ndbapi_example5/ndbapi_example5.cpp @@ -65,6 +65,7 @@ int myCreateEvent(Ndb* myNdb, int main() { + ndb_init(); Ndb* myNdb = myCreateNdb(); NdbDictionary::Dictionary *myDict; diff --git a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp index 7c3a66326c6..22641bc5b57 100644 --- a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp +++ b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp @@ -47,9 +47,9 @@ * * NdbDictionary::Column * setName() - * setPrimaryKey() * setType() * setLength() + * setPrimaryKey() * setNullable() * * NdbDictionary::Table @@ -165,24 +165,24 @@ int create_table(Ndb * myNdb) myTable.setName("GARAGE"); myColumn.setName("REG_NO"); - myColumn.setPrimaryKey(true); 
myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("BRAND"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); myColumn.setName("COLOR"); - myColumn.setPrimaryKey(false); myColumn.setType(NdbDictionary::Column::Char); myColumn.setLength(20); + myColumn.setPrimaryKey(false); myColumn.setNullable(false); myTable.addColumn(myColumn); @@ -761,6 +761,7 @@ int scan_print(Ndb * myNdb, int parallelism, int main() { + ndb_init(); Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database @@ -813,4 +814,3 @@ int main() delete myNdb; } - diff --git a/ndb/examples/select_all/select_all.cpp b/ndb/examples/select_all/select_all.cpp index 3cdbdc47e62..bd25fb60128 100644 --- a/ndb/examples/select_all/select_all.cpp +++ b/ndb/examples/select_all/select_all.cpp @@ -112,6 +112,7 @@ const char* ResultSetContainer::getAttrName(int i) const {return m_names[i];} int main(int argc, const char** argv) { + ndb_init(); Ndb* myNdb = new Ndb("ndbapi_example4"); // Object representing the database NdbConnection* myNdbConnection; // For transactions NdbOperation* myNdbOperation; // For operations diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp index 6cd6a83e68d..686989089ae 100644 --- a/ndb/include/debugger/EventLogger.hpp +++ b/ndb/include/debugger/EventLogger.hpp @@ -24,6 +24,32 @@ #include #include +class EventLoggerBase { +public: + virtual ~EventLoggerBase(); + + /** + * LogLevel settings + */ + LogLevel m_logLevel; + + /** + * This matrix defines which event should be printed when + * + * threshold - is in range [0-15] + * severity - DEBUG to ALERT (Type of log message) + */ + struct EventRepLogLevelMatrix { + EventReport::EventType eventType; + LogLevel::EventCategory eventCategory; + Uint32 threshold; + Logger::LoggerLevel severity; + }; + + static const EventRepLogLevelMatrix matrix[]; + static const Uint32 matrixSize; +}; + /** * The EventLogger is primarily used for logging NDB events * in the Management Server. It inherits all logging functionality of Logger. @@ -58,7 +84,7 @@ * @see Logger * @version #@ $Id: EventLogger.hpp,v 1.3 2003/09/01 10:15:52 innpeno Exp $ */ -class EventLogger : public Logger +class EventLogger : public EventLoggerBase, public Logger { public: /** @@ -70,7 +96,7 @@ public: /** * Destructor. */ - ~EventLogger(); + virtual ~EventLogger(); /** * Opens/creates the eventlog with the specified filename. @@ -92,16 +118,6 @@ public: */ void close(); - /** - * Logs the NDB event. - * - * @param nodeId the node id of event origin. - * @param eventType the type of event. - * @param theData the event data. - * @deprecated use log(int eventType, const Uint32* theData, NodeId nodeId) - */ - void log(NodeId nodeId, int eventType, const Uint32* theData); - /** * Logs the NDB event. * @@ -109,32 +125,8 @@ public: * @param theData the event data. * @param nodeId the node id of event origin. */ - void log(int eventType, const Uint32* theData, NodeId nodeId = 0); - - /** - * Returns the current log levels. - * Enable, disable log levels to filter the events that are sent to the - * eventlog. - * - * @return the log level. 
- */ - LogLevel& getLoglevel(); + virtual void log(int, const Uint32*, NodeId = 0,const class LogLevel * = 0); - /** - * Returns the log level that is used to filter an event. The event will not - * be logged unless its event category's log level is <= levelFilter. - * - * @return the log level filter that is used for all event categories. - */ - int getFilterLevel() const; - /** - * Sets log level filter. The event will be logged if - * the event category's log level is <= 'filterLevel'. - * - * @param level the log level to filter. - */ - void setFilterLevel(int filterLevel); - /** * Returns the event text for the specified event report type. * @@ -143,72 +135,25 @@ public: * @param nodeId a node id. * @return the event report text. */ - static const char* getText(int type, + static const char* getText(char * dst, size_t dst_len, + int type, const Uint32* theData, NodeId nodeId = 0); - - /** - * Find a category matching the string - * - * @param str string to match. - * @param cat the event category. - * @param exactMatch only do exact matching. - * - * @return TRUE if match is found, then cat is modified - * FALSE if match is not found - */ - static bool matchEventCategory(const char * str, - LogLevel::EventCategory * cat, - bool exactMatch = false); /** - * Returns category name or NULL if not found. + * Returns the log level that is used to filter an event. The event will not + * be logged unless its event category's log level is <= levelFilter. * - * @param cat the event category. - * @return category name. + * @return the log level filter that is used for all event categories. */ - static const char * getEventCategoryName(LogLevel::EventCategory cat); + int getFilterLevel() const; /** - * Specifies allowed event categories/log levels. - */ - struct EventCategoryName { - LogLevel::EventCategory category; - const char * name; - }; - - static const EventCategoryName eventCategoryNames[]; - static const Uint32 noOfEventCategoryNames; - - /** - * This matrix defines which event should be printed when + * Sets log level filter. The event will be logged if + * the event category's log level is <= 'filterLevel'. * - * threshold - is in range [0-15] - * severity - DEBUG to ALERT (Type of log message) - */ - struct EventRepLogLevelMatrix { - EventReport::EventType eventType; - LogLevel::EventCategory eventCategory; - Uint32 threshold; - Logger::LoggerLevel severity; - }; - - static const EventRepLogLevelMatrix matrix[]; - - /** - * Default log levels for management nodes. - * - * threshold - is in range [0-15] + * @param level the log level to filter. 
*/ - struct EventLogMatrix { - LogLevel::EventCategory eventCategory; - Uint32 threshold; - }; - - static const EventLogMatrix defEventLogMatrix[]; - - - static const Uint32 matrixSize; - static const Uint32 defEventLogMatrixSize; + void setFilterLevel(int filterLevel); private: /** Prohibit */ @@ -216,11 +161,10 @@ private: EventLogger operator = (const EventLogger&); bool operator == (const EventLogger&); - LogLevel m_logLevel; Uint32 m_filterLevel; STATIC_CONST(MAX_TEXT_LENGTH = 256); - static char m_text[MAX_TEXT_LENGTH]; + char m_text[MAX_TEXT_LENGTH]; }; diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp index 10cd0d43bee..52c2f70cda8 100644 --- a/ndb/include/kernel/LogLevel.hpp +++ b/ndb/include/kernel/LogLevel.hpp @@ -45,81 +45,30 @@ public: * Copy operator */ LogLevel & operator= (const LogLevel &); - - static const Uint32 MIN_LOGLEVEL_ID = CFG_LOGLEVEL_STARTUP; - + enum EventCategory { - /** - * Events during all kind of startups - */ - llStartUp = CFG_LOGLEVEL_STARTUP - MIN_LOGLEVEL_ID, - - /** - * Events during shutdown - */ - llShutdown = CFG_LOGLEVEL_SHUTDOWN - MIN_LOGLEVEL_ID, - - /** - * Transaction statistics - * Job level - * TCP/IP speed - */ - llStatistic = CFG_LOGLEVEL_STATISTICS - MIN_LOGLEVEL_ID, - - /** - * Checkpoints - */ - llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - MIN_LOGLEVEL_ID, - - /** - * Events during node restart - */ - llNodeRestart = CFG_LOGLEVEL_NODERESTART - MIN_LOGLEVEL_ID, - - /** - * Events related to connection / communication - */ - llConnection = CFG_LOGLEVEL_CONNECTION - MIN_LOGLEVEL_ID, - - /** - * Assorted event w.r.t unexpected happenings - */ - llError = CFG_LOGLEVEL_ERROR - MIN_LOGLEVEL_ID, - - /** - * Assorted event w.r.t warning - */ - llWarning = CFG_LOGLEVEL_WARNING - MIN_LOGLEVEL_ID, - - /** - * Assorted event w.r.t information - */ - llInfo = CFG_LOGLEVEL_INFO - MIN_LOGLEVEL_ID, - - /** - * Events related to global replication - */ - llGrep = CFG_LOGLEVEL_GREP - MIN_LOGLEVEL_ID + llStartUp = CFG_LOGLEVEL_STARTUP - CFG_MIN_LOGLEVEL, + llShutdown = CFG_LOGLEVEL_SHUTDOWN - CFG_MIN_LOGLEVEL, + llStatistic = CFG_LOGLEVEL_STATISTICS - CFG_MIN_LOGLEVEL, + llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - CFG_MIN_LOGLEVEL, + llNodeRestart = CFG_LOGLEVEL_NODERESTART - CFG_MIN_LOGLEVEL, + llConnection = CFG_LOGLEVEL_CONNECTION - CFG_MIN_LOGLEVEL, + llInfo = CFG_LOGLEVEL_INFO - CFG_MIN_LOGLEVEL, + llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL, + llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL, + llGrep = CFG_LOGLEVEL_GREP - CFG_MIN_LOGLEVEL, + llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL + ,llBackup = CFG_LOGLEVEL_BACKUP - CFG_MIN_LOGLEVEL }; - struct LogLevelCategoryName { - const char* name; - }; - - /** - * Log/event level category names. Remember to update the names whenever - * a new category is added. 
- */ - static const LogLevelCategoryName LOGLEVEL_CATEGORY_NAME[]; - /** * No of categories */ -#define _LOGLEVEL_CATEGORIES 10 +#define _LOGLEVEL_CATEGORIES (CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1); static const Uint32 LOGLEVEL_CATEGORIES = _LOGLEVEL_CATEGORIES; - + void clear(); - + /** * Note level is valid as 0-15 */ @@ -130,26 +79,33 @@ public: */ Uint32 getLogLevel(EventCategory ec) const; + /** + * Set this= max(this, ll) per category + */ + LogLevel& set_max(const LogLevel& ll); + + bool operator==(const LogLevel& l) const { + return memcmp(this, &l, sizeof(* this)) == 0; + } + + LogLevel& operator=(const class EventSubscribeReq & req); + private: /** * The actual data */ - Uint32 logLevelData[LOGLEVEL_CATEGORIES]; - - LogLevel(const LogLevel &); + Uint8 logLevelData[LOGLEVEL_CATEGORIES]; }; inline LogLevel::LogLevel(){ - clear(); + clear(); } inline LogLevel & LogLevel::operator= (const LogLevel & org){ - for(Uint32 i = 0; i= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES); - logLevelData[ec] = level; + logLevelData[ec] = (Uint8)level; } inline @@ -173,8 +129,30 @@ Uint32 LogLevel::getLogLevel(EventCategory ec) const{ assert(ec >= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES); - return logLevelData[ec]; + return (Uint32)logLevelData[ec]; } +inline +LogLevel & +LogLevel::set_max(const LogLevel & org){ + for(Uint32 i = 0; i + +inline +LogLevel& +LogLevel::operator=(const EventSubscribeReq& req) +{ + clear(); + for(size_t i = 0; i> 16)] = req.theData[i] & 0xFFFF; + } + return * this; +} #endif diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/ndb/include/kernel/signaldata/CreateTable.hpp index 424367f28d5..67e510d2ed0 100644 --- a/ndb/include/kernel/signaldata/CreateTable.hpp +++ b/ndb/include/kernel/signaldata/CreateTable.hpp @@ -89,7 +89,8 @@ public: ArraySizeTooBig = 737, RecordTooBig = 738, InvalidPrimaryKeySize = 739, - NullablePrimaryKey = 740 + NullablePrimaryKey = 740, + InvalidCharset = 743 }; private: diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp index dec7145c897..a9a50f19fbc 100644 --- a/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -438,8 +438,8 @@ public: case DictTabInfo::ExtText: AttributeType = DictTabInfo::StringType; AttributeSize = DictTabInfo::an8Bit; - // head + inline part [ attr precision ] - AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + AttributeExtPrecision; + // head + inline part [ attr precision lower half ] + AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF); return true; }; return false; diff --git a/ndb/include/kernel/signaldata/EventReport.hpp b/ndb/include/kernel/signaldata/EventReport.hpp index b6106bb0ca4..1ad6e1bf7ac 100644 --- a/ndb/include/kernel/signaldata/EventReport.hpp +++ b/ndb/include/kernel/signaldata/EventReport.hpp @@ -135,12 +135,17 @@ public: //GREP GrepSubscriptionInfo = 52, - GrepSubscriptionAlert = 53 - }; + GrepSubscriptionAlert = 53, + //BACKUP + BackupStarted = 54, + BackupFailedToStart = 55, + BackupCompleted = 56, + BackupAborted = 57 + }; + void setEventType(EventType type); EventType getEventType() const; -private: UintR eventType; // DATA 0 }; diff --git a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp b/ndb/include/kernel/signaldata/EventSubscribeReq.hpp index fd2821ea31d..84a1717b1de 100644 --- a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp +++ b/ndb/include/kernel/signaldata/EventSubscribeReq.hpp @@ -27,7 +27,7 @@ * RECIVER: SimBlockCMCtrBlck */ -class 
EventSubscribeReq { +struct EventSubscribeReq { /** * Receiver(s) */ @@ -38,9 +38,8 @@ class EventSubscribeReq { */ friend class MgmtSrvr; -public: - STATIC_CONST( SignalLength = 22 ); -private: + STATIC_CONST( SignalLength = 2 + LogLevel::LOGLEVEL_CATEGORIES ); + /** * Note: If you use the same blockRef as you have used earlier, * you update your ongoing subscription @@ -53,8 +52,15 @@ private: */ Uint32 noOfEntries; - Uint32 theCategories[10]; - Uint32 theLevels[10]; + Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES]; + + EventSubscribeReq& operator= (const LogLevel& ll){ + noOfEntries = LogLevel::LOGLEVEL_CATEGORIES; + for(size_t i = 0; i +#include "EventSubscribeReq.hpp" #include "SignalData.hpp" /** @@ -39,11 +40,10 @@ class SetLogLevelOrd { friend class NodeLogLevel; private: - STATIC_CONST( SignalLength = 25 ); - + STATIC_CONST( SignalLength = 1 + LogLevel::LOGLEVEL_CATEGORIES ); + Uint32 noOfEntries; - Uint32 theCategories[12]; - Uint32 theLevels[12]; + Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES]; void clear(); @@ -51,6 +51,22 @@ private: * Note level is valid as 0-15 */ void setLogLevel(LogLevel::EventCategory ec, int level = 7); + + SetLogLevelOrd& operator= (const LogLevel& ll){ + noOfEntries = LogLevel::LOGLEVEL_CATEGORIES; + for(size_t i = 0; i +/* call in main() - does not return on error */ +extern int ndb_init(void); + #ifndef HAVE_STRDUP extern char * strdup(const char *s); #endif diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 5c470c1d25f..51a6895648f 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -32,6 +32,8 @@ #include class Ndb; +struct charset_info_st; +typedef struct charset_info_st CHARSET_INFO; /** * @class NdbDictionary @@ -257,6 +259,10 @@ public: /** * Set type of column * @param type Type of column + * + * @note setType resets all column attributes + * to (type dependent) defaults and should be the first + * method to call. Default type is Unsigned. */ void setType(Type type); @@ -301,28 +307,36 @@ public: */ int getLength() const; + /** + * For Char or Varchar or Text, set or get MySQL CHARSET_INFO. This + * specifies both character set and collation. See get_charset() + * etc in MySQL. (The cs is not "const" in MySQL). + */ + void setCharset(CHARSET_INFO* cs); + CHARSET_INFO* getCharset() const; + /** * For blob, set or get "inline size" i.e. number of initial bytes * to store in table's blob attribute. This part is normally in * main memory and can be indexed and interpreted. */ - void setInlineSize(int size) { setPrecision(size); } - int getInlineSize() const { return getPrecision(); } + void setInlineSize(int size); + int getInlineSize() const; /** * For blob, set or get "part size" i.e. number of bytes to store in * each tuple of the "blob table". Can be set to zero to omit parts * and to allow only inline bytes ("tinyblob"). */ - void setPartSize(int size) { setScale(size); } - int getPartSize() const { return getScale(); } + void setPartSize(int size); + int getPartSize() const; /** * For blob, set or get "stripe size" i.e. number of consecutive * parts to store in each node group. 
*/ - void setStripeSize(int size) { setLength(size); } - int getStripeSize() const { return getLength(); } + void setStripeSize(int size); + int getStripeSize() const; /** * Get size of element diff --git a/ndb/include/transporter/TransporterRegistry.hpp b/ndb/include/transporter/TransporterRegistry.hpp index 3c6c307406c..ac6291f9e57 100644 --- a/ndb/include/transporter/TransporterRegistry.hpp +++ b/ndb/include/transporter/TransporterRegistry.hpp @@ -218,15 +218,18 @@ public: void printState(); #endif - unsigned short m_service_port; - + class Transporter_interface { + public: + unsigned short m_service_port; + const char *m_interface; + }; + Vector m_transporter_interface; + void add_transporter_interface(const char *interface, unsigned short port); protected: private: void * callbackObj; - TransporterService *m_transporter_service; - char *m_interface_name; struct NdbThread *m_start_clients_thread; bool m_run_start_clients_thread; diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp index 1d3e96d5c7e..3062d1e4e1b 100644 --- a/ndb/include/util/NdbSqlUtil.hpp +++ b/ndb/include/util/NdbSqlUtil.hpp @@ -40,11 +40,14 @@ public: * Compare kernel attribute values. Returns -1, 0, +1 for less, * equal, greater, respectively. Parameters are pointers to values, * full attribute size in words, and size of available data in words. + * There is also pointer to type specific extra info. Char types + * receive CHARSET_INFO in it. + * * If available size is less than full size, CmpUnknown may be * returned. If a value cannot be parsed, it compares like NULL i.e. * less than any valid value. */ - typedef int Cmp(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size); + typedef int Cmp(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size); enum CmpResult { CmpLess = -1, @@ -55,6 +58,7 @@ public: /** * Kernel data types. Must match m_typeList in NdbSqlUtil.cpp. + * Now also must match types in NdbDictionary. */ struct Type { enum Enum { @@ -90,6 +94,18 @@ public: */ static const Type& getType(Uint32 typeId); + /** + * Get type by id but replace char type by corresponding binary type. + */ + static const Type& getTypeBinary(Uint32 typeId); + + /** + * Check character set. + */ + static bool usable_in_pk(Uint32 typeId, const void* cs); + static bool usable_in_hash_index(Uint32 typeId, const void* cs); + static bool usable_in_ordered_index(Uint32 typeId, const void* cs); + private: /** * List of all types. Must match Type::Enum. 
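
Review note on the dictionary changes above: NdbDictionary::Column gains setCharset()/getCharset(), and the setType() doc comment now warns that setType() resets the other column attributes, which is why the example programs earlier in this patch move setPrimaryKey() after setType()/setLength(). The sketch below is illustrative only and not part of the patch; the helper name, the umbrella include, and the way the CHARSET_INFO pointer is obtained are assumptions. It just shows the call order the patch implies.

    // Illustrative sketch, not from the patch: define a CHAR column with an
    // explicit charset/collation, calling setType() first because it resets
    // the other column attributes to type-dependent defaults.
    #include <NdbApi.hpp>   // assumed umbrella header, as in the NDB API examples

    static void add_char_column(NdbDictionary::Table& tab, CHARSET_INFO* cs)
    {
      NdbDictionary::Column col;
      col.setName("CUSTOMER_ID");
      col.setType(NdbDictionary::Column::Char); // call first: resets other attributes
      col.setLength(20);
      col.setCharset(cs);        // e.g. latin1_bin, obtained via MySQL's get_charset()
      col.setPrimaryKey(false);
      col.setNullable(false);
      tab.addColumn(col);
    }
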
diff --git a/ndb/include/util/SocketServer.hpp b/ndb/include/util/SocketServer.hpp index 334fa575e47..3860b9ca84b 100644 --- a/ndb/include/util/SocketServer.hpp +++ b/ndb/include/util/SocketServer.hpp @@ -76,7 +76,7 @@ public: * then close the socket * Returns true if succeding in binding */ - bool tryBind(unsigned short port, const char * intface = 0) const; + static bool tryBind(unsigned short port, const char * intface = 0); /** * Setup socket diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index 69874ab7ecc..03445622e6a 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -28,6 +28,10 @@ // // PUBLIC // +EventLoggerBase::~EventLoggerBase() +{ + +} /** * This matrix defines which event should be printed when @@ -35,122 +39,89 @@ * threshold - is in range [0-15] * severity - DEBUG to ALERT (Type of log message) */ -const EventLogger::EventRepLogLevelMatrix EventLogger::matrix[] = { +const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = { // CONNECTION - { EventReport::Connected, LogLevel::llConnection, 8, LL_INFO }, - { EventReport::Disconnected, LogLevel::llConnection, 8, LL_ALERT }, - { EventReport::CommunicationClosed, LogLevel::llConnection, 8, LL_INFO }, - { EventReport::CommunicationOpened, LogLevel::llConnection, 8, LL_INFO }, - { EventReport::ConnectedApiVersion, LogLevel::llConnection, 8, LL_INFO }, + { EventReport::Connected, LogLevel::llConnection, 8, Logger::LL_INFO }, + { EventReport::Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT }, + { EventReport::CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO }, + { EventReport::CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO }, + { EventReport::ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO }, // CHECKPOINT - { EventReport::GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, LL_INFO }, - { EventReport::GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, LL_INFO }, - { EventReport::LocalCheckpointStarted, LogLevel::llCheckpoint, 7, LL_INFO }, - { EventReport::LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, LL_INFO }, - { EventReport::LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, LL_ALERT }, - { EventReport::LCPFragmentCompleted, LogLevel::llCheckpoint, 11, LL_INFO }, - { EventReport::UndoLogBlocked, LogLevel::llCheckpoint, 7, LL_INFO }, + { EventReport::GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO }, + { EventReport::GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO }, + { EventReport::LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO }, + { EventReport::LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO }, + { EventReport::LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT }, + { EventReport::LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO }, + { EventReport::UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO }, // STARTUP - { EventReport::NDBStartStarted, LogLevel::llStartUp, 1, LL_INFO }, - { EventReport::NDBStartCompleted, LogLevel::llStartUp, 1, LL_INFO }, - { EventReport::STTORRYRecieved, LogLevel::llStartUp,15, LL_INFO }, - { EventReport::StartPhaseCompleted, LogLevel::llStartUp, 4, LL_INFO }, - { EventReport::CM_REGCONF, LogLevel::llStartUp, 3, LL_INFO }, - { EventReport::CM_REGREF, LogLevel::llStartUp, 8, LL_INFO }, - { EventReport::FIND_NEIGHBOURS, LogLevel::llStartUp, 8, LL_INFO }, - { EventReport::NDBStopStarted, 
LogLevel::llStartUp, 1, LL_INFO }, - { EventReport::NDBStopAborted, LogLevel::llStartUp, 1, LL_INFO }, - { EventReport::StartREDOLog, LogLevel::llStartUp, 10, LL_INFO }, - { EventReport::StartLog, LogLevel::llStartUp, 10, LL_INFO }, - { EventReport::UNDORecordsExecuted, LogLevel::llStartUp, 15, LL_INFO }, + { EventReport::NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO }, + { EventReport::NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO }, + { EventReport::STTORRYRecieved, LogLevel::llStartUp,15, Logger::LL_INFO }, + { EventReport::StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO }, + { EventReport::CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO }, + { EventReport::CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO }, + { EventReport::FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO }, + { EventReport::NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO }, + { EventReport::NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO }, + { EventReport::StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO }, + { EventReport::StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO }, + { EventReport::UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO }, // NODERESTART - { EventReport::NR_CopyDict, LogLevel::llNodeRestart, 8, LL_INFO }, - { EventReport::NR_CopyDistr, LogLevel::llNodeRestart, 8, LL_INFO }, - { EventReport::NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, LL_INFO }, - { EventReport::NR_CopyFragDone, LogLevel::llNodeRestart, 10, LL_INFO }, - { EventReport::NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, LL_INFO }, + { EventReport::NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, + { EventReport::NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, + { EventReport::NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, + { EventReport::NR_CopyFragDone, LogLevel::llNodeRestart, 10, Logger::LL_INFO }, + { EventReport::NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, - { EventReport::NodeFailCompleted, LogLevel::llNodeRestart, 8, LL_ALERT}, - { EventReport::NODE_FAILREP, LogLevel::llNodeRestart, 8, LL_ALERT}, - { EventReport::ArbitState, LogLevel::llNodeRestart, 6, LL_INFO }, - { EventReport::ArbitResult, LogLevel::llNodeRestart, 2, LL_ALERT}, - { EventReport::GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, LL_INFO }, - { EventReport::GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, LL_INFO }, - { EventReport::LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, LL_INFO }, - { EventReport::LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, LL_INFO }, + { EventReport::NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT}, + { EventReport::NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT}, + { EventReport::ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO }, + { EventReport::ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT}, + { EventReport::GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, + { EventReport::GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, + { EventReport::LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, + { EventReport::LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, // STATISTIC - { EventReport::TransReportCounters, LogLevel::llStatistic, 8, LL_INFO }, - { EventReport::OperationReportCounters, LogLevel::llStatistic, 8, LL_INFO }, - { EventReport::TableCreated, LogLevel::llStatistic, 7, LL_INFO }, - { EventReport::JobStatistic, LogLevel::llStatistic, 
9, LL_INFO }, - { EventReport::SendBytesStatistic, LogLevel::llStatistic, 9, LL_INFO }, - { EventReport::ReceiveBytesStatistic, LogLevel::llStatistic, 9, LL_INFO }, - { EventReport::MemoryUsage, LogLevel::llStatistic, 5, LL_INFO }, + { EventReport::TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO }, + { EventReport::OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO }, + { EventReport::TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO }, + { EventReport::JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO }, + { EventReport::SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO }, + { EventReport::ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO }, + { EventReport::MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO }, // ERROR - { EventReport::TransporterError, LogLevel::llError, 2, LL_ERROR }, - { EventReport::TransporterWarning, LogLevel::llError, 8, LL_WARNING }, - { EventReport::MissedHeartbeat, LogLevel::llError, 8, LL_WARNING }, - { EventReport::DeadDueToHeartbeat, LogLevel::llError, 8, LL_ALERT }, - { EventReport::WarningEvent, LogLevel::llError, 2, LL_WARNING }, + { EventReport::TransporterError, LogLevel::llError, 2, Logger::LL_ERROR }, + { EventReport::TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING }, + { EventReport::MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING }, + { EventReport::DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT }, + { EventReport::WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING }, // INFO - { EventReport::SentHeartbeat, LogLevel::llInfo, 12, LL_INFO }, - { EventReport::CreateLogBytes, LogLevel::llInfo, 11, LL_INFO }, - { EventReport::InfoEvent, LogLevel::llInfo, 2, LL_INFO }, + { EventReport::SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO }, + { EventReport::CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO }, + { EventReport::InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO }, //Global replication - { EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, LL_INFO}, - { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, LL_ALERT} + { EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, Logger::LL_INFO}, + { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, Logger::LL_ALERT}, + + // Backup + { EventReport::BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO }, + { EventReport::BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO }, + { EventReport::BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT}, + { EventReport::BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT } }; -const Uint32 EventLogger::matrixSize = sizeof(EventLogger::matrix)/ +const Uint32 EventLoggerBase::matrixSize = sizeof(EventLoggerBase::matrix)/ sizeof(EventRepLogLevelMatrix); -/** - * Default log levels for management nodes. - * - * threshold - is in range [0-15] - */ -const EventLogger::EventLogMatrix EventLogger::defEventLogMatrix[] = { - { LogLevel::llStartUp, 7 }, - { LogLevel::llShutdown, 7 }, - { LogLevel::llStatistic, 7 }, - { LogLevel::llCheckpoint, 7 }, - { LogLevel::llNodeRestart, 7 }, - { LogLevel::llConnection, 7 }, - { LogLevel::llError, 15 }, - { LogLevel::llInfo, 7 }, - { LogLevel::llGrep, 7 } -}; - -const Uint32 -EventLogger::defEventLogMatrixSize = sizeof(EventLogger::defEventLogMatrix)/ - sizeof(EventLogMatrix); -/** - * Specifies allowed event categories/log levels that can be set from - * the Management API/interactive shell. 
- */ -const EventLogger::EventCategoryName EventLogger::eventCategoryNames[] = { - { LogLevel::llStartUp, "STARTUP" }, - { LogLevel::llStatistic, "STATISTICS" }, - { LogLevel::llCheckpoint, "CHECKPOINT" }, - { LogLevel::llNodeRestart, "NODERESTART" }, - { LogLevel::llConnection, "CONNECTION" }, - { LogLevel::llInfo, "INFO" }, - { LogLevel::llGrep, "GREP" } -}; - -const Uint32 -EventLogger::noOfEventCategoryNames = sizeof(EventLogger::eventCategoryNames)/ - sizeof(EventLogger::EventCategoryName); - -char EventLogger::m_text[MAX_TEXT_LENGTH]; - const char* -EventLogger::getText(int type, +EventLogger::getText(char * m_text, size_t m_text_len, + int type, const Uint32* theData, NodeId nodeId) { // TODO: Change the switch implementation... @@ -164,13 +135,13 @@ EventLogger::getText(int type, EventReport::EventType eventType = (EventReport::EventType)type; switch (eventType){ case EventReport::Connected: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNode %u Connected", theNodeId, theData[1]); break; case EventReport::ConnectedApiVersion: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNode %u: API version %d.%d.%d", theNodeId, theData[1], @@ -179,7 +150,7 @@ EventLogger::getText(int type, getBuild(theData[2])); break; case EventReport::Disconnected: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNode %u Disconnected", theNodeId, theData[1]); @@ -188,7 +159,7 @@ EventLogger::getText(int type, //----------------------------------------------------------------------- // REPORT communication to node closed. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sCommunication to Node %u closed", theNodeId, theData[1]); @@ -197,7 +168,7 @@ EventLogger::getText(int type, //----------------------------------------------------------------------- // REPORT communication to node opened. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sCommunication to Node %u opened", theNodeId, theData[1]); @@ -206,7 +177,7 @@ EventLogger::getText(int type, //----------------------------------------------------------------------- // Start of NDB has been initiated. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sStart initiated (version %d.%d.%d)", theNodeId , getMajor(theData[1]), @@ -214,13 +185,13 @@ EventLogger::getText(int type, getBuild(theData[1])); break; case EventReport::NDBStopStarted: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%s%s shutdown initiated", theNodeId, (theData[1] == 1 ? "Cluster" : "Node")); break; case EventReport::NDBStopAborted: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNode shutdown aborted", theNodeId); break; @@ -228,7 +199,7 @@ EventLogger::getText(int type, //----------------------------------------------------------------------- // Start of NDB has been completed. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sStarted (version %d.%d.%d)", theNodeId , getMajor(theData[1]), @@ -240,7 +211,7 @@ EventLogger::getText(int type, //----------------------------------------------------------------------- // STTORRY recevied after restart finished. 
//----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sSTTORRY received after restart finished", theNodeId); break; @@ -266,7 +237,7 @@ EventLogger::getText(int type, type = ""; break; default:{ - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sStart phase %u completed (unknown = %d)", theNodeId, theData[1], @@ -274,7 +245,7 @@ EventLogger::getText(int type, return m_text; } } - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sStart phase %u completed %s", theNodeId, theData[1], @@ -283,7 +254,7 @@ EventLogger::getText(int type, break; } case EventReport::CM_REGCONF: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sCM_REGCONF president = %u, own Node = %u, our dynamic id = %u" , theNodeId, @@ -315,7 +286,7 @@ EventLogger::getText(int type, break; }//switch - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sCM_REGREF from Node %u to our Node %u. Cause = %s", theNodeId, theData[2], @@ -328,7 +299,7 @@ EventLogger::getText(int type, // REPORT Node Restart copied a fragment. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sWe are Node %u with dynamic ID %u, our left neighbour " "is Node %u, our right is Node %u", theNodeId, @@ -344,13 +315,13 @@ EventLogger::getText(int type, if (theData[1] == 0) { if (theData[3] != 0) { - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNode %u completed failure of Node %u", theNodeId, theData[3], theData[2]); } else { - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sAll nodes completed failure of Node %u", theNodeId, theData[2]); @@ -367,7 +338,7 @@ EventLogger::getText(int type, line = "DBLQH"; } - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNode failure of %u %s completed", theNodeId, theData[2], @@ -376,7 +347,7 @@ EventLogger::getText(int type, break; case EventReport::NODE_FAILREP: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode %u has failed. 
The Node state at failure " "was %u", theNodeId, @@ -395,41 +366,41 @@ EventLogger::getText(int type, const unsigned state = sd->code >> 16; switch (code) { case ArbitCode::ThreadStart: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sPresident restarts arbitration thread [state=%u]", theNodeId, state); break; case ArbitCode::PrepPart2: sd->ticket.getText(ticketText, sizeof(ticketText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sPrepare arbitrator node %u [ticket=%s]", theNodeId, sd->node, ticketText); break; case ArbitCode::PrepAtrun: sd->ticket.getText(ticketText, sizeof(ticketText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sReceive arbitrator node %u [ticket=%s]", theNodeId, sd->node, ticketText); break; case ArbitCode::ApiStart: sd->ticket.getText(ticketText, sizeof(ticketText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sStarted arbitrator node %u [ticket=%s]", theNodeId, sd->node, ticketText); break; case ArbitCode::ApiFail: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLost arbitrator node %u - process failure [state=%u]", theNodeId, sd->node, state); break; case ArbitCode::ApiExit: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLost arbitrator node %u - process exit [state=%u]", theNodeId, sd->node, state); break; default: ArbitCode::getErrText(code, errText, sizeof(errText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLost arbitrator node %u - %s [state=%u]", theNodeId, sd->node, errText, state); break; @@ -446,48 +417,48 @@ EventLogger::getText(int type, const unsigned state = sd->code >> 16; switch (code) { case ArbitCode::LoseNodes: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration check lost - less than 1/2 nodes left", theNodeId); break; case ArbitCode::WinGroups: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration check won - node group majority", theNodeId); break; case ArbitCode::LoseGroups: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration check lost - missing node group", theNodeId); break; case ArbitCode::Partitioning: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNetwork partitioning - arbitration required", theNodeId); break; case ArbitCode::WinChoose: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration won - positive reply from node %u", theNodeId, sd->node); break; case ArbitCode::LoseChoose: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration lost - negative reply from node %u", theNodeId, sd->node); break; case ArbitCode::LoseNorun: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNetwork partitioning - no arbitrator available", theNodeId); break; case ArbitCode::LoseNocfg: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sNetwork partitioning - no arbitrator configured", theNodeId); break; default: ArbitCode::getErrText(code, errText, sizeof(errText)); - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sArbitration failure - %s [state=%u]", theNodeId, errText, state); break; @@ -500,7 +471,7 @@ EventLogger::getText(int type, // node is the master of this global checkpoint. 
//----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sGlobal checkpoint %u started", theNodeId, theData[1]); @@ -510,7 +481,7 @@ EventLogger::getText(int type, // This event reports that a global checkpoint has been completed on this // node and the node is the master of this global checkpoint. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sGlobal checkpoint %u completed", theNodeId, theData[1]); @@ -521,7 +492,7 @@ EventLogger::getText(int type, // node is the master of this local checkpoint. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLocal checkpoint %u started. " "Keep GCI = %u oldest restorable GCI = %u", theNodeId, @@ -535,7 +506,7 @@ EventLogger::getText(int type, // node and the node is the master of this local checkpoint. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLocal checkpoint %u completed", theNodeId, theData[1]); @@ -544,14 +515,14 @@ EventLogger::getText(int type, //----------------------------------------------------------------------- // This event reports that a table has been created. //----------------------------------------------------------------------- - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sTable with ID = %u created", theNodeId, theData[1]); break; case EventReport::LCPStoppedInCalcKeepGci: if (theData[1] == 0) - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sLocal Checkpoint stopped in CALCULATED_KEEP_GCI", theNodeId); break; @@ -560,7 +531,7 @@ EventLogger::getText(int type, // REPORT Node Restart completed copy of dictionary information. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart completed copy of dictionary information", theNodeId); break; @@ -569,7 +540,7 @@ EventLogger::getText(int type, // REPORT Node Restart completed copy of distribution information. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart completed copy of distribution information", theNodeId); break; @@ -578,7 +549,7 @@ EventLogger::getText(int type, // REPORT Node Restart is starting to copy the fragments. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart starting to copy the fragments " "to Node %u", theNodeId, @@ -589,7 +560,7 @@ EventLogger::getText(int type, // REPORT Node Restart copied a fragment. 
//----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTable ID = %u, fragment ID = %u have been copied " "to Node %u", theNodeId, @@ -599,7 +570,7 @@ EventLogger::getText(int type, break; case EventReport::NR_CopyFragsCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode restart completed copying the fragments " "to Node %u", theNodeId, @@ -607,7 +578,7 @@ EventLogger::getText(int type, break; case EventReport::LCPFragmentCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTable ID = %u, fragment ID = %u has completed LCP " "on Node %u", theNodeId, @@ -620,7 +591,7 @@ EventLogger::getText(int type, // Report information about transaction activity once per 10 seconds. // ------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTrans. Count = %u, Commit Count = %u, " "Read Count = %u, Simple Read Count = %u,\n" "Write Count = %u, AttrInfo Count = %u, " @@ -639,7 +610,7 @@ EventLogger::getText(int type, theData[10]); break; case EventReport::OperationReportCounters: - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%sOperations=%u", theNodeId, theData[1]); @@ -649,7 +620,7 @@ EventLogger::getText(int type, // REPORT Undo Logging blocked due to buffer near to overflow. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sACC Blocked %u and TUP Blocked %u times last second", theNodeId, theData[1], @@ -658,7 +629,7 @@ EventLogger::getText(int type, case EventReport::TransporterError: case EventReport::TransporterWarning: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sTransporter to node %d reported error 0x%x", theNodeId, theData[1], @@ -669,7 +640,7 @@ EventLogger::getText(int type, // REPORT Undo Logging blocked due to buffer near to overflow. //----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode %d missed heartbeat %d", theNodeId, theData[1], @@ -680,21 +651,21 @@ EventLogger::getText(int type, // REPORT Undo Logging blocked due to buffer near to overflow. 
//----------------------------------------------------------------------- ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode %d declared dead due to missed heartbeat", theNodeId, theData[1]); break; case EventReport::JobStatistic: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sMean loop Counter in doJob last 8192 times = %u", theNodeId, theData[1]); break; case EventReport::SendBytesStatistic: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sMean send size to Node = %d last 4096 sends = %u bytes", theNodeId, theData[1], @@ -702,7 +673,7 @@ EventLogger::getText(int type, break; case EventReport::ReceiveBytesStatistic: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sMean receive size to Node = %d last 4096 sends = %u bytes", theNodeId, theData[1], @@ -710,14 +681,14 @@ EventLogger::getText(int type, break; case EventReport::SentHeartbeat: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode Sent Heartbeat to node = %d", theNodeId, theData[1]); break; case EventReport::CreateLogBytes: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLog part %u, log file %u, MB %u", theNodeId, theData[1], @@ -726,7 +697,7 @@ EventLogger::getText(int type, break; case EventReport::StartLog: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLog part %u, start MB %u, stop MB %u, last GCI, log exec %u", theNodeId, theData[1], @@ -736,7 +707,7 @@ EventLogger::getText(int type, break; case EventReport::StartREDOLog: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sNode: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]", theNodeId, theData[1], @@ -753,7 +724,7 @@ EventLogger::getText(int type, } ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%s UNDO %s %d [%d %d %d %d %d %d %d %d %d]", theNodeId, line, @@ -771,36 +742,36 @@ EventLogger::getText(int type, break; case EventReport::InfoEvent: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%s%s", theNodeId, (char *)&theData[1]); break; case EventReport::WarningEvent: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%s%s", theNodeId, (char *)&theData[1]); break; case EventReport::GCP_TakeoverStarted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sGCP Take over started", theNodeId); break; case EventReport::GCP_TakeoverCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sGCP Take over completed", theNodeId); break; case EventReport::LCP_TakeoverStarted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLCP Take over started", theNodeId); break; case EventReport::LCP_TakeoverCompleted: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sLCP Take over completed (state = %d)", theNodeId, theData[1]); break; @@ -812,7 +783,7 @@ EventLogger::getText(int type, const int block = theData[5]; const int percent = (used*100)/total; - ::snprintf(m_text, sizeof(m_text), + ::snprintf(m_text, m_text_len, "%s%s usage %s %d%s(%d %dK pages of total %d)", theNodeId, (block==DBACC ? 
"Index" : (block == DBTUP ?"Data":"")), @@ -822,478 +793,508 @@ EventLogger::getText(int type, ); break; } - - case EventReport::GrepSubscriptionInfo : - { - GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; - switch(event) { - case GrepEvent::GrepSS_CreateSubIdConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Created subscription id" - " (subId=%d,SubKey=%d)" - " Return code: %d.", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_CreateSubIdConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: Created subscription id" - " (subId=%d,SubKey=%d)" - " Return code: %d.", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubCreateConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int nodegrp = theData[5]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Created subscription using" - " (subId=%d,SubKey=%d)" - " in primary system. Primary system has %d nodegroup(s)." - " Return code: %d", - subId, - subKey, - nodegrp, - err); - break; - } - case GrepEvent::GrepPS_SubCreateConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: All participants have created " - "subscriptions" - " using (subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubStartMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Logging started on meta data changes." - " using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubStartMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: All participants have started " - "logging meta data" - " changes on the subscription subId=%d,SubKey=%d) " - "(N.I yet)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubStartDataConf: { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Logging started on table data changes " - " using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubStartDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: All participants have started logging " - "table data changes on the subscription " - "subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubSyncMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: All participants have started " - " synchronization on meta data (META SCAN) using " - "(subId=%d,SubKey=%d)." 
- " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubSyncMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Synchronization started (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubSyncDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: All participants have started " - "synchronization " - " on table data (DATA SCAN) using (subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubSyncDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Synchronization started (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d" - " Return code: %d", - subId, - subKey, - gci, - err); - break; - } - case GrepEvent::GrepPS_SubRemoveConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: All participants have removed " - "subscription (subId=%d,SubKey=%d). I have cleaned " - "up resources I've used." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubRemoveConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Removed subscription " - "(subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } + { + GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; + switch(event) { + case GrepEvent::GrepSS_CreateSubIdConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Created subscription id" + " (subId=%d,SubKey=%d)" + " Return code: %d.", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_CreateSubIdConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Created subscription id" + " (subId=%d,SubKey=%d)" + " Return code: %d.", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubCreateConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int nodegrp = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Created subscription using" + " (subId=%d,SubKey=%d)" + " in primary system. Primary system has %d nodegroup(s)." + " Return code: %d", + subId, + subKey, + nodegrp, + err); + break; + } + case GrepEvent::GrepPS_SubCreateConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have created " + "subscriptions" + " using (subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubStartMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging started on meta data changes." 
+ " using (subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubStartMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started " + "logging meta data" + " changes on the subscription subId=%d,SubKey=%d) " + "(N.I yet)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubStartDataConf: { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging started on table data changes " + " using (subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubStartDataConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started logging " + "table data changes on the subscription " + "subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubSyncMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started " + " synchronization on meta data (META SCAN) using " + "(subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubSyncMetaConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization started (META SCAN) on " + " meta data using (subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepPS_SubSyncDataConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have started " + "synchronization " + " on table data (DATA SCAN) using (subId=%d,SubKey=%d)." + " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubSyncDataConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int gci = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization started (DATA SCAN) on " + "table data using (subId=%d,SubKey=%d). GCI = %d" + " Return code: %d", + subId, + subKey, + gci, + err); + break; + } + case GrepEvent::GrepPS_SubRemoveConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: All participants have removed " + "subscription (subId=%d,SubKey=%d). I have cleaned " + "up resources I've used." 
+ " Return code: %d", + subId, + subKey, + err); + break; + } + case GrepEvent::GrepSS_SubRemoveConf: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Removed subscription " + "(subId=%d,SubKey=%d)" + " Return code: %d", + subId, + subKey, + err); + break; + } default: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sUnknown GrepSubscriptonInfo event: %d", theNodeId, theData[1]); - } - break; - } - - case EventReport::GrepSubscriptionAlert : - { - GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; - switch(event) - { - case GrepEvent::GrepSS_CreateSubIdRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord:Error code: %d Error message: %s" - " (subId=%d,SubKey=%d)", - err, - GrepError::getErrorDesc((GrepError::Code)err), - subId, - subKey); - break; - } - case GrepEvent::GrepSS_SubCreateRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: FAILED to Created subscription using" - " (subId=%d,SubKey=%d)in primary system." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubStartMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Logging failed to start on meta " - "data changes." - " using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubStartDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Logging FAILED to start on table data " - " changes using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubSyncMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Synchronization FAILED (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubSyncDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d" - " Error code: %d Error Message: %s", - subId, - subKey, - gci, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepSS_SubRemoveRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::SSCoord: Failed to remove subscription " - "(subId=%d,SubKey=%d). 
" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err) - ); - break; - } - - case GrepEvent::GrepPS_CreateSubIdRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: Error code: %d Error Message: %s" - " (subId=%d,SubKey=%d)", - err, - GrepError::getErrorDesc((GrepError::Code)err), - subId, - subKey); - break; - } - case GrepEvent::GrepPS_SubCreateRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: FAILED to Created subscription using" - " (subId=%d,SubKey=%d)in primary system." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubStartMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: Logging failed to start on meta " - "data changes." - " using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubStartDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: Logging FAILED to start on table data " - " changes using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubSyncMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: Synchronization FAILED (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubSyncDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d. " - " Error code: %d Error Message: %s", - subId, - subKey, - gci, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::GrepPS_SubRemoveRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - ::snprintf(m_text, sizeof(m_text), - "Grep::PSCoord: Failed to remove subscription " - "(subId=%d,SubKey=%d)." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - case GrepEvent::Rep_Disconnect: - { - const int err = theData[4]; - const int nodeId = theData[5]; - ::snprintf(m_text, sizeof(m_text), - "Rep: Node %d." 
- " Error code: %d Error Message: %s", - nodeId, - err, - GrepError::getErrorDesc((GrepError::Code)err)); - break; - } - - - default: - ::snprintf(m_text, - sizeof(m_text), - "%sUnknown GrepSubscriptionAlert event: %d", - theNodeId, - theData[1]); - break; - } - break; } + break; + } + case EventReport::GrepSubscriptionAlert : + { + GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; + switch(event) + { + case GrepEvent::GrepSS_CreateSubIdRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord:Error code: %d Error message: %s" + " (subId=%d,SubKey=%d)", + err, + GrepError::getErrorDesc((GrepError::Code)err), + subId, + subKey); + break; + } + case GrepEvent::GrepSS_SubCreateRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: FAILED to Created subscription using" + " (subId=%d,SubKey=%d)in primary system." + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubStartMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging failed to start on meta " + "data changes." + " using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubStartDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Logging FAILED to start on table data " + " changes using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubSyncMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization FAILED (META SCAN) on " + " meta data using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubSyncDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int gci = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on " + "table data using (subId=%d,SubKey=%d). GCI = %d" + " Error code: %d Error Message: %s", + subId, + subKey, + gci, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepSS_SubRemoveRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::SSCoord: Failed to remove subscription " + "(subId=%d,SubKey=%d). 
" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err) + ); + break; + } + + case GrepEvent::GrepPS_CreateSubIdRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Error code: %d Error Message: %s" + " (subId=%d,SubKey=%d)", + err, + GrepError::getErrorDesc((GrepError::Code)err), + subId, + subKey); + break; + } + case GrepEvent::GrepPS_SubCreateRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: FAILED to Created subscription using" + " (subId=%d,SubKey=%d)in primary system." + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubStartMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Logging failed to start on meta " + "data changes." + " using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubStartDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Logging FAILED to start on table data " + " changes using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubSyncMetaRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Synchronization FAILED (META SCAN) on " + " meta data using (subId=%d,SubKey=%d)" + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubSyncDataRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + const int gci = theData[5]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on " + "table data using (subId=%d,SubKey=%d). GCI = %d. " + " Error code: %d Error Message: %s", + subId, + subKey, + gci, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::GrepPS_SubRemoveRef: + { + const int subId = theData[2]; + const int subKey = theData[3]; + const int err = theData[4]; + ::snprintf(m_text, m_text_len, + "Grep::PSCoord: Failed to remove subscription " + "(subId=%d,SubKey=%d)." + " Error code: %d Error Message: %s", + subId, + subKey, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + case GrepEvent::Rep_Disconnect: + { + const int err = theData[4]; + const int nodeId = theData[5]; + ::snprintf(m_text, m_text_len, + "Rep: Node %d." 
+ " Error code: %d Error Message: %s", + nodeId, + err, + GrepError::getErrorDesc((GrepError::Code)err)); + break; + } + + + default: + ::snprintf(m_text, + m_text_len, + "%sUnknown GrepSubscriptionAlert event: %d", + theNodeId, + theData[1]); + break; + } + break; + } + + case EventReport::BackupStarted: + ::snprintf(m_text, + m_text_len, + "%sBackup %d started from node %d", + theNodeId, theData[2], refToNode(theData[1])); + break; + case EventReport::BackupFailedToStart: + ::snprintf(m_text, + m_text_len, + "%sBackup request from %d failed to start. Error: %d", + theNodeId, refToNode(theData[1]), theData[2]); + break; + case EventReport::BackupCompleted: + ::snprintf(m_text, + m_text_len, + "%sBackup %d started from node %d completed\n" + " StartGCP: %d StopGCP: %d\n" + " #Records: %d #LogRecords: %d\n" + " Data: %d bytes Log: %d bytes", + theNodeId, theData[2], refToNode(theData[1]), + theData[3], theData[4], theData[6], theData[8], + theData[5], theData[7]); + break; + case EventReport::BackupAborted: + ::snprintf(m_text, + m_text_len, + "%sBackup %d started from %d has been aborted. Error: %d", + theNodeId, + theData[2], + refToNode(theData[1]), + theData[3]); + break; default: ::snprintf(m_text, - sizeof(m_text), + m_text_len, "%sUnknown event: %d", theNodeId, theData[0]); @@ -1302,54 +1303,10 @@ EventLogger::getText(int type, return m_text; } -bool -EventLogger::matchEventCategory(const char * str, - LogLevel::EventCategory * cat, - bool exactMatch){ - unsigned i; - if(cat == 0 || str == 0) - return false; - - char * tmp = strdup(str); - for(i = 0; igetLogLevel(cat) : m_logLevel.getLogLevel(cat); + if (threshold <= set){ switch (severity){ - case LL_ALERT: - alert(EventLogger::getText(eventType, theData, nodeId)); + case Logger::LL_ALERT: + alert(EventLogger::getText(m_text, sizeof(m_text), + eventType, theData, nodeId)); break; - case LL_CRITICAL: - critical(EventLogger::getText(eventType, theData, nodeId)); + case Logger::LL_CRITICAL: + critical(EventLogger::getText(m_text, sizeof(m_text), + eventType, theData, nodeId)); break; - case LL_WARNING: - warning(EventLogger::getText(eventType, theData, nodeId)); + case Logger::LL_WARNING: + warning(EventLogger::getText(m_text, sizeof(m_text), + eventType, theData, nodeId)); break; - case LL_ERROR: - error(EventLogger::getText(eventType, theData, nodeId)); + case Logger::LL_ERROR: + error(EventLogger::getText(m_text, sizeof(m_text), + eventType, theData, nodeId)); break; - case LL_INFO: - info(EventLogger::getText(eventType, theData, nodeId)); + case Logger::LL_INFO: + info(EventLogger::getText(m_text, sizeof(m_text), + eventType, theData, nodeId)); break; - case LL_DEBUG: - debug(EventLogger::getText(eventType, theData, nodeId)); + case Logger::LL_DEBUG: + debug(EventLogger::getText(m_text, sizeof(m_text), + eventType, theData, nodeId)); break; default: - info(EventLogger::getText(eventType, theData, nodeId)); + info(EventLogger::getText(m_text, sizeof(m_text), + eventType, theData, nodeId)); break; } } // if (.. 
} -LogLevel& -EventLogger::getLoglevel() -{ - return m_logLevel; -} - int EventLogger::getFilterLevel() const { diff --git a/ndb/src/common/debugger/Makefile.am b/ndb/src/common/debugger/Makefile.am index 0278d0d2ba0..d0fb30717cd 100644 --- a/ndb/src/common/debugger/Makefile.am +++ b/ndb/src/common/debugger/Makefile.am @@ -2,7 +2,7 @@ SUBDIRS = signaldata noinst_LTLIBRARIES = libtrace.la -libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp LogLevel.cpp EventLogger.cpp GrepError.cpp +libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp include $(top_srcdir)/ndb/config/common.mk.am include $(top_srcdir)/ndb/config/type_kernel.mk.am diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index 109c999852b..40325fbae99 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -18,6 +18,7 @@ #include #include +#include #include "LocalConfig.hpp" #include @@ -272,43 +273,15 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 NdbConfig_SetPath(datadir); } - char localhost[MAXHOSTNAMELEN]; - if(NdbHost_GetHostName(localhost) != 0){ - snprintf(buf, 255, "Unable to get own hostname"); + if (hostname && hostname[0] != 0 && + !SocketServer::tryBind(0,hostname)) { + snprintf(buf, 255, "Config hostname(%s) don't match a local interface," + " tried to bind, error = %d - %s", + hostname, errno, strerror(errno)); setError(CR_ERROR, buf); return false; } - do { - if(strlen(hostname) == 0) - break; - - if(strcasecmp(hostname, localhost) == 0) - break; - - if(strcasecmp(hostname, "localhost") == 0) - break; - - struct in_addr local, config; - bool b1 = false, b2 = false, b3 = false; - b1 = Ndb_getInAddr(&local, localhost) == 0; - b2 = Ndb_getInAddr(&config, hostname) == 0; - b3 = memcmp(&local, &config, sizeof(local)) == 0; - - if(b1 && b2 && b3) - break; - - b1 = Ndb_getInAddr(&local, "localhost") == 0; - b3 = memcmp(&local, &config, sizeof(local)) == 0; - if(b1 && b2 && b3) - break; - - snprintf(buf, 255, "Local hostname(%s) and config hostname(%s) dont match", - localhost, hostname); - setError(CR_ERROR, buf); - return false; - } while(false); - unsigned int _type; if(ndb_mgm_get_int_parameter(it, CFG_TYPE_OF_SECTION, &_type)){ snprintf(buf, 255, "Unable to get type of node(%d) from config", @@ -344,7 +317,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 const char * name; struct in_addr addr; BaseString tmp; - if(!iter.get(CFG_TCP_HOSTNAME_1, &name) && strlen(name)){ + if(!iter.get(CFG_CONNECTION_HOSTNAME_1, &name) && strlen(name)){ if(Ndb_getInAddr(&addr, name) != 0){ tmp.assfmt("Unable to lookup/illegal hostname %s, " "connection from node %d to node %d", @@ -354,7 +327,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 } } - if(!iter.get(CFG_TCP_HOSTNAME_2, &name) && strlen(name)){ + if(!iter.get(CFG_CONNECTION_HOSTNAME_2, &name) && strlen(name)){ if(Ndb_getInAddr(&addr, name) != 0){ tmp.assfmt("Unable to lookup/illegal hostname %s, " "connection from node %d to node %d", diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp index a76c541f3f6..d7ad993c2af 100644 --- a/ndb/src/common/mgmcommon/IPCConfig.cpp +++ b/ndb/src/common/mgmcommon/IPCConfig.cpp @@ -345,18 +345,27 @@ IPCConfig::configureTransporters(Uint32 nodeId, const class ndb_mgm_configuration & config, class 
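/* --- Illustrative aside, not part of the patch ---------------------------
 * The ConfigRetriever change above replaces hostname string/address
 * comparisons with a single check: try to bind a throw-away socket (port 0)
 * to the configured hostname; only addresses owned by a local interface can
 * be bound.  A POSIX sketch of that idea (try_bind_local is an invented
 * name, not the patch's SocketServer::tryBind):
 */
#include <cstring>
#include <netdb.h>
#include <sys/socket.h>
#include <unistd.h>

static bool try_bind_local(const char* hostname)
{
  addrinfo hints;
  std::memset(&hints, 0, sizeof(hints));
  hints.ai_family   = AF_INET;
  hints.ai_socktype = SOCK_STREAM;

  addrinfo* res = 0;
  if (getaddrinfo(hostname, 0, &hints, &res) != 0 || res == 0)
    return false;                       // name does not even resolve

  int fd = socket(res->ai_family, res->ai_socktype, 0);
  bool ok = false;
  if (fd != -1) {
    // Port 0 lets the kernel choose a port; bind() fails (EADDRNOTAVAIL)
    // when the address does not belong to a local interface.
    ok = (bind(fd, res->ai_addr, res->ai_addrlen) == 0);
    close(fd);
  }
  freeaddrinfo(res);
  return ok;
}

int main()
{
  return try_bind_local("localhost") ? 0 : 1;
}
/* --- end aside ----------------------------------------------------------- */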
TransporterRegistry & tr){ - Uint32 noOfTransportersCreated= 0, server_port= 0; + Uint32 noOfTransportersCreated= 0; ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION); for(iter.first(); iter.valid(); iter.next()){ Uint32 nodeId1, nodeId2, remoteNodeId; + const char * remoteHostName= 0, * localHostName= 0; if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue; if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue; if(nodeId1 != nodeId && nodeId2 != nodeId) continue; remoteNodeId = (nodeId == nodeId1 ? nodeId2 : nodeId1); + { + const char * host1= 0, * host2= 0; + iter.get(CFG_CONNECTION_HOSTNAME_1, &host1); + iter.get(CFG_CONNECTION_HOSTNAME_2, &host2); + localHostName = (nodeId == nodeId1 ? host1 : host2); + remoteHostName = (nodeId == nodeId1 ? host2 : host1); + } + Uint32 sendSignalId = 1; Uint32 checksum = 1; if(iter.get(CFG_CONNECTION_SEND_SIGNAL_ID, &sendSignalId)) continue; @@ -365,14 +374,10 @@ IPCConfig::configureTransporters(Uint32 nodeId, Uint32 type = ~0; if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue; - Uint32 tmp_server_port= 0; - if(iter.get(CFG_CONNECTION_SERVER_PORT, &tmp_server_port)) break; + Uint32 server_port= 0; + if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break; if (nodeId <= nodeId1 && nodeId <= nodeId2) { - if (server_port && server_port != tmp_server_port) { - ndbout << "internal error in config setup of server ports line= " << __LINE__ << endl; - exit(-1); - } - server_port= tmp_server_port; + tr.add_transporter_interface(localHostName, server_port); } switch(type){ @@ -388,7 +393,7 @@ IPCConfig::configureTransporters(Uint32 nodeId, if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break; if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break; - conf.port= tmp_server_port; + conf.port= server_port; if(!tr.createTransporter(&conf)){ ndbout << "Failed to create SHM Transporter from: " @@ -437,14 +442,10 @@ IPCConfig::configureTransporters(Uint32 nodeId, case CONNECTION_TYPE_TCP:{ TCP_TransporterConfiguration conf; - const char * host1, * host2; - if(iter.get(CFG_TCP_HOSTNAME_1, &host1)) break; - if(iter.get(CFG_TCP_HOSTNAME_2, &host2)) break; - if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break; if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break; - conf.port= tmp_server_port; + conf.port= server_port; const char * proxy; if (!iter.get(CFG_TCP_PROXY, &proxy)) { if (strlen(proxy) > 0 && nodeId2 == nodeId) { @@ -455,8 +456,8 @@ IPCConfig::configureTransporters(Uint32 nodeId, conf.localNodeId = nodeId; conf.remoteNodeId = remoteNodeId; - conf.localHostName = (nodeId == nodeId1 ? host1 : host2); - conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1); + conf.localHostName = localHostName; + conf.remoteHostName = remoteHostName; conf.byteOrder = 0; conf.compression = 0; conf.checksum = checksum; @@ -470,19 +471,15 @@ IPCConfig::configureTransporters(Uint32 nodeId, } case CONNECTION_TYPE_OSE:{ OSE_TransporterConfiguration conf; - - const char * host1, * host2; - if(iter.get(CFG_OSE_HOSTNAME_1, &host1)) break; - if(iter.get(CFG_OSE_HOSTNAME_2, &host2)) break; - + if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.prioASignalSize)) break; if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.prioBSignalSize)) break; if(iter.get(CFG_OSE_RECEIVE_ARRAY_SIZE, &conf.receiveBufferSize)) break; conf.localNodeId = nodeId; conf.remoteNodeId = remoteNodeId; - conf.localHostName = (nodeId == nodeId1 ? host1 : host2); - conf.remoteHostName = (nodeId == nodeId1 ? 
host2 : host1); + conf.localHostName = localHostName; + conf.remoteHostName = remoteHostName; conf.byteOrder = 0; conf.compression = 0; conf.checksum = checksum; @@ -502,9 +499,6 @@ IPCConfig::configureTransporters(Uint32 nodeId, } } } - - tr.m_service_port= server_port; - return noOfTransportersCreated; } diff --git a/ndb/src/common/portlib/NdbTCP.cpp b/ndb/src/common/portlib/NdbTCP.cpp index 8448d64222f..a711a586203 100644 --- a/ndb/src/common/portlib/NdbTCP.cpp +++ b/ndb/src/common/portlib/NdbTCP.cpp @@ -15,6 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include @@ -27,13 +28,14 @@ static NdbMutex LOCK_gethostbyname = NDB_MUTEX_INITIALIZER; extern "C" int Ndb_getInAddr(struct in_addr * dst, const char *address) { + DBUG_ENTER("Ndb_getInAddr"); struct hostent * hostPtr; NdbMutex_Lock(&LOCK_gethostbyname); hostPtr = gethostbyname(address); if (hostPtr != NULL) { dst->s_addr = ((struct in_addr *) *hostPtr->h_addr_list)->s_addr; NdbMutex_Unlock(&LOCK_gethostbyname); - return 0; + DBUG_RETURN(0); } NdbMutex_Unlock(&LOCK_gethostbyname); @@ -47,9 +49,11 @@ Ndb_getInAddr(struct in_addr * dst, const char *address) { #endif ) { - return 0; + DBUG_RETURN(0); } - return -1; + DBUG_PRINT("error",("inet_addr(%s) - %d - %s", + address, errno, strerror(errno))); + DBUG_RETURN(-1); } #if 0 diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index fe1d4b04786..5679c3c5834 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -98,9 +98,8 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd) TransporterRegistry::TransporterRegistry(void * callback, unsigned _maxTransporters, - unsigned sizeOfLongSignalMemory) { - - m_transporter_service= 0; + unsigned sizeOfLongSignalMemory) +{ nodeIdSpecified = false; maxTransporters = _maxTransporters; sendCounter = 1; @@ -150,7 +149,6 @@ TransporterRegistry::~TransporterRegistry() { delete[] theTransporters; delete[] performStates; delete[] ioStates; - #ifdef NDB_OSE_TRANSPORTER if(theOSEReceiver != NULL){ theOSEReceiver->destroyPhantom(); @@ -1159,55 +1157,67 @@ TransporterRegistry::stop_clients() return true; } +void +TransporterRegistry::add_transporter_interface(const char *interface, unsigned short port) +{ + DBUG_ENTER("TransporterRegistry::add_transporter_interface"); + DBUG_PRINT("enter",("interface=%s, port= %d", interface, port)); + if (interface && strlen(interface) == 0) + interface= 0; + + for (unsigned i= 0; i < m_transporter_interface.size(); i++) + { + Transporter_interface &tmp= m_transporter_interface[i]; + if (port != tmp.m_service_port) + continue; + if (interface != 0 && tmp.m_interface != 0 && + strcmp(interface, tmp.m_interface) == 0) + { + DBUG_VOID_RETURN; // found match, no need to insert + } + if (interface == 0 && tmp.m_interface == 0) + { + DBUG_VOID_RETURN; // found match, no need to insert + } + } + Transporter_interface t; + t.m_service_port= port; + t.m_interface= interface; + m_transporter_interface.push_back(t); + DBUG_PRINT("exit",("interface and port added")); + DBUG_VOID_RETURN; +} + bool TransporterRegistry::start_service(SocketServer& socket_server) { -#if 0 - for (int i= 0, n= 0; n < nTransporters; i++){ - Transporter * t = theTransporters[i]; - if (!t) - continue; - n++; - if (t->isServer) { - t->m_service = new TransporterService(new SocketAuthSimple("ndbd passwd")); - if(!socket_server.setup(t->m_service, 
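/* --- Illustrative aside, not part of the patch ---------------------------
 * add_transporter_interface() above collects one (interface, port) pair per
 * server-side connection and skips duplicates, so start_service() can open
 * one listening socket per distinct pair instead of relying on a single
 * global m_service_port.  A std::vector sketch of that de-duplication (the
 * NDB code uses its own Vector class and plain C strings):
 */
#include <cstddef>
#include <string>
#include <vector>

struct ListenEndpoint {
  std::string interface_;   // empty means "listen on any interface"
  unsigned short port;
};

static void add_endpoint(std::vector<ListenEndpoint>& endpoints,
                         const char* interface_, unsigned short port)
{
  std::string iface = interface_ ? interface_ : "";
  for (std::size_t i = 0; i < endpoints.size(); i++) {
    if (endpoints[i].port == port && endpoints[i].interface_ == iface)
      return;                           // already registered, nothing to add
  }
  ListenEndpoint e;
  e.interface_ = iface;
  e.port = port;
  endpoints.push_back(e);
}

int main()
{
  std::vector<ListenEndpoint> eps;
  add_endpoint(eps, "10.0.0.1", 2202);
  add_endpoint(eps, "10.0.0.1", 2202);  // duplicate, ignored
  add_endpoint(eps, 0, 2203);           // null interface -> any interface
  return eps.size() == 2 ? 0 : 1;
}
/* --- end aside ----------------------------------------------------------- */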
t->m_r_port, 0)) - { - ndbout_c("Unable to setup transporter service port: %d!\n" - "Please check if the port is already used,\n" - "(perhaps a mgmt server is already running)", - m_service_port); - delete t->m_service; - return false; - } - } + if (m_transporter_interface.size() > 0 && nodeIdSpecified != true) + { + ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified"); + return false; } -#endif - if (m_service_port != 0) { - - m_transporter_service = new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd")); - - if (nodeIdSpecified != true) { - ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified"); + for (unsigned i= 0; i < m_transporter_interface.size(); i++) + { + Transporter_interface &t= m_transporter_interface[i]; + if (t.m_service_port == 0) + { + continue; + } + TransporterService *transporter_service = + new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd")); + if(!socket_server.setup(transporter_service, + t.m_service_port, t.m_interface)) + { + ndbout_c("Unable to setup transporter service port: %s:%d!\n" + "Please check if the port is already used,\n" + "(perhaps the node is already running)", + t.m_interface ? t.m_interface : "*", t.m_service_port); + delete transporter_service; return false; } - - //m_interface_name = "ndbd"; - m_interface_name = 0; - - if(!socket_server.setup(m_transporter_service, m_service_port, m_interface_name)) - { - ndbout_c("Unable to setup transporter service port: %d!\n" - "Please check if the port is already used,\n" - "(perhaps a mgmt server is already running)", - m_service_port); - delete m_transporter_service; - return false; - } - m_transporter_service->setTransporterRegistry(this); - } else - m_transporter_service= 0; - + transporter_service->setTransporterRegistry(this); + } return true; } @@ -1281,3 +1291,5 @@ NdbOut & operator <<(NdbOut & out, SignalHeader & sh){ out << "trace: " << (int)sh.theTrace << endl; return out; } + +template class Vector; diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am index 678added01e..efb249dd330 100644 --- a/ndb/src/common/util/Makefile.am +++ b/ndb/src/common/util/Makefile.am @@ -9,7 +9,7 @@ libgeneral_la_SOURCES = \ NdbSqlUtil.cpp new.cpp \ uucode.c random.c getarg.c version.c \ strdup.c strlcat.c strlcpy.c \ - ConfigValues.cpp + ConfigValues.cpp ndb_init.c include $(top_srcdir)/ndb/config/common.mk.am include $(top_srcdir)/ndb/config/type_util.mk.am diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index 84a6f6e6c21..6e4e5919e43 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -176,10 +176,29 @@ NdbSqlUtil::getType(Uint32 typeId) return m_typeList[Type::Undefined]; } +const NdbSqlUtil::Type& +NdbSqlUtil::getTypeBinary(Uint32 typeId) +{ + switch (typeId) { + case Type::Char: + typeId = Type::Binary; + break; + case Type::Varchar: + typeId = Type::Varbinary; + break; + case Type::Text: + typeId = Type::Blob; + break; + default: + break; + } + return getType(typeId); +} + // compare int -NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpTinyint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Int8 v; } u1, u2; @@ -193,7 +212,7 @@ NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s } int -NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 
size) +NdbSqlUtil::cmpTinyunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Uint8 v; } u1, u2; @@ -207,7 +226,7 @@ NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uin } int -NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpSmallint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Int16 v; } u1, u2; @@ -221,7 +240,7 @@ NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpSmallunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Uint16 v; } u1, u2; @@ -235,7 +254,7 @@ NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Ui } int -NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpMediumint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { const Uint32* p; const unsigned char* v; } u1, u2; @@ -251,7 +270,7 @@ NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpMediumunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { const Uint32* p; const unsigned char* v; } u1, u2; @@ -267,7 +286,7 @@ NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, U } int -NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpInt(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Int32 v; } u1, u2; @@ -281,7 +300,7 @@ NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) } int -NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpUnsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; Uint32 v; } u1, u2; @@ -295,7 +314,7 @@ NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBigint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); if (size >= 2) { @@ -314,7 +333,7 @@ NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si } int -NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBigunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); if (size >= 2) { @@ -333,7 +352,7 @@ NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint } int -NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpFloat(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); union { Uint32 p[1]; float 
v; } u1, u2; @@ -348,7 +367,7 @@ NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 siz } int -NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpDouble(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); if (size >= 2) { @@ -368,7 +387,7 @@ NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si } int -NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpDecimal(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); // not used by MySQL or NDB @@ -377,27 +396,34 @@ NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s } int -NdbSqlUtil::cmpChar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpChar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { - assert(full >= size && size > 0); + // collation does not work on prefix for some charsets + assert(full == size && size > 0); /* - * Char is blank-padded to length and null-padded to word size. There - * is no terminator so we compare the full values. + * Char is blank-padded to length and null-padded to word size. */ - union { const Uint32* p; const char* v; } u1, u2; + union { const Uint32* p; const uchar* v; } u1, u2; u1.p = p1; u2.p = p2; - int k = memcmp(u1.v, u2.v, size << 2); - return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown; + // not const in MySQL + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + // length in bytes including null padding to Uint32 + uint l1 = (full << 2); + int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1); + return k < 0 ? -1 : k > 0 ? +1 : 0; } int -NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpVarchar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Varchar is not allowed to contain a null byte and the value is * null-padded. Therefore comparison does not need to use the length. + * + * Not used before MySQL 5.0. Format is likely to change. Handle + * only binary collation for now. */ union { const Uint32* p; const char* v; } u1, u2; u1.p = p1; @@ -408,7 +434,7 @@ NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s } int -NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* @@ -422,12 +448,14 @@ NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si } int -NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpVarbinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Binary data of variable length padded with nulls. The comparison * does not need to use the length. + * + * Not used before MySQL 5.0. Format is likely to change. 
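/* --- Illustrative aside, not part of the patch ---------------------------
 * cmpChar above now delegates to the charset's strnncollsp() so that, for
 * example, latin1_swedish_ci treats 'aAa' and 'AAA' as equal, which is what
 * the ndb_charset test at the top of this change set exercises.  A toy
 * stand-in for that comparison using plain ASCII case folding instead of a
 * real CHARSET_INFO (illustration only, not the MySQL collation API):
 */
#include <cctype>
#include <cstddef>

// Compare two blank-padded CHAR values of equal byte length, ignoring ASCII
// case; returns <0, 0 or >0, like a collation's strnncollsp().
static int cmp_char_ci(const char* a, const char* b, std::size_t len)
{
  for (std::size_t i = 0; i < len; i++) {
    int ca = std::tolower(static_cast<unsigned char>(a[i]));
    int cb = std::tolower(static_cast<unsigned char>(b[i]));
    if (ca != cb)
      return ca < cb ? -1 : +1;
  }
  return 0;
}

int main()
{
  // Under a case-insensitive collation these two values compare equal.
  return cmp_char_ci("aAa", "AAA", 3) == 0 ? 0 : 1;
}
/* --- end aside ----------------------------------------------------------- */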
*/ union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; @@ -438,11 +466,13 @@ NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpDatetime(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Datetime is CC YY MM DD hh mm ss \0 + * + * Not used via MySQL. */ union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; @@ -459,11 +489,13 @@ NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpTimespec(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* * Timespec is CC YY MM DD hh mm ss \0 NN NN NN NN + * + * Not used via MySQL. */ union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; @@ -490,12 +522,11 @@ NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 } int -NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpBlob(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { assert(full >= size && size > 0); /* - * Blob comparison is on the inline bytes. Except for larger header - * the format is like Varbinary. + * Blob comparison is on the inline bytes (null padded). */ const unsigned head = NDB_BLOB_HEAD_SIZE; // skip blob head @@ -510,25 +541,107 @@ NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size } int -NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) +NdbSqlUtil::cmpText(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size) { - assert(full >= size && size > 0); + // collation does not work on prefix for some charsets + assert(full == size && size > 0); /* - * Text comparison is on the inline bytes. Except for larger header - * the format is like Varchar. + * Text comparison is on the inline bytes (blank padded). Currently + * not supported for multi-byte charsets. */ const unsigned head = NDB_BLOB_HEAD_SIZE; // skip blob head if (size >= head + 1) { - union { const Uint32* p; const char* v; } u1, u2; + union { const Uint32* p; const uchar* v; } u1, u2; u1.p = p1 + head; u2.p = p2 + head; - int k = memcmp(u1.v, u2.v, (size - head) << 2); - return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown; + // not const in MySQL + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + // length in bytes including null padding to Uint32 + uint l1 = (full << 2); + int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1); + return k < 0 ? -1 : k > 0 ? 
+1 : 0; } return CmpUnknown; } +// check charset + +bool +NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info) +{ + const Type& type = getType(typeId); + switch (type.m_typeId) { + case Type::Undefined: + break; + case Type::Char: + { + const CHARSET_INFO *cs = (const CHARSET_INFO*)info; + return + cs != 0 && + cs->cset != 0 && + cs->coll != 0 && + cs->coll->strnxfrm != 0 && + cs->strxfrm_multiply == 1; // current limitation + } + break; + case Type::Varchar: + return true; // Varchar not used via MySQL + case Type::Blob: + case Type::Text: + break; + default: + return true; + } + return false; +} + +bool +NdbSqlUtil::usable_in_hash_index(Uint32 typeId, const void* info) +{ + return usable_in_pk(typeId, info); +} + +bool +NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info) +{ + const Type& type = getType(typeId); + switch (type.m_typeId) { + case Type::Undefined: + break; + case Type::Char: + { + const CHARSET_INFO *cs = (const CHARSET_INFO*)info; + return + cs != 0 && + cs->cset != 0 && + cs->coll != 0 && + cs->coll->strnxfrm != 0 && + cs->coll->strnncollsp != 0 && + cs->strxfrm_multiply == 1; // current limitation + } + break; + case Type::Varchar: + return true; // Varchar not used via MySQL + case Type::Text: + { + const CHARSET_INFO *cs = (const CHARSET_INFO*)info; + return + cs != 0 && + cs->mbmaxlen == 1 && // extra limitation + cs->cset != 0 && + cs->coll != 0 && + cs->coll->strnxfrm != 0 && + cs->coll->strnncollsp != 0 && + cs->strxfrm_multiply == 1; // current limitation + } + break; + default: + return true; + } + return false; +} + #ifdef NDB_SQL_UTIL_TEST #include @@ -556,6 +669,7 @@ const Testcase testcase[] = { int main(int argc, char** argv) { + ndb_init(); // for charsets unsigned count = argc > 1 ? atoi(argv[1]) : 1000000; ndbout_c("count = %u", count); assert(count != 0); diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index 0cc06a54496..c3cffa1399b 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ b/ndb/src/common/util/SocketServer.cpp @@ -16,6 +16,7 @@ #include +#include #include @@ -46,7 +47,7 @@ SocketServer::~SocketServer() { } bool -SocketServer::tryBind(unsigned short port, const char * intface) const { +SocketServer::tryBind(unsigned short port, const char * intface) { struct sockaddr_in servaddr; memset(&servaddr, 0, sizeof(servaddr)); servaddr.sin_family = AF_INET; @@ -83,7 +84,8 @@ bool SocketServer::setup(SocketServer::Service * service, unsigned short port, const char * intface){ - + DBUG_ENTER("SocketServer::setup"); + DBUG_PRINT("enter",("interface=%s, port=%d", intface, port)); struct sockaddr_in servaddr; memset(&servaddr, 0, sizeof(servaddr)); servaddr.sin_family = AF_INET; @@ -92,36 +94,44 @@ SocketServer::setup(SocketServer::Service * service, if(intface != 0){ if(Ndb_getInAddr(&servaddr.sin_addr, intface)) - return false; + DBUG_RETURN(false); } const NDB_SOCKET_TYPE sock = socket(AF_INET, SOCK_STREAM, 0); if (sock == NDB_INVALID_SOCKET) { - return false; + DBUG_PRINT("error",("socket() - %d - %s", + errno, strerror(errno))); + DBUG_RETURN(false); } const int on = 1; if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on)) == -1) { + DBUG_PRINT("error",("getsockopt() - %d - %s", + errno, strerror(errno))); NDB_CLOSE_SOCKET(sock); - return false; + DBUG_RETURN(false); } if (bind(sock, (struct sockaddr*) &servaddr, sizeof(servaddr)) == -1) { + DBUG_PRINT("error",("bind() - %d - %s", + errno, strerror(errno))); NDB_CLOSE_SOCKET(sock); - return false; + 
DBUG_RETURN(false); } if (listen(sock, m_maxSessions) == -1){ + DBUG_PRINT("error",("listen() - %d - %s", + errno, strerror(errno))); NDB_CLOSE_SOCKET(sock); - return false; + DBUG_RETURN(false); } ServiceInstance i; i.m_socket = sock; i.m_service = service; m_services.push_back(i); - return true; + DBUG_RETURN(true); } void @@ -177,8 +187,9 @@ void* socketServerThread_C(void* _ss){ SocketServer * ss = (SocketServer *)_ss; + my_thread_init(); ss->doRun(); - + my_thread_end(); NdbThread_Exit(0); return 0; } @@ -287,8 +298,10 @@ void* sessionThread_C(void* _sc){ SocketServer::Session * si = (SocketServer::Session *)_sc; + my_thread_init(); if(!transfer(si->m_socket)){ si->m_stopped = true; + my_thread_end(); NdbThread_Exit(0); return 0; } @@ -301,6 +314,7 @@ sessionThread_C(void* _sc){ } si->m_stopped = true; + my_thread_end(); NdbThread_Exit(0); return 0; } diff --git a/ndb/src/common/debugger/LogLevel.cpp b/ndb/src/common/util/ndb_init.c similarity index 67% rename from ndb/src/common/debugger/LogLevel.cpp rename to ndb/src/common/util/ndb_init.c index f9e2f318432..b160ed3636b 100644 --- a/ndb/src/common/debugger/LogLevel.cpp +++ b/ndb/src/common/util/ndb_init.c @@ -14,17 +14,16 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include +#include +#include -const LogLevel::LogLevelCategoryName LogLevel::LOGLEVEL_CATEGORY_NAME[] = { - { "LogLevelStartup" }, - { "LogLevelShutdown" }, - { "LogLevelStatistic" }, - { "LogLevelCheckpoint" }, - { "LogLevelNodeRestart" }, - { "LogLevelConnection" }, - { "LogLevelError" }, - { "LogLevelWarning" }, - { "LogLevelInfo" }, - { "LogLevelGrep" } -}; +int +ndb_init() +{ + if (my_init()) { + const char* err = "my_init() failed - exit\n"; + write(2, err, strlen(err)); + exit(1); + } + return 0; +} diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp index 46b043c7004..de0e40cebfc 100644 --- a/ndb/src/cw/cpcd/APIService.cpp +++ b/ndb/src/cw/cpcd/APIService.cpp @@ -47,7 +47,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ fun, \ - desc } + desc, 0 } #define CPCD_ARG(name, type, opt, desc) \ { name, \ @@ -58,7 +58,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - desc } + desc, 0 } #define CPCD_ARG2(name, type, opt, min, max, desc) \ { name, \ @@ -69,7 +69,7 @@ ParserRow::IgnoreMinMax, \ min, max, \ 0, \ - desc } + desc, 0 } #define CPCD_END() \ { 0, \ @@ -80,7 +80,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - 0 } + 0, 0 } #define CPCD_CMD_ALIAS(name, realName, fun) \ { name, \ @@ -91,7 +91,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - 0 } + 0, 0 } #define CPCD_ARG_ALIAS(name, realName, fun) \ { name, \ @@ -102,7 +102,7 @@ ParserRow::IgnoreMinMax, \ 0, 0, \ 0, \ - 0 } + 0, 0 } const ParserRow commands[] = diff --git a/ndb/src/cw/cpcd/CPCD.cpp b/ndb/src/cw/cpcd/CPCD.cpp index 44db10422b9..bc9f350755f 100644 --- a/ndb/src/cw/cpcd/CPCD.cpp +++ b/ndb/src/cw/cpcd/CPCD.cpp @@ -378,7 +378,7 @@ CPCD::getProcessList() { } void -CPCD::RequestStatus::err(enum RequestStatusCode status, char *msg) { +CPCD::RequestStatus::err(enum RequestStatusCode status, const char *msg) { m_status = status; snprintf(m_errorstring, sizeof(m_errorstring), "%s", msg); } diff --git a/ndb/src/cw/cpcd/CPCD.hpp b/ndb/src/cw/cpcd/CPCD.hpp index 4a7cab23bab..a5c0bef1dac 100644 --- a/ndb/src/cw/cpcd/CPCD.hpp +++ b/ndb/src/cw/cpcd/CPCD.hpp @@ -91,7 +91,7 @@ public: RequestStatus() { m_status = OK; m_errorstring[0] = '\0'; }; /** @brief Sets an errorcode and a printable message */ - void err(enum 
RequestStatusCode, char *); + void err(enum RequestStatusCode, const char *); /** @brief Returns the error message */ char *getErrMsg() { return m_errorstring; }; diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp index 913c31de1f7..207b81bfa89 100644 --- a/ndb/src/cw/cpcd/main.cpp +++ b/ndb/src/cw/cpcd/main.cpp @@ -28,12 +28,12 @@ #include "common.hpp" -static char *work_dir = CPCD_DEFAULT_WORK_DIR; +static const char *work_dir = CPCD_DEFAULT_WORK_DIR; static int port = CPCD_DEFAULT_TCP_PORT; static int use_syslog = 0; -static char *logfile = NULL; -static char *config_file = CPCD_DEFAULT_CONFIG_FILE; -static char *user = 0; +static const char *logfile = NULL; +static const char *config_file = CPCD_DEFAULT_CONFIG_FILE; +static const char *user = 0; static struct getargs args[] = { { "work-dir", 'w', arg_string, &work_dir, diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index b3e9ff735ac..08a8bf83e20 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -944,6 +945,13 @@ Backup::sendBackupRef(BlockReference senderRef, Signal *signal, ref->errorCode = errorCode; ref->masterRef = numberToRef(BACKUP, getMasterNodeId()); sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB); + + if(errorCode != BackupRef::IAmNotMaster){ + signal->theData[0] = EventReport::BackupFailedToStart; + signal->theData[1] = senderRef; + signal->theData[2] = errorCode; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); + } } void @@ -1226,7 +1234,13 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) conf->nodes = ptr.p->nodes; sendSignal(ptr.p->clientRef, GSN_BACKUP_CONF, signal, BackupConf::SignalLength, JBB); - + + signal->theData[0] = EventReport::BackupStarted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB); + ptr.p->masterData.state.setState(DEFINED); /** * Prepare Trig @@ -2069,6 +2083,18 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) rep->nodes = ptr.p->nodes; sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal, BackupCompleteRep::SignalLength, JBB); + + signal->theData[0] = EventReport::BackupCompleted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->startGCP; + signal->theData[4] = ptr.p->stopGCP; + signal->theData[5] = ptr.p->noOfBytes; + signal->theData[6] = ptr.p->noOfRecords; + signal->theData[7] = ptr.p->noOfLogBytes; + signal->theData[8] = ptr.p->noOfLogRecords; + ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); } /***************************************************************************** @@ -2259,6 +2285,12 @@ Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr) rep->reason = ptr.p->errorCode; sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal, BackupAbortRep::SignalLength, JBB); + + signal->theData[0] = EventReport::BackupAborted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->errorCode; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); }//if // ptr.p->masterData.state.setState(INITIAL); diff --git 
a/ndb/src/kernel/blocks/backup/read.cpp b/ndb/src/kernel/blocks/backup/read.cpp index 921c352ea13..89cc08ee9de 100644 --- a/ndb/src/kernel/blocks/backup/read.cpp +++ b/ndb/src/kernel/blocks/backup/read.cpp @@ -48,6 +48,7 @@ static Uint32 logEntryNo; int main(int argc, const char * argv[]){ + ndb_init(); if(argc <= 1){ printf("Usage: %s ", argv[0]); exit(1); diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp index a330aa51373..5708415c61e 100644 --- a/ndb/src/kernel/blocks/backup/restore/main.cpp +++ b/ndb/src/kernel/blocks/backup/restore/main.cpp @@ -206,6 +206,7 @@ free_data_callback() int main(int argc, const char** argv) { + ndb_init(); if (!readArguments(argc, argv)) { return -1; @@ -331,7 +332,7 @@ main(int argc, const char** argv) for (i= 0; i < g_consumers.size(); i++) g_consumers[i]->endOfTuples(); - + RestoreLogIterator logIter(metaData); if (!logIter.readHeader()) { @@ -357,7 +358,7 @@ main(int argc, const char** argv) } } clearConsumers(); - return 1; + return 0; } // main template class Vector; diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index e2085eb612c..234d832655c 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -97,7 +97,7 @@ Cmvmi::Cmvmi(const Configuration & conf) : const ndb_mgm_configuration_iterator * db = theConfig.getOwnConfigIterator(); for(unsigned j = 0; jnoOfEntries; i++){ - category = (LogLevel::EventCategory)llOrd->theCategories[i]; - level = llOrd->theLevels[i]; - + category = (LogLevel::EventCategory)(llOrd->theData[i] >> 16); + level = llOrd->theData[i] & 0xFFFF; + clogLevel.setLogLevel(category, level); } }//execSET_LOGLEVELORD() @@ -196,10 +196,10 @@ void Cmvmi::execEVENT_REP(Signal* signal) Uint32 threshold = 16; LogLevel::EventCategory eventCategory = (LogLevel::EventCategory)0; - for(unsigned int i = 0; i< EventLogger::matrixSize; i++){ - if(EventLogger::matrix[i].eventType == eventType){ - eventCategory = EventLogger::matrix[i].eventCategory; - threshold = EventLogger::matrix[i].threshold; + for(unsigned int i = 0; i< EventLoggerBase::matrixSize; i++){ + if(EventLoggerBase::matrix[i].eventType == eventType){ + eventCategory = EventLoggerBase::matrix[i].eventCategory; + threshold = EventLoggerBase::matrix[i].threshold; break; } } @@ -250,17 +250,7 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){ sendSignal(subReq->blockRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB); return; } - /** - * If it's a new subscription, clear the loglevel - * - * Clear only if noOfEntries is 0, this is needed beacuse we set - * the default loglevels for the MGMT nodes during the inital connect phase. - * See reportConnected(). 
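
The Cmvmi.cpp hunks here change SET_LOGLEVELORD and EVENT_SUBSCRIBE_REQ so that each (category, level) pair travels in a single signal word, category in the upper 16 bits and level in the lower 16 bits, instead of two parallel arrays. A minimal standalone sketch of that packing, with plain integers standing in for the signal data (pack_entry and unpack_entry are illustrative names, not part of the patch):

#include <cassert>
#include <cstdint>

// Pack an event category and a log level into one signal word,
// mirroring "theData[i] = (category << 16) | level".
static uint32_t pack_entry(uint32_t category, uint32_t level) {
  assert(category <= 0xFFFF && level <= 0xFFFF);
  return (category << 16) | level;
}

static void unpack_entry(uint32_t word, uint32_t& category, uint32_t& level) {
  category = word >> 16;     // upper half: LogLevel::EventCategory
  level    = word & 0xFFFF;  // lower half: threshold level
}

int main() {
  const uint32_t word = pack_entry(/*category=*/7, /*level=*/15);
  uint32_t cat = 0, lvl = 0;
  unpack_entry(word, cat, lvl);
  assert(cat == 7 && lvl == 15);
  return 0;
}
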
- */ - if (subReq->noOfEntries == 0){ - ptr.p->logLevel.clear(); - } - + ptr.p->logLevel.clear(); ptr.p->blockRef = subReq->blockRef; } @@ -276,10 +266,9 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){ LogLevel::EventCategory category; Uint32 level = 0; for(Uint32 i = 0; inoOfEntries; i++){ - category = (LogLevel::EventCategory)subReq->theCategories[i]; - level = subReq->theLevels[i]; - ptr.p->logLevel.setLogLevel(category, - level); + category = (LogLevel::EventCategory)(subReq->theData[i] >> 16); + level = subReq->theData[i] & 0xFFFF; + ptr.p->logLevel.setLogLevel(category, level); } } @@ -384,11 +373,6 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal) globalTransporterRegistry.setIOState(i, HaltIO); globalTransporterRegistry.do_disconnect(i); - - /** - * Cancel possible event subscription - */ - cancelSubscription(i); } } if (failNo != 0) { @@ -494,6 +478,8 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal) globalTransporterRegistry.do_connect(hostId); } + cancelSubscription(hostId); + signal->theData[0] = EventReport::Disconnected; signal->theData[1] = hostId; sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); @@ -539,20 +525,6 @@ void Cmvmi::execCONNECT_REP(Signal *signal){ if(type == NodeInfo::MGM){ jam(); globalTransporterRegistry.setIOState(hostId, NoHalt); - - EventSubscribeReq* dst = (EventSubscribeReq *)&signal->theData[0]; - - for (Uint32 i = 0; i < EventLogger::defEventLogMatrixSize; i++) { - dst->theCategories[i] = EventLogger::defEventLogMatrix[i].eventCategory; - dst->theLevels[i] = EventLogger::defEventLogMatrix[i].threshold; - } - - dst->noOfEntries = EventLogger::defEventLogMatrixSize; - /* The BlockNumber is hardcoded as 1 in MgmtSrvr */ - dst->blockRef = numberToRef(MIN_API_BLOCK_NO, hostId); - - execEVENT_SUBSCRIBE_REQ(signal); - } //------------------------------------------ diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 7126842459e..4757f1d2bf3 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -15,6 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include +#include #define DBDICT_C #include "Dbdict.hpp" @@ -4100,6 +4101,8 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->noOfKeyAttr = tabPtr.p->noOfPrimkey; req->noOfNewAttr = 0; + // noOfCharsets passed to TUP in upper half + req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16); req->checksumIndicator = 1; req->noOfAttributeGroups = 1; req->GCPIndicator = 0; @@ -4161,6 +4164,8 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal, entry.attrId = attrPtr.p->attributeId; entry.attrDescriptor = attrPtr.p->attributeDescriptor; entry.extTypeInfo = attrPtr.p->extType; + // charset number passed to TUP, TUX in upper half + entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF); if (tabPtr.p->isIndex()) { Uint32 primaryAttrId; if (attrPtr.p->nextAttrInTable != RNIL) { @@ -4697,6 +4702,8 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, Uint32 keyLength = 0; Uint32 attrCount = tablePtr.p->noOfAttributes; Uint32 nullCount = 0; + Uint32 noOfCharsets = 0; + Uint16 charsets[128]; Uint32 recordLength = 0; AttributeRecordPtr attrPtr; c_attributeRecordHash.removeAll(); @@ -4751,6 +4758,31 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision; attrPtr.p->extScale = attrDesc.AttributeExtScale; attrPtr.p->extLength = attrDesc.AttributeExtLength; + // charset in upper half of precision + unsigned csNumber = 
(attrPtr.p->extPrecision >> 16); + if (csNumber != 0) { + CHARSET_INFO* cs = get_charset(csNumber, MYF(0)); + if (cs == NULL) { + parseP->errorCode = CreateTableRef::InvalidCharset; + parseP->errorLine = __LINE__; + return; + } + unsigned i = 0; + while (i < noOfCharsets) { + if (charsets[i] == csNumber) + break; + i++; + } + if (i == noOfCharsets) { + noOfCharsets++; + if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) { + parseP->errorCode = CreateTableRef::InvalidFormat; + parseP->errorLine = __LINE__; + return; + } + charsets[i] = csNumber; + } + } /** * Ignore incoming old-style type and recompute it. @@ -4814,6 +4846,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, tablePtr.p->noOfPrimkey = keyCount; tablePtr.p->noOfNullAttr = nullCount; + tablePtr.p->noOfCharsets = noOfCharsets; tablePtr.p->tupKeyLength = keyLength; tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS, @@ -6317,6 +6350,8 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored); // ext type overrides w.add(DictTabInfo::AttributeExtType, aRec->extType); + w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision); + w.add(DictTabInfo::AttributeExtScale, aRec->extScale); w.add(DictTabInfo::AttributeExtLength, aRec->extLength); w.add(DictTabInfo::AttributeEnd, (Uint32)true); } diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 5ddaa67a7d6..a94af7b59c8 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -455,7 +455,7 @@ public: Uint16 totalAttrReceived; Uint16 fragCopyCreation; Uint16 noOfKeyAttr; - Uint16 noOfNewAttr; + Uint32 noOfNewAttr; // noOfCharsets in upper half Uint16 noOfAttributeGroups; Uint16 lh3DistrBits; Uint16 tableType; diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 3b05a133bbb..8342870d69c 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -1444,6 +1444,7 @@ Dblqh::sendAddAttrReq(Signal* signal) tupreq->notused1 = 0; tupreq->attrId = attrId; tupreq->attrDescriptor = entry.attrDescriptor; + tupreq->extTypeInfo = entry.extTypeInfo; sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ, signal, TupAddAttrReq::SignalLength, JBB); return; @@ -7699,6 +7700,7 @@ void Dblqh::accScanConfScanLab(Signal* signal) ndbrequire(sz == boundAiLength); EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO, signal, TuxBoundInfo::SignalLength + boundAiLength); + jamEntry(); if (req->errorCode != 0) { jam(); /* diff --git a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp index 0f3881e9024..2c62adab3e5 100644 --- a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp +++ b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp @@ -22,26 +22,59 @@ class AttributeOffset { private: static void setOffset(Uint32 & desc, Uint32 offset); + static void setCharsetPos(Uint32 & desc, Uint32 offset); static void setNullFlagPos(Uint32 & desc, Uint32 offset); static Uint32 getOffset(const Uint32 &); + static bool getCharsetFlag(const Uint32 &); + static Uint32 getCharsetPos(const Uint32 &); static Uint32 getNullFlagPos(const Uint32 &); static Uint32 getNullFlagOffset(const Uint32 &); static Uint32 getNullFlagBitOffset(const Uint32 &); static bool isNULL(const Uint32 &, const Uint32 &); }; -#define AO_ATTRIBUTE_OFFSET_MASK (0xffff) -#define AO_NULL_FLAG_POS_MASK (0x7ff) -#define AO_NULL_FLAG_POS_SHIFT (21) 
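
Dbdict::handleTabInfo above walks the attributes, pulls the charset number out of the upper 16 bits of extPrecision, and records each distinct number so TUP can later be told how many charset pointers to allocate. A hedged sketch of that dedup pass, with std::vector standing in for the fixed Uint16 charsets[128] array; the collation numbers in main() are sample values only (8 and 47 are the usual MySQL ids for latin1_swedish_ci and latin1_bin):

#include <cassert>
#include <cstdint>
#include <vector>

// Collect the distinct charset numbers used by a table's attributes.
// Each attribute carries its charset in the upper 16 bits of extPrecision;
// 0 means "no character set" (numeric columns).
static unsigned countDistinctCharsets(const std::vector<uint32_t>& extPrecision) {
  std::vector<uint16_t> charsets;
  for (uint32_t p : extPrecision) {
    const uint32_t csNumber = p >> 16;
    if (csNumber == 0)
      continue;                        // attribute has no character set
    unsigned i = 0;
    while (i < charsets.size() && charsets[i] != csNumber)
      i++;
    if (i == charsets.size())
      charsets.push_back(csNumber);    // first attribute using this charset
  }
  return (unsigned)charsets.size();
}

int main() {
  // two latin1_bin columns, one latin1_swedish_ci column, one int column
  std::vector<uint32_t> prec = { 47u << 16, 8u << 16, 47u << 16, 0u };
  assert(countDistinctCharsets(prec) == 2);
  return 0;
}
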
-#define AO_NULL_FLAG_WORD_MASK (31) -#define AO_NULL_FLAG_OFFSET_SHIFT (5) +/** + * Allow for 4096 attributes, all nullable, and for 128 different + * character sets. + * + * a = Attribute offset - 11 bits 0-10 ( addr word in 8 kb ) + * c = Has charset flag 1 bits 11-11 + * s = Charset pointer position - 7 bits 12-18 ( in table descriptor ) + * f = Null flag offset in word - 5 bits 20-24 ( address 32 bits ) + * w = Null word offset - 7 bits 25-32 ( f+w addr 4096 attrs ) + * + * 1111111111222222222233 + * 01234567890123456789012345678901 + * aaaaaaaaaaacsssssss fffffwwwwwww + */ + +#define AO_ATTRIBUTE_OFFSET_SHIFT 0 +#define AO_ATTRIBUTE_OFFSET_MASK 0x7ff + +#define AO_CHARSET_FLAG_SHIFT 11 +#define AO_CHARSET_POS_SHIFT 12 +#define AO_CHARSET_POS_MASK 127 + +#define AO_NULL_FLAG_POS_MASK 0xfff // f+w +#define AO_NULL_FLAG_POS_SHIFT 20 + +#define AO_NULL_FLAG_WORD_MASK 31 // f +#define AO_NULL_FLAG_OFFSET_SHIFT 5 inline void AttributeOffset::setOffset(Uint32 & desc, Uint32 offset){ ASSERT_MAX(offset, AO_ATTRIBUTE_OFFSET_MASK, "AttributeOffset::setOffset"); - desc |= offset; + desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT); +} + +inline +void +AttributeOffset::setCharsetPos(Uint32 & desc, Uint32 offset) { + ASSERT_MAX(offset, AO_CHARSET_POS_MASK, "AttributeOffset::setCharsetPos"); + desc |= (1 << AO_CHARSET_FLAG_SHIFT); + desc |= (offset << AO_CHARSET_POS_SHIFT); } inline @@ -55,7 +88,21 @@ inline Uint32 AttributeOffset::getOffset(const Uint32 & desc) { - return desc & AO_ATTRIBUTE_OFFSET_MASK; + return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK; +} + +inline +bool +AttributeOffset::getCharsetFlag(const Uint32 & desc) +{ + return (desc >> AO_CHARSET_FLAG_SHIFT) & 1; +} + +inline +Uint32 +AttributeOffset::getCharsetPos(const Uint32 & desc) +{ + return (desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK; } inline diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index cb7e35ea73e..ce81c1c9bc8 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -502,6 +502,7 @@ struct Fragoperrec { Uint32 attributeCount; Uint32 freeNullBit; Uint32 noOfNewAttrCount; + Uint32 charsetIndex; BlockReference lqhBlockrefFrag; }; typedef Ptr FragoperrecPtr; @@ -785,6 +786,7 @@ struct Tablerec { ReadFunction* readFunctionArray; UpdateFunction* updateFunctionArray; + CHARSET_INFO** charsetArray; Uint32 readKeyArray; Uint32 tabDescriptor; @@ -796,6 +798,7 @@ struct Tablerec { Uint16 tupheadsize; Uint16 noOfAttr; Uint16 noOfKeyAttr; + Uint16 noOfCharsets; Uint16 noOfNewAttr; Uint16 noOfNullAttr; Uint16 noOfAttributeGroups; @@ -1001,17 +1004,20 @@ public: void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node); /* - * TUX reads primary table attributes for index keys. Input is - * attribute ids in AttributeHeader format. Output is pointers to - * attribute data within tuple or 0 for NULL value. + * TUX reads primary table attributes for index keys. Tuple is + * specified by location of original tuple and version number. Input + * is attribute ids in AttributeHeader format. Output is attribute + * data with headers. Uses readAttributes with xfrm option set. + * Returns number of words or negative (-terrorCode) on error. 
*/ - void tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData); + int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut); /* * TUX reads primary key without headers into an array of words. Used - * for md5 summing and when returning keyinfo. + * for md5 summing and when returning keyinfo. Returns number of + * words or negative (-terrorCode) on error. */ - void tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData); + int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut); /* * TUX checks if tuple is visible to scan. @@ -1365,10 +1371,11 @@ private: //------------------------------------------------------------------ int readAttributes(Page* const pagePtr, Uint32 TupHeadOffset, - Uint32* inBuffer, + const Uint32* inBuffer, Uint32 inBufLen, Uint32* outBuffer, - Uint32 TmaxRead); + Uint32 TmaxRead, + bool xfrmFlag); //------------------------------------------------------------------ //------------------------------------------------------------------ @@ -1614,6 +1621,20 @@ private: Uint32 attrDescriptor, Uint32 attrDes2); +// ***************************************************************** +// Read char routines optionally (tXfrmFlag) apply strxfrm +// ***************************************************************** + + bool readCharNotNULL(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2); + + bool readCharNULLable(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2); + //------------------------------------------------------------------ //------------------------------------------------------------------ bool nullFlagCheck(Uint32 attrDes2); @@ -1909,7 +1930,8 @@ private: void updatePackedList(Signal* signal, Uint16 ahostIndex); void setUpDescriptorReferences(Uint32 descriptorReference, - Tablerec* const regTabPtr); + Tablerec* const regTabPtr, + const Uint32* offset); void setUpKeyArray(Tablerec* const regTabPtr); bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex); void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId); @@ -2098,7 +2120,8 @@ private: //----------------------------------------------------------------------------- // Public methods - Uint32 allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups); + Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset); + Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset); void freeTabDescr(Uint32 retRef, Uint32 retNo); Uint32 getTabDescrWord(Uint32 index); void setTabDescrWord(Uint32 index, Uint32 word); @@ -2217,6 +2240,7 @@ private: Uint32 tMaxRead; Uint32 tOutBufIndex; Uint32* tTupleHeader; + bool tXfrmFlag; // updateAttributes module Uint32 tInBufIndex; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index 0a47778f7c1..dfd1e37d4f5 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -903,7 +903,8 @@ int Dbtup::handleReadReq(Signal* signal, &cinBuffer[0], regOperPtr->attrinbufLen, dst, - dstLen); + dstLen, + false); if (TnoOfDataRead != (Uint32)-1) { /* ------------------------------------------------------------------------- */ // We have read all data into coutBuffer. Now send it to the API. 
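
The AttributeOffset layout introduced above packs the tuple-header offset, a has-charset flag, the position of the charset pointer in the table descriptor, and the null-flag position into a single 32-bit descriptor word. A self-contained sketch using the same shift and mask values from the patch (standalone helpers, not the real AttributeOffset class):

#include <cassert>
#include <cstdint>

// Shift/mask values copied from AttributeOffset.hpp above.
enum {
  AO_ATTRIBUTE_OFFSET_SHIFT = 0,  AO_ATTRIBUTE_OFFSET_MASK = 0x7ff,
  AO_CHARSET_FLAG_SHIFT     = 11,
  AO_CHARSET_POS_SHIFT      = 12, AO_CHARSET_POS_MASK      = 127,
  AO_NULL_FLAG_POS_SHIFT    = 20, AO_NULL_FLAG_POS_MASK    = 0xfff
};

int main() {
  uint32_t desc = 0;
  desc |= (100u << AO_ATTRIBUTE_OFFSET_SHIFT);  // word offset within tuple header
  desc |= (1u   << AO_CHARSET_FLAG_SHIFT);      // attribute has a charset
  desc |= (3u   << AO_CHARSET_POS_SHIFT);       // index into charsetArray
  desc |= (17u  << AO_NULL_FLAG_POS_SHIFT);     // null bit position (word*32 + bit)

  assert(((desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK) == 100);
  assert(((desc >> AO_CHARSET_FLAG_SHIFT) & 1) == 1);
  assert(((desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK) == 3);
  assert(((desc >> AO_NULL_FLAG_POS_SHIFT) & AO_NULL_FLAG_POS_MASK) == 17);
  return 0;
}
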
@@ -1274,7 +1275,8 @@ int Dbtup::interpreterStartLab(Signal* signal, &cinBuffer[5], RinitReadLen, &dst[0], - dstLen); + dstLen, + false); if (TnoDataRW != (Uint32)-1) { RattroutCounter = TnoDataRW; RinstructionCounter += RinitReadLen; @@ -1347,7 +1349,8 @@ int Dbtup::interpreterStartLab(Signal* signal, &cinBuffer[RinstructionCounter], RfinalRLen, &dst[RattroutCounter], - (dstLen - RattroutCounter)); + (dstLen - RattroutCounter), + false); if (TnoDataRW != (Uint32)-1) { RattroutCounter += TnoDataRW; } else { @@ -1487,7 +1490,8 @@ int Dbtup::interpreterNextLab(Signal* signal, &theAttrinfo, (Uint32)1, &TregMemBuffer[theRegister], - (Uint32)3); + (Uint32)3, + false); if (TnoDataRW == 2) { /* ------------------------------------------------------------- */ // Two words read means that we get the instruction plus one 32 @@ -1833,7 +1837,8 @@ int Dbtup::interpreterNextLab(Signal* signal, Int32 TnoDataR = readAttributes(pagePtr, TupHeadOffset, &attrId, 1, - tmpArea, tmpAreaSz); + tmpArea, tmpAreaSz, + false); if (TnoDataR == -1) { jam(); @@ -1929,7 +1934,8 @@ int Dbtup::interpreterNextLab(Signal* signal, Int32 TnoDataR = readAttributes(pagePtr, TupHeadOffset, &attrId, 1, - tmpArea, tmpAreaSz); + tmpArea, tmpAreaSz, + false); if (TnoDataR == -1) { jam(); @@ -1957,7 +1963,8 @@ int Dbtup::interpreterNextLab(Signal* signal, Int32 TnoDataR = readAttributes(pagePtr, TupHeadOffset, &attrId, 1, - tmpArea, tmpAreaSz); + tmpArea, tmpAreaSz, + false); if (TnoDataR == -1) { jam(); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index 1e57f127fbc..f3391ff7b59 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -1067,6 +1067,7 @@ Dbtup::initTab(Tablerec* const regTabPtr) }//for regTabPtr->readFunctionArray = NULL; regTabPtr->updateFunctionArray = NULL; + regTabPtr->charsetArray = NULL; regTabPtr->tabDescriptor = RNIL; regTabPtr->attributeGroupDescriptor = RNIL; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp index ec2c63c736e..2dd707ebafc 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp @@ -112,10 +112,11 @@ Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& no node = &pagePtr.p->pageWord[pageOffset] + attrDataOffset; } -void -Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData) +int +Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut) { ljamEntry(); + // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i = fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -134,6 +135,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu while (true) { ptrCheckGuard(opPtr, cnoOfOprec, operationrec); if (opPtr.p->realPageIdC != RNIL) { + // update page and offset pagePtr.i = opPtr.p->realPageIdC; pageOffset = opPtr.p->pageOffsetC; ptrCheckGuard(pagePtr, cnoOfPage, page); @@ -147,33 +149,34 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS)); } } - const Uint32 tabDescriptor = tablePtr.p->tabDescriptor; - const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset]; - for (Uint32 i = 0; i < numAttrs; i++) { - AttributeHeader ah(attrIds[i]); - const Uint32 
attrId = ah.getAttributeId(); - const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE); - const Uint32 desc1 = tableDescriptor[index].tabDescr; - const Uint32 desc2 = tableDescriptor[index + 1].tabDescr; - if (AttributeDescriptor::getNullable(desc1)) { - Uint32 offset = AttributeOffset::getNullFlagOffset(desc2); - ndbrequire(offset < tablePtr.p->tupNullWords); - offset += tablePtr.p->tupNullIndex; - ndbrequire(offset < tablePtr.p->tupheadsize); - if (AttributeOffset::isNULL(tupleHeader[offset], desc2)) { - ljam(); - attrData[i] = 0; - continue; - } - } - attrData[i] = tupleHeader + AttributeOffset::getOffset(desc2); + // read key attributes from found tuple version + // save globals + TablerecPtr tabptr_old = tabptr; + FragrecordPtr fragptr_old = fragptr; + OperationrecPtr operPtr_old = operPtr; + // new globals + tabptr = tablePtr; + fragptr = fragPtr; + operPtr.i = RNIL; + operPtr.p = NULL; + // do it + int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true); + // restore globals + tabptr = tabptr_old; + fragptr = fragptr_old; + operPtr = operPtr_old; + // done + if (ret == (Uint32)-1) { + ret = terrorCode ? (-(int)terrorCode) : -1; } + return ret; } -void -Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData) +int +Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut) { ljamEntry(); + // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i = fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -184,25 +187,45 @@ Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pk pagePtr.i = pageId; ptrCheckGuard(pagePtr, cnoOfPage, page); const Uint32 tabDescriptor = tablePtr.p->tabDescriptor; - const Uint32 numAttrs = tablePtr.p->noOfKeyAttr; const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr; - const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset]; - Uint32 size = 0; - for (Uint32 i = 0; i < numAttrs; i++) { - AttributeHeader ah(attrIds[i]); - const Uint32 attrId = ah.getAttributeId(); - const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE); - const Uint32 desc1 = tableDescriptor[index].tabDescr; - const Uint32 desc2 = tableDescriptor[index + 1].tabDescr; - ndbrequire(! AttributeDescriptor::getNullable(desc1)); - const Uint32 attrSize = AttributeDescriptor::getSizeInWords(desc1); - const Uint32* attrData = tupleHeader + AttributeOffset::getOffset(desc2); - for (Uint32 j = 0; j < attrSize; j++) { - pkData[size + j] = attrData[j]; + const Uint32 numAttrs = tablePtr.p->noOfKeyAttr; + // read pk attributes from original tuple + // save globals + TablerecPtr tabptr_old = tabptr; + FragrecordPtr fragptr_old = fragptr; + OperationrecPtr operPtr_old = operPtr; + // new globals + tabptr = tablePtr; + fragptr = fragPtr; + operPtr.i = RNIL; + operPtr.p = NULL; + // do it + int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true); + // restore globals + tabptr = tabptr_old; + fragptr = fragptr_old; + operPtr = operPtr_old; + // done + if (ret != (Uint32)-1) { + // remove headers + Uint32 n = 0; + Uint32 i = 0; + while (n < numAttrs) { + const AttributeHeader ah(dataOut[i]); + Uint32 size = ah.getDataSize(); + ndbrequire(size != 0); + for (Uint32 j = 0; j < size; j++) { + dataOut[i + j - n] = dataOut[i + j + 1]; + } + n += 1; + i += 1 + size; } - size += attrSize; + ndbrequire(i == ret); + ret -= numAttrs; + } else { + ret = terrorCode ? 
(-(int)terrorCode) : -1; } - *pkSize = size; + return ret; } bool diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index 09889a51fa3..efea312b865 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -20,12 +20,14 @@ #include #include #include +#include #include #include #include #include #include #include "AttributeOffset.hpp" +#include #define ljam() { jamLine(20000 + __LINE__); } #define ljamEntry() { jamEntryLine(20000 + __LINE__); } @@ -52,7 +54,10 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) /* Uint32 schemaVersion = signal->theData[8];*/ Uint32 noOfKeyAttr = signal->theData[9]; - Uint32 noOfNewAttr = signal->theData[10]; + Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF); + /* DICT sends number of character sets in upper half */ + Uint32 noOfCharsets = (signal->theData[10] >> 16); + Uint32 checksumIndicator = signal->theData[11]; Uint32 noOfAttributeGroups = signal->theData[12]; Uint32 globalCheckpointIdIndicator = signal->theData[13]; @@ -75,6 +80,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) fragOperPtr.p->attributeCount = noOfAttributes; fragOperPtr.p->freeNullBit = noOfNullAttr; fragOperPtr.p->noOfNewAttrCount = noOfNewAttr; + fragOperPtr.p->charsetIndex = 0; ndbrequire(reqinfo == ZADDFRAG); @@ -156,6 +162,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regTabPtr.p->tupheadsize = regTabPtr.p->tupGCPIndex; regTabPtr.p->noOfKeyAttr = noOfKeyAttr; + regTabPtr.p->noOfCharsets = noOfCharsets; regTabPtr.p->noOfAttr = noOfAttributes; regTabPtr.p->noOfNewAttr = noOfNewAttr; regTabPtr.p->noOfNullAttr = noOfNullAttr; @@ -163,13 +170,14 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regTabPtr.p->notNullAttributeMask.clear(); - Uint32 tableDescriptorRef = allocTabDescr(noOfAttributes, noOfKeyAttr, noOfAttributeGroups); + Uint32 offset[10]; + Uint32 tableDescriptorRef = allocTabDescr(regTabPtr.p, offset); if (tableDescriptorRef == RNIL) { ljam(); fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); return; }//if - setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p); + setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset); } else { ljam(); fragOperPtr.p->definingFragment = false; @@ -251,6 +259,9 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); Uint32 attrId = signal->theData[2]; Uint32 attrDescriptor = signal->theData[3]; + // DICT sends extended type (ignored) and charset number + Uint32 extType = (signal->theData[4] & 0xFF); + Uint32 csNumber = (signal->theData[4] >> 16); regTabPtr.i = fragOperPtr.p->tableidFrag; ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); @@ -304,6 +315,29 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) } else { ndbrequire(false); }//if + if (csNumber != 0) { + CHARSET_INFO* cs = get_charset(csNumber, MYF(0)); + if (cs == NULL) { + ljam(); + terrorCode = TupAddAttrRef::InvalidCharset; + addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); + return; + } + Uint32 i = 0; + while (i < fragOperPtr.p->charsetIndex) { + ljam(); + if (regTabPtr.p->charsetArray[i] == cs) + break; + i++; + } + if (i == fragOperPtr.p->charsetIndex) { + ljam(); + fragOperPtr.p->charsetIndex++; + } + ndbrequire(i < regTabPtr.p->noOfCharsets); + regTabPtr.p->charsetArray[i] = cs; + AttributeOffset::setCharsetPos(attrDes2, i); + } setTabDescrWord(firstTabDesIndex + 1, attrDes2); if (regTabPtr.p->tupheadsize > MAX_TUPLE_SIZE_IN_WORDS) { @@ -340,20 +374,28 @@ void 
Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) return; }//Dbtup::execTUP_ADD_ATTRREQ() -void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference, - Tablerec* const regTabPtr) -{ - Uint32 noOfAttributes = regTabPtr->noOfAttr; - descriptorReference += ZTD_SIZE; - ReadFunction * tmp = (ReadFunction*)&tableDescriptor[descriptorReference].tabDescr; - regTabPtr->readFunctionArray = tmp; - regTabPtr->updateFunctionArray = (UpdateFunction*)(tmp + noOfAttributes); +/* + * Descriptor has these parts: + * + * 0 readFunctionArray ( one for each attribute ) + * 1 updateFunctionArray ( ditto ) + * 2 charsetArray ( pointers to distinct CHARSET_INFO ) + * 3 readKeyArray ( attribute ids of keys ) + * 4 attributeGroupDescriptor ( currently size 1 but unused ) + * 5 tabDescriptor ( attribute descriptors, each ZAD_SIZE ) + */ - TableDescriptor * start = &tableDescriptor[descriptorReference]; - TableDescriptor * end = (TableDescriptor*)(tmp + 2 * noOfAttributes); - regTabPtr->readKeyArray = descriptorReference + (end - start); - regTabPtr->attributeGroupDescriptor = regTabPtr->readKeyArray + regTabPtr->noOfKeyAttr; - regTabPtr->tabDescriptor = regTabPtr->attributeGroupDescriptor + regTabPtr->noOfAttributeGroups; +void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference, + Tablerec* const regTabPtr, + const Uint32* offset) +{ + Uint32* desc = &tableDescriptor[descriptorReference].tabDescr; + regTabPtr->readFunctionArray = (ReadFunction*)(desc + offset[0]); + regTabPtr->updateFunctionArray = (UpdateFunction*)(desc + offset[1]); + regTabPtr->charsetArray = (CHARSET_INFO**)(desc + offset[2]); + regTabPtr->readKeyArray = descriptorReference + offset[3]; + regTabPtr->attributeGroupDescriptor = descriptorReference + offset[4]; + regTabPtr->tabDescriptor = descriptorReference + offset[5]; }//Dbtup::setUpDescriptorReferences() Uint32 @@ -491,14 +533,18 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr) Uint32 descriptor = regTabPtr->readKeyArray; if (descriptor != RNIL) { ljam(); + Uint32 offset[10]; + getTabDescrOffsets(regTabPtr, offset); + regTabPtr->tabDescriptor = RNIL; regTabPtr->readKeyArray = RNIL; regTabPtr->readFunctionArray = NULL; regTabPtr->updateFunctionArray = NULL; + regTabPtr->charsetArray = NULL; regTabPtr->attributeGroupDescriptor= RNIL; - Uint32 sizeFunctionArrays = 2 * (regTabPtr->noOfAttr * sizeOfReadFunction()); - descriptor -= (sizeFunctionArrays + ZTD_SIZE); + // move to start of descriptor + descriptor -= offset[3]; Uint32 retNo = getTabDescrWord(descriptor + ZTD_DATASIZE); ndbrequire(getTabDescrWord(descriptor + ZTD_HEADER) == ZTD_TYPE_NORMAL); ndbrequire(retNo == getTabDescrWord((descriptor + retNo) - ZTD_TR_SIZE)); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index cc47ef7e78f..a4e7cb47249 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -35,6 +35,7 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) { Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE); Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr; + Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr; if (!AttributeDescriptor::getDynamic(attrDescriptor)) { if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) || (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) { @@ -54,6 +55,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) } else { 
ndbrequire(false); }//if + // replace read function of char attribute + if (AttributeOffset::getCharsetFlag(attrOffset)) { + ljam(); + regTabPtr->readFunctionArray[i] = &Dbtup::readCharNotNULL; + } } else { if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1) { ljam(); @@ -72,6 +78,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; }//if + // replace read function of char attribute + if (AttributeOffset::getCharsetFlag(attrOffset)) { + ljam(); + regTabPtr->readFunctionArray[i] = &Dbtup::readCharNULLable; + } }//if } else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) { if (!AttributeDescriptor::getNullable(attrDescriptor)) { @@ -146,10 +157,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr) /* ---------------------------------------------------------------- */ int Dbtup::readAttributes(Page* const pagePtr, Uint32 tupHeadOffset, - Uint32* inBuffer, + const Uint32* inBuffer, Uint32 inBufLen, Uint32* outBuffer, - Uint32 maxRead) + Uint32 maxRead, + bool xfrmFlag) { Tablerec* const regTabPtr = tabptr.p; Uint32 numAttributes = regTabPtr->noOfAttr; @@ -162,6 +174,7 @@ int Dbtup::readAttributes(Page* const pagePtr, tCheckOffset = regTabPtr->tupheadsize; tMaxRead = maxRead; tTupleHeader = &pagePtr->pageWord[tupHeadOffset]; + tXfrmFlag = xfrmFlag; ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE); while (inBufIndex < inBufLen) { @@ -542,6 +555,74 @@ Dbtup::readDynSmallVarSize(Uint32* outBuffer, return false; }//Dbtup::readDynSmallVarSize() + +bool +Dbtup::readCharNotNULL(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2) +{ + Uint32 indexBuf = tOutBufIndex; + Uint32 readOffset = AttributeOffset::getOffset(attrDes2); + Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor); + Uint32 newIndexBuf = indexBuf + attrNoOfWords; + Uint32 maxRead = tMaxRead; + + ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset); + if (newIndexBuf <= maxRead) { + ljam(); + ahOut->setDataSize(attrNoOfWords); + if (! tXfrmFlag) { + MEMCOPY_NO_WORDS(&outBuffer[indexBuf], + &tTupleHeader[readOffset], + attrNoOfWords); + } else { + ljam(); + Tablerec* regTabPtr = tabptr.p; + Uint32 i = AttributeOffset::getCharsetPos(attrDes2); + ndbrequire(i < tabptr.p->noOfCharsets); + // not const in MySQL + CHARSET_INFO* cs = tabptr.p->charsetArray[i]; + // XXX should strip Uint32 null padding + const unsigned nBytes = attrNoOfWords << 2; + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)&outBuffer[indexBuf], + nBytes, + (const uchar*)&tTupleHeader[readOffset], + nBytes); + // pad with ascii spaces + while (n < nBytes) + ((uchar*)&outBuffer[indexBuf])[n++] = 0x20; + } + tOutBufIndex = newIndexBuf; + return true; + } else { + ljam(); + terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; + return false; + } +} + +bool +Dbtup::readCharNULLable(Uint32* outBuffer, + AttributeHeader* ahOut, + Uint32 attrDescriptor, + Uint32 attrDes2) +{ + if (!nullFlagCheck(attrDes2)) { + ljam(); + return readCharNotNULL(outBuffer, + ahOut, + attrDescriptor, + attrDes2); + } else { + ljam(); + ahOut->setNULL(); + return true; + } +} + /* ---------------------------------------------------------------------- */ /* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. 
IT IS */ /* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */ diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp index d31ab43f108..642ba270760 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp @@ -31,12 +31,33 @@ /* memory attached to fragments (could be allocated per table */ /* instead. Performs its task by a buddy algorithm. */ /* **************************************************************** */ -Uint32 Dbtup::allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups) + +Uint32 +Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset) +{ + // belongs to configure.in + unsigned sizeOfPointer = sizeof(CHARSET_INFO*); + ndbrequire((sizeOfPointer & 0x3) == 0); + sizeOfPointer = (sizeOfPointer >> 2); + // do in layout order and return offsets (see DbtupMeta.cpp) + Uint32 allocSize = 0; + // magically aligned to 8 bytes + offset[0] = allocSize += ZTD_SIZE; + offset[1] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction(); + offset[2] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction(); + offset[3] = allocSize += regTabPtr->noOfCharsets * sizeOfPointer; + offset[4] = allocSize += regTabPtr->noOfKeyAttr; + offset[5] = allocSize += regTabPtr->noOfAttributeGroups; + allocSize += regTabPtr->noOfAttr * ZAD_SIZE; + allocSize += ZTD_TRAILER_SIZE; + // return number of words + return allocSize; +} + +Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset) { Uint32 reference = RNIL; - Uint32 allocSize = (ZTD_SIZE + ZTD_TRAILER_SIZE) + (noOfAttributes * ZAD_SIZE); - allocSize += noOfAttributeGroups; - allocSize += ((2 * noOfAttributes * sizeOfReadFunction()) + noOfKeyAttr); + Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset); /* ---------------------------------------------------------------- */ /* ALWAYS ALLOCATE A MULTIPLE OF 16 BYTES */ /* ---------------------------------------------------------------- */ diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index a93ff4566e7..c0b49364ee6 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -751,7 +751,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, &tableDescriptor[regTabPtr->readKeyArray].tabDescr, regTabPtr->noOfKeyAttr, keyBuffer, - ZATTR_BUFFER_SIZE); + ZATTR_BUFFER_SIZE, + true); ndbrequire(noPrimKey != (Uint32)-1); Uint32 numAttrsToRead; @@ -792,7 +793,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, &readBuffer[0], numAttrsToRead, mainBuffer, - ZATTR_BUFFER_SIZE); + ZATTR_BUFFER_SIZE, + true); ndbrequire(noMainWords != (Uint32)-1); } else { ljam(); @@ -816,7 +818,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, &readBuffer[0], numAttrsToRead, copyBuffer, - ZATTR_BUFFER_SIZE); + ZATTR_BUFFER_SIZE, + true); ndbrequire(noCopyWords != (Uint32)-1); if ((noMainWords == noCopyWords) && diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp index 36ac20611bb..8dca52cec04 100644 --- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp +++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp @@ -162,11 +162,6 @@ private: // AttributeHeader size is assumed to be 1 word static const unsigned AttributeHeaderSize = 1; - /* - * Array of pointers to TUP table attributes. Always read-on|y. - */ - typedef const Uint32** TableData; - /* * Logical tuple address, "local key". 
Identifies table tuples. */ @@ -330,11 +325,15 @@ private: /* * Attribute metadata. Size must be multiple of word size. + * + * Prefix comparison of char data must use strxfrm and binary + * comparison. The charset is currently unused. */ struct DescAttr { Uint32 m_attrDesc; // standard AttributeDescriptor Uint16 m_primaryAttrId; - Uint16 m_typeId; + unsigned m_typeId : 6; + unsigned m_charset : 10; }; static const unsigned DescAttrSize = sizeof(DescAttr) >> 2; @@ -553,9 +552,9 @@ private: void execREAD_CONFIG_REQ(Signal* signal); // utils void setKeyAttrs(const Frag& frag); - void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData); - void readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData); - void copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize); + void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData); + void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize); + void copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize); /* * DbtuxMeta.cpp @@ -622,17 +621,15 @@ private: /* * DbtuxSearch.cpp */ - void searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos); - void searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos); + void searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos); + void searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos); void searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos); /* * DbtuxCmp.cpp */ - int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize); - int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey); + int cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize); int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize); - int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey); /* * DbtuxDebug.cpp @@ -679,17 +676,27 @@ private: Uint32 c_typeOfStart; /* - * Array of index key attribute ids in AttributeHeader format. - * Includes fixed attribute sizes. This is global data set at - * operation start and is not passed as a parameter. + * Global data set at operation start. Unpacked from index metadata. + * Not passed as parameter to methods. Invalid across timeslices. + * + * TODO inline all into index metadata */ + + // index key attr ids with sizes in AttributeHeader format Data c_keyAttrs; - // buffer for search key data as pointers to TUP storage - TableData c_searchKey; + // pointers to index key comparison functions + NdbSqlUtil::Cmp** c_sqlCmp; - // buffer for current entry key data as pointers to TUP storage - TableData c_entryKey; + /* + * Other buffers used during the operation. 
+ */ + + // buffer for search key data with headers + Data c_searchKey; + + // buffer for current entry key data with headers + Data c_entryKey; // buffer for scan bounds and keyinfo (primary key) Data c_dataBuffer; diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp index debb5252386..549720cc17c 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp @@ -18,21 +18,24 @@ #include "Dbtux.hpp" /* - * Search key vs node prefix. + * Search key vs node prefix or entry * - * The comparison starts at given attribute position (in fact 0). The - * position is updated by number of equal initial attributes found. The - * prefix may be partial in which case CmpUnknown may be returned. + * The comparison starts at given attribute position. The position is + * updated by number of equal initial attributes found. The entry data + * may be partial in which case CmpUnknown may be returned. */ int -Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen) +Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen) { const unsigned numAttrs = frag.m_numAttrs; const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); // number of words of attribute data left unsigned len2 = maxlen; - // skip to right position in search key - searchKey += start; + // skip to right position in search key only + for (unsigned i = 0; i < start; i++) { + jam(); + searchKey += AttributeHeaderSize + searchKey.ah().getDataSize(); + } int ret = 0; while (start < numAttrs) { if (len2 <= AttributeHeaderSize) { @@ -41,22 +44,21 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons break; } len2 -= AttributeHeaderSize; - if (*searchKey != 0) { + if (! searchKey.ah().isNULL()) { if (! entryData.ah().isNULL()) { jam(); // current attribute const DescAttr& descAttr = descEnt.m_descAttr[start]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); // full data size const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc); ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize()); const unsigned size2 = min(size1, len2); len2 -= size2; // compare - const Uint32* const p1 = *searchKey; + NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start]; + const Uint32* const p1 = &searchKey[AttributeHeaderSize]; const Uint32* const p2 = &entryData[AttributeHeaderSize]; - ret = (*type.m_cmp)(p1, p2, size1, size2); + ret = (*cmp)(0, p1, p2, size1, size2); if (ret != 0) { jam(); break; @@ -75,7 +77,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons break; } } - searchKey += 1; + searchKey += AttributeHeaderSize + searchKey.ah().getDataSize(); entryData += AttributeHeaderSize + entryData.ah().getDataSize(); start++; } @@ -83,60 +85,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons } /* - * Search key vs tree entry. - * - * Start position is updated as in previous routine. 
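
The rewritten cmpSearchKey (and tuxReadAttrs/tuxReadPk earlier) no longer deal in pointers into TUP storage; they work on attribute data with headers, one AttributeHeader word followed by that attribute's data words, so stepping through a key means advancing by AttributeHeaderSize plus getDataSize(). A toy illustration of that buffer walk and of the header stripping done for keyinfo; the header encoding below is invented for the example and is not NDB's real AttributeHeader bit layout:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy header: data size in the low 16 bits (illustrative only).
static uint32_t hdr(uint32_t dataSize)       { return dataSize & 0xFFFF; }
static uint32_t dataSize(uint32_t header)    { return header & 0xFFFF; }

int main() {
  // Two attributes: a 1-word int and a 2-word char(8), each preceded by a header.
  std::vector<uint32_t> keyData = { hdr(1), 42u, hdr(2), 0x61616161u, 0x20202020u };

  // Walk the buffer the way cmpSearchKey skips to a start position.
  unsigned pos = 0, attrs = 0;
  while (pos < keyData.size()) {
    pos += 1 + dataSize(keyData[pos]);   // header word + data words
    attrs++;
  }
  assert(pos == keyData.size() && attrs == 2);

  // Strip the headers the way tuxReadPk compacts pk data for keyinfo.
  std::vector<uint32_t> pk;
  for (unsigned i = 0; i < keyData.size(); i += 1 + dataSize(keyData[i]))
    for (unsigned j = 0; j < dataSize(keyData[i]); j++)
      pk.push_back(keyData[i + 1 + j]);
  assert(pk.size() == 3);
  return 0;
}
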
- */ -int -Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey) -{ - const unsigned numAttrs = frag.m_numAttrs; - const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); - // skip to right position - searchKey += start; - entryKey += start; - int ret = 0; - while (start < numAttrs) { - if (*searchKey != 0) { - if (*entryKey != 0) { - jam(); - // current attribute - const DescAttr& descAttr = descEnt.m_descAttr[start]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); - // full data size - const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc); - // compare - const Uint32* const p1 = *searchKey; - const Uint32* const p2 = *entryKey; - ret = (*type.m_cmp)(p1, p2, size1, size1); - if (ret != 0) { - jam(); - break; - } - } else { - jam(); - // not NULL > NULL - ret = +1; - break; - } - } else { - if (*entryKey != 0) { - jam(); - // NULL < not NULL - ret = -1; - break; - } - } - searchKey += 1; - entryKey += 1; - start++; - } - return ret; -} - -/* - * Scan bound vs node prefix. + * Scan bound vs node prefix or entry. * * Compare lower or upper bound and index attribute data. The attribute * data may be partial in which case CmpUnknown may be returned. @@ -183,9 +132,8 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne jam(); // current attribute const unsigned index = boundInfo.ah().getAttributeId(); + ndbrequire(index < frag.m_numAttrs); const DescAttr& descAttr = descEnt.m_descAttr[index]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId); // full data size const unsigned size1 = boundInfo.ah().getDataSize(); @@ -193,9 +141,10 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne const unsigned size2 = min(size1, len2); len2 -= size2; // compare + NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index]; const Uint32* const p1 = &boundInfo[AttributeHeaderSize]; const Uint32* const p2 = &entryData[AttributeHeaderSize]; - int ret = (*type.m_cmp)(p1, p2, size1, size2); + int ret = (*cmp)(0, p1, p2, size1, size2); if (ret != 0) { jam(); return ret; @@ -244,72 +193,3 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne return +1; } } - -/* - * Scan bound vs tree entry. - */ -int -Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey) -{ - const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); - // direction 0-lower 1-upper - ndbrequire(dir <= 1); - // initialize type to equality - unsigned type = 4; - while (boundCount != 0) { - // get and skip bound type - type = boundInfo[0]; - boundInfo += 1; - if (! 
boundInfo.ah().isNULL()) { - if (*entryKey != 0) { - jam(); - // current attribute - const unsigned index = boundInfo.ah().getAttributeId(); - const DescAttr& descAttr = descEnt.m_descAttr[index]; - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); - ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined); - // full data size - const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc); - // compare - const Uint32* const p1 = &boundInfo[AttributeHeaderSize]; - const Uint32* const p2 = *entryKey; - int ret = (*type.m_cmp)(p1, p2, size1, size1); - if (ret != 0) { - jam(); - return ret; - } - } else { - jam(); - // not NULL > NULL - return +1; - } - } else { - jam(); - if (*entryKey != 0) { - jam(); - // NULL < not NULL - return -1; - } - } - boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize(); - entryKey += 1; - boundCount -= 1; - } - if (dir == 0) { - // lower bound - jam(); - if (type == 1) { - jam(); - return +1; - } - return -1; - } else { - // upper bound - jam(); - if (type == 3) { - jam(); - return -1; - } - return +1; - } -} diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp index 11f4f12b7f6..8d31d2c6a55 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp @@ -207,14 +207,10 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar& } // check ordering within node for (unsigned j = 1; j < node.getOccup(); j++) { - unsigned start = 0; const TreeEnt ent1 = node.getEnt(j - 1); const TreeEnt ent2 = node.getEnt(j); - if (j == 1) { - readKeyAttrs(frag, ent1, start, c_searchKey); - } else { - memcpy(c_searchKey, c_entryKey, frag.m_numAttrs << 2); - } + unsigned start = 0; + readKeyAttrs(frag, ent1, start, c_searchKey); readKeyAttrs(frag, ent2, start, c_entryKey); int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey); if (ret == 0) diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp index f6f1610c8c1..39cd8e25184 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp @@ -16,8 +16,6 @@ #define DBTUX_GEN_CPP #include "Dbtux.hpp" -#include -#include Dbtux::Dbtux(const Configuration& conf) : SimulatedBlock(DBTUX, conf), @@ -202,8 +200,9 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal) } // allocate buffers c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes); - c_searchKey = (TableData)allocRecord("c_searchKey", sizeof(Uint32*), MaxIndexAttributes); - c_entryKey = (TableData)allocRecord("c_entryKey", sizeof(Uint32*), MaxIndexAttributes); + c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes); + c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize); + c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize); c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1); // ack ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); @@ -218,7 +217,8 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal) void Dbtux::setKeyAttrs(const Frag& frag) { - Data keyAttrs = c_keyAttrs; // global + Data keyAttrs = c_keyAttrs; // global + NdbSqlUtil::Cmp** sqlCmp = c_sqlCmp; // global const unsigned numAttrs = frag.m_numAttrs; const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); for (unsigned i = 0; i < numAttrs; i++) { @@ -227,75 +227,71 @@ 
Dbtux::setKeyAttrs(const Frag& frag) // set attr id and fixed size keyAttrs.ah() = AttributeHeader(descAttr.m_primaryAttrId, size); keyAttrs += 1; + // set comparison method pointer + const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getTypeBinary(descAttr.m_typeId); + ndbrequire(sqlType.m_cmp != 0); + *(sqlCmp++) = sqlType.m_cmp; } } void -Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData) +Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData) { ConstData keyAttrs = c_keyAttrs; // global const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit]; const TupLoc tupLoc = ent.m_tupLoc; const Uint32 tupVersion = ent.m_tupVersion; ndbrequire(start < frag.m_numAttrs); - const unsigned numAttrs = frag.m_numAttrs - start; - // start applies to both keys and output data + const Uint32 numAttrs = frag.m_numAttrs - start; + // skip to start position in keyAttrs only keyAttrs += start; - keyData += start; - c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, numAttrs, keyAttrs, keyData); + int ret = c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, keyAttrs, numAttrs, keyData); jamEntry(); + // TODO handle error + ndbrequire(ret > 0); } void -Dbtux::readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData) +Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize) { const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit]; const TupLoc tupLoc = ent.m_tupLoc; - Uint32 size = 0; - c_tup->tuxReadKeys(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, &size, pkData); - ndbrequire(size != 0); - pkSize = size; + int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, pkData); + jamEntry(); + // TODO handle error + ndbrequire(ret > 0); + pkSize = ret; } /* - * Input is pointers to table attributes. Output is array of attribute - * data with headers. Copies whatever fits. + * Copy attribute data with headers. Input is all index key data. + * Copies whatever fits. 
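
setKeyAttrs above binds each index attribute to a binary comparison function (NdbSqlUtil::getTypeBinary), which is only correct because readKeyAttrs now receives the strxfrm'd image of char data from TUP. A toy stand-in for that idea, using toupper() where the kernel really calls cs->coll->strnxfrm, so this sketches the principle rather than the real collation code:

#include <cassert>
#include <cctype>
#include <cstring>
#include <string>

// Stand-in for strnxfrm: map to a form where plain byte comparison
// implements the collation (here: case-insensitive ASCII).
static std::string xfrm(const std::string& s) {
  std::string out(s);
  for (char& c : out) c = (char)std::toupper((unsigned char)c);
  return out;
}

int main() {
  // 'aAa' and 'AAA' are equal under a case-insensitive collation...
  assert(std::strcmp("aAa", "AAA") != 0);   // binary compare of raw data: not equal
  assert(xfrm("aAa") == xfrm("AAA"));       // binary compare of xfrm'd data: equal
  // ...and ordering is also decided on the transformed image.
  assert(xfrm("abc") < xfrm("B"));          // 'abc' sorts before 'B' case-insensitively
  return 0;
}
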
*/ void -Dbtux::copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2) +Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2) { - ConstData keyAttrs = c_keyAttrs; // global - const unsigned numAttrs = frag.m_numAttrs; + unsigned n = frag.m_numAttrs; unsigned len2 = maxlen2; - for (unsigned n = 0; n < numAttrs; n++) { + while (n != 0) { jam(); - const unsigned attrId = keyAttrs.ah().getAttributeId(); - const unsigned dataSize = keyAttrs.ah().getDataSize(); - const Uint32* const p1 = *data1; - if (p1 != 0) { + const unsigned dataSize = data1.ah().getDataSize(); + // copy header + if (len2 == 0) + return; + data2[0] = data1[0]; + data1 += 1; + data2 += 1; + len2 -= 1; + // copy data + for (unsigned i = 0; i < dataSize; i++) { if (len2 == 0) return; - data2.ah() = AttributeHeader(attrId, dataSize); - data2 += 1; - len2 -= 1; - unsigned n = dataSize; - for (unsigned i = 0; i < dataSize; i++) { - if (len2 == 0) - return; - *data2 = p1[i]; - data2 += 1; - len2 -= 1; - } - } else { - if (len2 == 0) - return; - data2.ah() = AttributeHeader(attrId, 0); - data2.ah().setNULL(); - data2 += 1; + data2[i] = data1[i]; len2 -= 1; } - keyAttrs += 1; - data1 += 1; + data1 += dataSize; + data2 += dataSize; + n -= 1; } #ifdef VM_TRACE memset(data2, DataFillByte, len2 << 2); diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp index 4bb3b940d91..3c0af3ca79d 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp @@ -178,19 +178,31 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal) descAttr.m_attrDesc = req->attrDescriptor; descAttr.m_primaryAttrId = req->primaryAttrId; descAttr.m_typeId = req->extTypeInfo & 0xFF; + descAttr.m_charset = (req->extTypeInfo >> 16); #ifdef VM_TRACE if (debugFlags & DebugMeta) { debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl; } #endif - // check if type is valid and has a comparison method - const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId); + // check that type is valid and has a binary comparison method + const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId); if (type.m_typeId == NdbSqlUtil::Type::Undefined || type.m_cmp == 0) { jam(); errorCode = TuxAddAttrRef::InvalidAttributeType; break; } +#ifdef dbtux_uses_charset + if (descAttr.m_charset != 0) { + CHARSET_INFO *cs = get_charset(descAttr.m_charset, MYF(0)); + // here use the non-binary type + if (! 
NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) { + jam(); + errorCode = TuxAddAttrRef::InvalidCharset; + break; + } + } +#endif if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) { jam(); // initialize tree header diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp index c4c33ff931f..5b161d3c4ce 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp @@ -112,6 +112,7 @@ Dbtux::execACC_SCANREQ(Signal* signal) void Dbtux::execTUX_BOUND_INFO(Signal* signal) { + jamEntry(); struct BoundInfo { unsigned offset; unsigned size; @@ -389,7 +390,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal) jam(); const TreeEnt ent = scan.m_scanPos.m_ent; // read tuple key - readTablePk(frag, ent, pkSize, pkData); + readTablePk(frag, ent, pkData, pkSize); // get read lock or exclusive lock AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); lockReq->returnCode = RNIL; @@ -480,7 +481,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal) jam(); if (pkSize == 0) { jam(); - readTablePk(frag, ent, pkSize, pkData); + readTablePk(frag, ent, pkData, pkSize); } } // conf signal diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp index 84048b308bc..bffbb8f5594 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp @@ -25,7 +25,7 @@ * TODO optimize for initial equal attrs in node min/max */ void -Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos) +Dbtux::searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos) { const TreeHead& tree = frag.m_tree; const unsigned numAttrs = frag.m_numAttrs; @@ -144,7 +144,7 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear * to it. */ void -Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos) +Dbtux::searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos) { const TreeHead& tree = frag.m_tree; const unsigned numAttrs = frag.m_numAttrs; diff --git a/ndb/src/kernel/blocks/dbtux/Times.txt b/ndb/src/kernel/blocks/dbtux/Times.txt index 84819ddcf97..03473353a52 100644 --- a/ndb/src/kernel/blocks/dbtux/Times.txt +++ b/ndb/src/kernel/blocks/dbtux/Times.txt @@ -83,7 +83,7 @@ optim 13 mc02/a 39 ms 59 ms 50 pct mc02/c 9 ms 12 ms 44 pct mc02/d 246 ms 289 ms 17 pct -[ case d: what happened to PK read performance? ] +[ case d: bug in testOIBasic killed PK read performance ] optim 14 mc02/a 41 ms 60 ms 44 pct mc02/b 46 ms 81 ms 73 pct @@ -91,5 +91,21 @@ optim 14 mc02/a 41 ms 60 ms 44 pct mc02/d 242 ms 285 ms 17 pct [ case b: do long keys suffer from many subroutine calls? ] +[ case d: bug in testOIBasic killed PK read performance ] + +none mc02/a 35 ms 60 ms 71 pct + mc02/b 42 ms 75 ms 76 pct + mc02/c 5 ms 12 ms 106 pct + mc02/d 165 ms 238 ms 44 pct + +[ johan re-installed mc02 as fedora gcc-3.3.2 ] +[ case c: table scan has improved... 
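The DbtuxMeta hunk above is where the charset number that DICT packs into bits 16-31 of extTypeInfo first becomes visible to TUX. A minimal editor's sketch of that unpacking and validation follows; the helper name is invented for illustration and the header names are assumptions, but the masking, get_charset() call and usable_in_ordered_index() check mirror the hunk.

#include <ndb_global.h>    // Uint32 (header names assumed)
#include <my_sys.h>        // MYF()
#include <m_ctype.h>       // CHARSET_INFO, get_charset
#include <NdbSqlUtil.hpp>

// Hypothetical helper, not part of the patch: accept an index attribute
// only if its collation can be used in an ordered index.
static bool
indexAttrUsable(Uint32 extTypeInfo)
{
  const Uint32 typeId   = extTypeInfo & 0xFF;   // same masking as above
  const Uint32 csNumber = extTypeInfo >> 16;    // charset number, 0 = none
  if (csNumber == 0)
    return true;                                // plain binary comparison
  CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
  return cs != 0 && NdbSqlUtil::usable_in_ordered_index(typeId, cs);
}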
] + +charsets mc02/a 35 ms 60 ms 71 pct + mc02/b 42 ms 84 ms 97 pct + mc02/c 5 ms 12 ms 109 pct + mc02/d 190 ms 236 ms 23 pct + +[ case b: TUX can no longer use pointers to TUP data ] vim: set et: diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index 491733975a8..20844db75b6 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -56,6 +56,7 @@ const char programName[] = "NDB Kernel"; NDB_MAIN(ndb_kernel){ + ndb_init(); // Print to stdout/console g_eventLogger.createConsoleHandler(); g_eventLogger.setCategory("NDB"); diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 3099c71b792..b7054a1bf22 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -15,7 +15,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include #include "Configuration.hpp" #include @@ -105,7 +104,6 @@ Configuration::init(int argc, const char** argv){ } // check for depricated flag '-i' - my_init(); #ifndef DBUG_OFF if (debug_option) DBUG_PUSH(debug_option); @@ -506,7 +504,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ for(unsigned j = 0; jsetLogLevel((LogLevel::EventCategory)j, tmp); } } diff --git a/ndb/src/kernel/vm/MetaData.hpp b/ndb/src/kernel/vm/MetaData.hpp index f6a941e8f9f..11e262664c1 100644 --- a/ndb/src/kernel/vm/MetaData.hpp +++ b/ndb/src/kernel/vm/MetaData.hpp @@ -107,6 +107,9 @@ public: /* Number of primary key attributes (should be computed) */ Uint16 noOfPrimkey; + /* Number of distinct character sets (computed) */ + Uint16 noOfCharsets; + /* Length of primary key in words (should be computed) */ /* For ordered index this is tree node size in words */ Uint16 tupKeyLength; diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 08b83a8d750..fccd5c7983b 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -954,13 +954,52 @@ struct ndb_mgm_event_categories { const char* name; enum ndb_mgm_event_category category; +} categories[] = { + { "STARTUP", NDB_MGM_EVENT_CATEGORY_STARTUP }, + { "SHUTDOWN", NDB_MGM_EVENT_CATEGORY_SHUTDOWN }, + { "STATISTICS", NDB_MGM_EVENT_CATEGORY_STATISTIC }, + { "NODERESTART", NDB_MGM_EVENT_CATEGORY_NODE_RESTART }, + { "CONNECTION", NDB_MGM_EVENT_CATEGORY_CONNECTION }, + { "CHECKPOINT", NDB_MGM_EVENT_CATEGORY_CHECKPOINT }, + { "DEBUG", NDB_MGM_EVENT_CATEGORY_DEBUG }, + { "INFO", NDB_MGM_EVENT_CATEGORY_INFO }, + { "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR }, + { "GREP", NDB_MGM_EVENT_CATEGORY_GREP }, + { "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP }, + { 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY } }; +extern "C" +ndb_mgm_event_category +ndb_mgm_match_event_category(const char * status) +{ + if(status == 0) + return NDB_MGM_ILLEGAL_EVENT_CATEGORY; + + for(int i = 0; categories[i].name !=0 ; i++) + if(strcmp(status, categories[i].name) == 0) + return categories[i].category; + + return NDB_MGM_ILLEGAL_EVENT_CATEGORY; +} + +extern "C" +const char * +ndb_mgm_get_event_category_string(enum ndb_mgm_event_category status) +{ + int i; + for(i = 0; categories[i].name != 0; i++) + if(categories[i].category == status) + return categories[i].name; + + return 0; +} + extern "C" int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId, - /*enum ndb_mgm_event_category*/ - char * category, int level, + enum ndb_mgm_event_category cat, + int level, struct ndb_mgm_reply* /*reply*/) { SET_ERROR(handle, NDB_MGM_NO_ERROR, @@ -975,14 +1014,14 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId, 
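The mgmapi.cpp hunk above introduces a table-driven mapping between event category names and the ndb_mgm_event_category enum. A small usage sketch, assuming the two new helpers are declared in mgmapi.h:

#include <mgmapi.h>   // assumed to declare the helpers added above
#include <cstdio>

// Editor's example only: round-trip a category name through the new helpers.
static void
show_category(const char* name)
{
  ndb_mgm_event_category cat = ndb_mgm_match_event_category(name);
  if (cat == NDB_MGM_ILLEGAL_EVENT_CATEGORY) {
    printf("unknown category: %s\n", name);
    return;
  }
  printf("%s -> %d -> %s\n", name, (int)cat,
         ndb_mgm_get_event_category_string(cat));
}
// e.g. show_category("BACKUP") prints the enum value and maps it back to "BACKUP".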
Properties args; args.put("node", nodeId); - args.put("category", category); + args.put("category", cat); args.put("level", level); - + const Properties *reply; reply = ndb_mgm_call(handle, clusterlog_reply, "set cluster loglevel", &args); CHECK_REPLY(reply, -1); - + BaseString result; reply->get("result", result); if(strcmp(result.c_str(), "Ok") != 0) { @@ -997,8 +1036,8 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId, extern "C" int ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, - /*enum ndb_mgm_event_category category*/ - char * category, int level, + enum ndb_mgm_event_category category, + int level, struct ndb_mgm_reply* /*reply*/) { SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_set_loglevel_node"); @@ -1030,6 +1069,48 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, return 0; } +extern "C" +int +ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]) +{ + SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event"); + const ParserRow stat_reply[] = { + MGM_CMD("listen event", NULL, ""), + MGM_ARG("result", Int, Mandatory, "Error message"), + MGM_ARG("msg", String, Optional, "Error message"), + MGM_END() + }; + CHECK_HANDLE(handle, -1); + + SocketClient s(handle->hostname, handle->port); + const NDB_SOCKET_TYPE sockfd = s.connect(); + if (sockfd < 0) { + setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, + "Unable to connect to"); + return -1; + } + + Properties args; + { + BaseString tmp; + for(int i = 0; filter[i] != 0; i += 2){ + tmp.appfmt("%d=%d ", filter[i+1], filter[i]); + } + args.put("filter", tmp.c_str()); + } + + int tmp = handle->socket; + handle->socket = sockfd; + + const Properties *reply; + reply = ndb_mgm_call(handle, stat_reply, "listen event", &args); + + handle->socket = tmp; + + CHECK_REPLY(reply, -1); + return sockfd; +} + extern "C" int ndb_mgm_get_stat_port(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/) diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index 91d057f8c30..fbb74d7c151 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -611,9 +611,9 @@ CommandInterpreter::executeHelp(char* parameters) << endl; ndbout << " = "; - for(Uint32 i = 0; inode_states[i].node_id; else { - ndbout << "Unable to locate management server, shutdown manually with #STOP" + ndbout << "Unable to locate management server, " + << "shutdown manually with STOP" << endl; + return; } } } @@ -721,11 +723,13 @@ const char *status_string(ndb_mgm_node_status status) static void print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it, - const char *proc_name, int no_proc, ndb_mgm_node_type type, int master_id) + const char *proc_name, int no_proc, ndb_mgm_node_type type, + int master_id) { int i; ndbout << "[" << proc_name - << "(" << ndb_mgm_get_node_type_string(type) << ")]\t" << no_proc << " node(s)" << endl; + << "(" << ndb_mgm_get_node_type_string(type) << ")]\t" + << no_proc << " node(s)" << endl; for(i=0; i < state->no_of_nodes; i++) { struct ndb_mgm_node_state *node_state= &(state->node_states[i]); if(node_state->node_type == type) { @@ -733,7 +737,9 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it, ndbout << "id=" << node_id; if(node_state->version != 0) { const char *hostname= node_state->connect_address; - if (hostname == 0 || strlen(hostname) == 0 || strcmp(hostname,"0.0.0.0") == 0) + if (hostname == 0 + || strlen(hostname) == 0 + || 
strcmp(hostname,"0.0.0.0") == 0) ndbout << " "; else ndbout << "\t@" << hostname; @@ -761,7 +767,8 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it, ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &config_hostname); if (config_hostname == 0 || config_hostname[0] == 0) config_hostname= "any host"; - ndbout << " (not connected, accepting connect from " << config_hostname << ")" << endl; + ndbout << " (not connected, accepting connect from " + << config_hostname << ")" << endl; } } } @@ -1240,55 +1247,40 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters, { connect(); (void) all; - (void) parameters; - SetLogLevelOrd logLevel; logLevel.clear(); - LogLevel::EventCategory cat; - int level; - if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) { - for(Uint32 i = 0; i 15){ - ndbout << "Invalid loglevel specification row, level 0-15" << endl; - free(tmpString); - return ; - } - logLevel.setLogLevel(cat, level); - - item = strtok_r(NULL, ", ", &tmpPtr); - } - free(tmpString); + BaseString tmp(parameters); + Vector spec; + tmp.split(spec, "="); + if(spec.size() != 2){ + ndbout << "Invalid loglevel specification: " << parameters << endl; + return; } + spec[0].trim().ndb_toupper(); + int category = ndb_mgm_match_event_category(spec[0].c_str()); + if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){ + category = atoi(spec[0].c_str()); + if(category < NDB_MGM_MIN_EVENT_CATEGORY || + category > NDB_MGM_MAX_EVENT_CATEGORY){ + ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl; + return; + } + } + + int level = atoi(spec[1].c_str()); + if(level < 0 || level > 15){ + ndbout << "Invalid level: " << spec[1].c_str() << endl; + return; + } + struct ndb_mgm_reply reply; int result; result = ndb_mgm_set_loglevel_node(m_mgmsrv, - processId, // fast fix - pekka - (char*)EventLogger::getEventCategoryName(cat), + processId, + (ndb_mgm_event_category)category, level, &reply); - + if (result < 0) { ndbout_c("Executing LOGLEVEL on node %d failed.", processId); printError(); @@ -1296,7 +1288,7 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters, ndbout << "Executing LOGLEVEL on node " << processId << " OK!" 
<< endl; } - + } //***************************************************************************** @@ -1626,54 +1618,41 @@ CommandInterpreter::executeEventReporting(int processId, bool all) { connect(); - SetLogLevelOrd logLevel; logLevel.clear(); - char categoryTxt[255]; - int level; - LogLevel::EventCategory cat; - if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) { - for(Uint32 i = 0; i 15){ - ndbout << "Invalid loglevel specification row, level 0-15" << endl; - free(tmpString); - return ; - } - logLevel.setLogLevel(cat, level); - - item = strtok_r(NULL, ", ", &tmpPtr); - } - free(tmpString); + BaseString tmp(parameters); + Vector spec; + tmp.split(spec, "="); + if(spec.size() != 2){ + ndbout << "Invalid loglevel specification: " << parameters << endl; + return; } + + spec[0].trim().ndb_toupper(); + int category = ndb_mgm_match_event_category(spec[0].c_str()); + if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){ + category = atoi(spec[0].c_str()); + if(category < NDB_MGM_MIN_EVENT_CATEGORY || + category > NDB_MGM_MAX_EVENT_CATEGORY){ + ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl; + return; + } + } + + int level = atoi(spec[1].c_str()); + if(level < 0 || level > 15){ + ndbout << "Invalid level: " << spec[1].c_str() << endl; + return; + } + + struct ndb_mgm_reply reply; int result; - result = - ndb_mgm_set_loglevel_clusterlog(m_mgmsrv, - processId, // fast fix - pekka - (char*) - EventLogger::getEventCategoryName(cat), - level, - &reply); + result = ndb_mgm_set_loglevel_clusterlog(m_mgmsrv, + processId, // fast fix - pekka + (ndb_mgm_event_category)category, + level, + &reply); if (result != 0) { ndbout_c("Executing CLUSTERLOG on node %d failed", processId); @@ -1693,13 +1672,45 @@ CommandInterpreter::executeStartBackup(char* /*parameters*/) connect(); struct ndb_mgm_reply reply; unsigned int backupId; + + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; + int fd = ndb_mgm_listen_event(m_mgmsrv, filter); int result = ndb_mgm_start_backup(m_mgmsrv, &backupId, &reply); if (result != 0) { ndbout << "Start of backup failed" << endl; printError(); - } else { - ndbout << "Backup started. Backup id " << backupId << "." 
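With the executeLogLevel/executeEventReporting rewrites above, the category reaches the API as an enum rather than a string. A hedged sketch of the resulting call, roughly equivalent to a "CLUSTERLOG BACKUP=15" command for one node; the wrapper name is invented:

#include <mgmapi.h>

static int
set_backup_cluster_loglevel(NdbMgmHandle handle, int nodeId)
{
  struct ndb_mgm_reply reply;
  // level must be in 0-15, as checked by the command interpreter above
  return ndb_mgm_set_loglevel_clusterlog(handle, nodeId,
                                         NDB_MGM_EVENT_CATEGORY_BACKUP,
                                         15, &reply);
}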
<< endl; + close(fd); + return; } + + char *tmp; + char buf[1024]; + { + SocketInputStream in(fd); + int count = 0; + do { + tmp = in.gets(buf, 1024); + if(tmp) + { + ndbout << tmp; + int id; + if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){ + count++; + } + } + } while(count < 2); + } + + SocketInputStream in(fd, 10); + do { + tmp = in.gets(buf, 1024); + if(tmp && tmp[0] != 0) + { + ndbout << tmp; + } + } while(tmp && tmp[0] != 0); + + close(fd); } void diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp index df6659df0b1..69f968677cd 100644 --- a/ndb/src/mgmclient/main.cpp +++ b/ndb/src/mgmclient/main.cpp @@ -44,6 +44,7 @@ handler(int sig){ } int main(int argc, const char** argv){ + ndb_init(); int optind = 0; const char *_host = 0; int _port = 0; diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp index 316b6d5795e..2c2aeda21ed 100644 --- a/ndb/src/mgmsrv/CommandInterpreter.cpp +++ b/ndb/src/mgmsrv/CommandInterpreter.cpp @@ -52,7 +52,7 @@ static const char* helpTexts[] = { "{|ALL} CLUSTERLOG {=}+ Set log level for cluster log", "QUIT Quit management server", }; -static const int noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*); +static const unsigned noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*); static const char* helpTextShow = "SHOW prints NDB Cluster information\n\n" @@ -389,14 +389,14 @@ void CommandInterpreter::executeHelp(char* parameters) { << endl; ndbout << " = "; - for(i = 0; i = " << "0 - 15" << endl; @@ -831,12 +831,13 @@ void CommandInterpreter::executeStatus(int processId, //***************************************************************************** void CommandInterpreter::executeLogLevel(int processId, const char* parameters, bool all) { +#if 0 (void)all; // Don't want compiler warning SetLogLevelOrd logLevel; logLevel.clear(); if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) { - for(Uint32 i = 0; iget("NodeId1", &id1)); require(ctx.m_currentSection->get("NodeId2", &id2)); + require(ctx.m_currentSection->get("HostName1", &hostName1)); + require(ctx.m_currentSection->get("HostName2", &hostName2)); + DBUG_PRINT("info",("NodeId1=%d HostName1=\"%s\"",id1,hostName1)); + DBUG_PRINT("info",("NodeId2=%d HostName2=\"%s\"",id2,hostName2)); + if (id1 > id2) { Uint32 tmp= id1; + const char *tmp_name= hostName1; + hostName1= hostName2; id1= id2; + hostName2= tmp_name; id2= tmp; } const Properties * node; require(ctx.m_config->get("Node", id1, &node)); - BaseString hostname; - require(node->get("HostName", hostname)); + BaseString hostname(hostName1); + // require(node->get("HostName", hostname)); if (hostname.c_str()[0] == 0) { - ctx.reportError("Hostname required on nodeid %d since it will act as server.", id1); + ctx.reportError("Hostname required on nodeid %d since it will " + "act as server.", id1); DBUG_RETURN(false); } Uint32 port= 0; - if (!node->get("ServerPort", &port) && !ctx.m_userProperties.get("ServerPort_", id1, &port)) { + if (!node->get("ServerPort", &port) && + !ctx.m_userProperties.get("ServerPort_", id1, &port)) { Uint32 adder= 0; { BaseString server_port_adder(hostname); @@ -2932,7 +2944,8 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ Uint32 base= 0; if (!ctx.m_userProperties.get("ServerPortBase", &base)){ - if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) && + if(!(ctx.m_userDefaults && + ctx.m_userDefaults->get("PortNumber", &base)) && !ctx.m_systemDefaults->get("PortNumber", &base)) { base= 
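The rewritten executeStartBackup above now waits for completion by subscribing to BACKUP events instead of relying on a backup callback. A sketch of the same subscription from a plain mgmapi client, using read() where the command interpreter uses SocketInputStream:

#include <mgmapi.h>
#include <cstdio>
#include <unistd.h>

// Editor's example: filter is a 0-terminated list of (level, category) pairs,
// exactly as passed by executeStartBackup above.
static void
watch_backup_events(NdbMgmHandle handle)
{
  int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
  int fd = ndb_mgm_listen_event(handle, filter);
  if (fd < 0)
    return;                                          // could not connect
  char buf[512];
  ssize_t n;
  while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) { // until the server closes
    buf[n] = 0;
    fputs(buf, stdout);
  }
  close(fd);
}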
strtoll(NDB_BASE_PORT,0,0)+2; // ctx.reportError("Cannot retrieve base port number"); @@ -2945,12 +2958,15 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ } if(ctx.m_currentSection->contains("PortNumber")) { - ndbout << "PortNumber should no longer be specificied per connection, please remove from config. Will be changed to " << port << endl; + ndbout << "PortNumber should no longer be specificied " + << "per connection, please remove from config. " + << "Will be changed to " << port << endl; ctx.m_currentSection->put("PortNumber", port, true); } else ctx.m_currentSection->put("PortNumber", port); - DBUG_PRINT("info", ("connection %d-%d port %d host %s", id1, id2, port, hostname.c_str())); + DBUG_PRINT("info", ("connection %d-%d port %d host %s", + id1, id2, port, hostname.c_str())); DBUG_RETURN(true); } diff --git a/ndb/src/mgmsrv/Makefile.am b/ndb/src/mgmsrv/Makefile.am index 8fa9ec5f63e..36cb7f87d3d 100644 --- a/ndb/src/mgmsrv/Makefile.am +++ b/ndb/src/mgmsrv/Makefile.am @@ -12,8 +12,6 @@ ndb_mgmd_SOURCES = \ main.cpp \ Services.cpp \ convertStrToInt.cpp \ - NodeLogLevel.cpp \ - NodeLogLevelList.cpp \ SignalQueue.cpp \ MgmtSrvrConfig.cpp \ ConfigInfo.cpp \ diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 8380f3fd86a..944eb47c618 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include +#include #include "MgmtSrvr.hpp" #include "MgmtErrorReporter.hpp" @@ -45,7 +45,6 @@ #include #include -#include "NodeLogLevel.hpp" #include #include @@ -62,71 +61,16 @@ #endif extern int global_flag_send_heartbeat_now; - -static -void -CmdBackupCallback(const MgmtSrvr::BackupEvent & event) -{ - char str[255]; - - ndbout << endl; - - bool ok = false; - switch(event.Event){ - case MgmtSrvr::BackupEvent::BackupStarted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d started", event.Started.BackupId); - break; - case MgmtSrvr::BackupEvent::BackupFailedToStart: - ok = true; - snprintf(str, sizeof(str), - "Backup failed to start (Error %d)", - event.FailedToStart.ErrorCode); - break; - case MgmtSrvr::BackupEvent::BackupCompleted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d completed", - event.Completed.BackupId); - ndbout << str << endl; - - snprintf(str, sizeof(str), - " StartGCP: %d StopGCP: %d", - event.Completed.startGCP, event.Completed.stopGCP); - ndbout << str << endl; - - snprintf(str, sizeof(str), - " #Records: %d #LogRecords: %d", - event.Completed.NoOfRecords, event.Completed.NoOfLogRecords); - ndbout << str << endl; - - snprintf(str, sizeof(str), - " Data: %d bytes Log: %d bytes", - event.Completed.NoOfBytes, event.Completed.NoOfLogBytes); - break; - case MgmtSrvr::BackupEvent::BackupAborted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d has been aborted reason %d", - event.Aborted.BackupId, - event.Aborted.Reason); - break; - } - if(!ok){ - snprintf(str, sizeof(str), "Unknown backup event: %d", event.Event); - } - ndbout << str << endl; -} - +extern int g_no_nodeid_checks; void * MgmtSrvr::logLevelThread_C(void* m) { MgmtSrvr *mgm = (MgmtSrvr*)m; - + my_thread_init(); mgm->logLevelThreadRun(); + my_thread_end(); NdbThread_Exit(0); /* NOTREACHED */ return 0; @@ -136,9 +80,10 @@ void * MgmtSrvr::signalRecvThread_C(void *m) { MgmtSrvr *mgm = (MgmtSrvr*)m; - + my_thread_init(); mgm->signalRecvThreadRun(); + my_thread_end(); NdbThread_Exit(0); /* NOTREACHED */ return 0; @@ -188,44 +133,66 @@ 
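Both MgmtSrvr helper threads above are now bracketed by my_thread_init()/my_thread_end(), consistent with the ndb_init() calls this patch adds to the main() functions. The general pattern for an NdbThread entry point that touches mysys, as an editor's sketch (header name assumed):

#include <my_pthread.h>   // my_thread_init / my_thread_end (header assumed)
#include <NdbThread.h>

extern "C" void*
example_thread_C(void* arg)
{
  my_thread_init();        // register the thread with mysys/DBUG
  // ... the actual thread body goes here ...
  my_thread_end();         // release per-thread mysys state
  NdbThread_Exit(0);
  /* NOTREACHED */
  return 0;
}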
MgmtSrvr::signalRecvThreadRun() EventLogger g_EventLogger; +static NdbOut& +operator<<(NdbOut& out, const LogLevel & ll) +{ + out << "[LogLevel: "; + for(size_t i = 0; inext()) != NULL) { - if (n->getNodeId() == _startedNodeId) { - setNodeLogLevel(_startedNodeId, n->getLogLevelOrd(), true); - } + SetLogLevelOrd ord; + + m_started_nodes.lock(); + while(m_started_nodes.size() > 0){ + Uint32 node = m_started_nodes[0]; + m_started_nodes.erase(0, false); + m_started_nodes.unlock(); + + setEventReportingLevelImpl(node, req); + + ord = m_nodeLogLevel[node]; + setNodeLogLevelImpl(node, ord); + + m_started_nodes.lock(); + } + m_started_nodes.unlock(); + + m_log_level_requests.lock(); + while(m_log_level_requests.size() > 0){ + req = m_log_level_requests[0]; + m_log_level_requests.erase(0, false); + m_log_level_requests.unlock(); + + LogLevel tmp; + tmp = req; + ndbout << "req3: " << tmp << endl; + + if(req.blockRef == 0){ + req.blockRef = _ownReference; + setEventReportingLevelImpl(0, req); + } else { + ord = req; + setNodeLogLevelImpl(req.blockRef, ord); } - // Cluster log - while ((n = _clusterLogLevelList->next()) != NULL) { - if (n->getNodeId() == _startedNodeId) { - setEventReportingLevel(_startedNodeId, n->getLogLevelOrd(), true); - } - } - _startedNodeId = 0; - - NdbMutex_Unlock(threadMutex); - - } // if (_startedNodeId != 0) { - + m_log_level_requests.lock(); + } + m_log_level_requests.unlock(); NdbSleep_MilliSleep(_logLevelThreadSleep); - } // while (!_isStopThread) - - NdbMutex_Destroy(threadMutex); -} - -void -MgmtSrvr::setStatisticsListner(StatisticsListner* listner) -{ - m_statisticsListner = listner; + } } void @@ -272,7 +239,7 @@ class ErrorItem { public: int _errorCode; - const BaseString _errorText; + const char * _errorText; }; bool @@ -429,79 +396,9 @@ MgmtSrvr::getPort() const { ndb_mgm_destroy_iterator(iter); - /***************** - * Set Stat Port * - *****************/ -#if 0 - if (!mgmProps->get("PortNumberStats", &tmp)){ - ndbout << "Could not find PortNumberStats in the configuration file." 
- << endl; - return false; - } - glob.port_stats = tmp; -#endif - -#if 0 - const char * host; - if(ndb_mgm_get_string_parameter(iter, mgmProps->get("ExecuteOnComputer", host)){ - ndbout << "Failed to find \"ExecuteOnComputer\" for my node" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - - const char * hostname; - { - const Properties * p; - char buf[255]; - snprintf(buf, sizeof(buf), "Computer_%s", host.c_str()); - if(!glob.cluster_config->get(buf, &p)){ - ndbout << "Failed to find computer " << host << " in config" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - if(!p->get("HostName", &hostname)){ - ndbout << "Failed to find \"HostName\" for computer " << host - << " in config" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - if(NdbHost_GetHostName(buf) != 0){ - ndbout << "Unable to get own hostname" << endl; - ndbout << "Unable to verify own hostname" << endl; - return false; - } - } - - const char * ip_address; - if(mgmProps->get("IpAddress", &ip_address)){ - glob.use_specific_ip = true; - glob.interface_name = strdup(ip_address); - return true; - } - - glob.interface_name = strdup(hostname); -#endif - return port; } -int -MgmtSrvr::getStatPort() const { -#if 0 - const Properties *mgmProps; - if(!getConfig()->get("Node", _ownNodeId, &mgmProps)) - return -1; - - int tmp = -1; - if(!mgmProps->get("PortNumberStats", (Uint32 *)&tmp)) - return -1; - - return tmp; -#else - return -1; -#endif -} - /* Constructor */ MgmtSrvr::MgmtSrvr(NodeId nodeId, const BaseString &configFilename, @@ -510,28 +407,24 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, _blockNumber(1), // Hard coded block number since it makes it easy to send // signals to other management servers. 
_ownReference(0), + m_allocated_resources(*this), theSignalIdleList(NULL), theWaitState(WAIT_SUBSCRIBE_CONF), - theConfCount(0), - m_allocated_resources(*this) { - + m_statisticsListner(this){ + DBUG_ENTER("MgmtSrvr::MgmtSrvr"); _config = NULL; - _isStatPortActive = false; - _isClusterLogStatActive = false; _isStopThread = false; _logLevelThread = NULL; _logLevelThreadSleep = 500; m_signalRecvThread = NULL; - _startedNodeId = 0; theFacade = 0; m_newConfig = NULL; m_configFilename = configFilename; - setCallback(CmdBackupCallback); m_localNdbConfigFilename = ndb_config_filename; m_nextConfigGenerationNumber = 0; @@ -583,21 +476,40 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, ndb_mgm_destroy_iterator(iter); } - m_statisticsListner = NULL; - - _nodeLogLevelList = new NodeLogLevelList(); - _clusterLogLevelList = new NodeLogLevelList(); - _props = NULL; - _ownNodeId= 0; NodeId tmp= nodeId; BaseString error_string; - if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, 0, 0, error_string)){ +#if 0 + char my_hostname[256]; + struct sockaddr_in tmp_addr; + SOCKET_SIZE_TYPE addrlen= sizeof(tmp_addr); + if (!g_no_nodeid_checks) { + if (gethostname(my_hostname, sizeof(my_hostname))) { + ndbout << "error: gethostname() - " << strerror(errno) << endl; + exit(-1); + } + if (Ndb_getInAddr(&(((sockaddr_in*)&tmp_addr)->sin_addr),my_hostname)) { + ndbout << "error: Ndb_getInAddr(" << my_hostname << ") - " + << strerror(errno) << endl; + exit(-1); + } + } + if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, + (struct sockaddr *)&tmp_addr, + &addrlen, error_string)){ ndbout << "Unable to obtain requested nodeid: " << error_string.c_str() << endl; exit(-1); } +#else + if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, + 0, 0, error_string)){ + ndbout << "Unable to obtain requested nodeid: " + << error_string.c_str() << endl; + exit(-1); + } +#endif _ownNodeId = tmp; @@ -610,6 +522,18 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, } } + { + MgmStatService::StatListener se; + se.m_socket = -1; + for(size_t t = 0; ttheClusterMgr->getNodeInfo(processId); - version = node.m_info.m_version; - if(theFacade->theClusterMgr->getNodeInfo(processId).connected) - if(m_versionRec.callback != 0) - m_versionRec.callback(processId, version, this,0); - else - if(m_versionRec.callback != 0) - m_versionRec.callback(processId, 0, this,0); - + if (getOwnNodeId() == processId) + { + version= NDB_VERSION; } - - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) { + else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_NDB) + { + ClusterMgr::Node node= theFacade->theClusterMgr->getNodeInfo(processId); + if(node.connected) + version= node.m_info.m_version; + else + version= 0; + } + else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API || + getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) + { return sendVersionReq(processId); } + if(m_versionRec.callback != 0) + m_versionRec.callback(processId, version, this,0); m_versionRec.inUse = false ; - return 0; + m_versionRec.version[processId]= version; + + return 0; } int @@ -1460,17 +1384,14 @@ MgmtSrvr::status(int processId, Uint32 * nodegroup, Uint32 * connectCount) { - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) { + if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API || + getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) { if(versionNode(processId, false,0,0) ==0) * version = m_versionRec.version[processId]; else * version = 0; } - if (getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) { - * version = NDB_VERSION; - } - const ClusterMgr::Node node = theFacade->theClusterMgr->getNodeInfo(processId); @@ 
-1540,175 +1461,72 @@ MgmtSrvr::status(int processId, return -1; } - - -//**************************************************************************** -//**************************************************************************** -int -MgmtSrvr::startStatisticEventReporting(int level) -{ - SetLogLevelOrd ll; - NodeId nodeId = 0; - - ll.clear(); - ll.setLogLevel(LogLevel::llStatistic, level); - - if (level > 0) { - _isStatPortActive = true; - } else { - _isStatPortActive = false; - - if (_isClusterLogStatActive) { - return 0; - } - } - - while (getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB)) { - setEventReportingLevelImpl(nodeId, ll); - } - - return 0; -} - -int -MgmtSrvr::setEventReportingLevel(int processId, const SetLogLevelOrd & ll, - bool isResend) -{ - for (Uint32 i = 0; i < ll.noOfEntries; i++) { - if (ll.theCategories[i] == LogLevel::llStatistic) { - if (ll.theLevels[i] > 0) { - _isClusterLogStatActive = true; - break; - } else { - _isClusterLogStatActive = false; - - if (_isStatPortActive) { - return 0; - } - break; - } - } // if (ll.theCategories - } // for (int i = 0 - - return setEventReportingLevelImpl(processId, ll, isResend); -} int MgmtSrvr::setEventReportingLevelImpl(int processId, - const SetLogLevelOrd & ll, - bool isResend) + const EventSubscribeReq& ll) { - Uint32 i; - for(i = 0; inext()) != NULL) { - if (n->getNodeId() == processId && - n->getCategory() == ll.theCategories[i]) { - - n->setLevel(ll.theLevels[i]); - found = true; - } - } - if (!found) { - _clusterLogLevelList->add(new NodeLogLevel(processId, ll)); - } - } - } - + int result = okToSendTo(processId, true); if (result != 0) { return result; } - NdbApiSignal* signal = getSignal(); - if (signal == NULL) { - return COULD_NOT_ALLOCATE_MEMORY; - } + NdbApiSignal signal(_ownReference); EventSubscribeReq * dst = - CAST_PTR(EventSubscribeReq, signal->getDataPtrSend()); - for(i = 0; itheCategories[i] = ll.theCategories[i]; - dst->theLevels[i] = ll.theLevels[i]; - } - - dst->noOfEntries = ll.noOfEntries; - dst->blockRef = _ownReference; + CAST_PTR(EventSubscribeReq, signal.getDataPtrSend()); - signal->set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ, - EventSubscribeReq::SignalLength); + * dst = ll; + + signal.set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ, + EventSubscribeReq::SignalLength); + + theFacade->lock_mutex(); + send(&signal, processId, NODE_TYPE_DB); + theFacade->unlock_mutex(); - result = sendSignal(processId, WAIT_SUBSCRIBE_CONF, signal, true); - if (result == -1) { - return SEND_OR_RECEIVE_FAILED; - } - else { - // Increment the conf counter - theConfCount++; - } - return 0; } //**************************************************************************** //**************************************************************************** int -MgmtSrvr::setNodeLogLevel(int processId, const SetLogLevelOrd & ll, - bool isResend) +MgmtSrvr::setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll) { - Uint32 i; - for(i = 0; inext()) != NULL) { - if (n->getNodeId() == processId && - n->getCategory() == ll.theCategories[i]) { - - n->setLevel(ll.theLevels[i]); - found = true; - } - } - if (!found) { - _clusterLogLevelList->add(new NodeLogLevel(processId, ll)); - } - } - } - int result = okToSendTo(processId, true); if (result != 0) { return result; } - NdbApiSignal* signal = getSignal(); - if (signal == NULL) { - return COULD_NOT_ALLOCATE_MEMORY; - } - - SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal->getDataPtrSend()); - - for(i = 0; itheCategories[i] = ll.theCategories[i]; - 
dst->theLevels[i] = ll.theLevels[i]; - } + NdbApiSignal signal(_ownReference); - dst->noOfEntries = ll.noOfEntries; + SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal.getDataPtrSend()); - signal->set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD, - SetLogLevelOrd::SignalLength); - - result = sendSignal(processId, NO_WAIT, signal, true); - if (result == -1) { - return SEND_OR_RECEIVE_FAILED; - } + * dst = ll; + + signal.set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD, + SetLogLevelOrd::SignalLength); + + theFacade->lock_mutex(); + theFacade->sendSignalUnCond(&signal, processId); + theFacade->unlock_mutex(); return 0; } +int +MgmtSrvr::send(NdbApiSignal* signal, Uint32 node, Uint32 node_type){ + Uint32 max = (node == 0) ? MAX_NODES : node + 1; + + for(; node < max; node++){ + while(nodeTypes[node] != (int)node_type && node < max) node++; + if(nodeTypes[node] != (int)node_type) + break; + theFacade->sendSignalUnCond(signal, node); + } + return 0; +} //**************************************************************************** //**************************************************************************** @@ -2003,7 +1821,7 @@ const char* MgmtSrvr::getErrorText(int errorCode) for (int i = 0; i < noOfErrorCodes; ++i) { if (errorCode == errorTable[i]._errorCode) { - return errorTable[i]._errorText.c_str(); + return errorTable[i]._errorText; } } @@ -2011,21 +1829,6 @@ const char* MgmtSrvr::getErrorText(int errorCode) return text; } -/***************************************************************************** - * Handle reception of various signals - *****************************************************************************/ - -int -MgmtSrvr::handleSTATISTICS_CONF(NdbApiSignal* signal) -{ - //ndbout << "MgmtSrvr::handleSTATISTICS_CONF" << endl; - - int x = signal->readData(1); - //ndbout << "MgmtSrvr::handleSTATISTICS_CONF, x: " << x << endl; - _statistics._test1 = x; - return 0; -} - void MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) { @@ -2049,51 +1852,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) } break; - case GSN_STATISTICS_CONF: - if (theWaitState != WAIT_STATISTICS) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - - return; - } - returnCode = handleSTATISTICS_CONF(signal); - if (returnCode != -1) { - theWaitState = NO_WAIT; - } - break; - - - case GSN_SET_VAR_CONF: - if (theWaitState != WAIT_SET_VAR) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - return; - } - theWaitState = NO_WAIT; - _setVarReqResult = 0; - break; - - case GSN_SET_VAR_REF: - if (theWaitState != WAIT_SET_VAR) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - return; - } - theWaitState = NO_WAIT; - _setVarReqResult = -1; - break; - case GSN_EVENT_SUBSCRIBE_CONF: - theConfCount--; // OK, we've received a conf message - if (theConfCount < 0) { - g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected " - "signal received, gsn %d, theWaitState = %d", - gsn, theWaitState); - theConfCount = 0; - } break; case GSN_EVENT_REP: @@ -2173,7 +1932,6 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) event.Completed.NoOfLogBytes = rep->noOfLogBytes; event.Completed.NoOfRecords = rep->noOfRecords; event.Completed.NoOfLogRecords = rep->noOfLogRecords; - event.Completed.stopGCP = rep->stopGCP; 
event.Completed.startGCP = rep->startGCP; event.Nodes = rep->nodes; @@ -2276,20 +2034,19 @@ void MgmtSrvr::handleStatus(NodeId nodeId, bool alive) { if (alive) { - _startedNodeId = nodeId; // Used by logLevelThreadRun() + m_started_nodes.push_back(nodeId); Uint32 theData[25]; theData[0] = EventReport::Connected; theData[1] = nodeId; + eventReport(_ownNodeId, theData); } else { handleStopReply(nodeId, 0); - theConfCount++; // Increment the event subscr conf count because - + Uint32 theData[25]; theData[0] = EventReport::Disconnected; theData[1] = nodeId; - + eventReport(_ownNodeId, theData); - g_EventLogger.info("Lost connection to node %d", nodeId); } } @@ -2337,32 +2094,42 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, SOCKET_SIZE_TYPE *client_addr_len, BaseString &error_string) { - Guard g(&f_node_id_mutex); -#if 0 - ndbout << "MgmtSrvr::getFreeNodeId type=" << type - << " *nodeid=" << *nodeId << endl; -#endif - - NodeBitmask connected_nodes(m_reserved_nodes); - if (theFacade && theFacade->theClusterMgr) { - for(Uint32 i = 0; i < MAX_NODES; i++) - if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) { - const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i); - if (node.connected) - connected_nodes.bitOR(node.m_state.m_connected_nodes); - } + DBUG_ENTER("MgmtSrvr::alloc_node_id"); + DBUG_PRINT("enter", ("nodeid=%d, type=%d, client_addr=%d", + *nodeId, type, client_addr)); + if (g_no_nodeid_checks) { + if (*nodeId == 0) { + error_string.appfmt("no-nodeid-ckecks set in manegment server.\n" + "node id must be set explicitly in connectstring"); + DBUG_RETURN(false); + } + DBUG_RETURN(true); + } + Guard g(&f_node_id_mutex); + int no_mgm= 0; + NodeBitmask connected_nodes(m_reserved_nodes); + for(Uint32 i = 0; i < MAX_NODES; i++) + { + if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB && + theFacade && theFacade->theClusterMgr) { + const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i); + if (node.connected) { + connected_nodes.bitOR(node.m_state.m_connected_nodes); + } + } else if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM) + no_mgm++; } - bool found_matching_id= false; bool found_matching_type= false; bool found_free_node= false; - const char *config_hostname = 0; + unsigned id_found= 0; + const char *config_hostname= 0; struct in_addr config_addr= {0}; int r_config_addr= -1; unsigned type_c= 0; - ndb_mgm_configuration_iterator iter(*(ndb_mgm_configuration *)_config->m_configValues, - CFG_SECTION_NODE); + ndb_mgm_configuration_iterator + iter(*(ndb_mgm_configuration *)_config->m_configValues, CFG_SECTION_NODE); for(iter.first(); iter.valid(); iter.next()) { unsigned tmp= 0; if(iter.get(CFG_NODE_ID, &tmp)) abort(); @@ -2370,15 +2137,16 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, continue; found_matching_id= true; if(iter.get(CFG_TYPE_OF_SECTION, &type_c)) abort(); - if(type_c != type) + if(type_c != (unsigned)type) continue; found_matching_type= true; if (connected_nodes.get(tmp)) continue; found_free_node= true; if(iter.get(CFG_NODE_HOST, &config_hostname)) abort(); - - if (config_hostname && config_hostname[0] != 0 && client_addr) { + if (config_hostname && config_hostname[0] == 0) + config_hostname= 0; + else if (client_addr) { // check hostname compatability const void *tmp_in= &(((sockaddr_in*)client_addr)->sin_addr); if((r_config_addr= Ndb_getInAddr(&config_addr, config_hostname)) != 0 @@ -2388,39 +2156,76 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, || memcmp(&tmp_addr, tmp_in, sizeof(config_addr)) != 0) { // not localhost #if 0 - ndbout << "MgmtSrvr::getFreeNodeId compare 
failed for \"" << config_hostname - << "\" id=" << tmp << endl; + ndbout << "MgmtSrvr::getFreeNodeId compare failed for \"" + << config_hostname + << "\" id=" << tmp << endl; #endif continue; } // connecting through localhost - // check if config_hostname match hostname - char my_hostname[256]; - if (gethostname(my_hostname, sizeof(my_hostname)) != 0) - continue; - if(Ndb_getInAddr(&tmp_addr, my_hostname) != 0 - || memcmp(&tmp_addr, &config_addr, sizeof(config_addr)) != 0) { - // no match + // check if config_hostname is local + if (!SocketServer::tryBind(0,config_hostname)) { continue; } } + } else { // client_addr == 0 + if (!SocketServer::tryBind(0,config_hostname)) { + continue; + } } - *nodeId= tmp; - if (client_addr) - m_connect_address[tmp]= ((struct sockaddr_in *)client_addr)->sin_addr; - else - Ndb_getInAddr(&(m_connect_address[tmp]), "localhost"); - m_reserved_nodes.set(tmp); -#if 0 - ndbout << "MgmtSrvr::getFreeNodeId found type=" << type - << " *nodeid=" << *nodeId << endl; -#endif - return true; + if (*nodeId != 0 || + type != NDB_MGM_NODE_TYPE_MGM || + no_mgm == 1) { // any match is ok + id_found= tmp; + break; + } + if (id_found) { // mgmt server may only have one match + error_string.appfmt("Ambiguous node id's %d and %d.\n" + "Suggest specifying node id in connectstring,\n" + "or specifying unique host names in config file.", + id_found, tmp); + DBUG_RETURN(false); + } + if (config_hostname == 0) { + error_string.appfmt("Ambiguity for node id %d.\n" + "Suggest specifying node id in connectstring,\n" + "or specifying unique host names in config file,\n" + "or specifying just one mgmt server in config file.", + tmp); + DBUG_RETURN(false); + } + id_found= tmp; // mgmt server matched, check for more matches + } + + if (id_found) + { + *nodeId= id_found; + DBUG_PRINT("info", ("allocating node id %d",*nodeId)); + { + int r= 0; + if (client_addr) + m_connect_address[id_found]= + ((struct sockaddr_in *)client_addr)->sin_addr; + else if (config_hostname) + r= Ndb_getInAddr(&(m_connect_address[id_found]), config_hostname); + else { + char name[256]; + r= gethostname(name, sizeof(name)); + if (r == 0) { + name[sizeof(name)-1]= 0; + r= Ndb_getInAddr(&(m_connect_address[id_found]), name); + } + } + if (r) + m_connect_address[id_found].s_addr= 0; + } + m_reserved_nodes.set(id_found); + DBUG_RETURN(true); } if (found_matching_type && !found_free_node) { - // we have a temporary error which might be due to that we have got the latest - // connect status from db-nodes. Force update. + // we have a temporary error which might be due to that + // we have got the latest connect status from db-nodes. Force update. 
global_flag_send_heartbeat_now= 1; } @@ -2429,7 +2234,8 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, const char *alias, *str; alias= ndb_mgm_get_node_type_alias_string(type, &str); type_string.assfmt("%s(%s)", alias, str); - alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c, &str); + alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c, + &str); type_c_string.assfmt("%s(%s)", alias, str); } @@ -2438,11 +2244,14 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, if (found_matching_type) if (found_free_node) error_string.appfmt("Connection done from wrong host ip %s.", - inet_ntoa(((struct sockaddr_in *)(client_addr))->sin_addr)); + inet_ntoa(((struct sockaddr_in *) + (client_addr))->sin_addr)); else - error_string.appfmt("No free node id found for %s.", type_string.c_str()); + error_string.appfmt("No free node id found for %s.", + type_string.c_str()); else - error_string.appfmt("No %s node defined in config file.", type_string.c_str()); + error_string.appfmt("No %s node defined in config file.", + type_string.c_str()); else error_string.append("No nodes defined in config file."); } else { @@ -2451,19 +2260,23 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, if (found_free_node) { // have to split these into two since inet_ntoa overwrites itself error_string.appfmt("Connection with id %d done from wrong host ip %s,", - *nodeId, inet_ntoa(((struct sockaddr_in *)(client_addr))->sin_addr)); + *nodeId, inet_ntoa(((struct sockaddr_in *) + (client_addr))->sin_addr)); error_string.appfmt(" expected %s(%s).", config_hostname, - r_config_addr ? "lookup failed" : inet_ntoa(config_addr)); + r_config_addr ? + "lookup failed" : inet_ntoa(config_addr)); } else - error_string.appfmt("Id %d already allocated by another node.", *nodeId); + error_string.appfmt("Id %d already allocated by another node.", + *nodeId); else error_string.appfmt("Id %d configured as %s, connect attempted as %s.", - *nodeId, type_c_string.c_str(), type_string.c_str()); + *nodeId, type_c_string.c_str(), + type_string.c_str()); else - error_string.appfmt("No node defined with id=%d in config file.", *nodeId); + error_string.appfmt("No node defined with id=%d in config file.", + *nodeId); } - - return false; + DBUG_RETURN(false); } bool @@ -2483,91 +2296,23 @@ MgmtSrvr::getNextNodeId(NodeId * nodeId, enum ndb_mgm_node_type type) const return true; } +#include "Services.hpp" + void MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData) { const EventReport * const eventReport = (EventReport *)&theData[0]; - + EventReport::EventType type = eventReport->getEventType(); - - if (type == EventReport::TransReportCounters || - type == EventReport::OperationReportCounters) { - - if (_isClusterLogStatActive) { - g_EventLogger.log(type, theData, nodeId); - } - - if (_isStatPortActive) { - char theTime[128]; - struct tm* tm_now; - time_t now; - now = time((time_t*)NULL); -#ifdef NDB_WIN32 - tm_now = localtime(&now); -#else - tm_now = gmtime(&now); -#endif - - snprintf(theTime, sizeof(theTime), - STATISTIC_DATE, - tm_now->tm_year + 1900, - tm_now->tm_mon, - tm_now->tm_mday, - tm_now->tm_hour, - tm_now->tm_min, - tm_now->tm_sec); - - char str[255]; - - if (type == EventReport::TransReportCounters) { - snprintf(str, sizeof(str), - STATISTIC_LINE, - theTime, - (int)now, - nodeId, - theData[1], - theData[2], - theData[3], - // theData[4], simple reads - theData[5], - theData[6], - theData[7], - theData[8]); - } else if (type == EventReport::OperationReportCounters) { - snprintf(str, sizeof(str), - OP_STATISTIC_LINE, 
- theTime, - (int)now, - nodeId, - theData[1]); - } - - if(m_statisticsListner != 0){ - m_statisticsListner->println_statistics(str); - } - } - - return; - - } // if (type == - // Log event - g_EventLogger.log(type, theData, nodeId); - + g_EventLogger.log(type, theData, nodeId, + &m_statisticsListner.m_clients[0].m_logLevel); + m_statisticsListner.log(type, theData, nodeId); } /*************************************************************************** * Backup ***************************************************************************/ - -MgmtSrvr::BackupCallback -MgmtSrvr::setCallback(BackupCallback aCall) -{ - BackupCallback ret = m_backupCallback; - m_backupCallback = aCall; - return ret; -} - int MgmtSrvr::startBackup(Uint32& backupId, bool waitCompleted) { @@ -2674,102 +2419,8 @@ MgmtSrvr::abortBackup(Uint32 backupId) void MgmtSrvr::backupCallback(BackupEvent & event) { - char str[255]; - - bool ok = false; - switch(event.Event){ - case BackupEvent::BackupStarted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d started", event.Started.BackupId); - break; - case BackupEvent::BackupFailedToStart: - ok = true; - snprintf(str, sizeof(str), - "Backup failed to start (Backup error %d)", - event.FailedToStart.ErrorCode); - break; - case BackupEvent::BackupCompleted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d completed", - event.Completed.BackupId); - g_EventLogger.info(str); - - snprintf(str, sizeof(str), - " StartGCP: %d StopGCP: %d", - event.Completed.startGCP, event.Completed.stopGCP); - g_EventLogger.info(str); - - snprintf(str, sizeof(str), - " #Records: %d #LogRecords: %d", - event.Completed.NoOfRecords, event.Completed.NoOfLogRecords); - g_EventLogger.info(str); - - snprintf(str, sizeof(str), - " Data: %d bytes Log: %d bytes", - event.Completed.NoOfBytes, event.Completed.NoOfLogBytes); - break; - case BackupEvent::BackupAborted: - ok = true; - snprintf(str, sizeof(str), - "Backup %d has been aborted reason %d", - event.Aborted.BackupId, - event.Aborted.Reason); - break; - } - if(!ok){ - snprintf(str, sizeof(str), - "Unknown backup event: %d", - event.Event); - - } - g_EventLogger.info(str); - - switch (theWaitState){ - case WAIT_BACKUP_STARTED: - switch(event.Event){ - case BackupEvent::BackupStarted: - case BackupEvent::BackupFailedToStart: - m_lastBackupEvent = event; - theWaitState = NO_WAIT; - break; - default: - snprintf(str, sizeof(str), - "Received event %d in unexpected state WAIT_BACKUP_STARTED", - event.Event); - g_EventLogger.info(str); - return; - } - - break; - case WAIT_BACKUP_COMPLETED: - switch(event.Event){ - case BackupEvent::BackupCompleted: - case BackupEvent::BackupAborted: - case BackupEvent::BackupFailedToStart: - m_lastBackupEvent = event; - theWaitState = NO_WAIT; - break; - default: - snprintf(str, sizeof(str), - "Received event %d in unexpected state WAIT_BACKUP_COMPLETED", - event.Event); - g_EventLogger.info(str); - return; - } - break; - default: - snprintf(str, sizeof(str), "Received event %d in unexpected state = %d", - event.Event, theWaitState); - g_EventLogger.info(str); - return; - - } - - if(m_backupCallback != 0){ - (* m_backupCallback)(event); - } + m_lastBackupEvent = event; + theWaitState = NO_WAIT; } @@ -2957,15 +2608,15 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, switch(p_type){ case 0: res = i2.set(param, val_32); - ndbout_c("Updateing node %d param: %d to %d", node, param, val_32); + ndbout_c("Updating node %d param: %d to %d", node, param, val_32); break; case 1: res = i2.set(param, val_64); - 
ndbout_c("Updateing node %d param: %d to %Ld", node, param, val_32); + ndbout_c("Updating node %d param: %d to %Ld", node, param, val_32); break; case 2: res = i2.set(param, val_char); - ndbout_c("Updateing node %d param: %d to %s", node, param, val_char); + ndbout_c("Updating node %d param: %d to %s", node, param, val_char); break; default: abort(); @@ -2981,3 +2632,7 @@ template class Vector; #if __SUNPRO_CC != 0x560 template bool SignalQueue::waitFor(Vector&, SigMatch*&, NdbApiSignal*&, unsigned); #endif + +template class MutexVector; +template class MutexVector; +template class MutexVector; diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index d7f9f7a1af3..3f3e98dbcc1 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -28,8 +28,8 @@ #include #include "SignalQueue.hpp" #include - -#include "NodeLogLevelList.hpp" +#include +#include /** * @desc Block number for Management server. @@ -43,6 +43,29 @@ class Config; class SetLogLevelOrd; class SocketServer; +class MgmStatService : public EventLoggerBase +{ + friend class MgmtSrvr; +public: + struct StatListener : public EventLoggerBase { + NDB_SOCKET_TYPE m_socket; + }; + +private: + class MgmtSrvr * m_mgmsrv; + MutexVector m_clients; +public: + MgmStatService(class MgmtSrvr * m) : m_clients(5) { + m_mgmsrv = m; + } + + void add_listener(const StatListener&); + + void log(int eventType, const Uint32* theData, NodeId nodeId); + + void stopSessions(); +}; + /** * @class MgmtSrvr * @brief Main class for the management server. @@ -63,11 +86,6 @@ class SocketServer; class MgmtSrvr { public: - class StatisticsListner { - public: - virtual void println_statistics(const BaseString &s) = 0; - }; - // some compilers need all of this class Allocated_resources; friend class Allocated_resources; @@ -84,11 +102,6 @@ public: NodeBitmask m_reserved_nodes; }; - /** - * Set a reference to the socket server. - */ - void setStatisticsListner(StatisticsListner* listner); - /** * Start/initate the event log. */ @@ -150,15 +163,6 @@ public: STATIC_CONST( OPERATION_IN_PROGRESS = 6667 ); STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 ); - /** - * This class holds all statistical variables fetched with - * the getStatistics methods. - */ - class Statistics { // TODO, Real statistic data to be added - public: - int _test1; - }; - /** * This enum specifies the different signal loggig modes possible to set * with the setSignalLoggingMode method. @@ -206,7 +210,7 @@ public: typedef void (* EnterSingleCallback)(int nodeId, void * anyData, int errorCode); typedef void (* ExitSingleCallback)(int nodeId, void * anyData, - int errorCode); + int errorCode); /** * Lock configuration @@ -313,13 +317,6 @@ public: bool abort = false, int * stopCount = 0, StopCallback = 0, void * anyData = 0); - int setEventReportingLevel(int processId, - const class SetLogLevelOrd & logLevel, - bool isResend = false); - - int startStatisticEventReporting(int level = 5); - - struct BackupEvent { enum Event { BackupStarted = 1, @@ -356,8 +353,6 @@ public: /** * Backup functionallity */ - typedef void (* BackupCallback)(const BackupEvent& Event); - BackupCallback setCallback(BackupCallback); int startBackup(Uint32& backupId, bool waitCompleted = false); int abortBackup(Uint32 backupId); int performBackup(Uint32* backupId); @@ -377,22 +372,8 @@ public: // INVALID_LEVEL //************************************************************************** - /** - * Sets the Node's log level, i.e., its local event reporting. - * - * @param processId the DB node id. 
- * @param logLevel the log level. - * @param isResend Flag to indicate for resending log levels - * during node restart - - * @return 0 if successful or NO_CONTACT_WITH_PROCESS, - * SEND_OR_RECEIVE_FAILED, - * COULD_NOT_ALLOCATE_MEMORY - */ - int setNodeLogLevel(int processId, - const class SetLogLevelOrd & logLevel, - bool isResend = false); - + int setEventReportingLevelImpl(int processId, const EventSubscribeReq& ll); + int setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll); /** * Insert an error in a DB process. @@ -508,11 +489,6 @@ public: */ NodeId getPrimaryNode() const; - /** - * Returns the statistics port number. - * @return statistic port number. - */ - int getStatPort() const; /** * Returns the port number. * @return port number. @@ -526,10 +502,7 @@ public: private: //************************************************************************** - int setEventReportingLevelImpl(int processId, - const class SetLogLevelOrd & logLevel, - bool isResend = false); - + int setEventReportingLevel(int processId, LogLevel::EventCategory, Uint32); /** * Check if it is possible to send a signal to a (DB) process @@ -563,10 +536,6 @@ private: Allocated_resources m_allocated_resources; struct in_addr m_connect_address[MAX_NODES]; - int _setVarReqResult; // The result of the SET_VAR_REQ response - Statistics _statistics; // handleSTATISTICS_CONF store the result here, - // and getStatistics reads it. - //************************************************************************** // Specific signal handling methods //************************************************************************** @@ -598,14 +567,6 @@ private: // Returns: - //************************************************************************** - int handleSTATISTICS_CONF(NdbApiSignal* signal); - //************************************************************************** - // Description: Handle reception of signal STATISTICS_CONF - // Parameters: - // signal: The recieved signal - // Returns: TODO, to be defined - //************************************************************************** - void handle_MGM_LOCK_CONFIG_REQ(NdbApiSignal *signal); void handle_MGM_UNLOCK_CONFIG_REQ(NdbApiSignal *signal); @@ -631,7 +592,6 @@ private: */ enum WaitSignalType { NO_WAIT, // We don't expect to receive any signal - WAIT_STATISTICS, // Accept STATISTICS_CONF WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF WAIT_SUBSCRIBE_CONF, // Accept event subscription confirmation WAIT_STOP, @@ -733,14 +693,6 @@ private: class SignalQueue m_signalRecvQueue; - enum ndb_mgm_node_type nodeTypes[MAX_NODES]; - - int theConfCount; // The number of expected conf signals - - StatisticsListner * m_statisticsListner; // Used for sending statistics info - bool _isStatPortActive; - bool _isClusterLogStatActive; - struct StopRecord { StopRecord(){ inUse = false; callback = 0; singleUserMode = false;} bool inUse; @@ -765,10 +717,16 @@ private: void handleStopReply(NodeId nodeId, Uint32 errCode); int translateStopRef(Uint32 errCode); - + bool _isStopThread; int _logLevelThreadSleep; - int _startedNodeId; + MutexVector m_started_nodes; + MutexVector m_log_level_requests; + LogLevel m_nodeLogLevel[MAX_NODES]; + enum ndb_mgm_node_type nodeTypes[MAX_NODES]; + friend class MgmApiSession; + friend class MgmStatService; + MgmStatService m_statisticsListner; /** * Handles the thread wich upon a 'Node is started' event will @@ -782,15 +740,12 @@ private: static void *signalRecvThread_C(void *); void signalRecvThreadRun(); - NodeLogLevelList* _nodeLogLevelList; - 
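The two MutexVector members declared above (their template arguments do not survive in this rendering of the diff) are the queues behind the new logLevelThreadRun() loop shown earlier: handleStatus() push_back()s started nodes, and the log level thread drains them one at a time. An editor's sketch of that drain pattern, assuming the element type is NodeId and omitting includes; the erase(0, false) call matches the usage in logLevelThreadRun(), where the second argument appears to suppress internal locking because the vector's mutex is already held.

MutexVector<NodeId> m_started_nodes;

void drainStartedNodes()
{
  m_started_nodes.lock();
  while (m_started_nodes.size() > 0) {
    NodeId node = m_started_nodes[0];
    m_started_nodes.erase(0, false);   // mutex already held
    m_started_nodes.unlock();

    // ... re-send EVENT_SUBSCRIBE_REQ / SET_LOGLEVELORD to 'node' ...

    m_started_nodes.lock();
  }
  m_started_nodes.unlock();
}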
NodeLogLevelList* _clusterLogLevelList; - void backupCallback(BackupEvent &); - BackupCallback m_backupCallback; BackupEvent m_lastBackupEvent; Config *_props; + int send(class NdbApiSignal* signal, Uint32 node, Uint32 node_type); public: /** * This method does not exist diff --git a/ndb/src/mgmsrv/NodeLogLevel.cpp b/ndb/src/mgmsrv/NodeLogLevel.cpp deleted file mode 100644 index 5271cdb0f2b..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevel.cpp +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include "NodeLogLevel.hpp" -// TODO_RONM: Clearly getCategory and getLevel is not correctly coded. Must be taken care of. - -NodeLogLevel::NodeLogLevel(int nodeId, const SetLogLevelOrd& ll) -{ - m_nodeId = nodeId; - m_logLevel = ll; -} - -NodeLogLevel::~NodeLogLevel() -{ -} - -int -NodeLogLevel::getNodeId() const -{ - return m_nodeId; -} - -Uint32 -NodeLogLevel::getCategory() const -{ - for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++) - { - return m_logLevel.theCategories[i]; - } - return 0; -} - -int -NodeLogLevel::getLevel() const -{ - for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++) - { - return m_logLevel.theLevels[i]; - } - return 0; -} - -void -NodeLogLevel::setLevel(int level) -{ - for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++) - { - m_logLevel.theLevels[i] = level; - } - -} - -SetLogLevelOrd -NodeLogLevel::getLogLevelOrd() const -{ - return m_logLevel; -} diff --git a/ndb/src/mgmsrv/NodeLogLevel.hpp b/ndb/src/mgmsrv/NodeLogLevel.hpp deleted file mode 100644 index 3ad758cde99..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevel.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef NODELOGLEVEL_H -#define NODELOGLEVEL_H - -#include - -#include - -/** - * Holds a DB node's log level settings for both local and event log levels. - * It only holds one log level setting even though SetLogLevelOrd can handle - * multiple log levels at once, it is not used in that way in the managment - * server. 
- * - * @version #@ $Id: NodeLogLevel.hpp,v 1.2 2003/07/05 17:40:22 elathal Exp $ - */ -class NodeLogLevel -{ -public: - NodeLogLevel(int nodeId, const SetLogLevelOrd& ll); - ~NodeLogLevel(); - - int getNodeId() const; - Uint32 getCategory() const; - int getLevel() const; - void setLevel(int level); - SetLogLevelOrd getLogLevelOrd() const; - -private: - NodeLogLevel(); - NodeLogLevel(const NodeLogLevel&); - bool operator == (const NodeLogLevel&); - NodeLogLevel operator = (const NodeLogLevel&); - - int m_nodeId; - SetLogLevelOrd m_logLevel; -}; - -#endif diff --git a/ndb/src/mgmsrv/NodeLogLevelList.cpp b/ndb/src/mgmsrv/NodeLogLevelList.cpp deleted file mode 100644 index 6c7c091c1a8..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevelList.cpp +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include - -#include "NodeLogLevelList.hpp" -#include "NodeLogLevel.hpp" - -// -// PUBLIC -// - -NodeLogLevelList::NodeLogLevelList() : - m_size(0), - m_pHeadNode(NULL), - m_pTailNode(NULL), - m_pCurrNode(NULL) -{ -} - -NodeLogLevelList::~NodeLogLevelList() -{ - removeAll(); -} - -void -NodeLogLevelList::add(NodeLogLevel* pNewNode) -{ - NodeLogLevelNode* pNode = new NodeLogLevelNode(); - - if (m_pHeadNode == NULL) - { - m_pHeadNode = pNode; - pNode->pPrev = NULL; - } - else - { - m_pTailNode->pNext = pNode; - pNode->pPrev = m_pTailNode; - } - m_pTailNode = pNode; - pNode->pNext = NULL; - pNode->pHandler = pNewNode; - - m_size++; -} - -bool -NodeLogLevelList::remove(NodeLogLevel* pRemoveNode) -{ - NodeLogLevelNode* pNode = m_pHeadNode; - bool removed = false; - do - { - if (pNode->pHandler == pRemoveNode) - { - removeNode(pNode); - removed = true; - break; - } - } while ( (pNode = next(pNode)) != NULL); - - return removed; -} - -void -NodeLogLevelList::removeAll() -{ - while (m_pHeadNode != NULL) - { - removeNode(m_pHeadNode); - } -} - -NodeLogLevel* -NodeLogLevelList::next() -{ - NodeLogLevel* pHandler = NULL; - if (m_pCurrNode == NULL) - { - m_pCurrNode = m_pHeadNode; - if (m_pCurrNode != NULL) - { - pHandler = m_pCurrNode->pHandler; - } - } - else - { - m_pCurrNode = next(m_pCurrNode); // Next node - if (m_pCurrNode != NULL) - { - pHandler = m_pCurrNode->pHandler; - } - } - - return pHandler; -} - -int -NodeLogLevelList::size() const -{ - return m_size; -} - -// -// PRIVATE -// - -NodeLogLevelList::NodeLogLevelNode* -NodeLogLevelList::next(NodeLogLevelNode* pNode) -{ - NodeLogLevelNode* pCurr = pNode; - if (pNode->pNext != NULL) - { - pCurr = pNode->pNext; - } - else - { - // Tail - pCurr = NULL; - } - return pCurr; -} - -NodeLogLevelList::NodeLogLevelNode* -NodeLogLevelList::prev(NodeLogLevelNode* pNode) -{ - NodeLogLevelNode* pCurr = pNode; - if (pNode->pPrev != NULL) // head - { - pCurr = pNode->pPrev; - } - else - { - // Head - pCurr = NULL; - } - - return pCurr; -} - -void 
-NodeLogLevelList::removeNode(NodeLogLevelNode* pNode) -{ - if (pNode->pPrev == NULL) // If head - { - m_pHeadNode = pNode->pNext; - } - else - { - pNode->pPrev->pNext = pNode->pNext; - } - - if (pNode->pNext == NULL) // if tail - { - m_pTailNode = pNode->pPrev; - } - else - { - pNode->pNext->pPrev = pNode->pPrev; - } - - pNode->pNext = NULL; - pNode->pPrev = NULL; - delete pNode->pHandler; // Delete log handler - delete pNode; - - m_size--; -} diff --git a/ndb/src/mgmsrv/NodeLogLevelList.hpp b/ndb/src/mgmsrv/NodeLogLevelList.hpp deleted file mode 100644 index 4a55ee211e2..00000000000 --- a/ndb/src/mgmsrv/NodeLogLevelList.hpp +++ /dev/null @@ -1,93 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef NODELOGLEVELLIST_H -#define NODELOGLEVELLIST_H - -class NodeLogLevel; - -/** - * Provides a simple linked list of NodeLogLevel. - * - * @see NodeLogLevel - * @version #@ $Id: NodeLogLevelList.hpp,v 1.1 2002/08/09 12:53:50 eyualex Exp $ - */ -class NodeLogLevelList -{ -public: - /** - * Default Constructor. - */ - NodeLogLevelList(); - - /** - * Destructor. - */ - ~NodeLogLevelList(); - - /** - * Adds a new node. - * - * @param pNewHandler a new NodeLogLevel. - */ - void add(NodeLogLevel* pNewNode); - - /** - * Removes a NodeLogLevel from the list and call its destructor. - * - * @param pRemoveHandler the NodeLogLevel to remove - */ - bool remove(NodeLogLevel* pRemoveNode); - - /** - * Removes all items. - */ - void removeAll(); - - /** - * Returns the next node in the list. - * returns a node or NULL. - */ - NodeLogLevel* next(); - - /** - * Returns the size of the list. 
- */ - int size() const; -private: - /** List node */ - struct NodeLogLevelNode - { - NodeLogLevelNode* pPrev; - NodeLogLevelNode* pNext; - NodeLogLevel* pHandler; - }; - - NodeLogLevelNode* next(NodeLogLevelNode* pNode); - NodeLogLevelNode* prev(NodeLogLevelNode* pNode); - - void removeNode(NodeLogLevelNode* pNode); - - int m_size; - - NodeLogLevelNode* m_pHeadNode; - NodeLogLevelNode* m_pTailNode; - NodeLogLevelNode* m_pCurrNode; -}; - -#endif - - diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index c529e277e0e..684c10dbd4d 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -133,7 +134,7 @@ ParserRow commands[] = { MGM_ARG("public key", String, Mandatory, "Public key"), MGM_CMD("get version", &MgmApiSession::getVersion, ""), - + MGM_CMD("get status", &MgmApiSession::getStatus, ""), MGM_CMD("get info clusterlog", &MgmApiSession::getInfoClusterLog, ""), @@ -236,7 +237,11 @@ ParserRow commands[] = { MGM_ARG("node", String, Mandatory, "Node"), MGM_ARG("parameter", String, Mandatory, "Parameter"), MGM_ARG("value", String, Mandatory, "Value"), - + + MGM_CMD("listen event", &MgmApiSession::listen_event, ""), + MGM_ARG("node", Int, Optional, "Node"), + MGM_ARG("filter", String, Mandatory, "Event category"), + MGM_END() }; @@ -289,7 +294,8 @@ MgmApiSession::runSession() { break; } } - NDB_CLOSE_SOCKET(m_socket); + if(m_socket >= 0) + NDB_CLOSE_SOCKET(m_socket); } #ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT @@ -418,7 +424,8 @@ MgmApiSession::get_nodeid(Parser_t::Context &, &addr, &addrlen, error_string)){ const char *alias; const char *str; - alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)nodetype, &str); + alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type) + nodetype, &str); m_output->println(cmd); m_output->println("result: %s", error_string.c_str()); m_output->println(""); @@ -554,7 +561,7 @@ MgmApiSession::getStatPort(Parser_t::Context &, const class Properties &) { m_output->println("get statport reply"); - m_output->println("tcpport: %d", m_mgmsrv.getStatPort()); + m_output->println("tcpport: %d", 0); m_output->println(""); } @@ -756,13 +763,12 @@ MgmApiSession::bye(Parser::Context &, void MgmApiSession::setClusterLogLevel(Parser::Context &, Properties const &args) { - Uint32 node, level; - BaseString categoryName, errorString; + Uint32 node, level, category; + BaseString errorString; SetLogLevelOrd logLevel; int result; - logLevel.clear(); args.get("node", &node); - args.get("category", categoryName); + args.get("category", &category); args.get("level", &level); /* XXX should use constants for this value */ @@ -771,25 +777,17 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, goto error; } - categoryName.ndb_toupper(); - - LogLevel::EventCategory category; - if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) { - errorString.assign("Unknown category"); - goto error; - } - - logLevel.setLogLevel(category, level); - result = m_mgmsrv.setEventReportingLevel(node, logLevel); - + EventSubscribeReq req; + req.blockRef = 0; + req.noOfEntries = 1; + req.theData[0] = (category << 16) | level; + m_mgmsrv.m_log_level_requests.push_back(req); + m_output->println("set cluster loglevel reply"); - if(result != 0) - m_output->println("result: %s", m_mgmsrv.getErrorText(result)); - else - m_output->println("result: Ok"); + m_output->println("result: Ok"); m_output->println(""); return; - error: +error: m_output->println("set cluster 
loglevel reply"); m_output->println("result: %s", errorString.c_str()); m_output->println(""); @@ -798,13 +796,13 @@ MgmApiSession::setClusterLogLevel(Parser::Context &, void MgmApiSession::setLogLevel(Parser::Context &, Properties const &args) { - Uint32 node = 0, level = 0; - BaseString categoryName, errorString; + Uint32 node = 0, level = 0, category; + BaseString errorString; SetLogLevelOrd logLevel; int result; logLevel.clear(); args.get("node", &node); - args.get("category", categoryName); + args.get("category", &category); args.get("level", &level); /* XXX should use constants for this value */ @@ -813,23 +811,14 @@ MgmApiSession::setLogLevel(Parser::Context &, goto error; } - categoryName.ndb_toupper(); - - LogLevel::EventCategory category; - if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) { - errorString.assign("Unknown category"); - goto error; - } - - logLevel.setLogLevel(category, level); - - result = m_mgmsrv.setNodeLogLevel(node, logLevel); - + EventSubscribeReq req; + req.blockRef = node; + req.noOfEntries = 1; + req.theData[0] = (category << 16) | level; + m_mgmsrv.m_log_level_requests.push_back(req); + m_output->println("set loglevel reply"); - if(result != 0) - m_output->println("result: %s", m_mgmsrv.getErrorText(result)); - else - m_output->println("result: Ok"); + m_output->println("result: Ok"); m_output->println(""); return; error: @@ -1248,33 +1237,91 @@ MgmApiSession::configChange(Parser_t::Context &, m_output->println(""); } -void -MgmStatService::println_statistics(const BaseString &line){ - MutexVector copy(m_sockets.size()); - m_sockets.lock(); - int i; - for(i = m_sockets.size() - 1; i >= 0; i--){ - if(println_socket(m_sockets[i], MAX_WRITE_TIMEOUT, line.c_str()) == -1){ - copy.push_back(m_sockets[i]); - m_sockets.erase(i, false); +static NdbOut& +operator<<(NdbOut& out, const LogLevel & ll) +{ + out << "[LogLevel: "; + for(size_t i = 0; i= 0; i--){ - NDB_CLOSE_SOCKET(copy[i]); - copy.erase(i); + + char m_text[256]; + EventLogger::getText(m_text, sizeof(m_text), eventType, theData, nodeId); + + Vector copy; + m_clients.lock(); + int i; + for(i = m_clients.size() - 1; i >= 0; i--){ + if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)){ + if(m_clients[i].m_socket >= 0 && + println_socket(m_clients[i].m_socket, + MAX_WRITE_TIMEOUT, m_text) == -1){ + copy.push_back(m_clients[i].m_socket); + m_clients.erase(i, false); + } + } } - if(m_sockets.size() == 0 || false){ - m_mgmsrv->startStatisticEventReporting(0); + m_clients.unlock(); + + for(i = 0; (unsigned)im_log_level_requests.push_back(req); + } + } +} + +void +MgmStatService::add_listener(const StatListener& client){ + m_clients.push_back(client); + LogLevel tmp = m_logLevel; + tmp.set_max(client.m_logLevel); + + if(!(tmp == m_logLevel)){ + m_logLevel = tmp; + EventSubscribeReq req; + req = tmp; + req.blockRef = 0; + m_mgmsrv->m_log_level_requests.push_back(req); } } void MgmStatService::stopSessions(){ - for(int i = m_sockets.size() - 1; i >= 0; i--){ - NDB_CLOSE_SOCKET(m_sockets[i]); - m_sockets.erase(i); + for(int i = m_clients.size() - 1; i >= 0; i--){ + if(m_clients[i].m_socket >= 0){ + NDB_CLOSE_SOCKET(m_clients[i].m_socket); + m_clients.erase(i); + } } } @@ -1298,6 +1345,75 @@ MgmApiSession::setParameter(Parser_t::Context &, m_output->println(""); } +void +MgmApiSession::listen_event(Parser::Context & ctx, + Properties const & args) { + + BaseString node, param, value; + args.get("node", node); + args.get("filter", param); + + int result = 0; + BaseString msg; + + 
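
Both set-loglevel handlers above now reply "Ok" immediately and simply queue an EventSubscribeReq whose single entry packs the event category into the upper 16 bits of theData[0] and the level into the lower 16; the same category/level pair is what the listen-event handler that starts here records per listener through LogLevel::setLogLevel. A standalone sketch of the encoding (the helper names are illustrative, not from the NDB headers):

// Pack/unpack for the (category << 16) | level entries queued above.
typedef unsigned int Uint32;

inline Uint32 packEntry(Uint32 category, Uint32 level) {
  return (category << 16) | (level & 0xFFFF);
}
inline Uint32 entryCategory(Uint32 entry) { return entry >> 16; }
inline Uint32 entryLevel(Uint32 entry)    { return entry & 0xFFFF; }
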
MgmStatService::StatListener le; + le.m_socket = m_socket; + + Vector list; + param.trim(); + param.split(list, " ,"); + for(size_t i = 0; i spec; + list[i].trim(); + list[i].split(spec, "=:"); + if(spec.size() != 2){ + msg.appfmt("Invalid filter specification: >%s< >%s< %d", + param.c_str(), list[i].c_str(), spec.size()); + result = -1; + goto done; + } + + spec[0].trim().ndb_toupper(); + int category = ndb_mgm_match_event_category(spec[0].c_str()); + if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){ + category = atoi(spec[0].c_str()); + if(category < NDB_MGM_MIN_EVENT_CATEGORY || + category > NDB_MGM_MAX_EVENT_CATEGORY){ + msg.appfmt("Unknown category: >%s<", spec[0].c_str()); + result = -1; + goto done; + } + } + + int level = atoi(spec[1].c_str()); + if(level < 0 || level > 15){ + msg.appfmt("Invalid level: >%s<", spec[1].c_str()); + result = -1; + goto done; + } + category -= CFG_MIN_LOGLEVEL; + le.m_logLevel.setLogLevel((LogLevel::EventCategory)category, level); + } + + if(list.size() == 0){ + msg.appfmt("Empty filter specification"); + result = -1; + goto done; + } + + m_mgmsrv.m_statisticsListner.add_listener(le); + + m_stop = true; + m_socket = -1; + +done: + m_output->println("listen event"); + m_output->println("result: %d", result); + if(result != 0) + m_output->println("msg: %s", msg.c_str()); + m_output->println(""); +} + template class MutexVector; template class Vector const*>; template class Vector; diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp index 9cf8b59be8f..e47820826b6 100644 --- a/ndb/src/mgmsrv/Services.hpp +++ b/ndb/src/mgmsrv/Services.hpp @@ -83,7 +83,8 @@ public: void configChange(Parser_t::Context &ctx, const class Properties &args); void setParameter(Parser_t::Context &ctx, const class Properties &args); - + void listen_event(Parser_t::Context &ctx, const class Properties &args); + void repCommand(Parser_t::Context &ctx, const class Properties &args); }; @@ -103,28 +104,4 @@ public: } }; -class MgmStatService : public SocketServer::Service, - public MgmtSrvr::StatisticsListner -{ - class MgmtSrvr * m_mgmsrv; - MutexVector m_sockets; -public: - MgmStatService() : m_sockets(5) { - m_mgmsrv = 0; - } - - void setMgm(class MgmtSrvr * mgmsrv){ - m_mgmsrv = mgmsrv; - } - - SocketServer::Session * newSession(NDB_SOCKET_TYPE socket){ - m_sockets.push_back(socket); - m_mgmsrv->startStatisticEventReporting(5); - return 0; - } - - void stopSessions(); - - void println_statistics(const BaseString &line); -}; #endif diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 323a836cdd4..a582d082312 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -15,7 +15,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include #include "MgmtSrvr.hpp" #include "EventLogger.hpp" @@ -70,7 +69,6 @@ struct MgmGlobals { bool use_specific_ip; char * interface_name; int port; - int port_stats; /** The configuration of the cluster */ Config * cluster_config; @@ -82,6 +80,7 @@ struct MgmGlobals { SocketServer * socketServer; }; +int g_no_nodeid_checks= 0; static MgmGlobals glob; @@ -118,7 +117,9 @@ struct getargs args[] = { "Specify configuration file connect string (will default use Ndb.cfg if available)", "filename" }, { "interactive", 0, arg_flag, &glob.interactive, - "Run interactive. Not supported but provided for testing purposes", "" }, + "Run interactive. 
Not supported but provided for testing purposes", "" }, + { "no-nodeid-checks", 0, arg_flag, &g_no_nodeid_checks, + "Do not provide any node id checks", "" }, { "nodaemon", 0, arg_flag, &glob.non_interactive, "Don't run as daemon, but don't read from stdin", "non-interactive" } }; @@ -129,6 +130,7 @@ int num_args = sizeof(args) / sizeof(args[0]); * MAIN */ NDB_MAIN(mgmsrv){ + ndb_init(); /** * OSE specific. Enable shared ownership of file system resources. * This is needed in order to use the cluster log since the events @@ -151,7 +153,6 @@ NDB_MAIN(mgmsrv){ glob.daemon= 0; } - my_init(); #ifndef DBUG_OFF if (debug_option) DBUG_PUSH(debug_option); @@ -169,8 +170,6 @@ NDB_MAIN(mgmsrv){ MgmApiService * mapi = new MgmApiService(); - MgmStatService * mstat = new MgmStatService(); - /**************************** * Read configuration files * ****************************/ @@ -230,13 +229,6 @@ NDB_MAIN(mgmsrv){ goto error_end; } - if(!glob.socketServer->setup(mstat, glob.port_stats, glob.interface_name)){ - ndbout_c("Unable to setup statistic port: %d!\nPlease check if the port" - " is already used.", glob.port_stats); - delete mstat; - goto error_end; - } - if(!glob.mgmObject->check_start()){ ndbout_c("Unable to check start management server."); ndbout_c("Probably caused by illegal initial configuration file."); @@ -267,10 +259,7 @@ NDB_MAIN(mgmsrv){ } //glob.mgmObject->saveConfig(); - - mstat->setMgm(glob.mgmObject); mapi->setMgm(glob.mgmObject); - glob.mgmObject->setStatisticsListner(mstat); char msg[256]; snprintf(msg, sizeof(msg), @@ -278,8 +267,8 @@ NDB_MAIN(mgmsrv){ ndbout_c(msg); g_EventLogger.info(msg); - snprintf(msg, 256, "Id: %d, Command port: %d, Statistics port: %d", - glob.localNodeId, glob.port, glob.port_stats); + snprintf(msg, 256, "Id: %d, Command port: %d", + glob.localNodeId, glob.port); ndbout_c(msg); g_EventLogger.info(msg); @@ -309,7 +298,6 @@ NDB_MAIN(mgmsrv){ MgmGlobals::MgmGlobals(){ // Default values port = 0; - port_stats = 0; config_filename = NULL; local_config_filename = NULL; interface_name = 0; @@ -336,17 +324,12 @@ MgmGlobals::~MgmGlobals(){ * @fn readLocalConfig * @param glob : Global variables * @return true if success, false otherwise. - * - * How to get LOCAL CONFIGURATION FILE: - * 1. Use local config file name (-l) - * 2. Use environment NDB_HOME + Ndb.cfg - * If NDB_HOME is not set this results in reading from local dir */ static bool readLocalConfig(){ // Read local config file LocalConfig lc; - if(!lc.init(glob.local_config_filename)){ + if(!lc.init(0,glob.local_config_filename)){ lc.printError(); return false; } @@ -360,10 +343,6 @@ readLocalConfig(){ * @fn readGlobalConfig * @param glob : Global variables * @return true if success, false otherwise. - * - * How to get the GLOBAL CONFIGURATION: - * 1. Use config file name (this is a text file)(-c) - * 2. 
Use name from line 2 of local config file, ex: file:///c/ndb/Ndb_cfg.bin */ static bool readGlobalConfig() { diff --git a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp index 3b2046d7b49..28823aaa35e 100644 --- a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp +++ b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp @@ -32,6 +32,7 @@ void usage(const char * prg){ NDB_COMMAND(mkconfig, "mkconfig", "mkconfig", "Make a binary configuration from a config file", 16384){ + ndb_init(); if(argc < 3){ usage(argv[0]); return 0; diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index 1e28fbc2db5..6cfacc2c340 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -65,7 +65,7 @@ NdbDictionary::Column::getName() const { void NdbDictionary::Column::setType(Type t){ - m_impl.m_type = t; + m_impl.init(t); } NdbDictionary::Column::Type @@ -103,6 +103,54 @@ NdbDictionary::Column::getLength() const{ return m_impl.m_length; } +void +NdbDictionary::Column::setInlineSize(int size) +{ + m_impl.m_precision = size; +} + +void +NdbDictionary::Column::setCharset(CHARSET_INFO* cs) +{ + m_impl.m_cs = cs; +} + +CHARSET_INFO* +NdbDictionary::Column::getCharset() const +{ + return m_impl.m_cs; +} + +int +NdbDictionary::Column::getInlineSize() const +{ + return m_impl.m_precision; +} + +void +NdbDictionary::Column::setPartSize(int size) +{ + m_impl.m_scale = size; +} + +int +NdbDictionary::Column::getPartSize() const +{ + return m_impl.m_scale; +} + +void +NdbDictionary::Column::setStripeSize(int size) +{ + m_impl.m_length = size; +} + +int +NdbDictionary::Column::getStripeSize() const +{ + return m_impl.m_length; +} + int NdbDictionary::Column::getSize() const{ return m_impl.m_attrSize; @@ -821,6 +869,8 @@ NdbDictionary::Dictionary::getNdbError() const { NdbOut& operator<<(NdbOut& out, const NdbDictionary::Column& col) { + const CHARSET_INFO *cs = col.getCharset(); + const char *csname = cs ? 
cs->name : "?"; out << col.getName() << " "; switch (col.getType()) { case NdbDictionary::Column::Tinyint: @@ -863,10 +913,10 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) out << "Decimal(" << col.getScale() << "," << col.getPrecision() << ")"; break; case NdbDictionary::Column::Char: - out << "Char(" << col.getLength() << ")"; + out << "Char(" << col.getLength() << ";" << csname << ")"; break; case NdbDictionary::Column::Varchar: - out << "Varchar(" << col.getLength() << ")"; + out << "Varchar(" << col.getLength() << ";" << csname << ")"; break; case NdbDictionary::Column::Binary: out << "Binary(" << col.getLength() << ")"; @@ -886,7 +936,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) break; case NdbDictionary::Column::Text: out << "Text(" << col.getInlineSize() << "," << col.getPartSize() - << ";" << col.getStripeSize() << ")"; + << ";" << col.getStripeSize() << ";" << csname << ")"; break; case NdbDictionary::Column::Undefined: out << "Undefined"; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 815ecf8ca6c..0b2a0386a6b 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -36,6 +36,7 @@ #include "NdbEventOperationImpl.hpp" #include "NdbBlob.hpp" #include +#include #define DEBUG_PRINT 0 #define INCOMPATIBLE_VERSION -2 @@ -64,6 +65,7 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col) m_name = col.m_name; m_type = col.m_type; m_precision = col.m_precision; + m_cs = col.m_cs; m_scale = col.m_scale; m_length = col.m_length; m_pk = col.m_pk; @@ -87,10 +89,66 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col) } void -NdbColumnImpl::init() +NdbColumnImpl::init(Type t) { + // do not use default_charset_info as it may not be initialized yet + // use binary collation until NDB tests can handle charsets + CHARSET_INFO* default_cs = &my_charset_latin1_bin; m_attrId = -1; - m_type = NdbDictionary::Column::Unsigned; + m_type = t; + switch (m_type) { + case Tinyint: + case Tinyunsigned: + case Smallint: + case Smallunsigned: + case Mediumint: + case Mediumunsigned: + case Int: + case Unsigned: + case Bigint: + case Bigunsigned: + case Float: + case Double: + m_precision = 0; + m_scale = 0; + m_length = 1; + m_cs = NULL; + break; + case Decimal: + m_precision = 10; + m_scale = 0; + m_length = 1; + m_cs = NULL; + break; + case Char: + case Varchar: + m_precision = 0; + m_scale = 0; + m_length = 1; + m_cs = default_cs; + break; + case Binary: + case Varbinary: + case Datetime: + case Timespec: + m_precision = 0; + m_scale = 0; + m_length = 1; + m_cs = NULL; + break; + case Blob: + m_precision = 256; + m_scale = 8000; + m_length = 4; + m_cs = NULL; + break; + case Text: + m_precision = 256; + m_scale = 8000; + m_length = 4; + m_cs = default_cs; + break; + } m_pk = false; m_nullable = false; m_tupleKey = false; @@ -98,12 +156,10 @@ NdbColumnImpl::init() m_distributionKey = false; m_distributionGroup = false; m_distributionGroupBits = 8; - m_length = 1; - m_scale = 5; - m_precision = 5; m_keyInfoPos = 0; - m_attrSize = 4, - m_arraySize = 1, + // next 2 are set at run time + m_attrSize = 0; + m_arraySize = 0; m_autoIncrement = false; m_autoIncrementInitialValue = 1; m_blobTable = NULL; @@ -146,52 +202,12 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const return false; } } - if(m_length != col.m_length){ + if (m_precision != col.m_precision || + m_scale != col.m_scale || + m_length != col.m_length || + m_cs != col.m_cs) { return false; } - - switch(m_type){ - case 
NdbDictionary::Column::Undefined: - break; - case NdbDictionary::Column::Tinyint: - case NdbDictionary::Column::Tinyunsigned: - case NdbDictionary::Column::Smallint: - case NdbDictionary::Column::Smallunsigned: - case NdbDictionary::Column::Mediumint: - case NdbDictionary::Column::Mediumunsigned: - case NdbDictionary::Column::Int: - case NdbDictionary::Column::Unsigned: - case NdbDictionary::Column::Float: - break; - case NdbDictionary::Column::Decimal: - if(m_scale != col.m_scale || - m_precision != col.m_precision){ - return false; - } - break; - case NdbDictionary::Column::Char: - case NdbDictionary::Column::Varchar: - case NdbDictionary::Column::Binary: - case NdbDictionary::Column::Varbinary: - if(m_length != col.m_length){ - return false; - } - break; - case NdbDictionary::Column::Bigint: - case NdbDictionary::Column::Bigunsigned: - case NdbDictionary::Column::Double: - case NdbDictionary::Column::Datetime: - case NdbDictionary::Column::Timespec: - break; - case NdbDictionary::Column::Blob: - case NdbDictionary::Column::Text: - if (m_precision != col.m_precision || - m_scale != col.m_scale || - m_length != col.m_length) { - return false; - } - break; - } if (m_autoIncrement != col.m_autoIncrement){ return false; } @@ -209,14 +225,18 @@ NdbColumnImpl::create_psuedo(const char * name){ if(!strcmp(name, "NDB$FRAGMENT")){ col->setType(NdbDictionary::Column::Unsigned); col->m_impl.m_attrId = AttributeHeader::FRAGMENT; + col->m_impl.m_attrSize = 4; + col->m_impl.m_arraySize = 1; } else if(!strcmp(name, "NDB$ROW_COUNT")){ col->setType(NdbDictionary::Column::Bigunsigned); col->m_impl.m_attrId = AttributeHeader::ROW_COUNT; col->m_impl.m_attrSize = 8; + col->m_impl.m_arraySize = 1; } else if(!strcmp(name, "NDB$COMMIT_COUNT")){ col->setType(NdbDictionary::Column::Bigunsigned); col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT; col->m_impl.m_attrSize = 8; + col->m_impl.m_arraySize = 1; } else { abort(); } @@ -1127,6 +1147,7 @@ indexTypeMapping[] = { { -1, -1 } }; +// TODO: remove, api-kernel type codes must match now static const ApiKernelMapping columnTypeMapping[] = { @@ -1233,9 +1254,23 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, return 703; } col->m_extType = attrDesc.AttributeExtType; - col->m_precision = attrDesc.AttributeExtPrecision; + col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF); col->m_scale = attrDesc.AttributeExtScale; col->m_length = attrDesc.AttributeExtLength; + // charset in upper half of precision + unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16); + // charset is defined exactly for char types + if (col->getCharType() != (cs_number != 0)) { + delete impl; + return 703; + } + if (col->getCharType()) { + col->m_cs = get_charset(cs_number, MYF(0)); + if (col->m_cs == NULL) { + delete impl; + return 743; + } + } // translate to old kernel types and sizes if (! attrDesc.translateExtType()) { @@ -1486,9 +1521,23 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, getKernelConstant(col->m_type, columnTypeMapping, DictTabInfo::ExtUndefined); - tmpAttr.AttributeExtPrecision = col->m_precision; + tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF); tmpAttr.AttributeExtScale = col->m_scale; tmpAttr.AttributeExtLength = col->m_length; + // charset is defined exactly for char types + if (col->getCharType() != (col->m_cs != NULL)) { + m_error.code = 703; + return -1; + } + // primary key type check + if (col->m_pk && ! 
NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) { + m_error.code = 743; + return -1; + } + // charset in upper half of precision + if (col->getCharType()) { + tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16); + } // DICT will ignore and recompute this (void)tmpAttr.translateExtType(); @@ -1950,6 +1999,14 @@ NdbDictInterface::createIndex(Ndb & ndb, m_error.code = 4245; return -1; } + // index key type check + if (it == DictTabInfo::UniqueHashIndex && + ! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs) || + it == DictTabInfo::OrderedIndex && + ! NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs)) { + m_error.code = 743; + return -1; + } attributeList.id[i] = col->m_attrId; } if (it == DictTabInfo::UniqueHashIndex) { diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index da5e7e45c36..cf659c71397 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -52,7 +52,7 @@ public: NdbColumnImpl(NdbDictionary::Column &); // This is not a copy constructor ~NdbColumnImpl(); NdbColumnImpl& operator=(const NdbColumnImpl&); - void init(); + void init(Type t = Unsigned); int m_attrId; BaseString m_name; @@ -60,6 +60,7 @@ public: int m_precision; int m_scale; int m_length; + CHARSET_INFO * m_cs; // not const in MySQL bool m_pk; bool m_tupleKey; @@ -82,6 +83,7 @@ public: Uint32 m_keyInfoPos; Uint32 m_extType; // used by restore (kernel type in versin v2x) bool getInterpretableType() const ; + bool getCharType() const; bool getBlobType() const; /** @@ -446,6 +448,14 @@ NdbColumnImpl::getInterpretableType() const { m_type == NdbDictionary::Column::Bigunsigned); } +inline +bool +NdbColumnImpl::getCharType() const { + return (m_type == NdbDictionary::Column::Char || + m_type == NdbDictionary::Column::Varchar || + m_type == NdbDictionary::Column::Text); +} + inline bool NdbColumnImpl::getBlobType() const { diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/ndb/src/ndbapi/NdbIndexOperation.cpp index 0742f8d911c..bf4b07842f6 100644 --- a/ndb/src/ndbapi/NdbIndexOperation.cpp +++ b/ndb/src/ndbapi/NdbIndexOperation.cpp @@ -164,6 +164,7 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, Uint32 tData; Uint32 tKeyInfoPosition; const char* aValue = aValuePassed; + Uint32 xfrmData[1024]; Uint32 tempData[1024]; if ((theStatus == OperationDefined) && @@ -224,6 +225,21 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, m_theIndexDefined[i][2] = true; Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + const char* aValueToWrite = aValue; + + CHARSET_INFO* cs = tAttrInfo->m_cs; + if (cs != 0) { + // current limitation: strxfrm does not increase length + assert(cs->strxfrm_multiply == 1); + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)xfrmData, sizeof(xfrmData), + (const uchar*)aValue, sizeInBytes); + while (n < sizeInBytes) + ((uchar*)xfrmData)[n++] = 0x20; + aValue = (char*)xfrmData; + } + Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ; Uint32 totalSizeInWords = (sizeInBytes + 3)/4;// Inc. bits in last word Uint32 sizeInWords = sizeInBytes / 4; // Exc. 
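
The block just above is the core of the charset-aware key handling on the API side: before a Char/Varchar key value goes into the signal it is run through the collation's strnxfrm and padded with 0x20 up to the fixed column size, so that values the collation treats as equal produce an identical key image. A condensed, standalone sketch of that step (the helper and its buffer handling are illustrative; in the patch the code is inlined in equal_impl and, later, in setBound):

#include <m_ctype.h>   // CHARSET_INFO, from the MySQL strings library

// Normalize a fixed-size char key into dst (dst must hold sizeInBytes bytes),
// mirroring the strnxfrm plus space padding done inline in the hunk above.
static void normalizeCharKey(CHARSET_INFO* cs,
                             const char* src, unsigned sizeInBytes,
                             unsigned char* dst)
{
  // limitation noted in the patch: strxfrm may not grow the value
  // (cs->strxfrm_multiply == 1 is asserted there)
  unsigned n = (*cs->coll->strnxfrm)(cs,
                                     dst, sizeInBytes,
                                     (const unsigned char*)src, sizeInBytes);
  while (n < sizeInBytes)
    dst[n++] = 0x20;   // pad with spaces so the image always fills the column
}
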
bits in last word @@ -314,13 +330,20 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, if ((tOpType == InsertRequest) || (tOpType == WriteRequest)) { if (!tAttrInfo->m_indexOnly){ + // invalid data can crash kernel + if (cs != NULL && + (*cs->cset->well_formed_len)(cs, + aValueToWrite, + aValueToWrite + sizeInBytes, + sizeInBytes) != sizeInBytes) + goto equal_error4; Uint32 ahValue; Uint32 sz = totalSizeInWords; AttributeHeader::init(&ahValue, tAttrId, sz); insertATTRINFO( ahValue ); - insertATTRINFOloop((Uint32*)aValue, sizeInWords); + insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords); if (bitsInLastWord != 0) { - tData = *(Uint32*)(aValue + (sizeInWords << 2)); + tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2)); tData = convertEndian(tData); tData = tData & ((1 << bitsInLastWord) - 1); tData = convertEndian(tData); @@ -411,7 +434,10 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo, equal_error3: setErrorCodeAbort(4209); - + return -1; + + equal_error4: + setErrorCodeAbort(744); return -1; } diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp index 6d995e06582..ad838ddd601 100644 --- a/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -492,6 +492,17 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, // Insert Attribute Id into ATTRINFO part. const Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + + CHARSET_INFO* cs = tAttrInfo->m_cs; + // invalid data can crash kernel + if (cs != NULL && + (*cs->cset->well_formed_len)(cs, + aValue, + aValue + sizeInBytes, + sizeInBytes) != sizeInBytes) { + setErrorCodeAbort(744); + return -1; + } #if 0 tAttrSize = tAttrInfo->theAttrSize; tArraySize = tAttrInfo->theArraySize; diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp index 19cb133dbf7..e5166fc4a82 100644 --- a/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -60,6 +60,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, Uint32 tData; Uint32 tKeyInfoPosition; const char* aValue = aValuePassed; + Uint32 xfrmData[1024]; Uint32 tempData[1024]; if ((theStatus == OperationDefined) && @@ -117,6 +118,21 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, theTupleKeyDefined[i][2] = true; Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + const char* aValueToWrite = aValue; + + CHARSET_INFO* cs = tAttrInfo->m_cs; + if (cs != 0) { + // current limitation: strxfrm does not increase length + assert(cs->strxfrm_multiply == 1); + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)xfrmData, sizeof(xfrmData), + (const uchar*)aValue, sizeInBytes); + while (n < sizeInBytes) + ((uchar*)xfrmData)[n++] = 0x20; + aValue = (char*)xfrmData; + } + Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ; Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. bits in last word Uint32 sizeInWords = sizeInBytes / 4; // Exc. 
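
Alongside the normalization, the insert and update paths above now refuse values that are not well formed in the column's character set and map the failure to the new error 744; as the comment in the patch puts it, invalid data can crash the kernel. A standalone sketch of that guard (the helper name is illustrative; the patch performs the check inline in equal_impl and setValue):

#include <m_ctype.h>   // CHARSET_INFO

// True if the first sizeInBytes bytes of value are valid in cs, mirroring the
// well_formed_len check used in the hunks above (failure maps to error 744).
static bool charValueIsWellFormed(CHARSET_INFO* cs,
                                  const char* value, unsigned sizeInBytes)
{
  if (cs == NULL)
    return true;       // not a character column, nothing to check
  return (*cs->cset->well_formed_len)(cs,
                                      value, value + sizeInBytes,
                                      sizeInBytes) == sizeInBytes;
}
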
bits in last word @@ -206,13 +222,20 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, if ((tOpType == InsertRequest) || (tOpType == WriteRequest)) { if (!tAttrInfo->m_indexOnly){ + // invalid data can crash kernel + if (cs != NULL && + (*cs->cset->well_formed_len)(cs, + aValueToWrite, + aValueToWrite + sizeInBytes, + sizeInBytes) != sizeInBytes) + goto equal_error4; Uint32 ahValue; const Uint32 sz = totalSizeInWords; AttributeHeader::init(&ahValue, tAttrId, sz); insertATTRINFO( ahValue ); - insertATTRINFOloop((Uint32*)aValue, sizeInWords); + insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords); if (bitsInLastWord != 0) { - tData = *(Uint32*)(aValue + (sizeInWords << 2)); + tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2)); tData = convertEndian(tData); tData = tData & ((1 << bitsInLastWord) - 1); tData = convertEndian(tData); @@ -311,6 +334,10 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, equal_error3: setErrorCodeAbort(4209); return -1; + + equal_error4: + setErrorCodeAbort(744); + return -1; } /****************************************************************************** diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp index 86c174c4545..ac5f4268386 100644 --- a/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/ndb/src/ndbapi/NdbScanOperation.cpp @@ -1096,30 +1096,43 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo, theStatus == SetBound && (0 <= type && type <= 4) && len <= 8000) { - // bound type - + // insert bound type insertATTRINFO(type); - // attribute header Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + // normalize char bound + CHARSET_INFO* cs = tAttrInfo->m_cs; + Uint32 xfrmData[2000]; + if (cs != NULL && aValue != NULL) { + // current limitation: strxfrm does not increase length + assert(cs->strxfrm_multiply == 1); + unsigned n = + (*cs->coll->strnxfrm)(cs, + (uchar*)xfrmData, sizeof(xfrmData), + (const uchar*)aValue, sizeInBytes); + while (n < sizeInBytes) + ((uchar*)xfrmData)[n++] = 0x20; + aValue = (char*)xfrmData; + } if (len != sizeInBytes && (len != 0)) { setErrorCodeAbort(4209); return -1; } + // insert attribute header len = aValue != NULL ? sizeInBytes : 0; Uint32 tIndexAttrId = tAttrInfo->m_attrId; Uint32 sizeInWords = (len + 3) / 4; AttributeHeader ah(tIndexAttrId, sizeInWords); insertATTRINFO(ah.m_value); if (len != 0) { - // attribute data + // insert attribute data if ((UintPtr(aValue) & 0x3) == 0 && (len & 0x3) == 0) insertATTRINFOloop((const Uint32*)aValue, sizeInWords); else { - Uint32 temp[2000]; - memcpy(temp, aValue, len); + Uint32 tempData[2000]; + memcpy(tempData, aValue, len); while ((len & 0x3) != 0) - ((char*)temp)[len++] = 0; - insertATTRINFOloop(temp, sizeInWords); + ((char*)tempData)[len++] = 0; + insertATTRINFOloop(tempData, sizeInWords); } } @@ -1206,11 +1219,11 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols, if((r1_null ^ (unsigned)r2->isNULL())){ return (r1_null ? 
-1 : 1); } - Uint32 type = NdbColumnImpl::getImpl(* r1->m_column).m_extType; + const NdbColumnImpl & col = NdbColumnImpl::getImpl(* r1->m_column); Uint32 size = (r1->theAttrSize * r1->theArraySize + 3) / 4; if(!r1_null){ - const NdbSqlUtil::Type& t = NdbSqlUtil::getType(type); - int r = (*t.m_cmp)(d1, d2, size, size); + const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_extType); + int r = (*sqlType.m_cmp)(col.m_cs, d1, d2, size, size); if(r){ assert(r != NdbSqlUtil::CmpUnknown); return r; diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp index d4b4c92e78a..fd357ef4e37 100644 --- a/ndb/src/ndbapi/Ndbinit.cpp +++ b/ndb/src/ndbapi/Ndbinit.cpp @@ -16,7 +16,6 @@ #include -#include #include "NdbApiSignal.hpp" #include "NdbImpl.hpp" @@ -62,7 +61,6 @@ Ndb::Ndb( const char* aDataBase , const char* aSchema) { abort(); // old and new Ndb constructor used mixed theNoOfNdbObjects++; if (global_ndb_cluster_connection == 0) { - my_init(); global_ndb_cluster_connection= new Ndb_cluster_connection(ndbConnectString); global_ndb_cluster_connection->connect(); } diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 7991004e3d0..037c441cc38 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -280,6 +280,9 @@ ErrorBundle ErrorCodes[] = { { 739, SE, "Unsupported primary key length" }, { 740, SE, "Nullable primary key not supported" }, { 741, SE, "Unsupported alter table" }, + { 742, SE, "Unsupported attribute type in index" }, + { 743, SE, "Unsupported character set in table or index" }, + { 744, SE, "Character string is invalid for given character set" }, { 241, SE, "Invalid schema object version" }, { 283, SE, "Table is being dropped" }, { 284, SE, "Table not defined in transaction coordinator" }, diff --git a/ndb/test/include/NDBT_Table.hpp b/ndb/test/include/NDBT_Table.hpp index 59db3ed1092..d2f99b85187 100644 --- a/ndb/test/include/NDBT_Table.hpp +++ b/ndb/test/include/NDBT_Table.hpp @@ -33,10 +33,10 @@ public: { assert(_name != 0); + setType(_type); + setLength(_length); setNullable(_nullable); setPrimaryKey(_pk); - setLength(_length); - setType(_type); } }; diff --git a/ndb/test/ndbapi/acid.cpp b/ndb/test/ndbapi/acid.cpp index 157b3c7b3ef..3eb1625be26 100644 --- a/ndb/test/ndbapi/acid.cpp +++ b/ndb/test/ndbapi/acid.cpp @@ -434,6 +434,7 @@ extern "C" void* NdbThreadFuncRead(void* pArg) NDB_COMMAND(acid, "acid", "acid", "acid", 65535) { + ndb_init(); long nSeconds = 60; int rc = NDBT_OK; diff --git a/ndb/test/ndbapi/acid2.cpp b/ndb/test/ndbapi/acid2.cpp index 434a0450daa..7bd7ec00ac5 100644 --- a/ndb/test/ndbapi/acid2.cpp +++ b/ndb/test/ndbapi/acid2.cpp @@ -610,6 +610,7 @@ extern "C" void* ThreadFunc(void*) int main(int argc, char* argv[]) { + ndb_init(); Uint32 nSeconds = 1; Uint32 nThread = 1; diff --git a/ndb/test/ndbapi/bank/Bank.cpp b/ndb/test/ndbapi/bank/Bank.cpp index 4581d1a9842..c6029259357 100644 --- a/ndb/test/ndbapi/bank/Bank.cpp +++ b/ndb/test/ndbapi/bank/Bank.cpp @@ -156,7 +156,14 @@ int Bank::performTransactionImpl1(int fromAccountId, int check; + // Ok, all clear to do the transaction + Uint64 transId; + if (getNextTransactionId(transId) != NDBT_OK){ + return NDBT_FAILED; + } + NdbConnection* pTrans = m_ndb.startTransaction(); + if( pTrans == NULL ) { const NdbError err = m_ndb.getNdbError(); if (err.status == NdbError::TemporaryError){ @@ -167,6 +174,13 @@ int Bank::performTransactionImpl1(int fromAccountId, return NDBT_FAILED; } + Uint64 currTime; + if (prepareGetCurrTimeOp(pTrans, currTime) != NDBT_OK){ + 
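
The Bank changes starting here fold what used to be separate helper transactions (next transaction id, current time) into operations prepared on the one NdbConnection, so the balance reads and the time lookup travel in the same execute round trips. A sketch of how the new prepare-style helper is meant to be used, written as it would appear inside a Bank method (error handling trimmed; prepareGetCurrTimeOp binds the read result straight into the caller's variable via getValue):

Uint64 currTime = 0;
NdbConnection* pTrans = m_ndb.startTransaction();
if (pTrans == NULL)
  return NDBT_FAILED;

// the operation is only queued here; nothing is sent until execute()
if (prepareGetCurrTimeOp(pTrans, currTime) != NDBT_OK ||
    pTrans->execute(Commit) == -1) {
  ERR(pTrans->getNdbError());
  m_ndb.closeTransaction(pTrans);
  return NDBT_FAILED;
}
// currTime now holds SYSTEM_VALUES.VALUE for the CurrentTime id
m_ndb.closeTransaction(pTrans);
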
ERR(pTrans->getNdbError()); + m_ndb.closeTransaction(pTrans); + return NDBT_FAILED; + } + /** * Check balance on from account */ @@ -205,29 +219,6 @@ int Bank::performTransactionImpl1(int fromAccountId, return NDBT_FAILED; } - check = pTrans->execute(NoCommit); - if( check == -1 ) { - const NdbError err = pTrans->getNdbError(); - m_ndb.closeTransaction(pTrans); - if (err.status == NdbError::TemporaryError){ - ERR(err); - return NDBT_TEMPORARY; - } - ERR(err); - return NDBT_FAILED; - } - - Uint32 balanceFrom = balanceFromRec->u_32_value(); - // ndbout << "balanceFrom: " << balanceFrom << endl; - - if (((Int64)balanceFrom - amount) < 0){ - m_ndb.closeTransaction(pTrans); - //ndbout << "Not enough funds" << endl; - return NOT_ENOUGH_FUNDS; - } - - Uint32 fromAccountType = fromAccountTypeRec->u_32_value(); - /** * Read balance on to account */ @@ -278,21 +269,22 @@ int Bank::performTransactionImpl1(int fromAccountId, return NDBT_FAILED; } + + Uint32 balanceFrom = balanceFromRec->u_32_value(); + // ndbout << "balanceFrom: " << balanceFrom << endl; + + if (((Int64)balanceFrom - amount) < 0){ + m_ndb.closeTransaction(pTrans); + //ndbout << "Not enough funds" << endl; + return NOT_ENOUGH_FUNDS; + } + + Uint32 fromAccountType = fromAccountTypeRec->u_32_value(); + Uint32 balanceTo = balanceToRec->u_32_value(); // ndbout << "balanceTo: " << balanceTo << endl; Uint32 toAccountType = toAccountTypeRec->u_32_value(); - // Ok, all clear to do the transaction - Uint64 transId; - if (getNextTransactionId(transId) != NDBT_OK){ - return NDBT_FAILED; - } - - Uint64 currTime; - if (getCurrTime(currTime) != NDBT_OK){ - return NDBT_FAILED; - } - /** * Update balance on from account */ @@ -1988,35 +1980,13 @@ int Bank::readSystemValue(SystemValueId sysValId, Uint64 & value){ ERR(m_ndb.getNdbError()); return NDBT_FAILED; } - - NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES"); - if (pOp == NULL) { + + if (prepareReadSystemValueOp(pTrans, sysValId, value) != NDBT_OK) { ERR(pTrans->getNdbError()); m_ndb.closeTransaction(pTrans); return NDBT_FAILED; } - - check = pOp->readTuple(); - if( check == -1 ) { - ERR(pTrans->getNdbError()); - m_ndb.closeTransaction(pTrans); - return NDBT_FAILED; - } - - check = pOp->equal("SYSTEM_VALUES_ID", sysValId); - if( check == -1 ) { - ERR(pTrans->getNdbError()); - m_ndb.closeTransaction(pTrans); - return NDBT_FAILED; - } - - NdbRecAttr* valueRec = pOp->getValue("VALUE"); - if( valueRec ==NULL ) { - ERR(pTrans->getNdbError()); - m_ndb.closeTransaction(pTrans); - return NDBT_FAILED; - } - + check = pTrans->execute(Commit); if( check == -1 ) { ERR(pTrans->getNdbError()); @@ -2024,13 +1994,38 @@ int Bank::readSystemValue(SystemValueId sysValId, Uint64 & value){ return NDBT_FAILED; } - value = valueRec->u_64_value(); - m_ndb.closeTransaction(pTrans); return NDBT_OK; } +int Bank::prepareReadSystemValueOp(NdbConnection* pTrans, SystemValueId sysValId, Uint64 & value){ + + int check; + + NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES"); + if (pOp == NULL) { + return NDBT_FAILED; + } + + check = pOp->readTuple(); + if( check == -1 ) { + return NDBT_FAILED; + } + + check = pOp->equal("SYSTEM_VALUES_ID", sysValId); + if( check == -1 ) { + return NDBT_FAILED; + } + + NdbRecAttr* valueRec = pOp->getValue("VALUE", (char *)&value); + if( valueRec == NULL ) { + return NDBT_FAILED; + } + + return NDBT_OK; +} + int Bank::writeSystemValue(SystemValueId sysValId, Uint64 value){ int check; @@ -2307,6 +2302,10 @@ int Bank::getCurrTime(Uint64 &time){ return readSystemValue(CurrentTime, 
time); } +int Bank::prepareGetCurrTimeOp(NdbConnection *pTrans, Uint64 &time){ + return prepareReadSystemValueOp(pTrans, CurrentTime, time); +} + int Bank::performSumAccounts(int maxSleepBetweenSums, int yield){ if (init() != NDBT_OK) diff --git a/ndb/test/ndbapi/bank/Bank.hpp b/ndb/test/ndbapi/bank/Bank.hpp index e6816fd7111..34c5ff51cc2 100644 --- a/ndb/test/ndbapi/bank/Bank.hpp +++ b/ndb/test/ndbapi/bank/Bank.hpp @@ -29,7 +29,7 @@ public: Bank(); - int createAndLoadBank(bool overWrite); + int createAndLoadBank(bool overWrite, int num_accounts=10); int dropBank(); int performTransactions(int maxSleepBetweenTrans = 20, int yield=0); @@ -118,6 +118,9 @@ private: int incCurrTime(Uint64 &value); int getCurrTime(Uint64 &time); + int prepareReadSystemValueOp(NdbConnection*, SystemValueId sysValId, Uint64 &time); + int prepareGetCurrTimeOp(NdbConnection*, Uint64 &time); + int createTables(); int createTable(const char* tabName); diff --git a/ndb/test/ndbapi/bank/BankLoad.cpp b/ndb/test/ndbapi/bank/BankLoad.cpp index bbaac27735b..39dc8097115 100644 --- a/ndb/test/ndbapi/bank/BankLoad.cpp +++ b/ndb/test/ndbapi/bank/BankLoad.cpp @@ -53,7 +53,7 @@ int Bank::getNumAccountTypes(){ return accountTypesSize; } -int Bank::createAndLoadBank(bool ovrWrt){ +int Bank::createAndLoadBank(bool ovrWrt, int num_accounts){ m_ndb.init(); if (m_ndb.waitUntilReady() != 0) @@ -78,7 +78,7 @@ int Bank::createAndLoadBank(bool ovrWrt){ if (loadAccountType() != NDBT_OK) return NDBT_FAILED; - if (loadAccount(10) != NDBT_OK) + if (loadAccount(num_accounts) != NDBT_OK) return NDBT_FAILED; if (loadSystemValues() != NDBT_OK) diff --git a/ndb/test/ndbapi/bank/bankCreator.cpp b/ndb/test/ndbapi/bank/bankCreator.cpp index 5331ec6ba69..301d8bda6d2 100644 --- a/ndb/test/ndbapi/bank/bankCreator.cpp +++ b/ndb/test/ndbapi/bank/bankCreator.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/bankMakeGL.cpp b/ndb/test/ndbapi/bank/bankMakeGL.cpp index 54bc559fbf9..9e2762ed8ae 100644 --- a/ndb/test/ndbapi/bank/bankMakeGL.cpp +++ b/ndb/test/ndbapi/bank/bankMakeGL.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/bankSumAccounts.cpp b/ndb/test/ndbapi/bank/bankSumAccounts.cpp index c0a903f9034..b576161b27b 100644 --- a/ndb/test/ndbapi/bank/bankSumAccounts.cpp +++ b/ndb/test/ndbapi/bank/bankSumAccounts.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/bankTimer.cpp b/ndb/test/ndbapi/bank/bankTimer.cpp index ba3165fccb4..874afd9c21e 100644 --- a/ndb/test/ndbapi/bank/bankTimer.cpp +++ b/ndb/test/ndbapi/bank/bankTimer.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; int _wait = 30; diff --git a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp index fe9b53e0c8d..e5ff9aeb918 100644 --- a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp +++ b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _help = 0; int _wait = 20; diff --git a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp index f9d974bb5f7..cf298ecc8e3 100644 --- a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp +++ b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** 
argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/bank/testBank.cpp b/ndb/test/ndbapi/bank/testBank.cpp index 77ac1172d7c..3ef2799cd3c 100644 --- a/ndb/test/ndbapi/bank/testBank.cpp +++ b/ndb/test/ndbapi/bank/testBank.cpp @@ -141,6 +141,7 @@ TESTCASE("Bank", NDBT_TESTSUITE_END(testBank); int main(int argc, const char** argv){ + ndb_init(); // Tables should not be auto created testBank.setCreateTable(false); diff --git a/ndb/test/ndbapi/benchronja.cpp b/ndb/test/ndbapi/benchronja.cpp index ce0aee35e8f..91b2a041186 100644 --- a/ndb/test/ndbapi/benchronja.cpp +++ b/ndb/test/ndbapi/benchronja.cpp @@ -66,6 +66,7 @@ static int ThreadReady[MAXTHREADS]; static int ThreadStart[MAXTHREADS]; NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){ + ndb_init(); ThreadNdb tabThread[MAXTHREADS]; int i = 0 ; diff --git a/ndb/test/ndbapi/bulk_copy.cpp b/ndb/test/ndbapi/bulk_copy.cpp index 18881cae216..8821a92fb27 100644 --- a/ndb/test/ndbapi/bulk_copy.cpp +++ b/ndb/test/ndbapi/bulk_copy.cpp @@ -221,6 +221,7 @@ int insertFile(Ndb* pNdb, int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; int _help = 0; diff --git a/ndb/test/ndbapi/cdrserver.cpp b/ndb/test/ndbapi/cdrserver.cpp index 8354d28f53f..8d15061e94b 100644 --- a/ndb/test/ndbapi/cdrserver.cpp +++ b/ndb/test/ndbapi/cdrserver.cpp @@ -113,6 +113,7 @@ using namespace std; int main(int argc, const char** argv) { + ndb_init(); /******** NDB ***********/ /* Ndb MyNdb( "TEST_DB" ); diff --git a/ndb/test/ndbapi/celloDb.cpp b/ndb/test/ndbapi/celloDb.cpp index ec61e783585..2d6401c355a 100644 --- a/ndb/test/ndbapi/celloDb.cpp +++ b/ndb/test/ndbapi/celloDb.cpp @@ -73,6 +73,7 @@ static int failed = 0 ; NDB_COMMAND(celloDb, "celloDb", "celloDb", "celloDb", 65535) { + ndb_init(); int tTableId; int i; diff --git a/ndb/test/ndbapi/create_all_tabs.cpp b/ndb/test/ndbapi/create_all_tabs.cpp index 55d04888144..97236b98b36 100644 --- a/ndb/test/ndbapi/create_all_tabs.cpp +++ b/ndb/test/ndbapi/create_all_tabs.cpp @@ -25,6 +25,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _temp = false; int _help = 0; diff --git a/ndb/test/ndbapi/create_tab.cpp b/ndb/test/ndbapi/create_tab.cpp index c2e3b7f64ea..f3f18982ed0 100644 --- a/ndb/test/ndbapi/create_tab.cpp +++ b/ndb/test/ndbapi/create_tab.cpp @@ -25,6 +25,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _temp = false; int _help = 0; diff --git a/ndb/test/ndbapi/drop_all_tabs.cpp b/ndb/test/ndbapi/drop_all_tabs.cpp index 59c57396acd..c024a81a5e6 100644 --- a/ndb/test/ndbapi/drop_all_tabs.cpp +++ b/ndb/test/ndbapi/drop_all_tabs.cpp @@ -23,6 +23,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _help = 0; struct getargs args[] = { diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp index 9192ec21b93..8c0ba46130c 100644 --- a/ndb/test/ndbapi/flexAsynch.cpp +++ b/ndb/test/ndbapi/flexAsynch.cpp @@ -145,6 +145,7 @@ tellThreads(StartType what) NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535) { + ndb_init(); ThreadNdb* pThreadData; int tLoops=0, i; int returnValue = NDBT_OK; diff --git a/ndb/test/ndbapi/flexBench.cpp b/ndb/test/ndbapi/flexBench.cpp index 38c8f6e280f..b19944498f4 100644 --- a/ndb/test/ndbapi/flexBench.cpp +++ b/ndb/test/ndbapi/flexBench.cpp @@ -281,6 +281,7 @@ tellThreads(ThreadData* pt, StartType what) NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535) { + ndb_init(); ThreadData* pThreadsData; int tLoops = 0, i; int 
returnValue = NDBT_OK; diff --git a/ndb/test/ndbapi/flexHammer.cpp b/ndb/test/ndbapi/flexHammer.cpp index c1c47923de9..80cc7c5a53f 100644 --- a/ndb/test/ndbapi/flexHammer.cpp +++ b/ndb/test/ndbapi/flexHammer.cpp @@ -178,6 +178,7 @@ tellThreads(ThreadNdb* threadArrayP, const StartType what) NDB_COMMAND(flexHammer, "flexHammer", "flexHammer", "flexHammer", 65535) //main(int argc, const char** argv) { + ndb_init(); ThreadNdb* pThreads = NULL; // Pointer to thread data array Ndb* pMyNdb = NULL; // Pointer to Ndb object int tLoops = 0; diff --git a/ndb/test/ndbapi/flexScan.cpp b/ndb/test/ndbapi/flexScan.cpp index 5b5b4dde730..b09d71fb010 100644 --- a/ndb/test/ndbapi/flexScan.cpp +++ b/ndb/test/ndbapi/flexScan.cpp @@ -297,6 +297,7 @@ static int checkThreadResults(ThreadNdb *threadArrayP, char *action) NDB_COMMAND(flexScan, "flexScan", "flexScan", "flexScan", 65535) { + ndb_init(); ThreadNdb* pThreads = NULL; Ndb* pMyNdb = NULL; int tLoops = 0; diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp index c0ff31d1677..162fc080218 100644 --- a/ndb/test/ndbapi/flexTT.cpp +++ b/ndb/test/ndbapi/flexTT.cpp @@ -171,6 +171,7 @@ tellThreads(StartType what) NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535) { + ndb_init(); ThreadNdb* pThreadData; int returnValue = NDBT_OK; int i; diff --git a/ndb/test/ndbapi/flexTimedAsynch.cpp b/ndb/test/ndbapi/flexTimedAsynch.cpp index 761be53fdd3..27380cc79fd 100644 --- a/ndb/test/ndbapi/flexTimedAsynch.cpp +++ b/ndb/test/ndbapi/flexTimedAsynch.cpp @@ -174,6 +174,7 @@ void deleteAttributeSpace(){ NDB_COMMAND(flexTimedAsynch, "flexTimedAsynch", "flexTimedAsynch [-tpoilcas]", "flexTimedAsynch", 65535) { + ndb_init(); ThreadNdb tabThread[MAXTHREADS]; int tLoops=0; int returnValue; diff --git a/ndb/test/ndbapi/flex_bench_mysql.cpp b/ndb/test/ndbapi/flex_bench_mysql.cpp index 7cc883ab3e6..8e1fbcd9058 100644 --- a/ndb/test/ndbapi/flex_bench_mysql.cpp +++ b/ndb/test/ndbapi/flex_bench_mysql.cpp @@ -308,6 +308,7 @@ tellThreads(ThreadData* pt, StartType what) NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535) { + ndb_init(); ThreadData* pThreadsData; int tLoops = 0; int returnValue = NDBT_OK; diff --git a/ndb/test/ndbapi/index.cpp b/ndb/test/ndbapi/index.cpp index 508186de529..c22da594164 100644 --- a/ndb/test/ndbapi/index.cpp +++ b/ndb/test/ndbapi/index.cpp @@ -81,63 +81,63 @@ static void createTable(Ndb &myNdb, bool storeInACC, bool twoKey, bool longKey) int res; column.setName("NAME"); - column.setPrimaryKey(true); column.setType(NdbDictionary::Column::Char); column.setLength((longKey)? 
1024 // 1KB => long key :12); + column.setPrimaryKey(true); column.setNullable(false); table.addColumn(column); if (twoKey) { column.setName("KEY2"); - column.setPrimaryKey(true); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(true); column.setNullable(false); table.addColumn(column); } column.setName("PNUM1"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("PNUM2"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("PNUM3"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("PNUM4"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("AGE"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); column.setName("STRING_AGE"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Char); column.setLength(1); column.setLength(256); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); @@ -771,6 +771,7 @@ static void dropIndex(Ndb &myNdb, unsigned int noOfIndexes) NDB_COMMAND(indexTest, "indexTest", "indexTest", "indexTest", 65535) { + ndb_init(); bool createTableOp, createIndexOp, dropIndexOp, insertOp, updateOp, deleteOp, readOp, readIndexOp, updateIndexOp, deleteIndexOp, twoKey, longKey; unsigned int noOfTuples = 1; unsigned int noOfOperations = 1; diff --git a/ndb/test/ndbapi/index2.cpp b/ndb/test/ndbapi/index2.cpp index e49113d2f1b..f739468d7df 100644 --- a/ndb/test/ndbapi/index2.cpp +++ b/ndb/test/ndbapi/index2.cpp @@ -81,16 +81,16 @@ static void createTable(Ndb &myNdb, bool storeInACC, bool twoKey, bool longKey) int res; column.setName("X"); - column.setPrimaryKey(true); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(true); column.setNullable(false); table.addColumn(column); column.setName("Y"); - column.setPrimaryKey(false); column.setType(NdbDictionary::Column::Unsigned); column.setLength(1); + column.setPrimaryKey(false); column.setNullable(false); table.addColumn(column); @@ -608,6 +608,7 @@ static void dropIndex(Ndb &myNdb, unsigned int noOfIndexes) NDB_COMMAND(indexTest, "indexTest", "indexTest", "indexTest", 65535) { + ndb_init(); bool createTableOp, createIndexOp, dropIndexOp, insertOp, updateOp, deleteOp, readOp, readIndexOp, updateIndexOp, deleteIndexOp, twoKey, longKey; unsigned int noOfTuples = 1; unsigned int noOfOperations = 1; diff --git a/ndb/test/ndbapi/initronja.cpp b/ndb/test/ndbapi/initronja.cpp index b3215104822..3ce274e4319 100644 --- a/ndb/test/ndbapi/initronja.cpp +++ b/ndb/test/ndbapi/initronja.cpp @@ -46,6 +46,7 @@ static char attrName[MAXATTR][MAXSTRLEN]; inline int InsertRecords(Ndb*, int) ; NDB_COMMAND(initronja, "initronja", "initronja", "initronja", 65535){ + ndb_init(); Ndb* pNdb = NULL ; NdbSchemaCon *MySchemaTransaction = NULL ; diff --git a/ndb/test/ndbapi/interpreterInTup.cpp 
b/ndb/test/ndbapi/interpreterInTup.cpp index 47960cd5d12..20d84e6e96d 100644 --- a/ndb/test/ndbapi/interpreterInTup.cpp +++ b/ndb/test/ndbapi/interpreterInTup.cpp @@ -105,6 +105,7 @@ int bTestPassed = 0; int main(int argc, const char** argv) { + ndb_init(); int operationType = 0; int tupTest = 0; diff --git a/ndb/test/ndbapi/mainAsyncGenerator.cpp b/ndb/test/ndbapi/mainAsyncGenerator.cpp index f613c66d07b..16cb50e160f 100644 --- a/ndb/test/ndbapi/mainAsyncGenerator.cpp +++ b/ndb/test/ndbapi/mainAsyncGenerator.cpp @@ -282,6 +282,7 @@ threadRoutine(void *arg) NDB_COMMAND(DbAsyncGenerator, "DbAsyncGenerator", "DbAsyncGenerator", "DbAsyncGenerator", 65535) { + ndb_init(); int i; int j; int k; diff --git a/ndb/test/ndbapi/msa.cpp b/ndb/test/ndbapi/msa.cpp index 7a734f9cb79..e39f7a8c64a 100644 --- a/ndb/test/ndbapi/msa.cpp +++ b/ndb/test/ndbapi/msa.cpp @@ -971,6 +971,7 @@ void ShowHelp(const char* szCmd) int main(int argc, char* argv[]) { + ndb_init(); int iRes = -1; g_nNumThreads = 0; g_nMaxCallsPerSecond = 0; diff --git a/ndb/test/ndbapi/restarter.cpp b/ndb/test/ndbapi/restarter.cpp index 9a522f5dcac..d6831494b48 100644 --- a/ndb/test/ndbapi/restarter.cpp +++ b/ndb/test/ndbapi/restarter.cpp @@ -28,6 +28,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _loops = 10; diff --git a/ndb/test/ndbapi/restarter2.cpp b/ndb/test/ndbapi/restarter2.cpp index f2bcf6f8e7b..846748a7bba 100644 --- a/ndb/test/ndbapi/restarter2.cpp +++ b/ndb/test/ndbapi/restarter2.cpp @@ -26,6 +26,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _loops = 10; diff --git a/ndb/test/ndbapi/restarts.cpp b/ndb/test/ndbapi/restarts.cpp index 0ec2883d53c..184e754de4a 100644 --- a/ndb/test/ndbapi/restarts.cpp +++ b/ndb/test/ndbapi/restarts.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _restartName = NULL; int _loops = 1; diff --git a/ndb/test/ndbapi/size.cpp b/ndb/test/ndbapi/size.cpp index c506771ebde..ff178b11d68 100644 --- a/ndb/test/ndbapi/size.cpp +++ b/ndb/test/ndbapi/size.cpp @@ -19,6 +19,7 @@ int main(void) { + ndb_init(); printf("cdrstruct=%d\n",sizeof(struct cdr_record)); printf("long int=%d\n",sizeof(long int)); printf("int=%d\n",sizeof(int)); diff --git a/ndb/test/ndbapi/slow_select.cpp b/ndb/test/ndbapi/slow_select.cpp index a953e1539d0..625dbc34457 100644 --- a/ndb/test/ndbapi/slow_select.cpp +++ b/ndb/test/ndbapi/slow_select.cpp @@ -36,6 +36,7 @@ static void lookup(); int main(void){ + ndb_init(); Ndb g_ndb("test"); g_ndb.init(1024); diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp index d328a7db292..509cd4780bf 100644 --- a/ndb/test/ndbapi/testBackup.cpp +++ b/ndb/test/ndbapi/testBackup.cpp @@ -215,7 +215,7 @@ int runDropTable(NDBT_Context* ctx, NDBT_Step* step){ int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){ Bank bank; int overWriteExisting = true; - if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK) + if (bank.createAndLoadBank(overWriteExisting, 10) != NDBT_OK) return NDBT_FAILED; return NDBT_OK; } @@ -428,6 +428,15 @@ TESTCASE("BackupBank", INITIALIZER(runCreateBank); STEP(runBankTimer); STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); + STEP(runBankTransactions); STEP(runBankGL); // TODO STEP(runBankSum); 
STEP(runBackupBank); @@ -473,6 +482,7 @@ TESTCASE("FailSlave", NDBT_TESTSUITE_END(testBackup); int main(int argc, const char** argv){ + ndb_init(); return testBackup.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testBasic.cpp b/ndb/test/ndbapi/testBasic.cpp index 26622f9b066..7d03016b87a 100644 --- a/ndb/test/ndbapi/testBasic.cpp +++ b/ndb/test/ndbapi/testBasic.cpp @@ -1278,6 +1278,7 @@ TESTCASE("MassiveTransaction", NDBT_TESTSUITE_END(testBasic); int main(int argc, const char** argv){ + ndb_init(); return testBasic.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testBasicAsynch.cpp b/ndb/test/ndbapi/testBasicAsynch.cpp index a97920e53da..6daa22fdc6a 100644 --- a/ndb/test/ndbapi/testBasicAsynch.cpp +++ b/ndb/test/ndbapi/testBasicAsynch.cpp @@ -181,6 +181,7 @@ TESTCASE("PkDeleteAsynch", NDBT_TESTSUITE_END(testBasicAsynch); int main(int argc, const char** argv){ + ndb_init(); return testBasicAsynch.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp index 64881ca39ab..e18f4a8bd1a 100644 --- a/ndb/test/ndbapi/testBlobs.cpp +++ b/ndb/test/ndbapi/testBlobs.cpp @@ -1338,6 +1338,7 @@ static struct { NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535) { + ndb_init(); while (++argv, --argc > 0) { const char* arg = argv[0]; if (strcmp(arg, "-batch") == 0) { diff --git a/ndb/test/ndbapi/testDataBuffers.cpp b/ndb/test/ndbapi/testDataBuffers.cpp index 2e29dbb0d7b..94658d5c6b9 100644 --- a/ndb/test/ndbapi/testDataBuffers.cpp +++ b/ndb/test/ndbapi/testDataBuffers.cpp @@ -545,6 +545,7 @@ testcase(int flag) NDB_COMMAND(testDataBuffers, "testDataBuffers", "testDataBuffers", "testDataBuffers", 65535) { + ndb_init(); while (++argv, --argc > 0) { char const* p = argv[0]; if (*p++ != '-' || strlen(p) != 1) diff --git a/ndb/test/ndbapi/testDeadlock.cpp b/ndb/test/ndbapi/testDeadlock.cpp index f51b3cea1e5..a445823b8a8 100644 --- a/ndb/test/ndbapi/testDeadlock.cpp +++ b/ndb/test/ndbapi/testDeadlock.cpp @@ -491,6 +491,7 @@ wl1822_main(char scantx) NDB_COMMAND(testOdbcDriver, "testDeadlock", "testDeadlock", "testDeadlock", 65535) { + ndb_init(); while (++argv, --argc > 0) { const char* arg = argv[0]; if (strcmp(arg, "-scan") == 0) { diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index a0c7bb1414b..7cba5ce4cc8 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -1128,9 +1128,9 @@ runCreateAutoincrementTable(NDBT_Context* ctx, NDBT_Step* step){ myTable.setName(tabname); myColumn.setName("ATTR1"); - myColumn.setPrimaryKey(true); myColumn.setType(NdbDictionary::Column::Unsigned); myColumn.setLength(1); + myColumn.setPrimaryKey(true); myColumn.setNullable(false); myColumn.setAutoIncrement(true); if (startvalue != ~0) // check that default value starts with 1 @@ -1576,6 +1576,7 @@ TESTCASE("DictionaryPerf", NDBT_TESTSUITE_END(testDict); int main(int argc, const char** argv){ + ndb_init(); // Tables should not be auto created testDict.setCreateTable(false); myRandom48Init(NdbTick_CurrentMillisecond()); diff --git a/ndb/test/ndbapi/testGrep.cpp b/ndb/test/ndbapi/testGrep.cpp index 0bf84cb4ec8..713aefbeafa 100644 --- a/ndb/test/ndbapi/testGrep.cpp +++ b/ndb/test/ndbapi/testGrep.cpp @@ -533,6 +533,7 @@ TESTCASE("FailSlave", NDBT_TESTSUITE_END(testGrep); int main(int argc, const char** argv){ + ndb_init(); return testGrep.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testGrepVerify.cpp b/ndb/test/ndbapi/testGrepVerify.cpp index 05445c1ba1b..52dcda9a162 100644 --- 
a/ndb/test/ndbapi/testGrepVerify.cpp +++ b/ndb/test/ndbapi/testGrepVerify.cpp @@ -40,6 +40,7 @@ continue; } int main(int argc, const char** argv){ + ndb_init(); const char * connectString = NULL; diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp index 6ebbfd8b680..bef3b310c96 100644 --- a/ndb/test/ndbapi/testIndex.cpp +++ b/ndb/test/ndbapi/testIndex.cpp @@ -1528,6 +1528,7 @@ TESTCASE("UniqueNull", NDBT_TESTSUITE_END(testIndex); int main(int argc, const char** argv){ + ndb_init(); return testIndex.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testInterpreter.cpp b/ndb/test/ndbapi/testInterpreter.cpp index 9c584d6f581..0baba33d2b2 100644 --- a/ndb/test/ndbapi/testInterpreter.cpp +++ b/ndb/test/ndbapi/testInterpreter.cpp @@ -224,6 +224,7 @@ TESTCASE("NdbErrorOperation", NDBT_TESTSUITE_END(testInterpreter); int main(int argc, const char** argv){ + ndb_init(); // TABLE("T1"); return testInterpreter.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testMgm.cpp b/ndb/test/ndbapi/testMgm.cpp index d5b9372cc9b..ef653d3f972 100644 --- a/ndb/test/ndbapi/testMgm.cpp +++ b/ndb/test/ndbapi/testMgm.cpp @@ -178,6 +178,7 @@ TESTCASE("SingleUserMode", NDBT_TESTSUITE_END(testMgm); int main(int argc, const char** argv){ + ndb_init(); myRandom48Init(NdbTick_CurrentMillisecond()); return testMgm.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp index 5b171d42578..47987629fe3 100644 --- a/ndb/test/ndbapi/testNdbApi.cpp +++ b/ndb/test/ndbapi/testNdbApi.cpp @@ -1006,6 +1006,7 @@ TESTCASE("NdbErrorOperation", NDBT_TESTSUITE_END(testNdbApi); int main(int argc, const char** argv){ + ndb_init(); // TABLE("T1"); return testNdbApi.execute(argc, argv); } diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index 89b38c78e71..6bfe59f8d3f 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -434,6 +434,7 @@ TESTCASE("StopOnError", NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ + ndb_init(); #if 0 // It might be interesting to have longer defaults for num // loops in this test diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index 29d03f0c33e..f9eb3514926 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -28,6 +28,7 @@ #include #include #include +#include // options @@ -37,6 +38,8 @@ struct Opt { const char* m_bound; const char* m_case; bool m_core; + const char* m_csname; + CHARSET_INFO* m_cs; bool m_dups; NdbDictionary::Object::FragmentType m_fragtype; unsigned m_idxloop; @@ -59,6 +62,8 @@ struct Opt { m_bound("01234"), m_case(0), m_core(false), + m_csname("latin1_bin"), + m_cs(0), m_dups(false), m_fragtype(NdbDictionary::Object::FragUndefined), m_idxloop(4), @@ -94,6 +99,7 @@ printhelp() << " -bound xyz use only these bound types 0-4 [" << d.m_bound << "]" << endl << " -case abc only given test cases (letters a-z)" << endl << " -core core dump on error [" << d.m_core << "]" << endl + << " -csname S charset (collation) of non-pk char column [" << d.m_csname << "]" << endl << " -dups allow duplicate tuples from index scan [" << d.m_dups << "]" << endl << " -fragtype T fragment type single/small/medium/large" << endl << " -index xyz only given index numbers (digits 1-9)" << endl @@ -979,10 +985,14 @@ createtable(Par par) for (unsigned k = 0; k < tab.m_cols; k++) { const Col& col = tab.m_col[k]; NdbDictionary::Column c(col.m_name); - c.setPrimaryKey(col.m_pk); 
c.setType(col.m_type); c.setLength(col.m_length); + c.setPrimaryKey(col.m_pk); c.setNullable(col.m_nullable); + if (c.getCharset()) { // test if char type + if (! col.m_pk) + c.setCharset(par.m_cs); + } t.addColumn(c); } con.m_dic = con.m_ndb->getDictionary(); @@ -2236,9 +2246,8 @@ pkreadfast(Par par, unsigned count) keyrow.calc(par, i); CHK(keyrow.selrow(par) == 0); NdbRecAttr* rec; - CHK(con.getValue((Uint32)0, rec) == 0); - CHK(con.executeScan() == 0); // get 1st column + CHK(con.getValue((Uint32)0, rec) == 0); CHK(con.execute(Commit) == 0); con.closeTransaction(); } @@ -3150,6 +3159,10 @@ runtest(Par par) LL1("start"); if (par.m_seed != 0) srandom(par.m_seed); + assert(par.m_csname != 0); + CHARSET_INFO* cs; + CHK((cs = get_charset_by_name(par.m_csname, MYF(0))) != 0 || (cs = get_charset_by_csname(par.m_csname, MY_CS_PRIMARY, MYF(0))) != 0); + par.m_cs = cs; Con con; CHK(con.connect() == 0); par.m_con = &con; @@ -3201,6 +3214,7 @@ runtest(Par par) NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) { + ndb_init(); while (++argv, --argc > 0) { const char* arg = argv[0]; if (*arg != '-') { @@ -3232,6 +3246,12 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) g_opt.m_core = true; continue; } + if (strcmp(arg, "-csname") == 0) { + if (++argv, --argc > 0) { + g_opt.m_csname = strdup(argv[0]); + continue; + } + } if (strcmp(arg, "-dups") == 0) { g_opt.m_dups = true; continue; diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp index bb58e69e898..ba41e1d1c40 100644 --- a/ndb/test/ndbapi/testOperations.cpp +++ b/ndb/test/ndbapi/testOperations.cpp @@ -230,6 +230,7 @@ runClearTable(NDBT_Context* ctx, NDBT_Step* step){ int main(int argc, const char** argv){ + ndb_init(); NDBT_TestSuite ts("testOperations"); for(Uint32 i = 0; i 0) NdbSleep_MilliSleep(doSleep); - if (first_batch || !oneTrans) { + // if (first_batch || !oneTrans) { + if (first_batch) { first_batch = false; pTrans = pNdb->startTransaction(); @@ -774,8 +775,10 @@ HugoTransactions::loadTable(Ndb* pNdb, // Execute the transaction and insert the record if (!oneTrans || (c + batch) >= records) { - closeTrans = true; + // closeTrans = true; + closeTrans = false; check = pTrans->execute( Commit ); + pTrans->restart(); } else { closeTrans = false; check = pTrans->execute( NoCommit ); diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index ba316bac01b..4ff94bcf296 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -15,7 +15,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include -#include #include "NDBT.hpp" #include "NDBT_Test.hpp" @@ -990,7 +989,6 @@ int NDBT_TestSuite::execute(int argc, const char** argv){ } #ifndef DBUG_OFF - my_init(); if (debug_option) DBUG_PUSH(debug_option); #endif diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index f33c5d8c313..71b4b49b3a6 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -140,14 +140,16 @@ NdbBackup::execRestore(bool _restore_data, */ snprintf(buf, buf_len, - "scp %s:%s/BACKUP/BACKUP-%d/* .", + "scp %s:%s/BACKUP/BACKUP-%d/BACKUP-%d*.%d.* .", host, path, - _backup_id); + _backup_id, + _backup_id, + _node_id); ndbout << "buf: "<< buf < 1); - // restore metadata first - res = execRestore(false, true, ndbNodes[0].node_id, _backup_id); - + // restore metadata first and data for first node + res = execRestore(true, true, ndbNodes[0].node_id, _backup_id); - // Restore data once for each node - 
for(size_t i = 0; i < ndbNodes.size(); i++){ - res = execRestore(true, false, ndbNodes[i].node_id, _backup_id); - } + // Restore data once for each node + for(size_t i = 1; i < ndbNodes.size(); i++){ + res = execRestore(true, false, ndbNodes[i].node_id, _backup_id); } return 0; diff --git a/ndb/test/tools/copy_tab.cpp b/ndb/test/tools/copy_tab.cpp index 33ce8e01d9a..30141acaa78 100644 --- a/ndb/test/tools/copy_tab.cpp +++ b/ndb/test/tools/copy_tab.cpp @@ -24,6 +24,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _to_tabname = NULL; diff --git a/ndb/test/tools/cpcc.cpp b/ndb/test/tools/cpcc.cpp index e30d458ffee..dd59e577f2c 100644 --- a/ndb/test/tools/cpcc.cpp +++ b/ndb/test/tools/cpcc.cpp @@ -173,6 +173,7 @@ add_hosts(Vector & hosts, BaseString list){ int main(int argc, const char** argv){ + ndb_init(); int help = 0; const char *cmd=0, *name=0, *group=0, *owner=0; int list = 0, start = 0, stop = 0, rm = 0; diff --git a/ndb/test/tools/create_index.cpp b/ndb/test/tools/create_index.cpp index f883755ea24..75a657522f6 100644 --- a/ndb/test/tools/create_index.cpp +++ b/ndb/test/tools/create_index.cpp @@ -26,6 +26,7 @@ int main(int argc, const char** argv){ + ndb_init(); const char* _dbname = "TEST_DB"; int _help = 0; diff --git a/ndb/test/tools/hugoCalculator.cpp b/ndb/test/tools/hugoCalculator.cpp index 7f2751be2ba..82c4bbff1a4 100644 --- a/ndb/test/tools/hugoCalculator.cpp +++ b/ndb/test/tools/hugoCalculator.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv) { + ndb_init(); int _row = 0; int _column = 0; int _updates = 0; diff --git a/ndb/test/tools/hugoFill.cpp b/ndb/test/tools/hugoFill.cpp index dee6ce2e6c8..6253bd1bb12 100644 --- a/ndb/test/tools/hugoFill.cpp +++ b/ndb/test/tools/hugoFill.cpp @@ -25,6 +25,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _records = 0; const char* _tabname = NULL; diff --git a/ndb/test/tools/hugoLoad.cpp b/ndb/test/tools/hugoLoad.cpp index be7f878d106..c697ad22aad 100644 --- a/ndb/test/tools/hugoLoad.cpp +++ b/ndb/test/tools/hugoLoad.cpp @@ -24,6 +24,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _records = 0; const char* _tabname = NULL; diff --git a/ndb/test/tools/hugoLockRecords.cpp b/ndb/test/tools/hugoLockRecords.cpp index e2c2cd13f00..629408d401d 100644 --- a/ndb/test/tools/hugoLockRecords.cpp +++ b/ndb/test/tools/hugoLockRecords.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoPkDelete.cpp b/ndb/test/tools/hugoPkDelete.cpp index 1855f19796f..78a90ebcb46 100644 --- a/ndb/test/tools/hugoPkDelete.cpp +++ b/ndb/test/tools/hugoPkDelete.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoPkRead.cpp b/ndb/test/tools/hugoPkRead.cpp index 50351f08195..cf08b137e8e 100644 --- a/ndb/test/tools/hugoPkRead.cpp +++ b/ndb/test/tools/hugoPkRead.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoPkReadRecord.cpp b/ndb/test/tools/hugoPkReadRecord.cpp index 85f20bd2060..38b7cae2bf4 100644 --- a/ndb/test/tools/hugoPkReadRecord.cpp +++ b/ndb/test/tools/hugoPkReadRecord.cpp @@ -28,6 +28,7 @@ int main(int argc, const char** argv) { + ndb_init(); int _row = 0; int _hex = 0; int _primaryKey = 0; diff --git a/ndb/test/tools/hugoPkUpdate.cpp b/ndb/test/tools/hugoPkUpdate.cpp index 
e7edc3a991d..286be14a01c 100644 --- a/ndb/test/tools/hugoPkUpdate.cpp +++ b/ndb/test/tools/hugoPkUpdate.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoScanRead.cpp b/ndb/test/tools/hugoScanRead.cpp index 47ea8f4a8a7..cdfdcea4654 100644 --- a/ndb/test/tools/hugoScanRead.cpp +++ b/ndb/test/tools/hugoScanRead.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/hugoScanUpdate.cpp b/ndb/test/tools/hugoScanUpdate.cpp index 3e2255ca0f3..96a487a02bf 100644 --- a/ndb/test/tools/hugoScanUpdate.cpp +++ b/ndb/test/tools/hugoScanUpdate.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); int _records = 0; int _loops = 1; diff --git a/ndb/test/tools/restart.cpp b/ndb/test/tools/restart.cpp index 88cfb231a72..9ad20801fd7 100644 --- a/ndb/test/tools/restart.cpp +++ b/ndb/test/tools/restart.cpp @@ -27,6 +27,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _initial = 0; diff --git a/ndb/test/tools/transproxy.cpp b/ndb/test/tools/transproxy.cpp index 384a8a34f03..90e216ec785 100644 --- a/ndb/test/tools/transproxy.cpp +++ b/ndb/test/tools/transproxy.cpp @@ -346,6 +346,7 @@ start() int main(int av, char** ac) { + ndb_init(); debug("start"); hostname = "ndb-srv7"; if (Ndb_getInAddr(&hostaddr.sin_addr, hostname) != 0) { diff --git a/ndb/test/tools/verify_index.cpp b/ndb/test/tools/verify_index.cpp index 1295b657e9b..6c8e304e1a1 100644 --- a/ndb/test/tools/verify_index.cpp +++ b/ndb/test/tools/verify_index.cpp @@ -27,6 +27,7 @@ int main(int argc, const char** argv){ + ndb_init(); int _parallelism = 240; const char* _tabname = NULL; const char* _indexname = NULL; diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp index 5110947c6a2..aa5798376ae 100644 --- a/ndb/tools/delete_all.cpp +++ b/ndb/tools/delete_all.cpp @@ -26,6 +26,7 @@ static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism=240); int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp index 859a9544a79..0ab11a0fdd2 100644 --- a/ndb/tools/desc.cpp +++ b/ndb/tools/desc.cpp @@ -22,6 +22,7 @@ int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; int _unqualified = 0; diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp index 327f15741c9..70c29461c23 100644 --- a/ndb/tools/drop_index.cpp +++ b/ndb/tools/drop_index.cpp @@ -23,6 +23,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp index 70e5d85aabe..15c229cb0fb 100644 --- a/ndb/tools/drop_tab.cpp +++ b/ndb/tools/drop_tab.cpp @@ -23,6 +23,7 @@ #include int main(int argc, const char** argv){ + ndb_init(); const char* _tabname = NULL; const char* _dbname = "TEST_DB"; diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp index d6465f3214f..4b24929ee4b 100644 --- a/ndb/tools/listTables.cpp +++ b/ndb/tools/listTables.cpp @@ -22,7 +22,6 @@ */ #include -#include #include #include @@ -167,6 +166,7 @@ const char *debug_option= 0; #endif int main(int argc, const char** argv){ + ndb_init(); int _loops = 1; const char* _tabname = NULL; const char* _dbname = "TEST_DB"; @@ -209,7 
+209,6 @@ int main(int argc, const char** argv){ _tabname = argv[optind]; #ifndef DBUG_OFF - my_init(); if (debug_option) DBUG_PUSH(debug_option); #endif diff --git a/ndb/tools/ndbsql.cpp b/ndb/tools/ndbsql.cpp index 6af5f47f6f4..1997e4abebd 100644 --- a/ndb/tools/ndbsql.cpp +++ b/ndb/tools/ndbsql.cpp @@ -671,6 +671,7 @@ void print_help_virtual() { int main(int argc, const char** argv) { + ndb_init(); const char* usage = "Usage: ndbsql [-h] [-d dsn] [-f file] [stmt]\n-h help\n-d \n-f batch mode\nstmt single SQL statement\n"; const char* dsn = "TEST_DB"; bool helpFlg = false, batchMode = false; diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp index eb95947fc0f..8fb8437ba5f 100644 --- a/ndb/tools/select_all.cpp +++ b/ndb/tools/select_all.cpp @@ -16,7 +16,6 @@ #include -#include #include @@ -42,6 +41,7 @@ int scanReadRecords(Ndb*, bool orderby); int main(int argc, const char** argv){ + ndb_init(); int _parallelism = 240; const char* _delimiter = "\t"; int _header = true; @@ -89,7 +89,6 @@ int main(int argc, const char** argv){ _tabname = argv[optind]; #ifndef DBUG_OFF - my_init(); if (debug_option) DBUG_PUSH(debug_option); #endif diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp index bb7c9dea49b..6650421e637 100644 --- a/ndb/tools/select_count.cpp +++ b/ndb/tools/select_count.cpp @@ -33,6 +33,7 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab, UtilTransactions::ScanLock lock); int main(int argc, const char** argv){ + ndb_init(); const char* _dbname = "TEST_DB"; int _parallelism = 240; int _help = 0; diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp index 63469c6d746..c27b46c9356 100644 --- a/ndb/tools/waiter.cpp +++ b/ndb/tools/waiter.cpp @@ -30,6 +30,7 @@ int waitClusterStatus(const char* _addr, ndb_mgm_node_status _status, unsigned int _timeout); int main(int argc, const char** argv){ + ndb_init(); const char* _hostName = NULL; int _no_contact = 0; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index da32476ae74..3b332d44d53 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -138,6 +138,16 @@ static int ndb_to_mysql_error(const NdbError *err) } + +inline +int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans) +{ + int m_batch_execute= 0; + if (false && m_batch_execute) + return 0; + return trans->execute(NoCommit); +} + /* Place holder for ha_ndbcluster thread specific data */ @@ -217,7 +227,8 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) void ha_ndbcluster::no_uncommitted_rows_update(int c) { DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); - struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; + struct Ndb_table_local_info *info= + (struct Ndb_table_local_info *)m_table_info; info->no_uncommitted_rows_count+= c; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", ((const NDBTAB *)m_table)->getTableId(), @@ -1023,7 +1034,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) } } - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1135,7 +1146,7 @@ inline int ha_ndbcluster::next_result(byte *buf) */ if (ops_pending && blobs_pending) { - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) DBUG_RETURN(ndb_err(trans)); ops_pending= 0; blobs_pending= false; @@ -1163,7 +1174,7 @@ inline int ha_ndbcluster::next_result(byte *buf) DBUG_PRINT("info", ("ops_pending: %d", ops_pending)); if (current_thd->transaction.on) { 
- if (ops_pending && (trans->execute(NoCommit) != 0)) + if (ops_pending && (execute_no_commit(this,trans) != 0)) DBUG_RETURN(ndb_err(trans)); } else @@ -1503,7 +1514,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) ERR_RETURN(op->getNdbError()); } - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); @@ -1591,7 +1602,7 @@ int ha_ndbcluster::write_row(byte *record) bulk_insert_not_flushed= false; if (thd->transaction.on) { - if (trans->execute(NoCommit) != 0) + if (execute_no_commit(this,trans) != 0) { skip_auto_increment= true; no_uncommitted_rows_execute_failure(); @@ -1766,7 +1777,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) } // Execute update operation - if (!cursor && trans->execute(NoCommit) != 0) { + if (!cursor && execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -1836,7 +1847,7 @@ int ha_ndbcluster::delete_row(const byte *record) } // Execute delete operation - if (trans->execute(NoCommit) != 0) { + if (execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -2266,7 +2277,7 @@ int ha_ndbcluster::close_scan() deleteing/updating transaction before closing the scan */ DBUG_PRINT("info", ("ops_pending: %d", ops_pending)); - if (trans->execute(NoCommit) != 0) { + if (execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -2573,7 +2584,7 @@ int ha_ndbcluster::end_bulk_insert() "rows_inserted:%d, bulk_insert_rows: %d", rows_inserted, bulk_insert_rows)); bulk_insert_not_flushed= false; - if (trans->execute(NoCommit) != 0) { + if (execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); my_errno= error= ndb_err(trans); } @@ -2915,6 +2926,8 @@ static int create_ndb_column(NDBCOL &col, { // Set name col.setName(field->field_name); + // Get char set + CHARSET_INFO *cs= field->charset(); // Set type and sizes const enum enum_field_types mysql_type= field->real_type(); switch (mysql_type) { @@ -2986,15 +2999,19 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_STRING: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Binary); - else + else { col.setType(NDBCOL::Char); + col.setCharset(cs); + } col.setLength(field->pack_length()); break; case MYSQL_TYPE_VAR_STRING: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Varbinary); - else + else { col.setType(NDBCOL::Varchar); + col.setCharset(cs); + } col.setLength(field->pack_length()); break; // Blob types (all come in as MYSQL_TYPE_BLOB) @@ -3002,8 +3019,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_TINY_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } col.setInlineSize(256); // No parts col.setPartSize(0); @@ -3013,8 +3032,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } // Use "<=" even if "<" is the exact condition if (field->max_length() <= (1 << 8)) goto mysql_type_tiny_blob; @@ -3033,8 +3054,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_MEDIUM_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } 
col.setInlineSize(256); col.setPartSize(4000); col.setStripeSize(8); @@ -3043,8 +3066,10 @@ static int create_ndb_column(NDBCOL &col, case MYSQL_TYPE_LONG_BLOB: if (field->flags & BINARY_FLAG) col.setType(NDBCOL::Blob); - else + else { col.setType(NDBCOL::Text); + col.setCharset(cs); + } col.setInlineSize(256); col.setPartSize(8000); col.setStripeSize(4); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index a25d3e18310..5d8aa0e76db 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -263,6 +263,8 @@ class ha_ndbcluster: public handler void no_uncommitted_rows_update(int); void no_uncommitted_rows_init(THD *); void no_uncommitted_rows_reset(THD *); + + friend int execute_no_commit(ha_ndbcluster*, NdbConnection*); }; bool ndbcluster_init(void);
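
The recurring `+ ndb_init();` hunks above all enforce one calling convention: a standalone NDB API program must initialize the library before it constructs an `Ndb` object or touches any other API entry point. A minimal sketch of that convention follows; the umbrella header name, the timeout, and the database name are illustrative assumptions, not part of the patch.

```cpp
#include <NdbApi.hpp>   // assumed umbrella header; declares the Ndb class

int main(int argc, const char** argv)
{
  ndb_init();                       // must run before any other NDB API call
  Ndb ndb("TEST_DB");               // "TEST_DB" is only an example database name
  if (ndb.init() != 0 || ndb.waitUntilReady(30) != 0)
    return 1;                       // could not attach to the cluster
  /* ... test or tool logic ... */
  return 0;
}
```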
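
Several hunks (index.cpp, index2.cpp, testDict.cpp, testOIBasic.cpp) make the same mechanical change to table creation: `setPrimaryKey()` is moved after `setType()` and `setLength()`, so the column's type is fixed before it is flagged as a key. The reordered definition, using a column name taken from the patched tests, looks like this:

```cpp
NdbDictionary::Column column("NAME");
column.setType(NdbDictionary::Column::Char);  // establish the type (and its charset handling) first
column.setLength(12);
column.setPrimaryKey(true);                   // only then mark it as part of the key
column.setNullable(false);
table.addColumn(column);
```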
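
In `sql/ha_ndbcluster.cc`, `create_ndb_column()` now threads the MySQL column's collation into the NDB dictionary for every non-binary string type, so NDB hashes and compares character keys with the same rules as the server. A condensed sketch of the CHAR branch from the diff (the other string and TEXT cases apply the same `setType()`-then-`setCharset()` pattern):

```cpp
CHARSET_INFO *cs= field->charset();        // collation of the MySQL column
switch (field->real_type()) {
case MYSQL_TYPE_STRING:
  if (field->flags & BINARY_FLAG)
    col.setType(NDBCOL::Binary);           // raw bytes: no collation attached
  else {
    col.setType(NDBCOL::Char);
    col.setCharset(cs);                    // NDB compares/hashes with this collation
  }
  col.setLength(field->pack_length());
  break;
/* MYSQL_TYPE_VAR_STRING and the BLOB/TEXT cases follow the same pattern,
   as the surrounding hunks show. */
}
```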
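
The handler also funnels every `NoCommit` round trip through one inline helper, `execute_no_commit()`, so deferred (batched) execution can later be switched on in a single place; the `if (false && m_batch_execute)` branch is a compiled-out placeholder, so behaviour is unchanged for now. The call-site pattern applied throughout `ha_ndbcluster.cc`:

```cpp
// before:  if (trans->execute(NoCommit) != 0) DBUG_RETURN(ndb_err(trans));
// after:   every NoCommit execute goes through the helper
if (execute_no_commit(this, trans) != 0)
{
  no_uncommitted_rows_execute_failure();   // present at most of the patched call sites
  DBUG_RETURN(ndb_err(trans));
}
```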
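
Finally, the `NdbBackup::restore` rework (partly garbled above by extraction) changes two things: each node now copies only its own backup files (`BACKUP-<id>*.<node>.*` in the scp format string), and the first node restores metadata and data in one pass while the remaining nodes restore data only. A sketch of the resulting loop, assuming the surviving `+` lines reflect the full hunk:

```cpp
// first node: metadata + data
res = execRestore(true, true, ndbNodes[0].node_id, _backup_id);

// remaining nodes: data only
for (size_t i = 1; i < ndbNodes.size(); i++)
  res = execRestore(true, false, ndbNodes[i].node_id, _backup_id);
```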