From 1486a5a7442dd18d9f1b5b3291e885eff53b01b5 Mon Sep 17 00:00:00 2001
From: unknown
Date: Tue, 22 Jan 2008 14:18:47 +0100
Subject: [PATCH 1/5] DictCache.hpp, Ndb.hpp: Add new variable m_highest_seen
 when only peeking at auto_increment NEXTID and not retrieving to cache. Add
 new method to check tupleId before calling data node.

ndb_restore.result, ndb_restore.test:
  Changed test to use information_schema to check auto_increment.

DictCache.cpp, Ndb.cpp:
  Add new variable m_highest_seen, used when only peeking at the
  auto_increment NEXTID and not retrieving it into the cache. Add a new
  method to check the tupleId before calling the data node. When setting the
  auto_increment value we also read back the new value; this is useful when
  the table is used for the first time in this MySQL Server and the NEXTID
  value has not yet been seen. The kernel will avoid the update since it
  already has the value, but will also read back the NEXTID value so that
  this does not have to be done again.

ndb_auto_increment.result:
  Updated result file since it was incorrect.

ndb/include/ndbapi/Ndb.hpp:
  Add new variable m_highest_seen, used when only peeking at the
  auto_increment NEXTID and not retrieving it into the cache. Add a new
  method to check the tupleId before calling the data node.

ndb/src/ndbapi/DictCache.hpp:
  Add new variable m_highest_seen, used when only peeking at the
  auto_increment NEXTID and not retrieving it into the cache. Add a new
  method to check the tupleId before calling the data node.

ndb/src/ndbapi/DictCache.cpp:
  Add new variable m_highest_seen, used when only peeking at the
  auto_increment NEXTID and not retrieving it into the cache. Add a new
  method to check the tupleId before calling the data node. When setting the
  auto_increment value we also read back the new value; this is useful when
  the table is used for the first time in this MySQL Server and the NEXTID
  value has not yet been seen. The kernel will avoid the update since it
  already has the value, but will also read back the NEXTID value so that
  this does not have to be done again.

ndb/src/ndbapi/Ndb.cpp:
  Add new variable m_highest_seen, used when only peeking at the
  auto_increment NEXTID and not retrieving it into the cache. Add a new
  method to check the tupleId before calling the data node. When setting the
  auto_increment value we also read back the new value; this is useful when
  the table is used for the first time in this MySQL Server and the NEXTID
  value has not yet been seen. The kernel will avoid the update since it
  already has the value, but will also read back the NEXTID value so that
  this does not have to be done again.
mysql-test/r/ndb_restore.result: Changed test to use information_schema to check auto_increment mysql-test/t/ndb_restore.test: Changed test to use information_schema to check auto_increment mysql-test/r/ndb_auto_increment.result: Updated result file since it was incorrect --- mysql-test/r/ndb_auto_increment.result | 6 +- mysql-test/r/ndb_restore.result | 50 +++++++--- mysql-test/t/ndb_restore.test | 25 +++-- ndb/include/ndbapi/Ndb.hpp | 8 +- ndb/src/ndbapi/DictCache.cpp | 1 + ndb/src/ndbapi/DictCache.hpp | 1 + ndb/src/ndbapi/Ndb.cpp | 125 ++++++++++++++++++------- 7 files changed, 149 insertions(+), 67 deletions(-) diff --git a/mysql-test/r/ndb_auto_increment.result b/mysql-test/r/ndb_auto_increment.result index b7c9fa8e2b5..f8ef5af2770 100644 --- a/mysql-test/r/ndb_auto_increment.result +++ b/mysql-test/r/ndb_auto_increment.result @@ -421,10 +421,10 @@ select * from t1 order by a; a 1 20 -21 33 34 35 +65 insert into t1 values (100); insert into t1 values (NULL); insert into t1 values (NULL); @@ -432,11 +432,11 @@ select * from t1 order by a; a 1 20 -21 -22 33 34 35 +65 +66 100 101 set auto_increment_offset = @old_auto_increment_offset; diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result index 9faac2df0a4..c48333f6ea8 100644 --- a/mysql-test/r/ndb_restore.result +++ b/mysql-test/r/ndb_restore.result @@ -266,21 +266,41 @@ a 2000 3000 10000 -show table status like 't1_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 3001 X X X X X X X -show table status like 't2_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 501 X X X X X X X -show table status like 't4_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 290000001 X X X X X X X -show table status like 't7_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 29 X X X X X X X -show table status like 't10_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 10001 X X X X X X X +select max(capgoaledatta) from t1_c; +max(capgoaledatta) +3000 +select auto_increment from information_schema.tables +where table_name = 't1_c'; +auto_increment +3001 +select max(capgotod) from t2_c; +max(capgotod) +500 +select auto_increment from information_schema.tables +where table_name = 't2_c'; +auto_increment +501 +select max(capfa) from t4_c; +max(capfa) +290000000 +select auto_increment from information_schema.tables +where table_name = 't4_c'; +auto_increment +290000001 +select max(dardtestard) from t7_c; +max(dardtestard) +28 +select auto_increment from information_schema.tables +where table_name = 't7_c'; +auto_increment +29 +select max(a) from t10_c; +max(a) +10000 +select auto_increment from information_schema.tables +where table_name = 't10_c'; +auto_increment +10001 drop table if exists 
t1,t2,t3,t4,t5,t6,t7,t8,t9, t10; drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; 520093696, diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index 266a0c7fbc1..940b53adbe1 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -231,16 +231,21 @@ select count(*) select * from t10_c order by a; # Bug #27775 cont'd # - auto inc info should be correct ---replace_column 1 X 2 X 3 X 4 X 5 X 6 X 7 X 8 X 9 X 10 X 12 X 13 X 14 X 15 X 16 X 17 X 18 X -show table status like 't1_c'; ---replace_column 1 X 2 X 3 X 4 X 5 X 6 X 7 X 8 X 9 X 10 X 12 X 13 X 14 X 15 X 16 X 17 X 18 X -show table status like 't2_c'; ---replace_column 1 X 2 X 3 X 4 X 5 X 6 X 7 X 8 X 9 X 10 X 12 X 13 X 14 X 15 X 16 X 17 X 18 X -show table status like 't4_c'; ---replace_column 1 X 2 X 3 X 4 X 5 X 6 X 7 X 8 X 9 X 10 X 12 X 13 X 14 X 15 X 16 X 17 X 18 X -show table status like 't7_c'; ---replace_column 1 X 2 X 3 X 4 X 5 X 6 X 7 X 8 X 9 X 10 X 12 X 13 X 14 X 15 X 16 X 17 X 18 X -show table status like 't10_c'; +select max(capgoaledatta) from t1_c; +select auto_increment from information_schema.tables +where table_name = 't1_c'; +select max(capgotod) from t2_c; +select auto_increment from information_schema.tables +where table_name = 't2_c'; +select max(capfa) from t4_c; +select auto_increment from information_schema.tables +where table_name = 't4_c'; +select max(dardtestard) from t7_c; +select auto_increment from information_schema.tables +where table_name = 't7_c'; +select max(a) from t10_c; +select auto_increment from information_schema.tables +where table_name = 't10_c'; --disable_warnings drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9, t10; diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp index 01bc899b4e1..2674c5db868 100644 --- a/ndb/include/ndbapi/Ndb.hpp +++ b/ndb/include/ndbapi/Ndb.hpp @@ -1399,9 +1399,9 @@ public: int readAutoIncrementValue(const NdbDictionary::Table * aTable, Uint64 & tupleId); int setAutoIncrementValue(const char* aTableName, - Uint64 tupleId, bool increase); + Uint64 tupleId, bool modify); int setAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 tupleId, bool increase); + Uint64 tupleId, bool modify); private: int getTupleIdFromNdb(Ndb_local_table_info* info, Uint64 & tupleId, Uint32 cacheSize, @@ -1409,7 +1409,9 @@ private: int readTupleIdFromNdb(Ndb_local_table_info* info, Uint64 & tupleId); int setTupleIdInNdb(Ndb_local_table_info* info, - Uint64 tupleId, bool increase); + Uint64 tupleId, bool modify); + int checkTupleIdInNdb(Ndb_local_table_info* info, + Uint64 tupleId); int opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op); public: diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index 6a815067233..9234b6b5219 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -46,6 +46,7 @@ Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl) m_table_impl= table_impl; m_first_tuple_id = ~(Uint64)0; m_last_tuple_id = ~(Uint64)0; + m_highest_seen = 0; } Ndb_local_table_info::~Ndb_local_table_info() diff --git a/ndb/src/ndbapi/DictCache.hpp b/ndb/src/ndbapi/DictCache.hpp index db90a07d487..6ada55cc05e 100644 --- a/ndb/src/ndbapi/DictCache.hpp +++ b/ndb/src/ndbapi/DictCache.hpp @@ -36,6 +36,7 @@ public: // range of cached tuple ids per thread Uint64 m_first_tuple_id; Uint64 m_last_tuple_id; + Uint64 m_highest_seen; Uint64 m_local_data[1]; // Must be last member. Used to access extra space. 
private: diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index dcdee3d4ea1..350b66f2aee 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -767,7 +767,7 @@ Ndb::getNodeId() } /**************************************************************************** -Uint64 getAutoIncrementValue( const char* aTableName, +int getAutoIncrementValue( const char* aTableName, Uint64 & tupleId, Uint32 cacheSize, Uint64 step, @@ -779,6 +779,7 @@ Parameters: aTableName (IN) : The table name. step (IN) : Specifies the step between the autoincrement values. start (IN) : Start value for first value +Returns: 0 if succesful, -1 if error encountered Remark: Returns a new autoincrement value to the application. The autoincrement values can be increased by steps (default 1) and a number of values can be prefetched @@ -892,9 +893,20 @@ Ndb::getTupleIdFromNdb(Ndb_local_table_info* info, DBUG_RETURN(0); } +/**************************************************************************** +int readAutoIncrementValue( const char* aTableName, + Uint64 & autoValue, + bool modify); + +Parameters: aTableName (IN) : The table name. + autoValue (OUT) : The current autoincrement value + modify (IN) : Modify existing value (not initialization) +Returns: 0 if succesful, -1 if error encountered +Remark: Returns the current autoincrement value to the application. +****************************************************************************/ int Ndb::readAutoIncrementValue(const char* aTableName, - Uint64 & tupleId) + Uint64 & autoValue) { DBUG_ENTER("Ndb::readAutoIncrementValue"); BaseString internal_tabname(internalize_table_name(aTableName)); @@ -905,15 +917,15 @@ Ndb::readAutoIncrementValue(const char* aTableName, theError.code = theDictionary->getNdbError().code; DBUG_RETURN(-1); } - if (readTupleIdFromNdb(info, tupleId) == -1) + if (readTupleIdFromNdb(info, autoValue) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)autoValue)); DBUG_RETURN(0); } int Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId) + Uint64 & autoValue) { DBUG_ENTER("Ndb::readAutoIncrementValue"); assert(aTable != 0); @@ -926,9 +938,9 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, theError.code = theDictionary->getNdbError().code; DBUG_RETURN(-1); } - if (readTupleIdFromNdb(info, tupleId) == -1) + if (readTupleIdFromNdb(info, autoValue) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)autoValue)); DBUG_RETURN(0); } @@ -956,9 +968,20 @@ Ndb::readTupleIdFromNdb(Ndb_local_table_info* info, DBUG_RETURN(0); } +/**************************************************************************** +int setAutoIncrementValue( const char* aTableName, + Uint64 autoValue, + bool modify); + +Parameters: aTableName (IN) : The table name. + autoValue (IN) : The new autoincrement value + modify (IN) : Modify existing value (not initialization) +Returns: 0 if succesful, -1 if error encountered +Remark: Sets a new autoincrement value for the application. 
+****************************************************************************/ int Ndb::setAutoIncrementValue(const char* aTableName, - Uint64 tupleId, bool increase) + Uint64 autoValue, bool modify) { DBUG_ENTER("Ndb::setAutoIncrementValue"); BaseString internal_tabname(internalize_table_name(aTableName)); @@ -969,14 +992,14 @@ Ndb::setAutoIncrementValue(const char* aTableName, theError.code = theDictionary->getNdbError().code; DBUG_RETURN(-1); } - if (setTupleIdInNdb(info, tupleId, increase) == -1) + if (setTupleIdInNdb(info, autoValue, modify) == -1) DBUG_RETURN(-1); DBUG_RETURN(0); } int Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 tupleId, bool increase) + Uint64 autoValue, bool modify) { DBUG_ENTER("Ndb::setAutoIncrementValue"); assert(aTable != 0); @@ -989,38 +1012,42 @@ Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, theError.code = theDictionary->getNdbError().code; DBUG_RETURN(-1); } - if (setTupleIdInNdb(info, tupleId, increase) == -1) + if (setTupleIdInNdb(info, autoValue, modify) == -1) DBUG_RETURN(-1); DBUG_RETURN(0); } int Ndb::setTupleIdInNdb(Ndb_local_table_info* info, - Uint64 tupleId, bool increase) + Uint64 tupleId, bool modify) { DBUG_ENTER("Ndb::setTupleIdInNdb"); - if (increase) + if (modify) { - if (info->m_first_tuple_id != info->m_last_tuple_id) + if (checkTupleIdInNdb(info, tupleId)) { - assert(info->m_first_tuple_id < info->m_last_tuple_id); - if (tupleId <= info->m_first_tuple_id + 1) - DBUG_RETURN(0); - if (tupleId <= info->m_last_tuple_id) + if (info->m_first_tuple_id != info->m_last_tuple_id) { - info->m_first_tuple_id = tupleId - 1; - DBUG_PRINT("info", - ("Setting next auto increment cached value to %lu", - (ulong)tupleId)); - DBUG_RETURN(0); + assert(info->m_first_tuple_id < info->m_last_tuple_id); + if (tupleId <= info->m_first_tuple_id + 1) + DBUG_RETURN(0); + if (tupleId <= info->m_last_tuple_id) + { + info->m_first_tuple_id = tupleId - 1; + DBUG_PRINT("info", + ("Setting next auto increment cached value to %lu", + (ulong)tupleId)); + DBUG_RETURN(0); + } } + /* + * if tupleId <= NEXTID, do nothing. otherwise update NEXTID to + * tupleId and set cached range to first = last = tupleId - 1. + */ + Uint64 opValue = tupleId; + if (opTupleIdOnNdb(info, opValue, 2) == -1) + DBUG_RETURN(-1); } - /* - * if tupleId <= NEXTID, do nothing. otherwise update NEXTID to - * tupleId and set cached range to first = last = tupleId - 1. - */ - if (opTupleIdOnNdb(info, tupleId, 2) == -1) - DBUG_RETURN(-1); } else { @@ -1033,6 +1060,32 @@ Ndb::setTupleIdInNdb(Ndb_local_table_info* info, DBUG_RETURN(0); } +int +Ndb::checkTupleIdInNdb(Ndb_local_table_info* info, Uint64 tupleId) +{ + DBUG_ENTER("Ndb::checkTupleIdIndNdb"); + if ((info->m_first_tuple_id != ~(Uint64)0) && + (info->m_first_tuple_id > tupleId)) + { + /* + * If we have ever cached a value in this object and this cached + * value is larger than the value we're trying to set then we + * need not check with the real value in the SYSTAB_0 table. + */ + DBUG_RETURN(0); + } + if (info->m_highest_seen > tupleId) + { + /* + * Although we've never cached any higher value we have read + * a higher value and again it isn't necessary to change the + * auto increment value. 
+ */ + DBUG_RETURN(0); + } + DBUG_RETURN(1); +} + int Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op) { @@ -1094,6 +1147,7 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op) info->m_first_tuple_id = ~(Uint64)0; info->m_last_tuple_id = ~(Uint64)0; + info->m_highest_seen = 0; break; case 2: tOperation->interpretedUpdateTuple(); @@ -1103,19 +1157,18 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op) // compare NEXTID >= opValue tOperation->branch_le(2, 1, 0); tOperation->write_attr("NEXTID", 1); - tOperation->interpret_exit_ok(); tOperation->def_label(0); - tOperation->interpret_exit_nok(9999); - + tOperation->interpret_exit_ok(); + tRecAttrResult = tOperation->getValue("NEXTID"); if (tConnection->execute( Commit ) == -1) { - if (tConnection->theError.code != 9999) - goto error_handler; + goto error_handler; } else { + info->m_highest_seen = tRecAttrResult->u_64_value(); DBUG_PRINT("info", - ("Setting next auto increment value (db) to %lu", + ("Setting auto increment value (db) to %lu", (ulong)opValue)); info->m_first_tuple_id = info->m_last_tuple_id = opValue - 1; } @@ -1126,7 +1179,7 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op) tRecAttrResult = tOperation->getValue("NEXTID"); if (tConnection->execute( Commit ) == -1 ) goto error_handler; - opValue = tRecAttrResult->u_64_value(); // out + info->m_highest_seen = opValue = tRecAttrResult->u_64_value(); // out break; default: goto error_handler; From 0d9ed67997457ec0066f9854a876d2c5aa2fbd9a Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 22 Jan 2008 15:04:56 +0100 Subject: [PATCH 2/5] Fixed incorrect signature comment --- ndb/src/ndbapi/Ndb.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index 350b66f2aee..f55986d2e6b 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -768,7 +768,7 @@ Ndb::getNodeId() /**************************************************************************** int getAutoIncrementValue( const char* aTableName, - Uint64 & tupleId, + Uint64 & autoValue, Uint32 cacheSize, Uint64 step, Uint64 start); @@ -895,12 +895,10 @@ Ndb::getTupleIdFromNdb(Ndb_local_table_info* info, /**************************************************************************** int readAutoIncrementValue( const char* aTableName, - Uint64 & autoValue, - bool modify); + Uint64 & autoValue); Parameters: aTableName (IN) : The table name. autoValue (OUT) : The current autoincrement value - modify (IN) : Modify existing value (not initialization) Returns: 0 if succesful, -1 if error encountered Remark: Returns the current autoincrement value to the application. ****************************************************************************/ From 7e6ab3a006faa58c8ca3b77391536c4a1a242e8c Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 23 Jan 2008 11:40:06 +0100 Subject: [PATCH 3/5] Ndb.hpp, Ndb.cpp, ha_ndbcluster.cc: Add a check if setting an auto_increment field will change it's next value before retrieving tuple_id_range lock. This avoids hitting locks when updating auto_increment values to a lower value than the current maximum. This is useful in loading a table with auto_increment where one loads the highest numbered pk's first and then proceeds backwards to the first. This can then be achieved with the same performance as a normal insert without auto_increment. 
ndb_restore.result: Updated result file mysql-test/suite/ndb/r/ndb_restore.result: Updated result file sql/ha_ndbcluster.cc: Add a check if setting an auto_increment field will change it's next value before retrieving tuple_id_range lock. This avoids hitting locks when updating auto_increment values to a lower value than the current maximum. This is useful in loading a table with auto_increment where one loads the highest numbered pk's first and then proceeds backwards to the first. This can then be achieved with the same performance as a normal insert without auto_increment. storage/ndb/include/ndbapi/Ndb.hpp: Add a check if setting an auto_increment field will change it's next value before retrieving tuple_id_range lock. This avoids hitting locks when updating auto_increment values to a lower value than the current maximum. This is useful in loading a table with auto_increment where one loads the highest numbered pk's first and then proceeds backwards to the first. This can then be achieved with the same performance as a normal insert without auto_increment. storage/ndb/src/ndbapi/Ndb.cpp: Add a check if setting an auto_increment field will change it's next value before retrieving tuple_id_range lock. This avoids hitting locks when updating auto_increment values to a lower value than the current maximum. This is useful in loading a table with auto_increment where one loads the highest numbered pk's first and then proceeds backwards to the first. This can then be achieved with the same performance as a normal insert without auto_increment. --- mysql-test/suite/ndb/r/ndb_restore.result | 318 +++++----------------- sql/ha_ndbcluster.cc | 11 +- storage/ndb/include/ndbapi/Ndb.hpp | 23 +- storage/ndb/src/ndbapi/Ndb.cpp | 125 ++++++--- 4 files changed, 179 insertions(+), 298 deletions(-) diff --git a/mysql-test/suite/ndb/r/ndb_restore.result b/mysql-test/suite/ndb/r/ndb_restore.result index aba6997d218..0ebec9c96ae 100644 --- a/mysql-test/suite/ndb/r/ndb_restore.result +++ b/mysql-test/suite/ndb/r/ndb_restore.result @@ -18,12 +18,12 @@ CREATE TABLE `t2_c` ( PRIMARY KEY (`capgotod`), KEY `i quadaddsvr` (`gotod`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; -INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,''); +INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'); CREATE TABLE `t3_c` ( `CapGoaledatta` smallint(5) unsigned NOT NULL default '0', `capgotod` smallint(5) unsigned NOT NULL default '0', PRIMARY KEY (`capgotod`,`CapGoaledatta`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED; +) ENGINE=ndbcluster DEFAULT CHARSET=latin1; INSERT INTO `t3_c` VALUES (5,3),(2,4),(5,4),(1,3); CREATE TABLE `t4_c` ( `capfa` bigint(20) unsigned NOT NULL auto_increment, @@ -116,8 +116,8 @@ CREATE TABLE `t9_c` ( PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; INSERT INTO `t9_c` 
VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3); -CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1; -INSERT INTO t10_c VALUES (1),(2),(3); +create table t10_c (a int auto_increment key) ENGINE=ndbcluster; +insert into t10_c values (1),(2),(3); insert into t10_c values (10000),(2000),(3000); create table t1 engine=myisam as select * from t1_c; create table t2 engine=myisam as select * from t2_c; @@ -129,8 +129,6 @@ create table t7 engine=myisam as select * from t7_c; create table t8 engine=myisam as select * from t8_c; create table t9 engine=myisam as select * from t9_c; create table t10 engine=myisam as select * from t10_c; -ForceVarPart: 0 -ForceVarPart: 1 CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; DELETE FROM test.backup_info; LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; @@ -138,14 +136,29 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info; @the_backup_id:=backup_id DROP TABLE test.backup_info; -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; -ForceVarPart: 0 -ForceVarPart: 1 -select * from information_schema.columns where table_name = "t1_c"; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT -NULL test t1_c capgoaledatta 1 NULL NO mediumint NULL NULL 7 0 NULL NULL mediumint(5) unsigned PRI auto_increment select,insert,update,references -NULL test t1_c goaledatta 2 NO char 2 2 NULL NULL latin1 latin1_swedish_ci char(2) PRI select,insert,update,references -NULL test t1_c maturegarbagefa 3 NO varchar 32 32 NULL NULL latin1 latin1_swedish_ci varchar(32) PRI select,insert,update,references +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; +show tables; +Tables_in_test +t1 +t10 +t2 +t3 +t4 +t5 +t6 +t7 +t8 +t9 +t4_c +t3_c +t2_c +t5_c +t6_c +t7_c +t8_c +t9_c +t10_c +t1_c select count(*) from t1; count(*) 5 @@ -159,15 +172,15 @@ count(*) 5 select count(*) from t2; count(*) -7 +6 select count(*) from t2_c; count(*) -7 +6 select count(*) from (select * from t2 union select * from t2_c) a; count(*) -7 +6 select count(*) from t3; count(*) 4 @@ -253,238 +266,41 @@ a 2000 3000 10000 -show table status like 't1_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 3001 X X X X X X X -show table status like 't2_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 501 X X X X X X X 
-show table status like 't4_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 290000001 X X X X X X X -show table status like 't7_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 29 X X X X X X X -show table status like 't10_c'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -X X X X X X X X X X 10001 X X X X X X X -ALTER TABLE t7_c -PARTITION BY LINEAR KEY (`dardtestard`); -CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM test.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; -SELECT @the_backup_id:=backup_id FROM test.backup_info; -@the_backup_id:=backup_id - -DROP TABLE test.backup_info; -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; -select count(*) from t1; -count(*) -5 -select count(*) from t1_c; -count(*) -5 -select count(*) -from (select * from t1 union -select * from t1_c) a; -count(*) -5 -select count(*) from t2; -count(*) -7 -select count(*) from t2_c; -count(*) -7 -select count(*) -from (select * from t2 union -select * from t2_c) a; -count(*) -7 -select count(*) from t3; -count(*) -4 -select count(*) from t3_c; -count(*) -4 -select count(*) -from (select * from t3 union -select * from t3_c) a; -count(*) -4 -select count(*) from t4; -count(*) -22 -select count(*) from t4_c; -count(*) -22 -select count(*) -from (select * from t4 union -select * from t4_c) a; -count(*) -22 -select count(*) from t5; -count(*) -3 -select count(*) from t5_c; -count(*) -3 -select count(*) -from (select * from t5 union -select * from t5_c) a; -count(*) -3 -select count(*) from t6; -count(*) -8 -select count(*) from t6_c; -count(*) -8 -select count(*) -from (select * from t6 union -select * from t6_c) a; -count(*) -8 -select count(*) from t7; -count(*) -5 -select count(*) from t7_c; -count(*) -5 -select count(*) -from (select * from t7 union -select * from t7_c) a; -count(*) -5 -select count(*) from t8; -count(*) -3 -select count(*) from t8_c; -count(*) -3 -select count(*) -from (select * from t8 union -select * from t8_c) a; -count(*) -3 -select count(*) from t9; -count(*) -3 -select count(*) from t9_c; -count(*) -3 -select count(*) -from (select * from t9 union -select * from t9_c) a; -count(*) -3 -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; -select count(*) from t1; -count(*) -5 -select count(*) from t1_c; -count(*) -5 -select count(*) -from (select * from t1 union -select * from t1_c) a; -count(*) -5 -select count(*) from t2; -count(*) -7 -select count(*) from t2_c; -count(*) -7 -select count(*) -from (select * from t2 union -select * from t2_c) a; -count(*) -7 -select count(*) from t3; -count(*) -4 -select count(*) from t3_c; -count(*) -4 -select count(*) -from (select * from t3 union -select * from t3_c) a; -count(*) -4 -select count(*) from t4; -count(*) -22 -select count(*) from t4_c; -count(*) -22 -select count(*) -from (select * from t4 union -select * from t4_c) a; -count(*) -22 -select count(*) from t5; -count(*) -3 -select count(*) from t5_c; -count(*) -3 -select count(*) 
-from (select * from t5 union -select * from t5_c) a; -count(*) -3 -select count(*) from t6; -count(*) -8 -select count(*) from t6_c; -count(*) -8 -select count(*) -from (select * from t6 union -select * from t6_c) a; -count(*) -8 -select count(*) from t7; -count(*) -5 -select count(*) from t7_c; -count(*) -5 -select count(*) -from (select * from t7 union -select * from t7_c) a; -count(*) -5 -select count(*) from t8; -count(*) -3 -select count(*) from t8_c; -count(*) -3 -select count(*) -from (select * from t8 union -select * from t8_c) a; -count(*) -3 -select count(*) from t9; -count(*) -3 -select count(*) from t9_c; -count(*) -3 -select count(*) -from (select * from t9 union -select * from t9_c) a; -count(*) -3 -drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; -CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM test.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; -SELECT @the_backup_id:=backup_id FROM test.backup_info; -@the_backup_id:=backup_id - -DROP TABLE test.backup_info; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; -drop table if exists t2_c; +select max(capgoaledatta) from t1_c; +max(capgoaledatta) +3000 +select auto_increment from information_schema.tables +where table_name = 't1_c'; +auto_increment +3001 +select max(capgotod) from t2_c; +max(capgotod) +500 +select auto_increment from information_schema.tables +where table_name = 't2_c'; +auto_increment +501 +select max(capfa) from t4_c; +max(capfa) +290000000 +select auto_increment from information_schema.tables +where table_name = 't4_c'; +auto_increment +290000001 +select max(dardtestard) from t7_c; +max(dardtestard) +28 +select auto_increment from information_schema.tables +where table_name = 't7_c'; +auto_increment +29 +select max(a) from t10_c; +max(a) +10000 +select auto_increment from information_schema.tables +where table_name = 't10_c'; +auto_increment +10001 +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9, t10; +drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; 520093696, diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index b9d7e846d84..6d745140c96 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2738,10 +2738,13 @@ ha_ndbcluster::set_auto_inc(Field *field) ("Trying to set next auto increment value to %s", llstr(next_val, buff))); #endif - Ndb_tuple_id_range_guard g(m_share); - if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE) - == -1) - ERR_RETURN(ndb->getNdbError()); + if (ndb->checkUpdateAutoIncrementValue(m_share->tuple_id_range, next_val)) + { + Ndb_tuple_id_range_guard g(m_share); + if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE) + == -1) + ERR_RETURN(ndb->getNdbError()); + } DBUG_RETURN(0); } diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp index d71b9df9879..f31638db283 100644 --- a/storage/ndb/include/ndbapi/Ndb.hpp +++ b/storage/ndb/include/ndbapi/Ndb.hpp @@ -1515,37 +1515,40 @@ public: TupleIdRange() {} Uint64 m_first_tuple_id; Uint64 m_last_tuple_id; + Uint64 m_highest_seen; void reset() { m_first_tuple_id = ~(Uint64)0; m_last_tuple_id = ~(Uint64)0; + m_highest_seen = 0; }; }; int initAutoIncrement(); int getAutoIncrementValue(const char* aTableName, - Uint64 & tupleId, Uint32 cacheSize, + Uint64 & autoValue, Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1); int getAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId, Uint32 cacheSize, + Uint64 
& autoValue, Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1); int getAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 & tupleId, + TupleIdRange & range, Uint64 & autoValue, Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1); int readAutoIncrementValue(const char* aTableName, - Uint64 & tupleId); + Uint64 & autoValue); int readAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId); + Uint64 & autoValue); int readAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 & tupleId); + TupleIdRange & range, Uint64 & autoValue); int setAutoIncrementValue(const char* aTableName, - Uint64 tupleId, bool modify); + Uint64 autoValue, bool modify); int setAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 tupleId, bool modify); + Uint64 autoValue, bool modify); int setAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 tupleId, + TupleIdRange & range, Uint64 autoValue, bool modify); + bool checkUpdateAutoIncrementValue(TupleIdRange & range, Uint64 autoValue); private: int getTupleIdFromNdb(const NdbTableImpl* table, TupleIdRange & range, Uint64 & tupleId, @@ -1554,6 +1557,8 @@ private: TupleIdRange & range, Uint64 & tupleId); int setTupleIdInNdb(const NdbTableImpl* table, TupleIdRange & range, Uint64 tupleId, bool modify); + int checkTupleIdInNdb(TupleIdRange & range, + Uint64 tupleId); int opTupleIdOnNdb(const NdbTableImpl* table, TupleIdRange & range, Uint64 & opValue, Uint32 op); public: diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 15647861eef..23b0e0915ae 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -942,6 +942,7 @@ Parameters: aTableName (IN) : The table name. step (IN) : Specifies the step between the autoincrement values. start (IN) : Start value for first value +Returns: 0 if succesful, -1 if error encountered Remark: Returns a new autoincrement value to the application. The autoincrement values can be increased by steps (default 1) and a number of values can be prefetched @@ -1072,9 +1073,18 @@ Ndb::getTupleIdFromNdb(const NdbTableImpl* table, DBUG_RETURN(0); } +/**************************************************************************** +int readAutoIncrementValue( const char* aTableName, + Uint64 & autoValue); + +Parameters: aTableName (IN) : The table name. + autoValue (OUT) : The current autoincrement value +Returns: 0 if succesful, -1 if error encountered +Remark: Returns the current autoincrement value to the application. 
+****************************************************************************/ int Ndb::readAutoIncrementValue(const char* aTableName, - Uint64 & tupleId) + Uint64 & autoValue) { DBUG_ENTER("Ndb::readAutoIncrementValue"); ASSERT_NOT_MYSQLD; @@ -1088,15 +1098,15 @@ Ndb::readAutoIncrementValue(const char* aTableName, } const NdbTableImpl* table = info->m_table_impl; TupleIdRange & range = info->m_tuple_id_range; - if (readTupleIdFromNdb(table, range, tupleId) == -1) + if (readTupleIdFromNdb(table, range, autoValue) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)autoValue)); DBUG_RETURN(0); } int Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId) + Uint64 & autoValue) { DBUG_ENTER("Ndb::readAutoIncrementValue"); ASSERT_NOT_MYSQLD; @@ -1111,23 +1121,23 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, DBUG_RETURN(-1); } TupleIdRange & range = info->m_tuple_id_range; - if (readTupleIdFromNdb(table, range, tupleId) == -1) + if (readTupleIdFromNdb(table, range, autoValue) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)autoValue)); DBUG_RETURN(0); } int Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 & tupleId) + TupleIdRange & range, Uint64 & autoValue) { DBUG_ENTER("Ndb::readAutoIncrementValue"); assert(aTable != 0); const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable); - if (readTupleIdFromNdb(table, range, tupleId) == -1) + if (readTupleIdFromNdb(table, range, autoValue) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)autoValue)); DBUG_RETURN(0); } @@ -1155,9 +1165,20 @@ Ndb::readTupleIdFromNdb(const NdbTableImpl* table, DBUG_RETURN(0); } +/**************************************************************************** +int setAutoIncrementValue( const char* aTableName, + Uint64 autoValue, + bool modify); + +Parameters: aTableName (IN) : The table name. + autoValue (IN) : The new autoincrement value + modify (IN) : Modify existing value (not initialization) +Returns: 0 if succesful, -1 if error encountered +Remark: Sets a new autoincrement value for the application. 
+****************************************************************************/ int Ndb::setAutoIncrementValue(const char* aTableName, - Uint64 tupleId, bool increase) + Uint64 autoValue, bool modify) { DBUG_ENTER("Ndb::setAutoIncrementValue"); ASSERT_NOT_MYSQLD; @@ -1171,14 +1192,14 @@ Ndb::setAutoIncrementValue(const char* aTableName, } const NdbTableImpl* table = info->m_table_impl; TupleIdRange & range = info->m_tuple_id_range; - if (setTupleIdInNdb(table, range, tupleId, increase) == -1) + if (setTupleIdInNdb(table, range, autoValue, modify) == -1) DBUG_RETURN(-1); DBUG_RETURN(0); } int Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 tupleId, bool increase) + Uint64 autoValue, bool modify) { DBUG_ENTER("Ndb::setAutoIncrementValue"); ASSERT_NOT_MYSQLD; @@ -1193,52 +1214,55 @@ Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, DBUG_RETURN(-1); } TupleIdRange & range = info->m_tuple_id_range; - if (setTupleIdInNdb(table, range, tupleId, increase) == -1) + if (setTupleIdInNdb(table, range, autoValue, modify) == -1) DBUG_RETURN(-1); DBUG_RETURN(0); } int Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 tupleId, - bool increase) + TupleIdRange & range, Uint64 autoValue, + bool modify) { DBUG_ENTER("Ndb::setAutoIncrementValue"); assert(aTable != 0); const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable); - if (setTupleIdInNdb(table, range, tupleId, increase) == -1) + if (setTupleIdInNdb(table, range, autoValue, modify) == -1) DBUG_RETURN(-1); DBUG_RETURN(0); } int Ndb::setTupleIdInNdb(const NdbTableImpl* table, - TupleIdRange & range, Uint64 tupleId, bool increase) + TupleIdRange & range, Uint64 tupleId, bool modify) { DBUG_ENTER("Ndb::setTupleIdInNdb"); - if (increase) + if (modify) { - if (range.m_first_tuple_id != range.m_last_tuple_id) + if (checkTupleIdInNdb(range, tupleId)) { - assert(range.m_first_tuple_id < range.m_last_tuple_id); - if (tupleId <= range.m_first_tuple_id + 1) - DBUG_RETURN(0); - if (tupleId <= range.m_last_tuple_id) + if (range.m_first_tuple_id != range.m_last_tuple_id) { - range.m_first_tuple_id = tupleId - 1; - DBUG_PRINT("info", - ("Setting next auto increment cached value to %lu", - (ulong)tupleId)); - DBUG_RETURN(0); + assert(range.m_first_tuple_id < range.m_last_tuple_id); + if (tupleId <= range.m_first_tuple_id + 1) + DBUG_RETURN(0); + if (tupleId <= range.m_last_tuple_id) + { + range.m_first_tuple_id = tupleId - 1; + DBUG_PRINT("info", + ("Setting next auto increment cached value to %lu", + (ulong)tupleId)); + DBUG_RETURN(0); + } } + /* + * if tupleId <= NEXTID, do nothing. otherwise update NEXTID to + * tupleId and set cached range to first = last = tupleId - 1. + */ + if (opTupleIdOnNdb(table, range, tupleId, 2) == -1) + DBUG_RETURN(-1); } - /* - * if tupleId <= NEXTID, do nothing. otherwise update NEXTID to - * tupleId and set cached range to first = last = tupleId - 1. 
- */ - if (opTupleIdOnNdb(table, range, tupleId, 2) == -1) - DBUG_RETURN(-1); } else { @@ -1277,6 +1301,39 @@ int Ndb::initAutoIncrement() return 0; } +bool +Ndb::checkUpdateAutoIncrementValue(TupleIdRange & range, Uint64 autoValue) +{ + return(checkTupleIdInNdb(range, autoValue) != 0); +} + +int +Ndb::checkTupleIdInNdb(TupleIdRange & range, Uint64 tupleId) +{ + DBUG_ENTER("Ndb::checkTupleIdIndNdb"); + if ((range.m_first_tuple_id != ~(Uint64)0) && + (range.m_first_tuple_id > tupleId)) + { + /* + * If we have ever cached a value in this object and this cached + * value is larger than the value we're trying to set then we + * need not check with the real value in the SYSTAB_0 table. + */ + DBUG_RETURN(0); + } + if (range.m_highest_seen > tupleId) + { + /* + * Although we've never cached any higher value we have read + * a higher value and again it isn't necessary to change the + * auto increment value. + */ + DBUG_RETURN(0); + } + DBUG_RETURN(1); +} + + int Ndb::opTupleIdOnNdb(const NdbTableImpl* table, TupleIdRange & range, Uint64 & opValue, Uint32 op) From 0f151f6b7a1700105a9eb4ce3edf7f5e8533601e Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 23 Jan 2008 11:41:45 +0100 Subject: [PATCH 4/5] Ndb.hpp: Changed function attribute names to match implementation ndb/include/ndbapi/Ndb.hpp: Changed function attribute names to match implementation --- ndb/include/ndbapi/Ndb.hpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp index 2674c5db868..c979db2b418 100644 --- a/ndb/include/ndbapi/Ndb.hpp +++ b/ndb/include/ndbapi/Ndb.hpp @@ -1386,22 +1386,22 @@ public: * * @param cacheSize number of values to cache in this Ndb object * - * @return 0 or -1 on error, and tupleId in out parameter + * @return 0 or -1 on error, and autoValue in out parameter */ int getAutoIncrementValue(const char* aTableName, - Uint64 & tupleId, Uint32 cacheSize, + Uint64 & autoValue, Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1); int getAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId, Uint32 cacheSize, + Uint64 & autoValue, Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1); int readAutoIncrementValue(const char* aTableName, - Uint64 & tupleId); + Uint64 & autoValue); int readAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId); + Uint64 & autoValue); int setAutoIncrementValue(const char* aTableName, - Uint64 tupleId, bool modify); + Uint64 autoValue, bool modify); int setAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 tupleId, bool modify); + Uint64 autoValue, bool modify); private: int getTupleIdFromNdb(Ndb_local_table_info* info, Uint64 & tupleId, Uint32 cacheSize, From 77b2e8f314f1df93df6661b2bd89ab806c41d359 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 25 Jan 2008 10:43:30 +0100 Subject: [PATCH 5/5] ndb_restore.result, ndb_restore.test: Changed to use information_schema to check auto_increment Ndb.cpp: Bug #33534 Bad performance of INSERT's in auto_incremented tables: Saving highest seen value when setting auto_increment fields ndb_auto_increment.result: Regenerated result mysql-test/suite/ndb/r/ndb_auto_increment.result: Regenerated result mysql-test/suite/ndb/r/ndb_restore.result: Changed to use information_schema to check auto_increment mysql-test/suite/ndb/t/ndb_restore.test: Changed to use information_schema to check auto_increment storage/ndb/src/ndbapi/Ndb.cpp: Bug #33534 Bad performance of INSERT's in auto_incremented tables: Saving highest seen 
value when setting auto_increment fields --- .../suite/ndb/r/ndb_auto_increment.result | 6 +- mysql-test/suite/ndb/r/ndb_restore.result | 268 +++++++++++++++--- mysql-test/suite/ndb/t/ndb_restore.test | 168 ++++++++++- storage/ndb/src/ndbapi/Ndb.cpp | 10 +- 4 files changed, 404 insertions(+), 48 deletions(-) diff --git a/mysql-test/suite/ndb/r/ndb_auto_increment.result b/mysql-test/suite/ndb/r/ndb_auto_increment.result index 5740ed38242..78612b35113 100644 --- a/mysql-test/suite/ndb/r/ndb_auto_increment.result +++ b/mysql-test/suite/ndb/r/ndb_auto_increment.result @@ -421,10 +421,10 @@ select * from t1 order by a; a 1 20 -21 33 34 35 +65 insert into t1 values (100); insert into t1 values (NULL); insert into t1 values (NULL); @@ -432,11 +432,11 @@ select * from t1 order by a; a 1 20 -21 -22 33 34 35 +65 +66 100 101 set auto_increment_offset = @old_auto_increment_offset; diff --git a/mysql-test/suite/ndb/r/ndb_restore.result b/mysql-test/suite/ndb/r/ndb_restore.result index 0ebec9c96ae..a33c5c5f31c 100644 --- a/mysql-test/suite/ndb/r/ndb_restore.result +++ b/mysql-test/suite/ndb/r/ndb_restore.result @@ -18,12 +18,12 @@ CREATE TABLE `t2_c` ( PRIMARY KEY (`capgotod`), KEY `i quadaddsvr` (`gotod`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; -INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'); +INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,''); CREATE TABLE `t3_c` ( `CapGoaledatta` smallint(5) unsigned NOT NULL default '0', `capgotod` smallint(5) unsigned NOT NULL default '0', PRIMARY KEY (`capgotod`,`CapGoaledatta`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED; INSERT INTO `t3_c` VALUES (5,3),(2,4),(5,4),(1,3); CREATE TABLE `t4_c` ( `capfa` bigint(20) unsigned NOT NULL auto_increment, @@ -116,8 +116,8 @@ CREATE TABLE `t9_c` ( PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3); -create table t10_c (a int auto_increment key) ENGINE=ndbcluster; -insert into t10_c values (1),(2),(3); +CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +INSERT INTO t10_c VALUES (1),(2),(3); insert into t10_c values 
(10000),(2000),(3000); create table t1 engine=myisam as select * from t1_c; create table t2 engine=myisam as select * from t2_c; @@ -129,6 +129,8 @@ create table t7 engine=myisam as select * from t7_c; create table t8 engine=myisam as select * from t8_c; create table t9 engine=myisam as select * from t9_c; create table t10 engine=myisam as select * from t10_c; +ForceVarPart: 0 +ForceVarPart: 1 CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; DELETE FROM test.backup_info; LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; @@ -136,29 +138,14 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info; @the_backup_id:=backup_id DROP TABLE test.backup_info; -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; -show tables; -Tables_in_test -t1 -t10 -t2 -t3 -t4 -t5 -t6 -t7 -t8 -t9 -t4_c -t3_c -t2_c -t5_c -t6_c -t7_c -t8_c -t9_c -t10_c -t1_c +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; +ForceVarPart: 0 +ForceVarPart: 1 +select * from information_schema.columns where table_name = "t1_c"; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT +NULL test t1_c capgoaledatta 1 NULL NO mediumint NULL NULL 7 0 NULL NULL mediumint(5) unsigned PRI auto_increment select,insert,update,references +NULL test t1_c goaledatta 2 NO char 2 2 NULL NULL latin1 latin1_swedish_ci char(2) PRI select,insert,update,references +NULL test t1_c maturegarbagefa 3 NO varchar 32 32 NULL NULL latin1 latin1_swedish_ci varchar(32) PRI select,insert,update,references select count(*) from t1; count(*) 5 @@ -172,15 +159,15 @@ count(*) 5 select count(*) from t2; count(*) -6 +7 select count(*) from t2_c; count(*) -6 +7 select count(*) from (select * from t2 union select * from t2_c) a; count(*) -6 +7 select count(*) from t3; count(*) 4 @@ -301,6 +288,223 @@ select auto_increment from information_schema.tables where table_name = 't10_c'; auto_increment 10001 -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9, t10; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; +ALTER TABLE t7_c +PARTITION BY LINEAR KEY (`dardtestard`); +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id + +DROP TABLE test.backup_info; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; +select count(*) from t1; +count(*) +5 +select count(*) from t1_c; +count(*) +5 +select count(*) +from (select * from t1 union +select * from t1_c) a; +count(*) +5 +select count(*) from t2; +count(*) +7 +select count(*) from t2_c; +count(*) +7 +select count(*) +from (select * from t2 union +select * from t2_c) a; +count(*) +7 +select count(*) from t3; +count(*) +4 +select count(*) from t3_c; +count(*) +4 +select count(*) +from (select * from t3 union +select * from t3_c) a; +count(*) +4 +select count(*) from t4; +count(*) +22 +select count(*) from t4_c; +count(*) +22 +select count(*) +from (select * from t4 union +select * from t4_c) a; +count(*) +22 +select count(*) from t5; +count(*) +3 +select count(*) from t5_c; +count(*) +3 +select count(*) +from (select * from t5 union +select * from 
t5_c) a; +count(*) +3 +select count(*) from t6; +count(*) +8 +select count(*) from t6_c; +count(*) +8 +select count(*) +from (select * from t6 union +select * from t6_c) a; +count(*) +8 +select count(*) from t7; +count(*) +5 +select count(*) from t7_c; +count(*) +5 +select count(*) +from (select * from t7 union +select * from t7_c) a; +count(*) +5 +select count(*) from t8; +count(*) +3 +select count(*) from t8_c; +count(*) +3 +select count(*) +from (select * from t8 union +select * from t8_c) a; +count(*) +3 +select count(*) from t9; +count(*) +3 +select count(*) from t9_c; +count(*) +3 +select count(*) +from (select * from t9 union +select * from t9_c) a; +count(*) +3 +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; +select count(*) from t1; +count(*) +5 +select count(*) from t1_c; +count(*) +5 +select count(*) +from (select * from t1 union +select * from t1_c) a; +count(*) +5 +select count(*) from t2; +count(*) +7 +select count(*) from t2_c; +count(*) +7 +select count(*) +from (select * from t2 union +select * from t2_c) a; +count(*) +7 +select count(*) from t3; +count(*) +4 +select count(*) from t3_c; +count(*) +4 +select count(*) +from (select * from t3 union +select * from t3_c) a; +count(*) +4 +select count(*) from t4; +count(*) +22 +select count(*) from t4_c; +count(*) +22 +select count(*) +from (select * from t4 union +select * from t4_c) a; +count(*) +22 +select count(*) from t5; +count(*) +3 +select count(*) from t5_c; +count(*) +3 +select count(*) +from (select * from t5 union +select * from t5_c) a; +count(*) +3 +select count(*) from t6; +count(*) +8 +select count(*) from t6_c; +count(*) +8 +select count(*) +from (select * from t6 union +select * from t6_c) a; +count(*) +8 +select count(*) from t7; +count(*) +5 +select count(*) from t7_c; +count(*) +5 +select count(*) +from (select * from t7 union +select * from t7_c) a; +count(*) +5 +select count(*) from t8; +count(*) +3 +select count(*) from t8_c; +count(*) +3 +select count(*) +from (select * from t8 union +select * from t8_c) a; +count(*) +3 +select count(*) from t9; +count(*) +3 +select count(*) from t9_c; +count(*) +3 +select count(*) +from (select * from t9 union +select * from t9_c) a; +count(*) +3 +drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id + +DROP TABLE test.backup_info; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t2_c; 520093696, diff --git a/mysql-test/suite/ndb/t/ndb_restore.test b/mysql-test/suite/ndb/t/ndb_restore.test index 940b53adbe1..c8b07fd351a 100644 --- a/mysql-test/suite/ndb/t/ndb_restore.test +++ b/mysql-test/suite/ndb/t/ndb_restore.test @@ -33,13 +33,15 @@ CREATE TABLE `t2_c` ( PRIMARY KEY (`capgotod`), KEY `i quadaddsvr` (`gotod`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; -INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'); +INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod 
rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,''); +# Added ROW_FORMAT=FIXED to use below to see that setting is preserved +# by restore CREATE TABLE `t3_c` ( `CapGoaledatta` smallint(5) unsigned NOT NULL default '0', `capgotod` smallint(5) unsigned NOT NULL default '0', PRIMARY KEY (`capgotod`,`CapGoaledatta`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED; INSERT INTO `t3_c` VALUES (5,3),(2,4),(5,4),(1,3); # Bug #27775 - mediumint auto inc not restored correctly @@ -147,8 +149,8 @@ INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net',' # auto inc table not handled correctly when restored from cluster backup # - before fix ndb_restore would not set auto inc value correct, # seen by select below -create table t10_c (a int auto_increment key) ENGINE=ndbcluster; -insert into t10_c values (1),(2),(3); +CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +INSERT INTO t10_c VALUES (1),(2),(3); # Bug #27775 - mediumint auto inc not restored correctly # - check int insert into t10_c values (10000),(2000),(3000); @@ -164,14 +166,27 @@ create table t8 engine=myisam as select * from t8_c; create table t9 engine=myisam as select * from t9_c; create table t10 engine=myisam as select * from t10_c; +# check that force varpart is preserved by ndb_restore +# t3_c has ROW_FORMAT=FIXED i.e. ForceVarPart=0 +--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -d test t3_c | grep ForceVarPart +--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -d test t2_c | grep ForceVarPart --source include/ndb_backup.inc -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT +# check that force varpart is preserved by ndb_restore +# t3_c has ROW_FORMAT=FIXED i.e. ForceVarPart=0 +--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -d test t3_c | grep ForceVarPart +--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -d test t2_c | grep ForceVarPart -show tables; +# Bug #30667 +# ndb table discovery does not work correcly with information schema +# - prior to bug fix this would yeild no output and a warning +select * from information_schema.columns where table_name = "t1_c"; +# random output order?? 
+#show tables; select count(*) from t1; select count(*) from t1_c; @@ -247,9 +262,146 @@ select max(a) from t10_c; select auto_increment from information_schema.tables where table_name = 't10_c'; +# +# Try Partitioned tables as well +# +ALTER TABLE t7_c +PARTITION BY LINEAR KEY (`dardtestard`); + +--source include/ndb_backup.inc +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT + +select count(*) from t1; +select count(*) from t1_c; +select count(*) + from (select * from t1 union + select * from t1_c) a; + +select count(*) from t2; +select count(*) from t2_c; +select count(*) + from (select * from t2 union + select * from t2_c) a; + +select count(*) from t3; +select count(*) from t3_c; +select count(*) + from (select * from t3 union + select * from t3_c) a; + +select count(*) from t4; +select count(*) from t4_c; +select count(*) + from (select * from t4 union + select * from t4_c) a; + +select count(*) from t5; +select count(*) from t5_c; +select count(*) + from (select * from t5 union + select * from t5_c) a; + +select count(*) from t6; +select count(*) from t6_c; +select count(*) + from (select * from t6 union + select * from t6_c) a; + +select count(*) from t7; +select count(*) from t7_c; +select count(*) + from (select * from t7 union + select * from t7_c) a; + +select count(*) from t8; +select count(*) from t8_c; +select count(*) + from (select * from t8 union + select * from t8_c) a; + +select count(*) from t9; +select count(*) from t9_c; +select count(*) + from (select * from t9 union + select * from t9_c) a; + +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT + +select count(*) from t1; +select count(*) from t1_c; +select count(*) + from (select * from t1 union + select * from t1_c) a; + +select count(*) from t2; +select count(*) from t2_c; +select count(*) + from (select * from t2 union + select * from t2_c) a; + +select count(*) from t3; +select count(*) from t3_c; +select count(*) + from (select * from t3 union + select * from t3_c) a; + +select count(*) from t4; +select count(*) from t4_c; +select count(*) + from (select * from t4 union + select * from t4_c) a; + +select count(*) from t5; +select count(*) from t5_c; +select count(*) + from (select * from t5 union + select * from t5_c) a; + +select count(*) from t6; +select count(*) from t6_c; +select count(*) + from (select * from t6 union + select * from t6_c) a; + +select count(*) from t7; +select count(*) from t7_c; +select count(*) + from (select * from t7 union + select * from t7_c) a; + +select count(*) from t8; +select count(*) from t8_c; +select count(*) + from (select * from t8 union + select * from t8_c) a; + +select count(*) from t9; +select count(*) from t9_c; +select count(*) + from (select * from t9 union + select * from t9_c) a; + +# +# Drop all table except t2_c +# This to make sure that error returned from 
ndb_restore above is +# guaranteed to be from t2_c, this since order of tables in backup +# is none deterministic +# +drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; +--source include/ndb_backup.inc +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --core=0 -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id 2>&1 | grep Translate || true + +# +# Cleanup +# + --disable_warnings -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9, t10; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t2_c; --enable_warnings # diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 23b0e0915ae..769773c166a 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -1399,15 +1399,15 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->write_attr("NEXTID", 1); tOperation->interpret_exit_ok(); tOperation->def_label(0); - tOperation->interpret_exit_nok(9999); - + tOperation->interpret_exit_ok(); + tRecAttrResult = tOperation->getValue("NEXTID"); if (tConnection->execute( NdbTransaction::Commit ) == -1) { - if (tConnection->theError.code != 9999) - goto error_handler; + goto error_handler; } else { + range.m_highest_seen = tRecAttrResult->u_64_value(); DBUG_PRINT("info", ("Setting next auto increment value (db) to %lu", (ulong) opValue)); @@ -1420,7 +1420,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tRecAttrResult = tOperation->getValue("NEXTID"); if (tConnection->execute( NdbTransaction::Commit ) == -1 ) goto error_handler; - opValue = tRecAttrResult->u_64_value(); // out + range.m_highest_seen = opValue = tRecAttrResult->u_64_value(); // out break; default: goto error_handler;
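
# Editor's note: the patch series above (Bug #33534) avoids taking the
# tuple-id range lock when setting an auto_increment field would not advance
# NEXTID, by consulting the cached range and the new m_highest_seen value
# first. The following standalone C++ sketch illustrates that decision flow.
# It is not the real NDB API: the names TupleIdRange, m_highest_seen,
# checkTupleIdInNdb and checkUpdateAutoIncrementValue mirror the patch, but
# the scaffolding (UNSET, kernelUpdateNextId, the main() driver) is invented
# for illustration only.
#
# // Minimal sketch of the "check before locking" optimization, assuming a
# // simplified model of the per-table cached tuple-id range.
# #include <cstdint>
# #include <iostream>
#
# typedef std::uint64_t Uint64;
#
# static const Uint64 UNSET = ~(Uint64)0;   // "no cached range" marker, as in the patch
#
# struct TupleIdRange {
#   Uint64 m_first_tuple_id;   // cached range: next value handed out is first + 1
#   Uint64 m_last_tuple_id;    // last value reserved from the data node
#   Uint64 m_highest_seen;     // highest NEXTID ever read back, even when not cached
#   void reset() { m_first_tuple_id = UNSET; m_last_tuple_id = UNSET; m_highest_seen = 0; }
# };
#
# // Mirrors the logic of Ndb::checkTupleIdInNdb() in the patch: returns 0 when
# // locally known state already proves NEXTID >= tupleId (no data-node round
# // trip needed), and 1 when the data node must be consulted/updated.
# int checkTupleIdInNdb(const TupleIdRange& range, Uint64 tupleId)
# {
#   if (range.m_first_tuple_id != UNSET && range.m_first_tuple_id > tupleId)
#     return 0;                       // cached range already covers a higher value
#   if (range.m_highest_seen > tupleId)
#     return 0;                       // a higher NEXTID was read back earlier
#   return 1;
# }
#
# // Corresponds to Ndb::checkUpdateAutoIncrementValue(): the handler calls this
# // before taking the tuple-id range lock, so inserts of keys below the current
# // maximum never serialize on that lock.
# bool checkUpdateAutoIncrementValue(const TupleIdRange& range, Uint64 autoValue)
# {
#   return checkTupleIdInNdb(range, autoValue) != 0;
# }
#
# // Hypothetical stand-in for the interpreted update against SYSTAB_0: bump
# // NEXTID only if the requested value is higher, and always report the value
# // now stored (the patch reads this back into m_highest_seen).
# Uint64 kernelUpdateNextId(Uint64& nextIdInKernel, Uint64 requested)
# {
#   if (requested > nextIdInKernel)
#     nextIdInKernel = requested;
#   return nextIdInKernel;
# }
#
# int main()
# {
#   Uint64 nextIdInKernel = 101;      // pretend SYSTAB_0 already holds NEXTID = 101
#   TupleIdRange range;
#   range.reset();
#
#   // Loading rows with descending keys (100, 99, 98): after the first check,
#   // the cached m_highest_seen makes every later check a pure in-memory decision.
#   for (Uint64 key = 100; key >= 98; key--) {
#     Uint64 wanted = key + 1;        // setting the field asks for NEXTID >= key + 1
#     if (checkUpdateAutoIncrementValue(range, wanted)) {
#       // Only here would the real code take Ndb_tuple_id_range_guard and call
#       // setAutoIncrementValue(), i.e. go to the data node.
#       range.m_highest_seen = kernelUpdateNextId(nextIdInKernel, wanted);
#       std::cout << "key " << key << ": data-node round trip, NEXTID now "
#                 << range.m_highest_seen << "\n";
#     } else {
#       std::cout << "key " << key << ": skipped, cached state is sufficient\n";
#     }
#   }
#   return 0;
# }
#
# With these assumptions only the first (highest) key causes a round trip;
# the remaining descending inserts are decided from the cached state, which is
# the behaviour the ha_ndbcluster::set_auto_inc() change above relies on.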