From 99621b01ebf47a14805bd0229041f1270705fa27 Mon Sep 17 00:00:00 2001
From: "jonas@perch.ndb.mysql.com" <>
Date: Wed, 21 Jun 2006 09:20:55 +0200
Subject: [PATCH 01/14] ndb autotest - add BUILD/compile-ndb-autotest to
 simplify building autotest on different mysql versions

---
 BUILD/compile-ndb-autotest | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100755 BUILD/compile-ndb-autotest

diff --git a/BUILD/compile-ndb-autotest b/BUILD/compile-ndb-autotest
new file mode 100755
index 00000000000..3ef0091c155
--- /dev/null
+++ b/BUILD/compile-ndb-autotest
@@ -0,0 +1,9 @@
+#! /bin/sh
+
+path=`dirname $0`
+. "$path/SETUP.sh"
+
+extra_flags="$fast_cflags $max_cflags -g"
+extra_configs="$max_configs --with-ndb-test --with-ndb-ccflags=-DERROR_INSERT"
+
+. "$path/FINISH.sh"

From 14ba65ff64f4a91ff226a6dc9237f832fff63f6e Mon Sep 17 00:00:00 2001
From: "jonas@perch.ndb.mysql.com" <>
Date: Wed, 21 Jun 2006 13:39:04 +0200
Subject: [PATCH 02/14] ndb - bug#20007 varpages did not get freed on drop
 table

---
 storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp |  3 ++
 .../ndb/src/kernel/blocks/dbtup/DbtupGen.cpp  |  6 +++
 .../ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 41 ++++++++++++++++++-
 .../src/kernel/blocks/dbtup/DbtupPageMap.cpp  |  5 +++
 .../src/kernel/blocks/dbtup/DbtupVarAlloc.cpp |  7 ++++
 .../ndb/src/kernel/blocks/dbtup/tuppage.hpp   | 10 ++++-
 6 files changed, 69 insertions(+), 3 deletions(-)

diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 4ff6e069963..09e7809de99 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -246,6 +246,7 @@ inline const Uint32* ALIGN_WORD(const void* ptr)
 #define ZTUP_SCAN 10
 #define ZFREE_EXTENT 11
 #define ZUNMAP_PAGES 12
+#define ZFREE_VAR_PAGES 13
 
 #define ZSCAN_PROCEDURE 0
 #define ZCOPY_PROCEDURE 2
@@ -620,6 +621,7 @@ struct Fragrecord {
   Uint32 m_tablespace_id;
   Uint32 m_logfile_group_id;
   Disk_alloc_info m_disk_alloc_info;
+  Uint32 m_var_page_chunks;
 };
 typedef Ptr<Fragrecord> FragrecordPtr;
 
@@ -2335,6 +2337,7 @@ private:
 
   void releaseFragment(Signal* signal, Uint32 tableId);
+  void drop_fragment_free_var_pages(Signal*);
   void drop_fragment_free_exent(Signal*, TablerecPtr, FragrecordPtr, Uint32);
   void drop_fragment_unmap_pages(Signal*, TablerecPtr, FragrecordPtr, Uint32);
   void drop_fragment_unmap_page_callback(Signal* signal, Uint32, Uint32);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 8a68905cef9..c59cf4015af 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -227,6 +227,12 @@ void Dbtup::execCONTINUEB(Signal* signal)
     drop_fragment_unmap_pages(signal, tabPtr, fragPtr, signal->theData[3]);
     return;
   }
+  case ZFREE_VAR_PAGES:
+  {
+    ljam();
+    drop_fragment_free_var_pages(signal);
+    return;
+  }
   default:
     ndbrequire(false);
     break;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 5cfd1f8cb77..f779c93ec94 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -140,6 +140,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
   regFragPtr.p->m_undo_complete= false;
   regFragPtr.p->m_lcp_scan_op = RNIL;
   regFragPtr.p->m_lcp_keep_list = RNIL;
+  regFragPtr.p->m_var_page_chunks = RNIL;
 
   Uint32 noAllocatedPages= allocFragPages(regFragPtr.p, pages);
 
@@ -970,7 +971,7 @@ Dbtup::drop_fragment_unmap_pages(Signal *signal,
   case -1:
     break;
   default:
-    ndbrequire(res == pagePtr.i);
+    ndbrequire((Uint32)res == pagePtr.i);
     drop_fragment_unmap_page_callback(signal, pos, res);
   }
   return;
@@ -1052,6 +1053,44 @@ Dbtup::drop_fragment_free_exent(Signal *signal,
     }
   }
+
+  signal->theData[0] = ZFREE_VAR_PAGES;
+  signal->theData[1] = tabPtr.i;
+  signal->theData[2] = fragPtr.i;
+  sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void
+Dbtup::drop_fragment_free_var_pages(Signal* signal)
+{
+  ljam();
+  Uint32 tableId = signal->theData[1];
+  Uint32 fragPtrI = signal->theData[2];
+
+  TablerecPtr tabPtr;
+  tabPtr.i= tableId;
+  ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+  FragrecordPtr fragPtr;
+  fragPtr.i = fragPtrI;
+  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+
+  PagePtr pagePtr;
+  if ((pagePtr.i = fragPtr.p->m_var_page_chunks) != RNIL)
+  {
+    c_page_pool.getPtr(pagePtr);
+    Var_page* page = (Var_page*)pagePtr.p;
+    fragPtr.p->m_var_page_chunks = page->next_chunk;
+
+    Uint32 sz = page->chunk_size;
+    returnCommonArea(pagePtr.i, sz);
+
+    signal->theData[0] = ZFREE_VAR_PAGES;
+    signal->theData[1] = tabPtr.i;
+    signal->theData[2] = fragPtr.i;
+    sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+    return;
+  }
+
   releaseFragPages(fragPtr.p);
 
   Uint32 i;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index db6f5e3b185..ef90462cfb2 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -299,6 +299,11 @@ void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
     LocalDLList<Page> tmp(c_page_pool, regFragPtr->thFreeFirst);
     tmp.remove();
   }
+
+  {
+    LocalSLList<Page> tmp(c_page_pool, regFragPtr->m_empty_pages);
+    tmp.remove();
+  }
   return;
 } else {
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
index 94bd75108a4..52ab66b5c0e 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
@@ -323,6 +323,13 @@ Dbtup::get_empty_var_page(Fragrecord* fragPtr)
     ptr.p->nextList = RNIL;
     list.add(ret.i + 1, ptr);
   }
+
+  c_page_pool.getPtr(ret);
+
+  Var_page* page = (Var_page*)ret.p;
+  page->chunk_size = cnt;
+  page->next_chunk = fragPtr->m_var_page_chunks;
+  fragPtr->m_var_page_chunks = ret.i;
 
   return ret.i;
 }
diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
index 04ed18da58d..4b4df909061 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
@@ -107,8 +107,14 @@ struct Tup_varsize_page
   Uint32 page_state;
   Uint32 next_page;
   Uint32 prev_page;
-  Uint32 first_cluster_page;
-  Uint32 last_cluster_page;
+  union {
+    Uint32 first_cluster_page;
+    Uint32 chunk_size;
+  };
+  union {
+    Uint32 last_cluster_page;
+    Uint32 next_chunk;
+  };
   Uint32 next_cluster_page;
   Uint32 prev_cluster_page;
   Uint32 frag_page_id;

From 8f06aae041ba089c5f392573910d7eb35b17769f Mon Sep 17 00:00:00 2001
From: "jonas@perch.ndb.mysql.com" <>
Date: Wed, 21 Jun 2006 14:00:26 +0200
Subject: [PATCH 03/14] ndb - bug#20197 also close scans which are in
 "delivered" state, as it's impossible to release locks afterwards; backport
 from 5.1

---
 ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 2bd61296554..a71942f5cc8 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -6978,6 +6978,18 @@ void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
       found = true;
     }
   }
+
+  ScanFragList deliv(c_scan_frag_pool, scanptr.p->m_delivered_scan_frags);
+  for(deliv.first(ptr); !ptr.isNull(); deliv.next(ptr))
+  {
+    jam();
+    if (refToNode(ptr.p->lqhBlockref) == failedNodeId)
+    {
+      jam();
+      found = true;
+      break;
+    }
+  }
 }
 if(found){
   jam();

From a32d815a2505d854f921b6be48f9b50fbb0db30e Mon Sep 17 00:00:00 2001
From: "jonas@perch.ndb.mysql.com" <>
Date: Wed, 21 Jun 2006 16:01:23 +0200
Subject: [PATCH 04/14] ndb - bug#19275 make sure tablename is released in
 case of alter table

---
 .../ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 35 ++++++++++++++-----
 1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index ef08c06822f..ed6b94fd2e8 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -4695,11 +4695,6 @@ Dbdict::alterTab_writeTableConf(Signal* signal,
   SegmentedSectionPtr tabInfoPtr;
   getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI);
   signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
-#ifndef DBUG_OFF
-  ndbout_c("DICT_TAB_INFO in DICT");
-  SimplePropertiesSectionReader reader(tabInfoPtr, getSectionSegmentPool());
-  reader.printAll(ndbout);
-#endif
   EXECUTE_DIRECT(SUMA, GSN_ALTER_TAB_REQ, signal,
                  AlterTabReq::SignalLength);
   releaseSections(signal);
@@ -6960,13 +6955,37 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
 {
   TableRecordPtr tablePtr;
   c_tableRecordPool.getPtr(tablePtr, tableId);
-  if (removeFromHash){
+  if (removeFromHash)
+  {
     jam();
     release_object(tablePtr.p->m_obj_ptr_i);
   }
+  else
+  {
+    Rope tmp(c_rope_pool, tablePtr.p->tableName);
+    tmp.erase();
+  }
 
-  Rope frm(c_rope_pool, tablePtr.p->frmData);
-  frm.erase();
+  {
+    Rope tmp(c_rope_pool, tablePtr.p->frmData);
+    tmp.erase();
+  }
+
+  {
+    Rope tmp(c_rope_pool, tablePtr.p->tsData);
+    tmp.erase();
+  }
+
+  {
+    Rope tmp(c_rope_pool, tablePtr.p->ngData);
+    tmp.erase();
+  }
+
+  {
+    Rope tmp(c_rope_pool, tablePtr.p->rangeData);
+    tmp.erase();
+  }
+
   tablePtr.p->tabState = TableRecord::NOT_DEFINED;
 
   LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool,

From dc752699656de8ddf39e9c30cc3202fd03791455 Mon Sep 17 00:00:00 2001
From: "jonas@perch.ndb.mysql.com" <>
Date: Thu, 22 Jun 2006 10:24:44 +0200
Subject: [PATCH 05/14] ndb - bug#16341 create tablespace/logfile group should
 "back out changes"

---
 mysql-test/r/ndb_dd_ddl.result                |  58 ++++++----
 mysql-test/t/ndb_dd_ddl.test                  |  69 +++++++----
 sql/ha_ndbcluster.cc                          |  33 +++++-
 .../kernel/signaldata/CreateFilegroup.hpp     |   1 +
 storage/ndb/include/ndbapi/NdbDictionary.hpp  |  33 +++++-
 .../ndb/src/kernel/blocks/dbdict/Dbdict.cpp   |   3 +-
 storage/ndb/src/ndbapi/NdbDictionary.cpp      |  85 +++++++++++---
 storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp  | 109 +++++++++++++-----
 storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp  |  17 +--
 9 files changed, 301 insertions(+), 107 deletions(-)

diff --git a/mysql-test/r/ndb_dd_ddl.result b/mysql-test/r/ndb_dd_ddl.result
index 47b95214024..71f42974929 100644
--- a/mysql-test/r/ndb_dd_ddl.result
+++ b/mysql-test/r/ndb_dd_ddl.result
@@ -4,12 +4,12 @@ CREATE DATABASE mysqltest;
 **** Begin Duplicate Statement Testing ****
 CREATE LOGFILE GROUP lg1
 ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
 UNDO_BUFFER_SIZE = 1M
 ENGINE=NDB;
 CREATE LOGFILE GROUP lg2
 ADD UNDOFILE 'undofile2.dat'
-INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; ERROR HY000: Failed to create LOGFILE GROUP @@ -19,35 +19,35 @@ Error 1296 Got error 1514 'Currently there is a limit of one logfile group' from Error 1515 Failed to create LOGFILE GROUP CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE = 1M ENGINE=NDB; ERROR HY000: Failed to create LOGFILE GROUP ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE NDB; +INITIAL_SIZE 1M ENGINE NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE=NDB; +INITIAL_SIZE 1M ENGINE=NDB; ERROR HY000: Failed to alter: CREATE UNDOFILE CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ERROR HY000: Failed to create TABLESPACE ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; ERROR HY000: Failed to alter: CREATE DATAFILE CREATE TABLE mysqltest.t1 @@ -94,20 +94,20 @@ DROP DATABASE IF EXISTS mysqltest; **** Begin Statment CaSe Testing **** creaTE LOgfilE GrOuP lg1 adD undoFILE 'undofile.dat' -initiAL_siZE 16M +initiAL_siZE 1M UnDo_BuFfEr_SiZe = 1M ENGInE=NDb; altER LOgFiLE GrOUp lg1 AdD UnDOfILe 'uNdOfiLe02.daT' -INItIAl_SIzE 4M ENgINE nDB; +INItIAl_SIzE 1M ENgINE nDB; CrEAtE TABLEspaCE ts1 ADD DATAfilE 'datafile.dat' UsE LoGFiLE GRoUP lg1 -INITiaL_SizE 12M +INITiaL_SizE 1M ENGiNe NDb; AlTeR tAbLeSpAcE ts1 AdD DaTaFiLe 'dAtAfiLe2.daT' -InItIaL_SiZe 12M +InItIaL_SiZe 1M EnGiNe=NDB; CREATE TABLE t1 (pk1 int not null primary key, b int not null, c int not null) @@ -129,21 +129,21 @@ EnGiNe=nDb; **** Begin = And No = Testing **** CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE=16M +INITIAL_SIZE=1M UNDO_BUFFER_SIZE=1M ENGINE=NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE=4M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLE t1 (pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) @@ -165,21 +165,21 @@ ENGINE=NDB; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLE t1 (pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) @@ -199,3 +199,19 @@ ENGINE NDB; DROP LOGFILE GROUP lg1 ENGINE NDB; **** End = And No = **** +create table t1 (a int primary key) engine = myisam; +create logfile group lg1 add undofile '/home/jonas/src/51-work/mysql-test/var/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb;; +ERROR HY000: Failed to create UNDOFILE +create logfile group lg1 +add undofile 'undofile.dat' +initial_size 1M +undo_buffer_size = 1M +engine=ndb; +create tablespace ts1 add datafile '/home/jonas/src/51-work/mysql-test/var/master-data/test/t1.frm' use logfile 
group lg1 initial_size 1M engine ndb;; +ERROR HY000: Failed to create DATAFILE +drop tablespace ts1 +engine ndb; +ERROR HY000: Failed to drop TABLESPACE +drop logfile group lg1 +engine ndb; +drop table t1; diff --git a/mysql-test/t/ndb_dd_ddl.test b/mysql-test/t/ndb_dd_ddl.test index 339f7bc2f22..2a0755c2748 100644 --- a/mysql-test/t/ndb_dd_ddl.test +++ b/mysql-test/t/ndb_dd_ddl.test @@ -40,7 +40,7 @@ CREATE DATABASE mysqltest; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE = 1M ENGINE=NDB; @@ -48,7 +48,7 @@ ENGINE=NDB; --error ER_CREATE_FILEGROUP_FAILED CREATE LOGFILE GROUP lg2 ADD UNDOFILE 'undofile2.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; SHOW WARNINGS; @@ -56,42 +56,42 @@ SHOW WARNINGS; --error ER_CREATE_FILEGROUP_FAILED CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE = 1M ENGINE=NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE NDB; +INITIAL_SIZE 1M ENGINE NDB; --error ER_ALTER_FILEGROUP_FAILED ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M ENGINE=NDB; +INITIAL_SIZE 1M ENGINE=NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; --error ER_CREATE_FILEGROUP_FAILED CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; --error ER_ALTER_FILEGROUP_FAILED ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE=NDB; CREATE TABLE mysqltest.t1 @@ -155,23 +155,23 @@ DROP DATABASE IF EXISTS mysqltest; creaTE LOgfilE GrOuP lg1 adD undoFILE 'undofile.dat' -initiAL_siZE 16M +initiAL_siZE 1M UnDo_BuFfEr_SiZe = 1M ENGInE=NDb; altER LOgFiLE GrOUp lg1 AdD UnDOfILe 'uNdOfiLe02.daT' -INItIAl_SIzE 4M ENgINE nDB; +INItIAl_SIzE 1M ENgINE nDB; CrEAtE TABLEspaCE ts1 ADD DATAfilE 'datafile.dat' UsE LoGFiLE GRoUP lg1 -INITiaL_SizE 12M +INITiaL_SizE 1M ENGiNe NDb; AlTeR tAbLeSpAcE ts1 AdD DaTaFiLe 'dAtAfiLe2.daT' -InItIaL_SiZe 12M +InItIaL_SiZe 1M EnGiNe=NDB; CREATE TABLE t1 @@ -203,24 +203,24 @@ EnGiNe=nDb; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE=16M +INITIAL_SIZE=1M UNDO_BUFFER_SIZE=1M ENGINE=NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE=4M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE=12M +INITIAL_SIZE=1M ENGINE=NDB; CREATE TABLE t1 @@ -250,24 +250,24 @@ ENGINE=NDB; CREATE LOGFILE GROUP lg1 ADD UNDOFILE 'undofile.dat' -INITIAL_SIZE 16M +INITIAL_SIZE 1M UNDO_BUFFER_SIZE 1M ENGINE NDB; ALTER LOGFILE GROUP lg1 ADD UNDOFILE 'undofile02.dat' -INITIAL_SIZE 4M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLESPACE ts1 ADD DATAFILE 'datafile.dat' USE LOGFILE GROUP lg1 -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; ALTER TABLESPACE ts1 ADD DATAFILE 'datafile2.dat' -INITIAL_SIZE 12M +INITIAL_SIZE 1M ENGINE NDB; CREATE TABLE t1 @@ -297,5 +297,30 @@ ENGINE NDB; --echo **** End = And No = **** ############ End = And No = ################## -# End 5.1 test +### +# +# bug#16341 +create table t1 (a int primary key) engine = myisam; +--error ER_CREATE_FILEGROUP_FAILED +--eval create logfile group lg1 add undofile 
'$MYSQLTEST_VARDIR/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb; + +create logfile group lg1 +add undofile 'undofile.dat' +initial_size 1M +undo_buffer_size = 1M +engine=ndb; + +--error ER_CREATE_FILEGROUP_FAILED +--eval create tablespace ts1 add datafile '$MYSQLTEST_VARDIR/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb; + +--error ER_DROP_FILEGROUP_FAILED +drop tablespace ts1 +engine ndb; + +drop logfile group lg1 +engine ndb; + +drop table t1; + +# End 5.1 test diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 27fe2e889af..986014a36d1 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -9947,7 +9947,8 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - + + NdbError err; NDBDICT *dict = ndb->getDictionary(); int error; const char * errmsg; @@ -9960,6 +9961,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) NdbDictionary::Tablespace ndb_ts; NdbDictionary::Datafile ndb_df; + NdbDictionary::ObjectId objid; if (set_up_tablespace(info, &ndb_ts)) { DBUG_RETURN(1); @@ -9969,7 +9971,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) DBUG_RETURN(1); } errmsg= "TABLESPACE"; - if (dict->createTablespace(ndb_ts)) + if (dict->createTablespace(ndb_ts, &objid)) { DBUG_PRINT("error", ("createTablespace returned %d", error)); goto ndberror; @@ -9978,8 +9980,17 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) errmsg= "DATAFILE"; if (dict->createDatafile(ndb_df)) { + err= dict->getNdbError(); + NdbDictionary::Tablespace tmp= dict->getTablespace(ndb_ts.getName()); + if (dict->getNdbError().code == 0 && + tmp.getObjectId() == objid.getObjectId() && + tmp.getObjectVersion() == objid.getObjectVersion()) + { + dict->dropTablespace(tmp); + } + DBUG_PRINT("error", ("createDatafile returned %d", error)); - goto ndberror; + goto ndberror2; } is_tablespace= 1; break; @@ -10033,6 +10044,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) error= ER_CREATE_FILEGROUP_FAILED; NdbDictionary::LogfileGroup ndb_lg; NdbDictionary::Undofile ndb_uf; + NdbDictionary::ObjectId objid; if (info->undo_file_name == NULL) { /* @@ -10045,7 +10057,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) DBUG_RETURN(1); } errmsg= "LOGFILE GROUP"; - if (dict->createLogfileGroup(ndb_lg)) + if (dict->createLogfileGroup(ndb_lg, &objid)) { goto ndberror; } @@ -10057,7 +10069,15 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) errmsg= "UNDOFILE"; if (dict->createUndofile(ndb_uf)) { - goto ndberror; + err= dict->getNdbError(); + NdbDictionary::LogfileGroup tmp= dict->getLogfileGroup(ndb_lg.getName()); + if (dict->getNdbError().code == 0 && + tmp.getObjectId() == objid.getObjectId() && + tmp.getObjectVersion() == objid.getObjectVersion()) + { + dict->dropLogfileGroup(tmp); + } + goto ndberror2; } break; } @@ -10134,7 +10154,8 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) DBUG_RETURN(FALSE); ndberror: - const NdbError err= dict->getNdbError(); + err= dict->getNdbError(); +ndberror2: ERR_PRINT(err); ndb_to_mysql_error(&err); diff --git a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp index 810f9cdfd03..78216249a72 100644 --- a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp @@ -193,6 +193,7 @@ struct 
CreateFileConf { Uint32 senderData; Uint32 senderRef; Uint32 fileId; + Uint32 fileVersion; }; #endif diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 27e0aede36d..a9fd107c06e 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -163,6 +163,31 @@ public: }; }; + class Dictionary; // Forward declaration + + class ObjectId : public Object + { + public: + ObjectId(); + virtual ~ObjectId(); + + /** + * Get status of object + */ + virtual Status getObjectStatus() const; + + /** + * Get version of object + */ + virtual int getObjectVersion() const; + + virtual int getObjectId() const; + + private: + friend class Dictionary; + class NdbDictObjectImpl & m_impl; + }; + class Table; // forward declaration class Tablespace; // forward declaration // class NdbEventOperation; // forward declaration @@ -1781,20 +1806,20 @@ public: * @{ */ - int createLogfileGroup(const LogfileGroup &); + int createLogfileGroup(const LogfileGroup &, ObjectId* = 0); int dropLogfileGroup(const LogfileGroup&); LogfileGroup getLogfileGroup(const char * name); - int createTablespace(const Tablespace &); + int createTablespace(const Tablespace &, ObjectId* = 0); int dropTablespace(const Tablespace&); Tablespace getTablespace(const char * name); Tablespace getTablespace(Uint32 tablespaceId); - int createDatafile(const Datafile &, bool overwrite_existing = false); + int createDatafile(const Datafile &, bool overwrite_existing = false, ObjectId* = 0); int dropDatafile(const Datafile&); Datafile getDatafile(Uint32 node, const char * path); - int createUndofile(const Undofile &, bool overwrite_existing = false); + int createUndofile(const Undofile &, bool overwrite_existing = false, ObjectId * = 0); int dropUndofile(const Undofile&); Undofile getUndofile(Uint32 node, const char * path); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index ed6b94fd2e8..908c116988d 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -13985,7 +13985,8 @@ Dbdict::trans_commit_complete_done(Signal* signal, conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; conf->fileId = f_ptr.p->key; - + conf->fileVersion = f_ptr.p->m_version; + //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_CONF, signal, CreateFileConf::SignalLength, JBB); diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index c71689d2e81..7d888456888 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -18,6 +18,32 @@ #include "NdbDictionaryImpl.hpp" #include +NdbDictionary::ObjectId::ObjectId() + : m_impl(* new NdbDictObjectImpl(NdbDictionary::Object::TypeUndefined)) +{ +} + +NdbDictionary::ObjectId::~ObjectId() +{ + NdbDictObjectImpl * tmp = &m_impl; + delete tmp; +} + +NdbDictionary::Object::Status +NdbDictionary::ObjectId::getObjectStatus() const { + return m_impl.m_status; +} + +int +NdbDictionary::ObjectId::getObjectVersion() const { + return m_impl.m_version; +} + +int +NdbDictionary::ObjectId::getObjectId() const { + return m_impl.m_id; +} + /***************************************************************** * Column facade */ @@ -1799,17 +1825,22 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) } int -NdbDictionary::Dictionary::createLogfileGroup(const LogfileGroup & lg){ - return 
m_impl.createLogfileGroup(NdbLogfileGroupImpl::getImpl(lg)); +NdbDictionary::Dictionary::createLogfileGroup(const LogfileGroup & lg, + ObjectId * obj) +{ + return m_impl.createLogfileGroup(NdbLogfileGroupImpl::getImpl(lg), + obj ? &obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropLogfileGroup(const LogfileGroup & lg){ +NdbDictionary::Dictionary::dropLogfileGroup(const LogfileGroup & lg) +{ return m_impl.dropLogfileGroup(NdbLogfileGroupImpl::getImpl(lg)); } NdbDictionary::LogfileGroup -NdbDictionary::Dictionary::getLogfileGroup(const char * name){ +NdbDictionary::Dictionary::getLogfileGroup(const char * name) +{ NdbDictionary::LogfileGroup tmp; m_impl.m_receiver.get_filegroup(NdbLogfileGroupImpl::getImpl(tmp), NdbDictionary::Object::LogfileGroup, name); @@ -1817,17 +1848,22 @@ NdbDictionary::Dictionary::getLogfileGroup(const char * name){ } int -NdbDictionary::Dictionary::createTablespace(const Tablespace & lg){ - return m_impl.createTablespace(NdbTablespaceImpl::getImpl(lg)); +NdbDictionary::Dictionary::createTablespace(const Tablespace & lg, + ObjectId * obj) +{ + return m_impl.createTablespace(NdbTablespaceImpl::getImpl(lg), + obj ? &obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropTablespace(const Tablespace & lg){ +NdbDictionary::Dictionary::dropTablespace(const Tablespace & lg) +{ return m_impl.dropTablespace(NdbTablespaceImpl::getImpl(lg)); } NdbDictionary::Tablespace -NdbDictionary::Dictionary::getTablespace(const char * name){ +NdbDictionary::Dictionary::getTablespace(const char * name) +{ NdbDictionary::Tablespace tmp; m_impl.m_receiver.get_filegroup(NdbTablespaceImpl::getImpl(tmp), NdbDictionary::Object::Tablespace, name); @@ -1835,7 +1871,8 @@ NdbDictionary::Dictionary::getTablespace(const char * name){ } NdbDictionary::Tablespace -NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId){ +NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId) +{ NdbDictionary::Tablespace tmp; m_impl.m_receiver.get_filegroup(NdbTablespaceImpl::getImpl(tmp), NdbDictionary::Object::Tablespace, @@ -1844,17 +1881,24 @@ NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId){ } int -NdbDictionary::Dictionary::createDatafile(const Datafile & df, bool force){ - return m_impl.createDatafile(NdbDatafileImpl::getImpl(df), force); +NdbDictionary::Dictionary::createDatafile(const Datafile & df, + bool force, + ObjectId * obj) +{ + return m_impl.createDatafile(NdbDatafileImpl::getImpl(df), + force, + obj ? &obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropDatafile(const Datafile& df){ +NdbDictionary::Dictionary::dropDatafile(const Datafile& df) +{ return m_impl.dropDatafile(NdbDatafileImpl::getImpl(df)); } NdbDictionary::Datafile -NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path){ +NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path) +{ NdbDictionary::Datafile tmp; m_impl.m_receiver.get_file(NdbDatafileImpl::getImpl(tmp), NdbDictionary::Object::Datafile, @@ -1863,17 +1907,24 @@ NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path){ } int -NdbDictionary::Dictionary::createUndofile(const Undofile & df, bool force){ - return m_impl.createUndofile(NdbUndofileImpl::getImpl(df), force); +NdbDictionary::Dictionary::createUndofile(const Undofile & df, + bool force, + ObjectId * obj) +{ + return m_impl.createUndofile(NdbUndofileImpl::getImpl(df), + force, + obj ? 
&obj->m_impl : 0); } int -NdbDictionary::Dictionary::dropUndofile(const Undofile& df){ +NdbDictionary::Dictionary::dropUndofile(const Undofile& df) +{ return m_impl.dropUndofile(NdbUndofileImpl::getImpl(df)); } NdbDictionary::Undofile -NdbDictionary::Dictionary::getUndofile(Uint32 node, const char * path){ +NdbDictionary::Dictionary::getUndofile(Uint32 node, const char * path) +{ NdbDictionary::Undofile tmp; m_impl.m_receiver.get_file(NdbUndofileImpl::getImpl(tmp), NdbDictionary::Object::Undofile, diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 22a5d2f20a5..06a2741cc4f 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -4391,19 +4391,23 @@ NdbUndofileImpl::assign(const NdbUndofileImpl& org) } int -NdbDictionaryImpl::createDatafile(const NdbDatafileImpl & file, bool force){ +NdbDictionaryImpl::createDatafile(const NdbDatafileImpl & file, + bool force, + NdbDictObjectImpl* obj) + +{ DBUG_ENTER("NdbDictionaryImpl::createDatafile"); NdbFilegroupImpl tmp(NdbDictionary::Object::Tablespace); if(file.m_filegroup_version != ~(Uint32)0){ tmp.m_id = file.m_filegroup_id; tmp.m_version = file.m_filegroup_version; - DBUG_RETURN(m_receiver.create_file(file, tmp)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } if(m_receiver.get_filegroup(tmp, NdbDictionary::Object::Tablespace, file.m_filegroup_name.c_str()) == 0){ - DBUG_RETURN(m_receiver.create_file(file, tmp, force)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } DBUG_RETURN(-1); } @@ -4414,53 +4418,65 @@ NdbDictionaryImpl::dropDatafile(const NdbDatafileImpl & file){ } int -NdbDictionaryImpl::createUndofile(const NdbUndofileImpl & file, bool force){ +NdbDictionaryImpl::createUndofile(const NdbUndofileImpl & file, + bool force, + NdbDictObjectImpl* obj) +{ DBUG_ENTER("NdbDictionaryImpl::createUndofile"); NdbFilegroupImpl tmp(NdbDictionary::Object::LogfileGroup); if(file.m_filegroup_version != ~(Uint32)0){ tmp.m_id = file.m_filegroup_id; tmp.m_version = file.m_filegroup_version; - DBUG_RETURN(m_receiver.create_file(file, tmp)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } if(m_receiver.get_filegroup(tmp, NdbDictionary::Object::LogfileGroup, file.m_filegroup_name.c_str()) == 0){ - DBUG_RETURN(m_receiver.create_file(file, tmp, force)); + DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj)); } DBUG_PRINT("info", ("Failed to find filegroup")); DBUG_RETURN(-1); } int -NdbDictionaryImpl::dropUndofile(const NdbUndofileImpl & file){ +NdbDictionaryImpl::dropUndofile(const NdbUndofileImpl & file) +{ return m_receiver.drop_file(file); } int -NdbDictionaryImpl::createTablespace(const NdbTablespaceImpl & fg){ - return m_receiver.create_filegroup(fg); +NdbDictionaryImpl::createTablespace(const NdbTablespaceImpl & fg, + NdbDictObjectImpl* obj) +{ + return m_receiver.create_filegroup(fg, obj); } int -NdbDictionaryImpl::dropTablespace(const NdbTablespaceImpl & fg){ +NdbDictionaryImpl::dropTablespace(const NdbTablespaceImpl & fg) +{ return m_receiver.drop_filegroup(fg); } int -NdbDictionaryImpl::createLogfileGroup(const NdbLogfileGroupImpl & fg){ - return m_receiver.create_filegroup(fg); +NdbDictionaryImpl::createLogfileGroup(const NdbLogfileGroupImpl & fg, + NdbDictObjectImpl* obj) +{ + return m_receiver.create_filegroup(fg, obj); } int -NdbDictionaryImpl::dropLogfileGroup(const NdbLogfileGroupImpl & fg){ +NdbDictionaryImpl::dropLogfileGroup(const NdbLogfileGroupImpl & fg) +{ return 
m_receiver.drop_filegroup(fg); } int NdbDictInterface::create_file(const NdbFileImpl & file, const NdbFilegroupImpl & group, - bool overwrite){ + bool overwrite, + NdbDictObjectImpl* obj) +{ DBUG_ENTER("NdbDictInterface::create_file"); UtilBufferWriter w(m_buffer); DictFilegroupInfo::File f; f.init(); @@ -4503,23 +4519,39 @@ NdbDictInterface::create_file(const NdbFileImpl & file, Send signal without time-out since creating files can take a very long time if the file is very big. */ - DBUG_RETURN(dictSignal(&tSignal, ptr, 1, - 0, // master - WAIT_CREATE_INDX_REQ, - -1, 100, - err)); + int ret = dictSignal(&tSignal, ptr, 1, + 0, // master + WAIT_CREATE_INDX_REQ, + -1, 100, + err); + + if (ret == 0 && obj) + { + Uint32* data = (Uint32*)m_buffer.get_data(); + obj->m_id = data[0]; + obj->m_version = data[1]; + } + + DBUG_RETURN(ret); } void NdbDictInterface::execCREATE_FILE_CONF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) + LinearSectionPtr ptr[3]) { + const CreateFileConf* conf= + CAST_CONSTPTR(CreateFileConf, signal->getDataPtr()); + m_buffer.grow(4 * 2); // 2 words + Uint32* data = (Uint32*)m_buffer.get_data(); + data[0] = conf->fileId; + data[1] = conf->fileVersion; + m_waiter.signal(NO_WAIT); } void NdbDictInterface::execCREATE_FILE_REF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) + LinearSectionPtr ptr[3]) { const CreateFileRef* ref = CAST_CONSTPTR(CreateFileRef, signal->getDataPtr()); @@ -4529,7 +4561,8 @@ NdbDictInterface::execCREATE_FILE_REF(NdbApiSignal * signal, } int -NdbDictInterface::drop_file(const NdbFileImpl & file){ +NdbDictInterface::drop_file(const NdbFileImpl & file) +{ DBUG_ENTER("NdbDictInterface::drop_file"); NdbApiSignal tSignal(m_reference); tSignal.theReceiversBlockNumber = DBDICT; @@ -4569,7 +4602,9 @@ NdbDictInterface::execDROP_FILE_REF(NdbApiSignal * signal, } int -NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group){ +NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group, + NdbDictObjectImpl* obj) +{ DBUG_ENTER("NdbDictInterface::create_filegroup"); UtilBufferWriter w(m_buffer); DictFilegroupInfo::Filegroup fg; fg.init(); @@ -4638,17 +4673,32 @@ NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group){ ptr[0].sz = m_buffer.length() / 4; int err[] = { CreateFilegroupRef::Busy, CreateFilegroupRef::NotMaster, 0}; - DBUG_RETURN(dictSignal(&tSignal, ptr, 1, - 0, // master - WAIT_CREATE_INDX_REQ, - DICT_WAITFOR_TIMEOUT, 100, - err)); + int ret = dictSignal(&tSignal, ptr, 1, + 0, // master + WAIT_CREATE_INDX_REQ, + DICT_WAITFOR_TIMEOUT, 100, + err); + + if (ret == 0 && obj) + { + Uint32* data = (Uint32*)m_buffer.get_data(); + obj->m_id = data[0]; + obj->m_version = data[1]; + } + + DBUG_RETURN(ret); } void NdbDictInterface::execCREATE_FILEGROUP_CONF(NdbApiSignal * signal, LinearSectionPtr ptr[3]) { + const CreateFilegroupConf* conf= + CAST_CONSTPTR(CreateFilegroupConf, signal->getDataPtr()); + m_buffer.grow(4 * 2); // 2 words + Uint32* data = (Uint32*)m_buffer.get_data(); + data[0] = conf->filegroupId; + data[1] = conf->filegroupVersion; m_waiter.signal(NO_WAIT); } @@ -4664,7 +4714,8 @@ NdbDictInterface::execCREATE_FILEGROUP_REF(NdbApiSignal * signal, } int -NdbDictInterface::drop_filegroup(const NdbFilegroupImpl & group){ +NdbDictInterface::drop_filegroup(const NdbFilegroupImpl & group) +{ DBUG_ENTER("NdbDictInterface::drop_filegroup"); NdbApiSignal tSignal(m_reference); tSignal.theReceiversBlockNumber = DBDICT; diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp index 
b6961edd019..35e8027cdec 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -52,6 +52,8 @@ protected: m_status(NdbDictionary::Object::New) { m_id = -1; } + + friend class NdbDictionary::ObjectId; }; /** @@ -468,9 +470,10 @@ public: static int parseFilegroupInfo(NdbFilegroupImpl &dst, const Uint32 * data, Uint32 len); - int create_file(const NdbFileImpl &, const NdbFilegroupImpl&, bool overwrite = false); + int create_file(const NdbFileImpl &, const NdbFilegroupImpl&, + bool overwrite, NdbDictObjectImpl*); int drop_file(const NdbFileImpl &); - int create_filegroup(const NdbFilegroupImpl &); + int create_filegroup(const NdbFilegroupImpl &, NdbDictObjectImpl*); int drop_filegroup(const NdbFilegroupImpl &); int get_filegroup(NdbFilegroupImpl&, NdbDictionary::Object::Type, Uint32); @@ -622,17 +625,17 @@ public: NdbEventImpl * getBlobEvent(const NdbEventImpl& ev, uint col_no); NdbEventImpl * getEventImpl(const char * internalName); - int createDatafile(const NdbDatafileImpl &, bool force = false); + int createDatafile(const NdbDatafileImpl &, bool force, NdbDictObjectImpl*); int dropDatafile(const NdbDatafileImpl &); - int createUndofile(const NdbUndofileImpl &, bool force = false); + int createUndofile(const NdbUndofileImpl &, bool force, NdbDictObjectImpl*); int dropUndofile(const NdbUndofileImpl &); - int createTablespace(const NdbTablespaceImpl &); + int createTablespace(const NdbTablespaceImpl &, NdbDictObjectImpl*); int dropTablespace(const NdbTablespaceImpl &); - int createLogfileGroup(const NdbLogfileGroupImpl &); + int createLogfileGroup(const NdbLogfileGroupImpl &, NdbDictObjectImpl*); int dropLogfileGroup(const NdbLogfileGroupImpl &); - + const NdbError & getNdbError() const; NdbError m_error; Uint32 m_local_table_data_size; From ef892289cfa6f718d832d67d7e0a7fb90b93eee0 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Thu, 22 Jun 2006 13:53:02 +0200 Subject: [PATCH 06/14] fix result file for ndb_dd_ddl --- mysql-test/r/ndb_dd_ddl.result | 4 ++-- mysql-test/t/ndb_dd_ddl.test | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/ndb_dd_ddl.result b/mysql-test/r/ndb_dd_ddl.result index 71f42974929..9fff9f06f2a 100644 --- a/mysql-test/r/ndb_dd_ddl.result +++ b/mysql-test/r/ndb_dd_ddl.result @@ -200,14 +200,14 @@ DROP LOGFILE GROUP lg1 ENGINE NDB; **** End = And No = **** create table t1 (a int primary key) engine = myisam; -create logfile group lg1 add undofile '/home/jonas/src/51-work/mysql-test/var/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb;; +create logfile group lg1 add undofile 'MYSQLTEST_VARDIR/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb;; ERROR HY000: Failed to create UNDOFILE create logfile group lg1 add undofile 'undofile.dat' initial_size 1M undo_buffer_size = 1M engine=ndb; -create tablespace ts1 add datafile '/home/jonas/src/51-work/mysql-test/var/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb;; +create tablespace ts1 add datafile 'MYSQLTEST_VARDIR/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb;; ERROR HY000: Failed to create DATAFILE drop tablespace ts1 engine ndb; diff --git a/mysql-test/t/ndb_dd_ddl.test b/mysql-test/t/ndb_dd_ddl.test index 2a0755c2748..95ad7f0d152 100644 --- a/mysql-test/t/ndb_dd_ddl.test +++ b/mysql-test/t/ndb_dd_ddl.test @@ -302,6 +302,7 @@ ENGINE NDB; # bug#16341 create table t1 (a int primary key) engine = myisam; +--replace_result 
$MYSQLTEST_VARDIR MYSQLTEST_VARDIR --error ER_CREATE_FILEGROUP_FAILED --eval create logfile group lg1 add undofile '$MYSQLTEST_VARDIR/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb; @@ -311,6 +312,7 @@ initial_size 1M undo_buffer_size = 1M engine=ndb; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --error ER_CREATE_FILEGROUP_FAILED --eval create tablespace ts1 add datafile '$MYSQLTEST_VARDIR/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb; From 95447f9d1a0ab115a9a6734ccb4b863885107016 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Tue, 27 Jun 2006 10:02:58 +0200 Subject: [PATCH 07/14] Bug #19852 Restoring backup made from cluster with full data memory fails - make sure to allocate just enough pages in the fragments by using the actual row count from the backup, to avoid over allocation of pages to fragments, and thus avoid the bug --- ndb/include/kernel/GlobalSignalNumbers.h | 4 +- .../kernel/signaldata/BackupContinueB.hpp | 3 +- ndb/include/kernel/signaldata/BackupImpl.hpp | 22 ++- .../kernel/signaldata/BackupSignalData.hpp | 8 +- ndb/include/kernel/signaldata/DictTabInfo.hpp | 11 ++ ndb/include/kernel/signaldata/LqhFrag.hpp | 25 +-- ndb/include/kernel/signaldata/TupFrag.hpp | 15 +- ndb/include/ndbapi/NdbDictionary.hpp | 14 ++ .../common/debugger/signaldata/BackupImpl.cpp | 6 +- .../debugger/signaldata/BackupSignalData.cpp | 6 +- .../debugger/signaldata/DictTabInfo.cpp | 8 + .../common/debugger/signaldata/LqhFrag.cpp | 6 +- ndb/src/kernel/blocks/backup/Backup.cpp | 163 +++++++++++++++--- ndb/src/kernel/blocks/backup/Backup.hpp | 15 +- ndb/src/kernel/blocks/backup/BackupFormat.hpp | 17 +- ndb/src/kernel/blocks/backup/BackupInit.cpp | 3 + ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 32 +++- ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 4 + ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 10 +- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 94 ++++++---- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 5 +- ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 79 +++++---- ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 6 +- ndb/src/mgmsrv/MgmtSrvr.cpp | 10 +- ndb/src/mgmsrv/MgmtSrvr.hpp | 4 +- ndb/src/ndbapi/NdbDictionary.cpp | 24 +++ ndb/src/ndbapi/NdbDictionaryImpl.cpp | 20 +++ ndb/src/ndbapi/NdbDictionaryImpl.hpp | 3 + ndb/tools/restore/Restore.cpp | 58 ++++++- ndb/tools/restore/Restore.hpp | 15 ++ ndb/tools/restore/consumer_restore.cpp | 10 ++ sql/ha_ndbcluster.cc | 10 +- 32 files changed, 570 insertions(+), 140 deletions(-) diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index 98b6ce7d949..a84f3130abf 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -611,8 +611,6 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_WAIT_GCP_REF 500 #define GSN_WAIT_GCP_CONF 501 -/* 502 not used */ - /** * Trigger and index signals */ @@ -682,6 +680,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_BACKUP_FRAGMENT_REF 546 #define GSN_BACKUP_FRAGMENT_CONF 547 +#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 502 + #define GSN_STOP_BACKUP_REQ 548 #define GSN_STOP_BACKUP_REF 549 #define GSN_STOP_BACKUP_CONF 550 diff --git a/ndb/include/kernel/signaldata/BackupContinueB.hpp b/ndb/include/kernel/signaldata/BackupContinueB.hpp index d3d3f79f310..fe3f48444ec 100644 --- a/ndb/include/kernel/signaldata/BackupContinueB.hpp +++ b/ndb/include/kernel/signaldata/BackupContinueB.hpp @@ -31,7 +31,8 @@ private: BUFFER_UNDERFLOW = 1, BUFFER_FULL_SCAN 
= 2, BUFFER_FULL_FRAG_COMPLETE = 3, - BUFFER_FULL_META = 4 + BUFFER_FULL_META = 4, + BACKUP_FRAGMENT_INFO = 5 }; }; diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/ndb/include/kernel/signaldata/BackupImpl.hpp index 298440ad377..07ab5bc543b 100644 --- a/ndb/include/kernel/signaldata/BackupImpl.hpp +++ b/ndb/include/kernel/signaldata/BackupImpl.hpp @@ -258,15 +258,31 @@ class BackupFragmentConf { friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 6 ); + STATIC_CONST( SignalLength = 8 ); private: Uint32 backupId; Uint32 backupPtr; Uint32 tableId; Uint32 fragmentNo; - Uint32 noOfRecords; - Uint32 noOfBytes; + Uint32 noOfRecordsLow; + Uint32 noOfBytesLow; + Uint32 noOfRecordsHigh; + Uint32 noOfBytesHigh; +}; + +class BackupFragmentCompleteRep { +public: + STATIC_CONST( SignalLength = 8 ); + + Uint32 backupId; + Uint32 backupPtr; + Uint32 tableId; + Uint32 fragmentNo; + Uint32 noOfTableRowsLow; + Uint32 noOfFragmentRowsLow; + Uint32 noOfTableRowsHigh; + Uint32 noOfFragmentRowsHigh; }; class StopBackupReq { diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/ndb/include/kernel/signaldata/BackupSignalData.hpp index e1b8c6203a1..9e34ea3a211 100644 --- a/ndb/include/kernel/signaldata/BackupSignalData.hpp +++ b/ndb/include/kernel/signaldata/BackupSignalData.hpp @@ -201,17 +201,19 @@ class BackupCompleteRep { friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size ); + STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size ); private: Uint32 senderData; Uint32 backupId; Uint32 startGCP; Uint32 stopGCP; - Uint32 noOfBytes; - Uint32 noOfRecords; + Uint32 noOfBytesLow; + Uint32 noOfRecordsLow; Uint32 noOfLogBytes; Uint32 noOfLogRecords; NdbNodeBitmask nodes; + Uint32 noOfBytesHigh; + Uint32 noOfRecordsHigh; }; /** diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp index bc4817f0cf3..3fcae69aa74 100644 --- a/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -117,9 +117,16 @@ public: CustomTriggerId = 25, FrmLen = 26, FrmData = 27, + FragmentCount = 128, // No of fragments in table (!fragment replicas) FragmentDataLen = 129, FragmentData = 130, // CREATE_FRAGMENTATION reply + + MaxRowsLow = 139, + MaxRowsHigh = 140, + MinRowsLow = 133, + MinRowsHigh = 144, + TableEnd = 999, AttributeName = 1000, // String, Mandatory @@ -263,6 +270,10 @@ public: Uint32 FragmentCount; Uint32 FragmentDataLen; Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2]; + Uint32 MaxRowsLow; + Uint32 MaxRowsHigh; + Uint32 MinRowsLow; + Uint32 MinRowsHigh; void init(); }; diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp index 13dfafcc653..50b0caaba07 100644 --- a/ndb/include/kernel/signaldata/LqhFrag.hpp +++ b/ndb/include/kernel/signaldata/LqhFrag.hpp @@ -104,7 +104,7 @@ class LqhFragReq { friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 25 ); + STATIC_CONST( SignalLength = 24 ); enum RequestInfo { CreateInRunning = 0x8000000, @@ -115,27 +115,32 @@ private: Uint32 senderData; Uint32 senderRef; Uint32 fragmentId; - Uint32 requestInfo; + Uint8 requestInfo; + Uint8 unused1; + Uint16 noOfAttributes; Uint32 tableId; Uint32 localKeyLength; - Uint32 maxLoadFactor; - Uint32 minLoadFactor; - Uint32 kValue; + Uint16 maxLoadFactor; + Uint16 minLoadFactor; + 
Uint16 kValue; + Uint8 tableType; // DictTabInfo::TableType + Uint8 GCPIndicator; Uint32 lh3DistrBits; Uint32 lh3PageBits; - Uint32 noOfAttributes; Uint32 noOfNullAttributes; - Uint32 noOfPagesToPreAllocate; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; Uint32 schemaVersion; Uint32 keyLength; Uint32 nextLCP; Uint32 noOfKeyAttr; - Uint32 noOfNewAttr; // noOfCharsets in upper half + Uint16 noOfNewAttr; + Uint16 noOfCharsets; Uint32 checksumIndicator; Uint32 noOfAttributeGroups; - Uint32 GCPIndicator; Uint32 startGci; - Uint32 tableType; // DictTabInfo::TableType Uint32 primaryTableId; // table of index or RNIL }; diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp index 5fb9d7bcf42..c9f2ad5382f 100644 --- a/ndb/include/kernel/signaldata/TupFrag.hpp +++ b/ndb/include/kernel/signaldata/TupFrag.hpp @@ -30,7 +30,7 @@ class TupFragReq { friend class Dblqh; friend class Dbtup; public: - STATIC_CONST( SignalLength = 14 ); + STATIC_CONST( SignalLength = 17 ); private: Uint32 userPtr; Uint32 userRef; @@ -38,7 +38,18 @@ private: Uint32 tableId; Uint32 noOfAttr; Uint32 fragId; - Uint32 todo[8]; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; + Uint32 noOfNullAttr; + Uint32 schemaVersion; + Uint32 noOfKeyAttr; + Uint16 noOfNewAttr; + Uint16 noOfCharsets; + Uint32 checksumIndicator; + Uint32 noOfAttributeGroups; + Uint32 globalCheckpointIdIndicator; }; class TupFragConf { diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 1413931035d..e67a0253096 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -722,6 +722,20 @@ public: */ void setObjectType(Object::Type type); + /** + * Set/Get Maximum number of rows in table (only used to calculate + * number of partitions). + */ + void setMaxRows(Uint64 maxRows); + Uint64 getMaxRows() const; + + /** + * Set/Get Minimum number of rows in table (only used to calculate + * number of partitions). 
+ */ + void setMinRows(Uint64 minRows); + Uint64 getMinRows() const; + /** @} *******************************************************************/ #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/ndb/src/common/debugger/signaldata/BackupImpl.cpp index e9b0188d93b..855db0834bc 100644 --- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp +++ b/ndb/src/common/debugger/signaldata/BackupImpl.cpp @@ -100,8 +100,10 @@ printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ BackupFragmentConf* sig = (BackupFragmentConf*)data; fprintf(out, " backupPtr: %d backupId: %d\n", sig->backupPtr, sig->backupId); - fprintf(out, " tableId: %d fragmentNo: %d records: %d bytes: %d\n", - sig->tableId, sig->fragmentNo, sig->noOfRecords, sig->noOfBytes); + fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n", + sig->tableId, sig->fragmentNo, + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp index 4b0a0e07b66..27fed22ac72 100644 --- a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp +++ b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp @@ -72,11 +72,11 @@ printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ bool printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ BackupCompleteRep* sig = (BackupCompleteRep*)data; - fprintf(out, " senderData: %d backupId: %d records: %d bytes: %d\n", + fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n", sig->senderData, sig->backupId, - sig->noOfRecords, - sig->noOfBytes); + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index 43c129347c0..a1d8d82474d 100644 --- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -48,6 +48,10 @@ DictTabInfo::TableMapping[] = { DTIMAP(Table, FragmentCount, FragmentCount), DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES), DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen), + DTIMAP(Table, MaxRowsLow, MaxRowsLow), + DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), + DTIMAP(Table, MinRowsLow, MinRowsLow), + DTIMAP(Table, MinRowsHigh, MinRowsHigh), DTIBREAK(AttributeName) }; @@ -124,6 +128,10 @@ DictTabInfo::Table::init(){ FragmentCount = 0; FragmentDataLen = 0; memset(FragmentData, 0, sizeof(FragmentData)); + MaxRowsLow = 0; + MaxRowsHigh = 0; + MinRowsLow = 0; + MinRowsHigh = 0; } void diff --git a/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/ndb/src/common/debugger/signaldata/LqhFrag.cpp index 6d727959a67..3175582c3a2 100644 --- a/ndb/src/common/debugger/signaldata/LqhFrag.cpp +++ b/ndb/src/common/debugger/signaldata/LqhFrag.cpp @@ -37,8 +37,10 @@ printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n", sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength); - fprintf(output, " noOfPagesToPreAllocate: %d schemaVersion: %d nextLCP: %d\n", - sig->noOfPagesToPreAllocate, sig->schemaVersion, sig->nextLCP); + fprintf(output, " 
maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n",
+	  sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh);
+  fprintf(output, " schemaVersion: %d nextLCP: %d\n",
+	  sig->schemaVersion, sig->nextLCP);
   return true;
 }
 
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index f9089355475..43c1de5e2b3 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -266,6 +266,65 @@ Backup::execCONTINUEB(Signal* signal)
   const Uint32 Tdata2 = signal->theData[2];
 
   switch(Tdata0) {
+  case BackupContinueB::BACKUP_FRAGMENT_INFO:
+  {
+    const Uint32 ptr_I = Tdata1;
+    Uint32 tabPtr_I = Tdata2;
+    Uint32 fragPtr_I = signal->theData[3];
+
+    BackupRecordPtr ptr;
+    c_backupPool.getPtr(ptr, ptr_I);
+    TablePtr tabPtr;
+    ptr.p->tables.getPtr(tabPtr, tabPtr_I);
+    FragmentPtr fragPtr;
+    tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I);
+
+    BackupFilePtr filePtr;
+    ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+
+    const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2;
+    Uint32 * dst;
+    if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz))
+    {
+      sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4);
+      return;
+    }
+
+    BackupFormat::CtlFile::FragmentInfo * fragInfo =
+      (BackupFormat::CtlFile::FragmentInfo*)dst;
+    fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO);
+    fragInfo->SectionLength = htonl(sz);
+    fragInfo->TableId = htonl(fragPtr.p->tableId);
+    fragInfo->FragmentNo = htonl(fragPtr_I);
+    fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF);
+    fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32);
+    fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF);
+    fragInfo->FilePosHigh = htonl(0 >> 32);
+
+    filePtr.p->operation.dataBuffer.updateWritePtr(sz);
+
+    fragPtr_I++;
+    if (fragPtr_I == tabPtr.p->fragments.getSize())
+    {
+      signal->theData[0] = tabPtr.p->tableId;
+      signal->theData[1] = 0; // unlock
+      EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+
+      fragPtr_I = 0;
+      ptr.p->tables.next(tabPtr);
+      if ((tabPtr_I = tabPtr.i) == RNIL)
+      {
+        closeFiles(signal, ptr);
+        return;
+      }
+    }
+    signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+    signal->theData[1] = ptr_I;
+    signal->theData[2] = tabPtr_I;
+    signal->theData[3] = fragPtr_I;
+    sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
+    return;
+  }
   case BackupContinueB::START_FILE_THREAD:
   case BackupContinueB::BUFFER_UNDERFLOW:
   {
@@ -455,7 +514,7 @@ Backup::findTable(const BackupRecordPtr & ptr,
   return false;
 }
 
-static Uint32 xps(Uint32 x, Uint64 ms)
+static Uint32 xps(Uint64 x, Uint64 ms)
 {
   float fx = x;
   float fs = ms;
@@ -469,9 +528,9 @@
 }
 
 struct Number {
-  Number(Uint32 r) { val = r;}
-  Number & operator=(Uint32 r) { val = r; return * this; }
-  Uint32 val;
+  Number(Uint64 r) { val = r;}
+  Number & operator=(Uint64 r) { val = r; return * this; }
+  Uint64 val;
 };
 
 NdbOut &
@@ -545,8 +604,10 @@ Backup::execBACKUP_COMPLETE_REP(Signal* signal)
   startTime = NdbTick_CurrentMillisecond() - startTime;
 
   ndbout_c("Backup %d has completed", rep->backupId);
-  const Uint32 bytes = rep->noOfBytes;
-  const Uint32 records = rep->noOfRecords;
+  const Uint64 bytes =
+    rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32);
+  const Uint64 records =
+    rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32);
 
   Number rps = xps(records, startTime);
   Number bps = xps(bytes, startTime);
@@ -1905,8 +1966,10 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
   const Uint32 tableId = conf->tableId;
   const Uint32 fragmentNo = conf->fragmentNo;
   const Uint32 nodeId = refToNode(signal->senderBlockRef());
-  const Uint32 noOfBytes = conf->noOfBytes;
-  const Uint32 noOfRecords = conf->noOfRecords;
+  const Uint64 noOfBytes =
+    conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32);
+  const Uint64 noOfRecords =
+    conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32);
 
   BackupRecordPtr ptr;
   c_backupPool.getPtr(ptr, ptrI);
@@ -1918,9 +1981,13 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
   TablePtr tabPtr;
   ndbrequire(findTable(ptr, tabPtr, tableId));
 
+  tabPtr.p->noOfRecords += noOfRecords;
+
   FragmentPtr fragPtr;
   tabPtr.p->fragments.getPtr(fragPtr, fragmentNo);
 
+  fragPtr.p->noOfRecords = noOfRecords;
+
   ndbrequire(fragPtr.p->scanned == 0);
   ndbrequire(fragPtr.p->scanning == 1);
   ndbrequire(fragPtr.p->node == nodeId);
@@ -1944,6 +2011,24 @@
   }
   else
   {
+    NodeBitmask nodes = ptr.p->nodes;
+    nodes.clear(getOwnNodeId());
+    if (!nodes.isclear())
+    {
+      BackupFragmentCompleteRep *rep =
+        (BackupFragmentCompleteRep*)signal->getDataPtrSend();
+      rep->backupId = ptr.p->backupId;
+      rep->backupPtr = ptr.i;
+      rep->tableId = tableId;
+      rep->fragmentNo = fragmentNo;
+      rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF);
+      rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32);
+      rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF);
+      rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32);
+      NodeReceiverGroup rg(BACKUP, ptr.p->nodes);
+      sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal,
+                 BackupFragmentCompleteRep::SignalLength, JBB);
+    }
     nextFragment(signal, ptr);
   }
 }
@@ -2006,6 +2091,29 @@ err:
   execABORT_BACKUP_ORD(signal);
 }
 
+void
+Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal)
+{
+  jamEntry();
+  BackupFragmentCompleteRep * rep =
+    (BackupFragmentCompleteRep*)signal->getDataPtr();
+
+  BackupRecordPtr ptr;
+  c_backupPool.getPtr(ptr, rep->backupPtr);
+
+  TablePtr tabPtr;
+  ndbrequire(findTable(ptr, tabPtr, rep->tableId));
+
+  tabPtr.p->noOfRecords =
+    rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32);
+
+  FragmentPtr fragPtr;
+  tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo);
+
+  fragPtr.p->noOfRecords =
+    rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32);
+}
+
 /*****************************************************************************
  *
  * Master functionallity - Drop triggers
@@ -2206,8 +2314,10 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
   rep->senderData = ptr.p->clientData;
   rep->startGCP = ptr.p->startGCP;
   rep->stopGCP = ptr.p->stopGCP;
-  rep->noOfBytes = ptr.p->noOfBytes;
-  rep->noOfRecords = ptr.p->noOfRecords;
+  rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+  rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
+  rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32);
+  rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32);
   rep->noOfLogBytes = ptr.p->noOfLogBytes;
   rep->noOfLogRecords = ptr.p->noOfLogRecords;
   rep->nodes = ptr.p->nodes;
@@ -2220,12 +2330,14 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
     signal->theData[2] = ptr.p->backupId;
     signal->theData[3] = ptr.p->startGCP;
     signal->theData[4] = ptr.p->stopGCP;
-    signal->theData[5] = ptr.p->noOfBytes;
-    signal->theData[6] = ptr.p->noOfRecords;
+    signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+    signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
     signal->theData[7] = ptr.p->noOfLogBytes;
     signal->theData[8] = ptr.p->noOfLogRecords;
     ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
-    sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
+    signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32);
+    signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32);
+    sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB);
   }
   else
   {
@@ -2988,6 +3100,7 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
   /**
    * Initialize table object
    */
+  tabPtr.p->noOfRecords = 0;
   tabPtr.p->schemaVersion = tmpTab.TableVersion;
   tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes;
   tabPtr.p->noOfNull = 0;
@@ -3695,8 +3808,10 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
   conf->backupPtr = ptr.i;
   conf->tableId = filePtr.p->tableId;
   conf->fragmentNo = filePtr.p->fragmentNo;
-  conf->noOfRecords = op.noOfRecords;
-  conf->noOfBytes = op.noOfBytes;
+  conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF);
+  conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32);
+  conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF);
+  conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32);
   sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal,
             BackupFragmentConf::SignalLength, JBB);
@@ -4123,20 +4238,18 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal)
     gcp->StartGCP = htonl(startGCP);
     gcp->StopGCP = htonl(stopGCP - 1);
     filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
-  }
-  {
-    TablePtr tabPtr;
-    for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
-        ptr.p->tables.next(tabPtr)) {
-      signal->theData[0] = tabPtr.p->tableId;
-      signal->theData[1] = 0; // unlock
-      EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+    {
+      TablePtr tabPtr;
+      ptr.p->tables.first(tabPtr);
+
+      signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+      signal->theData[1] = ptr.i;
+      signal->theData[2] = tabPtr.i;
+      signal->theData[3] = 0;
+      sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
     }
   }
-
-  closeFiles(signal, ptr);
 }
 
 void
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp
index c455e32fa67..e37923da749 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -68,6 +68,7 @@ protected:
   void execBACKUP_DATA(Signal* signal);
   void execSTART_BACKUP_REQ(Signal* signal);
   void execBACKUP_FRAGMENT_REQ(Signal* signal);
+  void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal);
   void execSTOP_BACKUP_REQ(Signal* signal);
   void execBACKUP_STATUS_REQ(Signal* signal);
   void execABORT_BACKUP_ORD(Signal* signal);
@@ -183,10 +184,12 @@ public:
   typedef Ptr AttributePtr;
 
   struct Fragment {
+    Uint64 noOfRecords;
     Uint32 tableId;
-    Uint32 node;
-    Uint16 scanned;  // 0 = not scanned x = scanned by node x
-    Uint16 scanning; // 0 = not scanning x = scanning on node x
+    Uint8 node;
+    Uint8 scanned;  // 0 = not scanned x = scanned by node x
+    Uint8 scanning; // 0 = not scanning x = scanning on node x
+    Uint8 unused1;
     Uint32 nextPool;
   };
   typedef Ptr FragmentPtr;
@@ -194,6 +197,8 @@ public:
   struct Table {
     Table(ArrayPool &, ArrayPool &);
 
+    Uint64 noOfRecords;
+
     Uint32 tableId;
     Uint32 schemaVersion;
     Uint32 tableType;
@@ -269,8 +274,8 @@ public:
     Uint32 tablePtr;    // Ptr.i to current table
 
     FsBuffer dataBuffer;
-    Uint32 noOfRecords;
-    Uint32 noOfBytes;
+    Uint64 noOfRecords;
+    Uint64 noOfBytes;
     Uint32 maxRecordSize;
 
   private:
diff --git a/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/ndb/src/kernel/blocks/backup/BackupFormat.hpp
index 65dd2ad9053..b8ffff3a294 100644
--- a/ndb/src/kernel/blocks/backup/BackupFormat.hpp
+++ b/ndb/src/kernel/blocks/backup/BackupFormat.hpp
@@ -32,7 +32,8 @@ struct BackupFormat {
     FRAGMENT_FOOTER   = 3,
     TABLE_LIST        = 4,
     TABLE_DESCRIPTION = 5,
-    GCP_ENTRY         = 6
+    GCP_ENTRY         = 6,
+    FRAGMENT_INFO     = 7
   };
 
   struct FileHeader {
@@ -126,6 +127,20 @@ struct BackupFormat {
       Uint32 StartGCP;
       Uint32 StopGCP;
     };
+
+    /**
+     * Fragment Info
+     */
+    struct FragmentInfo {
+      Uint32 SectionType;
+      Uint32 SectionLength;
+      Uint32 TableId;
+      Uint32 FragmentNo;
+      Uint32 NoOfRecordsLow;
+      Uint32 NoOfRecordsHigh;
+      Uint32 FilePosLow;
+      Uint32 FilePosHigh;
+    };
   };
 
   /**
diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp
index 4c734d58c8e..96c11468939 100644
--- a/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -97,6 +97,9 @@ Backup::Backup(const Configuration & conf) :
   addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ);
   addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF);
   addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF);
+
+  addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP,
+               &Backup::execBACKUP_FRAGMENT_COMPLETE_REP);
 
   addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ);
   addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF);
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index ca9daca428b..1f7fd8e6fa5 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -281,6 +281,10 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
   w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
   w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
   w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
+  w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow);
+  w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh);
+  w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow);
+  w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh);
 
   if(!signal)
   {
@@ -1525,6 +1529,10 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
   tablePtr.p->minLoadFactor = 70;
   tablePtr.p->noOfPrimkey = 1;
   tablePtr.p->tupKeyLength = 1;
+  tablePtr.p->maxRowsLow = 0;
+  tablePtr.p->maxRowsHigh = 0;
+  tablePtr.p->minRowsLow = 0;
+  tablePtr.p->minRowsHigh = 0;
   tablePtr.p->storedTable = true;
   tablePtr.p->tableType = DictTabInfo::UserTable;
   tablePtr.p->primaryTableId = RNIL;
@@ -4464,6 +4472,13 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
   Uint32 lhPageBits = 0;
   ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount);
 
+  Uint64 maxRows = tabPtr.p->maxRowsLow +
+    (((Uint64)tabPtr.p->maxRowsHigh) << 32);
+  Uint64 minRows = tabPtr.p->minRowsLow +
+    (((Uint64)tabPtr.p->minRowsHigh) << 32);
+  maxRows = (maxRows + fragCount - 1) / fragCount;
+  minRows = (minRows + fragCount - 1) / fragCount;
+
   {
     LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend();
     req->senderData = senderData;
@@ -4479,7 +4494,10 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
     req->lh3PageBits = 0; //lhPageBits;
     req->noOfAttributes = tabPtr.p->noOfAttributes;
     req->noOfNullAttributes = tabPtr.p->noOfNullBits;
-    req->noOfPagesToPreAllocate = 0;
+    req->maxRowsLow = maxRows & 0xFFFFFFFF;
+    req->maxRowsHigh = maxRows >> 32;
+    req->minRowsLow = minRows & 0xFFFFFFFF;
+    req->minRowsHigh = minRows >> 32;
     req->schemaVersion = tabPtr.p->tableVersion;
     Uint32 keyLen = tabPtr.p->tupKeyLength;
     req->keyLength = keyLen; // wl-2066 no more "long keys"
@@ -4487,8 +4505,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
 
     req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
     req->noOfNewAttr = 0;
-    // noOfCharsets passed to TUP in upper half
-    req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
+    req->noOfCharsets = tabPtr.p->noOfCharsets;
     req->checksumIndicator = 1;
     req->noOfAttributeGroups = 1;
     req->GCPIndicator = 0;
@@ -5054,6 +5071,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
   tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
   tablePtr.p->kValue = tableDesc.TableKValue;
   tablePtr.p->fragmentCount = tableDesc.FragmentCount;
+  tablePtr.p->maxRowsLow = tableDesc.MaxRowsLow;
+  tablePtr.p->maxRowsHigh = tableDesc.MaxRowsHigh;
+  tablePtr.p->minRowsLow = tableDesc.MinRowsLow;
+  tablePtr.p->minRowsHigh = tableDesc.MinRowsHigh;
+
+  Uint64 maxRows =
+    (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow;
+  Uint64 minRows =
+    (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow;
 
   tablePtr.p->frmLen = tableDesc.FrmLen;
   memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 6b78fb86534..e4788898cc8 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -131,6 +131,10 @@ public:
    * on disk. Index trigger ids are volatile.
    */
   struct TableRecord : public MetaData::Table {
+    Uint32 maxRowsLow;
+    Uint32 maxRowsHigh;
+    Uint32 minRowsLow;
+    Uint32 minRowsHigh;
     /****************************************************
      * Support variables for table handling
      ****************************************************/
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 1ed383853ba..f8e6292f7f2 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -443,7 +443,6 @@ public:
     UintR dictConnectptr;
     UintR fragmentPtr;
     UintR nextAddfragrec;
-    UintR noOfAllocPages;
     UintR schemaVer;
     UintR tup1Connectptr;
     UintR tup2Connectptr;
@@ -465,12 +464,17 @@ public:
     Uint16 totalAttrReceived;
    Uint16 fragCopyCreation;
     Uint16 noOfKeyAttr;
-    Uint32 noOfNewAttr; // noOfCharsets in upper half
+    Uint16 noOfNewAttr;
+    Uint16 noOfCharsets;
     Uint16 noOfAttributeGroups;
     Uint16 lh3DistrBits;
     Uint16 tableType;
     Uint16 primaryTableId;
-  };// Size 108 bytes
+    Uint32 maxRowsLow;
+    Uint32 maxRowsHigh;
+    Uint32 minRowsLow;
+    Uint32 minRowsHigh;
+  };// Size 124 bytes
 typedef Ptr AddFragRecordPtr;
 
 /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 56e93e6ee01..ecb67d04050 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -939,12 +939,16 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
   Uint8 tlh = req->lh3PageBits;
   Uint32 tnoOfAttr = req->noOfAttributes;
   Uint32 tnoOfNull = req->noOfNullAttributes;
-  Uint32 noOfAlloc = req->noOfPagesToPreAllocate;
+  Uint32 maxRowsLow = req->maxRowsLow;
+  Uint32 maxRowsHigh = req->maxRowsHigh;
+  Uint32 minRowsLow = req->minRowsLow;
+  Uint32 minRowsHigh = req->minRowsHigh;
   Uint32 tschemaVersion = req->schemaVersion;
   Uint32 ttupKeyLength = req->keyLength;
   Uint32 nextLcp = req->nextLCP;
   Uint32 noOfKeyAttr = req->noOfKeyAttr;
   Uint32 noOfNewAttr = req->noOfNewAttr;
+  Uint32 noOfCharsets = req->noOfCharsets;
   Uint32 checksumIndicator = req->checksumIndicator;
   Uint32 noOfAttributeGroups = req->noOfAttributeGroups;
   Uint32 gcpIndicator = req->GCPIndicator;
@@ -1042,7 +1046,10 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
   addfragptr.p->m_senderAttrPtr = RNIL;
   addfragptr.p->noOfAttr = tnoOfAttr;
   addfragptr.p->noOfNull = tnoOfNull;
-  addfragptr.p->noOfAllocPages = noOfAlloc;
+  addfragptr.p->maxRowsLow = maxRowsLow;
+  addfragptr.p->maxRowsHigh = maxRowsHigh;
+  addfragptr.p->minRowsLow = minRowsLow;
+  addfragptr.p->minRowsHigh = minRowsHigh;
   addfragptr.p->tabId = tabptr.i;
   addfragptr.p->totalAttrReceived = 0;
   addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */
@@ -1052,6 +1059,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
   addfragptr.p->addfragErrorCode = 0;
   addfragptr.p->noOfKeyAttr = noOfKeyAttr;
   addfragptr.p->noOfNewAttr = noOfNewAttr;
+  addfragptr.p->noOfCharsets = noOfCharsets;
   addfragptr.p->checksumIndicator = checksumIndicator;
   addfragptr.p->noOfAttributeGroups = noOfAttributeGroups;
   addfragptr.p->GCPIndicator = gcpIndicator;
@@ -1221,47 +1229,56 @@ Dblqh::sendAddFragReq(Signal* signal)
   ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
   if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ||
       addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) {
+    TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
     if (DictTabInfo::isTable(addfragptr.p->tableType) ||
        DictTabInfo::isHashIndex(addfragptr.p->tableType)) {
      jam();
-      signal->theData[0] = addfragptr.i;
-      signal->theData[1] = cownref;
-      signal->theData[2] = 0; /* ADD TABLE */
-      signal->theData[3] = addfragptr.p->tabId;
-      signal->theData[4] = addfragptr.p->noOfAttr;
-      signal->theData[5] =
+      tupFragReq->userPtr = addfragptr.i;
+      tupFragReq->userRef = cownref;
+      tupFragReq->reqInfo = 0; /* ADD TABLE */
+      tupFragReq->tableId = addfragptr.p->tabId;
+      tupFragReq->noOfAttr = addfragptr.p->noOfAttr;
+      tupFragReq->fragId =
        addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ?
        addfragptr.p->fragid1 : addfragptr.p->fragid2;
-      signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
-      signal->theData[7] = addfragptr.p->noOfNull;
-      signal->theData[8] = addfragptr.p->schemaVer;
-      signal->theData[9] = addfragptr.p->noOfKeyAttr;
-      signal->theData[10] = addfragptr.p->noOfNewAttr;
-      signal->theData[11] = addfragptr.p->checksumIndicator;
-      signal->theData[12] = addfragptr.p->noOfAttributeGroups;
-      signal->theData[13] = addfragptr.p->GCPIndicator;
+      tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+      tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+      tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+      tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+      tupFragReq->noOfNullAttr = addfragptr.p->noOfNull;
+      tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+      tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr;
+      tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
+      tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+      tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+      tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
+      tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
       sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
                 signal, TupFragReq::SignalLength, JBB);
       return;
     }
     if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
       jam();
-      signal->theData[0] = addfragptr.i;
-      signal->theData[1] = cownref;
-      signal->theData[2] = 0; /* ADD TABLE */
-      signal->theData[3] = addfragptr.p->tabId;
-      signal->theData[4] = 1; /* ordered index: one array attr */
-      signal->theData[5] =
+      tupFragReq->userPtr = addfragptr.i;
+      tupFragReq->userRef = cownref;
+      tupFragReq->reqInfo = 0; /* ADD TABLE */
+      tupFragReq->tableId = addfragptr.p->tabId;
+      tupFragReq->noOfAttr = 1; /* ordered index: one array attr */
+      tupFragReq->fragId =
        addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ?
        addfragptr.p->fragid1 : addfragptr.p->fragid2;
-      signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
-      signal->theData[7] = 0; /* ordered index: no nullable */
-      signal->theData[8] = addfragptr.p->schemaVer;
-      signal->theData[9] = 1; /* ordered index: one key */
-      signal->theData[10] = addfragptr.p->noOfNewAttr;
-      signal->theData[11] = addfragptr.p->checksumIndicator;
-      signal->theData[12] = addfragptr.p->noOfAttributeGroups;
-      signal->theData[13] = addfragptr.p->GCPIndicator;
+      tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+      tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+      tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+      tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+      tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */
+      tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+      tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */
+      tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
+      tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+      tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+      tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
+      tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
       sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
                 signal, TupFragReq::SignalLength, JBB);
       return;
@@ -1580,28 +1597,35 @@ void Dblqh::abortAddFragOps(Signal* signal)
{
   fragptr.i = addfragptr.p->fragmentPtr;
   ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
-  signal->theData[0] = (Uint32)-1;
   if (addfragptr.p->tup1Connectptr != RNIL) {
     jam();
-    signal->theData[1] = addfragptr.p->tup1Connectptr;
+    TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
+    tupFragReq->userPtr = (Uint32)-1;
+    tupFragReq->userRef = addfragptr.p->tup1Connectptr;
     sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
     addfragptr.p->tup1Connectptr = RNIL;
   }
   if (addfragptr.p->tup2Connectptr != RNIL) {
     jam();
-    signal->theData[1] = addfragptr.p->tup2Connectptr;
+    TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
+    tupFragReq->userPtr = (Uint32)-1;
+    tupFragReq->userRef = addfragptr.p->tup2Connectptr;
     sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
     addfragptr.p->tup2Connectptr = RNIL;
   }
   if (addfragptr.p->tux1Connectptr != RNIL) {
     jam();
-    signal->theData[1] = addfragptr.p->tux1Connectptr;
+    TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend();
+    tuxFragReq->userPtr = (Uint32)-1;
+    tuxFragReq->userRef = addfragptr.p->tux1Connectptr;
     sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
     addfragptr.p->tux1Connectptr = RNIL;
   }
   if (addfragptr.p->tux2Connectptr != RNIL) {
     jam();
-    signal->theData[1] = addfragptr.p->tux2Connectptr;
+    TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend();
+    tuxFragReq->userPtr = (Uint32)-1;
+    tuxFragReq->userRef = addfragptr.p->tux2Connectptr;
     sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
     addfragptr.p->tux2Connectptr = RNIL;
   }
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index cf3c6056d65..41194fba82c 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -496,7 +496,8 @@ struct DiskBufferSegmentInfo {
 typedef Ptr DiskBufferSegmentInfoPtr;
 
 struct Fragoperrec {
-  bool definingFragment;
+  Uint64 minRows;
+  Uint64 maxRows;
   Uint32 nextFragoprec;
   Uint32 lqhPtrFrag;
   Uint32 fragidFrag;
@@ -509,6 +510,7 @@ struct Fragoperrec {
   Uint32 charsetIndex;
   BlockReference lqhBlockrefFrag;
   bool inUse;
+  bool definingFragment;
 };
 typedef Ptr FragoperrecPtr;
 
@@ -560,6 +562,7 @@ struct Fragrecord {
   Uint32 currentPageRange;
   Uint32 rootPageRange;
   Uint32 noOfPages;
+  Uint32 noOfPagesToGrow;
   Uint32 emptyPrimPage;
 
   Uint32 firstusedOprec;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index bacba2a880c..12cd61a17a6 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -41,7 +41,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
{
   ljamEntry();
-  if (signal->theData[0] == (Uint32)-1) {
+  TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr();
+  if (tupFragReq->userPtr == (Uint32)-1) {
     ljam();
     abortAddFragOp(signal);
     return;
@@ -51,30 +52,34 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
   FragrecordPtr regFragPtr;
   TablerecPtr regTabPtr;
 
-  Uint32 userptr = signal->theData[0];
-  Uint32 userblockref = signal->theData[1];
-  Uint32 reqinfo = signal->theData[2];
-  regTabPtr.i = signal->theData[3];
-  Uint32 noOfAttributes = signal->theData[4];
-  Uint32 fragId = signal->theData[5];
-  Uint32 noOfNullAttr = signal->theData[7];
-  /* Uint32 schemaVersion = signal->theData[8];*/
-  Uint32 noOfKeyAttr = signal->theData[9];
+  Uint32 userptr = tupFragReq->userPtr;
+  Uint32 userblockref = tupFragReq->userRef;
+  Uint32 reqinfo = tupFragReq->reqInfo;
+  regTabPtr.i = tupFragReq->tableId;
+  Uint32 noOfAttributes = tupFragReq->noOfAttr;
+  Uint32 fragId = tupFragReq->fragId;
+  Uint32 noOfNullAttr = tupFragReq->noOfNullAttr;
+  /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/
+  Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr;
 
-  Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF);
-  /* DICT sends number of character sets in upper half */
-  Uint32 noOfCharsets = (signal->theData[10] >> 16);
+  Uint32 noOfNewAttr = tupFragReq->noOfNewAttr;
+  Uint32 noOfCharsets = tupFragReq->noOfCharsets;
 
-  Uint32 checksumIndicator = signal->theData[11];
-  Uint32 noOfAttributeGroups = signal->theData[12];
-  Uint32 globalCheckpointIdIndicator = signal->theData[13];
+  Uint32 checksumIndicator = tupFragReq->checksumIndicator;
+  Uint32 noOfAttributeGroups = tupFragReq->noOfAttributeGroups;
+  Uint32 globalCheckpointIdIndicator = tupFragReq->globalCheckpointIdIndicator;
+
+  Uint64 maxRows =
+    (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow;
+  Uint64 minRows =
+    (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow;
 
 #ifndef VM_TRACE
   // config mismatch - do not crash if release compiled
   if (regTabPtr.i >= cnoOfTablerec) {
     ljam();
-    signal->theData[0] = userptr;
-    signal->theData[1] = 800;
+    tupFragReq->userPtr = userptr;
+    tupFragReq->userRef = 800;
     sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
     return;
   }
@@ -83,8 +88,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
   ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
   if (cfirstfreeFragopr == RNIL) {
     ljam();
-    signal->theData[0] = userptr;
-    signal->theData[1] = ZNOFREE_FRAGOP_ERROR;
+    tupFragReq->userPtr = userptr;
+    tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR;
     sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
     return;
   }//if
@@ -100,6 +105,9 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
   fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
   fragOperPtr.p->charsetIndex = 0;
   fragOperPtr.p->currNullBit = 0;
+  // remove in 5.1, 2 fragments per fragment in 5.0
+  fragOperPtr.p->minRows = (minRows + 1)/2;
+  fragOperPtr.p->maxRows = (maxRows + 1)/2;
 
   ndbrequire(reqinfo == ZADDFRAG);
 
@@ -141,16 +149,6 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
   regFragPtr.p->fragmentId = fragId;
   regFragPtr.p->checkpointVersion = RNIL;
 
-  Uint32 noAllocatedPages = 2;
-  noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
-
-  if (noAllocatedPages == 0) {
-    ljam();
-    terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
-    fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
-    return;
-  }//if
-
   if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
       ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
     ljam();
@@ -407,6 +405,27 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
     CLEAR_ERROR_INSERT_VALUE;
     return;
   }
+
+  if (lastAttr)
+  {
+    ljam();
+    Uint32 noRowsPerPage = ZWORDS_ON_PAGE/regTabPtr.p->tupheadsize;
+    Uint32 noAllocatedPages =
+      (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage;
+    if (fragOperPtr.p->minRows == 0)
+      noAllocatedPages = 2;
+    else if (noAllocatedPages == 0)
+      noAllocatedPages = 2;
+    noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
+
+    if (noAllocatedPages == 0) {
+      ljam();
+      terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
+      addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+      return;
+    }//if
+  }
+
   /* **************************************************************** */
   /* **************          TUP_ADD_ATTCONF       ****************** */
   /* **************************************************************** */
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index 1f674876642..acdb73704cb 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -332,6 +332,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr)
   regFragPtr->rootPageRange = RNIL;
   regFragPtr->currentPageRange = RNIL;
   regFragPtr->noOfPages = 0;
+  regFragPtr->noOfPagesToGrow = 2;
   regFragPtr->nextStartRange = 0;
 }//initFragRange()
 
@@ -393,9 +394,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* const regFragPtr, Uint32 tafpNoAllocReq
 void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
 {
-  Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5%
-  noAllocPages += regFragPtr->noOfPages >> 4; // 6.25%
+  Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5%
+  noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25%
   noAllocPages += 2;
+  regFragPtr->noOfPagesToGrow += noAllocPages;
 /* -----------------------------------------------------------------*/
 // We will grow by 18.75% plus two more additional pages to grow
 // a little bit quicker in the beginning.
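A note on the allocMoreFragPages() hunk above: basing the growth step on the new noOfPagesToGrow counter (rather than on noOfPages, which now starts from the minRows-derived pre-allocation) keeps the fragment growing geometrically from a small base. A minimal standalone sketch of the resulting sequence — everything here is illustrative, only the 12.5% + 6.25% + 2 formula is taken from the hunk:

#include <cstdio>

int main()
{
  unsigned noOfPagesToGrow = 2;                    // initFragRange() starts at 2
  for (int step = 0; step < 5; step++)
  {
    unsigned noAllocPages = (noOfPagesToGrow >> 3) // 12.5%
                          + (noOfPagesToGrow >> 4) // 6.25%
                          + 2;
    noOfPagesToGrow += noAllocPages;               // compound for the next step
    std::printf("step %d: allocate %u pages, next base %u\n",
                step, noAllocPages, noOfPagesToGrow);
  }
  return 0;
}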
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index b9466ed1173..69c0286a1de 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -2380,14 +2380,20 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
       event.Event = BackupEvent::BackupCompleted;
       event.Completed.BackupId = rep->backupId;
-      event.Completed.NoOfBytes = rep->noOfBytes;
+      event.Completed.NoOfBytes = rep->noOfBytesLow;
       event.Completed.NoOfLogBytes = rep->noOfLogBytes;
-      event.Completed.NoOfRecords = rep->noOfRecords;
+      event.Completed.NoOfRecords = rep->noOfRecordsLow;
       event.Completed.NoOfLogRecords = rep->noOfLogRecords;
       event.Completed.stopGCP = rep->stopGCP;
       event.Completed.startGCP = rep->startGCP;
       event.Nodes = rep->nodes;
+
+      if (signal->header.theLength >= BackupCompleteRep::SignalLength)
+      {
+        event.Completed.NoOfBytes += ((Uint64)rep->noOfBytesHigh) << 32;
+        event.Completed.NoOfRecords += ((Uint64)rep->noOfRecordsHigh) << 32;
+      }
+
       backupId = rep->backupId;
       return 0;
     }
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 7811cf0e5d1..187f225470a 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -323,9 +323,9 @@ public:
       Uint32 ErrorCode;
     } FailedToStart ;
     struct {
+      Uint64 NoOfBytes;
+      Uint64 NoOfRecords;
       Uint32 BackupId;
-      Uint32 NoOfBytes;
-      Uint32 NoOfRecords;
       Uint32 NoOfLogBytes;
       Uint32 NoOfLogRecords;
       Uint32 startGCP;
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index a342a5d5926..a0a3dd431b8 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -385,6 +385,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const {
   return m_impl.m_noOfKeys;
 }
 
+void
+NdbDictionary::Table::setMaxRows(Uint64 maxRows)
+{
+  m_impl.m_max_rows = maxRows;
+}
+
+Uint64
+NdbDictionary::Table::getMaxRows() const
+{
+  return m_impl.m_max_rows;
+}
+
+void
+NdbDictionary::Table::setMinRows(Uint64 minRows)
+{
+  m_impl.m_min_rows = minRows;
+}
+
+Uint64
+NdbDictionary::Table::getMinRows() const
+{
+  return m_impl.m_min_rows;
+}
+
 const char*
 NdbDictionary::Table::getPrimaryKey(int no) const {
   int count = 0;
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index bd50440b3c0..ce348b616c9 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -319,6 +319,8 @@ NdbTableImpl::init(){
   m_noOfDistributionKeys= 0;
   m_noOfBlobs= 0;
   m_replicaCount= 0;
+  m_min_rows = 0;
+  m_max_rows = 0;
 }
 
 bool
@@ -416,6 +418,9 @@ NdbTableImpl::assign(const NdbTableImpl& org)
 
   m_version = org.m_version;
   m_status = org.m_status;
+
+  m_max_rows = org.m_max_rows;
+  m_min_rows = org.m_min_rows;
 }
 
 void NdbTableImpl::setName(const char * name)
@@ -1302,6 +1307,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
                                 fragmentTypeMapping,
                                 (Uint32)NdbDictionary::Object::FragUndefined);
 
+  Uint64 max_rows = ((Uint64)tableDesc.MaxRowsHigh) << 32;
+  max_rows += tableDesc.MaxRowsLow;
+  impl->m_max_rows = max_rows;
+  Uint64 min_rows = ((Uint64)tableDesc.MinRowsHigh) << 32;
+  min_rows += tableDesc.MinRowsLow;
+  impl->m_min_rows = min_rows;
   impl->m_logging = tableDesc.TableLoggedFlag;
   impl->m_kvalue = tableDesc.TableKValue;
   impl->m_minLoadFactor = tableDesc.MinLoadFactor;
@@ -1630,7 +1641,16 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
   tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
   tmpTab.TableType = DictTabInfo::UserTable;
   tmpTab.NoOfAttributes = sz;
+  tmpTab.MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
+  tmpTab.MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
+  tmpTab.MinRowsHigh = (Uint32)(impl.m_min_rows >> 32);
+  tmpTab.MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF);
 
+  Uint64 maxRows =
+    (((Uint64)tmpTab.MaxRowsHigh) << 32) + tmpTab.MaxRowsLow;
+  Uint64 minRows =
+    (((Uint64)tmpTab.MinRowsHigh) << 32) + tmpTab.MinRowsLow;
+
   tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
                                          fragmentTypeMapping,
                                          DictTabInfo::AllNodesSmallTable);
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index bc9894497f8..dfccf120228 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -130,6 +130,9 @@ public:
   Uint32 m_hashpointerValue;
   Vector m_fragments;
 
+  Uint64 m_max_rows;
+  Uint64 m_min_rows;
+
   bool m_logging;
   int m_kvalue;
   int m_minLoadFactor;
diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp
index 6ac06f8a6f8..a808a48b558 100644
--- a/ndb/tools/restore/Restore.cpp
+++ b/ndb/tools/restore/Restore.cpp
@@ -80,7 +80,12 @@ RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) {
 
 RestoreMetaData::~RestoreMetaData(){
   for(Uint32 i= 0; i < allTables.size(); i++)
-    delete allTables[i];
+  {
+    TableS *table = allTables[i];
+    for(Uint32 j= 0; j < table->m_fragmentInfo.size(); j++)
+      delete table->m_fragmentInfo[j];
+    delete table;
+  }
   allTables.clear();
 }
 
@@ -111,6 +116,9 @@ RestoreMetaData::loadContent()
   }
   if(!readGCPEntry())
     return 0;
+
+  if(!readFragmentInfo())
+    return 0;
   return 1;
 }
 
@@ -192,6 +200,52 @@ RestoreMetaData::readGCPEntry() {
   return true;
 }
 
+bool
+RestoreMetaData::readFragmentInfo()
+{
+  BackupFormat::CtlFile::FragmentInfo fragInfo;
+  TableS * table = 0;
+  Uint32 tableId = RNIL;
+
+  while (buffer_read(&fragInfo, 4, 2) == 2)
+  {
+    fragInfo.SectionType = ntohl(fragInfo.SectionType);
+    fragInfo.SectionLength = ntohl(fragInfo.SectionLength);
+
+    if (fragInfo.SectionType != BackupFormat::FRAGMENT_INFO)
+    {
+      err << "readFragmentInfo invalid section type: " <<
+        fragInfo.SectionType << endl;
+      return false;
+    }
+
+    if (buffer_read(&fragInfo.TableId, (fragInfo.SectionLength-2)*4, 1) != 1)
+    {
+      err << "readFragmentInfo invalid section length: " <<
+        fragInfo.SectionLength << endl;
+      return false;
+    }
+
+    fragInfo.TableId = ntohl(fragInfo.TableId);
+    if (fragInfo.TableId != tableId)
+    {
+      tableId = fragInfo.TableId;
+      table = getTable(tableId);
+    }
+
+    FragmentInfo * tmp = new FragmentInfo;
+    tmp->fragmentNo = ntohl(fragInfo.FragmentNo);
+    tmp->noOfRecords = ntohl(fragInfo.NoOfRecordsLow) +
+      (((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32);
+    tmp->filePosLow = ntohl(fragInfo.FilePosLow);
+    tmp->filePosHigh = ntohl(fragInfo.FilePosHigh);
+
+    table->m_fragmentInfo.push_back(tmp);
+    table->m_noOfRecords += tmp->noOfRecords;
+  }
+  return true;
+}
+
 TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
   : m_dictTable(tableImpl)
 {
@@ -199,6 +253,7 @@
   m_noOfNullable = m_nullBitmaskSize = 0;
   m_auto_val_id= ~(Uint32)0;
   m_max_auto_val= 0;
+  m_noOfRecords= 0;
   backupVersion = version;
 
   for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
@@ -937,4 +992,5 @@ operator<<(NdbOut& ndbout, const TableS & table){
 template class Vector;
 template class Vector;
 template class Vector;
+template class Vector;
 
diff --git a/ndb/tools/restore/Restore.hpp b/ndb/tools/restore/Restore.hpp
index 85793baf9df..cf8feb7125c 100644
--- a/ndb/tools/restore/Restore.hpp
+++ b/ndb/tools/restore/Restore.hpp
@@ -114,6 +114,14 @@ public:
   AttributeData * getData(int i) const;
 }; // class TupleS
 
+struct FragmentInfo
+{
+  Uint32 fragmentNo;
+  Uint64 noOfRecords;
+  Uint32 filePosLow;
+  Uint32 filePosHigh;
+};
+
 class TableS {
 
   friend class TupleS;
@@ -136,6 +144,9 @@ class TableS {
 
   int pos;
 
+  Uint64 m_noOfRecords;
+  Vector m_fragmentInfo;
+
   void createAttr(NdbDictionary::Column *column);
 
 public:
@@ -146,6 +157,9 @@ public:
   Uint32 getTableId() const {
     return m_dictTable->getTableId();
   }
+  Uint32 getNoOfRecords() const {
+    return m_noOfRecords;
+  }
   /*
   void setMysqlTableName(char * tableName) {
     strpcpy(mysqlTableName, tableName);
@@ -274,6 +288,7 @@ class RestoreMetaData : public BackupFile {
 
   bool readMetaTableDesc();
   bool readGCPEntry();
+  bool readFragmentInfo();
   Uint32 readMetaTableList();
 
   Uint32 m_startGCP;
diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp
index d62ca3f610a..bff63c28716 100644
--- a/ndb/tools/restore/consumer_restore.cpp
+++ b/ndb/tools/restore/consumer_restore.cpp
@@ -193,6 +193,16 @@ BackupRestore::table(const TableS & table){
 
   copy.setName(split[2].c_str());
 
+  /*
+    update min and max rows to reflect the table, this to
+    ensure that memory is allocated properly in the ndb kernel
+  */
+  copy.setMinRows(table.getNoOfRecords());
+  if (table.getNoOfRecords() > copy.getMaxRows())
+  {
+    copy.setMaxRows(table.getNoOfRecords());
+  }
+
   if (dict->createTable(copy) == -1)
   {
     err << "Create table " << table.getTableName() << " failed: "
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 98dd9d5a122..d59eb4d4f77 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4111,7 +4111,11 @@ static int create_ndb_column(NDBCOL &col,
 
 static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
 {
-  if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */
+  ha_rows max_rows= form->s->max_rows;
+  ha_rows min_rows= form->s->min_rows;
+  if (max_rows < min_rows)
+    max_rows= min_rows;
+  if (max_rows == (ha_rows)0) /* default setting, don't set fragmentation */
     return;
   /**
    * get the number of fragments right
   */
@@ -4129,7 +4133,6 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
     acc_row_size+= 4 + /*safety margin*/ 4;
 #endif
     ulonglong acc_fragment_size= 512*1024*1024;
-    ulonglong max_rows= form->s->max_rows;
 #if MYSQL_VERSION_ID >= 50100
     no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1;
 #else
@@ -4153,6 +4156,9 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
       ftype= NDBTAB::FragAllSmall;
   tab.setFragmentType(ftype);
   }
+  tab.setMaxRows(max_rows);
+  tab.setMinRows(min_rows);
+  fprintf(stderr, "max/min %llu %llu\n", max_rows, min_rows);
 }
 
 int ha_ndbcluster::create(const char *name,
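The follow-up patch below corrects two constants from the previous commit: GSN 502 was already assigned to another signal, so BACKUP_FRAGMENT_COMPLETE_REP moves to the documented free slot 575, and DictTabInfo::MinRowsLow moves from 133 (already in use) to 143. A sketch of the kind of compile-time uniqueness guard that would catch such a collision — purely illustrative, GSN_SOME_EXISTING_REP is a placeholder for whichever signal already owned 502, and no such guard exists in the tree:

/* Compile fails (negative array size) if the two GSNs ever collide. */
#define GSN_SOME_EXISTING_REP 502
#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575

typedef char assert_gsn_unique[
  GSN_BACKUP_FRAGMENT_COMPLETE_REP != GSN_SOME_EXISTING_REP ? 1 : -1];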
From 18e008a1ba9cee7ab89096ccf216a9db676a5314 Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Tue, 27 Jun 2006 11:26:00 +0200
Subject: [PATCH 08/14] Bug #19852 Restoring backup made from cluster with full data memory fails - correction of previous patch

---
 ndb/include/kernel/GlobalSignalNumbers.h      | 4 ++--
 ndb/include/kernel/signaldata/DictTabInfo.hpp | 2 +-
 sql/ha_ndbcluster.cc                          | 1 -
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h
index a84f3130abf..fcb0a87020f 100644
--- a/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/ndb/include/kernel/GlobalSignalNumbers.h
@@ -680,7 +680,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
 #define GSN_BACKUP_FRAGMENT_REF 546
 #define GSN_BACKUP_FRAGMENT_CONF 547
-#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 502
+#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575
 
 #define GSN_STOP_BACKUP_REQ 548
 #define GSN_STOP_BACKUP_REF 549
@@ -731,7 +731,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
 #define GSN_SUB_STOP_REQ 572
 #define GSN_SUB_STOP_REF 573
 #define GSN_SUB_STOP_CONF 574
-/* 575 unused */
+/* 575 used */
 #define GSN_SUB_CREATE_REQ 576
 #define GSN_SUB_CREATE_REF 577
 #define GSN_SUB_CREATE_CONF 578
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
index 3fcae69aa74..0a7f6aa3fb3 100644
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -124,7 +124,7 @@ public:
     MaxRowsLow = 139,
     MaxRowsHigh = 140,
-    MinRowsLow = 133,
+    MinRowsLow = 143,
     MinRowsHigh = 144,
 
     TableEnd = 999,
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index d59eb4d4f77..ced85d1a339 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4158,7 +4158,6 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
   }
   tab.setMaxRows(max_rows);
   tab.setMinRows(min_rows);
-  fprintf(stderr, "max/min %llu %llu\n", max_rows, min_rows);
 }
 
 int ha_ndbcluster::create(const char *name,
From 49c8863bbfd77cc3bad8b887f93fee2c313f27d0 Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Tue, 27 Jun 2006 13:12:34 +0200
Subject: [PATCH 09/14] Bug #19852 Restoring backup made from cluster with full data memory fails - post merge fixes for 5.1

---
 .../include/kernel/signaldata/DictTabInfo.hpp |  2 -
 .../ndb/include/kernel/signaldata/LqhFrag.hpp |  4 +-
 .../ndb/include/kernel/signaldata/TupFrag.hpp |  6 +--
 storage/ndb/include/ndbapi/NdbDictionary.hpp  |  7 ---
 .../ndb/src/kernel/blocks/dbdict/Dbdict.cpp   |  5 +-
 storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp |  2 -
 .../ndb/src/kernel/blocks/dblqh/DblqhMain.cpp |  8 +--
 .../ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 50 +++++++++----------
 storage/ndb/src/ndbapi/NdbDictionary.cpp      |  1 +
 storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp  | 16 +++---
 10 files changed, 39 insertions(+), 62 deletions(-)

diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
index 08d05c4dca0..1382b09eabf 100644
--- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -315,8 +315,6 @@ public:
     Uint32 CustomTriggerId;
     Uint32 TablespaceId;
     Uint32 TablespaceVersion;
-    Uint32 MaxRowsLow;
-    Uint32 MaxRowsHigh;
     Uint32 DefaultNoPartFlag;
     Uint32 LinearHashFlag;
     /*
diff --git a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
index 95e71187c2c..97481ea2c3e 100644
--- a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
@@ -123,9 +123,9 @@ private:
   Uint32 kValue;
   Uint32 schemaVersion;
   Uint32 nextLCP;
-  Uint16 noOfNewAttr;
-  Uint16 noOfCharsets;
+  Uint32 noOfCharsets;
   Uint32 startGci;
+  Uint32 tableType;        // DictTabInfo::TableType
   Uint32 primaryTableId;   // table of index or RNIL
   Uint32 tablespace_id;    // RNIL for MM table
   Uint16 tableId;
diff --git a/storage/ndb/include/kernel/signaldata/TupFrag.hpp b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
index fcba5f47feb..bc44877bb1c 100644
--- a/storage/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
@@ -30,7 +30,7 @@ class TupFragReq {
   friend class Dblqh;
   friend class Dbtup;
 public:
-  STATIC_CONST( SignalLength = 18 );
+  STATIC_CONST( SignalLength = 17 );
 private:
   Uint32 userPtr;
   Uint32 userRef;
@@ -45,10 +45,8 @@ private:
   Uint32 noOfNullAttr;
   Uint32 schemaVersion;
   Uint32 noOfKeyAttr;
-  Uint16 noOfNewAttr;
-  Uint16 noOfCharsets;
+  Uint32 noOfCharsets;
   Uint32 checksumIndicator;
-  Uint32 noOfAttributeGroups;
   Uint32 globalCheckpointIdIndicator;
   Uint32 tablespaceid;
 };
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index f61dfcbb709..6e572635247 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -830,13 +830,6 @@ public:
      */
     virtual int getObjectVersion() const;
 
-    /**
-     * Set/Get Maximum number of rows in table (only used to calculate
-     * number of partitions).
-     */
-    void setMaxRows(Uint64 maxRows);
-    Uint64 getMaxRows() const;
-
     /**
      * Set/Get indicator if default number of partitions is used in table.
      */
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 7be46ba2a07..99ca6884dd3 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -5362,7 +5362,6 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
     req->nextLCP = lcpNo;
 
     req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
-    req->noOfNewAttr = 0;
     req->noOfCharsets = tabPtr.p->noOfCharsets;
     req->checksumIndicator = 1;
     req->GCPIndicator = 1;
@@ -5984,8 +5983,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
   tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId;
   tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow;
   tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh;
-  tablePtr.p->minRowsLow = tableDesc.MinRowsLow;
-  tablePtr.p->minRowsHigh = tableDesc.MinRowsHigh;
+  tablePtr.p->minRowsLow = c_tableDesc.MinRowsLow;
+  tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh;
   tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag;
   tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag;
 
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 1c29f87e27a..c1d4175833e 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -448,9 +448,7 @@ public:
     Uint16 totalAttrReceived;
     Uint16 fragCopyCreation;
     Uint16 noOfKeyAttr;
-    Uint16 noOfNewAttr;
     Uint16 noOfCharsets;
-    Uint16 noOfAttributeGroups;
     Uint16 lh3DistrBits;
     Uint16 tableType;
     Uint16 primaryTableId;
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 373debadc9c..3b1f94473f5 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -1007,7 +1007,6 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
   Uint32 ttupKeyLength = req->keyLength;
   Uint32 nextLcp = req->nextLCP;
   Uint32 noOfKeyAttr = req->noOfKeyAttr;
-  Uint32 noOfNewAttr = req->noOfNewAttr;
   Uint32 noOfCharsets = req->noOfCharsets;
   Uint32 checksumIndicator = req->checksumIndicator;
   Uint32 gcpIndicator = req->GCPIndicator;
@@ -1128,7 +1127,6 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
   addfragptr.p->fragCopyCreation = (tmp == 0 ? 0 : 1);
   addfragptr.p->addfragErrorCode = 0;
   addfragptr.p->noOfKeyAttr = noOfKeyAttr;
-  addfragptr.p->noOfNewAttr = noOfNewAttr;
   addfragptr.p->noOfCharsets = noOfCharsets;
   addfragptr.p->checksumIndicator = checksumIndicator;
   addfragptr.p->GCPIndicator = gcpIndicator;
@@ -1282,12 +1280,10 @@ Dblqh::sendAddFragReq(Signal* signal)
       tupFragReq->noOfNullAttr = addfragptr.p->noOfNull;
       tupFragReq->schemaVersion = addfragptr.p->schemaVer;
       tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr;
-      tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
       tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
       tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
-      tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
       tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
-      tupFragReq->tablespaceId = addfragptr.p->tablespace_id;
+      tupFragReq->tablespaceid = addfragptr.p->tablespace_id;
 
       sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal,
                 TupFragReq::SignalLength, JBB);
       return;
@@ -1307,10 +1303,8 @@ Dblqh::sendAddFragReq(Signal* signal)
       tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */
       tupFragReq->schemaVersion = addfragptr.p->schemaVer;
       tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */
-      tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
       tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
       tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
-      tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
       tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
       sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal,
                 TupFragReq::SignalLength, JBB);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 7fb53e1d824..a6842833dd8 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -53,19 +53,15 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
   Uint32 reqinfo = tupFragReq->reqInfo;
   regTabPtr.i = tupFragReq->tableId;
   Uint32 noOfAttributes = tupFragReq->noOfAttr;
-  Uint32 pages = tupFragReq->pages;
   Uint32 fragId = tupFragReq->fragId;
   Uint32 noOfNullAttr = tupFragReq->noOfNullAttr;
   /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/
   Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr;
-
-  Uint32 noOfNewAttr = tupFragReq->noOfNewAttrREMOVE;
   Uint32 noOfCharsets = tupFragReq->noOfCharsets;
 
   Uint32 checksumIndicator = tupFragReq->checksumIndicator;
-  Uint32 noOfAttributeGroups = tupFragReq->noOfAttributeGroupsREMOVE;
-  Uint32 globalCheckpointIdIndicator = tupFragReq->globalCheckpointIdIndicator;
-  Uint32 tablespace= tupFragReq->tablespace;
+  Uint32 gcpIndicator = tupFragReq->globalCheckpointIdIndicator;
+  Uint32 tablespace_id= tupFragReq->tablespaceid;
 
   Uint64 maxRows =
     (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow;
@@ -144,7 +140,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
   regFragPtr.p->fragTableId= regTabPtr.i;
   regFragPtr.p->fragmentId= fragId;
-  regFragPtr.p->m_tablespace_id= tablespace;
+  regFragPtr.p->m_tablespace_id= tablespace_id;
   regFragPtr.p->m_undo_complete= false;
   regFragPtr.p->m_lcp_scan_op = RNIL;
   regFragPtr.p->m_lcp_keep_list = RNIL;
@@ -423,26 +419,6 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
     return;
   }
 
-  if (lastAttr)
-  {
-    ljam();
-    Uint32 noRowsPerPage = ZWORDS_ON_PAGE/regTabPtr.p->tupheadsize;
-    Uint32 noAllocatedPages =
-      (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage;
-    if (fragOperPtr.p->minRows == 0)
-      noAllocatedPages = 2;
-    else if (noAllocatedPages == 0)
-      noAllocatedPages = 2;
-    noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
-
-    if (noAllocatedPages == 0) {
-      ljam();
-      terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
-      addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
-      return;
-    }//if
-  }
-
   /* **************************************************************** */
   /* **************          TUP_ADD_ATTCONF       ****************** */
   /* **************************************************************** */
@@ -558,6 +534,26 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
   }
 #endif
 
+  {
+    ndbrequire(regTabPtr.p->m_offsets[MM].m_fix_header_size > 0);
+    Uint32 noRowsPerPage =
+      ZWORDS_ON_PAGE/regTabPtr.p->m_offsets[MM].m_fix_header_size;
+    Uint32 noAllocatedPages =
+      (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage;
+    if (fragOperPtr.p->minRows == 0)
+      noAllocatedPages = 2;
+    else if (noAllocatedPages == 0)
+      noAllocatedPages = 2;
+    noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
+
+    if (noAllocatedPages == 0) {
+      ljam();
+      terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
+      addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+      return;
+    }//if
+  }
+
   CreateFilegroupImplReq rep;
   if(regTabPtr.p->m_no_of_disk_attributes)
   {
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index 96cf3f0ad75..b7df3037c34 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -451,6 +451,7 @@ NdbDictionary::Table::getMaxRows() const
   return m_impl.m_max_rows;
 }
 
+void
 NdbDictionary::Table::setMinRows(Uint64 minRows)
 {
   m_impl.m_min_rows = minRows;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 1329cb2afb1..1e33a843a42 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -2067,11 +2067,11 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
                                 fragmentTypeMapping,
                                 (Uint32)NdbDictionary::Object::FragUndefined);
 
-  Uint64 max_rows = ((Uint64)tableDesc.MaxRowsHigh) << 32;
-  max_rows += tableDesc.MaxRowsLow;
+  Uint64 max_rows = ((Uint64)tableDesc->MaxRowsHigh) << 32;
+  max_rows += tableDesc->MaxRowsLow;
   impl->m_max_rows = max_rows;
-  Uint64 min_rows = ((Uint64)tableDesc.MinRowsHigh) << 32;
-  min_rows += tableDesc.MinRowsLow;
+  Uint64 min_rows = ((Uint64)tableDesc->MinRowsHigh) << 32;
+  min_rows += tableDesc->MinRowsLow;
   impl->m_min_rows = min_rows;
   impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag;
   impl->m_linear_flag = tableDesc->LinearHashFlag;
@@ -2526,10 +2526,10 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
   tmpTab->TableType = DictTabInfo::UserTable;
   tmpTab->PrimaryTableId = impl.m_primaryTableId;
   tmpTab->NoOfAttributes = sz;
-  tmpTab.MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
-  tmpTab.MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
-  tmpTab.MinRowsHigh = (Uint32)(impl.m_min_rows >> 32);
-  tmpTab.MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF);
+  tmpTab->MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
+  tmpTab->MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
+  tmpTab->MinRowsHigh = (Uint32)(impl.m_min_rows >> 32);
+  tmpTab->MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF);
   tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag;
   tmpTab->LinearHashFlag = impl.m_linear_flag;
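Patch 10 below refines the initial page pre-allocation moved in the hunk above: when the table has variable-size attributes, the fixed header size alone underestimates the per-row footprint. A standalone sketch of the rows-to-pages arithmetic being tuned here — the function name and the example numbers are assumptions for illustration; only the formula and the 2-page fallback come from the patches:

#include <cstdio>

typedef unsigned int Uint32;
typedef unsigned long long Uint64;

static Uint32 initial_pages(Uint64 minRows, Uint32 zwordsOnPage,
                            Uint32 fixTupHeader)
{
  Uint32 noRowsPerPage = zwordsOnPage / fixTupHeader;
  Uint32 pages = (Uint32)((minRows + noRowsPerPage - 1) / noRowsPerPage);
  if (minRows == 0 || pages == 0)
    pages = 2;                     // fall back to the old 2-page minimum
  return pages;
}

int main()
{
  // e.g. MIN_ROWS=1M, 8192 words per page, 36-word fixed tuple header
  std::printf("%u pages pre-allocated\n", initial_pages(1000000, 8192, 36));
  return 0;
}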
From 9bc0c99acaeed7340c78ac775f09b65d3d0757ae Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Tue, 27 Jun 2006 13:40:33 +0200
Subject: [PATCH 10/14] Bug #19852 Restoring backup made from cluster with full data memory fails - post merge fixes for 5.1

---
 storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index a6842833dd8..88845a6ef64 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -535,9 +535,11 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
 #endif
 
   {
-    ndbrequire(regTabPtr.p->m_offsets[MM].m_fix_header_size > 0);
-    Uint32 noRowsPerPage =
-      ZWORDS_ON_PAGE/regTabPtr.p->m_offsets[MM].m_fix_header_size;
+    Uint32 fix_tupheader = regTabPtr.p->m_offsets[MM].m_fix_header_size;
+    if(regTabPtr.p->m_attributes[MM].m_no_of_varsize != 0)
+      fix_tupheader += Tuple_header::HeaderSize + 1;
+    ndbassert(fix_tupheader > 0);
+    Uint32 noRowsPerPage = ZWORDS_ON_PAGE / fix_tupheader;
     Uint32 noAllocatedPages =
       (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage;
     if (fragOperPtr.p->minRows == 0)
From 08bec7b954a76317b100023a578e3a1d6d1c841a Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Tue, 27 Jun 2006 14:31:34 +0200
Subject: [PATCH 11/14] changed signature of get_default_no_partitions

---
 sql/ha_ndbcluster.cc | 10 +++++++---
 sql/ha_ndbcluster.h  |  2 +-
 sql/handler.h        |  2 +-
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 5e4b65f766e..b46a5971666 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -9590,8 +9590,12 @@ ndbcluster_show_status(THD* thd, stat_print_fn *stat_print,
 /*
   Create a table in NDB Cluster
  */
 
-static uint get_no_fragments(ulonglong max_rows)
+static uint get_no_fragments(TABLE_SHARE *table_share)
 {
+  ha_rows max_rows= table_share->max_rows;
+  ha_rows min_rows= table_share->min_rows;
+  if (max_rows < min_rows)
+    max_rows= min_rows;
 #if MYSQL_VERSION_ID >= 50000
   uint acc_row_size= 25 + /*safety margin*/ 2;
 #else
@@ -9628,10 +9632,10 @@ static bool adjusted_frag_count(uint no_fragments, uint no_nodes,
   return (reported_frags < no_fragments);
 }
 
-int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows)
+int ha_ndbcluster::get_default_no_partitions(TABLE_SHARE *table_share)
 {
   uint reported_frags;
-  uint no_fragments= get_no_fragments(max_rows);
+  uint no_fragments= get_no_fragments(table_share);
   uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
   if (adjusted_frag_count(no_fragments, no_nodes, reported_frags))
   {
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 2e78a00ef94..adc70808f78 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -651,7 +651,7 @@ class ha_ndbcluster: public handler
   int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
   int create_handler_files(const char *file, const char *old_name,
                            int action_flag, HA_CREATE_INFO *info);
-  int get_default_no_partitions(ulonglong max_rows);
+  int get_default_no_partitions(TABLE_SHARE *);
   bool get_no_parts(const char *name, uint *no_parts);
   void set_auto_partitions(partition_info *part_info);
 
diff --git a/sql/handler.h b/sql/handler.h
index fb5f0f4ba05..025b76213d7 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1335,7 +1335,7 @@ public:
   virtual const char *table_type() const =0;
   virtual const char **bas_ext() const =0;
-  virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
+  virtual int get_default_no_partitions(TABLE_SHARE *) { return 1;}
   virtual void set_auto_partitions(partition_info *part_info) { return; }
   virtual bool get_no_parts(const char *name,
                             uint *no_parts)
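Patch 12 below replaces the TABLE_SHARE parameter just introduced with HA_CREATE_INFO, so the default partition count can be computed from max_rows/min_rows while the table is still being created. A standalone sketch of the estimate get_no_fragments() makes — the helper name is illustrative; the 25+2 bytes-per-row ACC overhead and the 512MB fragment size are taken from the hunks above:

#include <cstdio>

typedef unsigned long long ulonglong;

static unsigned no_fragments_for(ulonglong max_rows)
{
  unsigned acc_row_size = 25 + /*safety margin*/ 2;         // 5.0+ constants
  ulonglong acc_fragment_size = 512ULL * 1024 * 1024;        // 512MB per fragment
  return (unsigned)((max_rows * acc_row_size) / acc_fragment_size + 1);
}

int main()
{
  // e.g. MAX_ROWS = 100M rows -> a handful of fragments
  std::printf("%u fragment(s)\n", no_fragments_for(100000000ULL));
  return 0;
}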
From 9791d53445cf07a4f49a2a913cdc914d2ce06046 Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Tue, 27 Jun 2006 22:19:27 +0200
Subject: [PATCH 12/14] partition functions to pass create_info, not only max_rows

---
 sql/ha_ndbcluster.cc  | 28 ++++++++++++++++++----------
 sql/ha_ndbcluster.h   |  2 +-
 sql/handler.h         |  2 +-
 sql/partition_info.cc | 27 ++++++++++++++-------------
 sql/partition_info.h  |  8 ++++----
 sql/sql_partition.cc  |  3 +--
 sql/sql_partition.h   |  2 +-
 sql/sql_table.cc      | 10 +++++-----
 8 files changed, 45 insertions(+), 37 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index b46a5971666..361ec9b0d2b 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -9590,12 +9590,8 @@ ndbcluster_show_status(THD* thd, stat_print_fn *stat_print,
 /*
   Create a table in NDB Cluster
  */
 
-static uint get_no_fragments(TABLE_SHARE *table_share)
+static uint get_no_fragments(ulonglong max_rows)
 {
-  ha_rows max_rows= table_share->max_rows;
-  ha_rows min_rows= table_share->min_rows;
-  if (max_rows < min_rows)
-    max_rows= min_rows;
 #if MYSQL_VERSION_ID >= 50000
   uint acc_row_size= 25 + /*safety margin*/ 2;
 #else
@@ -9628,10 +9628,22 @@ static bool adjusted_frag_count(uint no_fragments, uint no_nodes,
   return (reported_frags < no_fragments);
 }
 
-int ha_ndbcluster::get_default_no_partitions(TABLE_SHARE *table_share)
+int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *info)
 {
+  ha_rows max_rows, min_rows;
+  if (info)
+  {
+    max_rows= info->max_rows;
+    min_rows= info->min_rows;
+  }
+  else
+  {
+    max_rows= table_share->max_rows;
+    min_rows= table_share->min_rows;
+  }
   uint reported_frags;
-  uint no_fragments= get_no_fragments(table_share);
+  uint no_fragments=
+    get_no_fragments(max_rows >= min_rows ? max_rows : min_rows);
   uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
   if (adjusted_frag_count(no_fragments, no_nodes, reported_frags))
   {
@@ -9884,14 +9892,14 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
   tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions);
   tab->setLinearFlag(part_info->linear_hash_ind);
   {
-    ha_rows max_rows= form->s->max_rows;
-    ha_rows min_rows= form->s->min_rows;
+    ha_rows max_rows= table_share->max_rows;
+    ha_rows min_rows= table_share->min_rows;
     if (max_rows < min_rows)
       max_rows= min_rows;
     if (max_rows != (ha_rows)0) /* default setting, don't set fragmentation */
     {
       tab->setMaxRows(max_rows);
-      tab->setMaxRows(min_rows);
+      tab->setMinRows(min_rows);
     }
   }
   tab->setTablespaceNames(ts_names, fd_index*sizeof(char*));
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index adc70808f78..9dc9ee79755 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -651,7 +651,7 @@ class ha_ndbcluster: public handler
   int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
   int create_handler_files(const char *file, const char *old_name,
                            int action_flag, HA_CREATE_INFO *info);
-  int get_default_no_partitions(TABLE_SHARE *);
+  int get_default_no_partitions(HA_CREATE_INFO *info);
   bool get_no_parts(const char *name, uint *no_parts);
   void set_auto_partitions(partition_info *part_info);
 
diff --git a/sql/handler.h b/sql/handler.h
index 025b76213d7..94f4519a2e7 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1335,7 +1335,7 @@ public:
   virtual const char *table_type() const =0;
   virtual const char **bas_ext() const =0;
-  virtual int get_default_no_partitions(TABLE_SHARE *) { return 1;}
+  virtual int get_default_no_partitions(HA_CREATE_INFO *info) { return 1;}
   virtual void set_auto_partitions(partition_info *part_info) { return; }
   virtual bool get_no_parts(const char *name,
                             uint *no_parts)
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 39c8d976732..286637bd9aa 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -153,7 +153,7 @@ char *partition_info::create_subpartition_name(uint subpart_no,
   SYNOPSIS
     set_up_default_partitions()
     file              A reference to a handler of the table
-    max_rows          Maximum number of rows stored in the table
+    info              Create info
     start_no          Starting partition number
 
   RETURN VALUE
@@ -169,7 +169,8 @@ char *partition_info::create_subpartition_name(uint subpart_no,
     The external routine needing this code is check_partition_info
 */
 
-bool partition_info::set_up_default_partitions(handler *file, ulonglong max_rows,
+bool partition_info::set_up_default_partitions(handler *file,
+                                               HA_CREATE_INFO *info,
                                                uint start_no)
 {
   uint i;
@@ -188,7 +189,7 @@ bool partition_info::set_up_default_partitions(handler *file, ulonglong max_rows
     goto end;
   }
   if (no_parts == 0)
-    no_parts= file->get_default_no_partitions(max_rows);
+    no_parts= file->get_default_no_partitions(info);
   if (unlikely(no_parts > MAX_PARTITIONS))
   {
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -228,7 +229,7 @@ end:
   SYNOPSIS
     set_up_default_subpartitions()
     file              A reference to a handler of the table
-    max_rows          Maximum number of rows stored in the table
+    info              Create info
 
   RETURN VALUE
     TRUE              Error, attempted default values not possible
@@ -244,7 +245,7 @@ end:
 */
 
 bool partition_info::set_up_default_subpartitions(handler *file,
-                                                  ulonglong max_rows)
+                                                  HA_CREATE_INFO *info)
 {
   uint i, j;
   char *default_name, *name_ptr;
@@ -254,7 +255,7 @@ bool partition_info::set_up_default_subpartitions(handler *file,
   DBUG_ENTER("partition_info::set_up_default_subpartitions");
 
   if (no_subparts == 0)
-    no_subparts= file->get_default_no_partitions(max_rows);
+    no_subparts= file->get_default_no_partitions(info);
   if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
   {
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -296,7 +297,7 @@ end:
   SYNOPSIS
     set_up_defaults_for_partitioning()
    file               A reference to a handler of the table
-    max_rows          Maximum number of rows stored in the table
+    info              Create info
     start_no          Starting partition number
 
   RETURN VALUE
@@ -309,7 +310,7 @@ end:
 */
 
 bool partition_info::set_up_defaults_for_partitioning(handler *file,
-                                                      ulonglong max_rows,
+                                                      HA_CREATE_INFO *info,
                                                       uint start_no)
 {
   DBUG_ENTER("partition_info::set_up_defaults_for_partitioning");
@@ -318,10 +319,10 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file,
   {
     default_partitions_setup= TRUE;
     if (use_default_partitions)
-      DBUG_RETURN(set_up_default_partitions(file, max_rows, start_no));
+      DBUG_RETURN(set_up_default_partitions(file, info, start_no));
     if (is_sub_partitioned() &&
         use_default_subpartitions)
-      DBUG_RETURN(set_up_default_subpartitions(file, max_rows));
+      DBUG_RETURN(set_up_default_subpartitions(file, info));
   }
   DBUG_RETURN(FALSE);
 }
@@ -692,7 +693,7 @@ end:
   SYNOPSIS
     check_partition_info()
     file              A reference to a handler of the table
-    max_rows          Maximum number of rows stored in the table
+    info              Create info
     engine_type       Return value for used engine in partitions
 
   RETURN VALUE
@@ -708,7 +709,7 @@ end:
 */
 
 bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
-                                          handler *file, ulonglong max_rows)
+                                          handler *file, HA_CREATE_INFO *info)
 {
   handlerton **engine_array= NULL;
   uint part_count= 0;
@@ -743,7 +744,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
     my_error(ER_SUBPARTITION_ERROR, MYF(0));
     goto end;
   }
-  if (unlikely(set_up_defaults_for_partitioning(file, max_rows, (uint)0)))
+  if (unlikely(set_up_defaults_for_partitioning(file, info, (uint)0)))
     goto end;
   tot_partitions= get_tot_partitions();
   if (unlikely(tot_partitions > MAX_PARTITIONS))
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 3d8c6a40221..d938d21653a 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -243,21 +243,21 @@ public:
     return no_parts * (is_sub_partitioned() ? no_subparts : 1);
   }
-  bool set_up_defaults_for_partitioning(handler *file, ulonglong max_rows,
+  bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info,
                                         uint start_no);
   char *has_unique_names();
   static bool check_engine_mix(handlerton **engine_array, uint no_parts);
   bool check_range_constants();
   bool check_list_constants();
   bool check_partition_info(THD *thd, handlerton **eng_type,
-                            handler *file, ulonglong max_rows);
+                            handler *file, HA_CREATE_INFO *info);
   void print_no_partition_found(TABLE *table);
 private:
   static int list_part_cmp(const void* a, const void* b);
   static int list_part_cmp_unsigned(const void* a, const void* b);
-  bool set_up_default_partitions(handler *file, ulonglong max_rows,
+  bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info,
                                  uint start_no);
-  bool set_up_default_subpartitions(handler *file, ulonglong max_rows);
+  bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info);
   char *create_default_partition_names(uint part_no, uint no_parts,
                                        uint start_no);
   char *create_subpartition_name(uint subpart_no, const char *part_name);
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 00c15c2dbca..701af808c96 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -3834,14 +3834,13 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
   if (alter_info->flags == ALTER_TABLE_REORG)
   {
     uint new_part_no, curr_part_no;
-    ulonglong max_rows= table->s->max_rows;
     if (tab_part_info->part_type != HASH_PARTITION ||
         tab_part_info->use_default_no_partitions)
     {
       my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
       DBUG_RETURN(TRUE);
     }
-    new_part_no= table->file->get_default_no_partitions(max_rows);
+    new_part_no= table->file->get_default_no_partitions(create_info);
     curr_part_no= tab_part_info->no_parts;
     if (new_part_no == curr_part_no)
     {
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index 845180ad592..e34d71dfdc5 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -65,7 +65,7 @@ int get_part_for_delete(const byte *buf, const byte *rec0,
                         partition_info *part_info, uint32 *part_id);
 void prune_partition_set(const TABLE *table, part_id_range *part_spec);
 bool check_partition_info(partition_info *part_info,handlerton **eng_type,
-                          TABLE *table, handler *file, ulonglong max_rows);
+                          TABLE *table, handler *file, HA_CREATE_INFO *info);
 bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind);
 char *generate_partition_syntax(partition_info *part_info,
                                 uint *buf_length, bool use_sql_alloc,
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 2ecbc94541a..7d8631e3236 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3185,8 +3185,7 @@ bool mysql_create_table_internal(THD *thd,
     }
     DBUG_PRINT("info", ("db_type = %d",
                         ha_legacy_type(part_info->default_engine_type)));
-    if (part_info->check_partition_info(thd, &engine_type, file,
-                                        create_info->max_rows))
+    if (part_info->check_partition_info(thd, &engine_type, file, create_info))
       goto err;
     part_info->default_engine_type= engine_type;
 
@@ -3224,7 +3223,8 @@ bool mysql_create_table_internal(THD *thd,
       */
       if (part_info->use_default_no_partitions &&
           part_info->no_parts &&
-          (int)part_info->no_parts != file->get_default_no_partitions(0ULL))
+          (int)part_info->no_parts !=
+          file->get_default_no_partitions(create_info))
       {
         uint i;
         List_iterator part_it(part_info->partitions);
@@ -3237,10 +3237,10 @@ bool mysql_create_table_internal(THD *thd,
                part_info->use_default_no_subpartitions &&
                part_info->no_subparts &&
                (int)part_info->no_subparts !=
-
file->get_default_no_partitions(0ULL)) + file->get_default_no_partitions(create_info)) { DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE); - part_info->no_subparts= file->get_default_no_partitions(0ULL); + part_info->no_subparts= file->get_default_no_partitions(create_info); } } else if (create_info->db_type != engine_type) From 40267422ebeefe0631ce8ad06b83b97dfa035b78 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 28 Jun 2006 01:03:10 +0200 Subject: [PATCH 13/14] corrected partition test case results that were put back --- mysql-test/r/ndb_dd_backuprestore.result | 24 ++++++++++++------------ mysql-test/r/ndb_partition_key.result | 14 +++++++------- mysql-test/r/ndb_partition_range.result | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/mysql-test/r/ndb_dd_backuprestore.result b/mysql-test/r/ndb_dd_backuprestore.result index e7568e4ce49..cb6c62b16da 100644 --- a/mysql-test/r/ndb_dd_backuprestore.result +++ b/mysql-test/r/ndb_dd_backuprestore.result @@ -175,7 +175,7 @@ t1 CREATE TABLE `t1` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 4 +) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */ SHOW CREATE TABLE test.t2; Table Create Table t2 CREATE TABLE `t2` ( @@ -184,7 +184,7 @@ t2 CREATE TABLE `t2` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t3; Table Create Table t3 CREATE TABLE `t3` ( @@ -193,7 +193,7 @@ t3 CREATE TABLE `t3` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) +) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t4; Table Create Table t4 CREATE TABLE `t4` ( @@ -202,7 +202,7 @@ t4 CREATE TABLE `t4` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 2 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */ SHOW CREATE TABLE test.t5; Table Create Table t5 CREATE TABLE `t5` ( @@ -211,7 +211,7 @@ t5 CREATE TABLE `t5` ( `c3` int(11) NOT NULL, `c4` bit(1) NOT NULL, PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ SHOW CREATE TABLE test.t6; Table Create Table t6 CREATE TABLE 
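
Note on the patch above: the crux is that ha_ndbcluster::get_default_no_partitions() now
prefers the row estimates carried by the statement being executed (HA_CREATE_INFO) and only
falls back to the values cached in TABLE_SHARE when no create info is passed; this also lets
ALTER TABLE ... REORGANIZE size partitions from the new statement rather than the share's old
max_rows. A minimal standalone sketch of that selection logic follows; the two structs are
stand-ins reduced to the fields the patch reads, and acc_fragment_size is an assumed
placeholder constant, not the engine's real value.

  // Sketch (not the server code): pick row estimates from the statement's
  // create info when present, else from the table share.
  typedef unsigned long long ha_rows;   // stand-in for MySQL's ha_rows

  struct create_info_t { ha_rows max_rows, min_rows; };  // ~HA_CREATE_INFO
  struct table_share_t { ha_rows max_rows, min_rows; };  // ~TABLE_SHARE

  // Mirrors get_no_fragments() after the patch: the caller resolves the
  // max_rows/min_rows conflict, this function only sizes fragments.
  static unsigned get_no_fragments(ha_rows max_rows)
  {
    const unsigned acc_row_size= 25 + 2;              // 5.0 branch in the hunk
    const ha_rows acc_fragment_size= 512*1024*1024;   // assumed placeholder
    return (unsigned)((max_rows * acc_row_size) / acc_fragment_size + 1);
  }

  unsigned default_no_partitions(const create_info_t *info,
                                 const table_share_t *share)
  {
    ha_rows max_rows, min_rows;
    if (info)                       // CREATE/ALTER in progress: trust it
    {
      max_rows= info->max_rows;
      min_rows= info->min_rows;
    }
    else                            // no statement context: use the share
    {
      max_rows= share->max_rows;
      min_rows= share->min_rows;
    }
    return get_no_fragments(max_rows >= min_rows ? max_rows : min_rows);
  }
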
From 40267422ebeefe0631ce8ad06b83b97dfa035b78 Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Wed, 28 Jun 2006 01:03:10 +0200
Subject: [PATCH 13/14] corrected partition test case results that were put
 back

---
 mysql-test/r/ndb_dd_backuprestore.result | 24 ++++++++++++------------
 mysql-test/r/ndb_partition_key.result    | 14 +++++++-------
 mysql-test/r/ndb_partition_range.result  |  2 +-
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/mysql-test/r/ndb_dd_backuprestore.result b/mysql-test/r/ndb_dd_backuprestore.result
index e7568e4ce49..cb6c62b16da 100644
--- a/mysql-test/r/ndb_dd_backuprestore.result
+++ b/mysql-test/r/ndb_dd_backuprestore.result
@@ -175,7 +175,7 @@ t1 CREATE TABLE `t1` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 4
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */
 SHOW CREATE TABLE test.t2;
 Table Create Table
 t2 CREATE TABLE `t2` (
@@ -184,7 +184,7 @@ t2 CREATE TABLE `t2` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 SHOW CREATE TABLE test.t3;
 Table Create Table
 t3 CREATE TABLE `t3` (
@@ -193,7 +193,7 @@ t3 CREATE TABLE `t3` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */
 SHOW CREATE TABLE test.t4;
 Table Create Table
 t4 CREATE TABLE `t4` (
@@ -202,7 +202,7 @@ t4 CREATE TABLE `t4` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 2
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */
 SHOW CREATE TABLE test.t5;
 Table Create Table
 t5 CREATE TABLE `t5` (
@@ -211,7 +211,7 @@ t5 CREATE TABLE `t5` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 SHOW CREATE TABLE test.t6;
 Table Create Table
 t6 CREATE TABLE `t6` (
@@ -220,7 +220,7 @@ t6 CREATE TABLE `t6` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
 SELECT * FROM information_schema.partitions WHERE table_name= 't1';
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
 NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
@@ -341,7 +341,7 @@ t1 CREATE TABLE `t1` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 4
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */
 SHOW CREATE TABLE test.t2;
 Table Create Table
 t2 CREATE TABLE `t2` (
@@ -350,7 +350,7 @@ t2 CREATE TABLE `t2` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 SHOW CREATE TABLE test.t3;
 Table Create Table
 t3 CREATE TABLE `t3` (
@@ -359,7 +359,7 @@ t3 CREATE TABLE `t3` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */
 SHOW CREATE TABLE test.t4;
 Table Create Table
 t4 CREATE TABLE `t4` (
@@ -368,7 +368,7 @@ t4 CREATE TABLE `t4` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (c3) PARTITIONS 2
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */
 SHOW CREATE TABLE test.t5;
 Table Create Table
 t5 CREATE TABLE `t5` (
@@ -377,7 +377,7 @@ t5 CREATE TABLE `t5` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 SHOW CREATE TABLE test.t6;
 Table Create Table
 t6 CREATE TABLE `t6` (
@@ -386,7 +386,7 @@ t6 CREATE TABLE `t6` (
 `c3` int(11) NOT NULL,
 `c4` bit(1) NOT NULL,
 PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
 SELECT * FROM information_schema.partitions WHERE table_name= 't1';
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
 NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result
index b936469ac14..fd793c4c2c7 100644
--- a/mysql-test/r/ndb_partition_key.result
+++ b/mysql-test/r/ndb_partition_key.result
@@ -78,7 +78,7 @@ t1 CREATE TABLE `t1` (
 `c` int(11) NOT NULL DEFAULT '0',
 `d` int(11) DEFAULT NULL,
 PRIMARY KEY (`a`,`b`,`c`) USING HASH
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (b)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (b) */
 DROP TABLE t1;
 CREATE TABLE t1 (a int not null primary key)
 PARTITION BY KEY(a)
@@ -97,19 +97,19 @@ show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 alter table t1 engine=heap;
 show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=MEMORY DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = MEMORY, PARTITION p1 ENGINE = MEMORY)
+) ENGINE=MEMORY DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = MEMORY, PARTITION p1 ENGINE = MEMORY) */
 alter table t1 engine=ndb;
 show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 alter table t1 engine=heap remove partitioning;
 show create table t1;
 Table Create Table
@@ -123,7 +123,7 @@ show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 alter table t1
 partition by key (a)
 (partition p0 engine=ndb, partition p1 engine=ndb);
@@ -131,7 +131,7 @@ show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 alter table t1 remove partitioning;
 show create table t1;
 Table Create Table
@@ -150,7 +150,7 @@ show create table t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
 `a` int(11) DEFAULT NULL
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
 drop table t1;
 CREATE TABLE t1 (
 c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
diff --git a/mysql-test/r/ndb_partition_range.result b/mysql-test/r/ndb_partition_range.result
index cb79f04873e..9cc9aa2cda9 100644
--- a/mysql-test/r/ndb_partition_range.result
+++ b/mysql-test/r/ndb_partition_range.result
@@ -115,7 +115,7 @@ t1 CREATE TABLE `t1` (
 `c` int(11) NOT NULL,
 PRIMARY KEY (`b`),
 UNIQUE KEY `a` (`a`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (b) (PARTITION x1 VALUES LESS THAN (5) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (10) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (20) ENGINE = ndbcluster)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (b) (PARTITION x1 VALUES LESS THAN (5) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (10) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (20) ENGINE = ndbcluster) */
 drop table t1;
 CREATE TABLE t1 (id MEDIUMINT NOT NULL,
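
Note on the patch above: the expected output changed because SHOW CREATE TABLE now wraps the
partitioning clause in a MySQL versioned comment. A server at or above the stated version
(50100 = 5.1.0) executes the comment body as SQL, while older servers and other parsers see
only a comment, so dumps containing partitioned tables stay loadable on pre-5.1 binaries.
Below is an illustrative re-implementation of that convention, not the server's parser code;
all names in it are invented for the sketch.

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>
  #include <string>

  // Return the executable body of a "/*!NNNNN body */" versioned comment,
  // or "" when the server is too old and must treat it as a plain comment.
  static std::string versioned_body(const char *sql, unsigned long server_version)
  {
    if (strncmp(sql, "/*!", 3) != 0)
      return "";
    char *after_ver;
    unsigned long ver= strtoul(sql + 3, &after_ver, 10);
    const char *close= strstr(after_ver, "*/");
    if (close == NULL || server_version < ver)
      return "";                           // skipped on old servers
    return std::string(after_ver, close);  // parsed as SQL on new servers
  }

  int main()
  {
    const char *clause= "/*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */";
    std::printf("5.0 server sees: \"%s\"\n", versioned_body(clause, 50022).c_str());
    std::printf("5.1 server sees: \"%s\"\n", versioned_body(clause, 50111).c_str());
    return 0;
  }
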
From a23259c2efecd5be4ea96c865d0e1879c89bba93 Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Wed, 28 Jun 2006 02:35:13 +0200
Subject: [PATCH 14/14] Bug #20705 table truncation from one mysqld causes
 ERROR 1412 on other mysqld servers

- make sure to invalidate even if table is not binlogged
---
 sql/ha_ndbcluster_binlog.cc | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 40a98563613..b18fa8ec931 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -1770,8 +1770,31 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
       /* acknowledge this query _after_ epoch completion */
       post_epoch_unlock= 1;
       break;
-    case SOT_CREATE_TABLE:
     case SOT_TRUNCATE_TABLE:
+    {
+      char key[FN_REFLEN];
+      build_table_filename(key, sizeof(key), schema->db, schema->name, "");
+      NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
+      // invalidation already handled by binlog thread
+      if (!share || !share->op)
+      {
+        {
+          injector_ndb->setDatabaseName(schema->db);
+          Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(),
+                                   schema->name);
+          ndbtab_g.invalidate();
+        }
+        TABLE_LIST table_list;
+        bzero((char*) &table_list,sizeof(table_list));
+        table_list.db= schema->db;
+        table_list.alias= table_list.table_name= schema->name;
+        close_cached_tables(thd, 0, &table_list, FALSE);
+      }
+      if (share)
+        free_share(&share);
+    }
+    // fall through
+    case SOT_CREATE_TABLE:
       pthread_mutex_lock(&LOCK_open);
       if (ndb_create_table_from_engine(thd, schema->db, schema->name))
       {