From 2e43e47040f8ece21b45e2d76089d6514788125c Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 14 Sep 2004 08:52:21 +0000 Subject: [PATCH 1/2] moved all ndb thread specific data into new placeholder new methods to keep "records" up to date unset flag HA_NOT_EXACT_COUNT to make handler read "records" field, for count() optim and join optimization new methods to keep "records" up to date correct record field in ndbcluster handler new method for ndbcluster handler to store/retrieve table and thread specific data changed local hash to store new table_info object, with placeholders for local data, instead of TableImpl changed deleteKey to return pointer to deleted object moved heavy global cache fetch from inline to separate method mysql-test/r/ndb_alter_table.result: correct record field in ndbcluster handler mysql-test/r/ndb_blob.result: correct record field in ndbcluster handler ndb/include/ndbapi/NdbDictionary.hpp: new method for ndbcluster handler to store/retrieve table and thread specific data ndb/src/ndbapi/DictCache.cpp: changed local hash to store new table_info object, with placeholders for local data, instead of TableImpl ndb/src/ndbapi/DictCache.hpp: changed local hash to store new table_info object, with placeholders for local data, instead of TableImpl ndb/src/ndbapi/Ndb.cpp: replaced method DictionaryImpl::getTable with DictionaryImpl::get_local_table_info ndb/src/ndbapi/NdbDictionary.cpp: new method for ndbcluster handler to store/retrieve table and thread specific data ndb/src/ndbapi/NdbDictionaryImpl.cpp: changed local hash to store new table_info object, with placeholders for local data, instead of TableImpl moved heavy global cache fetch from inline to separate method ndb/src/ndbapi/NdbDictionaryImpl.hpp: replaced method DictionaryImpl::getTable with DictionaryImpl::get_local_table_info ndb/src/ndbapi/NdbLinHash.hpp: changed deleteKey to return pointer to deleted object sql/ha_ndbcluster.cc: moved all ndb thread specific data into new placeholder new
methods to keep "records" up to date unset flag HA_NOT_EXACT_COUNT to make handler read "records" field, for count() optim and join optimization sql/ha_ndbcluster.h: new methods to keep "records" up to date sql/sql_class.h: moved all ndb thread specific data into new placeholder --- mysql-test/r/ndb_alter_table.result | 8 +- mysql-test/r/ndb_blob.result | 4 +- ndb/include/ndbapi/NdbDictionary.hpp | 2 + ndb/src/ndbapi/DictCache.cpp | 27 ++++- ndb/src/ndbapi/DictCache.hpp | 16 ++- ndb/src/ndbapi/Ndb.cpp | 12 ++- ndb/src/ndbapi/NdbDictionary.cpp | 9 +- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 43 +++++++- ndb/src/ndbapi/NdbDictionaryImpl.hpp | 59 ++++------- ndb/src/ndbapi/NdbLinHash.hpp | 12 ++- sql/ha_ndbcluster.cc | 150 +++++++++++++++++++++++---- sql/ha_ndbcluster.h | 8 +- sql/sql_class.h | 3 +- 13 files changed, 261 insertions(+), 92 deletions(-) diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result index f55f680e972..43a4d5d561c 100644 --- a/mysql-test/r/ndb_alter_table.result +++ b/mysql-test/r/ndb_alter_table.result @@ -18,12 +18,12 @@ col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null, col6 int not null, to_be_deleted int) ENGINE=ndbcluster; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 1 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 9 Dynamic 0 0 0 NULL 0 0 1 NULL NULL NULL latin1_swedish_ci NULL insert into t1 values (0,4,3,5,"PENDING",1,7),(NULL,4,3,5,"PENDING",1,7),(31,4,3,5,"PENDING",1,7), (7,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7), (100,4,3,5,"PENDING",1,7), (99,4,3,5,"PENDING",1,7), (8,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time 
Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 9 Dynamic 9 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL select * from t1 order by col1; col1 col2 col3 col4 col5 col6 to_be_deleted 0 4 3 5 PENDING 1 7 @@ -43,7 +43,7 @@ change column col2 fourth varchar(30) not null after col3, modify column col6 int not null first; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 9 Dynamic 9 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL select * from t1 order by col1; col6 col1 col3 fourth col4 col4_5 col5 col7 col8 1 0 3 4 5 PENDING 0000-00-00 00:00:00 @@ -58,7 +58,7 @@ col6 col1 col3 fourth col4 col4_5 col5 col7 col8 insert into t1 values (2, NULL,4,3,5,99,"PENDING","EXTRA",'2004-01-01 00:00:00'); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 103 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 9 Dynamic 10 0 0 NULL 0 0 103 NULL NULL NULL latin1_swedish_ci NULL select * from t1 order by col1; col6 col1 col3 fourth col4 col4_5 col5 col7 col8 1 0 3 4 5 PENDING 0000-00-00 00:00:00 diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result index 45b003e6967..004713be718 100644 --- a/mysql-test/r/ndb_blob.result +++ b/mysql-test/r/ndb_blob.result @@ -150,7 +150,7 @@ insert into t1 values(9,'b9',999,'dd9'); commit; explain select * from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 100 +1 SIMPLE t1 ALL NULL NULL NULL NULL 9 
select * from t1 order by a; a b c d 1 b1 111 dd1 @@ -185,7 +185,7 @@ insert into t1 values(2,@b2,222,@d2); commit; explain select * from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 100 +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3) from t1 order by a; a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3) diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 71ef5dbf630..bb7e96bde1b 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -1066,6 +1066,8 @@ public: Dictionary(NdbDictionaryImpl&); const Table * getIndexTable(const char * indexName, const char * tableName); + public: + const Table * getTable(const char * name, void **data); }; }; diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index 5f620f77906..0c778d7222e 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -21,6 +21,21 @@ #include #include +Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl, Uint32 sz) +{ + m_table_impl= table_impl; + if (sz) + m_local_data= malloc(sz); + else + m_local_data= 0; +} + +Ndb_local_table_info::~Ndb_local_table_info() +{ + if (m_local_data) + free(m_local_data); +} + LocalDictCache::LocalDictCache(){ m_tableHash.createHashTable(); } @@ -29,22 +44,24 @@ LocalDictCache::~LocalDictCache(){ m_tableHash.releaseHashTable(); } -NdbTableImpl * +Ndb_local_table_info * LocalDictCache::get(const char * name){ const Uint32 len = strlen(name); return m_tableHash.getData(name, len); } void -LocalDictCache::put(const char * name, NdbTableImpl * tab){ - const Uint32 id = tab->m_tableId; +LocalDictCache::put(const char * name, Ndb_local_table_info * tab_info){ + const Uint32 id = tab_info->m_table_impl->m_tableId; - m_tableHash.insertKey(name, strlen(name), id, tab); + m_tableHash.insertKey(name, 
strlen(name), id, tab_info); } void LocalDictCache::drop(const char * name){ - m_tableHash.deleteKey(name, strlen(name)); + Ndb_local_table_info *info= m_tableHash.deleteKey(name, strlen(name)); + DBUG_ASSERT(info != 0); + delete info; } /***************************************************************** diff --git a/ndb/src/ndbapi/DictCache.hpp b/ndb/src/ndbapi/DictCache.hpp index 098acc9006a..f94ad7a6fa9 100644 --- a/ndb/src/ndbapi/DictCache.hpp +++ b/ndb/src/ndbapi/DictCache.hpp @@ -27,6 +27,16 @@ #include #include "NdbLinHash.hpp" +class Ndb_local_table_info { +public: + Ndb_local_table_info(NdbTableImpl *table_impl, Uint32 sz=0); + ~Ndb_local_table_info(); + NdbTableImpl *m_table_impl; + Uint64 m_first_tuple_id; + Uint64 m_last_tuple_id; + void *m_local_data; +}; + /** * A non thread safe dict cache */ @@ -35,12 +45,12 @@ public: LocalDictCache(); ~LocalDictCache(); - NdbTableImpl * get(const char * name); + Ndb_local_table_info * get(const char * name); - void put(const char * name, NdbTableImpl *); + void put(const char * name, Ndb_local_table_info *); void drop(const char * name); - NdbLinHash m_tableHash; // On name + NdbLinHash m_tableHash; // On name }; /** diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index d5f9ef1a51b..74e4c2a0c84 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -753,9 +753,11 @@ Uint64 Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize) { DEBUG_TRACE("getAutoIncrementValue"); - const NdbTableImpl* table = theDictionary->getTable(aTableName); - if (table == 0) + const char * internalTableName = internalizeTableName(aTableName); + Ndb_local_table_info *info= theDictionary->get_local_table_info(internalTableName); + if (info == 0) return ~0; + const NdbTableImpl *table= info->m_table_impl; Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize); return tupleId; } @@ -832,11 +834,13 @@ bool Ndb::setAutoIncrementValue(const char* aTableName, Uint64 val, bool increase) { 
DEBUG_TRACE("setAutoIncrementValue " << val); - const NdbTableImpl* table = theDictionary->getTable(aTableName); - if (table == 0) { + const char * internalTableName= internalizeTableName(aTableName); + Ndb_local_table_info *info= theDictionary->get_local_table_info(internalTableName); + if (info == 0) { theError= theDictionary->getNdbError(); return false; } + const NdbTableImpl* table= info->m_table_impl; return setTupleIdInNdb(table->m_tableId, val, increase); } diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index d0fb062f78a..a92126abae7 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -681,13 +681,18 @@ NdbDictionary::Dictionary::alterTable(const Table & t){ } const NdbDictionary::Table * -NdbDictionary::Dictionary::getTable(const char * name){ - NdbTableImpl * t = m_impl.getTable(name); +NdbDictionary::Dictionary::getTable(const char * name, void **data){ + NdbTableImpl * t = m_impl.getTable(name, data); if(t) return t->m_facade; return 0; } +const NdbDictionary::Table * +NdbDictionary::Dictionary::getTable(const char * name){ + return getTable(name, 0); +} + void NdbDictionary::Dictionary::invalidateTable(const char * name){ NdbTableImpl * t = m_impl.getTable(name); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index d7fccb3bd37..6b36b776f14 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -595,11 +595,12 @@ static int f_dictionary_count = 0; NdbDictionaryImpl::~NdbDictionaryImpl() { - NdbElement_t * curr = m_localHash.m_tableHash.getNext(0); + NdbElement_t * curr = m_localHash.m_tableHash.getNext(0); if(m_globalHash){ while(curr != 0){ m_globalHash->lock(); - m_globalHash->release(curr->theData); + m_globalHash->release(curr->theData->m_table_impl); + delete curr->theData; m_globalHash->unlock(); curr = m_localHash.m_tableHash.getNext(curr); @@ -620,7 +621,39 @@ 
NdbDictionaryImpl::~NdbDictionaryImpl() } } +Ndb_local_table_info * +NdbDictionaryImpl::fetchGlobalTableImpl(const char * internalTableName) +{ + NdbTableImpl *impl; + m_globalHash->lock(); + impl = m_globalHash->get(internalTableName); + m_globalHash->unlock(); + + if (impl == 0){ + impl = m_receiver.getTable(internalTableName, m_ndb.usingFullyQualifiedNames()); + m_globalHash->lock(); + m_globalHash->put(internalTableName, impl); + m_globalHash->unlock(); + + if(impl == 0){ + return 0; + } + } + + Ndb_local_table_info *info= new Ndb_local_table_info(impl, 32); + info->m_first_tuple_id= ~0; + info->m_last_tuple_id= ~0; + + m_localHash.put(internalTableName, info); + + m_ndb.theFirstTupleId[impl->getTableId()] = ~0; + m_ndb.theLastTupleId[impl->getTableId()] = ~0; + + addBlobTables(*impl); + + return info; +} #if 0 bool @@ -1504,7 +1537,6 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, : createTable(&tSignal, ptr); if (!alter && haveAutoIncrement) { - // if (!ndb.setAutoIncrementValue(impl.m_internalName.c_str(), autoIncrementValue)) { if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), autoIncrementValue)) { if (ndb.theError.code == 0) { m_error.code = 4336; @@ -1775,11 +1807,12 @@ NdbIndexImpl* NdbDictionaryImpl::getIndexImpl(const char * externalName, const char * internalName) { - NdbTableImpl* tab = getTableImpl(internalName); - if(tab == 0){ + Ndb_local_table_info * info = get_local_table_info(internalName); + if(info == 0){ m_error.code = 4243; return 0; } + NdbTableImpl * tab = info->m_table_impl; if(tab->m_indexType == NdbDictionary::Index::Undefined){ // Not an index diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index 8f197856f57..cd0463f7126 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -390,8 +390,8 @@ public: int listObjects(List& list, NdbDictionary::Object::Type type); int listIndexes(List& list, const char * tableName); - NdbTableImpl * 
getTable(const char * tableName); - NdbTableImpl * getTableImpl(const char * internalName); + NdbTableImpl * getTable(const char * tableName, void **data= 0); + Ndb_local_table_info * get_local_table_info(const char * internalName); NdbIndexImpl * getIndex(const char * indexName, const char * tableName); NdbIndexImpl * getIndexImpl(const char * name, const char * internalName); @@ -410,6 +410,8 @@ public: NdbDictInterface m_receiver; Ndb & m_ndb; +private: + Ndb_local_table_info * fetchGlobalTableImpl(const char * internalName); }; inline @@ -598,45 +600,28 @@ NdbDictionaryImpl::getImpl(const NdbDictionary::Dictionary & t){ inline NdbTableImpl * -NdbDictionaryImpl::getTable(const char * tableName) +NdbDictionaryImpl::getTable(const char * tableName, void **data) { const char * internalTableName = m_ndb.internalizeTableName(tableName); - - return getTableImpl(internalTableName); + Ndb_local_table_info *info= get_local_table_info(internalTableName); + if (info == 0) { + return 0; + } + if (data) { + *data= info->m_local_data; + } + return info->m_table_impl; } inline -NdbTableImpl * -NdbDictionaryImpl::getTableImpl(const char * internalTableName) +Ndb_local_table_info * +NdbDictionaryImpl::get_local_table_info(const char * internalTableName) { - NdbTableImpl *ret = m_localHash.get(internalTableName); - - if (ret != 0) { - return ret; // autoincrement already initialized + Ndb_local_table_info *info= m_localHash.get(internalTableName); + if (info != 0) { + return info; // autoincrement already initialized } - - m_globalHash->lock(); - ret = m_globalHash->get(internalTableName); - m_globalHash->unlock(); - - if (ret == 0){ - ret = m_receiver.getTable(internalTableName, m_ndb.usingFullyQualifiedNames()); - m_globalHash->lock(); - m_globalHash->put(internalTableName, ret); - m_globalHash->unlock(); - - if(ret == 0){ - return 0; - } - } - m_localHash.put(internalTableName, ret); - - m_ndb.theFirstTupleId[ret->getTableId()] = ~0; - m_ndb.theLastTupleId[ret->getTableId()] = 
~0; - - addBlobTables(*ret); - - return ret; + return fetchGlobalTableImpl(internalTableName); } inline @@ -654,9 +639,9 @@ NdbDictionaryImpl::getIndex(const char * indexName, internalIndexName = m_ndb.internalizeTableName(indexName); // Index is also a table } if (internalIndexName) { - NdbTableImpl * tab = getTableImpl(internalIndexName); - - if (tab) { + Ndb_local_table_info * info = get_local_table_info(internalIndexName); + if (info) { + NdbTableImpl * tab = info->m_table_impl; if (tab->m_index == 0) tab->m_index = getIndexImpl(indexName, internalIndexName); if (tab->m_index != 0) diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/ndb/src/ndbapi/NdbLinHash.hpp index 5d0d52a31d8..f786600607f 100644 --- a/ndb/src/ndbapi/NdbLinHash.hpp +++ b/ndb/src/ndbapi/NdbLinHash.hpp @@ -59,7 +59,7 @@ public: void releaseHashTable(void); int insertKey(const char * str, Uint32 len, Uint32 lkey1, C* data); - int deleteKey(const char * str, Uint32 len); + C *deleteKey(const char * str, Uint32 len); C* getData(const char *, Uint32); Uint32* getKey(const char *, Uint32); @@ -277,7 +277,7 @@ NdbLinHash::getData( const char* str, Uint32 len ){ template inline -int +C * NdbLinHash::deleteKey ( const char* str, Uint32 len){ const Uint32 hash = Hash(str, len); int dir, seg; @@ -288,19 +288,21 @@ NdbLinHash::deleteKey ( const char* str, Uint32 len){ for(NdbElement_t * chain = *chainp; chain != 0; chain = chain->next){ if(chain->len == len && !memcmp(chain->str, str, len)){ if (oldChain == 0) { + C *data= chain->theData; delete chain; * chainp = 0; - return 1; + return data; } else { + C *data= chain->theData; oldChain->next = chain->next; delete chain; - return 1; + return data; } } else { oldChain = chain; } } - return -1; /* Element doesn't exist */ + return 0; /* Element doesn't exist */ } template diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d35f84a8fc8..2c8f6c9d698 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -87,7 +87,8 @@ static int unpackfrm(const 
void **data, uint *len, const void* pack_data); static int ndb_get_table_statistics(Ndb*, const char *, - Uint64* rows, Uint64* commits); + Uint64* rows, Uint64* commits); + /* Error handling functions @@ -137,6 +138,93 @@ static int ndb_to_mysql_error(const NdbError *err) } +/* + Place holder for ha_ndbcluster thread specific data +*/ + +class Thd_ndb { +public: + Thd_ndb(); + ~Thd_ndb(); + Ndb *ndb; + ulong count; + uint lock_count; +}; + +Thd_ndb::Thd_ndb() +{ + ndb= 0; + lock_count= 0; + count= 0; +} + +Thd_ndb::~Thd_ndb() +{ +} + +/* + * manage uncommitted insert/deletes during transactio to get records correct + */ + +struct Ndb_table_local_info { + int no_uncommitted_rows_count; + ulong transaction_count; + ha_rows records; +}; + +void ha_ndbcluster::records_update() +{ + DBUG_ENTER("ha_ndbcluster::records_update"); + struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; + DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); + if (info->records == ~(ha_rows)0) + { + Uint64 rows; + if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){ + info->records= rows; + } + } + records= info->records+ info->no_uncommitted_rows_count; + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) +{ + DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init"); + struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; + Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb; + if (info->transaction_count != thd_ndb->count) + { + info->transaction_count = thd_ndb->count; + info->no_uncommitted_rows_count= 0; + info->records= ~(ha_rows)0; + DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); + } + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::no_uncommitted_rows_update(int c) +{ + DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); 
+ struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; + info->no_uncommitted_rows_count+= c; + DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); + DBUG_VOID_RETURN; +} + +void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) +{ + DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset"); + ((Thd_ndb*)(thd->transaction.thd_ndb))->count++; + DBUG_VOID_RETURN; +} + /* Take care of the error that occured in NDB @@ -145,6 +233,7 @@ static int ndb_to_mysql_error(const NdbError *err) # The mapped error code */ + int ha_ndbcluster::ndb_err(NdbConnection *trans) { int res; @@ -506,7 +595,7 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_ENTER("get_metadata"); DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path)); - if (!(tab= dict->getTable(m_tabname))) + if (!(tab= dict->getTable(m_tabname, &m_table_info))) ERR_RETURN(dict->getNdbError()); DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); @@ -556,10 +645,6 @@ int ha_ndbcluster::get_metadata(const char *path) // All checks OK, lets use the table m_table= (void*)tab; - Uint64 rows; - if(false && ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){ - records= rows; - } DBUG_RETURN(build_index_list(table, ILBP_OPEN)); } @@ -1480,6 +1565,7 @@ int ha_ndbcluster::write_row(byte *record) Find out how this is detected! 
*/ rows_inserted++; + no_uncommitted_rows_update(1); bulk_insert_not_flushed= true; if ((rows_to_insert == 1) || ((rows_inserted % bulk_insert_rows) == 0) || @@ -1701,6 +1787,8 @@ int ha_ndbcluster::delete_row(const byte *record) ERR_RETURN(trans->getNdbError()); ops_pending++; + no_uncommitted_rows_update(-1); + // If deleting from cursor, NoCommit will be handled in next_result DBUG_RETURN(0); } @@ -1711,6 +1799,8 @@ int ha_ndbcluster::delete_row(const byte *record) op->deleteTuple() != 0) ERR_RETURN(trans->getNdbError()); + no_uncommitted_rows_update(-1); + if (table->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key @@ -2259,7 +2349,10 @@ void ha_ndbcluster::info(uint flag) if (flag & HA_STATUS_CONST) DBUG_PRINT("info", ("HA_STATUS_CONST")); if (flag & HA_STATUS_VARIABLE) + { DBUG_PRINT("info", ("HA_STATUS_VARIABLE")); + records_update(); + } if (flag & HA_STATUS_ERRKEY) { DBUG_PRINT("info", ("HA_STATUS_ERRKEY")); @@ -2558,9 +2651,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) NdbConnection* trans= NULL; DBUG_ENTER("external_lock"); - DBUG_PRINT("enter", ("transaction.ndb_lock_count: %d", - thd->transaction.ndb_lock_count)); - /* Check that this handler instance has a connection set up to the Ndb object of thd @@ -2568,10 +2658,15 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) if (check_ndb_connection()) DBUG_RETURN(1); + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; + + DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d", + thd_ndb->lock_count)); + if (lock_type != F_UNLCK) { DBUG_PRINT("info", ("lock_type != F_UNLCK")); - if (!thd->transaction.ndb_lock_count++) + if (!thd_ndb->lock_count++) { PRINT_OPTION_FLAGS(thd); @@ -2584,6 +2679,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) trans= m_ndb->startTransaction(); if (trans == NULL) ERR_RETURN(m_ndb->getNdbError()); + no_uncommitted_rows_reset(thd); thd->transaction.stmt.ndb_tid= trans; } else @@ -2597,6 +2693,7 @@ 
int ha_ndbcluster::external_lock(THD *thd, int lock_type) trans= m_ndb->startTransaction(); if (trans == NULL) ERR_RETURN(m_ndb->getNdbError()); + no_uncommitted_rows_reset(thd); /* If this is the start of a LOCK TABLE, a table look @@ -2633,11 +2730,12 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) // Start of transaction retrieve_all_fields= FALSE; ops_pending= 0; + no_uncommitted_rows_init(thd); } else { DBUG_PRINT("info", ("lock_type == F_UNLCK")); - if (!--thd->transaction.ndb_lock_count) + if (!--thd_ndb->lock_count) { DBUG_PRINT("trans", ("Last external_lock")); PRINT_OPTION_FLAGS(thd); @@ -2696,6 +2794,7 @@ int ha_ndbcluster::start_stmt(THD *thd) trans= m_ndb->startTransaction(); if (trans == NULL) ERR_RETURN(m_ndb->getNdbError()); + no_uncommitted_rows_reset(thd); thd->transaction.stmt.ndb_tid= trans; } m_active_trans= trans; @@ -2715,7 +2814,7 @@ int ha_ndbcluster::start_stmt(THD *thd) int ndbcluster_commit(THD *thd, void *ndb_transaction) { int res= 0; - Ndb *ndb= (Ndb*)thd->transaction.ndb; + Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; NdbConnection *trans= (NdbConnection*)ndb_transaction; DBUG_ENTER("ndbcluster_commit"); @@ -2733,7 +2832,7 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction) if (res != -1) ndbcluster_print_error(res, error_op); } - ndb->closeTransaction(trans); + ndb->closeTransaction(trans); DBUG_RETURN(res); } @@ -2745,7 +2844,7 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction) int ndbcluster_rollback(THD *thd, void *ndb_transaction) { int res= 0; - Ndb *ndb= (Ndb*)thd->transaction.ndb; + Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; NdbConnection *trans= (NdbConnection*)ndb_transaction; DBUG_ENTER("ndbcluster_rollback"); @@ -3222,9 +3321,9 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_active_cursor(NULL), m_ndb(NULL), m_table(NULL), + m_table_info(NULL), m_table_flags(HA_REC_NOT_IN_SEQ | HA_NULL_IN_KEY | - HA_NOT_EXACT_COUNT | HA_NO_PREFIX_CHAR_KEYS), m_share(0), m_use_write(false), 
@@ -3249,7 +3348,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): // TODO Adjust number of records and other parameters for proper // selection of scan/pk access - records= 100; + // records= 100; + records= 0; block_size= 1024; for (i= 0; i < MAX_KEY; i++) @@ -3401,25 +3501,30 @@ int ha_ndbcluster::check_ndb_connection() Ndb* ndb; DBUG_ENTER("check_ndb_connection"); - if (!thd->transaction.ndb) + if (!thd->transaction.thd_ndb) { ndb= seize_ndb(); if (!ndb) DBUG_RETURN(2); - thd->transaction.ndb= ndb; + thd->transaction.thd_ndb= new Thd_ndb(); + ((Thd_ndb *)thd->transaction.thd_ndb)->ndb= ndb; } - m_ndb= (Ndb*)thd->transaction.ndb; + m_ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; m_ndb->setDatabaseName(m_dbname); DBUG_RETURN(0); } void ndbcluster_close_connection(THD *thd) { + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; Ndb* ndb; DBUG_ENTER("ndbcluster_close_connection"); - ndb= (Ndb*)thd->transaction.ndb; - ha_ndbcluster::release_ndb(ndb); - thd->transaction.ndb= NULL; + if (thd_ndb) + { + ha_ndbcluster::release_ndb(thd_ndb->ndb); + delete thd_ndb; + thd->transaction.thd_ndb= NULL; + } DBUG_VOID_RETURN; } @@ -3539,6 +3644,7 @@ bool ndbcluster_init() (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, (hash_get_key) ndbcluster_get_key,0,0); pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); + ndbcluster_inited= 1; #ifdef USE_DISCOVER_ON_STARTUP if (ndb_discover_tables() != 0) diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index c49a6078e7a..44a6873f4e5 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -214,7 +214,8 @@ class ha_ndbcluster: public handler NdbConnection *m_active_trans; NdbResultSet *m_active_cursor; Ndb *m_ndb; - void *m_table; + void *m_table; + void *m_table_info; char m_dbname[FN_HEADLEN]; //char m_schemaname[FN_HEADLEN]; char m_tabname[FN_HEADLEN]; @@ -238,6 +239,11 @@ class ha_ndbcluster: public handler char *blobs_buffer; uint32 blobs_buffer_size; uint dupkey; + + void 
records_update(); + void no_uncommitted_rows_update(int); + void no_uncommitted_rows_init(THD *); + void no_uncommitted_rows_reset(THD *); }; bool ndbcluster_init(void); diff --git a/sql/sql_class.h b/sql/sql_class.h index 5a5b0fa81ce..387bba43cad 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -764,9 +764,8 @@ public: THD_TRANS all; // Trans since BEGIN WORK THD_TRANS stmt; // Trans for current statement uint bdb_lock_count; - uint ndb_lock_count; #ifdef HAVE_NDBCLUSTER_DB - void* ndb; + void* thd_ndb; #endif bool on; /* From 2ed29f93716a8fa7d20c8a524aec62ea419528a3 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 14 Sep 2004 12:47:34 +0000 Subject: [PATCH 2/2] new method to set size of local table data clearer configure description texts changed Ndb_local_table_info to use create, destroy methods and hidden constructor/destructor move definition of Thd_ndb to .h file and changed seize/release to operate on Thd_ndb instead of Ndb objects moved allocation/deletion of Ndb objects to Thd_ndb ndb/include/ndbapi/NdbDictionary.hpp: new method to set size of local table data ndb/src/mgmsrv/ConfigInfo.cpp: clearer configure description texts ndb/src/ndbapi/DictCache.cpp: changed Ndb_local_table_info to use create, destroy methods and hidden constructor/destructor ndb/src/ndbapi/DictCache.hpp: changed Ndb_local_table_info to use create, destroy methods and hidden constructor/destructor ndb/src/ndbapi/NdbDictionary.cpp: new method to set size of local table data ndb/src/ndbapi/NdbDictionaryImpl.cpp: new method to set size of local table data ndb/src/ndbapi/NdbDictionaryImpl.hpp: new method to set size of local table data sql/ha_ndbcluster.cc: new method to set size of local table data moved allocation/deletion of Ndb objects to Thd_ndb sql/ha_ndbcluster.h: move definition of Thd_ndb to .h file and changed seize/release to operate on Thd_ndb instead of Ndb objects --- ndb/include/ndbapi/NdbDictionary.hpp | 1 + ndb/src/mgmsrv/ConfigInfo.cpp | 66 +++++++++++++------------
ndb/src/ndbapi/DictCache.cpp | 26 ++++++---- ndb/src/ndbapi/DictCache.hpp | 11 +++-- ndb/src/ndbapi/NdbDictionary.cpp | 5 ++ ndb/src/ndbapi/NdbDictionaryImpl.cpp | 7 ++- ndb/src/ndbapi/NdbDictionaryImpl.hpp | 1 + sql/ha_ndbcluster.cc | 72 +++++++++++++--------------- sql/ha_ndbcluster.h | 17 ++++++- 9 files changed, 116 insertions(+), 90 deletions(-) diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index bb7e96bde1b..5c470c1d25f 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -1068,6 +1068,7 @@ public: const char * tableName); public: const Table * getTable(const char * name, void **data); + void set_local_table_data_size(unsigned sz); }; }; diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index 377bc7c435f..ea19bc76d0e 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -33,6 +33,10 @@ * Section names ****************************************************************************/ +#define DB_TOKEN_PRINT "ndbd(DB)" +#define MGM_TOKEN_PRINT "ndb_mgmd(MGM)" +#define API_TOKEN_PRINT "mysqld(API)" + #define DB_TOKEN "DB" #define MGM_TOKEN "MGM" #define API_TOKEN "API" @@ -327,7 +331,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_SYS_PRIMARY_MGM_NODE, "PrimaryMGMNode", "SYSTEM", - "Node id of Primary "MGM_TOKEN" node", + "Node id of Primary "MGM_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -388,7 +392,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ID, "Id", DB_TOKEN, - "Number identifying the database node ("DB_TOKEN")", + "Number identifying the database node ("DB_TOKEN_PRINT")", ConfigInfo::USED, false, ConfigInfo::INT, @@ -484,7 +488,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_INDEX_OPS, "MaxNoOfConcurrentIndexOperations", DB_TOKEN, - "Total number of index operations that can execute simultaneously on one "DB_TOKEN" node", + "Total 
number of index operations that can execute simultaneously on one "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -509,7 +513,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_TRIGGER_OPS, "MaxNoOfFiredTriggers", DB_TOKEN, - "Total number of triggers that can fire simultaneously in one "DB_TOKEN" node", + "Total number of triggers that can fire simultaneously in one "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -568,7 +572,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_STOP_ON_ERROR, "StopOnError", DB_TOKEN, - "If set to N, "DB_TOKEN" automatically restarts/recovers in case of node failure", + "If set to N, "DB_TOKEN_PRINT" automatically restarts/recovers in case of node failure", ConfigInfo::USED, true, ConfigInfo::BOOL, @@ -640,7 +644,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_TRANSACTIONS, "MaxNoOfConcurrentTransactions", DB_TOKEN, - "Max number of transaction executing concurrently on the "DB_TOKEN" node", + "Max number of transaction executing concurrently on the "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -652,7 +656,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_SCANS, "MaxNoOfConcurrentScans", DB_TOKEN, - "Max number of scans executing concurrently on the "DB_TOKEN" node", + "Max number of scans executing concurrently on the "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -664,7 +668,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_TRANS_BUFFER_MEM, "TransactionBufferMemory", DB_TOKEN, - "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN" node", + "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -676,7 +680,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_INDEX_MEM, "IndexMemory", DB_TOKEN, - 
"Number bytes on each "DB_TOKEN" node allocated for storing indexes", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing indexes", ConfigInfo::USED, false, ConfigInfo::INT64, @@ -688,7 +692,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_DATA_MEM, "DataMemory", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for storing data", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing data", ConfigInfo::USED, false, ConfigInfo::INT64, @@ -700,7 +704,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_UNDO_INDEX_BUFFER, "UndoIndexBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for writing UNDO logs for index part", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for index part", ConfigInfo::USED, false, ConfigInfo::INT, @@ -712,7 +716,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_UNDO_DATA_BUFFER, "UndoDataBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for writing UNDO logs for data part", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for data part", ConfigInfo::USED, false, ConfigInfo::INT, @@ -724,7 +728,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_REDO_BUFFER, "RedoBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for writing REDO logs", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing REDO logs", ConfigInfo::USED, false, ConfigInfo::INT, @@ -736,7 +740,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_LONG_SIGNAL_BUFFER, "LongMessageBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for internal long messages", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for internal long messages", ConfigInfo::USED, false, ConfigInfo::INT, @@ -784,7 +788,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_HEARTBEAT_INTERVAL, 
"HeartbeatIntervalDbDb", DB_TOKEN, - "Time between "DB_TOKEN"-"DB_TOKEN" heartbeats. "DB_TOKEN" considered dead after 3 missed HBs", + "Time between "DB_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "DB_TOKEN_PRINT" considered dead after 3 missed HBs", ConfigInfo::USED, true, ConfigInfo::INT, @@ -796,7 +800,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_API_HEARTBEAT_INTERVAL, "HeartbeatIntervalDbApi", DB_TOKEN, - "Time between "API_TOKEN"-"DB_TOKEN" heartbeats. "API_TOKEN" connection closed after 3 missed HBs", + "Time between "API_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "API_TOKEN_PRINT" connection closed after 3 missed HBs", ConfigInfo::USED, true, ConfigInfo::INT, @@ -832,7 +836,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_REDOLOG_FILES, "NoOfFragmentLogFiles", DB_TOKEN, - "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN" node", + "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -844,7 +848,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { KEY_INTERNAL, "MaxNoOfOpenFiles", DB_TOKEN, - "Max number of files open per "DB_TOKEN" node.(One thread is created per file)", + "Max number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)", ConfigInfo::USED, false, ConfigInfo::INT, @@ -998,7 +1002,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_FILESYSTEM_PATH, "FileSystemPath", DB_TOKEN, - "Path to directory where the "DB_TOKEN" node stores its data (directory must exist)", + "Path to directory where the "DB_TOKEN_PRINT" node stores its data (directory must exist)", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1288,7 +1292,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ID, "Id", API_TOKEN, - "Number identifying application node ("API_TOKEN")", + "Number identifying application node ("API_TOKEN_PRINT")", ConfigInfo::USED, 
false, ConfigInfo::INT, @@ -1311,7 +1315,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ARBIT_RANK, "ArbitrationRank", API_TOKEN, - "If 0, then "API_TOKEN" is not arbitrator. Kernel selects arbitrators in order 1, 2", + "If 0, then "API_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1419,7 +1423,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ID, "Id", MGM_TOKEN, - "Number identifying the management server node ("MGM_TOKEN")", + "Number identifying the management server node ("MGM_TOKEN_PRINT")", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1489,7 +1493,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ARBIT_RANK, "ArbitrationRank", MGM_TOKEN, - "If 0, then "MGM_TOKEN" is not arbitrator. Kernel selects arbitrators in order 1, 2", + "If 0, then "MGM_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1550,7 +1554,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "TCP", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1561,7 +1565,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "TCP", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1681,7 +1685,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "SHM", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or 
"MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1704,7 +1708,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "SHM", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1801,7 +1805,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "SCI", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1813,7 +1817,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "SCI", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1956,7 +1960,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "OSE", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1968,7 +1972,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "OSE", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index 0c778d7222e..12300ce216f 100644 --- 
a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -21,19 +21,29 @@ #include #include -Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl, Uint32 sz) +Ndb_local_table_info * +Ndb_local_table_info::create(NdbTableImpl *table_impl, Uint32 sz) +{ + void *data= malloc(sizeof(NdbTableImpl)+sz-1); + if (data == 0) + return 0; + memset(data,0,sizeof(NdbTableImpl)+sz-1); + new (data) Ndb_local_table_info(table_impl); + return (Ndb_local_table_info *) data; +} + +void Ndb_local_table_info::destroy(Ndb_local_table_info *info) +{ + free((void *)info); +} + +Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl) { m_table_impl= table_impl; - if (sz) - m_local_data= malloc(sz); - else - m_local_data= 0; } Ndb_local_table_info::~Ndb_local_table_info() { - if (m_local_data) - free(m_local_data); } LocalDictCache::LocalDictCache(){ @@ -61,7 +71,7 @@ void LocalDictCache::drop(const char * name){ Ndb_local_table_info *info= m_tableHash.deleteKey(name, strlen(name)); DBUG_ASSERT(info != 0); - delete info; + Ndb_local_table_info::destroy(info); } /***************************************************************** diff --git a/ndb/src/ndbapi/DictCache.hpp b/ndb/src/ndbapi/DictCache.hpp index f94ad7a6fa9..0dc853306fa 100644 --- a/ndb/src/ndbapi/DictCache.hpp +++ b/ndb/src/ndbapi/DictCache.hpp @@ -29,12 +29,13 @@ class Ndb_local_table_info { public: - Ndb_local_table_info(NdbTableImpl *table_impl, Uint32 sz=0); - ~Ndb_local_table_info(); + static Ndb_local_table_info *create(NdbTableImpl *table_impl, Uint32 sz=0); + static void destroy(Ndb_local_table_info *); NdbTableImpl *m_table_impl; - Uint64 m_first_tuple_id; - Uint64 m_last_tuple_id; - void *m_local_data; + char m_local_data[1]; +private: + Ndb_local_table_info(NdbTableImpl *table_impl); + ~Ndb_local_table_info(); }; /** diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index a92126abae7..8000b53d3be 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ 
b/ndb/src/ndbapi/NdbDictionary.cpp @@ -688,6 +688,11 @@ NdbDictionary::Dictionary::getTable(const char * name, void **data){ return 0; } +void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz) +{ + m_impl.m_local_table_data_size= sz; +} + const NdbDictionary::Table * NdbDictionary::Dictionary::getTable(const char * name){ return getTable(name, 0); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 6b36b776f14..7be43c46a9b 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -589,6 +589,7 @@ NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb, m_ndb(ndb) { m_globalHash = 0; + m_local_table_data_size= 0; } static int f_dictionary_count = 0; @@ -600,7 +601,7 @@ NdbDictionaryImpl::~NdbDictionaryImpl() while(curr != 0){ m_globalHash->lock(); m_globalHash->release(curr->theData->m_table_impl); - delete curr->theData; + Ndb_local_table_info::destroy(curr->theData); m_globalHash->unlock(); curr = m_localHash.m_tableHash.getNext(curr); @@ -641,9 +642,7 @@ NdbDictionaryImpl::fetchGlobalTableImpl(const char * internalTableName) } } - Ndb_local_table_info *info= new Ndb_local_table_info(impl, 32); - info->m_first_tuple_id= ~0; - info->m_last_tuple_id= ~0; + Ndb_local_table_info *info= Ndb_local_table_info::create(impl, m_local_table_data_size); m_localHash.put(internalTableName, info); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index cd0463f7126..da5e7e45c36 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -400,6 +400,7 @@ public: const NdbError & getNdbError() const; NdbError m_error; + Uint32 m_local_table_data_size; LocalDictCache m_localHash; GlobalDictCache * m_globalHash; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 48543847649..8b6f2d5cfef 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -142,24 +142,17 @@ static int ndb_to_mysql_error(const NdbError 
*err) Place holder for ha_ndbcluster thread specific data */ -class Thd_ndb { -public: - Thd_ndb(); - ~Thd_ndb(); - Ndb *ndb; - ulong count; - uint lock_count; -}; - Thd_ndb::Thd_ndb() { - ndb= 0; + ndb= new Ndb(g_ndb_cluster_connection, ""); lock_count= 0; count= 0; } Thd_ndb::~Thd_ndb() { + if (ndb) + delete ndb; } /* @@ -168,7 +161,7 @@ Thd_ndb::~Thd_ndb() struct Ndb_table_local_info { int no_uncommitted_rows_count; - ulong transaction_count; + ulong last_count; ha_rows records; }; @@ -195,9 +188,9 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init"); struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb; - if (info->transaction_count != thd_ndb->count) + if (info->last_count != thd_ndb->count) { - info->transaction_count = thd_ndb->count; + info->last_count = thd_ndb->count; info->no_uncommitted_rows_count= 0; info->records= ~(ha_rows)0; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", @@ -3346,10 +3339,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_tabname[0]= '\0'; m_dbname[0]= '\0'; - // TODO Adjust number of records and other parameters for proper - // selection of scan/pk access - // records= 100; - records= 0; + records= ~(ha_rows)0; // uninitialized block_size= 1024; for (i= 0; i < MAX_KEY; i++) @@ -3444,41 +3434,44 @@ int ha_ndbcluster::close(void) } -Ndb* ha_ndbcluster::seize_ndb() +Thd_ndb* ha_ndbcluster::seize_thd_ndb() { - Ndb* ndb; - DBUG_ENTER("seize_ndb"); + Thd_ndb *thd_ndb; + DBUG_ENTER("seize_thd_ndb"); #ifdef USE_NDB_POOL // Seize from pool ndb= Ndb::seize(); + xxxxxxxxxxxxxx error #else - ndb= new Ndb(g_ndb_cluster_connection, ""); + thd_ndb= new Thd_ndb(); #endif - if (ndb->init(max_transactions) != 0) + thd_ndb->ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info)); + if (thd_ndb->ndb->init(max_transactions) != 0) { - ERR_PRINT(ndb->getNdbError()); + 
ERR_PRINT(thd_ndb->ndb->getNdbError()); /* TODO Alt.1 If init fails because to many allocated Ndb wait on condition for a Ndb object to be released. Alt.2 Seize/release from pool, wait until next release */ - delete ndb; - ndb= NULL; + delete thd_ndb; + thd_ndb= NULL; } - DBUG_RETURN(ndb); + DBUG_RETURN(thd_ndb); } -void ha_ndbcluster::release_ndb(Ndb* ndb) +void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb) { - DBUG_ENTER("release_ndb"); + DBUG_ENTER("release_thd_ndb"); #ifdef USE_NDB_POOL // Release to pool Ndb::release(ndb); + xxxxxxxxxxxx error #else - delete ndb; + delete thd_ndb; #endif DBUG_VOID_RETURN; } @@ -3497,19 +3490,18 @@ void ha_ndbcluster::release_ndb(Ndb* ndb) int ha_ndbcluster::check_ndb_connection() { - THD* thd= current_thd; - Ndb* ndb; + THD *thd= current_thd; + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; DBUG_ENTER("check_ndb_connection"); - if (!thd->transaction.thd_ndb) + if (!thd_ndb) { - ndb= seize_ndb(); - if (!ndb) + thd_ndb= seize_thd_ndb(); + if (!thd_ndb) DBUG_RETURN(2); - thd->transaction.thd_ndb= new Thd_ndb(); - ((Thd_ndb *)thd->transaction.thd_ndb)->ndb= ndb; + thd->transaction.thd_ndb= thd_ndb; } - m_ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; + m_ndb= thd_ndb->ndb; m_ndb->setDatabaseName(m_dbname); DBUG_RETURN(0); } @@ -3517,12 +3509,10 @@ int ha_ndbcluster::check_ndb_connection() void ndbcluster_close_connection(THD *thd) { Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; - Ndb* ndb; DBUG_ENTER("ndbcluster_close_connection"); if (thd_ndb) { - ha_ndbcluster::release_ndb(thd_ndb->ndb); - delete thd_ndb; + ha_ndbcluster::release_thd_ndb(thd_ndb); thd->transaction.thd_ndb= NULL; } DBUG_VOID_RETURN; @@ -3543,6 +3533,7 @@ int ndbcluster_discover(const char *dbname, const char *name, DBUG_PRINT("enter", ("db: %s, name: %s", dbname, name)); Ndb ndb(g_ndb_cluster_connection, dbname); + ndb.getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info)); if (ndb.init()) ERR_RETURN(ndb.getNdbError()); @@ 
-3633,6 +3624,7 @@ bool ndbcluster_init() // Create a Ndb object to open the connection to NDB g_ndb= new Ndb(g_ndb_cluster_connection, "sys"); + g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info)); if (g_ndb->init() != 0) { ERR_PRINT (g_ndb->getNdbError()); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 44a6873f4e5..f223ada55b1 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -63,6 +63,19 @@ typedef struct st_ndbcluster_share { uint table_name_length,use_count; } NDB_SHARE; +/* + Place holder for ha_ndbcluster thread specific data +*/ + +class Thd_ndb { + public: + Thd_ndb(); + ~Thd_ndb(); + Ndb *ndb; + ulong count; + uint lock_count; +}; + class ha_ndbcluster: public handler { public: @@ -147,8 +160,8 @@ class ha_ndbcluster: public handler void start_bulk_insert(ha_rows rows); int end_bulk_insert(); - static Ndb* seize_ndb(); - static void release_ndb(Ndb* ndb); + static Thd_ndb* seize_thd_ndb(); + static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; } private: