From 7515696d89b2317206a836c5a4530251cf6158e7 Mon Sep 17 00:00:00 2001
From: "mskold@mysql.com" <>
Date: Wed, 8 Feb 2006 09:58:47 +0100
Subject: [PATCH 1/3] NdbEventOperationImpl.cpp, NdbEventOperation.cpp, NdbEventOperation.hpp: Put back getTable NdbDictionaryImpl.cpp, NdbDictionary.cpp, NdbDictionary.hpp: Added putTable

---
 storage/ndb/include/ndbapi/NdbDictionary.hpp     | 8 ++++++++
 storage/ndb/include/ndbapi/NdbEventOperation.hpp | 1 +
 storage/ndb/src/ndbapi/NdbDictionary.cpp         | 7 +++++++
 storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp     | 7 +++++++
 storage/ndb/src/ndbapi/NdbEventOperation.cpp     | 4 ++++
 storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 8 ++++++++
 6 files changed, 35 insertions(+)

diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index d96ebc896c3..8139cc3b570 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -1588,6 +1588,14 @@ public:
      */
     const Table * getTable(const char * name) const;
 
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+    /*
+     * Save a table definition in dictionary cache
+     * @param table  Object to put into cache
+     */
+    void putTable(const Table * table);
+#endif
+
     /**
      * Get index with given name, NULL if undefined
      * @param indexName  Name of index to get.
diff --git a/storage/ndb/include/ndbapi/NdbEventOperation.hpp b/storage/ndb/include/ndbapi/NdbEventOperation.hpp
index 698b66ce701..25d7b8c6644 100644
--- a/storage/ndb/include/ndbapi/NdbEventOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbEventOperation.hpp
@@ -220,6 +220,7 @@ public:
 
 #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
   /** these are subject to change at any time */
+  const NdbDictionary::Table* getTable() const;
   const NdbDictionary::Event *getEvent() const;
   const NdbRecAttr *getFirstPkAttr() const;
   const NdbRecAttr *getFirstPkPreAttr() const;
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index ea60b36fdee..5b8e06f5df8 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -1337,6 +1337,13 @@ NdbDictionary::Dictionary::getTable(const char * name, void **data) const
   return 0;
 }
 
+void NdbDictionary::Dictionary::putTable(const NdbDictionary::Table * table)
+{
+  NdbDictionary::Table *copy_table = new NdbDictionary::Table;
+  *copy_table = *table;
+  m_impl.putTable(&NdbTableImpl::getImpl(*copy_table));
+}
+
 void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz)
 {
   m_impl.m_local_table_data_size= sz;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 75760fb4019..6866c059119 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -1271,7 +1271,14 @@ NdbDictionaryImpl::fetchGlobalTableImpl(const BaseString& internalTableName)
 void
 NdbDictionaryImpl::putTable(NdbTableImpl *impl)
 {
+  NdbTableImpl *old;
+
   m_globalHash->lock();
+  if ((old= m_globalHash->get(impl->m_internalName.c_str())))
+  {
+    old->m_status = NdbDictionary::Object::Invalid;
+    m_globalHash->drop(old);
+  }
   m_globalHash->put(impl->m_internalName.c_str(), impl);
   m_globalHash->unlock();
   Ndb_local_table_info *info=
diff --git a/storage/ndb/src/ndbapi/NdbEventOperation.cpp b/storage/ndb/src/ndbapi/NdbEventOperation.cpp
index 71bb8889614..4e96ee63565 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperation.cpp
@@ -144,6 +144,10 @@ NdbEventOperation::print()
 /*
  * Internal for the mysql server
  */
+const NdbDictionary::Table *NdbEventOperation::getTable() const
+{
+  return m_impl.m_eventImpl->m_tableImpl->m_facade;
+}
 const NdbDictionary::Event *NdbEventOperation::getEvent() const
 {
   return m_impl.m_eventImpl->m_facade;
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 0a9993e33a1..2acfa339d37 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -639,6 +639,14 @@ NdbEventOperationImpl::receive_event()
                                          m_buffer.length() / 4,
                                          true);
     m_buffer.clear();
+    if (at)
+      at->buildColumnHash();
+    else
+    {
+      DBUG_PRINT_EVENT("info", ("Failed to parse DictTabInfo error %u",
+                                error.code));
+      DBUG_RETURN_EVENT(1);
+    }
     if ( m_eventImpl->m_tableImpl)
       delete m_eventImpl->m_tableImpl;
     m_eventImpl->m_tableImpl = at;
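
The two calls added in this patch are meant to be used together by the mysql server: when an event operation reports a schema change, the subscriber fetches the new table definition from the event and pushes it into the local dictionary cache. A minimal sketch (illustrative only, not part of the patch; the Ndb object "ndb", the event operation "op" and all error handling are assumed):

    // Sketch only: assumes an existing Ndb* "ndb" and an NdbEventOperation*
    // "op" that has just delivered an event.
    if (op->getEventType() == NdbDictionary::Event::TE_ALTER)
    {
      // New in this patch: the altered table definition travels with the event
      const NdbDictionary::Table *altered= op->getTable();
      // New in this patch: replace the cached definition so that later
      // Dictionary::getTable() calls see the new version
      ndb->getDictionary()->putTable(altered);
    }
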
From 5db302c8348570e530c6b6ad59444c0e9836bfef Mon Sep 17 00:00:00 2001
From: "mskold@mysql.com" <>
Date: Mon, 13 Feb 2006 11:23:13 +0100
Subject: [PATCH 2/3] Added on-line handling of altered frm in binlog thread

---
 mysql-test/r/ndb_alter_table_row.result |   9 ++
 mysql-test/t/ndb_alter_table_row.test   |   2 +
 sql/ha_ndbcluster.cc                    |   4 +-
 sql/ha_ndbcluster_binlog.cc             | 117 ++++++++++++++++++++----
 sql/ha_ndbcluster_binlog.h              |   2 +
 5 files changed, 112 insertions(+), 22 deletions(-)

diff --git a/mysql-test/r/ndb_alter_table_row.result b/mysql-test/r/ndb_alter_table_row.result
index 450b2c9a5af..ee7c9b1c7b0 100644
--- a/mysql-test/r/ndb_alter_table_row.result
+++ b/mysql-test/r/ndb_alter_table_row.result
@@ -3,10 +3,19 @@ create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
 engine=ndb;
 insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
 create index c on t1(c);
+show indexes from t1;
+Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment
+t1	0	PRIMARY	1	a	A	3	NULL	NULL		BTREE	
+t1	1	b	1	b	A	3	NULL	NULL	YES	BTREE	
+t1	1	c	1	c	A	3	NULL	NULL	YES	BTREE	
 select * from t1 where c = 'two';
 a	b	c
 2	two	two
 alter table t1 drop index c;
+show indexes from t1;
+Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment
+t1	0	PRIMARY	1	a	A	3	NULL	NULL		BTREE	
+t1	1	b	1	b	A	3	NULL	NULL	YES	BTREE	
 select * from t1 where c = 'two';
 a	b	c
 2	two	two
diff --git a/mysql-test/t/ndb_alter_table_row.test b/mysql-test/t/ndb_alter_table_row.test
index 5dbfa26289b..9c834e0dd20 100644
--- a/mysql-test/t/ndb_alter_table_row.test
+++ b/mysql-test/t/ndb_alter_table_row.test
@@ -13,10 +13,12 @@ engine=ndb;
 insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
 create index c on t1(c);
 connection server2;
+show indexes from t1;
 select * from t1 where c = 'two';
 connection server1;
 alter table t1 drop index c;
 connection server2;
+show indexes from t1;
 select * from t1 where c = 'two';
 connection server1;
 drop table t1;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index f1fdfe86930..acd8208c324 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -993,8 +993,8 @@ bool ha_ndbcluster::uses_blob_value()
     -2       Meta data has changed; Re-read data and try again
 */
 
-static int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-                   uint pack_length)
+int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
+            uint pack_length)
 {
   DBUG_ENTER("cmp_frm");
   /*
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index b349e3320de..783cb0a39b7 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -1291,20 +1291,87 @@ static int
 ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
                          NDB_SHARE *share)
 {
+  DBUG_ENTER("ndb_handle_schema_change");
   int remote_drop_table= 0, do_close_cached_tables= 0;
+  const char *dbname= share->table->s->db.str;
+  const char *tabname= share->table->s->table_name.str;
+  bool online_alter_table= (pOp->getEventType() == NDBEVENT::TE_ALTER &&
+                            pOp->tableFrmChanged());
 
   if (pOp->getEventType() != NDBEVENT::TE_CLUSTER_FAILURE &&
       pOp->getReqNodeId() != g_ndb_cluster_connection->node_id())
   {
-    ndb->setDatabaseName(share->table->s->db.str);
+    NDBDICT *dict= ndb->getDictionary();
+    NdbDictionary::Dictionary::List index_list;
+
+    ndb->setDatabaseName(dbname);
+    // Invalidating indexes
+    if (! dict->listIndexes(index_list, tabname))
+    {
+      for (unsigned i = 0; i < index_list.count; i++) {
+        NdbDictionary::Dictionary::List::Element& index=
+          index_list.elements[i];
+        DBUG_PRINT("info", ("Invalidating index %s.%s",
+                            index.database, index.name));
+        dict->invalidateIndex(index.name, tabname);
+      }
+    }
+    // Invalidate table
     ha_ndbcluster::invalidate_dictionary_cache(share->table->s,
                                                ndb,
-                                               share->table->s->db.str,
-                                               share->table->s->table_name.str,
+                                               dbname,
+                                               tabname,
                                                TRUE);
+
+    if (online_alter_table)
+    {
+      char key[FN_REFLEN];
+      const void *data= 0, *pack_data= 0;
+      uint length, pack_length;
+      int error;
+
+      DBUG_PRINT("info", ("Detected frm change of table %s.%s",
+                          dbname, tabname));
+      const NDBTAB *altered_table= pOp->getEvent()->getTable();
+      bool remote_event=
+        pOp->getReqNodeId() != g_ndb_cluster_connection->node_id();
+      strxnmov(key, FN_LEN-1, mysql_data_home, "/",
+               dbname, "/", tabname, NullS);
+      /*
+        If the frm of the altered table is different than the one on
+        disk then overwrite it with the new table definition
+      */
+      if (remote_event &&
+          readfrm(key, &data, &length) == 0 &&
+          packfrm(data, length, &pack_data, &pack_length) == 0 &&
+          cmp_frm(altered_table, pack_data, pack_length))
+      {
+        DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
+                  altered_table->getFrmLength());
+        pthread_mutex_lock(&LOCK_open);
+        dict->putTable(altered_table);
+
+        if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
+            (error= writefrm(key, data, length)))
+        {
+          sql_print_information("NDB: Failed write frm for %s.%s, error %d",
+                                dbname, tabname, error);
+        }
+        pthread_mutex_unlock(&LOCK_open);
+        close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0);
+      }
+    }
     remote_drop_table= 1;
   }
 
+  // If only frm was changed continue replicating
+  if (online_alter_table)
+  {
+    /* Signal ha_ndbcluster::alter_table that drop is done */
+    (void) pthread_cond_signal(&injector_cond);
+    DBUG_RETURN(0);
+  }
+
   (void) pthread_mutex_lock(&share->mutex);
   DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
   if (share->op_old == pOp)
@@ -1385,7 +1452,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
       /* fall through */
     case SOT_ALTER_TABLE:
       /* fall through */
-      if (!ndb_binlog_running)
+      if (ndb_binlog_running)
       {
         log_query= 1;
         break; /* discovery will be handled by binlog */
@@ -1482,11 +1549,16 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
         // skip
         break;
       case NDBEVENT::TE_ALTER:
-        /* do the rename of the table in the share */
-        share->table->s->db.str= share->db;
-        share->table->s->db.length= strlen(share->db);
-        share->table->s->table_name.str= share->table_name;
-        share->table->s->table_name.length= strlen(share->table_name);
+        if (pOp->tableNameChanged())
+        {
+          DBUG_PRINT("info", ("Detected name change of table %s.%s",
+                              share->db, share->table_name));
+          /* do the rename of the table in the share */
+          share->table->s->db.str= share->db;
+          share->table->s->db.length= strlen(share->db);
+          share->table->s->table_name.str= share->table_name;
+          share->table->s->table_name.length= strlen(share->table_name);
+        }
         ndb_handle_schema_change(thd, ndb, pOp, share);
         break;
       case NDBEVENT::TE_CLUSTER_FAILURE:
@@ -2357,17 +2429,22 @@ ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
                              share->key, share, pOp, share->op, share->op_old));
     break;
   case NDBEVENT::TE_ALTER:
-    /* ToDo: remove printout */
-    if (ndb_extra_logging)
-      sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.",
-                            share_prefix, share->table->s->db.str,
-                            share->table->s->table_name.str,
-                            share->key);
-    /* do the rename of the table in the share */
-    share->table->s->db.str= share->db;
-    share->table->s->db.length= strlen(share->db);
-    share->table->s->table_name.str= share->table_name;
-    share->table->s->table_name.length= strlen(share->table_name);
+    if (pOp->tableNameChanged())
+    {
+      DBUG_PRINT("info", ("Detected name change of table %s.%s",
+                          share->db, share->table_name));
+      /* ToDo: remove printout */
+      if (ndb_extra_logging)
+        sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.",
+                              share_prefix, share->table->s->db.str,
+                              share->table->s->table_name.str,
+                              share->key);
+      /* do the rename of the table in the share */
+      share->table->s->db.str= share->db;
+      share->table->s->db.length= strlen(share->db);
+      share->table->s->table_name.str= share->table_name;
+      share->table->s->table_name.length= strlen(share->table_name);
+    }
     goto drop_alter_common;
   case NDBEVENT::TE_DROP:
     if (apply_status_share == share)
diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h
index a297f80f6ab..88f476357b2 100644
--- a/sql/ha_ndbcluster_binlog.h
+++ b/sql/ha_ndbcluster_binlog.h
@@ -122,6 +122,8 @@ ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
   prototypes for ndb handler utility function also needed by
   the ndb binlog code
 */
+int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
+            uint pack_length);
 int ndbcluster_find_all_files(THD *thd);
 
 #endif /* HAVE_NDB_BINLOG */
From e62db4e18c4151adf3d940dcfaf86c86fc560604 Mon Sep 17 00:00:00 2001
From: "mskold@mysql.com" <>
Date: Thu, 16 Feb 2006 10:07:31 +0100
Subject: [PATCH 3/3] Improvement of on-line discovery in injector thread

---
 sql/ha_ndbcluster.cc                          |  12 +-
 sql/ha_ndbcluster.h                           |   5 +-
 sql/ha_ndbcluster_binlog.cc                   | 195 +++++++++---------
 storage/ndb/include/kernel/ndb_limits.h       |   1 +
 .../include/kernel/signaldata/AlterTable.hpp  |   1 +
 .../ndb/src/kernel/blocks/backup/Backup.hpp   |   2 +-
 storage/ndb/src/kernel/blocks/suma/Suma.cpp   |  10 +-
 storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp  |   2 +
 8 files changed, 127 insertions(+), 101 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 14a2159209c..9f49ffab097 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -519,6 +519,7 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
     {
       NDBINDEX *index = (NDBINDEX *) m_index[i].index;
       NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
+      if (!index && !unique_index) continue;
       NDB_INDEX_TYPE idx_type= m_index[i].type;
 
       switch (idx_type) {
@@ -1076,7 +1077,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   m_table= (void *)tab;
   m_table_info= NULL; // Set in external lock
-  DBUG_RETURN(open_indexes(ndb, table));
+  DBUG_RETURN(open_indexes(ndb, table, FALSE));
 }
 
 static int
 fix_unique_index_attr_order(NDB_INDEX_DATA &data,
@@ -1249,7 +1250,7 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
 /*
   Associate index handles for each index of a table
 */
-int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab)
+int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
 {
   uint i;
   int error= 0;
@@ -1263,7 +1264,10 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab)
   for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
   {
     if ((error= add_index_handle(thd, dict, key_info, *key_name, i)))
-      break;
+      if (ignore_error)
+        m_index[i].index= m_index[i].unique_index= NULL;
+      else
+        break;
   }
 
   DBUG_RETURN(error);
@@ -3699,7 +3703,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
     {
       m_table= (void *)tab;
       m_table_version = tab->getObjectVersion();
-      if (!(my_errno= open_indexes(ndb, table)))
+      if (!(my_errno= open_indexes(ndb, table, FALSE)))
         DBUG_RETURN(my_errno);
     }
     m_table_info= tab_info;
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 12a5be32881..1c3a57caff3 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -691,6 +691,9 @@ static void set_tabname(const char *pathname, char *tabname);
 
 private:
   friend int ndbcluster_drop_database_impl(const char *path);
+  friend int ndb_handle_schema_change(THD *thd,
+                                      Ndb *ndb, NdbEventOperation *pOp,
+                                      NDB_SHARE *share);
   int alter_table_name(const char *to);
   static int delete_table(ha_ndbcluster *h, Ndb *ndb,
                           const char *path,
@@ -708,7 +711,7 @@ private:
   int create_indexes(Ndb *ndb, TABLE *tab);
   void clear_index(int i);
   void clear_indexes();
-  int open_indexes(Ndb *ndb, TABLE *tab);
+  int open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error);
   void renumber_indexes(Ndb *ndb, TABLE *tab);
   int drop_indexes(Ndb *ndb, TABLE *tab);
   int add_index_handle(THD *thd, NdbDictionary::Dictionary *dict,
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 783cb0a39b7..6e80c27719a 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -233,6 +233,72 @@ static void run_query(THD *thd, char *buf, char *end,
   }
 }
 
+int
+ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
+                             TABLE_SHARE *table_share, TABLE *table)
+{
+  int error;
+  MEM_ROOT *mem_root= &share->mem_root;
+  DBUG_ENTER("ndbcluster_binlog_open_table");
+
+  init_tmp_table_share(table_share, share->db, 0, share->table_name,
+                       share->key);
+  if ((error= open_table_def(thd, table_share, 0)))
+  {
+    sql_print_error("Unable to get table share for %s, error=%d",
+                    share->key, error);
+    DBUG_PRINT("error", ("open_table_def failed %d", error));
+    my_free((gptr) table_share, MYF(0));
+    table_share= 0;
+    my_free((gptr) table, MYF(0));
+    table= 0;
+    DBUG_RETURN(error);
+  }
+  if ((error= open_table_from_share(thd, table_share, "", 0,
+                                    (uint) READ_ALL, 0, table, FALSE)))
+  {
+    sql_print_error("Unable to open table for %s, error=%d(%d)",
+                    share->key, error, my_errno);
+    DBUG_PRINT("error", ("open_table_from_share failed %d", error));
+    my_free((gptr) table_share, MYF(0));
+    table_share= 0;
+    my_free((gptr) table, MYF(0));
+    table= 0;
+    DBUG_RETURN(error);
+  }
+  assign_new_table_id(table);
+  if (!table->record[1] || table->record[1] == table->record[0])
+  {
+    table->record[1]= alloc_root(&table->mem_root,
+                                 table->s->rec_buff_length);
+  }
+  table->in_use= injector_thd;
+
+  table->s->db.str= share->db;
+  table->s->db.length= strlen(share->db);
+  table->s->table_name.str= share->table_name;
+  table->s->table_name.length= strlen(share->table_name);
+
+  share->table_share= table_share;
+  share->table= table;
+#ifndef DBUG_OFF
+  dbug_print_table("table", table);
+#endif
+  /*
+    ! do not touch the contents of the table
+    it may be in use by the injector thread
+  */
+  share->ndb_value[0]= (NdbValue*)
+    alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
+               + 1 /*extra for hidden key*/);
+  share->ndb_value[1]= (NdbValue*)
+    alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
+               +1 /*extra for hidden key*/);
+
+  DBUG_RETURN(0);
+}
+
+
 /*
   Initialize the binlog part of the NDB_SHARE
 */
@@ -260,64 +326,12 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
   }
   while (1)
   {
+    int error;
     TABLE_SHARE *table_share=
      (TABLE_SHARE *) my_malloc(sizeof(*table_share), MYF(MY_WME));
     TABLE *table= (TABLE*) my_malloc(sizeof(*table), MYF(MY_WME));
-    int error;
-
-    init_tmp_table_share(table_share, share->db, 0, share->table_name,
-                         share->key);
-    if ((error= open_table_def(thd, table_share, 0)))
-    {
-      sql_print_error("Unable to get table share for %s, error=%d",
-                      share->key, error);
-      DBUG_PRINT("error", ("open_table_def failed %d", error));
-      my_free((gptr) table_share, MYF(0));
-      table_share= 0;
-      my_free((gptr) table, MYF(0));
-      table= 0;
+    if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table)))
       break;
-    }
-    if ((error= open_table_from_share(thd, table_share, "", 0,
-                                      (uint) READ_ALL, 0, table, FALSE)))
-    {
-      sql_print_error("Unable to open table for %s, error=%d(%d)",
-                      share->key, error, my_errno);
-      DBUG_PRINT("error", ("open_table_from_share failed %d", error));
-      my_free((gptr) table_share, MYF(0));
-      table_share= 0;
-      my_free((gptr) table, MYF(0));
-      table= 0;
-      break;
-    }
-    assign_new_table_id(table);
-    if (!table->record[1] || table->record[1] == table->record[0])
-    {
-      table->record[1]= alloc_root(&table->mem_root,
-                                   table->s->rec_buff_length);
-    }
-    table->in_use= injector_thd;
-
-    table->s->db.str= share->db;
-    table->s->db.length= strlen(share->db);
-    table->s->table_name.str= share->table_name;
-    table->s->table_name.length= strlen(share->table_name);
-
-    share->table_share= table_share;
-    share->table= table;
-#ifndef DBUG_OFF
-    dbug_print_table("table", table);
-#endif
-    /*
-      ! do not touch the contents of the table
-      it may be in use by the injector thread
-    */
-    share->ndb_value[0]= (NdbValue*)
-      alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
-                 + 1 /*extra for hidden key*/);
-    share->ndb_value[1]= (NdbValue*)
-      alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
-                 +1 /*extra for hidden key*/);
     {
       int i, no_nodes= g_ndb_cluster_connection->no_db_nodes();
       share->subscriber_bitmap= (MY_BITMAP*)
@@ -651,10 +665,10 @@ static int ndbcluster_create_apply_status_table(THD *thd)
     if so, remove it since there is none in Ndb
   */
   {
-    strxnmov(buf, sizeof(buf),
-             mysql_data_home,
-             "/" NDB_REP_DB "/" NDB_APPLY_TABLE,
-             reg_ext, NullS);
+    build_table_filename(buf, sizeof(buf),
+                         NDB_REP_DB,
+                         NDB_APPLY_TABLE,
+                         reg_ext);
     unpack_filename(buf,buf);
     my_delete(buf, MYF(0));
   }
@@ -703,10 +717,10 @@ static int ndbcluster_create_schema_table(THD *thd)
     if so, remove it since there is none in Ndb
  */
  {
-    strxnmov(buf, sizeof(buf),
-             mysql_data_home,
-             "/" NDB_REP_DB "/" NDB_SCHEMA_TABLE,
-             reg_ext, NullS);
+    build_table_filename(buf, sizeof(buf),
+                         NDB_REP_DB,
+                         NDB_SCHEMA_TABLE,
+                         reg_ext);
     unpack_filename(buf,buf);
     my_delete(buf, MYF(0));
   }
@@ -1287,7 +1301,8 @@ end:
 /*
   Handle _non_ data events from the storage nodes
 */
-static int
+//static int
+int
 ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
                          NDB_SHARE *share)
 {
@@ -1299,50 +1314,37 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
                             pOp->tableFrmChanged());
 
   if (pOp->getEventType() != NDBEVENT::TE_CLUSTER_FAILURE &&
-      pOp->getReqNodeId() != g_ndb_cluster_connection->node_id())
+      (uint) pOp->getReqNodeId() != g_ndb_cluster_connection->node_id())
   {
-    NDBDICT *dict= ndb->getDictionary();
-    NdbDictionary::Dictionary::List index_list;
-
-    ndb->setDatabaseName(dbname);
-    // Invalidating indexes
-    if (! dict->listIndexes(index_list, tabname))
-    {
-      for (unsigned i = 0; i < index_list.count; i++) {
-        NdbDictionary::Dictionary::List::Element& index=
-          index_list.elements[i];
-        DBUG_PRINT("info", ("Invalidating index %s.%s",
-                            index.database, index.name));
-        dict->invalidateIndex(index.name, tabname);
-      }
-    }
-    // Invalidate table
-    ha_ndbcluster::invalidate_dictionary_cache(share->table->s,
-                                               ndb,
-                                               dbname,
-                                               tabname,
-                                               TRUE);
-
+    TABLE_SHARE *table_share= share->table->s; //share->table_share;
+    TABLE* table= share->table;
+
+    /*
+      Invalidate table and all it's indexes
+    */
+    ha_ndbcluster table_handler(table_share);
+    table_handler.set_dbname(share->key);
+    table_handler.set_tabname(share->key);
+    table_handler.open_indexes(ndb, table, TRUE);
+    table_handler.invalidate_dictionary_cache(TRUE);
+
     if (online_alter_table)
     {
       char key[FN_REFLEN];
       const void *data= 0, *pack_data= 0;
       uint length, pack_length;
       int error;
+      NDBDICT *dict= ndb->getDictionary();
+      const NDBTAB *altered_table= pOp->getTable();
 
       DBUG_PRINT("info", ("Detected frm change of table %s.%s",
                           dbname, tabname));
-      const NDBTAB *altered_table= pOp->getEvent()->getTable();
-      bool remote_event=
-        pOp->getReqNodeId() != g_ndb_cluster_connection->node_id();
-      strxnmov(key, FN_LEN-1, mysql_data_home, "/",
-               dbname, "/", tabname, NullS);
+      build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
      /*
        If the frm of the altered table is different than the one on
        disk then overwrite it with the new table definition
      */
-      if (remote_event &&
-          readfrm(key, &data, &length) == 0 &&
+      if (readfrm(key, &data, &length) == 0 &&
          packfrm(data, length, &pack_data, &pack_length) == 0 &&
          cmp_frm(altered_table, pack_data, pack_length))
      {
@@ -1359,6 +1361,12 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
         }
         pthread_mutex_unlock(&LOCK_open);
         close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0);
+        /*
+        if ((error= ndbcluster_binlog_open_table(thd, share,
+                                                 table_share, table)))
+          sql_print_information("NDB: Failed to re-open table %s.%s",
+                                dbname, tabname);
+        */
       }
     }
     remote_drop_table= 1;
@@ -1838,6 +1846,7 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
   /* Handle any trailing share */
   NDB_SHARE *share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
                                              (byte*) key, key_len);
+
   if (share && share_may_exist)
   {
     if (share->flags & NSF_NO_BINLOG ||
diff --git a/storage/ndb/include/kernel/ndb_limits.h b/storage/ndb/include/kernel/ndb_limits.h
index ef6b8370888..6bf9256bf95 100644
--- a/storage/ndb/include/kernel/ndb_limits.h
+++ b/storage/ndb/include/kernel/ndb_limits.h
@@ -66,6 +66,7 @@
 #define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
 #define MAX_NDB_PARTITIONS 1024
 #define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data
+#define MAX_WORDS_META_FILE 16382
 
 #define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
 /*
diff --git a/storage/ndb/include/kernel/signaldata/AlterTable.hpp b/storage/ndb/include/kernel/signaldata/AlterTable.hpp
index 427179a389f..1cdc7c00fcb 100644
--- a/storage/ndb/include/kernel/signaldata/AlterTable.hpp
+++ b/storage/ndb/include/kernel/signaldata/AlterTable.hpp
@@ -39,6 +39,7 @@ class AlterTableReq {
   friend class NdbEventOperationImpl;
   friend class NdbDictInterface;
   friend class Dbdict;
+  friend class Suma;
 
   /**
    * For printing
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
index 9ad244ed3e4..a7d73801665 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -530,7 +530,7 @@ public:
   Config c_defaults;
   Uint32 m_diskless;
 
-  STATIC_CONST(NO_OF_PAGES_META_FILE = 2);
+  STATIC_CONST(NO_OF_PAGES_META_FILE = MAX_WORDS_META_FILE/BACKUP_WORDS_PER_PAGE);
 
   /**
    * Pools
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index a3859012139..8e7a7a091af 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include <signaldata/AlterTable.hpp>
 #include
 #include
 #include
@@ -3440,7 +3441,7 @@ Suma::execDROP_TAB_CONF(Signal *signal)
   DBUG_VOID_RETURN;
 }
 
-static Uint32 b_dti_buf[10000];
+static Uint32 b_dti_buf[MAX_WORDS_META_FILE];
 
 void
 Suma::execALTER_TAB_REQ(Signal *signal)
@@ -3462,7 +3463,7 @@ Suma::execALTER_TAB_REQ(Signal *signal)
   }
   DBUG_PRINT("info",("alter table id: %d[i=%u]", tableId, tabPtr.i));
 
-
+  Table::State old_state = tabPtr.p->m_state;
   tabPtr.p->m_state = Table::ALTERED;
   // triggers must be removed, waiting for sub stop req for that
@@ -3520,6 +3521,11 @@ Suma::execALTER_TAB_REQ(Signal *signal)
       DBUG_PRINT("info",("sent to subscriber %d", subbPtr.i));
     }
   }
+  if (AlterTableReq::getFrmFlag(changeMask))
+  {
+    // Frm changes only are handled on-line
+    tabPtr.p->m_state = old_state;
+  }
   DBUG_VOID_RETURN;
 }
 
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 5a458c24aa2..60186837b09 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -1320,6 +1320,8 @@ NdbDictionaryImpl::putTable(NdbTableImpl *impl)
 
   m_localHash.put(impl->m_internalName.c_str(), info);
 
+  addBlobTables(*impl);
+
   m_ndb.theFirstTupleId[impl->getTableId()] = ~0;
   m_ndb.theLastTupleId[impl->getTableId()] = ~0;
 }
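
The central change in this last patch is how the injector thread invalidates a table after a schema change: instead of walking the NDB dictionary index list itself, ndb_handle_schema_change() now builds a throw-away ha_ndbcluster handler on the share and reuses the handler's own invalidation code. Condensed (a sketch for readability, not part of the patch; share and ndb are the injector thread's NDB_SHARE and Ndb objects):

    // Sketch of the invalidation now done in ndb_handle_schema_change()
    TABLE_SHARE *table_share= share->table->s;
    TABLE *table= share->table;

    ha_ndbcluster table_handler(table_share);
    table_handler.set_dbname(share->key);
    table_handler.set_tabname(share->key);
    // ignore_error=TRUE: indexes that no longer exist (e.g. just dropped)
    // get NULL handles instead of aborting the open
    table_handler.open_indexes(ndb, table, TRUE);
    table_handler.invalidate_dictionary_cache(TRUE);
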