BUG#18094 Slave caches invalid table definition after alters causes select failure
- return correct object status from ndb dictionary
- check for validity of index retrieved from dictionary, and retry if invalid
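The fix applies the same detect-and-refresh pattern everywhere the handler touches a cached NDB table definition. A minimal sketch of that pattern in C++ (illustrative only, not the committed code; `dict` and `m_tabname` stand for the dictionary handle and table name used inside ha_ndbcluster):

  // If the locally cached table object has been marked Invalid (for example
  // after an ALTER TABLE applied on the slave), drop the stale copy from the
  // dictionary cache and fetch a fresh definition before using it.
  const NdbDictionary::Table *tab= dict->getTable(m_tabname);
  if (tab && tab->getObjectStatus() == NdbDictionary::Object::Invalid)
  {
    dict->removeCachedTable(tab);    // Table* overload added by this patch
    tab= dict->getTable(m_tabname);  // re-read the current definition
  }
  if (tab == 0)
    return 1;                        // caller checks dict->getNdbError()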
@@ -122,3 +122,28 @@ select * from t1 order by nid;
 nid nom prenom
 1 DEAD ABC1
 DROP TABLE t1;
+CREATE TABLE t1 (c1 INT KEY) ENGINE=NDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+ALTER TABLE t1 ADD c2 INT;
+SELECT * FROM t1 ORDER BY c1;
+c1 c2
+1 NULL
+2 NULL
+3 NULL
+4 NULL
+5 NULL
+6 NULL
+7 NULL
+8 NULL
+9 NULL
+10 NULL
+ALTER TABLE t1 CHANGE c2 c2 TEXT CHARACTER SET utf8;
+ALTER TABLE t1 CHANGE c2 c2 BLOB;
+SELECT * FROM t1 ORDER BY c1 LIMIT 5;
+c1 c2
+1 NULL
+2 NULL
+3 NULL
+4 NULL
+5 NULL
+DROP TABLE t1;
@@ -25,9 +25,9 @@ rpl_ndb_2innodb : BUG#19004 2006-03-22 tomas ndb: partition by range an
 rpl_ndb_2myisam : BUG#19004 2006-03-22 tomas ndb: partition by range and update hangs
 rpl_ndb_auto_inc : BUG#17086 2006-02-16 jmiller CR: auto_increment_increment and auto_increment_offset produce duplicate key er
 rpl_ndb_ddl : result file needs update + test needs to checked
-rpl_ndb_innodb2ndb : BUG#18094 2006-03-16 mats Slave caches invalid table definition after atlters causes select failure
+rpl_ndb_innodb2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
 rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ
-rpl_ndb_myisam2ndb : BUG#18094 2006-03-16 mats Slave caches invalid table definition after atlters causes select failure
+rpl_ndb_myisam2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
 rpl_ndb_relay_space : BUG#16993 2006-02-16 jmiller RBR: ALTER TABLE ZEROFILL AUTO_INCREMENT is not replicated correctly
 rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian
 rpl_row_basic_7ndb : BUG#17400 2006-04-09 brian Cluster Replication: delete & update of rows in table without pk fails on slave.
@@ -143,6 +143,37 @@ COMMIT;
 --connection slave
 select * from t1 order by nid;
 
+# cleanup
+--connection master
+DROP TABLE t1;
+
+
+#
+# BUG#18094
+# Slave caches invalid table definition after atlters causes select failure
+#
+--connection master
+CREATE TABLE t1 (c1 INT KEY) ENGINE=NDB;
+
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+
+ALTER TABLE t1 ADD c2 INT;
+
+--sync_slave_with_master
+connection slave;
+SELECT * FROM t1 ORDER BY c1;
+
+connection master;
+ALTER TABLE t1 CHANGE c2 c2 TEXT CHARACTER SET utf8;
+ALTER TABLE t1 CHANGE c2 c2 BLOB;
+
+--sync_slave_with_master
+connection slave;
+# here we would get error 1412 prior to bug
+SELECT * FROM t1 ORDER BY c1 LIMIT 5;
+
+
+
 # cleanup
 --connection master
 DROP TABLE t1;
@@ -466,7 +466,7 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
     # The mapped error code
 */
 
-int ha_ndbcluster::invalidate_dictionary_cache(bool global)
+int ha_ndbcluster::invalidate_dictionary_cache(bool global, const NDBTAB *ndbtab)
 {
   NDBDICT *dict= get_ndb()->getDictionary();
   DBUG_ENTER("invalidate_dictionary_cache");
@@ -494,20 +494,17 @@ int ha_ndbcluster::invalidate_dictionary_cache(bool global)
       DBUG_PRINT("info", ("Released ndbcluster mutex"));
     }
 #endif
-    const NDBTAB *tab= dict->getTable(m_tabname);
-    if (!tab)
-      DBUG_RETURN(1);
-    if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+    if (!ndbtab)
     {
-      // Global cache has already been invalidated
-      dict->removeCachedTable(m_tabname);
-      global= FALSE;
-      DBUG_PRINT("info", ("global: %d", global));
+      ndbtab= dict->getTable(m_tabname);
+      if (!ndbtab)
+        DBUG_RETURN(1);
     }
-    else
-      dict->invalidateTable(m_tabname);
+    dict->invalidateTable(ndbtab);
     table_share->version= 0L;  /* Free when thread is ready */
   }
+  else if (ndbtab)
+    dict->removeCachedTable(ndbtab);
   else
     dict->removeCachedTable(m_tabname);
 
@@ -564,7 +561,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
     table_list.alias= table_list.table_name= m_tabname;
     close_cached_tables(current_thd, 0, &table_list);
 
-    invalidate_dictionary_cache(TRUE);
+    invalidate_dictionary_cache(TRUE, m_table);
 
     if (err.code==284)
     {
@@ -1041,7 +1038,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   // Check if thread has stale local cache
   if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
   {
-    invalidate_dictionary_cache(FALSE);
+    invalidate_dictionary_cache(FALSE, tab);
     if (!(tab= dict->getTable(m_tabname)))
       ERR_RETURN(dict->getNdbError());
     DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
@@ -1064,7 +1061,7 @@ int ha_ndbcluster::get_metadata(const char *path)
       if (!invalidating_ndb_table)
       {
         DBUG_PRINT("info", ("Invalidating table"));
-        invalidate_dictionary_cache(TRUE);
+        invalidate_dictionary_cache(TRUE, tab);
         invalidating_ndb_table= TRUE;
       }
       else
@@ -1091,7 +1088,7 @@ int ha_ndbcluster::get_metadata(const char *path)
     DBUG_RETURN(error);
 
   m_table_version= tab->getObjectVersion();
-  m_table= (void *)tab;
+  m_table= tab;
   m_table_info= NULL; // Set in external lock
 
   DBUG_RETURN(open_indexes(ndb, table, FALSE));
@@ -1150,7 +1147,7 @@ int ha_ndbcluster::table_changed(const void *pack_frm_data, uint pack_frm_len)
   // Check if thread has stale local cache
   if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
   {
-    dict->removeCachedTable(m_tabname);
+    dict->removeCachedTable(orig_tab);
     if (!(orig_tab= dict->getTable(m_tabname)))
       ERR_RETURN(dict->getNdbError());
   }
@@ -1219,13 +1216,31 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
   int error= 0;
   NDB_INDEX_TYPE idx_type= get_index_type_from_table(index_no);
   m_index[index_no].type= idx_type;
-  DBUG_ENTER("ha_ndbcluster::get_index_handle");
+  DBUG_ENTER("ha_ndbcluster::add_index_handle");
+  DBUG_PRINT("enter", ("table %s", m_tabname));
 
   if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX)
   {
     DBUG_PRINT("info", ("Get handle to index %s", index_name));
-    const NDBINDEX *index= dict->getIndex(index_name, m_tabname);
-    if (!index) ERR_RETURN(dict->getNdbError());
+    const NDBINDEX *index;
+    do
+    {
+      index= dict->getIndex(index_name, m_tabname);
+      if (!index)
+        ERR_RETURN(dict->getNdbError());
+      DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
+                          index,
+                          index->getObjectId(),
+                          index->getObjectVersion() & 0xFFFFFF,
+                          index->getObjectVersion() >> 24,
+                          index->getObjectStatus()));
+      if (index->getObjectStatus() != NdbDictionary::Object::Retrieved)
+      {
+        dict->removeCachedIndex(index);
+        continue;
+      }
+      break;
+    } while (1);
     m_index[index_no].index= (void *) index;
     // ordered index - add stats
     NDB_INDEX_DATA& d=m_index[index_no];
@@ -1254,8 +1269,25 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
     m_has_unique_index= TRUE;
     strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS);
     DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name));
-    const NDBINDEX *index= dict->getIndex(unique_index_name, m_tabname);
-    if (!index) ERR_RETURN(dict->getNdbError());
+    const NDBINDEX *index;
+    do
+    {
+      index= dict->getIndex(unique_index_name, m_tabname);
+      if (!index)
+        ERR_RETURN(dict->getNdbError());
+      DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
+                          index,
+                          index->getObjectId(),
+                          index->getObjectVersion() & 0xFFFFFF,
+                          index->getObjectVersion() >> 24,
+                          index->getObjectStatus()));
+      if (index->getObjectStatus() != NdbDictionary::Object::Retrieved)
+      {
+        dict->removeCachedIndex(index);
+        continue;
+      }
+      break;
+    } while (1);
     m_index[index_no].unique_index= (void *) index;
     error= fix_unique_index_attr_order(m_index[index_no], index, key_info);
   }
@@ -3954,7 +3986,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
       if ((trans && tab->getObjectStatus() != NdbDictionary::Object::Retrieved)
           || tab->getObjectStatus() == NdbDictionary::Object::Invalid)
       {
-        invalidate_dictionary_cache(FALSE);
+        invalidate_dictionary_cache(FALSE, tab);
        if (!(tab= dict->getTable(m_tabname, &tab_info)))
          ERR_RETURN(dict->getNdbError());
        DBUG_PRINT("info", ("Table schema version: %d",
@@ -3970,7 +4002,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
       }
       if (m_table != (void *)tab)
       {
-        m_table= (void *)tab;
+        m_table= tab;
        m_table_version = tab->getObjectVersion();
        if (!(my_errno= open_indexes(ndb, table, FALSE)))
          DBUG_RETURN(my_errno);
@@ -4990,7 +5022,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
   // Check if thread has stale local cache
   if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
   {
-    dict->removeCachedTable(m_tabname);
+    dict->removeCachedTable(orig_tab);
     if (!(orig_tab= dict->getTable(m_tabname)))
       ERR_RETURN(dict->getNdbError());
   }
@@ -5002,7 +5034,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
       DBUG_ASSERT(r == 0);
     }
 #endif
-  m_table= (void *)orig_tab;
+  m_table= orig_tab;
   // Change current database to that of target table
   set_dbname(to);
   ndb->setDatabaseName(m_dbname);
@@ -9988,7 +10020,7 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
     // Check if thread has stale local cache
     if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
     {
-      invalidate_dictionary_cache(FALSE);
+      invalidate_dictionary_cache(FALSE, tab);
      if (!(tab= dict->getTable(m_tabname)))
        ERR_BREAK(dict->getNdbError(), err);
     }
@@ -778,7 +778,8 @@ private:
   void print_results();
 
   ulonglong get_auto_increment();
-  int invalidate_dictionary_cache(bool global);
+  int invalidate_dictionary_cache(bool global,
+                                  const NdbDictionary::Table *ndbtab);
   int ndb_err(NdbTransaction*);
   bool uses_blob_value();
 
@@ -816,7 +817,7 @@ private:
 
   NdbTransaction *m_active_trans;
   NdbScanOperation *m_active_cursor;
-  void *m_table;
+  const NdbDictionary::Table *m_table;
   int m_table_version;
   void *m_table_info;
   char m_dbname[FN_HEADLEN];
@@ -1068,20 +1068,27 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
   MY_BITMAP schema_subscribers;
   uint32 bitbuf[sizeof(ndb_schema_object->slock)/4];
   {
-    int i;
+    int i, updated= 0;
+    int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
     bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, false);
     bitmap_set_all(&schema_subscribers);
     (void) pthread_mutex_lock(&schema_share->mutex);
-    for (i= 0; i < ndb_number_of_storage_nodes; i++)
+    for (i= 0; i < no_storage_nodes; i++)
     {
       MY_BITMAP *table_subscribers= &schema_share->subscriber_bitmap[i];
       if (!bitmap_is_clear_all(table_subscribers))
+      {
         bitmap_intersect(&schema_subscribers,
                          table_subscribers);
+        updated= 1;
+      }
     }
     (void) pthread_mutex_unlock(&schema_share->mutex);
-    bitmap_clear_bit(&schema_subscribers, node_id);
+    if (updated)
+      bitmap_clear_bit(&schema_subscribers, node_id);
+    else
+      bitmap_clear_all(&schema_subscribers);
 
     if (ndb_schema_object)
     {
       (void) pthread_mutex_lock(&ndb_schema_object->mutex);
@@ -1227,13 +1234,14 @@ end:
   {
     struct timespec abstime;
     int i;
+    int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
     set_timespec(abstime, 1);
     int ret= pthread_cond_timedwait(&injector_cond,
                                     &ndb_schema_object->mutex,
                                     &abstime);
 
     (void) pthread_mutex_lock(&schema_share->mutex);
-    for (i= 0; i < ndb_number_of_storage_nodes; i++)
+    for (i= 0; i < no_storage_nodes; i++)
     {
       /* remove any unsubscribed from schema_subscribers */
       MY_BITMAP *tmp= &schema_share->subscriber_bitmap[i];
@@ -1466,7 +1474,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
     (void)strxmov(table_handler.m_dbname, dbname, NullS);
     (void)strxmov(table_handler.m_tabname, tabname, NullS);
     table_handler.open_indexes(ndb, table, TRUE);
-    table_handler.invalidate_dictionary_cache(TRUE);
+    table_handler.invalidate_dictionary_cache(TRUE, 0);
     thd_ndb->ndb= old_ndb;
 
   /*
@@ -1555,7 +1563,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
     table_handler.set_dbname(share->key);
     table_handler.set_tabname(share->key);
     table_handler.open_indexes(ndb, table, TRUE);
-    table_handler.invalidate_dictionary_cache(TRUE);
+    table_handler.invalidate_dictionary_cache(TRUE, 0);
     thd_ndb->ndb= old_ndb;
   }
   DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
@@ -1745,11 +1745,15 @@ public:
                          const char * tableName);
 
 #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+    void removeCachedTable(const Table *table);
+    void removeCachedIndex(const Index *index);
+    void invalidateTable(const Table *table);
     /**
      * Invalidate cached index object
      */
     void invalidateIndex(const char * indexName,
                          const char * tableName);
+    void invalidateIndex(const Index *index);
     /**
      * Force gcp and wait for gcp complete
      */
@@ -772,17 +772,17 @@ NdbDictionary::Index::getLogging() const {
 
 NdbDictionary::Object::Status
 NdbDictionary::Index::getObjectStatus() const {
-  return m_impl.m_status;
+  return m_impl.m_table->m_status;
 }
 
 int
 NdbDictionary::Index::getObjectVersion() const {
-  return m_impl.m_version;
+  return m_impl.m_table->m_version;
 }
 
 int
 NdbDictionary::Index::getObjectId() const {
-  return m_impl.m_id;
+  return m_impl.m_table->m_id;
 }
 
 
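With the three getters above reading from the underlying index-table impl, Index::getObjectStatus() now reliably flags a stale cached index, which is what lets add_index_handle() retry. A minimal usage sketch in C++ (illustrative only; the Ndb handle `ndb`, table name "t1" and index name "ix1" are assumptions, not from the patch):

  NdbDictionary::Dictionary *dict= ndb->getDictionary();
  const NdbDictionary::Index *index;
  for (;;)
  {
    index= dict->getIndex("ix1", "t1");
    if (index == 0)
      break;                           // error: inspect dict->getNdbError()
    if (index->getObjectStatus() != NdbDictionary::Object::Retrieved)
    {
      dict->removeCachedIndex(index);  // drop the stale cached object (overload added below)
      continue;                        // and fetch a fresh one
    }
    break;                             // index definition is current
  }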
@@ -1395,6 +1395,12 @@ NdbDictionary::Dictionary::invalidateTable(const char * name){
   DBUG_VOID_RETURN;
 }
 
+void
+NdbDictionary::Dictionary::invalidateTable(const Table *table){
+  NdbTableImpl &t = NdbTableImpl::getImpl(*table);
+  m_impl.invalidateObject(t);
+}
+
 void
 NdbDictionary::Dictionary::removeCachedTable(const char * name){
   NdbTableImpl * t = m_impl.getTable(name);
@@ -1402,6 +1408,12 @@ NdbDictionary::Dictionary::removeCachedTable(const char * name){
     m_impl.removeCachedObject(* t);
 }
 
+void
+NdbDictionary::Dictionary::removeCachedTable(const Table *table){
+  NdbTableImpl &t = NdbTableImpl::getImpl(*table);
+  m_impl.removeCachedObject(t);
+}
+
 int
 NdbDictionary::Dictionary::createIndex(const Index & ind)
 {
@@ -1425,6 +1437,15 @@ NdbDictionary::Dictionary::getIndex(const char * indexName,
   return 0;
 }
 
+void
+NdbDictionary::Dictionary::invalidateIndex(const Index *index){
+  DBUG_ENTER("NdbDictionary::Dictionary::invalidateIndex");
+  NdbIndexImpl &i = NdbIndexImpl::getImpl(*index);
+  assert(i.m_table != 0);
+  m_impl.invalidateObject(* i.m_table);
+  DBUG_VOID_RETURN;
+}
+
 void
 NdbDictionary::Dictionary::invalidateIndex(const char * indexName,
                                            const char * tableName){
@@ -1443,6 +1464,15 @@ NdbDictionary::Dictionary::forceGCPWait()
   return m_impl.forceGCPWait();
 }
 
+void
+NdbDictionary::Dictionary::removeCachedIndex(const Index *index){
+  DBUG_ENTER("NdbDictionary::Dictionary::removeCachedIndex");
+  NdbIndexImpl &i = NdbIndexImpl::getImpl(*index);
+  assert(i.m_table != 0);
+  m_impl.removeCachedObject(* i.m_table);
+  DBUG_VOID_RETURN;
+}
+
 void
 NdbDictionary::Dictionary::removeCachedIndex(const char * indexName,
                                              const char * tableName){