diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index f4e5f08ae63..f9508e6c6c9 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -147,6 +147,7 @@ mwagner@work.mysql.com mydev@mysql.com mysql@home.(none) mysql@mc04.(none) +mysqldev@bk-internal.mysql.com mysqldev@build.mysql2.com mysqldev@melody.local mysqldev@mysql.com diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc new file mode 100644 index 00000000000..d0c083cab86 --- /dev/null +++ b/mysql-test/include/have_multi_ndb.inc @@ -0,0 +1,28 @@ +# Setup connections to both MySQL Servers connected to the cluster +connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); +connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); + +# Check that server1 has NDB support +connection server1; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id.require show variables like "server_id"; +enable_query_log; + +# Check that server2 has NDB support +connection server2; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id1.require show variables like "server_id"; +enable_query_log; + +# Set the default connection to 'server1' +connection server1; diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc index 84e60657876..d000a954733 100644 --- a/mysql-test/include/have_ndb.inc +++ b/mysql-test/include/have_ndb.inc @@ -2,6 +2,4 @@ disable_query_log; show variables like "have_ndbcluster"; enable_query_log; -#connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK); -#connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,$MASTER_MYSOCK1); -#connection server1; + diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result index 714e1831267..7423771e026 100644 --- a/mysql-test/r/ndb_cache.result +++ b/mysql-test/r/ndb_cache.result @@ -1,43 +1,191 @@ +drop table if exists t1; +set GLOBAL query_cache_type=on; set GLOBAL query_cache_size=1355776; reset query cache; flush status; -drop table if exists t1,t2; -CREATE TABLE t1 (a int) ENGINE=ndbcluster; -CREATE TABLE t2 (a int); +CREATE TABLE t1 ( pk int not null primary key, +a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); select * from t1; -a +pk a b c +1 2 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +update t1 set a=3 where pk=1; +select * from t1; +pk a b c +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 
2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +use test; +select * from t1; +pk a b c +2 7 8 Second row +1 3 3 First row +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 4 +update t1 set a=4 where b=3; +use test; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 5 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +begin; +update t1 set a=5 where pk=1; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 show status like "Qcache_inserts"; Variable_name Value -Qcache_inserts 0 +Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value -Qcache_hits 0 -select * from t2; -a -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 +Qcache_hits 7 select * from t1; -a -select * from t2; -a +pk a b c +2 7 8 Second row +1 4 3 First row show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 1 show status like "Qcache_inserts"; Variable_name Value -Qcache_inserts 1 +Qcache_inserts 8 show status like "Qcache_hits"; Variable_name Value -Qcache_hits 1 -drop table t1, t2; +Qcache_hits 7 +commit; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 8 +drop table t1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 SET GLOBAL query_cache_size=0; diff --git a/mysql-test/r/ndb_cache_multi.result b/mysql-test/r/ndb_cache_multi.result new file mode 100644 index 00000000000..c7135ed9e8a --- /dev/null 
+++ b/mysql-test/r/ndb_cache_multi.result @@ -0,0 +1,72 @@ +drop table if exists t1, t2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +2 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +update t1 set a=3 where a=2; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +drop table t1, t2; diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result new file mode 100644 index 00000000000..4a2389cd1ff --- /dev/null +++ b/mysql-test/r/ndb_multi.result @@ -0,0 +1,49 @@ +drop table if exists t1, t2, t3, t4; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 0 +flush status; +select * from t1; +a +2 +update t1 set a=3 where a=2; +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 1 +create table t3 (a int not null primary key, b varchar(22), +c int, last_col text) engine=ndb; +insert into t3 values(1, 'Hi!', 89, 'Longtext column'); +create table t4 (pk int primary key, b int) engine=ndb; +select * from t1; +a +3 +select * from t3; +a b c last_col +1 Hi! 
89 Longtext column +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 1 +show tables like 't4'; +Tables_in_test (t4) +t4 +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 2 +show tables; +Tables_in_test +t1 +t2 +t3 +t4 +drop table t1, t2, t3, t4; diff --git a/mysql-test/r/server_id.require b/mysql-test/r/server_id.require new file mode 100644 index 00000000000..adffcc483b1 --- /dev/null +++ b/mysql-test/r/server_id.require @@ -0,0 +1,2 @@ +Variable_name Value +server_id 1 diff --git a/mysql-test/r/server_id1.require b/mysql-test/r/server_id1.require new file mode 100644 index 00000000000..666c94ef633 --- /dev/null +++ b/mysql-test/r/server_id1.require @@ -0,0 +1,2 @@ +Variable_name Value +server_id 102
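The result files above (ndb_cache.result, ndb_cache_multi.result, ndb_multi.result) are easier to read with the invalidation scheme of this patch in mind: a cached query remembers the commit count of each NDB table it read, and a later lookup re-checks that count against the cluster, so an update made through the other mysqld turns a would-be hit into a miss. Below is a minimal standalone model of that behaviour, not MySQL code; ClusterTable, ServerCache and CacheEntry are illustrative names only:

```cpp
// Toy model: two server-local query caches validating entries against a
// commit counter stored in the shared cluster.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct ClusterTable {            // shared state, as the NDB kernel sees it
  uint64_t commit_count = 0;
  int value = 2;
};

struct CacheEntry { int result; uint64_t commit_count_at_insert; };

struct ServerCache {             // one per mysqld
  std::map<std::string, CacheEntry> entries;

  // Returns true on a usable hit; drops the entry if the table has changed.
  bool lookup(const std::string& query, const ClusterTable& t, int* result) {
    auto it = entries.find(query);
    if (it == entries.end()) return false;
    if (it->second.commit_count_at_insert != t.commit_count) {
      entries.erase(it);         // commit count moved on: invalidate
      return false;
    }
    *result = it->second.result;
    return true;
  }
  void store(const std::string& query, int result, const ClusterTable& t) {
    entries[query] = {result, t.commit_count};
  }
};

int main() {
  ClusterTable t1;
  ServerCache server1, server2;

  server1.store("select * from t1", t1.value, t1);   // cached on server1

  t1.value = 3; ++t1.commit_count;                   // update through server2

  int r;
  std::cout << (server1.lookup("select * from t1", t1, &r)
                ? "hit" : "miss, re-read from NDB")
            << "\n";                                 // prints "miss, re-read from NDB"
}
```

The only point of the model is the lookup step: a mismatch between the stored and the current commit count invalidates the entry, which is what the drops of Qcache_queries_in_cache in ndb_cache_multi.result record.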
diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index abd09424f64..8bdcbe17728 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -1,31 +1,121 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc +--disable_warnings +drop table if exists t1; +--enable_warnings + +# Turn on and reset query cache +set GLOBAL query_cache_type=on; set GLOBAL query_cache_size=1355776; reset query cache; flush status; ---disable_warnings -drop table if exists t1,t2; ---enable_warnings - -CREATE TABLE t1 (a int) ENGINE=ndbcluster; -CREATE TABLE t2 (a int); +# Create test table in NDB +CREATE TABLE t1 ( pk int not null primary key, + a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); +# Perform one query which should be inserted in query cache select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -select * from t2; -show status like "Qcache_queries_in_cache"; + +# Perform the same query and make sure the query cache is hit +select * from t1; +show status like "Qcache_hits"; + +# Update the table and make sure the correct data is returned +update t1 set a=3 where pk=1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Insert a new record and make sure the correct data is returned +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; show status like "Qcache_inserts"; show status like "Qcache_hits"; select * from t1; -select * from t2; +show status like "Qcache_hits"; + +# Perform a "new" query and make sure the query cache is not hit +select * from t1 where b=3; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_hits"; + +# Same query again... +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Delete from the table +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Start another connection and check that the query cache is hit +connect (con1,localhost,root,,); +connection con1; +use test; +select * from t1; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Update the table and switch to other connection +update t1 set a=4 where b=3; +connect (con2,localhost,root,,); +connection con2; +use test; +show status like "Qcache_queries_in_cache"; +select * from t1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -drop table t1, t2; +# Use transactions and make sure the query cache is not updated until +# the transaction is committed +begin; +update t1 set a=5 where pk=1; +# Note!! the test below shows that the table is invalidated +# before the transaction is committed +# TODO Fix so that cache is not invalidated HERE! +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +commit; +# TODO Here the query is invalidated once again, the commit count in NDB has changed +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1; + +show status like "Qcache_queries_in_cache"; SET GLOBAL query_cache_size=0; + + diff --git a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test new file mode 100644 index 00000000000..7202b5f8558 --- /dev/null +++ b/mysql-test/t/ndb_cache_multi.test @@ -0,0 +1,64 @@ +-- source include/have_query_cache.inc +-- source include/have_ndb.inc +-- source include/have_multi_ndb.inc + +--disable_warnings +drop table if exists t1, t2; +--enable_warnings + + +# Turn on and reset query cache on server1 +connection server1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + +# Turn on and reset query cache on server2 +connection server2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + + + +# Create test tables in NDB and load them into cache +# on server1 +connection server1; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + + +# Connect server2, load table into cache, then update the table +connection server2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +update t1 set a=3 where a=2; + +# 
Connect to server1 and check that cache is invalidated +# and correct data is returned +connection server1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1, t2; + + diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test new file mode 100644 index 00000000000..9286721b677 --- /dev/null +++ b/mysql-test/t/ndb_multi.test @@ -0,0 +1,44 @@ +-- source include/have_ndb.inc +-- source include/have_multi_ndb.inc + + +--disable_warnings +drop table if exists t1, t2, t3, t4; +--enable_warnings + +flush status; + +# Create test tables on server1 +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like 'handler_discover%'; + +# Connect to server2 and use the tables from there +connection server2; +flush status; +select * from t1; +update t1 set a=3 where a=2; +show status like 'handler_discover%'; + +# Create a new table on server2 +create table t3 (a int not null primary key, b varchar(22), +c int, last_col text) engine=ndb; +insert into t3 values(1, 'Hi!', 89, 'Longtext column'); +create table t4 (pk int primary key, b int) engine=ndb; + +# Check that the tables are accessible from server1 +connection server1; +select * from t1; +select * from t3; +show status like 'handler_discover%'; +show tables like 't4'; +show status like 'handler_discover%'; +show tables; + +drop table t1, t2, t3, t4; + + diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index cc69762cbdb..51dd672c012 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -658,8 +658,9 @@ innobase_query_caching_of_table_permitted( char* full_name, /* in: concatenation of database name, the null character '\0', and the table name */ - uint full_name_len) /* in: length of the full name, i.e. + uint full_name_len, /* in: length of the full name, i.e. 
len(dbname) + len(tablename) + 1 */ + ulonglong *unused) /* unused for this engine */ { ibool is_autocommit; trx_t* trx; diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index e76a966c6b9..e3b058d0b42 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -33,6 +33,10 @@ typedef struct st_innobase_share { } INNOBASE_SHARE; +my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, + uint full_name_len, + ulonglong *unused); + /* The class defining a handle to an Innodb table */ class ha_innobase: public handler { @@ -168,6 +172,20 @@ class ha_innobase: public handler void init_table_handle_for_HANDLER(); longlong get_auto_increment(); uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } + /* + ask handler about permission to cache table during query registration + */ + my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *call_back, + ulonglong *engine_data) + { + *call_back= innobase_query_caching_of_table_permitted; + *engine_data= 0; + return innobase_query_caching_of_table_permitted(thd, table_key, + key_length, + engine_data); + } static char *get_mysql_bin_log_name(); static ulonglong get_mysql_bin_log_pos(); @@ -233,8 +251,6 @@ int innobase_close_connection(THD *thd); int innobase_drop_database(char *path); int innodb_show_status(THD* thd); -my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, - uint full_name_len); void innobase_release_temporary_latches(void* innobase_tid); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 029fe31ecf7..8b10bd12f98 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3057,7 +3057,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_transaction_on= FALSE; else m_transaction_on= thd->variables.ndb_use_transactions; - // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: @@ -3789,9 +3788,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_ha_not_exact_count(FALSE), m_force_send(TRUE), m_autoincrement_prefetch(32), - m_transaction_on(TRUE), - m_use_local_query_cache(FALSE) -{ + m_transaction_on(TRUE) +{ int i; DBUG_ENTER("ha_ndbcluster"); @@ -4506,10 +4504,129 @@ const char* ha_ndbcluster::index_type(uint key_number) } uint8 ha_ndbcluster::table_cache_type() { - if (m_use_local_query_cache) - return HA_CACHE_TBL_TRANSACT; + DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT"); + DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); +} + +static +my_bool +ndbcluster_cache_retrieval_allowed( +/*======================================*/ + /* out: TRUE if permitted, FALSE if not; + note that the value FALSE means invalidation + of query cache if *engine_data is changed */ + THD* thd, /* in: thd of the user who is trying to + store a result to the query cache or + retrieve it */ + char* full_name, /* in: concatenation of database name, + the null character '\0', and the table + name */ + uint full_name_len, /* in: length of the full name, i.e. 
+ len(dbname) + len(tablename) + 1 */ + ulonglong *engine_data) /* in: value set in call to + ha_ndbcluster::cached_table_registration + out: if return FALSE this is used to invalidate + all cached queries with this table*/ +{ + DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); + char tabname[128]; + char *dbname= full_name; + my_bool is_autocommit; + { + int dbname_len= strlen(full_name); + int tabname_len= full_name_len-dbname_len-1; + memcpy(tabname, full_name+dbname_len+1, tabname_len); + tabname[tabname_len]= '\0'; + } + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + is_autocommit = FALSE; else - return HA_CACHE_TBL_NOCACHE; + is_autocommit = TRUE; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", + dbname,tabname,is_autocommit)); + if (!is_autocommit) + { + DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", + thd->options & OPTION_NOT_AUTOCOMMIT, + thd->options & OPTION_BEGIN)); + // ToDo enable cache inside a transaction + // no need to invalidate though so leave *engine_data + DBUG_RETURN(FALSE); + } + { + Ndb *ndb; + Uint64 commit_count; + if (!(ndb= check_ndb_in_thd(thd))) + { + *engine_data= *engine_data+1; // invalidate + DBUG_RETURN(FALSE); + } + ndb->setDatabaseName(dbname); + if (ndb_get_table_statistics(ndb, tabname, 0, &commit_count)) + { + *engine_data= *engine_data+1; // invalidate + DBUG_RETURN(FALSE); + } + if (*engine_data != commit_count) + { + *engine_data= commit_count; // invalidate + DBUG_RETURN(FALSE); + } + } + DBUG_PRINT("exit",("*engine_data=%d ok, use cache",*engine_data)); + DBUG_RETURN(TRUE); +} + +my_bool +ha_ndbcluster::cached_table_registration( +/*======================================*/ + /* out: TRUE if permitted, FALSE if not; + note that the value FALSE means invalidation + of query cache if *engine_data is changed */ + THD* thd, /* in: thd of the user who is trying to + store a result to the query cache or + retrieve it */ + char* full_name, /* in: concatenation of database name, + the null character '\0', and the table + name */ + uint full_name_len, /* in: length of the full name, i.e. 
+ len(dbname) + len(tablename) + 1 */ + qc_engine_callback + *engine_callback, /* out: function to be called before using + cache on this table */ + ulonglong *engine_data) /* out: if return FALSE this is used to + invalidate all cached queries with this table*/ +{ + DBUG_ENTER("ha_ndbcluster::cached_table_registration"); + my_bool is_autocommit; + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + is_autocommit = FALSE; + else + is_autocommit = TRUE; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", + m_dbname,m_tabname,is_autocommit)); + if (!is_autocommit) + { + DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", + thd->options & OPTION_NOT_AUTOCOMMIT, + thd->options & OPTION_BEGIN)); + // ToDo enable cache inside a transaction + // no need to invalidate though so leave *engine_data + DBUG_RETURN(FALSE); + } + { + Uint64 commit_count; + m_ndb->setDatabaseName(m_dbname); + if (ndb_get_table_statistics(m_ndb, m_tabname, 0, &commit_count)) + { + *engine_data= 0; + DBUG_RETURN(FALSE); + } + *engine_data= commit_count; + } + *engine_callback= ndbcluster_cache_retrieval_allowed; + DBUG_PRINT("exit",("*engine_data=%d", *engine_data)); + DBUG_RETURN(TRUE); } /* diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 2d7b14b2311..93c060d2107 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -146,7 +146,10 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type(); - + my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data); private: int alter_table_name(const char *to); int drop_table(); @@ -244,7 +247,6 @@ class ha_ndbcluster: public handler bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; - bool m_use_local_query_cache; void set_rec_per_key(); void records_update(); diff --git a/sql/handler.cc b/sql/handler.cc index 3200c6932e9..cdfb628287a 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -229,15 +229,6 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) } } -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type) -{ -#ifdef HAVE_INNOBASE_DB - if (cache_type == HA_CACHE_TBL_ASKTRANSACT) - return innobase_query_caching_of_table_permitted(thd, table_key, key_length); -#endif - return 1; -} int ha_init() { diff --git a/sql/handler.h b/sql/handler.h index 245defe61e0..547eda47ddd 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -507,10 +507,15 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* - Is query with this table cachable (have sense only for ASKTRANSACT - tables) - */ + /* ask handler about permission to cache table during query registration */ + virtual my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data) + { + *engine_callback= 0; + return 1; + } }; /* Some extern variables used with handlers */ @@ -529,8 +534,6 @@ extern TYPELIB tx_isolation_typelib; T != DB_TYPE_BERKELEY_DB && \ T != DB_TYPE_NDBCLUSTER) -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type); enum db_type ha_resolve_by_name(const char *name, uint namelen); const char *ha_get_storage_engine(enum db_type db_type); handler *get_new_handler(TABLE *table, enum db_type db_type);
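The handler.h change just above defines the whole engine-facing contract: the default cached_table_registration() says "cacheable, nothing to re-check later" (callback left at 0), while an HA_CACHE_TBL_ASKTRANSACT engine such as ndbcluster installs a callback plus a baseline engine_data value, which for NDB is the table's commit count. A compilable sketch of that shape, using stand-in types only (THD_sketch, handler_sketch, current_commit_count are illustrative, not the real MySQL/NDB symbols):

```cpp
// Stand-in types; the real signatures use THD*, my_bool and ulonglong.
#include <cstdint>
#include <cstdio>

struct THD_sketch {};
typedef bool (*qc_engine_callback)(THD_sketch*, char*, unsigned, uint64_t*);

// Hypothetical stand-in for ndb_get_table_statistics(): the table's commit count.
static uint64_t current_commit_count= 42;

static bool ndb_like_retrieval_allowed(THD_sketch*, char*, unsigned,
                                       uint64_t* engine_data)
{
  if (*engine_data != current_commit_count)
  {
    *engine_data= current_commit_count;  // changed value => invalidate cached queries
    return false;
  }
  return true;                           // unchanged since registration => cache is valid
}

struct handler_sketch
{
  virtual ~handler_sketch() {}
  // Default (as in handler.h above): always cacheable, no callback to ask later.
  virtual bool cached_table_registration(THD_sketch*, char*, unsigned,
                                         qc_engine_callback* cb, uint64_t* engine_data)
  {
    *cb= 0;
    *engine_data= 0;
    return true;
  }
};

struct ndb_like_handler : handler_sketch
{
  bool cached_table_registration(THD_sketch*, char*, unsigned,
                                 qc_engine_callback* cb, uint64_t* engine_data) override
  {
    *cb= ndb_like_retrieval_allowed;        // how the cache re-checks this table
    *engine_data= current_commit_count;     // baseline stored with the cached query
    return true;                            // would return false inside a transaction
  }
};

int main()
{
  THD_sketch thd;
  char key[]= "test\0t1";
  ndb_like_handler h;
  qc_engine_callback cb;
  uint64_t engine_data;
  h.cached_table_registration(&thd, key, sizeof(key) - 1, &cb, &engine_data);
  current_commit_count++;                   // someone commits a change to the table
  std::printf("still valid: %d\n",
              cb(&thd, key, sizeof(key) - 1, &engine_data));  // prints 0
}
```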
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index c90935f4cf9..7a249ff91f4 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -340,6 +340,9 @@ inline THD *_current_thd(void) } #define current_thd _current_thd() +typedef my_bool (*qc_engine_callback)(THD *thd, char *table_key, + uint key_length, + ulonglong *engine_data); #include "sql_string.h" #include "sql_list.h" #include "sql_map.h" diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index bd42a2c1720..69d967a0be6 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -911,12 +911,12 @@ end: int Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) { + ulonglong engine_data; Query_cache_query *query; Query_cache_block *first_result_block, *result_block; Query_cache_block_table *block_table, *block_table_end; ulong tot_length; Query_cache_query_flags flags; - bool check_tables; DBUG_ENTER("Query_cache::send_result_to_client"); if (query_cache_size == 0 || thd->variables.query_cache_type == 0) @@ -1017,7 +1017,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; } - check_tables= query->tables_type() & HA_CACHE_TBL_ASKTRANSACT; // Check access; block_table= query_block->table(0); block_table_end= block_table+query_block->n_tables; @@ -1078,19 +1077,30 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; // Parse query } #endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ - if (check_tables && !ha_caching_allowed(thd, table->db(), - table->key_length(), - table->type())) + engine_data= table->engine_data(); + if (table->callback() && + !(*table->callback())(thd, table->db(), + table->key_length(), + &engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", table_list.db, table_list.alias)); BLOCK_UNLOCK_RD(query_block); - thd->lex->safe_to_cache_query= 0; // Don't try to cache this + if (engine_data != table->engine_data()) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lld-%lld", + table_list.db, table_list.alias, + engine_data, table->engine_data())); + invalidate_table(table->db(), table->key_length()); + } + else + thd->lex->safe_to_cache_query= 0; // Don't try to cache this goto err_unlock; // Parse query } else - DBUG_PRINT("qcache", ("handler allow caching (%d) %s,%s", - check_tables, table_list.db, table_list.alias)); + DBUG_PRINT("qcache", ("handler allow caching %s,%s", + table_list.db, table_list.alias)); } move_to_query_list_end(query_block); hits++; @@ -2115,7 +2125,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, if (!insert_table(tables_used->table->key_length, tables_used->table->table_cache_key, block_table, tables_used->db_length, - tables_used->table->file->table_cache_type())) + tables_used->table->file->table_cache_type(), + tables_used->callback_func, + tables_used->engine_data)) break; if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) @@ -2131,9 +2143,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, uint key_length= filename_2_table_key(key, table->table->filename, &db_length); (++block_table)->n= ++n; + /* + There is no callback function for MyISAM, and no engine data + */ if (!insert_table(key_length, key, block_table, db_length, - tables_used->table->file->table_cache_type())) + tables_used->table->file->table_cache_type(), + 0, 0)) goto err; } } @@ -2160,7 +2176,9 @@ err: my_bool Query_cache::insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type) + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data) { DBUG_ENTER("Query_cache::insert_table"); DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d", @@ -2170,6 +2188,23 @@ Query_cache::insert_table(uint key_len, char *key, hash_search(&tables, (byte*) key, key_len)); + if (table_block && + table_block->table()->engine_data() != engine_data) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lld-%lld", + table_block->table()->db(), + table_block->table()->table(), + engine_data, + table_block->table()->engine_data())); + /* + as far as we delete all queries with this table, table block will be + deleted, too + */ + invalidate_table(table_block); + table_block= 0; + } + if (table_block == 0) { DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)", @@ -2200,6 +2235,8 @@ Query_cache::insert_table(uint key_len, char *key, header->table(db + db_length + 1); header->key_length(key_len); header->type(cache_type); + header->callback(callback); + header->engine_data(engine_data); } Query_cache_block_table *list_root = table_block->table(0); @@ -2720,9 +2757,11 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, for (; tables_used; tables_used= tables_used->next) { TABLE *table= tables_used->table; - if (!ha_caching_allowed(thd, table->table_cache_key, - table->key_length, - table->file->table_cache_type())) + handler *handler= table->file; + if (!handler->cached_table_registration(thd, table->table_cache_key, + table->key_length, + &tables_used->callback_func, + &tables_used->engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", tables_used->db, tables_used->alias));
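With the send_result_to_client() and ask_handler_allowance() changes above, each table of a cached query carries an optional callback plus an engine_data value, and a lookup can now end three ways: serve the hit, skip the cache for this statement only, or throw away every query cached against the table. A small sketch of that decision, again with simplified stand-in types (cached_table, fetch_decision and check_cached_table are illustrative names, not the MySQL ones):

```cpp
#include <cstdint>
#include <cstdio>

struct THD_sketch { bool safe_to_cache_query; };
typedef bool (*qc_engine_callback)(THD_sketch*, char*, unsigned, uint64_t*);

struct cached_table
{
  char*     key;                 // "db\0table" key, as stored by insert_table()
  unsigned  key_length;
  qc_engine_callback callback;   // 0 when the engine never needs to be asked
  uint64_t  engine_data;         // e.g. commit count seen at registration time
};

enum fetch_decision { USE_CACHE, SKIP_CACHE, INVALIDATE_TABLE };

// Mirrors the new block in Query_cache::send_result_to_client().
static fetch_decision check_cached_table(THD_sketch* thd, cached_table* t)
{
  if (!t->callback)
    return USE_CACHE;                          // nothing to ask for this engine
  uint64_t engine_data= t->engine_data;        // local copy the callback may update
  if ((*t->callback)(thd, t->key, t->key_length, &engine_data))
    return USE_CACHE;
  if (engine_data != t->engine_data)
    return INVALIDATE_TABLE;                   // table changed underneath the cache
  thd->safe_to_cache_query= false;             // e.g. caller is inside a transaction
  return SKIP_CACHE;
}

// Dummy callback: pretend the table's commit count has moved on to 43.
static bool always_changed(THD_sketch*, char*, unsigned, uint64_t* engine_data)
{ *engine_data= 43; return false; }

int main()
{
  THD_sketch thd= { true };
  char key[]= "test\0t1";
  cached_table t= { key, sizeof(key) - 1, always_changed, 42 };
  std::printf("%d\n", check_cached_table(&thd, &t));   // prints 2 (INVALIDATE_TABLE)
}
```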
diff --git a/sql/sql_cache.h b/sql/sql_cache.h index c933a2349af..7595bfbbd54 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -145,6 +145,10 @@ struct Query_cache_table char *tbl; uint32 key_len; uint8 table_type; + /* unique for every engine reference */ + qc_engine_callback callback_func; + /* data needed by some engines */ + ulonglong engine_data_buff; inline char *db() { return (char *) data(); } inline char *table() { return tbl; } @@ -153,6 +157,10 @@ struct Query_cache_table inline void key_length(uint32 len) { key_len= len; } inline uint8 type() { return table_type; } inline void type(uint8 t) { table_type= t; } + inline qc_engine_callback callback() { return callback_func; } + inline void callback(qc_engine_callback fn){ callback_func= fn; } + inline ulonglong engine_data() { return engine_data_buff; } + inline void engine_data(ulonglong data) { engine_data_buff= data; } inline gptr data() { return (gptr)(((byte*)this)+ @@ -281,7 +289,9 @@ protected: TABLE_COUNTER_TYPE tables); my_bool insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type); + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data); void unlink_table(Query_cache_block_table *node); Query_cache_block *get_free_block (ulong len, my_bool not_less, ulong min);
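sql_cache.h above shows where that state lives: every Query_cache_table now stores the callback and an engine_data value, and insert_table() takes both so that re-registering a table with a different engine_data first invalidates whatever was cached against the old value. A rough standalone model of that registry follows; table_registry and table_entry are made-up names, and std::map stands in for the cache's own hash:

```cpp
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

typedef bool (*qc_engine_callback)(void*, char*, unsigned, uint64_t*);

struct table_entry
{
  qc_engine_callback callback;   // how to re-check this table at fetch time
  uint64_t engine_data;          // engine baseline (for NDB: commit count)
  unsigned cached_queries;       // queries currently linked to this table
};

struct table_registry
{
  std::map<std::string, table_entry> tables;

  // Mirrors the new insert_table() logic: a registered table whose engine_data
  // no longer matches is invalidated (all its queries dropped) and re-inserted.
  table_entry* insert_table(const std::string& key,
                            qc_engine_callback cb, uint64_t engine_data)
  {
    std::map<std::string, table_entry>::iterator it= tables.find(key);
    if (it != tables.end() && it->second.engine_data != engine_data)
    {
      tables.erase(it);                        // stands in for invalidate_table()
      it= tables.end();
    }
    if (it == tables.end())
      it= tables.insert(std::make_pair(key, table_entry{cb, engine_data, 0})).first;
    it->second.cached_queries++;
    return &it->second;
  }
};

int main()
{
  table_registry reg;
  const std::string key("test\0t1", 7);        // "db\0table" key, length 7
  reg.insert_table(key, 0, 42);                // first query cached against count 42
  reg.insert_table(key, 0, 42);                // second query, same count: reused
  reg.insert_table(key, 0, 43);                // count moved on: old entry dropped
  std::printf("queries linked: %u\n", reg.tables[key].cached_queries);  // prints 1
}
```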
diff --git a/sql/table.h b/sql/table.h index eed9969dac8..a46c210dd85 100644 --- a/sql/table.h +++ b/sql/table.h @@ -213,6 +213,10 @@ typedef struct st_table_list TABLE *table; /* opened table */ st_table_list *table_list; /* pointer to node of list of all tables */ class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ + /* data needed by some engines in the query cache */ + ulonglong engine_data; + /* callback function for asking handler about caching in query cache */ + qc_engine_callback callback_func; GRANT_INFO grant; thr_lock_type lock_type; uint outer_join; /* Which join type */
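Finally, a note on the key format the comments above keep describing ("concatenation of database name, the null character '\0', and the table name", with full_name_len = len(dbname) + len(tablename) + 1): both innobase_query_caching_of_table_permitted() and the new ndbcluster_cache_retrieval_allowed() split that buffer themselves. A small self-contained illustration of the layout; split_full_name is a made-up helper, not part of the patch:

```cpp
#include <cstdio>
#include <cstring>
#include <string>

struct table_name { std::string db; std::string table; };

// full_name is "<db>\0<table>", full_name_len is len(db) + len(table) + 1.
table_name split_full_name(const char* full_name, unsigned full_name_len)
{
  size_t db_len= std::strlen(full_name);             // stops at the '\0' separator
  table_name n;
  n.db.assign(full_name, db_len);
  n.table.assign(full_name + db_len + 1, full_name_len - db_len - 1);
  return n;
}

int main()
{
  const char key[]= "test\0t1";                      // 8 bytes: "test" '\0' "t1" '\0'
  table_name n= split_full_name(key, sizeof(key) - 1);
  std::printf("db=%s table=%s\n", n.db.c_str(), n.table.c_str());  // db=test table=t1
}
```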