From 1ffdb9e81e5ab46ba809829c11ca1aafdc238db8 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Nov 2004 09:50:33 +0000 Subject: [PATCH 01/25] enabling control of query cache for ndb --- sql/ha_ndbcluster.cc | 2 +- sql/mysqld.cc | 13 ++++++++++++- sql/set_var.cc | 6 ++++++ sql/sql_class.h | 1 + 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 6f7940caf75..db031c632ff 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3004,7 +3004,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_transaction_on= FALSE; else m_transaction_on= thd->variables.ndb_use_transactions; - // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; + m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 5033c42ac69..f7c89f6dde3 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2227,7 +2227,11 @@ extern "C" pthread_handler_decl(handle_shutdown,arg) #endif -const char *load_default_groups[]= { "mysqld","server",MYSQL_BASE_VERSION,0,0}; +const char *load_default_groups[]= { +#ifdef HAVE_NDBCLUSTER_DB + "mysql_cluster", +#endif + "mysqld","server",MYSQL_BASE_VERSION,0,0}; bool open_log(MYSQL_LOG *log, const char *hostname, const char *opt_name, const char *extension, @@ -3950,6 +3954,7 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, + OPT_NDB_USE_LOCAL_QUERY_CACHE, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4409,6 +4414,12 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb_use_local_query_cache", OPT_NDB_USE_LOCAL_QUERY_CACHE, + "Use local query cache, note that this cache will _not_ " + "be invalidated if data is updated through other mysql servers", + (gptr*) &global_system_variables.ndb_use_local_query_cache, + (gptr*) &global_system_variables.ndb_use_local_query_cache, + 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index f1973b53e49..6bf151be83f 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -371,6 +371,9 @@ sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_var_thd_bool +sys_ndb_use_local_query_cache("ndb_use_local_query_cache", + &SV::ndb_use_local_query_cache); +sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); // ndb server global variable settings @@ -634,6 +637,7 @@ sys_var *sys_variables[]= &sys_ndb_autoincrement_prefetch_sz, &sys_ndb_force_send, &sys_ndb_use_exact_count, + &sys_ndb_use_local_query_cache, &sys_ndb_use_transactions, #endif &sys_unique_checks, @@ -801,6 +805,8 @@ struct show_var_st init_vars[]= { (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, + {sys_ndb_use_local_query_cache.name, + (char*) &sys_ndb_use_local_query_cache, SHOW_SYS}, {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, #endif 
{sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, diff --git a/sql/sql_class.h b/sql/sql_class.h index d0d9afc7746..06975730195 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -403,6 +403,7 @@ struct system_variables ulong ndb_autoincrement_prefetch_sz; my_bool ndb_force_send; my_bool ndb_use_exact_count; + my_bool ndb_use_local_query_cache; my_bool ndb_use_transactions; #endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; From dfef378702462359d40be4ad4d7ec3f8da2f2276 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Nov 2004 16:38:00 +0000 Subject: [PATCH 02/25] changed query cache type variable for ndb --- sql/ha_ndbcluster.cc | 21 +++++++++++++++------ sql/ha_ndbcluster.h | 2 +- sql/mysqld.cc | 18 +++++++++++------- sql/set_var.cc | 18 ++++++++++++------ sql/sql_cache.cc | 6 ++++++ sql/sql_cache.h | 1 + sql/sql_class.h | 2 +- 7 files changed, 47 insertions(+), 21 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 8d82f60ae85..eea051be9e4 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3025,7 +3025,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_transaction_on= FALSE; else m_transaction_on= thd->variables.ndb_use_transactions; - m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; +#ifdef HAVE_QUERY_CACHE + m_query_cache_type= thd->variables.ndb_query_cache_type; +#endif m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: @@ -3751,8 +3753,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_force_send(TRUE), m_autoincrement_prefetch(32), m_transaction_on(TRUE), - m_use_local_query_cache(FALSE) -{ + m_query_cache_type(0) +{ int i; DBUG_ENTER("ha_ndbcluster"); @@ -4455,10 +4457,17 @@ const char* ha_ndbcluster::index_type(uint key_number) } uint8 ha_ndbcluster::table_cache_type() { - if (m_use_local_query_cache) - return HA_CACHE_TBL_TRANSACT; - else + switch (m_query_cache_type) + { + case 0: return HA_CACHE_TBL_NOCACHE; + case 1: + return HA_CACHE_TBL_ASKTRANSACT; + case 2: + return HA_CACHE_TBL_TRANSACT; + default: + return HA_CACHE_TBL_NOCACHE; + } } /* diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index f6c712620c1..baf4a7480ac 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -244,7 +244,7 @@ class ha_ndbcluster: public handler bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; - bool m_use_local_query_cache; + ulong m_query_cache_type; void set_rec_per_key(); void records_update(); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index f7c89f6dde3..51e12ff23f9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3954,7 +3954,7 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, - OPT_NDB_USE_LOCAL_QUERY_CACHE, + OPT_NDB_QUERY_CACHE_TYPE, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4414,12 +4414,16 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, - {"ndb_use_local_query_cache", OPT_NDB_USE_LOCAL_QUERY_CACHE, - "Use local query cache, note that this cache will _not_ " - "be invalidated if data is updated through other mysql servers", - (gptr*) &global_system_variables.ndb_use_local_query_cache, - (gptr*) &global_system_variables.ndb_use_local_query_cache, - 0, GET_BOOL, OPT_ARG, 0, 0, 
0, 0, 0, 0}, +#ifdef HAVE_QUERY_CACHE + {"ndb_query_cache_type", OPT_NDB_QUERY_CACHE_TYPE, + "0 = OFF = Don't cache or retrieve results. 1 = ON = Cache as query_cache_type states and " + "invalidate cache if tables are updated by other mysql servers. " + "2 = LOCAL = Cache as query_cache_type states and don't bother about what's happening on other " + "mysql servers.", + (gptr*) &global_system_variables.ndb_query_cache_type, + (gptr*) &global_system_variables.ndb_query_cache_type, + 0, GET_ULONG, REQUIRED_ARG, 0, 0, 2, 0, 0, 0}, +#endif #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index a040e8b2ba4..c51bfce7a43 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -370,9 +370,12 @@ sys_ndb_force_send("ndb_force_send", sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); -sys_var_thd_bool -sys_ndb_use_local_query_cache("ndb_use_local_query_cache", - &SV::ndb_use_local_query_cache); +#ifdef HAVE_QUERY_CACHE +sys_var_thd_enum +sys_ndb_query_cache_type("ndb_query_cache_type", + &SV::ndb_query_cache_type, + &ndb_query_cache_type_typelib); +#endif sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); @@ -637,7 +640,9 @@ sys_var *sys_variables[]= &sys_ndb_autoincrement_prefetch_sz, &sys_ndb_force_send, &sys_ndb_use_exact_count, - &sys_ndb_use_local_query_cache, +#ifdef HAVE_QUERY_CACHE + &sys_ndb_query_cache_type, +#endif &sys_ndb_use_transactions, #endif &sys_unique_checks, @@ -805,8 +810,9 @@ struct show_var_st init_vars[]= { (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, - {sys_ndb_use_local_query_cache.name, - (char*) &sys_ndb_use_local_query_cache, SHOW_SYS}, +#ifdef HAVE_QUERY_CACHE + {sys_ndb_query_cache_type.name,(char*) &sys_ndb_query_cache_type, SHOW_SYS}, +#endif {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, #endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 1bf8d179770..da6998ded47 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -366,6 +366,12 @@ TYPELIB query_cache_type_typelib= array_elements(query_cache_type_names)-1,"", query_cache_type_names, NULL }; +const char *ndb_query_cache_type_names[]= { "OFF", "ON", "LOCAL",NullS }; +TYPELIB ndb_query_cache_type_typelib= +{ + array_elements(ndb_query_cache_type_names)-1,"", ndb_query_cache_type_names, NULL +}; + /***************************************************************************** Query_cache_block_table method(s) *****************************************************************************/ diff --git a/sql/sql_cache.h b/sql/sql_cache.h index c933a2349af..b8cbc7953b8 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -412,6 +412,7 @@ protected: }; extern Query_cache query_cache; +extern TYPELIB ndb_query_cache_type_typelib; extern TYPELIB query_cache_type_typelib; void query_cache_end_of_result(THD *thd); void query_cache_abort(NET *net); diff --git a/sql/sql_class.h b/sql/sql_class.h index 06975730195..2ebad4b466a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -403,7 +403,7 @@ struct system_variables ulong ndb_autoincrement_prefetch_sz; my_bool ndb_force_send; my_bool ndb_use_exact_count; - my_bool ndb_use_local_query_cache; + ulong ndb_query_cache_type; my_bool 
ndb_use_transactions; #endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; From 35502d45e0d95428524cc8d65bd029bd4c56c5e6 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 24 Nov 2004 11:56:51 +0000 Subject: [PATCH 03/25] sql/ha_innodb.cc enabled query cache for ndb modified engine interface somewhat sql/ha_innodb.h enabled query cache for ndb modified engine interface somewhat sql/ha_ndbcluster.cc enabled query cache for ndb modified engine interface somewhat ndb will only allow caching and retrieval if running autocommit - return false, but do not invalidate commit count is used as engine data, i.e. - store commit count before store of cache - allow retrieval if commit count has not changed on a table - invalidate if commit count has changed sql/ha_ndbcluster.h enabled query cache for ndb modified engine interface somewhat sql/handler.cc enabled query cache for ndb modified engine interface somewhat sql/handler.h enabled query cache for ndb modified engine interface somewhat new virtual handler method cached_table_registration called on each table before allowing store in query cache - return TRUE - ok to cache, FALSE - not allowed to cache, invalidate queries if engine_data below has changed - sets ulonglong (engine_data) that is stored in query cache for each table - sets callback to be called for each table before usage of cached query, callback = 0 -> no check later sql/mysql_priv.h enabled query cache for ndb modified engine interface somewhat callback prototype for callback to engine before query cache retrieval sql/sql_cache.cc enabled query cache for ndb modified engine interface somewhat if callback is set on table in cache, do callback to check if allowed to use cache if not allowed to use cache, check if engine_data has changed, if so, invalidate all queries with that table + changes to store and pass callback and engine_data around sql/sql_cache.h enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data sql/table.h enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data sql/ha_innodb.cc: enabled query cache for ndb modified engine interface somewhat sql/ha_innodb.h: enabled query cache for ndb modified engine interface somewhat sql/ha_ndbcluster.cc: enabled query cache for ndb modified engine interface somewhat ndb will only allow caching and retrieval if running autocommit - return false, but do not invalidate commit count is used as engine data, i.e. 
- store commit count before store of cache - allow retrieval if commit count has not changed on a table - invalidate if commit count has changed sql/ha_ndbcluster.h: enabled query cache for ndb modified engine interface somewhat sql/handler.cc: enabled query cache for ndb modified engine interface somewhat sql/handler.h: enabled query cache for ndb modified engine interface somewhat new virtual handler method cached_table_registration called on each table before alowing store in query cache - return TRUE - ok to cache, FALSE - not allowed to cache, invalidate queries if engine_data below has changed - sets ulonglong (engine_data) that is stored in query cache for each table - sets callback to be called for each table before usage of cached query, callback = 0 -> no check later sql/mysql_priv.h: enabled query cache for ndb modified engine interface somewhat callcack prototype for callback to engine before query cache retrieval sql/sql_cache.cc: enabled query cache for ndb modified engine interface somewhat if callback is set on table in cache, do callback to check if allowed to use cache if not allowed to use cache, check if engine_data has changed, if so, invalidate all queries with that table + changes to store and pass callback and engine_data around sql/sql_cache.h: enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data sql/table.h: enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data --- sql/ha_innodb.cc | 3 +- sql/ha_innodb.h | 20 ++++++- sql/ha_ndbcluster.cc | 132 ++++++++++++++++++++++++++++++++++++++++++- sql/ha_ndbcluster.h | 5 +- sql/handler.cc | 9 --- sql/handler.h | 15 +++-- sql/mysql_priv.h | 3 + sql/sql_cache.cc | 64 ++++++++++++++++----- sql/sql_cache.h | 12 +++- sql/table.h | 4 ++ 10 files changed, 232 insertions(+), 35 deletions(-) diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 07d8da63733..b5c94386677 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -655,8 +655,9 @@ innobase_query_caching_of_table_permitted( char* full_name, /* in: concatenation of database name, the null character '\0', and the table name */ - uint full_name_len) /* in: length of the full name, i.e. + uint full_name_len, /* in: length of the full name, i.e. 
len(dbname) + len(tablename) + 1 */ + ulonglong *unused) /* unused for this engine */ { ibool is_autocommit; trx_t* trx; diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index e76a966c6b9..e3b058d0b42 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -33,6 +33,10 @@ typedef struct st_innobase_share { } INNOBASE_SHARE; +my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, + uint full_name_len, + ulonglong *unused); + /* The class defining a handle to an Innodb table */ class ha_innobase: public handler { @@ -168,6 +172,20 @@ class ha_innobase: public handler void init_table_handle_for_HANDLER(); longlong get_auto_increment(); uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } + /* + ask handler about permission to cache table during query registration + */ + my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *call_back, + ulonglong *engine_data) + { + *call_back= innobase_query_caching_of_table_permitted; + *engine_data= 0; + return innobase_query_caching_of_table_permitted(thd, table_key, + key_length, + engine_data); + } static char *get_mysql_bin_log_name(); static ulonglong get_mysql_bin_log_pos(); @@ -233,8 +251,6 @@ int innobase_close_connection(THD *thd); int innobase_drop_database(char *path); int innodb_show_status(THD* thd); -my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, - uint full_name_len); void innobase_release_temporary_latches(void* innobase_tid); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 8d82f60ae85..8621fb89563 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4455,10 +4455,138 @@ const char* ha_ndbcluster::index_type(uint key_number) } uint8 ha_ndbcluster::table_cache_type() { + DBUG_ENTER("ha_ndbcluster::table_cache_type"); if (m_use_local_query_cache) - return HA_CACHE_TBL_TRANSACT; + { + DBUG_PRINT("exit",("HA_CACHE_TBL_ASKTRANSACT")); + DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); //HA_CACHE_TBL_TRANSACT; + } else - return HA_CACHE_TBL_NOCACHE; + { + DBUG_PRINT("exit",("HA_CACHE_TBL_NOCACHE")); + DBUG_RETURN(HA_CACHE_TBL_NOCACHE); + } +} + +static +my_bool +ndbcluster_cache_retrieval_allowed( +/*======================================*/ + /* out: TRUE if permitted, FALSE if not; + note that the value FALSE means invalidation + of query cache if *engine_data is changed */ + THD* thd, /* in: thd of the user who is trying to + store a result to the query cache or + retrieve it */ + char* full_name, /* in: concatenation of database name, + the null character '\0', and the table + name */ + uint full_name_len, /* in: length of the full name, i.e. 
+ len(dbname) + len(tablename) + 1 */ + ulonglong *engine_data) /* in: value set in call to + ha_ndbcluster::cached_table_registration + out: if return FALSE this is used to invalidate + all cached queries with this table*/ +{ + DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); + char tabname[128]; + char *dbname= full_name; + my_bool is_autocommit; + { + int dbname_len= strlen(full_name); + int tabname_len= full_name_len-dbname_len-1; + memcpy(tabname, full_name+dbname_len+1, tabname_len); + tabname[tabname_len]= '\0'; + } + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + is_autocommit = FALSE; + else + is_autocommit = TRUE; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", + dbname,tabname,is_autocommit)); + if (!is_autocommit) + { + DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", + thd->options & OPTION_NOT_AUTOCOMMIT, + thd->options & OPTION_BEGIN)); + // ToDo enable cache inside a transaction + // no need to invalidate though so leave *engine_data + DBUG_RETURN(FALSE); + } + { + Ndb *ndb; + Uint64 commit_count; + if (!(ndb= check_ndb_in_thd(thd))) + { + *engine_data= *engine_data+1; // invalidate + DBUG_RETURN(FALSE); + } + ndb->setDatabaseName(dbname); + if (ndb_get_table_statistics(ndb, tabname, 0, &commit_count)) + { + *engine_data= *engine_data+1; // invalidate + DBUG_RETURN(FALSE); + } + if (*engine_data != commit_count) + { + *engine_data= commit_count; // invalidate + DBUG_RETURN(FALSE); + } + } + DBUG_PRINT("exit",("*engine_data=%d ok, use cache",*engine_data)); + DBUG_RETURN(TRUE); +} + +my_bool +ha_ndbcluster::cached_table_registration( +/*======================================*/ + /* out: TRUE if permitted, FALSE if not; + note that the value FALSE means invalidation + of query cache if *engine_data is changed */ + THD* thd, /* in: thd of the user who is trying to + store a result to the query cache or + retrieve it */ + char* full_name, /* in: concatenation of database name, + the null character '\0', and the table + name */ + uint full_name_len, /* in: length of the full name, i.e. 
+ len(dbname) + len(tablename) + 1 */ + qc_engine_callback + *engine_callback, /* out: function to be called before using + cache on this table */ + ulonglong *engine_data) /* out: if return FALSE this is used to + invalidate all cached queries with this table*/ +{ + DBUG_ENTER("ha_ndbcluster::cached_table_registration"); + my_bool is_autocommit; + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + is_autocommit = FALSE; + else + is_autocommit = TRUE; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", + m_dbname,m_tabname,is_autocommit)); + if (!is_autocommit) + { + DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", + thd->options & OPTION_NOT_AUTOCOMMIT, + thd->options & OPTION_BEGIN)); + // ToDo enable cache inside a transaction + // no need to invalidate though so leave *engine_data + DBUG_RETURN(FALSE); + } + { + Uint64 commit_count; + m_ndb->setDatabaseName(m_dbname); + if (ndb_get_table_statistics(m_ndb, m_tabname, 0, &commit_count)) + { + *engine_data= 0; + DBUG_RETURN(FALSE); + } + *engine_data= commit_count; + } + *engine_callback= ndbcluster_cache_retrieval_allowed; + DBUG_PRINT("exit",("*engine_data=%d", *engine_data)); + DBUG_RETURN(TRUE); } /* diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index f6c712620c1..7b3b5658175 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -146,7 +146,10 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type(); - + my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data); private: int alter_table_name(const char *from, const char *to); int drop_table(); diff --git a/sql/handler.cc b/sql/handler.cc index 7ddd7b80a34..9e781817c02 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -229,15 +229,6 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) } } -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type) -{ -#ifdef HAVE_INNOBASE_DB - if (cache_type == HA_CACHE_TBL_ASKTRANSACT) - return innobase_query_caching_of_table_permitted(thd, table_key, key_length); -#endif - return 1; -} int ha_init() { diff --git a/sql/handler.h b/sql/handler.h index 252861e5c37..31710ec728c 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -506,10 +506,15 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* - Is query with this table cachable (have sense only for ASKTRANSACT - tables) - */ + /* ask handler about permission to cache table during query registration */ + virtual my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data) + { + *engine_callback= 0; + return 1; + } }; /* Some extern variables used with handlers */ @@ -528,8 +533,6 @@ extern TYPELIB tx_isolation_typelib; T != DB_TYPE_BERKELEY_DB && \ T != DB_TYPE_NDBCLUSTER) -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type); enum db_type ha_resolve_by_name(const char *name, uint namelen); const char *ha_get_storage_engine(enum db_type db_type); handler *get_new_handler(TABLE *table, enum db_type db_type); diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 3f55a88b262..77703c2b390 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -340,6 +340,9 @@ inline THD *_current_thd(void) } #define current_thd _current_thd() +typedef my_bool 
(*qc_engine_callback)(THD *thd, char *table_key, + uint key_length, + ulonglong *engine_data); #include "sql_string.h" #include "sql_list.h" #include "sql_map.h" diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 1bf8d179770..5c2698bcef2 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -912,12 +912,12 @@ end: int Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) { + ulonglong engine_data; Query_cache_query *query; Query_cache_block *first_result_block, *result_block; Query_cache_block_table *block_table, *block_table_end; ulong tot_length; Query_cache_query_flags flags; - bool check_tables; DBUG_ENTER("Query_cache::send_result_to_client"); if (query_cache_size == 0 || thd->variables.query_cache_type == 0) @@ -1018,7 +1018,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; } - check_tables= query->tables_type() & HA_CACHE_TBL_ASKTRANSACT; // Check access; block_table= query_block->table(0); block_table_end= block_table+query_block->n_tables; @@ -1079,19 +1078,29 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; // Parse query } #endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ - if (check_tables && !ha_caching_allowed(thd, table->db(), - table->key_length(), - table->type())) + engine_data= table->engine_data(); + if (table->callback() && + !(*table->callback())(thd, table->db(), + table->key_length(), + &engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", table_list.db, table_list.alias)); BLOCK_UNLOCK_RD(query_block); thd->lex->safe_to_cache_query= 0; // Don't try to cache this + if (engine_data != table->engine_data()) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lld-%lld", + table_list.db, table_list.alias, + engine_data, table->engine_data())); + invalidate_table(table->db(), table->key_length()); + } goto err_unlock; // Parse query } else - DBUG_PRINT("qcache", ("handler allow caching (%d) %s,%s", - check_tables, table_list.db, table_list.alias)); + DBUG_PRINT("qcache", ("handler allow caching %s,%s", + table_list.db, table_list.alias)); } move_to_query_list_end(query_block); hits++; @@ -2116,7 +2125,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, if (!insert_table(tables_used->table->key_length, tables_used->table->table_cache_key, block_table, tables_used->db_length, - tables_used->table->file->table_cache_type())) + tables_used->table->file->table_cache_type(), + tables_used->callback_func, + tables_used->engine_data)) break; if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) @@ -2132,9 +2143,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, uint key_length= filename_2_table_key(key, table->table->filename, &db_length); (++block_table)->n= ++n; + /* + There are not callback function for for MyISAM, and engine data + */ if (!insert_table(key_length, key, block_table, db_length, - tables_used->table->file->table_cache_type())) + tables_used->table->file->table_cache_type(), + 0, 0)) goto err; } } @@ -2161,7 +2176,9 @@ err: my_bool Query_cache::insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type) + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data) { DBUG_ENTER("Query_cache::insert_table"); DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d", @@ -2171,6 +2188,23 @@ Query_cache::insert_table(uint key_len, char *key, hash_search(&tables, (byte*) key, key_len)); + 
if (table_block && + table_block->table()->engine_data() != engine_data) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lld-%lld", + table_block->table()->db(), + table_block->table()->table(), + engine_data, + table_block->table()->engine_data())); + /* + as far as we delete all queries with this table, table block will be + deleted, too + */ + invalidate_table(table_block); + table_block= 0; + } + if (table_block == 0) { DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)", @@ -2201,6 +2235,8 @@ Query_cache::insert_table(uint key_len, char *key, header->table(db + db_length + 1); header->key_length(key_len); header->type(cache_type); + header->callback(callback); + header->engine_data(engine_data); } Query_cache_block_table *list_root = table_block->table(0); @@ -2721,9 +2757,11 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, for (; tables_used; tables_used= tables_used->next) { TABLE *table= tables_used->table; - if (!ha_caching_allowed(thd, table->table_cache_key, - table->key_length, - table->file->table_cache_type())) + handler *handler= table->file; + if (!handler->cached_table_registration(thd, table->table_cache_key, + table->key_length, + &tables_used->callback_func, + &tables_used->engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", tables_used->db, tables_used->alias)); diff --git a/sql/sql_cache.h b/sql/sql_cache.h index c933a2349af..7595bfbbd54 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -145,6 +145,10 @@ struct Query_cache_table char *tbl; uint32 key_len; uint8 table_type; + /* unique for every engine reference */ + qc_engine_callback callback_func; + /* data need by some engines */ + ulonglong engine_data_buff; inline char *db() { return (char *) data(); } inline char *table() { return tbl; } @@ -153,6 +157,10 @@ struct Query_cache_table inline void key_length(uint32 len) { key_len= len; } inline uint8 type() { return table_type; } inline void type(uint8 t) { table_type= t; } + inline qc_engine_callback callback() { return callback_func; } + inline void callback(qc_engine_callback fn){ callback_func= fn; } + inline ulonglong engine_data() { return engine_data_buff; } + inline void engine_data(ulonglong data) { engine_data_buff= data; } inline gptr data() { return (gptr)(((byte*)this)+ @@ -281,7 +289,9 @@ protected: TABLE_COUNTER_TYPE tables); my_bool insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type); + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data); void unlink_table(Query_cache_block_table *node); Query_cache_block *get_free_block (ulong len, my_bool not_less, ulong min); diff --git a/sql/table.h b/sql/table.h index 2eb854f553d..c3945ac5d79 100644 --- a/sql/table.h +++ b/sql/table.h @@ -207,6 +207,10 @@ typedef struct st_table_list TABLE *table; /* opened table */ st_table_list *table_list; /* pointer to node of list of all tables */ class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ + /* data need by some engines in query cache*/ + ulonglong engine_data; + /* call back function for asking handler about caching in query cache */ + qc_engine_callback callback_func; GRANT_INFO grant; thr_lock_type lock_type; uint outer_join; /* Which join type */ From 93191c739e8302c312d283169c736867cf8327b3 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 25 Nov 2004 19:49:50 +0200 Subject: [PATCH 04/25] new NDB test with QC sql/sql_cache.cc: if we removed old values in cache, then we 
can cache new one --- mysql-test/r/ndb_cache.result | 43 ----------------------------------- mysql-test/t/ndb_cache.test | 38 +++++++++++++++++++++++-------- sql/sql_cache.cc | 3 ++- 3 files changed, 30 insertions(+), 54 deletions(-) diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result index 714e1831267..e69de29bb2d 100644 --- a/mysql-test/r/ndb_cache.result +++ b/mysql-test/r/ndb_cache.result @@ -1,43 +0,0 @@ -set GLOBAL query_cache_size=1355776; -reset query cache; -flush status; -drop table if exists t1,t2; -CREATE TABLE t1 (a int) ENGINE=ndbcluster; -CREATE TABLE t2 (a int); -select * from t1; -a -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 0 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from t2; -a -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from t1; -a -select * from t2; -a -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 1 -drop table t1, t2; -SET GLOBAL query_cache_size=0; diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index abd09424f64..bd368105a84 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -1,31 +1,49 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc - +set GLOBAL ndb_query_cache_type=on; +# following line have to be removed when ndb_query_cache_type will made +# global only +set ndb_query_cache_type=on; set GLOBAL query_cache_size=1355776; reset query cache; flush status; --disable_warnings -drop table if exists t1,t2; +drop table if exists t1; --enable_warnings CREATE TABLE t1 (a int) ENGINE=ndbcluster; -CREATE TABLE t2 (a int); - +insert into t1 value (2); select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -select * from t2; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; +connect (con1,localhost,root,,); +connection con1; +use test; +set autocommit=0; +update t1 set a=3; +connect (con2,localhost,root,,); +connection con2; +select * from t1; select * from t1; -select * from t2; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; +connection con1; +select * from t1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +commit; +connection con2; +select * from t1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +drop table t1; -drop table t1, t2; SET GLOBAL query_cache_size=0; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 105dcae0319..28e814a2d62 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1093,7 +1093,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", table_list.db, table_list.alias)); BLOCK_UNLOCK_RD(query_block); - thd->lex->safe_to_cache_query= 0; // Don't try to 
cache this if (engine_data != table->engine_data()) { DBUG_PRINT("qcache", @@ -1102,6 +1101,8 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) engine_data, table->engine_data())); invalidate_table(table->db(), table->key_length()); } + else + thd->lex->safe_to_cache_query= 0; // Don't try to cache this goto err_unlock; // Parse query } else From e0b469aa1d9827fe722904d974bf1c7b2c3021cc Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 25 Nov 2004 18:56:10 +0000 Subject: [PATCH 05/25] removed special ndb query cache variable ndb will always return query cache type ASKTRANSACT --- sql/ha_ndbcluster.cc | 24 +++--------------------- sql/ha_ndbcluster.h | 1 - sql/mysqld.cc | 11 ----------- sql/set_var.cc | 12 ------------ sql/sql_cache.cc | 6 ------ sql/sql_cache.h | 1 - sql/sql_class.h | 1 - 7 files changed, 3 insertions(+), 53 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 5dd977dc95b..3a1dde2e4d4 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3025,9 +3025,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_transaction_on= FALSE; else m_transaction_on= thd->variables.ndb_use_transactions; -#ifdef HAVE_QUERY_CACHE - m_query_cache_type= thd->variables.ndb_query_cache_type; -#endif m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: @@ -3752,8 +3749,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_ha_not_exact_count(FALSE), m_force_send(TRUE), m_autoincrement_prefetch(32), - m_transaction_on(TRUE), - m_query_cache_type(0) + m_transaction_on(TRUE) { int i; @@ -4457,22 +4453,8 @@ const char* ha_ndbcluster::index_type(uint key_number) } uint8 ha_ndbcluster::table_cache_type() { - DBUG_ENTER("ha_ndbcluster::table_cache_type"); - switch (m_query_cache_type) - { - case 0: - DBUG_PRINT("exit",("HA_CACHE_TBL_NOCACHE")); - DBUG_RETURN(HA_CACHE_TBL_NOCACHE); - case 1: - DBUG_PRINT("exit",("HA_CACHE_TBL_ASKTRANSACT")); - DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); - case 2: - DBUG_PRINT("exit",("HA_CACHE_TBL_TRANSACT")); - DBUG_RETURN(HA_CACHE_TBL_TRANSACT); - default: - DBUG_PRINT("exit",("HA_CACHE_TBL_NOCACHE")); - DBUG_RETURN(HA_CACHE_TBL_NOCACHE); - } + DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT"); + DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); } static diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 33f06dd9092..ab39993c8e6 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -247,7 +247,6 @@ class ha_ndbcluster: public handler bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; - ulong m_query_cache_type; void set_rec_per_key(); void records_update(); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 2778bdedee6..14fd267e85d 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3969,7 +3969,6 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, - OPT_NDB_QUERY_CACHE_TYPE, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4429,16 +4428,6 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, -#ifdef HAVE_QUERY_CACHE - {"ndb_query_cache_type", OPT_NDB_QUERY_CACHE_TYPE, - "0 = OFF = Don't cache or retrieve results. 
1 = ON = Cache as query_cache_type states and " - "invalidate cache if tables are updated by other mysql servers. " - "2 = LOCAL = Cache as query_cache_type states and don't bother about what's happening on other " - "mysql servers.", - (gptr*) &global_system_variables.ndb_query_cache_type, - (gptr*) &global_system_variables.ndb_query_cache_type, - 0, GET_ULONG, REQUIRED_ARG, 0, 0, 2, 0, 0, 0}, -#endif #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index c51bfce7a43..2031ac15412 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -370,12 +370,6 @@ sys_ndb_force_send("ndb_force_send", sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); -#ifdef HAVE_QUERY_CACHE -sys_var_thd_enum -sys_ndb_query_cache_type("ndb_query_cache_type", - &SV::ndb_query_cache_type, - &ndb_query_cache_type_typelib); -#endif sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); @@ -640,9 +634,6 @@ sys_var *sys_variables[]= &sys_ndb_autoincrement_prefetch_sz, &sys_ndb_force_send, &sys_ndb_use_exact_count, -#ifdef HAVE_QUERY_CACHE - &sys_ndb_query_cache_type, -#endif &sys_ndb_use_transactions, #endif &sys_unique_checks, @@ -810,9 +801,6 @@ struct show_var_st init_vars[]= { (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, -#ifdef HAVE_QUERY_CACHE - {sys_ndb_query_cache_type.name,(char*) &sys_ndb_query_cache_type, SHOW_SYS}, -#endif {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, #endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 105dcae0319..5c2698bcef2 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -366,12 +366,6 @@ TYPELIB query_cache_type_typelib= array_elements(query_cache_type_names)-1,"", query_cache_type_names, NULL }; -const char *ndb_query_cache_type_names[]= { "OFF", "ON", "LOCAL",NullS }; -TYPELIB ndb_query_cache_type_typelib= -{ - array_elements(ndb_query_cache_type_names)-1,"", ndb_query_cache_type_names, NULL -}; - /***************************************************************************** Query_cache_block_table method(s) *****************************************************************************/ diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 9ab9b7cc549..7595bfbbd54 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -422,7 +422,6 @@ protected: }; extern Query_cache query_cache; -extern TYPELIB ndb_query_cache_type_typelib; extern TYPELIB query_cache_type_typelib; void query_cache_end_of_result(THD *thd); void query_cache_abort(NET *net); diff --git a/sql/sql_class.h b/sql/sql_class.h index 2ebad4b466a..d0d9afc7746 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -403,7 +403,6 @@ struct system_variables ulong ndb_autoincrement_prefetch_sz; my_bool ndb_force_send; my_bool ndb_use_exact_count; - ulong ndb_query_cache_type; my_bool ndb_use_transactions; #endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; From ead004f91f38d5c90cb73e63969bc1a222c22325 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 9 Dec 2004 12:55:17 +0100 Subject: [PATCH 06/25] Test for query cache in combination with NDB mysql-test/r/ndb_cache.result: Updated tests and results for ndb_cache mysql-test/t/ndb_cache.test: Updated tests and results for ndb_cache --- 
mysql-test/r/ndb_cache.result | 191 ++++++++++++++++++++++++++++++++++ mysql-test/t/ndb_cache.test | 102 +++++++++++++++--- 2 files changed, 278 insertions(+), 15 deletions(-) diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result index e69de29bb2d..7423771e026 100644 --- a/mysql-test/r/ndb_cache.result +++ b/mysql-test/r/ndb_cache.result @@ -0,0 +1,191 @@ +drop table if exists t1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +CREATE TABLE t1 ( pk int not null primary key, +a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +update t1 set a=3 where pk=1; +select * from t1; +pk a b c +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +use test; +select * from t1; +pk a b c +2 7 8 Second row +1 3 3 First row +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 4 +update t1 set a=4 where b=3; +use test; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 5 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +begin; +update t1 set a=5 where pk=1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like 
"Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +commit; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 8 +drop table t1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +SET GLOBAL query_cache_size=0; diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index bd368105a84..8bdcbe17728 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -1,33 +1,77 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc -set GLOBAL ndb_query_cache_type=on; -# following line have to be removed when ndb_query_cache_type will made -# global only -set ndb_query_cache_type=on; -set GLOBAL query_cache_size=1355776; -reset query cache; -flush status; --disable_warnings drop table if exists t1; --enable_warnings -CREATE TABLE t1 (a int) ENGINE=ndbcluster; -insert into t1 value (2); +# Turn on and reset query cache +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + +# Create test table in NDB +CREATE TABLE t1 ( pk int not null primary key, + a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); + +# Perform one query which should be inerted in query cache select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; + +# Perform the same query and make sure the query cache is hit +select * from t1; +show status like "Qcache_hits"; + +# Update the table and make sure the correct data is returned +update t1 set a=3 where pk=1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Insert a new record and make sure the correct data is returned +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_hits"; + +# Perform a "new" query and make sure the query cache is not hit +select * from t1 where b=3; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_hits"; + +# Same query again... 
+select * from t1 where b=3; +show status like "Qcache_hits"; + +# Delete from the table +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Start another connection and check that the query cache is hit connect (con1,localhost,root,,); connection con1; use test; -set autocommit=0; -update t1 set a=3; +select * from t1; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Update the table and switch to other connection +update t1 set a=4 where b=3; connect (con2,localhost,root,,); connection con2; -select * from t1; -select * from t1; +use test; show status like "Qcache_queries_in_cache"; +select * from t1; +select * from t1; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; @@ -36,14 +80,42 @@ select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -commit; + +# Use transactions and make sure the query cache is not updated until +# transaction is commited +begin; +update t1 set a=5 where pk=1; +# Note!! the below test shows that table is invalidated +# before transaction is committed +# TODO Fix so that cache is not invalidated HERE! +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; connection con2; select * from t1; -select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; +connection con1; +commit; +# TODO Here query is invalidated once again, commit count in NDB has changed +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + drop table t1; +show status like "Qcache_queries_in_cache"; SET GLOBAL query_cache_size=0; + + From fcca1791e341ab23d9b571938002a4576972ed32 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Dec 2004 12:09:19 +0100 Subject: [PATCH 07/25] Added test case for multiple MySQL Servers connected to one cluster Added test for cache in combination with multiple MySQL Servers mysql-test/include/have_ndb.inc: Added suport for connecting to two MySQL Server instances (I hope this will work on all platforms, since it does not use the socket parameter when openeing the connections, analog to how it looks like in replication tests) --- mysql-test/include/have_ndb.inc | 33 +++++++++++-- mysql-test/r/ndb_cache_multi.result | 72 +++++++++++++++++++++++++++++ mysql-test/r/ndb_multi.result | 49 ++++++++++++++++++++ mysql-test/r/server_id.require | 2 + mysql-test/r/server_id1.require | 2 + mysql-test/t/ndb_cache_multi.test | 63 +++++++++++++++++++++++++ mysql-test/t/ndb_multi.test | 42 +++++++++++++++++ 7 files changed, 260 insertions(+), 3 deletions(-) create mode 100644 mysql-test/r/ndb_cache_multi.result create mode 100644 mysql-test/r/ndb_multi.result create mode 100644 mysql-test/r/server_id.require create mode 100644 mysql-test/r/server_id1.require create mode 100644 mysql-test/t/ndb_cache_multi.test create mode 100644 mysql-test/t/ndb_multi.test diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc index 84e60657876..249f3362bce 100644 --- a/mysql-test/include/have_ndb.inc +++ 
b/mysql-test/include/have_ndb.inc @@ -2,6 +2,33 @@ disable_query_log; show variables like "have_ndbcluster"; enable_query_log; -#connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK); -#connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,$MASTER_MYSOCK1); -#connection server1; + +# Setup connections to both MySQL Servers connected to the cluster +connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); +connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); + +# Check that server1 has NDB support +connection server1; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id.require show variables like "server_id"; +enable_query_log; + +# Check that server2 has NDB support +connection server2; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id1.require show variables like "server_id"; +enable_query_log; + +# Set the default connection to 'server1' +connection server1; + diff --git a/mysql-test/r/ndb_cache_multi.result b/mysql-test/r/ndb_cache_multi.result new file mode 100644 index 00000000000..c7135ed9e8a --- /dev/null +++ b/mysql-test/r/ndb_cache_multi.result @@ -0,0 +1,72 @@ +drop table if exists t1, t2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +2 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +update t1 set a=3 where a=2; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +drop table t1, t2; diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result new file mode 100644 index 00000000000..4a2389cd1ff --- /dev/null +++ b/mysql-test/r/ndb_multi.result @@ -0,0 +1,49 @@ +drop table if exists t1, t2, t3, t4; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like 
'handler_discover%'; +Variable_name Value +Handler_discover 0 +flush status; +select * from t1; +a +2 +update t1 set a=3 where a=2; +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 1 +create table t3 (a int not null primary key, b varchar(22), +c int, last_col text) engine=ndb; +insert into t3 values(1, 'Hi!', 89, 'Longtext column'); +create table t4 (pk int primary key, b int) engine=ndb; +select * from t1; +a +3 +select * from t3; +a b c last_col +1 Hi! 89 Longtext column +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 1 +show tables like 't4'; +Tables_in_test (t4) +t4 +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 2 +show tables; +Tables_in_test +t1 +t2 +t3 +t4 +drop table t1, t2, t3, t4; diff --git a/mysql-test/r/server_id.require b/mysql-test/r/server_id.require new file mode 100644 index 00000000000..adffcc483b1 --- /dev/null +++ b/mysql-test/r/server_id.require @@ -0,0 +1,2 @@ +Variable_name Value +server_id 1 diff --git a/mysql-test/r/server_id1.require b/mysql-test/r/server_id1.require new file mode 100644 index 00000000000..666c94ef633 --- /dev/null +++ b/mysql-test/r/server_id1.require @@ -0,0 +1,2 @@ +Variable_name Value +server_id 102 diff --git a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test new file mode 100644 index 00000000000..ac4a80cee30 --- /dev/null +++ b/mysql-test/t/ndb_cache_multi.test @@ -0,0 +1,63 @@ +-- source include/have_query_cache.inc +-- source include/have_ndb.inc + +--disable_warnings +drop table if exists t1, t2; +--enable_warnings + + +# Turn on and reset query cache on server1 +connection server1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + +# Turn on and reset query cache on server2 +connection server2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + + + +# Create test tables in NDB and load them into cache +# on server1 +connection server1; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + + +# Connect server2, load table in to cache, then update the table +connection server2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +update t1 set a=3 where a=2; + +# Connect to server1 and check that cache is invalidated +# and correct data is returned +connection server1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1, t2; + + diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test new file mode 100644 index 00000000000..65b1c7b2db4 --- /dev/null +++ b/mysql-test/t/ndb_multi.test @@ -0,0 +1,42 @@ +-- source include/have_ndb.inc + +--disable_warnings +drop table if exists t1, t2, t3, t4; +--enable_warnings + +flush status; + +# Create test tables on server1 +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 
value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like 'handler_discover%'; + +# Connect to server2 and use the tables from there +connection server2; +flush status; +select * from t1; +update t1 set a=3 where a=2; +show status like 'handler_discover%'; + +# Create a new table on server2 +create table t3 (a int not null primary key, b varchar(22), +c int, last_col text) engine=ndb; +insert into t3 values(1, 'Hi!', 89, 'Longtext column'); +create table t4 (pk int primary key, b int) engine=ndb; + +# Check that the tables are accessible from server1 +connection server1; +select * from t1; +select * from t3; +show status like 'handler_discover%'; +show tables like 't4'; +show status like 'handler_discover%'; +show tables; + +drop table t1, t2, t3, t4; + + From bf532e26d117201931718d5eab90ba93a17d3510 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Dec 2004 11:09:06 +0100 Subject: [PATCH 08/25] Moved test for multi ndb to have_ndb_multi --- mysql-test/include/have_multi_ndb.inc | 28 ++++++++++++++++++++++++++ mysql-test/include/have_ndb.inc | 29 --------------------------- mysql-test/t/ndb_cache_multi.test | 1 + mysql-test/t/ndb_multi.test | 2 ++ 4 files changed, 31 insertions(+), 29 deletions(-) create mode 100644 mysql-test/include/have_multi_ndb.inc diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc new file mode 100644 index 00000000000..d0c083cab86 --- /dev/null +++ b/mysql-test/include/have_multi_ndb.inc @@ -0,0 +1,28 @@ +# Setup connections to both MySQL Servers connected to the cluster +connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); +connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); + +# Check that server1 has NDB support +connection server1; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id.require show variables like "server_id"; +enable_query_log; + +# Check that server2 has NDB support +connection server2; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id1.require show variables like "server_id"; +enable_query_log; + +# Set the default connection to 'server1' +connection server1; diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc index 249f3362bce..d000a954733 100644 --- a/mysql-test/include/have_ndb.inc +++ b/mysql-test/include/have_ndb.inc @@ -3,32 +3,3 @@ disable_query_log; show variables like "have_ndbcluster"; enable_query_log; -# Setup connections to both MySQL Servers connected to the cluster -connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); -connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); - -# Check that server1 has NDB support -connection server1; -disable_query_log; ---disable_warnings -drop table if exists t1, t2; ---enable_warnings -flush tables; -@r/have_ndb.require show variables like "have_ndbcluster"; -@r/server_id.require show variables like "server_id"; -enable_query_log; - -# Check that server2 has NDB support -connection server2; -disable_query_log; ---disable_warnings -drop table if exists t1, t2; ---enable_warnings -flush tables; -@r/have_ndb.require show variables like "have_ndbcluster"; -@r/server_id1.require show variables like "server_id"; -enable_query_log; - -# Set the default connection to 'server1' -connection server1; - diff --git 
a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test index ac4a80cee30..7202b5f8558 100644 --- a/mysql-test/t/ndb_cache_multi.test +++ b/mysql-test/t/ndb_cache_multi.test @@ -1,5 +1,6 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc +-- source include/have_multi_ndb.inc --disable_warnings drop table if exists t1, t2; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index 65b1c7b2db4..9286721b677 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -1,4 +1,6 @@ -- source include/have_ndb.inc +-- source include/have_multi_ndb.inc + --disable_warnings drop table if exists t1, t2, t3, t4; From d6747f963e13d87c1a7bc952b95ceeba8ba2aada Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 14 Jan 2005 14:33:26 +0100 Subject: [PATCH 09/25] Merge problem FC3 fix sql/ha_ndbcluster.cc: Fixed merge problem, that occured when m_ndb where removed. Fix fo FC3 compile problem. BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + sql/ha_ndbcluster.cc | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 9e25aca5fa2..b352bceba8a 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -141,6 +141,7 @@ mronstrom@build.mysql.com mronstrom@mysql.com mskold@mysql.com msvensson@build.mysql.com +msvensson@neptunus.homeip.net mwagner@cash.mwagner.org mwagner@evoq.mwagner.org mwagner@here.mwagner.org diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 11dd20a46d6..0d83955a335 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2905,9 +2905,13 @@ int ha_ndbcluster::reset() DBUG_RETURN(1); } +static const char *ha_ndb_bas_ext[]= { ha_ndb_ext, NullS }; -const char **ha_ndbcluster::bas_ext() const -{ static const char *ext[]= { ha_ndb_ext, NullS }; return ext; } +const char** +ha_ndbcluster::bas_ext() const +{ + return ha_ndb_bas_ext; +} /* @@ -4644,8 +4648,9 @@ ha_ndbcluster::cached_table_registration( } { Uint64 commit_count; - m_ndb->setDatabaseName(m_dbname); - if (ndb_get_table_statistics(m_ndb, m_tabname, 0, &commit_count)) + Ndb *ndb= get_ndb(); + ndb->setDatabaseName(m_dbname); + if (ndb_get_table_statistics(ndb, m_tabname, 0, &commit_count)) { *engine_data= 0; DBUG_RETURN(FALSE); From 38e395aa325eb418cf92a6ce62646959ee2ed477 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 15:43:08 +0100 Subject: [PATCH 10/25] WL#2269 Enable query cache for NDB - Added a thread that fetches commit_count for open tables. This will mean that NDB will not have to be contacted for every use of a cached query. sql/ha_ndbcluster.cc: Added a thread that periodically will fetch commit_count for open tables and store that value in share. The commit count value is then used when query cache asks if a cached query can be used. The thread activation interval is regulated by the config variable ndb_cache_check_time, it's default value is 0 which means that NDB is contacted every time a cached query is reused. 
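An illustrative usage sketch (the statements mirror the accompanying ndb_cache2.test; table t1 and the exact sizes are simply the test's values, not new server behaviour):

    set GLOBAL query_cache_type=on;
    set GLOBAL query_cache_size=1355776;
    set GLOBAL ndb_cache_check_time=5;  -- util thread refreshes commit counts roughly every 5s
    select * from t1;                   -- first execution is stored in the query cache
    select * from t1;                   -- revalidated against the cached commit_count,
                                        -- without contacting NDB for every reuse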
sql/ha_ndbcluster.h: Added commit_count to share Added ndb_cache_check_time sql/mysqld.cc: Added config variable ndb_cache_check_time sql/set_var.cc: Added config variable ndb_cache_check_time --- mysql-test/r/ndb_cache2.result | 193 +++++++++++++++++ mysql-test/r/ndb_cache_multi2.result | 74 +++++++ mysql-test/t/ndb_cache2.test | 126 +++++++++++ mysql-test/t/ndb_cache_multi2.test | 71 +++++++ sql/ha_ndbcluster.cc | 306 ++++++++++++++++++++------- sql/ha_ndbcluster.h | 2 + sql/mysqld.cc | 7 +- sql/set_var.cc | 3 + 8 files changed, 708 insertions(+), 74 deletions(-) create mode 100644 mysql-test/r/ndb_cache2.result create mode 100644 mysql-test/r/ndb_cache_multi2.result create mode 100644 mysql-test/t/ndb_cache2.test create mode 100644 mysql-test/t/ndb_cache_multi2.test diff --git a/mysql-test/r/ndb_cache2.result b/mysql-test/r/ndb_cache2.result new file mode 100644 index 00000000000..ce10e9dab00 --- /dev/null +++ b/mysql-test/r/ndb_cache2.result @@ -0,0 +1,193 @@ +drop table if exists t1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=5; +reset query cache; +flush status; +CREATE TABLE t1 ( pk int not null primary key, +a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +update t1 set a=3 where pk=1; +select * from t1; +pk a b c +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +use test; +select * from t1; +pk a b c +2 7 8 Second row +1 3 3 First row +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 4 +update t1 set a=4 where b=3; +use test; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like 
"Qcache_hits"; +Variable_name Value +Qcache_hits 5 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +begin; +update t1 set a=5 where pk=1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +commit; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 8 +drop table t1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +SET GLOBAL query_cache_size=0; +SET GLOBAL ndb_cache_check_time=0; diff --git a/mysql-test/r/ndb_cache_multi2.result b/mysql-test/r/ndb_cache_multi2.result new file mode 100644 index 00000000000..6e435c071b5 --- /dev/null +++ b/mysql-test/r/ndb_cache_multi2.result @@ -0,0 +1,74 @@ +drop table if exists t1, t2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +2 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +update t1 set a=3 where a=2; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; 
+Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +drop table t1, t2; diff --git a/mysql-test/t/ndb_cache2.test b/mysql-test/t/ndb_cache2.test new file mode 100644 index 00000000000..5c1674a7021 --- /dev/null +++ b/mysql-test/t/ndb_cache2.test @@ -0,0 +1,126 @@ +-- source include/have_query_cache.inc +-- source include/have_ndb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + + +# Turn on and reset query cache +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +# Turn on thread that will fetch commit count for open tables +set GLOBAL ndb_cache_check_time=5; +reset query cache; +flush status; + +# Wait for thread to wake up and start "working" +sleep 20; + +# Create test table in NDB +CREATE TABLE t1 ( pk int not null primary key, + a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); + +# Perform one query which should be inerted in query cache +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Perform the same query and make sure the query cache is hit +select * from t1; +show status like "Qcache_hits"; + +# Update the table and make sure the correct data is returned +update t1 set a=3 where pk=1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Insert a new record and make sure the correct data is returned +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_hits"; + +# Perform a "new" query and make sure the query cache is not hit +select * from t1 where b=3; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_hits"; + +# Same query again... 
+select * from t1 where b=3; +show status like "Qcache_hits"; + +# Delete from the table +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Start another connection and check that the query cache is hit +connect (con1,localhost,root,,); +connection con1; +use test; +select * from t1; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Update the table and switch to other connection +update t1 set a=4 where b=3; +connect (con2,localhost,root,,); +connection con2; +use test; +show status like "Qcache_queries_in_cache"; +select * from t1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Use transactions and make sure the query cache is not updated until +# transaction is commited +begin; +update t1 set a=5 where pk=1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +commit; +# Sleep to let the query cache thread update commit count +sleep 10; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1; + +show status like "Qcache_queries_in_cache"; + +SET GLOBAL query_cache_size=0; +SET GLOBAL ndb_cache_check_time=0; + + diff --git a/mysql-test/t/ndb_cache_multi2.test b/mysql-test/t/ndb_cache_multi2.test new file mode 100644 index 00000000000..a9d008dba7c --- /dev/null +++ b/mysql-test/t/ndb_cache_multi2.test @@ -0,0 +1,71 @@ +-- source include/have_query_cache.inc +-- source include/have_ndb.inc +-- source include/have_multi_ndb.inc + +--disable_warnings +drop table if exists t1, t2; +--enable_warnings + + +# Turn on and reset query cache on server1 +connection server1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; + +# Turn on and reset query cache on server2 +connection server2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; + +# Sleep so that the query cache check thread has time to start +sleep 15; + + +# Create test tables in NDB and load them into cache +# on server1 +connection server1; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + + +# Connect server2, load table in to cache, then update the table +connection server2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +update t1 set 
a=3 where a=2; + +# Sleep so that the query cache check thread has time to run +sleep 5; + +# Connect to server1 and check that cache is invalidated +# and correct data is returned +connection server1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1, t2; + + diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 0d83955a335..4f6e243db93 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -86,6 +86,12 @@ static int unpackfrm(const void **data, uint *len, static int ndb_get_table_statistics(Ndb*, const char *, Uint64* rows, Uint64* commits); +// Util thread variables +static pthread_t ndb_util_thread; +pthread_mutex_t LOCK_ndb_util_thread; +pthread_cond_t COND_ndb_util_thread; +extern "C" pthread_handler_decl(ndb_util_thread_func, arg); +ulong ndb_cache_check_time; /* Dummy buffer to read zero pack_length fields @@ -3865,6 +3871,7 @@ ha_ndbcluster::~ha_ndbcluster() } + /* Open a table for further use - fetch metadata for this table from NDB @@ -3963,16 +3970,14 @@ void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb) Ndb* check_ndb_in_thd(THD* thd) { - DBUG_ENTER("check_ndb_in_thd"); - Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; - + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; if (!thd_ndb) { if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) - DBUG_RETURN(NULL); + return NULL; thd->transaction.thd_ndb= thd_ndb; } - DBUG_RETURN(thd_ndb->ndb); + return thd_ndb->ndb; } @@ -4310,13 +4315,21 @@ bool ndbcluster_init() (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, (hash_get_key) ndbcluster_get_key,0,0); pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); + pthread_mutex_init(&LOCK_ndb_util_thread,MY_MUTEX_INIT_FAST); + pthread_cond_init(&COND_ndb_util_thread,NULL); + + // Create utility thread + pthread_t tmp; + if (pthread_create(&tmp,&connection_attrib,ndb_util_thread_func,0)) + { + DBUG_PRINT("error", ("Could not create ndb utility thread")); + goto ndbcluster_init_error; + } + ndbcluster_inited= 1; -#ifdef USE_DISCOVER_ON_STARTUP - if (ndb_discover_tables() != 0) - goto ndbcluster_init_error; -#endif DBUG_RETURN(FALSE); + ndbcluster_init_error: ndbcluster_end(); DBUG_RETURN(TRUE); @@ -4326,12 +4339,19 @@ bool ndbcluster_init() /* End use of the NDB Cluster table handler - free all global variables allocated by - ndcluster_init() + ndbcluster_init() */ bool ndbcluster_end() { DBUG_ENTER("ndbcluster_end"); + + // Kill ndb utility thread + (void) pthread_mutex_lock(&LOCK_ndb_util_thread); + DBUG_PRINT("exit",("killing ndb util thread: %lx",ndb_util_thread)); + (void) pthread_cond_signal(&COND_ndb_util_thread); + (void) pthread_mutex_unlock(&LOCK_ndb_util_thread); + if(g_ndb) delete g_ndb; g_ndb= NULL; @@ -4342,6 +4362,8 @@ bool ndbcluster_end() DBUG_RETURN(0); hash_free(&ndbcluster_open_tables); pthread_mutex_destroy(&ndbcluster_mutex); + pthread_mutex_destroy(&LOCK_ndb_util_thread); + pthread_cond_destroy(&COND_ndb_util_thread); ndbcluster_inited= 0; DBUG_RETURN(0); } @@ -4534,12 +4556,53 @@ const char* ha_ndbcluster::index_type(uint key_number) return "HASH"; } } + uint8 ha_ndbcluster::table_cache_type() { DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT"); DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); } + +uint ndb_get_commitcount(THD* thd, char* dbname, char* tabname, + Uint64* commit_count) +{ + 
DBUG_ENTER("ndb_get_commitcount"); + + if (ndb_cache_check_time > 0) + { + // Use cached commit_count from share + char name[FN_REFLEN]; + NDB_SHARE* share; + (void)strxnmov(name, FN_REFLEN, + "./",dbname,"/",tabname,NullS); + DBUG_PRINT("info", ("name: %s", name)); + pthread_mutex_lock(&ndbcluster_mutex); + if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) name, + strlen(name)))) + { + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_RETURN(1); + } + *commit_count= share->commit_count; + DBUG_PRINT("info", ("commit_count: %d", *commit_count)); + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_RETURN(0); + } + + // Get commit_count from NDB + Ndb *ndb; + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(1); + ndb->setDatabaseName(dbname); + + if (ndb_get_table_statistics(ndb, tabname, 0, commit_count)) + DBUG_RETURN(1); + DBUG_RETURN(0); +} + + static my_bool ndbcluster_cache_retrieval_allowed( @@ -4561,51 +4624,33 @@ ndbcluster_cache_retrieval_allowed( all cached queries with this table*/ { DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); - char tabname[128]; - char *dbname= full_name; - my_bool is_autocommit; - { - int dbname_len= strlen(full_name); - int tabname_len= full_name_len-dbname_len-1; - memcpy(tabname, full_name+dbname_len+1, tabname_len); - tabname[tabname_len]= '\0'; - } - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - is_autocommit = FALSE; - else - is_autocommit = TRUE; + + Uint64 commit_count; + bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + char* dbname= full_name; + char* tabname= dbname+strlen(dbname)+1; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", - dbname,tabname,is_autocommit)); + dbname, tabname, is_autocommit)); + if (!is_autocommit) + DBUG_RETURN(FALSE); + + if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) { - DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", - thd->options & OPTION_NOT_AUTOCOMMIT, - thd->options & OPTION_BEGIN)); - // ToDo enable cache inside a transaction - // no need to invalidate though so leave *engine_data + *engine_data= *engine_data+1; // invalidate DBUG_RETURN(FALSE); } + DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu", + *engine_data, commit_count)); + if (*engine_data != commit_count) { - Ndb *ndb; - Uint64 commit_count; - if (!(ndb= check_ndb_in_thd(thd))) - { - *engine_data= *engine_data+1; // invalidate - DBUG_RETURN(FALSE); - } - ndb->setDatabaseName(dbname); - if (ndb_get_table_statistics(ndb, tabname, 0, &commit_count)) - { - *engine_data= *engine_data+1; // invalidate - DBUG_RETURN(FALSE); - } - if (*engine_data != commit_count) - { - *engine_data= commit_count; // invalidate - DBUG_RETURN(FALSE); - } + *engine_data= commit_count; // invalidate + DBUG_PRINT("exit",("Do not use cache, commit_count has changed")); + DBUG_RETURN(FALSE); } - DBUG_PRINT("exit",("*engine_data=%d ok, use cache",*engine_data)); + + DBUG_PRINT("exit",("OK to use cache, *engine_data=%llu",*engine_data)); DBUG_RETURN(TRUE); } @@ -4630,35 +4675,24 @@ ha_ndbcluster::cached_table_registration( invalidate all cached queries with this table*/ { DBUG_ENTER("ha_ndbcluster::cached_table_registration"); - my_bool is_autocommit; - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - is_autocommit = FALSE; - else - is_autocommit = TRUE; + + bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", m_dbname,m_tabname,is_autocommit)); if (!is_autocommit) + 
DBUG_RETURN(FALSE); + + + Uint64 commit_count; + if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count)) { - DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", - thd->options & OPTION_NOT_AUTOCOMMIT, - thd->options & OPTION_BEGIN)); - // ToDo enable cache inside a transaction - // no need to invalidate though so leave *engine_data + *engine_data= 0; + DBUG_PRINT("error", ("Could not get commitcount")) DBUG_RETURN(FALSE); } - { - Uint64 commit_count; - Ndb *ndb= get_ndb(); - ndb->setDatabaseName(m_dbname); - if (ndb_get_table_statistics(ndb, m_tabname, 0, &commit_count)) - { - *engine_data= 0; - DBUG_RETURN(FALSE); - } - *engine_data= commit_count; - } + *engine_data= commit_count; *engine_callback= ndbcluster_cache_retrieval_allowed; - DBUG_PRINT("exit",("*engine_data=%d", *engine_data)); + DBUG_PRINT("exit",("*engine_data=%llu", *engine_data)); DBUG_RETURN(TRUE); } @@ -4700,8 +4734,14 @@ static NDB_SHARE* get_share(const char *table_name) } thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + share->commit_count= 0; } } + DBUG_PRINT("share", + ("table_name: %s, length: %d, use_count: %d, commit_count: %d", + share->table_name, share->table_name_length, share->use_count, + share->commit_count)); + share->use_count++; pthread_mutex_unlock(&ndbcluster_mutex); return share; @@ -4868,10 +4908,10 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, ndb->closeTransaction(pTrans); if(row_count) - * row_count= sum_rows; + *row_count= sum_rows; if(commit_count) - * commit_count= sum_commits; - DBUG_PRINT("exit", ("records: %u commits: %u", sum_rows, sum_commits)); + *commit_count= sum_commits; + DBUG_PRINT("exit", ("records: %llu commits: %llu", sum_rows, sum_commits)); DBUG_RETURN(0); } while(0); @@ -4906,4 +4946,124 @@ int ha_ndbcluster::write_ndb_file() DBUG_RETURN(error); } + +// Utility thread main loop +extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused))) +{ + THD *thd; // needs to be first for thread_stack + int error = 0; + struct timespec abstime; + + my_thread_init(); + DBUG_ENTER("ndb_util_thread"); + DBUG_PRINT("enter", ("ndb_cache_check_time: %d", ndb_cache_check_time)); + + thd= new THD; // note that contructor of THD uses DBUG_ ! 
+ THD_CHECK_SENTRY(thd); + + pthread_detach_this_thread(); + ndb_util_thread = pthread_self(); + + thd->thread_stack = (char*)&thd; // remember where our stack is + if (thd->store_globals()) + { + thd->cleanup(); + delete thd; + DBUG_RETURN(NULL); + } + + List util_open_tables; + set_timespec(abstime, ndb_cache_check_time); + for (;;) + { + + pthread_mutex_lock(&LOCK_ndb_util_thread); + error= pthread_cond_timedwait(&COND_ndb_util_thread, + &LOCK_ndb_util_thread, + &abstime); + pthread_mutex_unlock(&LOCK_ndb_util_thread); + + DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %d", + ndb_cache_check_time)); + + if (abort_loop) + break; // Shutting down server + + if (ndb_cache_check_time == 0) + { + set_timespec(abstime, 10); + continue; + } + + // Set new time to wake up + set_timespec(abstime, ndb_cache_check_time); + + // Lock mutex and fill list with pointers to all open tables + NDB_SHARE *share; + pthread_mutex_lock(&ndbcluster_mutex); + for (uint i= 0; i < ndbcluster_open_tables.records; i++) + { + share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i); + share->use_count++; // Make sure the table can't be closed + + DBUG_PRINT("ndb_util_thread", + ("Found open table[%d]: %s, use_count: %d", + i, share->table_name, share->use_count)); + + // Store pointer to table + util_open_tables.push_back(share); + } + pthread_mutex_unlock(&ndbcluster_mutex); + + + // Iterate through the open files list + List_iterator_fast it(util_open_tables); + while (share=it++) + { + // Split tab- and dbname + char buf[FN_REFLEN]; + char *tabname, *db; + uint length= dirname_length(share->table_name); + tabname= share->table_name+length; + memcpy(buf, share->table_name, length-1); + buf[length-1]= 0; + db= buf+dirname_length(buf); + DBUG_PRINT("ndb_util_thread", + ("Fetching commit count for: %s, db: %s, tab: %s", + share->table_name, db, tabname)); + + // Contact NDB to get commit count for table + g_ndb->setDatabaseName(db); + Uint64 rows, commit_count; + if(ndb_get_table_statistics(g_ndb, tabname, + &rows, &commit_count) == 0){ + DBUG_PRINT("ndb_util_thread", + ("Table: %s, rows: %llu, commit_count: %llu", + share->table_name, rows, commit_count)); + share->commit_count= commit_count; + } + else + { + DBUG_PRINT("ndb_util_thread", + ("Error: Could not get commit count for table %s", + share->table_name)); + share->commit_count++; // Invalidate + } + // Decrease the use count and possibly free share + free_share(share); + } + + // Clear the list of open tables + util_open_tables.empty(); + + } + + thd->cleanup(); + delete thd; + DBUG_PRINT("exit", ("ndb_util_thread")); + my_thread_end(); + DBUG_RETURN(NULL); +} + + #endif /* HAVE_NDBCLUSTER_DB */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index b5cf727ead7..df88afa678a 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -38,6 +38,7 @@ class NdbBlob; // connectstring to cluster if given by mysqld extern const char *ndbcluster_connectstring; +extern ulong ndb_cache_check_time; typedef enum ndb_index_type { UNDEFINED_INDEX = 0, @@ -59,6 +60,7 @@ typedef struct st_ndbcluster_share { pthread_mutex_t mutex; char *table_name; uint table_name_length,use_count; + uint commit_count; } NDB_SHARE; /* diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d1fef3519bf..671f38898c1 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -284,6 +284,7 @@ my_bool opt_console= 0, opt_bdb, opt_innodb, opt_isam, opt_ndbcluster; #ifdef HAVE_NDBCLUSTER_DB const char *opt_ndbcluster_connectstring= 0; my_bool opt_ndb_shm, 
opt_ndb_optimized_node_selection; +ulong opt_ndb_cache_check_time= 0; #endif my_bool opt_readonly, use_temp_pool, relay_log_purge; my_bool opt_sync_bdb_logs, opt_sync_frm; @@ -4016,7 +4017,7 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, - OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, + OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4498,6 +4499,10 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &opt_ndb_optimized_node_selection, (gptr*) &opt_ndb_optimized_node_selection, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + { "ndb_cache_check_time", OPT_NDB_CACHE_CHECK_TIME, + "A dedicated thread is created to update cached commit count value at the given interval.", + (gptr*) &opt_ndb_cache_check_time, (gptr*) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG, + 0, 0, LONG_TIMEOUT, 0, 1, 0}, #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index 082c55db188..58c30c8e9bc 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -370,6 +370,7 @@ sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); +sys_var_long_ptr sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time); #endif /* Time/date/datetime formats */ @@ -630,6 +631,7 @@ sys_var *sys_variables[]= &sys_ndb_force_send, &sys_ndb_use_exact_count, &sys_ndb_use_transactions, + &sys_ndb_cache_check_time, #endif &sys_unique_checks, &sys_warning_count @@ -797,6 +799,7 @@ struct show_var_st init_vars[]= { {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, + {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS}, #endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, {sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS}, From d81a0bede23ca74222252b6f43bd85ead5d7be2a Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 08:38:24 +0200 Subject: [PATCH 11/25] Fix for BUG#8023. Allow LIMIT clause after DUAL. mysql-test/r/limit.result: Added test result for BUG#8023. mysql-test/t/limit.test: Added test for BUG#8023. sql/sql_yacc.yy: Allow the specification of a LIMIT clause after DUAL. This is needed for queries as: select a from t1 union all select 1 from dual limit 1; In this query LIMIT is applied to the whole UNION, so it makes sense, however, the current parser did not allow any clause after DUAL. 
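Both forms added to mysql-test/t/limit.test illustrate this (sketch; t1 is the test's single-column table containing the value 1):

    select 1 as a from t1 union all select 1 from dual limit 1;
    (select 1 as a from t1) union all (select 1 from dual) limit 1;

In either form the LIMIT binds to the whole UNION rather than to the DUAL branch, and each query returns a single row.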
--- mysql-test/r/limit.result | 9 +++++++++ mysql-test/t/limit.test | 10 ++++++++++ sql/sql_yacc.yy | 9 +++++---- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/limit.result b/mysql-test/r/limit.result index c82105e6a49..6a3d2bffab0 100644 --- a/mysql-test/r/limit.result +++ b/mysql-test/r/limit.result @@ -67,3 +67,12 @@ SELECT * FROM t1; id id2 3 0 DROP TABLE t1; +create table t1 (a integer); +insert into t1 values (1); +select 1 as a from t1 union all select 1 from dual limit 1; +a +1 +(select 1 as a from t1) union all (select 1 from dual) limit 1; +a +1 +drop table t1; diff --git a/mysql-test/t/limit.test b/mysql-test/t/limit.test index 61c57c9b468..28b287a5d4a 100644 --- a/mysql-test/t/limit.test +++ b/mysql-test/t/limit.test @@ -49,3 +49,13 @@ SELECT * FROM t1; DELETE FROM t1 WHERE id2 = 0 ORDER BY id desc LIMIT 1; SELECT * FROM t1; DROP TABLE t1; + +# +# Bug#8023 - limit on UNION with from DUAL, causes syntax error +# +create table t1 (a integer); +insert into t1 values (1); +# both queries must return one row +select 1 as a from t1 union all select 1 from dual limit 1; +(select 1 as a from t1) union all (select 1 from dual) limit 1; +drop table t1; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 1e51d8fb82d..e70efe14557 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -2455,10 +2455,11 @@ select_into: select_from: FROM join_table_list where_clause group_clause having_clause opt_order_clause opt_limit_clause procedure_clause - | FROM DUAL_SYM /* oracle compatibility: oracle always requires FROM - clause, and DUAL is system table without fields. - Is "SELECT 1 FROM DUAL" any better than - "SELECT 1" ? Hmmm :) */ + | FROM DUAL_SYM opt_limit_clause + /* oracle compatibility: oracle always requires FROM clause, + and DUAL is system table without fields. + Is "SELECT 1 FROM DUAL" any better than "SELECT 1" ? + Hmmm :) */ ; select_options: From c2e9e15e9f44b2149286a9b6b784f93fe9b2938e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 21:50:56 +0100 Subject: [PATCH 12/25] mtr_cases.pl: new file --- mysql-test/lib/mtr_cases.pl | 270 ++++++++++++++++++++++++++++++++++++ 1 file changed, 270 insertions(+) create mode 100644 mysql-test/lib/mtr_cases.pl diff --git a/mysql-test/lib/mtr_cases.pl b/mysql-test/lib/mtr_cases.pl new file mode 100644 index 00000000000..5977bb380cf --- /dev/null +++ b/mysql-test/lib/mtr_cases.pl @@ -0,0 +1,270 @@ +# -*- cperl -*- + +# This is a library file used by the Perl version of mysql-test-run, +# and is part of the translation of the Bourne shell script with the +# same name. + +use strict; + +sub collect_test_cases ($); +sub collect_one_test_case ($$$$$); + +############################################################################## +# +# Collect information about test cases we are to run +# +############################################################################## + +sub collect_test_cases ($) { + my $suite= shift; # Test suite name + + my $testdir; + my $resdir; + + if ( $suite eq "main" ) + { + $testdir= "$::glob_mysql_test_dir/t"; + $resdir= "$::glob_mysql_test_dir/r"; + } + else + { + $testdir= "$::glob_mysql_test_dir/suite/$suite/t"; + $resdir= "$::glob_mysql_test_dir/suite/$suite/r"; + } + + my $cases = []; # Array of hash, will be array of C struct + + opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!"); + + if ( @::opt_cases ) + { + foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort + my $elem= "$tname.test"; + if ( ! 
-f "$testdir/$elem") + { + mtr_error("Test case $tname ($testdir/$elem) is not found"); + } + collect_one_test_case($testdir,$resdir,$tname,$elem,$cases); + } + closedir TESTDIR; + } + else + { + foreach my $elem ( sort readdir(TESTDIR) ) { + my $tname= mtr_match_extension($elem,"test"); + next if ! defined $tname; + next if $::opt_do_test and ! defined mtr_match_prefix($elem,$::opt_do_test); + + collect_one_test_case($testdir,$resdir,$tname,$elem,$cases); + } + closedir TESTDIR; + } + + # To speed things up, we sort first in if the test require a restart + # or not, second in alphanumeric order. + +# @$cases = sort { +# if ( $a->{'master_restart'} and $b->{'master_restart'} or +# ! $a->{'master_restart'} and ! $b->{'master_restart'} ) +# { +# return $a->{'name'} cmp $b->{'name'}; +# } +# if ( $a->{'master_restart'} ) +# { +# return 1; # Is greater +# } +# else +# { +# return -1; # Is less +# } +# } @$cases; + + return $cases; +} + + +############################################################################## +# +# Collect information about a single test case +# +############################################################################## + + +sub collect_one_test_case($$$$$) { + my $testdir= shift; + my $resdir= shift; + my $tname= shift; + my $elem= shift; + my $cases= shift; + + my $path= "$testdir/$elem"; + + # ---------------------------------------------------------------------- + # Skip some tests silently + # ---------------------------------------------------------------------- + + if ( $::opt_start_from and $tname lt $::opt_start_from ) + { + return; + } + + # ---------------------------------------------------------------------- + # Skip some tests but include in list, just mark them to skip + # ---------------------------------------------------------------------- + + my $tinfo= {}; + $tinfo->{'name'}= $tname; + $tinfo->{'result_file'}= "$resdir/$tname.result"; + push(@$cases, $tinfo); + + if ( $::opt_skip_test and defined mtr_match_prefix($tname,$::opt_skip_test) ) + { + $tinfo->{'skip'}= 1; + return; + } + + # FIXME temporary solution, we have a hard coded list of test cases to + # skip if we are using the embedded server + + if ( $::glob_use_embedded_server and + mtr_match_any_exact($tname,\@::skip_if_embedded_server) ) + { + $tinfo->{'skip'}= 1; + return; + } + + # ---------------------------------------------------------------------- + # Collect information about test case + # ---------------------------------------------------------------------- + + $tinfo->{'path'}= $path; + $tinfo->{'timezone'}= "GMT-3"; # for UNIX_TIMESTAMP tests to work + + if ( defined mtr_match_prefix($tname,"rpl") ) + { + if ( $::opt_skip_rpl ) + { + $tinfo->{'skip'}= 1; + return; + } + + $tinfo->{'slave_num'}= 1; # Default, use one slave + + # FIXME currently we always restart slaves + $tinfo->{'slave_restart'}= 1; + + if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' ) + { +# $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange + } + } + + # FIXME what about embedded_server + ndbcluster, skip ?! 
+ + my $master_opt_file= "$testdir/$tname-master.opt"; + my $slave_opt_file= "$testdir/$tname-slave.opt"; + my $slave_mi_file= "$testdir/$tname.slave-mi"; + my $master_sh= "$testdir/$tname-master.sh"; + my $slave_sh= "$testdir/$tname-slave.sh"; + my $disabled= "$testdir/$tname.disabled"; + + $tinfo->{'master_opt'}= []; + $tinfo->{'slave_opt'}= []; + $tinfo->{'slave_mi'}= []; + + if ( -f $master_opt_file ) + { + $tinfo->{'master_restart'}= 1; # We think so for now + # This is a dirty hack from old mysql-test-run, we use the opt file + # to flag other things as well, it is not a opt list at all + my $extra_master_opt= mtr_get_opts_from_file($master_opt_file); + + foreach my $opt (@$extra_master_opt) + { + my $value; + + $value= mtr_match_prefix($opt, "--timezone="); + + if ( defined $value ) + { + $tinfo->{'timezone'}= $value; + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + + $value= mtr_match_prefix($opt, "--result-file="); + + if ( defined $value ) + { + $tinfo->{'result_file'}= "r/$value.result"; + if ( $::opt_result_ext and $::opt_record or + -f "$tinfo->{'result_file'}$::opt_result_ext") + { + $tinfo->{'result_file'}.= $::opt_result_ext; + } + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + } + + $tinfo->{'master_opt'}= $extra_master_opt; + } + + if ( -f $slave_opt_file ) + { + $tinfo->{'slave_opt'}= mtr_get_opts_from_file($slave_opt_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $slave_mi_file ) + { + $tinfo->{'slave_mi'}= mtr_get_opts_from_file($slave_mi_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $master_sh ) + { + if ( $::glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'master_sh'}= $master_sh; + $tinfo->{'master_restart'}= 1; + } + } + + if ( -f $slave_sh ) + { + if ( $::glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'slave_sh'}= $slave_sh; + $tinfo->{'slave_restart'}= 1; + } + } + + if ( -f $disabled ) + { + $tinfo->{'skip'}= 1; + $tinfo->{'disable'}= 1; # Sub type of 'skip' + $tinfo->{'comment'}= mtr_fromfile($disabled); + } + + # We can't restart a running server that may be in use + + if ( $::glob_use_running_server and + ( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) ) + { + $tinfo->{'skip'}= 1; + } +} + + +1; From 3c925ee0f1b3387e6df952de8f86c618f11c1a8d Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 21:11:12 +0000 Subject: [PATCH 13/25] Bug#8057 Fix crash with LAST_INSERT_ID() in UPDATE, Tests included, mysql-test/r/update.result: Bug#8057 Test for bug mysql-test/t/update.test: Bug#8057 Test for bug sql/item_func.cc: Bug#8057 Don't create new Item in val_int() --- mysql-test/r/update.result | 7 +++++++ mysql-test/t/update.test | 9 +++++++++ sql/item_func.cc | 8 ++------ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/update.result b/mysql-test/r/update.result index beab6105f79..ac370db9ecc 100644 --- a/mysql-test/r/update.result +++ b/mysql-test/r/update.result @@ -212,3 +212,10 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20); update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1"; update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10; drop table t1, t2; +create table t1 (id int not null auto_increment primary key, id_str varchar(32)); +insert into t1 (id_str) values ("test"); +update t1 set id_str = concat(id_str, id) where id = last_insert_id(); +select * from t1; +id id_str +1 test1 +drop table t1; diff --git a/mysql-test/t/update.test b/mysql-test/t/update.test 
index 704263b1216..04192f25ac8 100644 --- a/mysql-test/t/update.test +++ b/mysql-test/t/update.test @@ -170,3 +170,12 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20); update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1"; update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10; drop table t1, t2; + +# +# Bug #8057 +# +create table t1 (id int not null auto_increment primary key, id_str varchar(32)); +insert into t1 (id_str) values ("test"); +update t1 set id_str = concat(id_str, id) where id = last_insert_id(); +select * from t1; +drop table t1; diff --git a/sql/item_func.cc b/sql/item_func.cc index 7125f4704b8..03b5688efc2 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2284,14 +2284,10 @@ longlong Item_func_last_insert_id::val_int() longlong value=args[0]->val_int(); current_thd->insert_id(value); null_value=args[0]->null_value; - return value; } else - { - Item *it= get_system_var(current_thd, OPT_SESSION, "last_insert_id", 14, - "last_insert_id()"); - return it->val_int(); - } + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + return current_thd->insert_id(); } /* This function is just used to test speed of different functions */ From 9f7c9aa7d5eed311e3d40c8f7d1a55abb7d4566c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 23:56:13 +0100 Subject: [PATCH 14/25] ndb - sol9x86: cc -xO3: fix optimizer error. ndb/src/common/util/NdbSqlUtil.cpp: sol9x86: cc -xO3: fix optimizer error. Note: same expression remains in Field_newdate::val_int(). --- ndb/src/common/util/NdbSqlUtil.cpp | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index 6b23da774af..53fa5d69215 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -526,6 +526,7 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32 union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; u2.p = p2; +#ifdef ndb_date_sol9x86_cc_xO3_madness // from Field_newdate::val_int Uint64 j1 = uint3korr(u1.v); Uint64 j2 = uint3korr(u2.v); @@ -536,6 +537,33 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32 if (j1 > j2) return +1; return 0; +#else + uint j1 = uint3korr(u1.v); + uint j2 = uint3korr(u2.v); + uint d1 = (j1 & 31); + uint d2 = (j2 & 31); + j1 = (j1 >> 5); + j2 = (j2 >> 5); + uint m1 = (j1 & 15); + uint m2 = (j2 & 15); + j1 = (j1 >> 4); + j2 = (j2 >> 4); + uint y1 = j1; + uint y2 = j2; + if (y1 < y2) + return -1; + if (y1 > y2) + return +1; + if (m1 < m2) + return -1; + if (m1 > m2) + return +1; + if (d1 < d2) + return -1; + if (d1 > d2) + return +1; + return 0; +#endif #endif } From ebda548d0d26f49a05d424f186e0b1d92c90925e Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 09:14:22 +0300 Subject: [PATCH 15/25] Fix for BUG#7716: in in_string::set() take into account that the value returned by item->val_str() may be a substring of the passed string. Disallow string=its_substring assignment in String::operator=(). mysql-test/r/func_misc.result: Testcase for BUG#7716 mysql-test/t/func_misc.test: Testcase for BUG#7716 sql/item_cmpfunc.cc: Fix for BUG#7716: in in_string::set() take into account that the string returned by item->val_str(S) may be not S but use the buffer owned by S. 
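A minimal illustration of the affected pattern, taken from the new func_misc test (the String returned by val_str() may live in the buffer owned by the destination String, so it has to be copied before that buffer is replaced):

    create table t1 ( a timestamp );
    insert into t1 values ( '2004-01-06 12:34' );
    select a from t1 where left(a+0,6) in ( left(20040106,6) );

With the fix the SELECT returns the inserted row, as recorded in the new func_misc.result.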
sql/sql_string.h: * Added assert: String& String::operator=(const String&) may not be used to do assignments like str = string_that_uses_buffer_owned_by_str * Added String::uses_buffer_owned_by(). --- mysql-test/r/func_misc.result | 21 +++++++++++++++++++++ mysql-test/t/func_misc.test | 15 +++++++++++++++ sql/item_cmpfunc.cc | 4 ++++ sql/sql_string.h | 10 ++++++++++ 4 files changed, 50 insertions(+) diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index 5a9f0f68228..2d464c891bf 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -28,3 +28,24 @@ length(format('nan', 2)) > 0 select concat("$",format(2500,2)); concat("$",format(2500,2)) $2,500.00 +create table t1 ( a timestamp ); +insert into t1 values ( '2004-01-06 12:34' ); +select a from t1 where left(a+0,6) in ( left(20040106,6) ); +a +2004-01-06 12:34:00 +select a from t1 where left(a+0,6) = ( left(20040106,6) ); +a +2004-01-06 12:34:00 +select a from t1 where right(a+0,6) in ( right(20040106123400,6) ); +a +2004-01-06 12:34:00 +select a from t1 where right(a+0,6) = ( right(20040106123400,6) ); +a +2004-01-06 12:34:00 +select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) ); +a +2004-01-06 12:34:00 +select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) ); +a +2004-01-06 12:34:00 +drop table t1; diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index e73f2a1b26c..89aba7ee583 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -23,3 +23,18 @@ select length(format('nan', 2)) > 0; # Test for bug #628 # select concat("$",format(2500,2)); + +# Test for BUG#7716 +create table t1 ( a timestamp ); +insert into t1 values ( '2004-01-06 12:34' ); +select a from t1 where left(a+0,6) in ( left(20040106,6) ); +select a from t1 where left(a+0,6) = ( left(20040106,6) ); + +select a from t1 where right(a+0,6) in ( right(20040106123400,6) ); +select a from t1 where right(a+0,6) = ( right(20040106123400,6) ); + +select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) ); +select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) ); + +drop table t1; + diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c5e6d520ab7..46ef3281dd1 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1503,7 +1503,11 @@ void in_string::set(uint pos,Item *item) String *str=((String*) base)+pos; String *res=item->val_str(str); if (res && res != str) + { + if (res->uses_buffer_owned_by(str)) + res->copy(); *str= *res; + } if (!str->charset()) { CHARSET_INFO *cs; diff --git a/sql/sql_string.h b/sql/sql_string.h index a8fb9574c0b..9136dddbbf2 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -182,6 +182,11 @@ public: { if (&s != this) { + /* + It is forbidden to do assignments like + some_string = substring_of_that_string + */ + DBUG_ASSERT(!s.uses_buffer_owned_by(this)); free(); Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length; alloced=0; @@ -313,4 +318,9 @@ public: /* Swap two string objects. Efficient way to exchange data without memcpy. */ void swap(String &s); + + inline bool uses_buffer_owned_by(const String *s) const + { + return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length); + } }; From fa17ed6895a3278c21934742272f75012e40656d Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 13:51:37 +0400 Subject: [PATCH 16/25] type_float.result.es updated mysql-test/r/type_float.result.es: Updated. 
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + mysql-test/r/type_float.result.es | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 0cd599d040a..ef28bf38f48 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -197,6 +197,7 @@ ram@gw.mysql.r18.ru ram@gw.udmsearch.izhnet.ru ram@mysql.r18.ru ram@ram.(none) +ramil@mysql.com ranger@regul.home.lan rburnett@build.mysql.com reggie@bob.(none) diff --git a/mysql-test/r/type_float.result.es b/mysql-test/r/type_float.result.es index 2751e6cb33b..f2639ef545a 100644 --- a/mysql-test/r/type_float.result.es +++ b/mysql-test/r/type_float.result.es @@ -1,4 +1,4 @@ -drop table if exists t1; +drop table if exists t1,t2; SELECT 10,10.0,10.,.1e+2,100.0e-1; 10 10.0 10. .1e+2 100.0e-1 10 10.0 10 10 10 @@ -8,6 +8,9 @@ SELECT 6e-05, -6e-05, --6e-05, -6e-05+1.000000; SELECT 1e1,1.e1,1.0e1,1e+1,1.e+1,1.0e+1,1e-1,1.e-1,1.0e-1; 1e1 1.e1 1.0e1 1e+1 1.e+1 1.0e+1 1e-1 1.e-1 1.0e-1 10 10 10 10 10 10 0.1 0.1 0.1 +SELECT 0.001e+1,0.001e-1, -0.001e+01,-0.001e-01; +0.001e+1 0.001e-1 -0.001e+01 -0.001e-01 +0.01 0.0001 -0.01 -0.0001 create table t1 (f1 float(24),f2 float(52)); show full columns from t1; Field Type Collation Null Key Default Extra Privileges Comment @@ -143,6 +146,15 @@ drop table t1; create table t1 (f float(54)); ERROR 42000: Incorrect column specifier for column 'f' drop table if exists t1; +create table t1 (d1 double, d2 double unsigned); +insert into t1 set d1 = -1.0; +update t1 set d2 = d1; +Warnings: +Warning 1264 Out of range value adjusted for column 'd2' at row 1 +select * from t1; +d1 d2 +-1 0 +drop table t1; create table t1 (f float(4,3)); insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11"); Warnings: From 03bc59970610527a86a76ed5e8b993a6a3769d09 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 10:56:53 +0100 Subject: [PATCH 17/25] Merge of query cache from 4.1 to 5.0 mysql-test/r/ndb_cache.result: Added 'order by' to select's mysql-test/r/ndb_cache2.result: Added 'order by' to select's mysql-test/t/ndb_cache.test: Added 'order by' to select's mysql-test/t/ndb_cache2.test: Added 'order by' to select's sql/ha_innodb.h: Changed function name sql/ha_ndbcluster.cc: Merge from query cache from 4.1 to 5.0 Added better comments ndb_get_table_statistics had changed, so there where some adaptions to make sql/ha_ndbcluster.h: Changed name of function sql/handler.h: Changed name of function sql/sql_cache.cc: Changed name of function --- mysql-test/r/ndb_cache.result | 26 +++---- mysql-test/r/ndb_cache2.result | 24 +++---- mysql-test/t/ndb_cache.test | 20 +++--- mysql-test/t/ndb_cache2.test | 20 +++--- sql/ha_innodb.h | 8 +-- sql/ha_ndbcluster.cc | 120 +++++++++++++++++++-------------- sql/ha_ndbcluster.h | 10 +-- sql/handler.h | 11 +-- sql/sql_cache.cc | 9 +-- 9 files changed, 135 insertions(+), 113 deletions(-) diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result index 7423771e026..478663b1aa1 100644 --- a/mysql-test/r/ndb_cache.result +++ b/mysql-test/r/ndb_cache.result @@ -36,22 +36,22 @@ Variable_name Value Qcache_hits 1 insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk; pk a b c +1 3 3 First row 2 7 8 Second row 4 5 6 Fourth row -1 3 3 First row show status like "Qcache_inserts"; Variable_name Value Qcache_inserts 3 show status like "Qcache_hits"; 
Variable_name Value Qcache_hits 1 -select * from t1; +select * from t1 order by pk; pk a b c +1 3 3 First row 2 7 8 Second row 4 5 6 Fourth row -1 3 3 First row show status like "Qcache_hits"; Variable_name Value Qcache_hits 2 @@ -81,10 +81,10 @@ show status like "Qcache_hits"; Variable_name Value Qcache_hits 3 use test; -select * from t1; +select * from t1 order by pk; pk a b c -2 7 8 Second row 1 3 3 First row +2 7 8 Second row select * from t1 where b=3; pk a b c 1 3 3 First row @@ -96,11 +96,11 @@ use test; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -110,11 +110,11 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 5 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -138,7 +138,7 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -161,7 +161,7 @@ Qcache_inserts 8 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row @@ -171,7 +171,7 @@ Qcache_inserts 9 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row diff --git a/mysql-test/r/ndb_cache2.result b/mysql-test/r/ndb_cache2.result index ce10e9dab00..de4b3e31874 100644 --- a/mysql-test/r/ndb_cache2.result +++ b/mysql-test/r/ndb_cache2.result @@ -37,10 +37,10 @@ Variable_name Value Qcache_hits 1 insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk desc; pk a b c -2 7 8 Second row 4 5 6 Fourth row +2 7 8 Second row 1 3 3 First row show status like "Qcache_inserts"; Variable_name Value @@ -48,10 +48,10 @@ Qcache_inserts 3 show status like "Qcache_hits"; Variable_name Value Qcache_hits 1 -select * from t1; +select * from t1 order by pk desc; pk a b c -2 7 8 Second row 4 5 6 Fourth row +2 7 8 Second row 1 3 3 First row show status like "Qcache_hits"; Variable_name Value @@ -82,7 +82,7 @@ show status like "Qcache_hits"; Variable_name Value Qcache_hits 3 use test; -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 3 3 First row @@ -97,11 +97,11 @@ use test; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -111,11 +111,11 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 5 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -139,7 +139,7 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -162,7 +162,7 @@ Qcache_inserts 8 
show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row @@ -172,7 +172,7 @@ Qcache_inserts 9 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index 8bdcbe17728..e899e94e4ac 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -35,10 +35,10 @@ show status like "Qcache_hits"; # Insert a new record and make sure the correct data is returned insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk; show status like "Qcache_inserts"; show status like "Qcache_hits"; -select * from t1; +select * from t1 order by pk; show status like "Qcache_hits"; # Perform a "new" query and make sure the query cache is not hit @@ -60,7 +60,7 @@ show status like "Qcache_hits"; connect (con1,localhost,root,,); connection con1; use test; -select * from t1; +select * from t1 order by pk; select * from t1 where b=3; show status like "Qcache_hits"; @@ -70,13 +70,13 @@ connect (con2,localhost,root,,); connection con2; use test; show status like "Qcache_queries_in_cache"; -select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -92,7 +92,7 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -103,11 +103,11 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; diff --git a/mysql-test/t/ndb_cache2.test b/mysql-test/t/ndb_cache2.test index 5c1674a7021..c46935e8601 100644 --- a/mysql-test/t/ndb_cache2.test +++ b/mysql-test/t/ndb_cache2.test @@ -41,10 +41,10 @@ show status like "Qcache_hits"; # Insert a new record and make sure the correct data is returned insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_hits"; # Perform a "new" query and make sure the query cache is not hit @@ -66,7 +66,7 @@ show status like "Qcache_hits"; connect (con1,localhost,root,,); connection con1; use test; -select * from t1; +select * from t1 order by pk desc; select * from t1 where b=3; show status like "Qcache_hits"; @@ -76,13 +76,13 @@ connect (con2,localhost,root,,); connection con2; use test; show status like "Qcache_queries_in_cache"; 
-select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -95,7 +95,7 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -107,11 +107,11 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index c5d501f3702..cca33cbbe1c 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -180,10 +180,10 @@ class ha_innobase: public handler /* ask handler about permission to cache table during query registration */ - my_bool cached_table_registration(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *call_back, - ulonglong *engine_data) + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *call_back, + ulonglong *engine_data) { *call_back= innobase_query_caching_of_table_permitted; *engine_data= 0; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 849fac07821..d146e55f798 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4026,7 +4026,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_force_send(TRUE), m_autoincrement_prefetch(32), m_transaction_on(TRUE), - m_use_local_query_cache(FALSE), m_multi_cursor(NULL) { int i; @@ -4820,31 +4819,48 @@ uint ndb_get_commitcount(THD* thd, char* dbname, char* tabname, DBUG_RETURN(1); ndb->setDatabaseName(dbname); - if (ndb_get_table_statistics(ndb, tabname, 0, commit_count)) + struct Ndb_statistics stat; + if (ndb_get_table_statistics(ndb, tabname, &stat)) DBUG_RETURN(1); + *commit_count= stat.commit_count; DBUG_RETURN(0); } -static -my_bool -ndbcluster_cache_retrieval_allowed( -/*======================================*/ - /* out: TRUE if permitted, FALSE if not; - note that the value FALSE means invalidation - of query cache if *engine_data is changed */ - THD* thd, /* in: thd of the user who is trying to - store a result to the query cache or - retrieve it */ - char* full_name, /* in: concatenation of database name, - the null character '\0', and the table - name */ - uint full_name_len, /* in: length of the full name, i.e. - len(dbname) + len(tablename) + 1 */ - ulonglong *engine_data) /* in: value set in call to - ha_ndbcluster::cached_table_registration - out: if return FALSE this is used to invalidate - all cached queries with this table*/ +/* + Check if a cached query can be used. + This is done by comparing the supplied engine_data to commit_count of + the table. + The commit_count is either retrieved from the share for the table, where + it has been cached by the util thread. 
If the util thread is not started, + NDB has to be contacted to retrieve the commit_count; this will introduce + a small delay while waiting for NDB to answer. + + + SYNOPSIS + ndbcluster_cache_retrieval_allowed + thd thread handle + full_name concatenation of database name, + the null character '\0', and the table + name + full_name_len length of the full name, + i.e. len(dbname) + len(tablename) + 1 + + engine_data parameter retrieved when query was first inserted into + the cache. If the value of engine_data is changed, + all queries for this table should be invalidated. + + RETURN VALUE + TRUE Yes, use the query from cache + FALSE No, don't use the cached query, and if engine_data + has changed, all queries for this table should be invalidated + +*/ + +static my_bool +ndbcluster_cache_retrieval_allowed(THD* thd, + char* full_name, uint full_name_len, + ulonglong *engine_data) { DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); @@ -4861,7 +4877,7 @@ ndbcluster_cache_retrieval_allowed( if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) { - *engine_data= *engine_data+1; // invalidate + *engine_data+= 1; // invalidate DBUG_RETURN(FALSE); } DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu", @@ -4877,27 +4893,36 @@ ndbcluster_cache_retrieval_allowed( DBUG_RETURN(TRUE); } + +/** + Register a table for use in the query cache. Fetch the commit_count + for the table and return it in engine_data; this will later be used + to check if the table has changed, before the cached query is reused. + + SYNOPSIS + ha_ndbcluster::register_query_cache_table + thd thread handle + full_name concatenation of database name, + the null character '\0', and the table + name + full_name_len length of the full name, + i.e. len(dbname) + len(tablename) + 1 + qc_engine_callback function to be called before using cache on this table + engine_data out, commit_count for this table + + RETURN VALUE + TRUE Yes, it's ok to cache this query + FALSE No, don't cache the query + +*/ + + my_bool -ha_ndbcluster::cached_table_registration( -/*======================================*/ - /* out: TRUE if permitted, FALSE if not; - note that the value FALSE means invalidation - of query cache if *engine_data is changed */ - THD* thd, /* in: thd of the user who is trying to - store a result to the query cache or - retrieve it */ - char* full_name, /* in: concatenation of database name, - the null character '\0', and the table - name */ - uint full_name_len, /* in: length of the full name, i.e.
- len(dbname) + len(tablename) + 1 */ - qc_engine_callback - *engine_callback, /* out: function to be called before using - cache on this table */ - ulonglong *engine_data) /* out: if return FALSE this is used to - invalidate all cached queries with this table*/ +ha_ndbcluster::register_query_cache_table(THD* thd, + char* full_name, uint full_name_len, + qc_engine_callback *engine_callback, + ulonglong *engine_data) { - DBUG_ENTER("ha_ndbcluster::cached_table_registration"); + DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", @@ -5139,10 +5164,6 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, pOp->close(TRUE); ndb->closeTransaction(pTrans); - if(row_count) - *row_count= sum_rows; - if(commit_count) - *commit_count= sum_commits; ndbstat->row_count= sum_rows; ndbstat->commit_count= sum_commits; @@ -5662,13 +5683,12 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused)) // Contact NDB to get commit count for table g_ndb->setDatabaseName(db); - Uint64 rows, commit_count; - if(ndb_get_table_statistics(g_ndb, tabname, - &rows, &commit_count) == 0){ + struct Ndb_statistics stat;; + if(ndb_get_table_statistics(g_ndb, tabname, &stat) == 0){ DBUG_PRINT("ndb_util_thread", ("Table: %s, rows: %llu, commit_count: %llu", - share->table_name, rows, commit_count)); - share->commit_count= commit_count; + share->table_name, stat.row_count, stat.commit_count)); + share->commit_count= stat.commit_count; } else { diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index bd0d8cc7be5..fb624353491 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -157,11 +157,11 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type(); - my_bool cached_table_registration(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *engine_callback, - ulonglong *engine_data); - private: + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data); +private: int alter_table_name(const char *to); int drop_table(); int create_index(const char *name, KEY *key_info, bool unique); diff --git a/sql/handler.h b/sql/handler.h index 2720a0bff33..04f196dccca 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -592,11 +592,12 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* ask handler about permission to cache table during query registration */ - virtual my_bool cached_table_registration(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *engine_callback, - ulonglong *engine_data) + /* ask handler about permission to cache table when query is to be cached */ + virtual my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback + *engine_callback, + ulonglong *engine_data) { *engine_callback= 0; return 1; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 68964d80bf7..e38e417e6df 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2770,10 +2770,11 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, for (; tables_used; tables_used= tables_used->next_global) { TABLE *table= tables_used->table; - if (!handler->cached_table_registration(thd, table->s->table_cache_key, - table->s->key_length, - &tables_used->callback_func, - 
&tables_used->engine_data)) + handler *handler= table->file; + if (!handler->register_query_cache_table(thd, table->s->table_cache_key, + table->s->key_length, + &tables_used->callback_func, + &tables_used->engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", tables_used->db, tables_used->alias)); From ca95b9fefbb307bd85185bb2eaf192af4df6c3bb Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 14:35:56 +0400 Subject: [PATCH 18/25] type_ranges.result.es updated. mysql-test/r/type_ranges.result.es: Updated. --- mysql-test/r/type_ranges.result.es | 176 +++++++++++++++-------------- 1 file changed, 90 insertions(+), 86 deletions(-) diff --git a/mysql-test/r/type_ranges.result.es b/mysql-test/r/type_ranges.result.es index c1f6d2453e9..5e2ea2aebbc 100644 --- a/mysql-test/r/type_ranges.result.es +++ b/mysql-test/r/type_ranges.result.es @@ -40,30 +40,30 @@ KEY (options,flags) ); show full fields from t1; Field Type Collation Null Key Default Extra Privileges Comment -auto int(5) unsigned NULL PRI NULL auto_increment -string varchar(10) latin1_swedish_ci YES hello -tiny tinyint(4) NULL MUL 0 -short smallint(6) NULL MUL 1 -medium mediumint(8) NULL MUL 0 -long_int int(11) NULL 0 -longlong bigint(13) NULL MUL 0 -real_float float(13,1) NULL MUL 0.0 +auto int(5) unsigned NULL NO PRI NULL auto_increment +string char(10) latin1_swedish_ci YES hello +tiny tinyint(4) NULL NO MUL 0 +short smallint(6) NULL NO MUL 1 +medium mediumint(8) NULL NO MUL 0 +long_int int(11) NULL NO 0 +longlong bigint(13) NULL NO MUL 0 +real_float float(13,1) NULL NO MUL 0.0 real_double double(16,4) NULL YES NULL -utiny tinyint(3) unsigned NULL MUL 0 -ushort smallint(5) unsigned zerofill NULL MUL 00000 -umedium mediumint(8) unsigned NULL MUL 0 -ulong int(11) unsigned NULL MUL 0 -ulonglong bigint(13) unsigned NULL MUL 0 +utiny tinyint(3) unsigned NULL NO MUL 0 +ushort smallint(5) unsigned zerofill NULL NO MUL 00000 +umedium mediumint(8) unsigned NULL NO MUL 0 +ulong int(11) unsigned NULL NO MUL 0 +ulonglong bigint(13) unsigned NULL NO MUL 0 time_stamp timestamp NULL YES CURRENT_TIMESTAMP date_field date NULL YES NULL time_field time NULL YES NULL date_time datetime NULL YES NULL blob_col blob NULL YES NULL tinyblob_col tinyblob NULL YES NULL -mediumblob_col mediumblob NULL -longblob_col longblob NULL -options enum('one','two','tree') latin1_swedish_ci MUL one -flags set('one','two','tree') latin1_swedish_ci +mediumblob_col mediumblob NULL NO +longblob_col longblob NULL NO +options enum('one','two','tree') latin1_swedish_ci NO MUL one +flags set('one','two','tree') latin1_swedish_ci NO show keys from t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment t1 0 PRIMARY 1 auto A 0 NULL NULL BTREE @@ -89,33 +89,33 @@ insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,N insert into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3); insert into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1); Warnings: -Warning 1264 Data truncated; out of range for column 'utiny' at row 1 -Warning 1264 Data truncated; out of range for column 'ushort' at row 1 -Warning 1264 Data truncated; out of range for column 'umedium' at row 1 -Warning 1264 Data truncated; out of range for column 'ulong' at row 1 +Warning 1264 Out of range value adjusted for column 'utiny' at row 1 +Warning 1264 Out of range value adjusted for column 
'ushort' at row 1 +Warning 1264 Out of range value adjusted for column 'umedium' at row 1 +Warning 1264 Out of range value adjusted for column 'ulong' at row 1 Warning 1265 Data truncated for column 'options' at row 1 Warning 1265 Data truncated for column 'flags' at row 1 insert into t1 values (0,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,NULL,0,0,0,-4294967295,-4294967295,-4294967295,'-4294967295',0,"one,two,tree"); Warnings: Warning 1265 Data truncated for column 'string' at row 1 -Warning 1264 Data truncated; out of range for column 'tiny' at row 1 -Warning 1264 Data truncated; out of range for column 'short' at row 1 -Warning 1264 Data truncated; out of range for column 'medium' at row 1 -Warning 1264 Data truncated; out of range for column 'long_int' at row 1 -Warning 1264 Data truncated; out of range for column 'utiny' at row 1 -Warning 1264 Data truncated; out of range for column 'ushort' at row 1 -Warning 1264 Data truncated; out of range for column 'umedium' at row 1 -Warning 1264 Data truncated; out of range for column 'ulong' at row 1 +Warning 1264 Out of range value adjusted for column 'tiny' at row 1 +Warning 1264 Out of range value adjusted for column 'short' at row 1 +Warning 1264 Out of range value adjusted for column 'medium' at row 1 +Warning 1264 Out of range value adjusted for column 'long_int' at row 1 +Warning 1264 Out of range value adjusted for column 'utiny' at row 1 +Warning 1264 Out of range value adjusted for column 'ushort' at row 1 +Warning 1264 Out of range value adjusted for column 'umedium' at row 1 +Warning 1264 Out of range value adjusted for column 'ulong' at row 1 Warning 1265 Data truncated for column 'options' at row 1 insert into t1 values (0,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,NULL,0,0,0,4294967295,4294967295,4294967295,'4294967295',0,0); Warnings: -Warning 1264 Data truncated; out of range for column 'tiny' at row 1 -Warning 1264 Data truncated; out of range for column 'short' at row 1 -Warning 1264 Data truncated; out of range for column 'medium' at row 1 -Warning 1264 Data truncated; out of range for column 'long_int' at row 1 -Warning 1264 Data truncated; out of range for column 'utiny' at row 1 -Warning 1264 Data truncated; out of range for column 'ushort' at row 1 -Warning 1264 Data truncated; out of range for column 'umedium' at row 1 +Warning 1264 Out of range value adjusted for column 'tiny' at row 1 +Warning 1264 Out of range value adjusted for column 'short' at row 1 +Warning 1264 Out of range value adjusted for column 'medium' at row 1 +Warning 1264 Out of range value adjusted for column 'long_int' at row 1 +Warning 1264 Out of range value adjusted for column 'utiny' at row 1 +Warning 1264 Out of range value adjusted for column 'ushort' at row 1 +Warning 1264 Out of range value adjusted for column 'umedium' at row 1 Warning 1265 Data truncated for column 'options' at row 1 insert into t1 (tiny) values (1); select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,utiny,ushort,umedium,ulong,ulonglong,mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000),date_field,time_field,date_time,blob_col,tinyblob_col,mediumblob_col,longblob_col from t1; @@ -208,56 +208,56 @@ Warning 1265 Data truncated for column 'options' at row 6 update t2 set string="changed" where auto=16; show full columns from t1; 
Field Type Collation Null Key Default Extra Privileges Comment -auto int(5) unsigned NULL MUL NULL auto_increment -string varchar(10) latin1_swedish_ci YES new defaul -tiny tinyint(4) NULL MUL 0 -short smallint(6) NULL MUL 0 -medium mediumint(8) NULL MUL 0 -long_int int(11) NULL 0 -longlong bigint(13) NULL MUL 0 -real_float float(13,1) NULL MUL 0.0 +auto int(5) unsigned NULL NO MUL NULL auto_increment +string char(10) latin1_swedish_ci YES new defaul +tiny tinyint(4) NULL NO MUL 0 +short smallint(6) NULL NO MUL 0 +medium mediumint(8) NULL NO MUL 0 +long_int int(11) NULL NO 0 +longlong bigint(13) NULL NO MUL 0 +real_float float(13,1) NULL NO MUL 0.0 real_double double(16,4) NULL YES NULL -utiny tinyint(3) unsigned NULL 0 -ushort smallint(5) unsigned zerofill NULL 00000 -umedium mediumint(8) unsigned NULL MUL 0 -ulong int(11) unsigned NULL MUL 0 -ulonglong bigint(13) unsigned NULL MUL 0 +utiny tinyint(3) unsigned NULL NO 0 +ushort smallint(5) unsigned zerofill NULL NO 00000 +umedium mediumint(8) unsigned NULL NO MUL 0 +ulong int(11) unsigned NULL NO MUL 0 +ulonglong bigint(13) unsigned NULL NO MUL 0 time_stamp timestamp NULL YES CURRENT_TIMESTAMP date_field char(10) latin1_swedish_ci YES NULL time_field time NULL YES NULL date_time datetime NULL YES NULL new_blob_col varchar(20) latin1_swedish_ci YES NULL tinyblob_col tinyblob NULL YES NULL -mediumblob_col mediumblob NULL -options enum('one','two','tree') latin1_swedish_ci MUL one -flags set('one','two','tree') latin1_swedish_ci -new_field char(10) latin1_swedish_ci new +mediumblob_col mediumblob NULL NO +options enum('one','two','tree') latin1_swedish_ci NO MUL one +flags set('one','two','tree') latin1_swedish_ci NO +new_field char(10) latin1_swedish_ci NO new show full columns from t2; Field Type Collation Null Key Default Extra Privileges Comment -auto int(5) unsigned NULL 0 -string varchar(10) latin1_swedish_ci YES new defaul -tiny tinyint(4) NULL 0 -short smallint(6) NULL 0 -medium mediumint(8) NULL 0 -long_int int(11) NULL 0 -longlong bigint(13) NULL 0 -real_float float(13,1) NULL 0.0 +auto int(5) unsigned NULL NO 0 +string char(10) latin1_swedish_ci YES new defaul +tiny tinyint(4) NULL NO 0 +short smallint(6) NULL NO 0 +medium mediumint(8) NULL NO 0 +long_int int(11) NULL NO 0 +longlong bigint(13) NULL NO 0 +real_float float(13,1) NULL NO 0.0 real_double double(16,4) NULL YES NULL -utiny tinyint(3) unsigned NULL 0 -ushort smallint(5) unsigned zerofill NULL 00000 -umedium mediumint(8) unsigned NULL 0 -ulong int(11) unsigned NULL 0 -ulonglong bigint(13) unsigned NULL 0 +utiny tinyint(3) unsigned NULL NO 0 +ushort smallint(5) unsigned zerofill NULL NO 00000 +umedium mediumint(8) unsigned NULL NO 0 +ulong int(11) unsigned NULL NO 0 +ulonglong bigint(13) unsigned NULL NO 0 time_stamp timestamp NULL YES 0000-00-00 00:00:00 date_field char(10) latin1_swedish_ci YES NULL time_field time NULL YES NULL date_time datetime NULL YES NULL new_blob_col varchar(20) latin1_swedish_ci YES NULL tinyblob_col tinyblob NULL YES NULL -mediumblob_col mediumblob NULL -options enum('one','two','tree') latin1_swedish_ci one -flags set('one','two','tree') latin1_swedish_ci -new_field char(10) latin1_swedish_ci new +mediumblob_col mediumblob NULL NO +options enum('one','two','tree') latin1_swedish_ci NO one +flags set('one','two','tree') latin1_swedish_ci NO +new_field char(10) latin1_swedish_ci NO new select t1.auto,t2.auto from t1,t2 where t1.auto=t2.auto and ((t1.string<>t2.string and (t1.string is not null or t2.string is not null)) or (t1.tiny<>t2.tiny and 
(t1.tiny is not null or t2.tiny is not null)) or (t1.short<>t2.short and (t1.short is not null or t2.short is not null)) or (t1.medium<>t2.medium and (t1.medium is not null or t2.medium is not null)) or (t1.long_int<>t2.long_int and (t1.long_int is not null or t2.long_int is not null)) or (t1.longlong<>t2.longlong and (t1.longlong is not null or t2.longlong is not null)) or (t1.real_float<>t2.real_float and (t1.real_float is not null or t2.real_float is not null)) or (t1.real_double<>t2.real_double and (t1.real_double is not null or t2.real_double is not null)) or (t1.utiny<>t2.utiny and (t1.utiny is not null or t2.utiny is not null)) or (t1.ushort<>t2.ushort and (t1.ushort is not null or t2.ushort is not null)) or (t1.umedium<>t2.umedium and (t1.umedium is not null or t2.umedium is not null)) or (t1.ulong<>t2.ulong and (t1.ulong is not null or t2.ulong is not null)) or (t1.ulonglong<>t2.ulonglong and (t1.ulonglong is not null or t2.ulonglong is not null)) or (t1.time_stamp<>t2.time_stamp and (t1.time_stamp is not null or t2.time_stamp is not null)) or (t1.date_field<>t2.date_field and (t1.date_field is not null or t2.date_field is not null)) or (t1.time_field<>t2.time_field and (t1.time_field is not null or t2.time_field is not null)) or (t1.date_time<>t2.date_time and (t1.date_time is not null or t2.date_time is not null)) or (t1.new_blob_col<>t2.new_blob_col and (t1.new_blob_col is not null or t2.new_blob_col is not null)) or (t1.tinyblob_col<>t2.tinyblob_col and (t1.tinyblob_col is not null or t2.tinyblob_col is not null)) or (t1.mediumblob_col<>t2.mediumblob_col and (t1.mediumblob_col is not null or t2.mediumblob_col is not null)) or (t1.options<>t2.options and (t1.options is not null or t2.options is not null)) or (t1.flags<>t2.flags and (t1.flags is not null or t2.flags is not null)) or (t1.new_field<>t2.new_field and (t1.new_field is not null or t2.new_field is not null))); auto auto 16 16 @@ -265,23 +265,27 @@ select t1.auto,t2.auto from t1,t2 where t1.auto=t2.auto and not (t1.string<=>t2. 
auto auto 16 16 drop table t2; -create table t2 (primary key (auto)) select auto+1 as auto,1 as t1, "a" as t2, repeat("a",256) as t3, binary repeat("b",256) as t4 from t1; +create table t2 (primary key (auto)) select auto+1 as auto,1 as t1, 'a' as t2, repeat('a',256) as t3, binary repeat('b',256) as t4, repeat('a',4096) as t5, binary repeat('b',4096) as t6, '' as t7, binary '' as t8 from t1; show full columns from t2; Field Type Collation Null Key Default Extra Privileges Comment -auto bigint(17) unsigned NULL PRI 0 -t1 bigint(1) NULL 0 -t2 char(1) latin1_swedish_ci -t3 longtext latin1_swedish_ci -t4 longblob NULL -select * from t2; -auto t1 t2 t3 t4 -11 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -12 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -13 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -14 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -15 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -16 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -17 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +auto bigint(17) unsigned NULL NO PRI 0 +t1 bigint(1) NULL NO 0 +t2 varchar(1) latin1_swedish_ci NO +t3 varchar(256) latin1_swedish_ci NO +t4 varbinary(256) NULL NO +t5 longtext latin1_swedish_ci NO +t6 longblob NULL NO +t7 char(0) latin1_swedish_ci NO +t8 binary(0) NULL NO +select t1,t2,length(t3),length(t4),length(t5),length(t6),t7,t8 from t2; +t1 t2 length(t3) length(t4) length(t5) length(t6) t7 t8 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 drop table t1,t2; create table t1 (c int); insert into t1 values(1),(2); @@ -293,7 +297,7 @@ show full columns from t3; Field Type Collation Null Key Default Extra Privileges Comment c1 int(11) NULL YES NULL c2 int(11) NULL YES NULL -const bigint(1) NULL 0 +const bigint(1) NULL NO 0 drop table t1,t2,t3; create table t1 ( myfield INT NOT NULL, UNIQUE INDEX (myfield), unique (myfield), index(myfield)); drop table t1; From 4c69539827f69a693236eca0a2f512b1618e80a1 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:35:08 +0400 Subject: [PATCH 19/25] type_float.result.es updated. mysql-test/r/type_float.result.es: Updated. 
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + mysql-test/r/type_float.result.es | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 6ccc886e161..bf88e38a780 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -180,6 +180,7 @@ ram@gw.mysql.r18.ru ram@gw.udmsearch.izhnet.ru ram@mysql.r18.ru ram@ram.(none) +ramil@mysql.com ranger@regul.home.lan rburnett@build.mysql.com reggie@bob.(none) diff --git a/mysql-test/r/type_float.result.es b/mysql-test/r/type_float.result.es index b93539b6bea..5fcf9213f83 100644 --- a/mysql-test/r/type_float.result.es +++ b/mysql-test/r/type_float.result.es @@ -143,6 +143,15 @@ drop table t1; create table t1 (f float(54)); ERROR 42000: Incorrect column specifier for column 'f' drop table if exists t1; +create table t1 (d1 double, d2 double unsigned); +insert into t1 set d1 = -1.0; +update t1 set d2 = d1; +Warnings: +Warning 1264 Data truncated; out of range for column 'd2' at row 1 +select * from t1; +d1 d2 +-1 0 +drop table t1; create table t1 (f float(4,3)); insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11"); Warnings: From e5e7cd8ea8d0cee5e3d1f6f50b6846403b0a7f24 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 13:53:16 +0100 Subject: [PATCH 20/25] Proposal to fix this problem: when using libmysqlclient, you must call mysql_server_end() to nicely free memory at the end of your program; however, it sounds weird to call a function named *SERVER_end* when you're the CLIENT (you're not ending the server, you're ending your ability to talk to servers). So here I add two defines which should be more generic names. This was discussed at length with Konstantin, Serg, Brian. The problem started from a post on valgrind-users list: http://sourceforge.net/mailarchive/forum.php?thread_id=5778035&forum_id=32038 ; our manual mentions these functions only for the libmysqld API, so it needs some fixing, and then we can close BUG#8099 and BUG#6149. include/mysql.h: Creating synonyms (defines): mysql_library_init for mysql_server_init, mysql_library_end for mysql_server_end; these new names are more generic, so suitable when using libmysqlclient as well as libmysqld.c --- include/mysql.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/mysql.h b/include/mysql.h index 58c314207c1..b87b865608e 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -337,6 +337,17 @@ typedef struct st_mysql_parameters */ int STDCALL mysql_server_init(int argc, char **argv, char **groups); void STDCALL mysql_server_end(void); +/* + mysql_server_init/end need to be called when using libmysqld or + libmysqlclient (exactly, mysql_server_init() is called by mysql_init() so + you don't need to call it explicitly; but you need to call + mysql_server_end() to free memory). The names are a bit misleading + (mysql_SERVER* to be used when using libmysqlCLIENT). So we add more general + names which suit well whether you're using libmysqld or libmysqlclient. We + intend to promote these aliases over the mysql_server* ones.
+*/ +#define mysql_library_init mysql_server_init +#define mysql_library_end mysql_server_end MYSQL_PARAMETERS *STDCALL mysql_get_parameters(void); From 48e2d224047ddb5a70dcca3abd7f4f828ee0b5bd Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 14:25:08 +0100 Subject: [PATCH 21/25] added test to trigger drifferent fragmentations in ndb corrected documentation on fragmentation set "fragmentation medium" to mean 2 fragments per node instead of 1 set default fragmentation to small instead of medium bug#8284 adjust fragmentation to max_rows mysql-test/r/ndb_basic.result: added test to trigger drifferent fragmentations in ndb mysql-test/t/ndb_basic.test: added test to trigger drifferent fragmentations in ndb ndb/include/ndbapi/NdbDictionary.hpp: corrected documentation on fragmentation ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: set "fragmentation medium" to mean 2 fragments per node instead of 1 ndb/src/ndbapi/NdbDictionaryImpl.cpp: set default fragmentation to small instead of medium sql/ha_ndbcluster.cc: bug#8284 adjust fragmentation to max_rows --- mysql-test/r/ndb_basic.result | 34 +++++++++++++++++ mysql-test/t/ndb_basic.test | 38 +++++++++++++++++++ ndb/include/ndbapi/NdbDictionary.hpp | 6 +-- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 2 +- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 2 +- sql/ha_ndbcluster.cc | 45 ++++++++++++++++++++++- 6 files changed, 121 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index 6ec5338acbe..a6396080ef0 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -573,3 +573,37 @@ select * from t1 where a12345678901234567890123456789a1234567890=2; a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890 5 2 drop table t1; +create table t1 +(a bigint, b bigint, c bigint, d bigint, +primary key (a,b,c,d)) +engine=ndb +max_rows=200000000; +Warnings: +Warning 1105 Ndb might have problems storing the max amount of rows specified +insert into t1 values +(1,2,3,4),(2,3,4,5),(3,4,5,6), +(3,2,3,4),(1,3,4,5),(2,4,5,6), +(1,2,3,5),(2,3,4,8),(3,4,5,9), +(3,2,3,5),(1,3,4,8),(2,4,5,9), +(1,2,3,6),(2,3,4,6),(3,4,5,7), +(3,2,3,6),(1,3,4,6),(2,4,5,7), +(1,2,3,7),(2,3,4,7),(3,4,5,8), +(3,2,3,7),(1,3,4,7),(2,4,5,8), +(1,3,3,4),(2,4,4,5),(3,5,5,6), +(3,3,3,4),(1,4,4,5),(2,5,5,6), +(1,3,3,5),(2,4,4,8),(3,5,5,9), +(3,3,3,5),(1,4,4,8),(2,5,5,9), +(1,3,3,6),(2,4,4,6),(3,5,5,7), +(3,3,3,6),(1,4,4,6),(2,5,5,7), +(1,3,3,7),(2,4,4,7),(3,5,5,8), +(3,3,3,7),(1,4,4,7),(2,5,5,8); +select count(*) from t1; +count(*) +48 +drop table t1; +create table t1 +(a bigint, b bigint, c bigint, d bigint, +primary key (a)) +engine=ndb +max_rows=1; +drop table t1; diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 2671223ada8..f460c573a9d 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -539,3 +539,41 @@ insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1); explain select * from t1 where a12345678901234567890123456789a1234567890=2; select * from t1 where a12345678901234567890123456789a1234567890=2; drop table t1; + +# +# test fragment creation +# +# first a table with _many_ fragments per node group +# then a table with just one fragment per node group +# +create table t1 + (a bigint, b bigint, c bigint, d bigint, + primary key (a,b,c,d)) + engine=ndb + max_rows=200000000; +insert into t1 values + (1,2,3,4),(2,3,4,5),(3,4,5,6), + (3,2,3,4),(1,3,4,5),(2,4,5,6), + (1,2,3,5),(2,3,4,8),(3,4,5,9), + (3,2,3,5),(1,3,4,8),(2,4,5,9), + 
(1,2,3,6),(2,3,4,6),(3,4,5,7), + (3,2,3,6),(1,3,4,6),(2,4,5,7), + (1,2,3,7),(2,3,4,7),(3,4,5,8), + (3,2,3,7),(1,3,4,7),(2,4,5,8), + (1,3,3,4),(2,4,4,5),(3,5,5,6), + (3,3,3,4),(1,4,4,5),(2,5,5,6), + (1,3,3,5),(2,4,4,8),(3,5,5,9), + (3,3,3,5),(1,4,4,8),(2,5,5,9), + (1,3,3,6),(2,4,4,6),(3,5,5,7), + (3,3,3,6),(1,4,4,6),(2,5,5,7), + (1,3,3,7),(2,4,4,7),(3,5,5,8), + (3,3,3,7),(1,4,4,7),(2,5,5,8); +select count(*) from t1; +drop table t1; + +create table t1 + (a bigint, b bigint, c bigint, d bigint, + primary key (a)) + engine=ndb + max_rows=1; +drop table t1; diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 0dca1c0f106..49afbd695c9 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -141,9 +141,9 @@ public: enum FragmentType { FragUndefined = 0, ///< Fragmentation type undefined or default FragSingle = 1, ///< Only one fragment - FragAllSmall = 2, ///< One fragment per node group - FragAllMedium = 3, ///< Default value. Two fragments per node group. - FragAllLarge = 4 ///< Eight fragments per node group. + FragAllSmall = 2, ///< One fragment per node, default + FragAllMedium = 3, ///< two fragments per node + FragAllLarge = 4 ///< Four fragments per node. }; }; diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index dba1efbba9a..0bc8351a9db 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -6178,7 +6178,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){ break; case DictTabInfo::AllNodesMediumTable: jam(); - noOfFragments = csystemnodes; + noOfFragments = 2 * csystemnodes; break; case DictTabInfo::AllNodesLargeTable: jam(); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 9f6ed144fb0..530f15d3a2e 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -284,7 +284,7 @@ void NdbTableImpl::init(){ clearNewProperties(); m_frm.clear(); - m_fragmentType = NdbDictionary::Object::FragAllMedium; + m_fragmentType = NdbDictionary::Object::FragAllSmall; m_logging = true; m_kvalue = 6; m_minLoadFactor = 78; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a959cbaf434..9f0da616289 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3503,6 +3503,47 @@ static int create_ndb_column(NDBCOL &col, Create a table in NDB Cluster */ +static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) +{ + if (form->max_rows == 0) /* default setting, don't set fragmentation */ + return; + /** + * get the number of fragments right + */ + uint no_fragments; + { +#if MYSQL_VERSION_ID >= 50000 + uint acc_row_size= 25+2; +#else + uint acc_row_size= pk_length*4; + /* add acc overhead */ + if (pk_length <= 8) + acc_row_size+= 25+2; /* main page will set the limit */ + else + acc_row_size+= 4+4; /* overflow page will set the limit */ +#endif + ulonglong acc_fragment_size= 512*1024*1024; + ulonglong max_rows= form->max_rows; + no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; + } + { + uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); + NDBTAB::FragmentType ftype; + if (no_fragments > 2*no_nodes) + { + ftype= NDBTAB::FragAllLarge; + if (no_fragments > 4*no_nodes) + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount of rows specified"); + } + else if (no_fragments > no_nodes) + ftype= NDBTAB::FragAllMedium; + else + 
ftype= NDBTAB::FragAllSmall; + tab.setFragmentType(ftype); + } +} + int ha_ndbcluster::create(const char *name, TABLE *form, HA_CREATE_INFO *info) @@ -3605,7 +3646,9 @@ int ha_ndbcluster::create(const char *name, break; } } - + + ndb_set_fragmentation(tab, form, pk_length); + if ((my_errno= check_ndb_connection())) DBUG_RETURN(my_errno); From 58fd4d94ce16eb473ccf56134bb51e1cd38c9c49 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 14:33:08 +0100 Subject: [PATCH 22/25] ha_ndbcluster.cc: fixed change in struct in 4.1->5.0 merge sql/ha_ndbcluster.cc: fixed change in struct in 4.1->5.0 merge --- sql/ha_ndbcluster.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 3911ad920fa..7cfb6501948 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3652,7 +3652,7 @@ static int create_ndb_column(NDBCOL &col, static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) { - if (form->max_rows == 0) /* default setting, don't set fragmentation */ + if (form->s->max_rows == 0) /* default setting, don't set fragmentation */ return; /** * get the number of fragments right @@ -3670,7 +3670,7 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) acc_row_size+= 4+4; /* overflow page will set the limit */ #endif ulonglong acc_fragment_size= 512*1024*1024; - ulonglong max_rows= form->max_rows; + ulonglong max_rows= form->s->max_rows; no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; } { From e16b1b4a79210476229302f107dfa1c4c473bab2 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:44:09 +0200 Subject: [PATCH 23/25] InnoDB: A small cleanup: remove two duplicate rec_get_offsets() calls innobase/page/page0cur.c: Remove two duplicate rec_get_offsets() calls --- innobase/page/page0cur.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/innobase/page/page0cur.c b/innobase/page/page0cur.c index fc94fc4b1e0..488d2757115 100644 --- a/innobase/page/page0cur.c +++ b/innobase/page/page0cur.c @@ -311,9 +311,6 @@ page_cur_search_with_match( low_matched_bytes = cur_matched_bytes; } else if (cmp == -1) { - offsets = rec_get_offsets(mid_rec, index, offsets, - dtuple_get_n_fields_cmp(tuple), &heap); - if (mode == PAGE_CUR_LE_OR_EXTENDS && page_cur_rec_field_extends(tuple, mid_rec, offsets, cur_matched_fields)) { @@ -366,9 +363,6 @@ page_cur_search_with_match( low_matched_bytes = cur_matched_bytes; } else if (cmp == -1) { - offsets = rec_get_offsets(mid_rec, index, offsets, - dtuple_get_n_fields_cmp(tuple), &heap); - if (mode == PAGE_CUR_LE_OR_EXTENDS && page_cur_rec_field_extends(tuple, mid_rec, offsets, cur_matched_fields)) { From 9e92b63a0bc55356e42c169827b5cd2d631a92f9 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:08:08 +0100 Subject: [PATCH 24/25] Updated after review. sql/ha_ndbcluster.cc: Fixing spaces in parameter list. 
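For readers following the fragmentation changes above (bug#8284, the ndb_set_fragmentation() helper and its later form->s->max_rows fix), the max_rows heuristic is easier to see outside the diffs. The sketch below is a hypothetical, simplified restatement (the enum and helper names are invented; the 25+2 byte ACC row estimate and the 512 MB fragment size are simply the constants used in the 5.0 branch of the patch), not the actual server code:

  /* Hypothetical restatement of the max_rows -> fragmentation mapping. */
  #include <cstdio>

  enum FragType { FRAG_ALL_SMALL, FRAG_ALL_MEDIUM, FRAG_ALL_LARGE };

  static FragType choose_fragmentation(unsigned long long max_rows,
                                       unsigned no_db_nodes,
                                       bool *too_many_rows)
  {
    const unsigned long long acc_row_size= 25 + 2;           /* 5.0 estimate */
    const unsigned long long acc_fragment_size= 512ULL*1024*1024;
    unsigned long long no_fragments=
      (max_rows * acc_row_size) / acc_fragment_size + 1;

    /* the patch pushes a warning when even "large" may not be enough */
    *too_many_rows= (no_fragments > 4 * no_db_nodes);
    if (no_fragments > 2 * no_db_nodes)
      return FRAG_ALL_LARGE;          /* four fragments per node */
    if (no_fragments > no_db_nodes)
      return FRAG_ALL_MEDIUM;         /* two fragments per node  */
    return FRAG_ALL_SMALL;            /* one fragment per node   */
  }

  int main()
  {
    bool warn;
    /* max_rows=200000000 on a 2-node cluster, as in the ndb_basic test above */
    FragType t= choose_fragmentation(200000000ULL, 2, &warn);
    printf("type=%d warn=%d\n", (int) t, (int) warn);
    return 0;
  }

With max_rows=200000000 and two data nodes this picks FRAG_ALL_LARGE and sets the warning flag, matching the "Ndb might have problems storing the max amount of rows specified" warning in the ndb_basic result above; with max_rows=1 it falls through to FRAG_ALL_SMALL.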
--- sql/ha_ndbcluster.cc | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d146e55f798..5c44479aa12 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4537,13 +4537,13 @@ bool ndbcluster_init() (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, (hash_get_key) ndbcluster_get_key,0,0); pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); - pthread_mutex_init(&LOCK_ndb_util_thread,MY_MUTEX_INIT_FAST); - pthread_cond_init(&COND_ndb_util_thread,NULL); + pthread_mutex_init(&LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST); + pthread_cond_init(&COND_ndb_util_thread, NULL); // Create utility thread pthread_t tmp; - if (pthread_create(&tmp,&connection_attrib,ndb_util_thread_func,0)) + if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0)) { DBUG_PRINT("error", ("Could not create ndb utility thread")); goto ndbcluster_init_error; @@ -4570,7 +4570,7 @@ bool ndbcluster_end() // Kill ndb utility thread (void) pthread_mutex_lock(&LOCK_ndb_util_thread); - DBUG_PRINT("exit",("killing ndb util thread: %lx",ndb_util_thread)); + DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread)); (void) pthread_cond_signal(&COND_ndb_util_thread); (void) pthread_mutex_unlock(&LOCK_ndb_util_thread); @@ -5597,7 +5597,8 @@ ha_ndbcluster::update_table_comment( // Utility thread main loop -extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused))) +extern "C" pthread_handler_decl(ndb_util_thread_func, + arg __attribute__((unused))) { THD *thd; // needs to be first for thread_stack int error = 0; @@ -5628,8 +5629,8 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused)) pthread_mutex_lock(&LOCK_ndb_util_thread); error= pthread_cond_timedwait(&COND_ndb_util_thread, - &LOCK_ndb_util_thread, - &abstime); + &LOCK_ndb_util_thread, + &abstime); pthread_mutex_unlock(&LOCK_ndb_util_thread); DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %d", From 5d16b7a93a553b7ec7678d6c0e83a345667895d2 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 17:24:18 +0100 Subject: [PATCH 25/25] mysql-test-run.sh: USE_RUNNING_SERVER should be set to 0 or 1 mysql-test/mysql-test-run.sh: USE_RUNNING_SERVER should be set to 0 or 1 --- mysql-test/mysql-test-run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index fadc14d4e1b..42f96789eef 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -259,13 +259,13 @@ while test $# -gt 0; do --embedded-server) USE_EMBEDDED_SERVER=1 USE_MANAGER=0 NO_SLAVE=1 - USE_RUNNING_SERVER="" + USE_RUNNING_SERVER=0 RESULT_EXT=".es" TEST_MODE="$TEST_MODE embedded" ;; --purify) USE_PURIFY=1 USE_MANAGER=0 - USE_RUNNING_SERVER="" + USE_RUNNING_SERVER=0 TEST_MODE="$TEST_MODE purify" ;; --user=*) DBUSER=`$ECHO "$1" | $SED -e "s;--user=;;"` ;; --force) FORCE=1 ;;
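To summarize the NDB query cache mechanism that the earlier patches in this series build up (table_cache_type(), register_query_cache_table() and ndbcluster_cache_retrieval_allowed()), the handshake around engine_data can be sketched as follows. This is a rough, hypothetical model: the fake commit-count source and the names register_for_cache()/retrieval_allowed() are invented for illustration, and only the shape of the decision logic follows the patches.

  /* Hypothetical model of the engine_data / commit_count handshake. */
  #include <cstdio>

  typedef unsigned long long ulonglong;

  /* Stand-in for ndb_get_commitcount(): the real code asks the table share
     (kept fresh by the util thread) or NDB itself.  Returns 0 on success. */
  static int get_commit_count(ulonglong *commit_count)
  {
    *commit_count= 42;            /* pretend the table has seen 42 commits */
    return 0;
  }

  /* When a query on the table is first put into the cache,
     remember the current commit count in engine_data. */
  static bool register_for_cache(ulonglong *engine_data)
  {
    ulonglong commit_count;
    if (get_commit_count(&commit_count))
      return false;                          /* don't cache */
    *engine_data= commit_count;
    return true;
  }

  /* Before a cached result is reused, allow it only if the commit count
     is unchanged; otherwise update engine_data so the entry is dropped. */
  static bool retrieval_allowed(ulonglong *engine_data)
  {
    ulonglong commit_count;
    if (get_commit_count(&commit_count))
    {
      *engine_data+= 1;                      /* invalidate */
      return false;
    }
    if (commit_count != *engine_data)
    {
      *engine_data= commit_count;
      return false;
    }
    return true;
  }

  int main()
  {
    ulonglong engine_data= 0;
    if (register_for_cache(&engine_data))
      printf("cached with engine_data=%llu, reuse ok=%d\n",
             engine_data, (int) retrieval_allowed(&engine_data));
    return 0;
  }

In the server the same check is wired through the handler interface: register_query_cache_table() stores the commit count in engine_data and installs the callback, and the callback refuses reuse once another mysqld has moved the commit count on, which is what the two-connection ndb_cache tests earlier in the series exercise.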