From 1ffdb9e81e5ab46ba809829c11ca1aafdc238db8 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Nov 2004 09:50:33 +0000 Subject: [PATCH 01/45] enabling control of query cache for ndb --- sql/ha_ndbcluster.cc | 2 +- sql/mysqld.cc | 13 ++++++++++++- sql/set_var.cc | 6 ++++++ sql/sql_class.h | 1 + 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 6f7940caf75..db031c632ff 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3004,7 +3004,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_transaction_on= FALSE; else m_transaction_on= thd->variables.ndb_use_transactions; - // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; + m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 5033c42ac69..f7c89f6dde3 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2227,7 +2227,11 @@ extern "C" pthread_handler_decl(handle_shutdown,arg) #endif -const char *load_default_groups[]= { "mysqld","server",MYSQL_BASE_VERSION,0,0}; +const char *load_default_groups[]= { +#ifdef HAVE_NDBCLUSTER_DB + "mysql_cluster", +#endif + "mysqld","server",MYSQL_BASE_VERSION,0,0}; bool open_log(MYSQL_LOG *log, const char *hostname, const char *opt_name, const char *extension, @@ -3950,6 +3954,7 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, + OPT_NDB_USE_LOCAL_QUERY_CACHE, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4409,6 +4414,12 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb_use_local_query_cache", OPT_NDB_USE_LOCAL_QUERY_CACHE, + "Use local query cache, note that this cache will _not_ " + "be invalidated if data is updated through other mysql servers", + (gptr*) &global_system_variables.ndb_use_local_query_cache, + (gptr*) &global_system_variables.ndb_use_local_query_cache, + 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index f1973b53e49..6bf151be83f 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -371,6 +371,9 @@ sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_var_thd_bool +sys_ndb_use_local_query_cache("ndb_use_local_query_cache", + &SV::ndb_use_local_query_cache); +sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); // ndb server global variable settings @@ -634,6 +637,7 @@ sys_var *sys_variables[]= &sys_ndb_autoincrement_prefetch_sz, &sys_ndb_force_send, &sys_ndb_use_exact_count, + &sys_ndb_use_local_query_cache, &sys_ndb_use_transactions, #endif &sys_unique_checks, @@ -801,6 +805,8 @@ struct show_var_st init_vars[]= { (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, + {sys_ndb_use_local_query_cache.name, + (char*) &sys_ndb_use_local_query_cache, SHOW_SYS}, {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, #endif 
{sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, diff --git a/sql/sql_class.h b/sql/sql_class.h index d0d9afc7746..06975730195 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -403,6 +403,7 @@ struct system_variables ulong ndb_autoincrement_prefetch_sz; my_bool ndb_force_send; my_bool ndb_use_exact_count; + my_bool ndb_use_local_query_cache; my_bool ndb_use_transactions; #endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; From dfef378702462359d40be4ad4d7ec3f8da2f2276 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 23 Nov 2004 16:38:00 +0000 Subject: [PATCH 02/45] changed query cache type variable for ndb --- sql/ha_ndbcluster.cc | 21 +++++++++++++++------ sql/ha_ndbcluster.h | 2 +- sql/mysqld.cc | 18 +++++++++++------- sql/set_var.cc | 18 ++++++++++++------ sql/sql_cache.cc | 6 ++++++ sql/sql_cache.h | 1 + sql/sql_class.h | 2 +- 7 files changed, 47 insertions(+), 21 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 8d82f60ae85..eea051be9e4 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3025,7 +3025,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_transaction_on= FALSE; else m_transaction_on= thd->variables.ndb_use_transactions; - m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; +#ifdef HAVE_QUERY_CACHE + m_query_cache_type= thd->variables.ndb_query_cache_type; +#endif m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: @@ -3751,8 +3753,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_force_send(TRUE), m_autoincrement_prefetch(32), m_transaction_on(TRUE), - m_use_local_query_cache(FALSE) -{ + m_query_cache_type(0) +{ int i; DBUG_ENTER("ha_ndbcluster"); @@ -4455,10 +4457,17 @@ const char* ha_ndbcluster::index_type(uint key_number) } uint8 ha_ndbcluster::table_cache_type() { - if (m_use_local_query_cache) - return HA_CACHE_TBL_TRANSACT; - else + switch (m_query_cache_type) + { + case 0: return HA_CACHE_TBL_NOCACHE; + case 1: + return HA_CACHE_TBL_ASKTRANSACT; + case 2: + return HA_CACHE_TBL_TRANSACT; + default: + return HA_CACHE_TBL_NOCACHE; + } } /* diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index f6c712620c1..baf4a7480ac 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -244,7 +244,7 @@ class ha_ndbcluster: public handler bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; - bool m_use_local_query_cache; + ulong m_query_cache_type; void set_rec_per_key(); void records_update(); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index f7c89f6dde3..51e12ff23f9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3954,7 +3954,7 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, - OPT_NDB_USE_LOCAL_QUERY_CACHE, + OPT_NDB_QUERY_CACHE_TYPE, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4414,12 +4414,16 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, - {"ndb_use_local_query_cache", OPT_NDB_USE_LOCAL_QUERY_CACHE, - "Use local query cache, note that this cache will _not_ " - "be invalidated if data is updated through other mysql servers", - (gptr*) &global_system_variables.ndb_use_local_query_cache, - (gptr*) &global_system_variables.ndb_use_local_query_cache, - 0, GET_BOOL, OPT_ARG, 0, 0, 
0, 0, 0, 0}, +#ifdef HAVE_QUERY_CACHE + {"ndb_query_cache_type", OPT_NDB_QUERY_CACHE_TYPE, + "0 = OFF = Don't cache or retrieve results. 1 = ON = Cache as query_cache_type states and " + "invalidate cache if tables are updated by other mysql servers. " + "2 = LOCAL = Cache as query_cache_type states and don't bother about what's happening on other " + "mysql servers.", + (gptr*) &global_system_variables.ndb_query_cache_type, + (gptr*) &global_system_variables.ndb_query_cache_type, + 0, GET_ULONG, REQUIRED_ARG, 0, 0, 2, 0, 0, 0}, +#endif #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index a040e8b2ba4..c51bfce7a43 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -370,9 +370,12 @@ sys_ndb_force_send("ndb_force_send", sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); -sys_var_thd_bool -sys_ndb_use_local_query_cache("ndb_use_local_query_cache", - &SV::ndb_use_local_query_cache); +#ifdef HAVE_QUERY_CACHE +sys_var_thd_enum +sys_ndb_query_cache_type("ndb_query_cache_type", + &SV::ndb_query_cache_type, + &ndb_query_cache_type_typelib); +#endif sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); @@ -637,7 +640,9 @@ sys_var *sys_variables[]= &sys_ndb_autoincrement_prefetch_sz, &sys_ndb_force_send, &sys_ndb_use_exact_count, - &sys_ndb_use_local_query_cache, +#ifdef HAVE_QUERY_CACHE + &sys_ndb_query_cache_type, +#endif &sys_ndb_use_transactions, #endif &sys_unique_checks, @@ -805,8 +810,9 @@ struct show_var_st init_vars[]= { (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, - {sys_ndb_use_local_query_cache.name, - (char*) &sys_ndb_use_local_query_cache, SHOW_SYS}, +#ifdef HAVE_QUERY_CACHE + {sys_ndb_query_cache_type.name,(char*) &sys_ndb_query_cache_type, SHOW_SYS}, +#endif {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, #endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 1bf8d179770..da6998ded47 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -366,6 +366,12 @@ TYPELIB query_cache_type_typelib= array_elements(query_cache_type_names)-1,"", query_cache_type_names, NULL }; +const char *ndb_query_cache_type_names[]= { "OFF", "ON", "LOCAL",NullS }; +TYPELIB ndb_query_cache_type_typelib= +{ + array_elements(ndb_query_cache_type_names)-1,"", ndb_query_cache_type_names, NULL +}; + /***************************************************************************** Query_cache_block_table method(s) *****************************************************************************/ diff --git a/sql/sql_cache.h b/sql/sql_cache.h index c933a2349af..b8cbc7953b8 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -412,6 +412,7 @@ protected: }; extern Query_cache query_cache; +extern TYPELIB ndb_query_cache_type_typelib; extern TYPELIB query_cache_type_typelib; void query_cache_end_of_result(THD *thd); void query_cache_abort(NET *net); diff --git a/sql/sql_class.h b/sql/sql_class.h index 06975730195..2ebad4b466a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -403,7 +403,7 @@ struct system_variables ulong ndb_autoincrement_prefetch_sz; my_bool ndb_force_send; my_bool ndb_use_exact_count; - my_bool ndb_use_local_query_cache; + ulong ndb_query_cache_type; my_bool 
ndb_use_transactions; #endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; From 35502d45e0d95428524cc8d65bd029bd4c56c5e6 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 24 Nov 2004 11:56:51 +0000 Subject: [PATCH 03/45] sql/ha_innodb.cc enabled query cache for ndb modified engine interface somewhat sql/ha_innodb.h enabled query cache for ndb modified engine interface somewhat sql/ha_ndbcluster.cc enabled query cache for ndb modified engine interface somewhat ndb will only allow caching and retrieval if running autocommit - return false, but do not invalidate commit count is used as engine data, i.e. - store commit count before store of cache - allow retrieval if commit count has not changed on a table - invalidate if commit count has changed sql/ha_ndbcluster.h enabled query cache for ndb modified engine interface somewhat sql/handler.cc enabled query cache for ndb modified engine interface somewhat sql/handler.h enabled query cache for ndb modified engine interface somewhat new virtual handler method cached_table_registration called on each table before alowing store in query cache - return TRUE - ok to cache, FALSE - not allowed to cache, invalidate queries if engine_data below has changed - sets ulonglong (engine_data) that is stored in query cache for each table - sets callback to be called for each table before usage of cached query, callback = 0 -> no check later sql/mysql_priv.h enabled query cache for ndb modified engine interface somewhat callcack prototype for callback to engine before query cache retrieval sql/sql_cache.cc enabled query cache for ndb modified engine interface somewhat if callback is set on table in cache, do callback to check if allowed to use cache if not allowed to use cache, check if engine_data has changed, if so, invalidate all queries with that table + changes to store and pass callback and engine_data around sql/sql_cache.h enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data sql/table.h enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data sql/ha_innodb.cc: enabled query cache for ndb modified engine interface somewhat sql/ha_innodb.h: enabled query cache for ndb modified engine interface somewhat sql/ha_ndbcluster.cc: enabled query cache for ndb modified engine interface somewhat ndb will only allow caching and retrieval if running autocommit - return false, but do not invalidate commit count is used as engine data, i.e. 
- store commit count before store of cache - allow retrieval if commit count has not changed on a table - invalidate if commit count has changed sql/ha_ndbcluster.h: enabled query cache for ndb modified engine interface somewhat sql/handler.cc: enabled query cache for ndb modified engine interface somewhat sql/handler.h: enabled query cache for ndb modified engine interface somewhat new virtual handler method cached_table_registration called on each table before alowing store in query cache - return TRUE - ok to cache, FALSE - not allowed to cache, invalidate queries if engine_data below has changed - sets ulonglong (engine_data) that is stored in query cache for each table - sets callback to be called for each table before usage of cached query, callback = 0 -> no check later sql/mysql_priv.h: enabled query cache for ndb modified engine interface somewhat callcack prototype for callback to engine before query cache retrieval sql/sql_cache.cc: enabled query cache for ndb modified engine interface somewhat if callback is set on table in cache, do callback to check if allowed to use cache if not allowed to use cache, check if engine_data has changed, if so, invalidate all queries with that table + changes to store and pass callback and engine_data around sql/sql_cache.h: enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data sql/table.h: enabled query cache for ndb modified engine interface somewhat changes to store callback and engine_data --- sql/ha_innodb.cc | 3 +- sql/ha_innodb.h | 20 ++++++- sql/ha_ndbcluster.cc | 132 ++++++++++++++++++++++++++++++++++++++++++- sql/ha_ndbcluster.h | 5 +- sql/handler.cc | 9 --- sql/handler.h | 15 +++-- sql/mysql_priv.h | 3 + sql/sql_cache.cc | 64 ++++++++++++++++----- sql/sql_cache.h | 12 +++- sql/table.h | 4 ++ 10 files changed, 232 insertions(+), 35 deletions(-) diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 07d8da63733..b5c94386677 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -655,8 +655,9 @@ innobase_query_caching_of_table_permitted( char* full_name, /* in: concatenation of database name, the null character '\0', and the table name */ - uint full_name_len) /* in: length of the full name, i.e. + uint full_name_len, /* in: length of the full name, i.e. 
len(dbname) + len(tablename) + 1 */ + ulonglong *unused) /* unused for this engine */ { ibool is_autocommit; trx_t* trx; diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index e76a966c6b9..e3b058d0b42 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -33,6 +33,10 @@ typedef struct st_innobase_share { } INNOBASE_SHARE; +my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, + uint full_name_len, + ulonglong *unused); + /* The class defining a handle to an Innodb table */ class ha_innobase: public handler { @@ -168,6 +172,20 @@ class ha_innobase: public handler void init_table_handle_for_HANDLER(); longlong get_auto_increment(); uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } + /* + ask handler about permission to cache table during query registration + */ + my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *call_back, + ulonglong *engine_data) + { + *call_back= innobase_query_caching_of_table_permitted; + *engine_data= 0; + return innobase_query_caching_of_table_permitted(thd, table_key, + key_length, + engine_data); + } static char *get_mysql_bin_log_name(); static ulonglong get_mysql_bin_log_pos(); @@ -233,8 +251,6 @@ int innobase_close_connection(THD *thd); int innobase_drop_database(char *path); int innodb_show_status(THD* thd); -my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, - uint full_name_len); void innobase_release_temporary_latches(void* innobase_tid); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 8d82f60ae85..8621fb89563 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4455,10 +4455,138 @@ const char* ha_ndbcluster::index_type(uint key_number) } uint8 ha_ndbcluster::table_cache_type() { + DBUG_ENTER("ha_ndbcluster::table_cache_type"); if (m_use_local_query_cache) - return HA_CACHE_TBL_TRANSACT; + { + DBUG_PRINT("exit",("HA_CACHE_TBL_ASKTRANSACT")); + DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); //HA_CACHE_TBL_TRANSACT; + } else - return HA_CACHE_TBL_NOCACHE; + { + DBUG_PRINT("exit",("HA_CACHE_TBL_NOCACHE")); + DBUG_RETURN(HA_CACHE_TBL_NOCACHE); + } +} + +static +my_bool +ndbcluster_cache_retrieval_allowed( +/*======================================*/ + /* out: TRUE if permitted, FALSE if not; + note that the value FALSE means invalidation + of query cache if *engine_data is changed */ + THD* thd, /* in: thd of the user who is trying to + store a result to the query cache or + retrieve it */ + char* full_name, /* in: concatenation of database name, + the null character '\0', and the table + name */ + uint full_name_len, /* in: length of the full name, i.e. 
+ len(dbname) + len(tablename) + 1 */ + ulonglong *engine_data) /* in: value set in call to + ha_ndbcluster::cached_table_registration + out: if return FALSE this is used to invalidate + all cached queries with this table*/ +{ + DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); + char tabname[128]; + char *dbname= full_name; + my_bool is_autocommit; + { + int dbname_len= strlen(full_name); + int tabname_len= full_name_len-dbname_len-1; + memcpy(tabname, full_name+dbname_len+1, tabname_len); + tabname[tabname_len]= '\0'; + } + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + is_autocommit = FALSE; + else + is_autocommit = TRUE; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", + dbname,tabname,is_autocommit)); + if (!is_autocommit) + { + DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", + thd->options & OPTION_NOT_AUTOCOMMIT, + thd->options & OPTION_BEGIN)); + // ToDo enable cache inside a transaction + // no need to invalidate though so leave *engine_data + DBUG_RETURN(FALSE); + } + { + Ndb *ndb; + Uint64 commit_count; + if (!(ndb= check_ndb_in_thd(thd))) + { + *engine_data= *engine_data+1; // invalidate + DBUG_RETURN(FALSE); + } + ndb->setDatabaseName(dbname); + if (ndb_get_table_statistics(ndb, tabname, 0, &commit_count)) + { + *engine_data= *engine_data+1; // invalidate + DBUG_RETURN(FALSE); + } + if (*engine_data != commit_count) + { + *engine_data= commit_count; // invalidate + DBUG_RETURN(FALSE); + } + } + DBUG_PRINT("exit",("*engine_data=%d ok, use cache",*engine_data)); + DBUG_RETURN(TRUE); +} + +my_bool +ha_ndbcluster::cached_table_registration( +/*======================================*/ + /* out: TRUE if permitted, FALSE if not; + note that the value FALSE means invalidation + of query cache if *engine_data is changed */ + THD* thd, /* in: thd of the user who is trying to + store a result to the query cache or + retrieve it */ + char* full_name, /* in: concatenation of database name, + the null character '\0', and the table + name */ + uint full_name_len, /* in: length of the full name, i.e. 
+ len(dbname) + len(tablename) + 1 */ + qc_engine_callback + *engine_callback, /* out: function to be called before using + cache on this table */ + ulonglong *engine_data) /* out: if return FALSE this is used to + invalidate all cached queries with this table*/ +{ + DBUG_ENTER("ha_ndbcluster::cached_table_registration"); + my_bool is_autocommit; + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + is_autocommit = FALSE; + else + is_autocommit = TRUE; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", + m_dbname,m_tabname,is_autocommit)); + if (!is_autocommit) + { + DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", + thd->options & OPTION_NOT_AUTOCOMMIT, + thd->options & OPTION_BEGIN)); + // ToDo enable cache inside a transaction + // no need to invalidate though so leave *engine_data + DBUG_RETURN(FALSE); + } + { + Uint64 commit_count; + m_ndb->setDatabaseName(m_dbname); + if (ndb_get_table_statistics(m_ndb, m_tabname, 0, &commit_count)) + { + *engine_data= 0; + DBUG_RETURN(FALSE); + } + *engine_data= commit_count; + } + *engine_callback= ndbcluster_cache_retrieval_allowed; + DBUG_PRINT("exit",("*engine_data=%d", *engine_data)); + DBUG_RETURN(TRUE); } /* diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index f6c712620c1..7b3b5658175 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -146,7 +146,10 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type(); - + my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data); private: int alter_table_name(const char *from, const char *to); int drop_table(); diff --git a/sql/handler.cc b/sql/handler.cc index 7ddd7b80a34..9e781817c02 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -229,15 +229,6 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) } } -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type) -{ -#ifdef HAVE_INNOBASE_DB - if (cache_type == HA_CACHE_TBL_ASKTRANSACT) - return innobase_query_caching_of_table_permitted(thd, table_key, key_length); -#endif - return 1; -} int ha_init() { diff --git a/sql/handler.h b/sql/handler.h index 252861e5c37..31710ec728c 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -506,10 +506,15 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* - Is query with this table cachable (have sense only for ASKTRANSACT - tables) - */ + /* ask handler about permission to cache table during query registration */ + virtual my_bool cached_table_registration(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data) + { + *engine_callback= 0; + return 1; + } }; /* Some extern variables used with handlers */ @@ -528,8 +533,6 @@ extern TYPELIB tx_isolation_typelib; T != DB_TYPE_BERKELEY_DB && \ T != DB_TYPE_NDBCLUSTER) -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type); enum db_type ha_resolve_by_name(const char *name, uint namelen); const char *ha_get_storage_engine(enum db_type db_type); handler *get_new_handler(TABLE *table, enum db_type db_type); diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 3f55a88b262..77703c2b390 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -340,6 +340,9 @@ inline THD *_current_thd(void) } #define current_thd _current_thd() +typedef my_bool 
(*qc_engine_callback)(THD *thd, char *table_key, + uint key_length, + ulonglong *engine_data); #include "sql_string.h" #include "sql_list.h" #include "sql_map.h" diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 1bf8d179770..5c2698bcef2 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -912,12 +912,12 @@ end: int Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) { + ulonglong engine_data; Query_cache_query *query; Query_cache_block *first_result_block, *result_block; Query_cache_block_table *block_table, *block_table_end; ulong tot_length; Query_cache_query_flags flags; - bool check_tables; DBUG_ENTER("Query_cache::send_result_to_client"); if (query_cache_size == 0 || thd->variables.query_cache_type == 0) @@ -1018,7 +1018,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; } - check_tables= query->tables_type() & HA_CACHE_TBL_ASKTRANSACT; // Check access; block_table= query_block->table(0); block_table_end= block_table+query_block->n_tables; @@ -1079,19 +1078,29 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; // Parse query } #endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ - if (check_tables && !ha_caching_allowed(thd, table->db(), - table->key_length(), - table->type())) + engine_data= table->engine_data(); + if (table->callback() && + !(*table->callback())(thd, table->db(), + table->key_length(), + &engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", table_list.db, table_list.alias)); BLOCK_UNLOCK_RD(query_block); thd->lex->safe_to_cache_query= 0; // Don't try to cache this + if (engine_data != table->engine_data()) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lld-%lld", + table_list.db, table_list.alias, + engine_data, table->engine_data())); + invalidate_table(table->db(), table->key_length()); + } goto err_unlock; // Parse query } else - DBUG_PRINT("qcache", ("handler allow caching (%d) %s,%s", - check_tables, table_list.db, table_list.alias)); + DBUG_PRINT("qcache", ("handler allow caching %s,%s", + table_list.db, table_list.alias)); } move_to_query_list_end(query_block); hits++; @@ -2116,7 +2125,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, if (!insert_table(tables_used->table->key_length, tables_used->table->table_cache_key, block_table, tables_used->db_length, - tables_used->table->file->table_cache_type())) + tables_used->table->file->table_cache_type(), + tables_used->callback_func, + tables_used->engine_data)) break; if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) @@ -2132,9 +2143,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, uint key_length= filename_2_table_key(key, table->table->filename, &db_length); (++block_table)->n= ++n; + /* + There are not callback function for for MyISAM, and engine data + */ if (!insert_table(key_length, key, block_table, db_length, - tables_used->table->file->table_cache_type())) + tables_used->table->file->table_cache_type(), + 0, 0)) goto err; } } @@ -2161,7 +2176,9 @@ err: my_bool Query_cache::insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type) + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data) { DBUG_ENTER("Query_cache::insert_table"); DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d", @@ -2171,6 +2188,23 @@ Query_cache::insert_table(uint key_len, char *key, hash_search(&tables, (byte*) key, key_len)); + 
if (table_block && + table_block->table()->engine_data() != engine_data) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lld-%lld", + table_block->table()->db(), + table_block->table()->table(), + engine_data, + table_block->table()->engine_data())); + /* + as far as we delete all queries with this table, table block will be + deleted, too + */ + invalidate_table(table_block); + table_block= 0; + } + if (table_block == 0) { DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)", @@ -2201,6 +2235,8 @@ Query_cache::insert_table(uint key_len, char *key, header->table(db + db_length + 1); header->key_length(key_len); header->type(cache_type); + header->callback(callback); + header->engine_data(engine_data); } Query_cache_block_table *list_root = table_block->table(0); @@ -2721,9 +2757,11 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, for (; tables_used; tables_used= tables_used->next) { TABLE *table= tables_used->table; - if (!ha_caching_allowed(thd, table->table_cache_key, - table->key_length, - table->file->table_cache_type())) + handler *handler= table->file; + if (!handler->cached_table_registration(thd, table->table_cache_key, + table->key_length, + &tables_used->callback_func, + &tables_used->engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", tables_used->db, tables_used->alias)); diff --git a/sql/sql_cache.h b/sql/sql_cache.h index c933a2349af..7595bfbbd54 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -145,6 +145,10 @@ struct Query_cache_table char *tbl; uint32 key_len; uint8 table_type; + /* unique for every engine reference */ + qc_engine_callback callback_func; + /* data need by some engines */ + ulonglong engine_data_buff; inline char *db() { return (char *) data(); } inline char *table() { return tbl; } @@ -153,6 +157,10 @@ struct Query_cache_table inline void key_length(uint32 len) { key_len= len; } inline uint8 type() { return table_type; } inline void type(uint8 t) { table_type= t; } + inline qc_engine_callback callback() { return callback_func; } + inline void callback(qc_engine_callback fn){ callback_func= fn; } + inline ulonglong engine_data() { return engine_data_buff; } + inline void engine_data(ulonglong data) { engine_data_buff= data; } inline gptr data() { return (gptr)(((byte*)this)+ @@ -281,7 +289,9 @@ protected: TABLE_COUNTER_TYPE tables); my_bool insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type); + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data); void unlink_table(Query_cache_block_table *node); Query_cache_block *get_free_block (ulong len, my_bool not_less, ulong min); diff --git a/sql/table.h b/sql/table.h index 2eb854f553d..c3945ac5d79 100644 --- a/sql/table.h +++ b/sql/table.h @@ -207,6 +207,10 @@ typedef struct st_table_list TABLE *table; /* opened table */ st_table_list *table_list; /* pointer to node of list of all tables */ class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ + /* data need by some engines in query cache*/ + ulonglong engine_data; + /* call back function for asking handler about caching in query cache */ + qc_engine_callback callback_func; GRANT_INFO grant; thr_lock_type lock_type; uint outer_join; /* Which join type */ From 93191c739e8302c312d283169c736867cf8327b3 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 25 Nov 2004 19:49:50 +0200 Subject: [PATCH 04/45] new NDB test with QC sql/sql_cache.cc: if we removed old values in cache, then we 
can cache new one --- mysql-test/r/ndb_cache.result | 43 ----------------------------------- mysql-test/t/ndb_cache.test | 38 +++++++++++++++++++++++-------- sql/sql_cache.cc | 3 ++- 3 files changed, 30 insertions(+), 54 deletions(-) diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result index 714e1831267..e69de29bb2d 100644 --- a/mysql-test/r/ndb_cache.result +++ b/mysql-test/r/ndb_cache.result @@ -1,43 +0,0 @@ -set GLOBAL query_cache_size=1355776; -reset query cache; -flush status; -drop table if exists t1,t2; -CREATE TABLE t1 (a int) ENGINE=ndbcluster; -CREATE TABLE t2 (a int); -select * from t1; -a -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 0 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from t2; -a -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from t1; -a -select * from t2; -a -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 1 -drop table t1, t2; -SET GLOBAL query_cache_size=0; diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index abd09424f64..bd368105a84 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -1,31 +1,49 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc - +set GLOBAL ndb_query_cache_type=on; +# following line have to be removed when ndb_query_cache_type will made +# global only +set ndb_query_cache_type=on; set GLOBAL query_cache_size=1355776; reset query cache; flush status; --disable_warnings -drop table if exists t1,t2; +drop table if exists t1; --enable_warnings CREATE TABLE t1 (a int) ENGINE=ndbcluster; -CREATE TABLE t2 (a int); - +insert into t1 value (2); select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -select * from t2; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; +connect (con1,localhost,root,,); +connection con1; +use test; +set autocommit=0; +update t1 set a=3; +connect (con2,localhost,root,,); +connection con2; +select * from t1; select * from t1; -select * from t2; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; +connection con1; +select * from t1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +commit; +connection con2; +select * from t1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +drop table t1; -drop table t1, t2; SET GLOBAL query_cache_size=0; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 105dcae0319..28e814a2d62 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1093,7 +1093,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", table_list.db, table_list.alias)); BLOCK_UNLOCK_RD(query_block); - thd->lex->safe_to_cache_query= 0; // Don't try to 
cache this if (engine_data != table->engine_data()) { DBUG_PRINT("qcache", @@ -1102,6 +1101,8 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) engine_data, table->engine_data())); invalidate_table(table->db(), table->key_length()); } + else + thd->lex->safe_to_cache_query= 0; // Don't try to cache this goto err_unlock; // Parse query } else From e0b469aa1d9827fe722904d974bf1c7b2c3021cc Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 25 Nov 2004 18:56:10 +0000 Subject: [PATCH 05/45] removed special ndb query cache variable ndb will always return query cache type ASKTRANSACT --- sql/ha_ndbcluster.cc | 24 +++--------------------- sql/ha_ndbcluster.h | 1 - sql/mysqld.cc | 11 ----------- sql/set_var.cc | 12 ------------ sql/sql_cache.cc | 6 ------ sql/sql_cache.h | 1 - sql/sql_class.h | 1 - 7 files changed, 3 insertions(+), 53 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 5dd977dc95b..3a1dde2e4d4 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3025,9 +3025,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_transaction_on= FALSE; else m_transaction_on= thd->variables.ndb_use_transactions; -#ifdef HAVE_QUERY_CACHE - m_query_cache_type= thd->variables.ndb_query_cache_type; -#endif m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: @@ -3752,8 +3749,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_ha_not_exact_count(FALSE), m_force_send(TRUE), m_autoincrement_prefetch(32), - m_transaction_on(TRUE), - m_query_cache_type(0) + m_transaction_on(TRUE) { int i; @@ -4457,22 +4453,8 @@ const char* ha_ndbcluster::index_type(uint key_number) } uint8 ha_ndbcluster::table_cache_type() { - DBUG_ENTER("ha_ndbcluster::table_cache_type"); - switch (m_query_cache_type) - { - case 0: - DBUG_PRINT("exit",("HA_CACHE_TBL_NOCACHE")); - DBUG_RETURN(HA_CACHE_TBL_NOCACHE); - case 1: - DBUG_PRINT("exit",("HA_CACHE_TBL_ASKTRANSACT")); - DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); - case 2: - DBUG_PRINT("exit",("HA_CACHE_TBL_TRANSACT")); - DBUG_RETURN(HA_CACHE_TBL_TRANSACT); - default: - DBUG_PRINT("exit",("HA_CACHE_TBL_NOCACHE")); - DBUG_RETURN(HA_CACHE_TBL_NOCACHE); - } + DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT"); + DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); } static diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 33f06dd9092..ab39993c8e6 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -247,7 +247,6 @@ class ha_ndbcluster: public handler bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; - ulong m_query_cache_type; void set_rec_per_key(); void records_update(); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 2778bdedee6..14fd267e85d 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3969,7 +3969,6 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, - OPT_NDB_QUERY_CACHE_TYPE, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4429,16 +4428,6 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, -#ifdef HAVE_QUERY_CACHE - {"ndb_query_cache_type", OPT_NDB_QUERY_CACHE_TYPE, - "0 = OFF = Don't cache or retrieve results. 
1 = ON = Cache as query_cache_type states and " - "invalidate cache if tables are updated by other mysql servers. " - "2 = LOCAL = Cache as query_cache_type states and don't bother about what's happening on other " - "mysql servers.", - (gptr*) &global_system_variables.ndb_query_cache_type, - (gptr*) &global_system_variables.ndb_query_cache_type, - 0, GET_ULONG, REQUIRED_ARG, 0, 0, 2, 0, 0, 0}, -#endif #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index c51bfce7a43..2031ac15412 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -370,12 +370,6 @@ sys_ndb_force_send("ndb_force_send", sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); -#ifdef HAVE_QUERY_CACHE -sys_var_thd_enum -sys_ndb_query_cache_type("ndb_query_cache_type", - &SV::ndb_query_cache_type, - &ndb_query_cache_type_typelib); -#endif sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); @@ -640,9 +634,6 @@ sys_var *sys_variables[]= &sys_ndb_autoincrement_prefetch_sz, &sys_ndb_force_send, &sys_ndb_use_exact_count, -#ifdef HAVE_QUERY_CACHE - &sys_ndb_query_cache_type, -#endif &sys_ndb_use_transactions, #endif &sys_unique_checks, @@ -810,9 +801,6 @@ struct show_var_st init_vars[]= { (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, -#ifdef HAVE_QUERY_CACHE - {sys_ndb_query_cache_type.name,(char*) &sys_ndb_query_cache_type, SHOW_SYS}, -#endif {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, #endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 105dcae0319..5c2698bcef2 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -366,12 +366,6 @@ TYPELIB query_cache_type_typelib= array_elements(query_cache_type_names)-1,"", query_cache_type_names, NULL }; -const char *ndb_query_cache_type_names[]= { "OFF", "ON", "LOCAL",NullS }; -TYPELIB ndb_query_cache_type_typelib= -{ - array_elements(ndb_query_cache_type_names)-1,"", ndb_query_cache_type_names, NULL -}; - /***************************************************************************** Query_cache_block_table method(s) *****************************************************************************/ diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 9ab9b7cc549..7595bfbbd54 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -422,7 +422,6 @@ protected: }; extern Query_cache query_cache; -extern TYPELIB ndb_query_cache_type_typelib; extern TYPELIB query_cache_type_typelib; void query_cache_end_of_result(THD *thd); void query_cache_abort(NET *net); diff --git a/sql/sql_class.h b/sql/sql_class.h index 2ebad4b466a..d0d9afc7746 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -403,7 +403,6 @@ struct system_variables ulong ndb_autoincrement_prefetch_sz; my_bool ndb_force_send; my_bool ndb_use_exact_count; - ulong ndb_query_cache_type; my_bool ndb_use_transactions; #endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; From ead004f91f38d5c90cb73e63969bc1a222c22325 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 9 Dec 2004 12:55:17 +0100 Subject: [PATCH 06/45] Test for query cache in combination with NDB mysql-test/r/ndb_cache.result: Updated tests and results for ndb_cache mysql-test/t/ndb_cache.test: Updated tests and results for ndb_cache --- 
mysql-test/r/ndb_cache.result | 191 ++++++++++++++++++++++++++++++++++ mysql-test/t/ndb_cache.test | 102 +++++++++++++++--- 2 files changed, 278 insertions(+), 15 deletions(-) diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result index e69de29bb2d..7423771e026 100644 --- a/mysql-test/r/ndb_cache.result +++ b/mysql-test/r/ndb_cache.result @@ -0,0 +1,191 @@ +drop table if exists t1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +CREATE TABLE t1 ( pk int not null primary key, +a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +update t1 set a=3 where pk=1; +select * from t1; +pk a b c +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +use test; +select * from t1; +pk a b c +2 7 8 Second row +1 3 3 First row +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 4 +update t1 set a=4 where b=3; +use test; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 5 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +begin; +update t1 set a=5 where pk=1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like 
"Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +commit; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 8 +drop table t1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +SET GLOBAL query_cache_size=0; diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index bd368105a84..8bdcbe17728 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -1,33 +1,77 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc -set GLOBAL ndb_query_cache_type=on; -# following line have to be removed when ndb_query_cache_type will made -# global only -set ndb_query_cache_type=on; -set GLOBAL query_cache_size=1355776; -reset query cache; -flush status; --disable_warnings drop table if exists t1; --enable_warnings -CREATE TABLE t1 (a int) ENGINE=ndbcluster; -insert into t1 value (2); +# Turn on and reset query cache +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + +# Create test table in NDB +CREATE TABLE t1 ( pk int not null primary key, + a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); + +# Perform one query which should be inerted in query cache select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; + +# Perform the same query and make sure the query cache is hit +select * from t1; +show status like "Qcache_hits"; + +# Update the table and make sure the correct data is returned +update t1 set a=3 where pk=1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Insert a new record and make sure the correct data is returned +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_hits"; + +# Perform a "new" query and make sure the query cache is not hit +select * from t1 where b=3; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_hits"; + +# Same query again... 
+select * from t1 where b=3; +show status like "Qcache_hits"; + +# Delete from the table +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Start another connection and check that the query cache is hit connect (con1,localhost,root,,); connection con1; use test; -set autocommit=0; -update t1 set a=3; +select * from t1; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Update the table and switch to other connection +update t1 set a=4 where b=3; connect (con2,localhost,root,,); connection con2; -select * from t1; -select * from t1; +use test; show status like "Qcache_queries_in_cache"; +select * from t1; +select * from t1; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; @@ -36,14 +80,42 @@ select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; -commit; + +# Use transactions and make sure the query cache is not updated until +# transaction is commited +begin; +update t1 set a=5 where pk=1; +# Note!! the below test shows that table is invalidated +# before transaction is committed +# TODO Fix so that cache is not invalidated HERE! +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; connection con2; select * from t1; -select * from t1; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; +connection con1; +commit; +# TODO Here query is invalidated once again, commit count in NDB has changed +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + drop table t1; +show status like "Qcache_queries_in_cache"; SET GLOBAL query_cache_size=0; + + From fcca1791e341ab23d9b571938002a4576972ed32 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Dec 2004 12:09:19 +0100 Subject: [PATCH 07/45] Added test case for multiple MySQL Servers connected to one cluster Added test for cache in combination with multiple MySQL Servers mysql-test/include/have_ndb.inc: Added suport for connecting to two MySQL Server instances (I hope this will work on all platforms, since it does not use the socket parameter when openeing the connections, analog to how it looks like in replication tests) --- mysql-test/include/have_ndb.inc | 33 +++++++++++-- mysql-test/r/ndb_cache_multi.result | 72 +++++++++++++++++++++++++++++ mysql-test/r/ndb_multi.result | 49 ++++++++++++++++++++ mysql-test/r/server_id.require | 2 + mysql-test/r/server_id1.require | 2 + mysql-test/t/ndb_cache_multi.test | 63 +++++++++++++++++++++++++ mysql-test/t/ndb_multi.test | 42 +++++++++++++++++ 7 files changed, 260 insertions(+), 3 deletions(-) create mode 100644 mysql-test/r/ndb_cache_multi.result create mode 100644 mysql-test/r/ndb_multi.result create mode 100644 mysql-test/r/server_id.require create mode 100644 mysql-test/r/server_id1.require create mode 100644 mysql-test/t/ndb_cache_multi.test create mode 100644 mysql-test/t/ndb_multi.test diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc index 84e60657876..249f3362bce 100644 --- a/mysql-test/include/have_ndb.inc +++ 
b/mysql-test/include/have_ndb.inc @@ -2,6 +2,33 @@ disable_query_log; show variables like "have_ndbcluster"; enable_query_log; -#connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK); -#connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,$MASTER_MYSOCK1); -#connection server1; + +# Setup connections to both MySQL Servers connected to the cluster +connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); +connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); + +# Check that server1 has NDB support +connection server1; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id.require show variables like "server_id"; +enable_query_log; + +# Check that server2 has NDB support +connection server2; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id1.require show variables like "server_id"; +enable_query_log; + +# Set the default connection to 'server1' +connection server1; + diff --git a/mysql-test/r/ndb_cache_multi.result b/mysql-test/r/ndb_cache_multi.result new file mode 100644 index 00000000000..c7135ed9e8a --- /dev/null +++ b/mysql-test/r/ndb_cache_multi.result @@ -0,0 +1,72 @@ +drop table if exists t1, t2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +2 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +update t1 set a=3 where a=2; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +drop table t1, t2; diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result new file mode 100644 index 00000000000..4a2389cd1ff --- /dev/null +++ b/mysql-test/r/ndb_multi.result @@ -0,0 +1,49 @@ +drop table if exists t1, t2, t3, t4; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like 
'handler_discover%'; +Variable_name Value +Handler_discover 0 +flush status; +select * from t1; +a +2 +update t1 set a=3 where a=2; +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 1 +create table t3 (a int not null primary key, b varchar(22), +c int, last_col text) engine=ndb; +insert into t3 values(1, 'Hi!', 89, 'Longtext column'); +create table t4 (pk int primary key, b int) engine=ndb; +select * from t1; +a +3 +select * from t3; +a b c last_col +1 Hi! 89 Longtext column +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 1 +show tables like 't4'; +Tables_in_test (t4) +t4 +show status like 'handler_discover%'; +Variable_name Value +Handler_discover 2 +show tables; +Tables_in_test +t1 +t2 +t3 +t4 +drop table t1, t2, t3, t4; diff --git a/mysql-test/r/server_id.require b/mysql-test/r/server_id.require new file mode 100644 index 00000000000..adffcc483b1 --- /dev/null +++ b/mysql-test/r/server_id.require @@ -0,0 +1,2 @@ +Variable_name Value +server_id 1 diff --git a/mysql-test/r/server_id1.require b/mysql-test/r/server_id1.require new file mode 100644 index 00000000000..666c94ef633 --- /dev/null +++ b/mysql-test/r/server_id1.require @@ -0,0 +1,2 @@ +Variable_name Value +server_id 102 diff --git a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test new file mode 100644 index 00000000000..ac4a80cee30 --- /dev/null +++ b/mysql-test/t/ndb_cache_multi.test @@ -0,0 +1,63 @@ +-- source include/have_query_cache.inc +-- source include/have_ndb.inc + +--disable_warnings +drop table if exists t1, t2; +--enable_warnings + + +# Turn on and reset query cache on server1 +connection server1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + +# Turn on and reset query cache on server2 +connection server2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +reset query cache; +flush status; + + + +# Create test tables in NDB and load them into cache +# on server1 +connection server1; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + + +# Connect server2, load table in to cache, then update the table +connection server2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +update t1 set a=3 where a=2; + +# Connect to server1 and check that cache is invalidated +# and correct data is returned +connection server1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1, t2; + + diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test new file mode 100644 index 00000000000..65b1c7b2db4 --- /dev/null +++ b/mysql-test/t/ndb_multi.test @@ -0,0 +1,42 @@ +-- source include/have_ndb.inc + +--disable_warnings +drop table if exists t1, t2, t3, t4; +--enable_warnings + +flush status; + +# Create test tables on server1 +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 
value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like 'handler_discover%'; + +# Connect to server2 and use the tables from there +connection server2; +flush status; +select * from t1; +update t1 set a=3 where a=2; +show status like 'handler_discover%'; + +# Create a new table on server2 +create table t3 (a int not null primary key, b varchar(22), +c int, last_col text) engine=ndb; +insert into t3 values(1, 'Hi!', 89, 'Longtext column'); +create table t4 (pk int primary key, b int) engine=ndb; + +# Check that the tables are accessible from server1 +connection server1; +select * from t1; +select * from t3; +show status like 'handler_discover%'; +show tables like 't4'; +show status like 'handler_discover%'; +show tables; + +drop table t1, t2, t3, t4; + + From bf532e26d117201931718d5eab90ba93a17d3510 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 16 Dec 2004 11:09:06 +0100 Subject: [PATCH 08/45] Moved test for multi ndb to have_ndb_multi --- mysql-test/include/have_multi_ndb.inc | 28 ++++++++++++++++++++++++++ mysql-test/include/have_ndb.inc | 29 --------------------------- mysql-test/t/ndb_cache_multi.test | 1 + mysql-test/t/ndb_multi.test | 2 ++ 4 files changed, 31 insertions(+), 29 deletions(-) create mode 100644 mysql-test/include/have_multi_ndb.inc diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc new file mode 100644 index 00000000000..d0c083cab86 --- /dev/null +++ b/mysql-test/include/have_multi_ndb.inc @@ -0,0 +1,28 @@ +# Setup connections to both MySQL Servers connected to the cluster +connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); +connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); + +# Check that server1 has NDB support +connection server1; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id.require show variables like "server_id"; +enable_query_log; + +# Check that server2 has NDB support +connection server2; +disable_query_log; +--disable_warnings +drop table if exists t1, t2; +--enable_warnings +flush tables; +@r/have_ndb.require show variables like "have_ndbcluster"; +@r/server_id1.require show variables like "server_id"; +enable_query_log; + +# Set the default connection to 'server1' +connection server1; diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc index 249f3362bce..d000a954733 100644 --- a/mysql-test/include/have_ndb.inc +++ b/mysql-test/include/have_ndb.inc @@ -3,32 +3,3 @@ disable_query_log; show variables like "have_ndbcluster"; enable_query_log; -# Setup connections to both MySQL Servers connected to the cluster -connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); -connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); - -# Check that server1 has NDB support -connection server1; -disable_query_log; ---disable_warnings -drop table if exists t1, t2; ---enable_warnings -flush tables; -@r/have_ndb.require show variables like "have_ndbcluster"; -@r/server_id.require show variables like "server_id"; -enable_query_log; - -# Check that server2 has NDB support -connection server2; -disable_query_log; ---disable_warnings -drop table if exists t1, t2; ---enable_warnings -flush tables; -@r/have_ndb.require show variables like "have_ndbcluster"; -@r/server_id1.require show variables like "server_id"; -enable_query_log; - -# Set the default connection to 'server1' -connection server1; - diff --git 
a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test index ac4a80cee30..7202b5f8558 100644 --- a/mysql-test/t/ndb_cache_multi.test +++ b/mysql-test/t/ndb_cache_multi.test @@ -1,5 +1,6 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc +-- source include/have_multi_ndb.inc --disable_warnings drop table if exists t1, t2; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index 65b1c7b2db4..9286721b677 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -1,4 +1,6 @@ -- source include/have_ndb.inc +-- source include/have_multi_ndb.inc + --disable_warnings drop table if exists t1, t2, t3, t4; From d6747f963e13d87c1a7bc952b95ceeba8ba2aada Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 14 Jan 2005 14:33:26 +0100 Subject: [PATCH 09/45] Merge problem FC3 fix sql/ha_ndbcluster.cc: Fixed merge problem, that occured when m_ndb where removed. Fix fo FC3 compile problem. BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + sql/ha_ndbcluster.cc | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 9e25aca5fa2..b352bceba8a 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -141,6 +141,7 @@ mronstrom@build.mysql.com mronstrom@mysql.com mskold@mysql.com msvensson@build.mysql.com +msvensson@neptunus.homeip.net mwagner@cash.mwagner.org mwagner@evoq.mwagner.org mwagner@here.mwagner.org diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 11dd20a46d6..0d83955a335 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2905,9 +2905,13 @@ int ha_ndbcluster::reset() DBUG_RETURN(1); } +static const char *ha_ndb_bas_ext[]= { ha_ndb_ext, NullS }; -const char **ha_ndbcluster::bas_ext() const -{ static const char *ext[]= { ha_ndb_ext, NullS }; return ext; } +const char** +ha_ndbcluster::bas_ext() const +{ + return ha_ndb_bas_ext; +} /* @@ -4644,8 +4648,9 @@ ha_ndbcluster::cached_table_registration( } { Uint64 commit_count; - m_ndb->setDatabaseName(m_dbname); - if (ndb_get_table_statistics(m_ndb, m_tabname, 0, &commit_count)) + Ndb *ndb= get_ndb(); + ndb->setDatabaseName(m_dbname); + if (ndb_get_table_statistics(ndb, m_tabname, 0, &commit_count)) { *engine_data= 0; DBUG_RETURN(FALSE); From 38e395aa325eb418cf92a6ce62646959ee2ed477 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 15:43:08 +0100 Subject: [PATCH 10/45] WL#2269 Enable query cache for NDB - Added a thread that fetches commit_count for open tables. This will mean that NDB will not have to be contacted for every use of a cached query. sql/ha_ndbcluster.cc: Added a thread that periodically will fetch commit_count for open tables and store that value in share. The commit count value is then used when query cache asks if a cached query can be used. The thread activation interval is regulated by the config variable ndb_cache_check_time, it's default value is 0 which means that NDB is contacted every time a cached query is reused. 
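For illustration only (not part of the change itself): the new variable is exercised in the ndb_cache2 test added below roughly like this, where the value 0 keeps the old behaviour of asking NDB on every cache lookup:

  set GLOBAL query_cache_type=on;
  set GLOBAL query_cache_size=1355776;
  set GLOBAL ndb_cache_check_time=5;   # refresh cached commit counts every 5 seconds
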
sql/ha_ndbcluster.h: Added commit_count to share Added ndb_cache_check_time sql/mysqld.cc: Added config variable ndb_cache_check_time sql/set_var.cc: Added config variable ndb_cache_check_time --- mysql-test/r/ndb_cache2.result | 193 +++++++++++++++++ mysql-test/r/ndb_cache_multi2.result | 74 +++++++ mysql-test/t/ndb_cache2.test | 126 +++++++++++ mysql-test/t/ndb_cache_multi2.test | 71 +++++++ sql/ha_ndbcluster.cc | 306 ++++++++++++++++++++------- sql/ha_ndbcluster.h | 2 + sql/mysqld.cc | 7 +- sql/set_var.cc | 3 + 8 files changed, 708 insertions(+), 74 deletions(-) create mode 100644 mysql-test/r/ndb_cache2.result create mode 100644 mysql-test/r/ndb_cache_multi2.result create mode 100644 mysql-test/t/ndb_cache2.test create mode 100644 mysql-test/t/ndb_cache_multi2.test diff --git a/mysql-test/r/ndb_cache2.result b/mysql-test/r/ndb_cache2.result new file mode 100644 index 00000000000..ce10e9dab00 --- /dev/null +++ b/mysql-test/r/ndb_cache2.result @@ -0,0 +1,193 @@ +drop table if exists t1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=5; +reset query cache; +flush status; +CREATE TABLE t1 ( pk int not null primary key, +a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +pk a b c +1 2 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +update t1 set a=3 where pk=1; +select * from t1; +pk a b c +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1; +pk a b c +2 7 8 Second row +4 5 6 Fourth row +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 2 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +use test; +select * from t1; +pk a b c +2 7 8 Second row +1 3 3 First row +select * from t1 where b=3; +pk a b c +1 3 3 First row +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 4 +update t1 set a=4 where b=3; +use test; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like 
"Qcache_hits"; +Variable_name Value +Qcache_hits 5 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +begin; +update t1 set a=5 where pk=1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 7 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 4 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +commit; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 7 +select * from t1; +pk a b c +2 7 8 Second row +1 5 3 First row +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 9 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 8 +drop table t1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +SET GLOBAL query_cache_size=0; +SET GLOBAL ndb_cache_check_time=0; diff --git a/mysql-test/r/ndb_cache_multi2.result b/mysql-test/r/ndb_cache_multi2.result new file mode 100644 index 00000000000..6e435c071b5 --- /dev/null +++ b/mysql-test/r/ndb_cache_multi2.result @@ -0,0 +1,74 @@ +drop table if exists t1, t2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +a +2 +select * from t2; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +2 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +update t1 set a=3 where a=2; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; 
+Variable_name Value +Qcache_inserts 2 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a +3 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 2 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +drop table t1, t2; diff --git a/mysql-test/t/ndb_cache2.test b/mysql-test/t/ndb_cache2.test new file mode 100644 index 00000000000..5c1674a7021 --- /dev/null +++ b/mysql-test/t/ndb_cache2.test @@ -0,0 +1,126 @@ +-- source include/have_query_cache.inc +-- source include/have_ndb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + + +# Turn on and reset query cache +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +# Turn on thread that will fetch commit count for open tables +set GLOBAL ndb_cache_check_time=5; +reset query cache; +flush status; + +# Wait for thread to wake up and start "working" +sleep 20; + +# Create test table in NDB +CREATE TABLE t1 ( pk int not null primary key, + a int, b int not null, c varchar(20)) ENGINE=ndbcluster; +insert into t1 value (1, 2, 3, 'First row'); + +# Perform one query which should be inerted in query cache +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Perform the same query and make sure the query cache is hit +select * from t1; +show status like "Qcache_hits"; + +# Update the table and make sure the correct data is returned +update t1 set a=3 where pk=1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Insert a new record and make sure the correct data is returned +insert into t1 value (2, 7, 8, 'Second row'); +insert into t1 value (4, 5, 6, 'Fourth row'); +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_hits"; + +# Perform a "new" query and make sure the query cache is not hit +select * from t1 where b=3; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_hits"; + +# Same query again... 
+select * from t1 where b=3; +show status like "Qcache_hits"; + +# Delete from the table +delete from t1 where c='Fourth row'; +show status like "Qcache_queries_in_cache"; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Start another connection and check that the query cache is hit +connect (con1,localhost,root,,); +connection con1; +use test; +select * from t1; +select * from t1 where b=3; +show status like "Qcache_hits"; + +# Update the table and switch to other connection +update t1 set a=4 where b=3; +connect (con2,localhost,root,,); +connection con2; +use test; +show status like "Qcache_queries_in_cache"; +select * from t1; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +# Use transactions and make sure the query cache is not updated until +# transaction is commited +begin; +update t1 set a=5 where pk=1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +commit; +# Sleep to let the query cache thread update commit count +sleep 10; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con2; +select * from t1; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +connection con1; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1; + +show status like "Qcache_queries_in_cache"; + +SET GLOBAL query_cache_size=0; +SET GLOBAL ndb_cache_check_time=0; + + diff --git a/mysql-test/t/ndb_cache_multi2.test b/mysql-test/t/ndb_cache_multi2.test new file mode 100644 index 00000000000..a9d008dba7c --- /dev/null +++ b/mysql-test/t/ndb_cache_multi2.test @@ -0,0 +1,71 @@ +-- source include/have_query_cache.inc +-- source include/have_ndb.inc +-- source include/have_multi_ndb.inc + +--disable_warnings +drop table if exists t1, t2; +--enable_warnings + + +# Turn on and reset query cache on server1 +connection server1; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; + +# Turn on and reset query cache on server2 +connection server2; +set GLOBAL query_cache_type=on; +set GLOBAL query_cache_size=1355776; +set GLOBAL ndb_cache_check_time=1; +reset query cache; +flush status; + +# Sleep so that the query cache check thread has time to start +sleep 15; + + +# Create test tables in NDB and load them into cache +# on server1 +connection server1; +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + + +# Connect server2, load table in to cache, then update the table +connection server2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +update t1 set 
a=3 where a=2; + +# Sleep so that the query cache check thread has time to run +sleep 5; + +# Connect to server1 and check that cache is invalidated +# and correct data is returned +connection server1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop table t1, t2; + + diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 0d83955a335..4f6e243db93 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -86,6 +86,12 @@ static int unpackfrm(const void **data, uint *len, static int ndb_get_table_statistics(Ndb*, const char *, Uint64* rows, Uint64* commits); +// Util thread variables +static pthread_t ndb_util_thread; +pthread_mutex_t LOCK_ndb_util_thread; +pthread_cond_t COND_ndb_util_thread; +extern "C" pthread_handler_decl(ndb_util_thread_func, arg); +ulong ndb_cache_check_time; /* Dummy buffer to read zero pack_length fields @@ -3865,6 +3871,7 @@ ha_ndbcluster::~ha_ndbcluster() } + /* Open a table for further use - fetch metadata for this table from NDB @@ -3963,16 +3970,14 @@ void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb) Ndb* check_ndb_in_thd(THD* thd) { - DBUG_ENTER("check_ndb_in_thd"); - Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; - + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; if (!thd_ndb) { if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) - DBUG_RETURN(NULL); + return NULL; thd->transaction.thd_ndb= thd_ndb; } - DBUG_RETURN(thd_ndb->ndb); + return thd_ndb->ndb; } @@ -4310,13 +4315,21 @@ bool ndbcluster_init() (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, (hash_get_key) ndbcluster_get_key,0,0); pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); + pthread_mutex_init(&LOCK_ndb_util_thread,MY_MUTEX_INIT_FAST); + pthread_cond_init(&COND_ndb_util_thread,NULL); + + // Create utility thread + pthread_t tmp; + if (pthread_create(&tmp,&connection_attrib,ndb_util_thread_func,0)) + { + DBUG_PRINT("error", ("Could not create ndb utility thread")); + goto ndbcluster_init_error; + } + ndbcluster_inited= 1; -#ifdef USE_DISCOVER_ON_STARTUP - if (ndb_discover_tables() != 0) - goto ndbcluster_init_error; -#endif DBUG_RETURN(FALSE); + ndbcluster_init_error: ndbcluster_end(); DBUG_RETURN(TRUE); @@ -4326,12 +4339,19 @@ bool ndbcluster_init() /* End use of the NDB Cluster table handler - free all global variables allocated by - ndcluster_init() + ndbcluster_init() */ bool ndbcluster_end() { DBUG_ENTER("ndbcluster_end"); + + // Kill ndb utility thread + (void) pthread_mutex_lock(&LOCK_ndb_util_thread); + DBUG_PRINT("exit",("killing ndb util thread: %lx",ndb_util_thread)); + (void) pthread_cond_signal(&COND_ndb_util_thread); + (void) pthread_mutex_unlock(&LOCK_ndb_util_thread); + if(g_ndb) delete g_ndb; g_ndb= NULL; @@ -4342,6 +4362,8 @@ bool ndbcluster_end() DBUG_RETURN(0); hash_free(&ndbcluster_open_tables); pthread_mutex_destroy(&ndbcluster_mutex); + pthread_mutex_destroy(&LOCK_ndb_util_thread); + pthread_cond_destroy(&COND_ndb_util_thread); ndbcluster_inited= 0; DBUG_RETURN(0); } @@ -4534,12 +4556,53 @@ const char* ha_ndbcluster::index_type(uint key_number) return "HASH"; } } + uint8 ha_ndbcluster::table_cache_type() { DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT"); DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); } + +uint ndb_get_commitcount(THD* thd, char* dbname, char* tabname, + Uint64* commit_count) +{ + 
DBUG_ENTER("ndb_get_commitcount"); + + if (ndb_cache_check_time > 0) + { + // Use cached commit_count from share + char name[FN_REFLEN]; + NDB_SHARE* share; + (void)strxnmov(name, FN_REFLEN, + "./",dbname,"/",tabname,NullS); + DBUG_PRINT("info", ("name: %s", name)); + pthread_mutex_lock(&ndbcluster_mutex); + if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) name, + strlen(name)))) + { + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_RETURN(1); + } + *commit_count= share->commit_count; + DBUG_PRINT("info", ("commit_count: %d", *commit_count)); + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_RETURN(0); + } + + // Get commit_count from NDB + Ndb *ndb; + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(1); + ndb->setDatabaseName(dbname); + + if (ndb_get_table_statistics(ndb, tabname, 0, commit_count)) + DBUG_RETURN(1); + DBUG_RETURN(0); +} + + static my_bool ndbcluster_cache_retrieval_allowed( @@ -4561,51 +4624,33 @@ ndbcluster_cache_retrieval_allowed( all cached queries with this table*/ { DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); - char tabname[128]; - char *dbname= full_name; - my_bool is_autocommit; - { - int dbname_len= strlen(full_name); - int tabname_len= full_name_len-dbname_len-1; - memcpy(tabname, full_name+dbname_len+1, tabname_len); - tabname[tabname_len]= '\0'; - } - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - is_autocommit = FALSE; - else - is_autocommit = TRUE; + + Uint64 commit_count; + bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + char* dbname= full_name; + char* tabname= dbname+strlen(dbname)+1; + DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", - dbname,tabname,is_autocommit)); + dbname, tabname, is_autocommit)); + if (!is_autocommit) + DBUG_RETURN(FALSE); + + if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) { - DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", - thd->options & OPTION_NOT_AUTOCOMMIT, - thd->options & OPTION_BEGIN)); - // ToDo enable cache inside a transaction - // no need to invalidate though so leave *engine_data + *engine_data= *engine_data+1; // invalidate DBUG_RETURN(FALSE); } + DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu", + *engine_data, commit_count)); + if (*engine_data != commit_count) { - Ndb *ndb; - Uint64 commit_count; - if (!(ndb= check_ndb_in_thd(thd))) - { - *engine_data= *engine_data+1; // invalidate - DBUG_RETURN(FALSE); - } - ndb->setDatabaseName(dbname); - if (ndb_get_table_statistics(ndb, tabname, 0, &commit_count)) - { - *engine_data= *engine_data+1; // invalidate - DBUG_RETURN(FALSE); - } - if (*engine_data != commit_count) - { - *engine_data= commit_count; // invalidate - DBUG_RETURN(FALSE); - } + *engine_data= commit_count; // invalidate + DBUG_PRINT("exit",("Do not use cache, commit_count has changed")); + DBUG_RETURN(FALSE); } - DBUG_PRINT("exit",("*engine_data=%d ok, use cache",*engine_data)); + + DBUG_PRINT("exit",("OK to use cache, *engine_data=%llu",*engine_data)); DBUG_RETURN(TRUE); } @@ -4630,35 +4675,24 @@ ha_ndbcluster::cached_table_registration( invalidate all cached queries with this table*/ { DBUG_ENTER("ha_ndbcluster::cached_table_registration"); - my_bool is_autocommit; - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - is_autocommit = FALSE; - else - is_autocommit = TRUE; + + bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", m_dbname,m_tabname,is_autocommit)); if (!is_autocommit) + 
DBUG_RETURN(FALSE); + + + Uint64 commit_count; + if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count)) { - DBUG_PRINT("info",("OPTION_NOT_AUTOCOMMIT=%d OPTION_BEGIN=%d", - thd->options & OPTION_NOT_AUTOCOMMIT, - thd->options & OPTION_BEGIN)); - // ToDo enable cache inside a transaction - // no need to invalidate though so leave *engine_data + *engine_data= 0; + DBUG_PRINT("error", ("Could not get commitcount")) DBUG_RETURN(FALSE); } - { - Uint64 commit_count; - Ndb *ndb= get_ndb(); - ndb->setDatabaseName(m_dbname); - if (ndb_get_table_statistics(ndb, m_tabname, 0, &commit_count)) - { - *engine_data= 0; - DBUG_RETURN(FALSE); - } - *engine_data= commit_count; - } + *engine_data= commit_count; *engine_callback= ndbcluster_cache_retrieval_allowed; - DBUG_PRINT("exit",("*engine_data=%d", *engine_data)); + DBUG_PRINT("exit",("*engine_data=%llu", *engine_data)); DBUG_RETURN(TRUE); } @@ -4700,8 +4734,14 @@ static NDB_SHARE* get_share(const char *table_name) } thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + share->commit_count= 0; } } + DBUG_PRINT("share", + ("table_name: %s, length: %d, use_count: %d, commit_count: %d", + share->table_name, share->table_name_length, share->use_count, + share->commit_count)); + share->use_count++; pthread_mutex_unlock(&ndbcluster_mutex); return share; @@ -4868,10 +4908,10 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, ndb->closeTransaction(pTrans); if(row_count) - * row_count= sum_rows; + *row_count= sum_rows; if(commit_count) - * commit_count= sum_commits; - DBUG_PRINT("exit", ("records: %u commits: %u", sum_rows, sum_commits)); + *commit_count= sum_commits; + DBUG_PRINT("exit", ("records: %llu commits: %llu", sum_rows, sum_commits)); DBUG_RETURN(0); } while(0); @@ -4906,4 +4946,124 @@ int ha_ndbcluster::write_ndb_file() DBUG_RETURN(error); } + +// Utility thread main loop +extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused))) +{ + THD *thd; // needs to be first for thread_stack + int error = 0; + struct timespec abstime; + + my_thread_init(); + DBUG_ENTER("ndb_util_thread"); + DBUG_PRINT("enter", ("ndb_cache_check_time: %d", ndb_cache_check_time)); + + thd= new THD; // note that contructor of THD uses DBUG_ ! 
+ THD_CHECK_SENTRY(thd); + + pthread_detach_this_thread(); + ndb_util_thread = pthread_self(); + + thd->thread_stack = (char*)&thd; // remember where our stack is + if (thd->store_globals()) + { + thd->cleanup(); + delete thd; + DBUG_RETURN(NULL); + } + + List util_open_tables; + set_timespec(abstime, ndb_cache_check_time); + for (;;) + { + + pthread_mutex_lock(&LOCK_ndb_util_thread); + error= pthread_cond_timedwait(&COND_ndb_util_thread, + &LOCK_ndb_util_thread, + &abstime); + pthread_mutex_unlock(&LOCK_ndb_util_thread); + + DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %d", + ndb_cache_check_time)); + + if (abort_loop) + break; // Shutting down server + + if (ndb_cache_check_time == 0) + { + set_timespec(abstime, 10); + continue; + } + + // Set new time to wake up + set_timespec(abstime, ndb_cache_check_time); + + // Lock mutex and fill list with pointers to all open tables + NDB_SHARE *share; + pthread_mutex_lock(&ndbcluster_mutex); + for (uint i= 0; i < ndbcluster_open_tables.records; i++) + { + share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i); + share->use_count++; // Make sure the table can't be closed + + DBUG_PRINT("ndb_util_thread", + ("Found open table[%d]: %s, use_count: %d", + i, share->table_name, share->use_count)); + + // Store pointer to table + util_open_tables.push_back(share); + } + pthread_mutex_unlock(&ndbcluster_mutex); + + + // Iterate through the open files list + List_iterator_fast it(util_open_tables); + while (share=it++) + { + // Split tab- and dbname + char buf[FN_REFLEN]; + char *tabname, *db; + uint length= dirname_length(share->table_name); + tabname= share->table_name+length; + memcpy(buf, share->table_name, length-1); + buf[length-1]= 0; + db= buf+dirname_length(buf); + DBUG_PRINT("ndb_util_thread", + ("Fetching commit count for: %s, db: %s, tab: %s", + share->table_name, db, tabname)); + + // Contact NDB to get commit count for table + g_ndb->setDatabaseName(db); + Uint64 rows, commit_count; + if(ndb_get_table_statistics(g_ndb, tabname, + &rows, &commit_count) == 0){ + DBUG_PRINT("ndb_util_thread", + ("Table: %s, rows: %llu, commit_count: %llu", + share->table_name, rows, commit_count)); + share->commit_count= commit_count; + } + else + { + DBUG_PRINT("ndb_util_thread", + ("Error: Could not get commit count for table %s", + share->table_name)); + share->commit_count++; // Invalidate + } + // Decrease the use count and possibly free share + free_share(share); + } + + // Clear the list of open tables + util_open_tables.empty(); + + } + + thd->cleanup(); + delete thd; + DBUG_PRINT("exit", ("ndb_util_thread")); + my_thread_end(); + DBUG_RETURN(NULL); +} + + #endif /* HAVE_NDBCLUSTER_DB */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index b5cf727ead7..df88afa678a 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -38,6 +38,7 @@ class NdbBlob; // connectstring to cluster if given by mysqld extern const char *ndbcluster_connectstring; +extern ulong ndb_cache_check_time; typedef enum ndb_index_type { UNDEFINED_INDEX = 0, @@ -59,6 +60,7 @@ typedef struct st_ndbcluster_share { pthread_mutex_t mutex; char *table_name; uint table_name_length,use_count; + uint commit_count; } NDB_SHARE; /* diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d1fef3519bf..671f38898c1 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -284,6 +284,7 @@ my_bool opt_console= 0, opt_bdb, opt_innodb, opt_isam, opt_ndbcluster; #ifdef HAVE_NDBCLUSTER_DB const char *opt_ndbcluster_connectstring= 0; my_bool opt_ndb_shm, 
opt_ndb_optimized_node_selection; +ulong opt_ndb_cache_check_time= 0; #endif my_bool opt_readonly, use_temp_pool, relay_log_purge; my_bool opt_sync_bdb_logs, opt_sync_frm; @@ -4016,7 +4017,7 @@ enum options_mysqld OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, - OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, + OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4498,6 +4499,10 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &opt_ndb_optimized_node_selection, (gptr*) &opt_ndb_optimized_node_selection, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + { "ndb_cache_check_time", OPT_NDB_CACHE_CHECK_TIME, + "A dedicated thread is created to update cached commit count value at the given interval.", + (gptr*) &opt_ndb_cache_check_time, (gptr*) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG, + 0, 0, LONG_TIMEOUT, 0, 1, 0}, #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/set_var.cc b/sql/set_var.cc index 082c55db188..58c30c8e9bc 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -370,6 +370,7 @@ sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); +sys_var_long_ptr sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time); #endif /* Time/date/datetime formats */ @@ -630,6 +631,7 @@ sys_var *sys_variables[]= &sys_ndb_force_send, &sys_ndb_use_exact_count, &sys_ndb_use_transactions, + &sys_ndb_cache_check_time, #endif &sys_unique_checks, &sys_warning_count @@ -797,6 +799,7 @@ struct show_var_st init_vars[]= { {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, + {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS}, #endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, {sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS}, From 6162c4a6eb22b413a477bb6b9b0f08ec9b98a193 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 15:08:31 -0800 Subject: [PATCH 11/45] Fix value of YEAR field when set from a non-numeric string. (Bug #6067) mysql-test/t/type_date.test: Add new regression test mysql-test/r/type_date.result: Add result sql/field.cc: Set YEAR to 0 when set to a non-numeric string, not 2000, and issue a warning. 
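For reference, the regression test added below boils down to the following; a non-numeric string now stores 0000 and raises a truncation warning instead of silently becoming 2000:

  CREATE TABLE t1 (y YEAR);
  INSERT INTO t1 VALUES ('abc');   # Warning: Data truncated for column 'y'
  SELECT * FROM t1;                # returns 0000
  DROP TABLE t1;
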
--- mysql-test/r/type_date.result | 8 ++++++++ mysql-test/t/type_date.test | 7 +++++++ sql/field.cc | 12 ++++++++++-- 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result index 71d1b9ad381..3428b5969d9 100644 --- a/mysql-test/r/type_date.result +++ b/mysql-test/r/type_date.result @@ -96,3 +96,11 @@ f2 19781126 19781126 DROP TABLE t1, t2, t3; +CREATE TABLE t1 (y YEAR); +INSERT INTO t1 VALUES ('abc'); +Warnings: +Warning 1265 Data truncated for column 'y' at row 1 +SELECT * FROM t1; +y +0000 +DROP TABLE t1; diff --git a/mysql-test/t/type_date.test b/mysql-test/t/type_date.test index 64420a85189..304ed19b971 100644 --- a/mysql-test/t/type_date.test +++ b/mysql-test/t/type_date.test @@ -107,3 +107,10 @@ SELECT * FROM t2; SELECT * FROM t3; DROP TABLE t1, t2, t3; + +# Test that setting YEAR to invalid string results in default value, not +# 2000. (Bug #6067) +CREATE TABLE t1 (y YEAR); +INSERT INTO t1 VALUES ('abc'); +SELECT * FROM t1; +DROP TABLE t1; diff --git a/sql/field.cc b/sql/field.cc index 7357bc06f11..a2b749257df 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -3511,9 +3511,17 @@ void Field_time::sql_type(String &res) const int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) { - int not_used; // We can ignore result from str2int + int err; char *end; - long nr= my_strntol(cs, from, len, 10, &end, ¬_used); + long nr= my_strntol(cs, from, len, 10, &end, &err); + + if (err) + { + if (table->in_use->count_cuted_fields) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + *ptr= 0; + return 0; + } if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) { From d81a0bede23ca74222252b6f43bd85ead5d7be2a Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 08:38:24 +0200 Subject: [PATCH 12/45] Fix for BUG#8023. Allow LIMIT clause after DUAL. mysql-test/r/limit.result: Added test result for BUG#8023. mysql-test/t/limit.test: Added test for BUG#8023. sql/sql_yacc.yy: Allow the specification of a LIMIT clause after DUAL. This is needed for queries as: select a from t1 union all select 1 from dual limit 1; In this query LIMIT is applied to the whole UNION, so it makes sense, however, the current parser did not allow any clause after DUAL. 
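Both forms from the new test case are accepted after this change; the LIMIT applies to the UNION as a whole:

  select 1 as a from t1 union all select 1 from dual limit 1;
  (select 1 as a from t1) union all (select 1 from dual) limit 1;
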
--- mysql-test/r/limit.result | 9 +++++++++ mysql-test/t/limit.test | 10 ++++++++++ sql/sql_yacc.yy | 9 +++++---- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/limit.result b/mysql-test/r/limit.result index c82105e6a49..6a3d2bffab0 100644 --- a/mysql-test/r/limit.result +++ b/mysql-test/r/limit.result @@ -67,3 +67,12 @@ SELECT * FROM t1; id id2 3 0 DROP TABLE t1; +create table t1 (a integer); +insert into t1 values (1); +select 1 as a from t1 union all select 1 from dual limit 1; +a +1 +(select 1 as a from t1) union all (select 1 from dual) limit 1; +a +1 +drop table t1; diff --git a/mysql-test/t/limit.test b/mysql-test/t/limit.test index 61c57c9b468..28b287a5d4a 100644 --- a/mysql-test/t/limit.test +++ b/mysql-test/t/limit.test @@ -49,3 +49,13 @@ SELECT * FROM t1; DELETE FROM t1 WHERE id2 = 0 ORDER BY id desc LIMIT 1; SELECT * FROM t1; DROP TABLE t1; + +# +# Bug#8023 - limit on UNION with from DUAL, causes syntax error +# +create table t1 (a integer); +insert into t1 values (1); +# both queries must return one row +select 1 as a from t1 union all select 1 from dual limit 1; +(select 1 as a from t1) union all (select 1 from dual) limit 1; +drop table t1; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 1e51d8fb82d..e70efe14557 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -2455,10 +2455,11 @@ select_into: select_from: FROM join_table_list where_clause group_clause having_clause opt_order_clause opt_limit_clause procedure_clause - | FROM DUAL_SYM /* oracle compatibility: oracle always requires FROM - clause, and DUAL is system table without fields. - Is "SELECT 1 FROM DUAL" any better than - "SELECT 1" ? Hmmm :) */ + | FROM DUAL_SYM opt_limit_clause + /* oracle compatibility: oracle always requires FROM clause, + and DUAL is system table without fields. + Is "SELECT 1 FROM DUAL" any better than "SELECT 1" ? + Hmmm :) */ ; select_options: From ba51652c2cd753566d768ccdf02f2de92a17a4b2 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 15:03:34 -0800 Subject: [PATCH 13/45] Fix 'mysqlcheck --help' to not specify what storage engines are supported, rather than give incorrect information. (Bug #8029) client/mysqlcheck.c: Make usage message more general as to what storage engines are supported. --- client/mysqlcheck.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index c670b84db44..babf4de0c3d 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -195,7 +195,7 @@ static void usage(void) puts("and you are welcome to modify and redistribute it under the GPL license.\n"); puts("This program can be used to CHECK (-c,-m,-C), REPAIR (-r), ANALYZE (-a)"); puts("or OPTIMIZE (-o) tables. Some of the options (like -e or -q) can be"); - puts("used at the same time. It works on MyISAM and in some cases on BDB tables."); + puts("used at the same time. Not all options are supported by all storage engines."); puts("Please consult the MySQL manual for latest information about the"); puts("above. 
The options -c,-r,-a and -o are exclusive to each other, which"); puts("means that the last option will be used, if several was specified.\n"); From 567055363e69843feb8215c8cef0e832ceb472cf Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 18:26:59 +0000 Subject: [PATCH 14/45] Bug#7310 Fix test for classic builds mysql-test/t/multi_update.test: Bug#7310 Ignore warnings for Bug#5837 test --- mysql-test/t/multi_update.test | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test index 2d6770f77ed..2fc4ebcb275 100644 --- a/mysql-test/t/multi_update.test +++ b/mysql-test/t/multi_update.test @@ -340,8 +340,10 @@ drop table t1, t2; # Test for BUG#5837 - delete with outer join and const tables drop table if exists t2, t1; +--disable_warnings create table t1(aclid bigint not null primary key, status tinyint(1) not null ) type = innodb; create table t2(refid bigint not null primary key, aclid bigint, index idx_acl(aclid) )type = innodb; +--enable_warnings insert into t2 values(1,null); delete t2, t1 from t2 as a left join t1 as b on (a.aclid=b.aclid) where a.refid='1'; drop table t1, t2; From c2e9e15e9f44b2149286a9b6b784f93fe9b2938e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 21:50:56 +0100 Subject: [PATCH 15/45] mtr_cases.pl: new file --- mysql-test/lib/mtr_cases.pl | 270 ++++++++++++++++++++++++++++++++++++ 1 file changed, 270 insertions(+) create mode 100644 mysql-test/lib/mtr_cases.pl diff --git a/mysql-test/lib/mtr_cases.pl b/mysql-test/lib/mtr_cases.pl new file mode 100644 index 00000000000..5977bb380cf --- /dev/null +++ b/mysql-test/lib/mtr_cases.pl @@ -0,0 +1,270 @@ +# -*- cperl -*- + +# This is a library file used by the Perl version of mysql-test-run, +# and is part of the translation of the Bourne shell script with the +# same name. + +use strict; + +sub collect_test_cases ($); +sub collect_one_test_case ($$$$$); + +############################################################################## +# +# Collect information about test cases we are to run +# +############################################################################## + +sub collect_test_cases ($) { + my $suite= shift; # Test suite name + + my $testdir; + my $resdir; + + if ( $suite eq "main" ) + { + $testdir= "$::glob_mysql_test_dir/t"; + $resdir= "$::glob_mysql_test_dir/r"; + } + else + { + $testdir= "$::glob_mysql_test_dir/suite/$suite/t"; + $resdir= "$::glob_mysql_test_dir/suite/$suite/r"; + } + + my $cases = []; # Array of hash, will be array of C struct + + opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!"); + + if ( @::opt_cases ) + { + foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort + my $elem= "$tname.test"; + if ( ! -f "$testdir/$elem") + { + mtr_error("Test case $tname ($testdir/$elem) is not found"); + } + collect_one_test_case($testdir,$resdir,$tname,$elem,$cases); + } + closedir TESTDIR; + } + else + { + foreach my $elem ( sort readdir(TESTDIR) ) { + my $tname= mtr_match_extension($elem,"test"); + next if ! defined $tname; + next if $::opt_do_test and ! defined mtr_match_prefix($elem,$::opt_do_test); + + collect_one_test_case($testdir,$resdir,$tname,$elem,$cases); + } + closedir TESTDIR; + } + + # To speed things up, we sort first in if the test require a restart + # or not, second in alphanumeric order. + +# @$cases = sort { +# if ( $a->{'master_restart'} and $b->{'master_restart'} or +# ! $a->{'master_restart'} and ! 
$b->{'master_restart'} ) +# { +# return $a->{'name'} cmp $b->{'name'}; +# } +# if ( $a->{'master_restart'} ) +# { +# return 1; # Is greater +# } +# else +# { +# return -1; # Is less +# } +# } @$cases; + + return $cases; +} + + +############################################################################## +# +# Collect information about a single test case +# +############################################################################## + + +sub collect_one_test_case($$$$$) { + my $testdir= shift; + my $resdir= shift; + my $tname= shift; + my $elem= shift; + my $cases= shift; + + my $path= "$testdir/$elem"; + + # ---------------------------------------------------------------------- + # Skip some tests silently + # ---------------------------------------------------------------------- + + if ( $::opt_start_from and $tname lt $::opt_start_from ) + { + return; + } + + # ---------------------------------------------------------------------- + # Skip some tests but include in list, just mark them to skip + # ---------------------------------------------------------------------- + + my $tinfo= {}; + $tinfo->{'name'}= $tname; + $tinfo->{'result_file'}= "$resdir/$tname.result"; + push(@$cases, $tinfo); + + if ( $::opt_skip_test and defined mtr_match_prefix($tname,$::opt_skip_test) ) + { + $tinfo->{'skip'}= 1; + return; + } + + # FIXME temporary solution, we have a hard coded list of test cases to + # skip if we are using the embedded server + + if ( $::glob_use_embedded_server and + mtr_match_any_exact($tname,\@::skip_if_embedded_server) ) + { + $tinfo->{'skip'}= 1; + return; + } + + # ---------------------------------------------------------------------- + # Collect information about test case + # ---------------------------------------------------------------------- + + $tinfo->{'path'}= $path; + $tinfo->{'timezone'}= "GMT-3"; # for UNIX_TIMESTAMP tests to work + + if ( defined mtr_match_prefix($tname,"rpl") ) + { + if ( $::opt_skip_rpl ) + { + $tinfo->{'skip'}= 1; + return; + } + + $tinfo->{'slave_num'}= 1; # Default, use one slave + + # FIXME currently we always restart slaves + $tinfo->{'slave_restart'}= 1; + + if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' ) + { +# $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange + } + } + + # FIXME what about embedded_server + ndbcluster, skip ?! 
+ + my $master_opt_file= "$testdir/$tname-master.opt"; + my $slave_opt_file= "$testdir/$tname-slave.opt"; + my $slave_mi_file= "$testdir/$tname.slave-mi"; + my $master_sh= "$testdir/$tname-master.sh"; + my $slave_sh= "$testdir/$tname-slave.sh"; + my $disabled= "$testdir/$tname.disabled"; + + $tinfo->{'master_opt'}= []; + $tinfo->{'slave_opt'}= []; + $tinfo->{'slave_mi'}= []; + + if ( -f $master_opt_file ) + { + $tinfo->{'master_restart'}= 1; # We think so for now + # This is a dirty hack from old mysql-test-run, we use the opt file + # to flag other things as well, it is not a opt list at all + my $extra_master_opt= mtr_get_opts_from_file($master_opt_file); + + foreach my $opt (@$extra_master_opt) + { + my $value; + + $value= mtr_match_prefix($opt, "--timezone="); + + if ( defined $value ) + { + $tinfo->{'timezone'}= $value; + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + + $value= mtr_match_prefix($opt, "--result-file="); + + if ( defined $value ) + { + $tinfo->{'result_file'}= "r/$value.result"; + if ( $::opt_result_ext and $::opt_record or + -f "$tinfo->{'result_file'}$::opt_result_ext") + { + $tinfo->{'result_file'}.= $::opt_result_ext; + } + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + } + + $tinfo->{'master_opt'}= $extra_master_opt; + } + + if ( -f $slave_opt_file ) + { + $tinfo->{'slave_opt'}= mtr_get_opts_from_file($slave_opt_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $slave_mi_file ) + { + $tinfo->{'slave_mi'}= mtr_get_opts_from_file($slave_mi_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $master_sh ) + { + if ( $::glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'master_sh'}= $master_sh; + $tinfo->{'master_restart'}= 1; + } + } + + if ( -f $slave_sh ) + { + if ( $::glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'slave_sh'}= $slave_sh; + $tinfo->{'slave_restart'}= 1; + } + } + + if ( -f $disabled ) + { + $tinfo->{'skip'}= 1; + $tinfo->{'disable'}= 1; # Sub type of 'skip' + $tinfo->{'comment'}= mtr_fromfile($disabled); + } + + # We can't restart a running server that may be in use + + if ( $::glob_use_running_server and + ( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) ) + { + $tinfo->{'skip'}= 1; + } +} + + +1; From 3c925ee0f1b3387e6df952de8f86c618f11c1a8d Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 21:11:12 +0000 Subject: [PATCH 16/45] Bug#8057 Fix crash with LAST_INSERT_ID() in UPDATE, Tests included, mysql-test/r/update.result: Bug#8057 Test for bug mysql-test/t/update.test: Bug#8057 Test for bug sql/item_func.cc: Bug#8057 Don't create new Item in val_int() --- mysql-test/r/update.result | 7 +++++++ mysql-test/t/update.test | 9 +++++++++ sql/item_func.cc | 8 ++------ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/update.result b/mysql-test/r/update.result index beab6105f79..ac370db9ecc 100644 --- a/mysql-test/r/update.result +++ b/mysql-test/r/update.result @@ -212,3 +212,10 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20); update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1"; update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10; drop table t1, t2; +create table t1 (id int not null auto_increment primary key, id_str varchar(32)); +insert into t1 (id_str) values ("test"); +update t1 set id_str = concat(id_str, id) where id = last_insert_id(); +select * from t1; +id id_str +1 test1 +drop table t1; diff --git a/mysql-test/t/update.test b/mysql-test/t/update.test 
index 704263b1216..04192f25ac8 100644 --- a/mysql-test/t/update.test +++ b/mysql-test/t/update.test @@ -170,3 +170,12 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20); update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1"; update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10; drop table t1, t2; + +# +# Bug #8057 +# +create table t1 (id int not null auto_increment primary key, id_str varchar(32)); +insert into t1 (id_str) values ("test"); +update t1 set id_str = concat(id_str, id) where id = last_insert_id(); +select * from t1; +drop table t1; diff --git a/sql/item_func.cc b/sql/item_func.cc index 7125f4704b8..03b5688efc2 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2284,14 +2284,10 @@ longlong Item_func_last_insert_id::val_int() longlong value=args[0]->val_int(); current_thd->insert_id(value); null_value=args[0]->null_value; - return value; } else - { - Item *it= get_system_var(current_thd, OPT_SESSION, "last_insert_id", 14, - "last_insert_id()"); - return it->val_int(); - } + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + return current_thd->insert_id(); } /* This function is just used to test speed of different functions */ From a3efbf47c80bf6896c6a9fceecd761b757f76971 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 14:21:16 -0800 Subject: [PATCH 17/45] Copy *.result.es files for binary distribution so embedded tests can be run scripts/make_binary_distribution.sh: Make sure to copy result.es files --- scripts/make_binary_distribution.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 33d4794e4f7..910aa38c33f 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -220,7 +220,7 @@ $CP mysql-test/include/*.inc $BASE/mysql-test/include $CP mysql-test/std_data/*.dat mysql-test/std_data/*.*001 $BASE/mysql-test/std_data $CP mysql-test/std_data/des_key_file $BASE/mysql-test/std_data $CP mysql-test/t/*test mysql-test/t/*.opt mysql-test/t/*.slave-mi mysql-test/t/*.sh $BASE/mysql-test/t -$CP mysql-test/r/*result mysql-test/r/*.require $BASE/mysql-test/r +$CP mysql-test/r/*result mysql-test/r/*result.es mysql-test/r/*.require $BASE/mysql-test/r if [ $BASE_SYSTEM != "netware" ] ; then chmod a+x $BASE/bin/* From 9f7c9aa7d5eed311e3d40c8f7d1a55abb7d4566c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 23:56:13 +0100 Subject: [PATCH 18/45] ndb - sol9x86: cc -xO3: fix optimizer error. ndb/src/common/util/NdbSqlUtil.cpp: sol9x86: cc -xO3: fix optimizer error. Note: same expression remains in Field_newdate::val_int(). 
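Sketch of the workaround (assuming the 3-byte packed DATE layout used here: day in the low 5 bits, month in the next 4 bits, year above that): instead of comparing a single rebuilt value, unpack the fields and compare them one by one, e.g.

  uint j= uint3korr(v);
  uint day=   j & 31;  j>>= 5;
  uint month= j & 15;  j>>= 4;
  uint year=  j;
  /* compare year, then month, then day */
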
--- ndb/src/common/util/NdbSqlUtil.cpp | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index 6b23da774af..53fa5d69215 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -526,6 +526,7 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32 union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; u2.p = p2; +#ifdef ndb_date_sol9x86_cc_xO3_madness // from Field_newdate::val_int Uint64 j1 = uint3korr(u1.v); Uint64 j2 = uint3korr(u2.v); @@ -536,6 +537,33 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32 if (j1 > j2) return +1; return 0; +#else + uint j1 = uint3korr(u1.v); + uint j2 = uint3korr(u2.v); + uint d1 = (j1 & 31); + uint d2 = (j2 & 31); + j1 = (j1 >> 5); + j2 = (j2 >> 5); + uint m1 = (j1 & 15); + uint m2 = (j2 & 15); + j1 = (j1 >> 4); + j2 = (j2 >> 4); + uint y1 = j1; + uint y2 = j2; + if (y1 < y2) + return -1; + if (y1 > y2) + return +1; + if (m1 < m2) + return -1; + if (m1 > m2) + return +1; + if (d1 < d2) + return -1; + if (d1 > d2) + return +1; + return 0; +#endif #endif } From ebda548d0d26f49a05d424f186e0b1d92c90925e Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 09:14:22 +0300 Subject: [PATCH 19/45] Fix for BUG#7716: in in_string::set() take into account that the value returned by item->val_str() may be a substring of the passed string. Disallow string=its_substring assignment in String::operator=(). mysql-test/r/func_misc.result: Testcase for BUG#7716 mysql-test/t/func_misc.test: Testcase for BUG#7716 sql/item_cmpfunc.cc: Fix for BUG#7716: in in_string::set() take into account that the string returned by item->val_str(S) may be not S but use the buffer owned by S. sql/sql_string.h: * Added assert: String& String::operator=(const String&) may not be used to do assignments like str = string_that_uses_buffer_owned_by_str * Added String::uses_buffer_owned_by(). 
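A minimal sketch of the hazard being closed: if Item::val_str(str) hands back a String whose pointer lies inside str's own buffer, the old *str= *res would free the very buffer it was about to take its data from. With the new helper the sequence in in_string::set() becomes:

  String *res= item->val_str(str);
  if (res && res != str)
  {
    if (res->uses_buffer_owned_by(str))   /* res aliases str's buffer */
      res->copy();                        /* detach it before assigning */
    *str= *res;
  }
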
--- mysql-test/r/func_misc.result | 21 +++++++++++++++++++++ mysql-test/t/func_misc.test | 15 +++++++++++++++ sql/item_cmpfunc.cc | 4 ++++ sql/sql_string.h | 10 ++++++++++ 4 files changed, 50 insertions(+) diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index 5a9f0f68228..2d464c891bf 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -28,3 +28,24 @@ length(format('nan', 2)) > 0 select concat("$",format(2500,2)); concat("$",format(2500,2)) $2,500.00 +create table t1 ( a timestamp ); +insert into t1 values ( '2004-01-06 12:34' ); +select a from t1 where left(a+0,6) in ( left(20040106,6) ); +a +2004-01-06 12:34:00 +select a from t1 where left(a+0,6) = ( left(20040106,6) ); +a +2004-01-06 12:34:00 +select a from t1 where right(a+0,6) in ( right(20040106123400,6) ); +a +2004-01-06 12:34:00 +select a from t1 where right(a+0,6) = ( right(20040106123400,6) ); +a +2004-01-06 12:34:00 +select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) ); +a +2004-01-06 12:34:00 +select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) ); +a +2004-01-06 12:34:00 +drop table t1; diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index e73f2a1b26c..89aba7ee583 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -23,3 +23,18 @@ select length(format('nan', 2)) > 0; # Test for bug #628 # select concat("$",format(2500,2)); + +# Test for BUG#7716 +create table t1 ( a timestamp ); +insert into t1 values ( '2004-01-06 12:34' ); +select a from t1 where left(a+0,6) in ( left(20040106,6) ); +select a from t1 where left(a+0,6) = ( left(20040106,6) ); + +select a from t1 where right(a+0,6) in ( right(20040106123400,6) ); +select a from t1 where right(a+0,6) = ( right(20040106123400,6) ); + +select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) ); +select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) ); + +drop table t1; + diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c5e6d520ab7..46ef3281dd1 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1503,7 +1503,11 @@ void in_string::set(uint pos,Item *item) String *str=((String*) base)+pos; String *res=item->val_str(str); if (res && res != str) + { + if (res->uses_buffer_owned_by(str)) + res->copy(); *str= *res; + } if (!str->charset()) { CHARSET_INFO *cs; diff --git a/sql/sql_string.h b/sql/sql_string.h index a8fb9574c0b..9136dddbbf2 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -182,6 +182,11 @@ public: { if (&s != this) { + /* + It is forbidden to do assignments like + some_string = substring_of_that_string + */ + DBUG_ASSERT(!s.uses_buffer_owned_by(this)); free(); Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length; alloced=0; @@ -313,4 +318,9 @@ public: /* Swap two string objects. Efficient way to exchange data without memcpy. */ void swap(String &s); + + inline bool uses_buffer_owned_by(const String *s) const + { + return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length); + } }; From fa17ed6895a3278c21934742272f75012e40656d Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 13:51:37 +0400 Subject: [PATCH 20/45] type_float.result.es updated mysql-test/r/type_float.result.es: Updated. 
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + mysql-test/r/type_float.result.es | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 0cd599d040a..ef28bf38f48 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -197,6 +197,7 @@ ram@gw.mysql.r18.ru ram@gw.udmsearch.izhnet.ru ram@mysql.r18.ru ram@ram.(none) +ramil@mysql.com ranger@regul.home.lan rburnett@build.mysql.com reggie@bob.(none) diff --git a/mysql-test/r/type_float.result.es b/mysql-test/r/type_float.result.es index 2751e6cb33b..f2639ef545a 100644 --- a/mysql-test/r/type_float.result.es +++ b/mysql-test/r/type_float.result.es @@ -1,4 +1,4 @@ -drop table if exists t1; +drop table if exists t1,t2; SELECT 10,10.0,10.,.1e+2,100.0e-1; 10 10.0 10. .1e+2 100.0e-1 10 10.0 10 10 10 @@ -8,6 +8,9 @@ SELECT 6e-05, -6e-05, --6e-05, -6e-05+1.000000; SELECT 1e1,1.e1,1.0e1,1e+1,1.e+1,1.0e+1,1e-1,1.e-1,1.0e-1; 1e1 1.e1 1.0e1 1e+1 1.e+1 1.0e+1 1e-1 1.e-1 1.0e-1 10 10 10 10 10 10 0.1 0.1 0.1 +SELECT 0.001e+1,0.001e-1, -0.001e+01,-0.001e-01; +0.001e+1 0.001e-1 -0.001e+01 -0.001e-01 +0.01 0.0001 -0.01 -0.0001 create table t1 (f1 float(24),f2 float(52)); show full columns from t1; Field Type Collation Null Key Default Extra Privileges Comment @@ -143,6 +146,15 @@ drop table t1; create table t1 (f float(54)); ERROR 42000: Incorrect column specifier for column 'f' drop table if exists t1; +create table t1 (d1 double, d2 double unsigned); +insert into t1 set d1 = -1.0; +update t1 set d2 = d1; +Warnings: +Warning 1264 Out of range value adjusted for column 'd2' at row 1 +select * from t1; +d1 d2 +-1 0 +drop table t1; create table t1 (f float(4,3)); insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11"); Warnings: From 03bc59970610527a86a76ed5e8b993a6a3769d09 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 10:56:53 +0100 Subject: [PATCH 21/45] Merge of query cache from 4.1 to 5.0 mysql-test/r/ndb_cache.result: Added 'order by' to select's mysql-test/r/ndb_cache2.result: Added 'order by' to select's mysql-test/t/ndb_cache.test: Added 'order by' to select's mysql-test/t/ndb_cache2.test: Added 'order by' to select's sql/ha_innodb.h: Changed function name sql/ha_ndbcluster.cc: Merge from query cache from 4.1 to 5.0 Added better comments ndb_get_table_statistics had changed, so there where some adaptions to make sql/ha_ndbcluster.h: Changed name of function sql/handler.h: Changed name of function sql/sql_cache.cc: Changed name of function --- mysql-test/r/ndb_cache.result | 26 +++---- mysql-test/r/ndb_cache2.result | 24 +++---- mysql-test/t/ndb_cache.test | 20 +++--- mysql-test/t/ndb_cache2.test | 20 +++--- sql/ha_innodb.h | 8 +-- sql/ha_ndbcluster.cc | 120 +++++++++++++++++++-------------- sql/ha_ndbcluster.h | 10 +-- sql/handler.h | 11 +-- sql/sql_cache.cc | 9 +-- 9 files changed, 135 insertions(+), 113 deletions(-) diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result index 7423771e026..478663b1aa1 100644 --- a/mysql-test/r/ndb_cache.result +++ b/mysql-test/r/ndb_cache.result @@ -36,22 +36,22 @@ Variable_name Value Qcache_hits 1 insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk; pk a b c +1 3 3 First row 2 7 8 Second row 4 5 6 Fourth row -1 3 3 First row show status like "Qcache_inserts"; Variable_name Value Qcache_inserts 3 show status like "Qcache_hits"; 
Variable_name Value Qcache_hits 1 -select * from t1; +select * from t1 order by pk; pk a b c +1 3 3 First row 2 7 8 Second row 4 5 6 Fourth row -1 3 3 First row show status like "Qcache_hits"; Variable_name Value Qcache_hits 2 @@ -81,10 +81,10 @@ show status like "Qcache_hits"; Variable_name Value Qcache_hits 3 use test; -select * from t1; +select * from t1 order by pk; pk a b c -2 7 8 Second row 1 3 3 First row +2 7 8 Second row select * from t1 where b=3; pk a b c 1 3 3 First row @@ -96,11 +96,11 @@ use test; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -110,11 +110,11 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 5 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -138,7 +138,7 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -161,7 +161,7 @@ Qcache_inserts 8 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row @@ -171,7 +171,7 @@ Qcache_inserts 9 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row diff --git a/mysql-test/r/ndb_cache2.result b/mysql-test/r/ndb_cache2.result index ce10e9dab00..de4b3e31874 100644 --- a/mysql-test/r/ndb_cache2.result +++ b/mysql-test/r/ndb_cache2.result @@ -37,10 +37,10 @@ Variable_name Value Qcache_hits 1 insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk desc; pk a b c -2 7 8 Second row 4 5 6 Fourth row +2 7 8 Second row 1 3 3 First row show status like "Qcache_inserts"; Variable_name Value @@ -48,10 +48,10 @@ Qcache_inserts 3 show status like "Qcache_hits"; Variable_name Value Qcache_hits 1 -select * from t1; +select * from t1 order by pk desc; pk a b c -2 7 8 Second row 4 5 6 Fourth row +2 7 8 Second row 1 3 3 First row show status like "Qcache_hits"; Variable_name Value @@ -82,7 +82,7 @@ show status like "Qcache_hits"; Variable_name Value Qcache_hits 3 use test; -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 3 3 First row @@ -97,11 +97,11 @@ use test; show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -111,11 +111,11 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 5 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -139,7 +139,7 @@ Qcache_inserts 7 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 4 3 First row @@ -162,7 +162,7 @@ Qcache_inserts 8 
show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row @@ -172,7 +172,7 @@ Qcache_inserts 9 show status like "Qcache_hits"; Variable_name Value Qcache_hits 7 -select * from t1; +select * from t1 order by pk desc; pk a b c 2 7 8 Second row 1 5 3 First row diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index 8bdcbe17728..e899e94e4ac 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -35,10 +35,10 @@ show status like "Qcache_hits"; # Insert a new record and make sure the correct data is returned insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk; show status like "Qcache_inserts"; show status like "Qcache_hits"; -select * from t1; +select * from t1 order by pk; show status like "Qcache_hits"; # Perform a "new" query and make sure the query cache is not hit @@ -60,7 +60,7 @@ show status like "Qcache_hits"; connect (con1,localhost,root,,); connection con1; use test; -select * from t1; +select * from t1 order by pk; select * from t1 where b=3; show status like "Qcache_hits"; @@ -70,13 +70,13 @@ connect (con2,localhost,root,,); connection con2; use test; show status like "Qcache_queries_in_cache"; -select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -92,7 +92,7 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -103,11 +103,11 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; diff --git a/mysql-test/t/ndb_cache2.test b/mysql-test/t/ndb_cache2.test index 5c1674a7021..c46935e8601 100644 --- a/mysql-test/t/ndb_cache2.test +++ b/mysql-test/t/ndb_cache2.test @@ -41,10 +41,10 @@ show status like "Qcache_hits"; # Insert a new record and make sure the correct data is returned insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (4, 5, 6, 'Fourth row'); -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_hits"; # Perform a "new" query and make sure the query cache is not hit @@ -66,7 +66,7 @@ show status like "Qcache_hits"; connect (con1,localhost,root,,); connection con1; use test; -select * from t1; +select * from t1 order by pk desc; select * from t1 where b=3; show status like "Qcache_hits"; @@ -76,13 +76,13 @@ connect (con2,localhost,root,,); connection con2; use test; show status like "Qcache_queries_in_cache"; 
-select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; -select * from t1; +select * from t1 order by pk desc; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -95,7 +95,7 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; @@ -107,11 +107,11 @@ show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con2; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_inserts"; show status like "Qcache_hits"; connection con1; -select * from t1; +select * from t1 order by pk desc; show status like "Qcache_queries_in_cache"; show status like "Qcache_inserts"; show status like "Qcache_hits"; diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index c5d501f3702..cca33cbbe1c 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -180,10 +180,10 @@ class ha_innobase: public handler /* ask handler about permission to cache table during query registration */ - my_bool cached_table_registration(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *call_back, - ulonglong *engine_data) + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *call_back, + ulonglong *engine_data) { *call_back= innobase_query_caching_of_table_permitted; *engine_data= 0; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 849fac07821..d146e55f798 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4026,7 +4026,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_force_send(TRUE), m_autoincrement_prefetch(32), m_transaction_on(TRUE), - m_use_local_query_cache(FALSE), m_multi_cursor(NULL) { int i; @@ -4820,31 +4819,48 @@ uint ndb_get_commitcount(THD* thd, char* dbname, char* tabname, DBUG_RETURN(1); ndb->setDatabaseName(dbname); - if (ndb_get_table_statistics(ndb, tabname, 0, commit_count)) + struct Ndb_statistics stat; + if (ndb_get_table_statistics(ndb, tabname, &stat)) DBUG_RETURN(1); + *commit_count= stat.commit_count; DBUG_RETURN(0); } -static -my_bool -ndbcluster_cache_retrieval_allowed( -/*======================================*/ - /* out: TRUE if permitted, FALSE if not; - note that the value FALSE means invalidation - of query cache if *engine_data is changed */ - THD* thd, /* in: thd of the user who is trying to - store a result to the query cache or - retrieve it */ - char* full_name, /* in: concatenation of database name, - the null character '\0', and the table - name */ - uint full_name_len, /* in: length of the full name, i.e. - len(dbname) + len(tablename) + 1 */ - ulonglong *engine_data) /* in: value set in call to - ha_ndbcluster::cached_table_registration - out: if return FALSE this is used to invalidate - all cached queries with this table*/ +/* + Check if a cached query can be used. + This is done by comparing the supplied engine_data to commit_count of + the table. + The commit_count is either retrieved from the share for the table, where + it has been cached by the util thread. 
If the util thread is not started, + NDB has to be contacetd to retrieve the commit_count, this will introduce + a small delay while waiting for NDB to answer. + + + SYNOPSIS + ndbcluster_cache_retrieval_allowed + thd thread handle + full_name concatenation of database name, + the null character '\0', and the table + name + full_name_len length of the full name, + i.e. len(dbname) + len(tablename) + 1 + + engine_data parameter retrieved when query was first inserted into + the cache. If the value of engine_data is changed, + all queries for this table should be invalidated. + + RETURN VALUE + TRUE Yes, use the query from cache + FALSE No, don't use the cached query, and if engine_data + has changed, all queries for this table should be invalidated + +*/ + +static my_bool +ndbcluster_cache_retrieval_allowed(THD* thd, + char* full_name, uint full_name_len, + ulonglong *engine_data) { DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); @@ -4861,7 +4877,7 @@ ndbcluster_cache_retrieval_allowed( if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) { - *engine_data= *engine_data+1; // invalidate + *engine_data+= 1; // invalidate DBUG_RETURN(FALSE); } DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu", @@ -4877,27 +4893,36 @@ ndbcluster_cache_retrieval_allowed( DBUG_RETURN(TRUE); } + +/** + Register a table for use in the query cache. Fetch the commit_count + for the table and return it in engine_data, this will later be used + to check if the table has changed, before the cached query is reused. + + SYNOPSIS + ha_ndbcluster::can_query_cache_table + thd thread handle + full_name concatenation of database name, + the null character '\0', and the table + name + full_name_len length of the full name, + i.e. len(dbname) + len(tablename) + 1 + qc_engine_callback function to be called before using cache on this table + engine_data out, commit_count for this table + + RETURN VALUE + TRUE Yes, it's ok to cahce this query + FALSE No, don't cach the query + +*/ + my_bool -ha_ndbcluster::cached_table_registration( -/*======================================*/ - /* out: TRUE if permitted, FALSE if not; - note that the value FALSE means invalidation - of query cache if *engine_data is changed */ - THD* thd, /* in: thd of the user who is trying to - store a result to the query cache or - retrieve it */ - char* full_name, /* in: concatenation of database name, - the null character '\0', and the table - name */ - uint full_name_len, /* in: length of the full name, i.e. 
- len(dbname) + len(tablename) + 1 */ - qc_engine_callback - *engine_callback, /* out: function to be called before using - cache on this table */ - ulonglong *engine_data) /* out: if return FALSE this is used to - invalidate all cached queries with this table*/ +ha_ndbcluster::register_query_cache_table(THD* thd, + char* full_name, uint full_name_len, + qc_engine_callback *engine_callback, + ulonglong *engine_data) { - DBUG_ENTER("ha_ndbcluster::cached_table_registration"); + DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", @@ -5139,10 +5164,6 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, pOp->close(TRUE); ndb->closeTransaction(pTrans); - if(row_count) - *row_count= sum_rows; - if(commit_count) - *commit_count= sum_commits; ndbstat->row_count= sum_rows; ndbstat->commit_count= sum_commits; @@ -5662,13 +5683,12 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused)) // Contact NDB to get commit count for table g_ndb->setDatabaseName(db); - Uint64 rows, commit_count; - if(ndb_get_table_statistics(g_ndb, tabname, - &rows, &commit_count) == 0){ + struct Ndb_statistics stat;; + if(ndb_get_table_statistics(g_ndb, tabname, &stat) == 0){ DBUG_PRINT("ndb_util_thread", ("Table: %s, rows: %llu, commit_count: %llu", - share->table_name, rows, commit_count)); - share->commit_count= commit_count; + share->table_name, stat.row_count, stat.commit_count)); + share->commit_count= stat.commit_count; } else { diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index bd0d8cc7be5..fb624353491 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -157,11 +157,11 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type(); - my_bool cached_table_registration(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *engine_callback, - ulonglong *engine_data); - private: + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data); +private: int alter_table_name(const char *to); int drop_table(); int create_index(const char *name, KEY *key_info, bool unique); diff --git a/sql/handler.h b/sql/handler.h index 2720a0bff33..04f196dccca 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -592,11 +592,12 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* ask handler about permission to cache table during query registration */ - virtual my_bool cached_table_registration(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *engine_callback, - ulonglong *engine_data) + /* ask handler about permission to cache table when query is to be cached */ + virtual my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback + *engine_callback, + ulonglong *engine_data) { *engine_callback= 0; return 1; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 68964d80bf7..e38e417e6df 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2770,10 +2770,11 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, for (; tables_used; tables_used= tables_used->next_global) { TABLE *table= tables_used->table; - if (!handler->cached_table_registration(thd, table->s->table_cache_key, - table->s->key_length, - &tables_used->callback_func, - 
&tables_used->engine_data)) + handler *handler= table->file; + if (!handler->register_query_cache_table(thd, table->s->table_cache_key, + table->s->key_length, + &tables_used->callback_func, + &tables_used->engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", tables_used->db, tables_used->alias)); From ca95b9fefbb307bd85185bb2eaf192af4df6c3bb Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 14:35:56 +0400 Subject: [PATCH 22/45] type_ranges.result.es updated. mysql-test/r/type_ranges.result.es: Updated. --- mysql-test/r/type_ranges.result.es | 176 +++++++++++++++-------------- 1 file changed, 90 insertions(+), 86 deletions(-) diff --git a/mysql-test/r/type_ranges.result.es b/mysql-test/r/type_ranges.result.es index c1f6d2453e9..5e2ea2aebbc 100644 --- a/mysql-test/r/type_ranges.result.es +++ b/mysql-test/r/type_ranges.result.es @@ -40,30 +40,30 @@ KEY (options,flags) ); show full fields from t1; Field Type Collation Null Key Default Extra Privileges Comment -auto int(5) unsigned NULL PRI NULL auto_increment -string varchar(10) latin1_swedish_ci YES hello -tiny tinyint(4) NULL MUL 0 -short smallint(6) NULL MUL 1 -medium mediumint(8) NULL MUL 0 -long_int int(11) NULL 0 -longlong bigint(13) NULL MUL 0 -real_float float(13,1) NULL MUL 0.0 +auto int(5) unsigned NULL NO PRI NULL auto_increment +string char(10) latin1_swedish_ci YES hello +tiny tinyint(4) NULL NO MUL 0 +short smallint(6) NULL NO MUL 1 +medium mediumint(8) NULL NO MUL 0 +long_int int(11) NULL NO 0 +longlong bigint(13) NULL NO MUL 0 +real_float float(13,1) NULL NO MUL 0.0 real_double double(16,4) NULL YES NULL -utiny tinyint(3) unsigned NULL MUL 0 -ushort smallint(5) unsigned zerofill NULL MUL 00000 -umedium mediumint(8) unsigned NULL MUL 0 -ulong int(11) unsigned NULL MUL 0 -ulonglong bigint(13) unsigned NULL MUL 0 +utiny tinyint(3) unsigned NULL NO MUL 0 +ushort smallint(5) unsigned zerofill NULL NO MUL 00000 +umedium mediumint(8) unsigned NULL NO MUL 0 +ulong int(11) unsigned NULL NO MUL 0 +ulonglong bigint(13) unsigned NULL NO MUL 0 time_stamp timestamp NULL YES CURRENT_TIMESTAMP date_field date NULL YES NULL time_field time NULL YES NULL date_time datetime NULL YES NULL blob_col blob NULL YES NULL tinyblob_col tinyblob NULL YES NULL -mediumblob_col mediumblob NULL -longblob_col longblob NULL -options enum('one','two','tree') latin1_swedish_ci MUL one -flags set('one','two','tree') latin1_swedish_ci +mediumblob_col mediumblob NULL NO +longblob_col longblob NULL NO +options enum('one','two','tree') latin1_swedish_ci NO MUL one +flags set('one','two','tree') latin1_swedish_ci NO show keys from t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment t1 0 PRIMARY 1 auto A 0 NULL NULL BTREE @@ -89,33 +89,33 @@ insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,N insert into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3); insert into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1); Warnings: -Warning 1264 Data truncated; out of range for column 'utiny' at row 1 -Warning 1264 Data truncated; out of range for column 'ushort' at row 1 -Warning 1264 Data truncated; out of range for column 'umedium' at row 1 -Warning 1264 Data truncated; out of range for column 'ulong' at row 1 +Warning 1264 Out of range value adjusted for column 'utiny' at row 1 +Warning 1264 Out of range value adjusted for column 
'ushort' at row 1 +Warning 1264 Out of range value adjusted for column 'umedium' at row 1 +Warning 1264 Out of range value adjusted for column 'ulong' at row 1 Warning 1265 Data truncated for column 'options' at row 1 Warning 1265 Data truncated for column 'flags' at row 1 insert into t1 values (0,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,-4294967295,NULL,0,0,0,-4294967295,-4294967295,-4294967295,'-4294967295',0,"one,two,tree"); Warnings: Warning 1265 Data truncated for column 'string' at row 1 -Warning 1264 Data truncated; out of range for column 'tiny' at row 1 -Warning 1264 Data truncated; out of range for column 'short' at row 1 -Warning 1264 Data truncated; out of range for column 'medium' at row 1 -Warning 1264 Data truncated; out of range for column 'long_int' at row 1 -Warning 1264 Data truncated; out of range for column 'utiny' at row 1 -Warning 1264 Data truncated; out of range for column 'ushort' at row 1 -Warning 1264 Data truncated; out of range for column 'umedium' at row 1 -Warning 1264 Data truncated; out of range for column 'ulong' at row 1 +Warning 1264 Out of range value adjusted for column 'tiny' at row 1 +Warning 1264 Out of range value adjusted for column 'short' at row 1 +Warning 1264 Out of range value adjusted for column 'medium' at row 1 +Warning 1264 Out of range value adjusted for column 'long_int' at row 1 +Warning 1264 Out of range value adjusted for column 'utiny' at row 1 +Warning 1264 Out of range value adjusted for column 'ushort' at row 1 +Warning 1264 Out of range value adjusted for column 'umedium' at row 1 +Warning 1264 Out of range value adjusted for column 'ulong' at row 1 Warning 1265 Data truncated for column 'options' at row 1 insert into t1 values (0,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,NULL,0,0,0,4294967295,4294967295,4294967295,'4294967295',0,0); Warnings: -Warning 1264 Data truncated; out of range for column 'tiny' at row 1 -Warning 1264 Data truncated; out of range for column 'short' at row 1 -Warning 1264 Data truncated; out of range for column 'medium' at row 1 -Warning 1264 Data truncated; out of range for column 'long_int' at row 1 -Warning 1264 Data truncated; out of range for column 'utiny' at row 1 -Warning 1264 Data truncated; out of range for column 'ushort' at row 1 -Warning 1264 Data truncated; out of range for column 'umedium' at row 1 +Warning 1264 Out of range value adjusted for column 'tiny' at row 1 +Warning 1264 Out of range value adjusted for column 'short' at row 1 +Warning 1264 Out of range value adjusted for column 'medium' at row 1 +Warning 1264 Out of range value adjusted for column 'long_int' at row 1 +Warning 1264 Out of range value adjusted for column 'utiny' at row 1 +Warning 1264 Out of range value adjusted for column 'ushort' at row 1 +Warning 1264 Out of range value adjusted for column 'umedium' at row 1 Warning 1265 Data truncated for column 'options' at row 1 insert into t1 (tiny) values (1); select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,utiny,ushort,umedium,ulong,ulonglong,mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000),date_field,time_field,date_time,blob_col,tinyblob_col,mediumblob_col,longblob_col from t1; @@ -208,56 +208,56 @@ Warning 1265 Data truncated for column 'options' at row 6 update t2 set string="changed" where auto=16; show full columns from t1; 
Field Type Collation Null Key Default Extra Privileges Comment -auto int(5) unsigned NULL MUL NULL auto_increment -string varchar(10) latin1_swedish_ci YES new defaul -tiny tinyint(4) NULL MUL 0 -short smallint(6) NULL MUL 0 -medium mediumint(8) NULL MUL 0 -long_int int(11) NULL 0 -longlong bigint(13) NULL MUL 0 -real_float float(13,1) NULL MUL 0.0 +auto int(5) unsigned NULL NO MUL NULL auto_increment +string char(10) latin1_swedish_ci YES new defaul +tiny tinyint(4) NULL NO MUL 0 +short smallint(6) NULL NO MUL 0 +medium mediumint(8) NULL NO MUL 0 +long_int int(11) NULL NO 0 +longlong bigint(13) NULL NO MUL 0 +real_float float(13,1) NULL NO MUL 0.0 real_double double(16,4) NULL YES NULL -utiny tinyint(3) unsigned NULL 0 -ushort smallint(5) unsigned zerofill NULL 00000 -umedium mediumint(8) unsigned NULL MUL 0 -ulong int(11) unsigned NULL MUL 0 -ulonglong bigint(13) unsigned NULL MUL 0 +utiny tinyint(3) unsigned NULL NO 0 +ushort smallint(5) unsigned zerofill NULL NO 00000 +umedium mediumint(8) unsigned NULL NO MUL 0 +ulong int(11) unsigned NULL NO MUL 0 +ulonglong bigint(13) unsigned NULL NO MUL 0 time_stamp timestamp NULL YES CURRENT_TIMESTAMP date_field char(10) latin1_swedish_ci YES NULL time_field time NULL YES NULL date_time datetime NULL YES NULL new_blob_col varchar(20) latin1_swedish_ci YES NULL tinyblob_col tinyblob NULL YES NULL -mediumblob_col mediumblob NULL -options enum('one','two','tree') latin1_swedish_ci MUL one -flags set('one','two','tree') latin1_swedish_ci -new_field char(10) latin1_swedish_ci new +mediumblob_col mediumblob NULL NO +options enum('one','two','tree') latin1_swedish_ci NO MUL one +flags set('one','two','tree') latin1_swedish_ci NO +new_field char(10) latin1_swedish_ci NO new show full columns from t2; Field Type Collation Null Key Default Extra Privileges Comment -auto int(5) unsigned NULL 0 -string varchar(10) latin1_swedish_ci YES new defaul -tiny tinyint(4) NULL 0 -short smallint(6) NULL 0 -medium mediumint(8) NULL 0 -long_int int(11) NULL 0 -longlong bigint(13) NULL 0 -real_float float(13,1) NULL 0.0 +auto int(5) unsigned NULL NO 0 +string char(10) latin1_swedish_ci YES new defaul +tiny tinyint(4) NULL NO 0 +short smallint(6) NULL NO 0 +medium mediumint(8) NULL NO 0 +long_int int(11) NULL NO 0 +longlong bigint(13) NULL NO 0 +real_float float(13,1) NULL NO 0.0 real_double double(16,4) NULL YES NULL -utiny tinyint(3) unsigned NULL 0 -ushort smallint(5) unsigned zerofill NULL 00000 -umedium mediumint(8) unsigned NULL 0 -ulong int(11) unsigned NULL 0 -ulonglong bigint(13) unsigned NULL 0 +utiny tinyint(3) unsigned NULL NO 0 +ushort smallint(5) unsigned zerofill NULL NO 00000 +umedium mediumint(8) unsigned NULL NO 0 +ulong int(11) unsigned NULL NO 0 +ulonglong bigint(13) unsigned NULL NO 0 time_stamp timestamp NULL YES 0000-00-00 00:00:00 date_field char(10) latin1_swedish_ci YES NULL time_field time NULL YES NULL date_time datetime NULL YES NULL new_blob_col varchar(20) latin1_swedish_ci YES NULL tinyblob_col tinyblob NULL YES NULL -mediumblob_col mediumblob NULL -options enum('one','two','tree') latin1_swedish_ci one -flags set('one','two','tree') latin1_swedish_ci -new_field char(10) latin1_swedish_ci new +mediumblob_col mediumblob NULL NO +options enum('one','two','tree') latin1_swedish_ci NO one +flags set('one','two','tree') latin1_swedish_ci NO +new_field char(10) latin1_swedish_ci NO new select t1.auto,t2.auto from t1,t2 where t1.auto=t2.auto and ((t1.string<>t2.string and (t1.string is not null or t2.string is not null)) or (t1.tiny<>t2.tiny and 
(t1.tiny is not null or t2.tiny is not null)) or (t1.short<>t2.short and (t1.short is not null or t2.short is not null)) or (t1.medium<>t2.medium and (t1.medium is not null or t2.medium is not null)) or (t1.long_int<>t2.long_int and (t1.long_int is not null or t2.long_int is not null)) or (t1.longlong<>t2.longlong and (t1.longlong is not null or t2.longlong is not null)) or (t1.real_float<>t2.real_float and (t1.real_float is not null or t2.real_float is not null)) or (t1.real_double<>t2.real_double and (t1.real_double is not null or t2.real_double is not null)) or (t1.utiny<>t2.utiny and (t1.utiny is not null or t2.utiny is not null)) or (t1.ushort<>t2.ushort and (t1.ushort is not null or t2.ushort is not null)) or (t1.umedium<>t2.umedium and (t1.umedium is not null or t2.umedium is not null)) or (t1.ulong<>t2.ulong and (t1.ulong is not null or t2.ulong is not null)) or (t1.ulonglong<>t2.ulonglong and (t1.ulonglong is not null or t2.ulonglong is not null)) or (t1.time_stamp<>t2.time_stamp and (t1.time_stamp is not null or t2.time_stamp is not null)) or (t1.date_field<>t2.date_field and (t1.date_field is not null or t2.date_field is not null)) or (t1.time_field<>t2.time_field and (t1.time_field is not null or t2.time_field is not null)) or (t1.date_time<>t2.date_time and (t1.date_time is not null or t2.date_time is not null)) or (t1.new_blob_col<>t2.new_blob_col and (t1.new_blob_col is not null or t2.new_blob_col is not null)) or (t1.tinyblob_col<>t2.tinyblob_col and (t1.tinyblob_col is not null or t2.tinyblob_col is not null)) or (t1.mediumblob_col<>t2.mediumblob_col and (t1.mediumblob_col is not null or t2.mediumblob_col is not null)) or (t1.options<>t2.options and (t1.options is not null or t2.options is not null)) or (t1.flags<>t2.flags and (t1.flags is not null or t2.flags is not null)) or (t1.new_field<>t2.new_field and (t1.new_field is not null or t2.new_field is not null))); auto auto 16 16 @@ -265,23 +265,27 @@ select t1.auto,t2.auto from t1,t2 where t1.auto=t2.auto and not (t1.string<=>t2. 
auto auto 16 16 drop table t2; -create table t2 (primary key (auto)) select auto+1 as auto,1 as t1, "a" as t2, repeat("a",256) as t3, binary repeat("b",256) as t4 from t1; +create table t2 (primary key (auto)) select auto+1 as auto,1 as t1, 'a' as t2, repeat('a',256) as t3, binary repeat('b',256) as t4, repeat('a',4096) as t5, binary repeat('b',4096) as t6, '' as t7, binary '' as t8 from t1; show full columns from t2; Field Type Collation Null Key Default Extra Privileges Comment -auto bigint(17) unsigned NULL PRI 0 -t1 bigint(1) NULL 0 -t2 char(1) latin1_swedish_ci -t3 longtext latin1_swedish_ci -t4 longblob NULL -select * from t2; -auto t1 t2 t3 t4 -11 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -12 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -13 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -14 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -15 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -16 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -17 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +auto bigint(17) unsigned NULL NO PRI 0 +t1 bigint(1) NULL NO 0 +t2 varchar(1) latin1_swedish_ci NO +t3 varchar(256) latin1_swedish_ci NO +t4 varbinary(256) NULL NO +t5 longtext latin1_swedish_ci NO +t6 longblob NULL NO +t7 char(0) latin1_swedish_ci NO +t8 binary(0) NULL NO +select t1,t2,length(t3),length(t4),length(t5),length(t6),t7,t8 from t2; +t1 t2 length(t3) length(t4) length(t5) length(t6) t7 t8 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 +1 a 256 256 4096 4096 drop table t1,t2; create table t1 (c int); insert into t1 values(1),(2); @@ -293,7 +297,7 @@ show full columns from t3; Field Type Collation Null Key Default Extra Privileges Comment c1 int(11) NULL YES NULL c2 int(11) NULL YES NULL -const bigint(1) NULL 0 +const bigint(1) NULL NO 0 drop table t1,t2,t3; create table t1 ( myfield INT NOT NULL, UNIQUE INDEX (myfield), unique (myfield), index(myfield)); drop table t1; From 4c69539827f69a693236eca0a2f512b1618e80a1 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:35:08 +0400 Subject: [PATCH 23/45] type_float.result.es updated. mysql-test/r/type_float.result.es: Updated. 
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + mysql-test/r/type_float.result.es | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 6ccc886e161..bf88e38a780 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -180,6 +180,7 @@ ram@gw.mysql.r18.ru ram@gw.udmsearch.izhnet.ru ram@mysql.r18.ru ram@ram.(none) +ramil@mysql.com ranger@regul.home.lan rburnett@build.mysql.com reggie@bob.(none) diff --git a/mysql-test/r/type_float.result.es b/mysql-test/r/type_float.result.es index b93539b6bea..5fcf9213f83 100644 --- a/mysql-test/r/type_float.result.es +++ b/mysql-test/r/type_float.result.es @@ -143,6 +143,15 @@ drop table t1; create table t1 (f float(54)); ERROR 42000: Incorrect column specifier for column 'f' drop table if exists t1; +create table t1 (d1 double, d2 double unsigned); +insert into t1 set d1 = -1.0; +update t1 set d2 = d1; +Warnings: +Warning 1264 Data truncated; out of range for column 'd2' at row 1 +select * from t1; +d1 d2 +-1 0 +drop table t1; create table t1 (f float(4,3)); insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11"); Warnings: From 66eb71f3fc13fefaa6d72532b521a28c09138aa3 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:31:36 +0300 Subject: [PATCH 24/45] A fix: bug#6931: Date Type column problem when using UNION-Table bug#7833: Wrong datatype of aggregate column is returned mysql-test/r/func_group.result: Test case for bug 7833: Wrong datatype of aggregate column is returned mysql-test/r/union.result: Test case for bug 6931: Date Type column problem when using UNION-Table. mysql-test/t/func_group.test: Test case for bug 7833: Wrong datatype of aggregate column is returned mysql-test/t/union.test: Test case for bug 6931: Date Type column problem when using UNION-Table. 
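The common thread in both bugs is that when the result column types of a UNION (or of a derived table) are merged, temporal columns must keep their DATE/TIME/DATETIME field type instead of collapsing into the generic string result type. The toy sketch below only illustrates that decision; the names echo field_types_to_be_kept() and join_types() from this patch, but the real merge logic handles many more cases:

    #include <cstdio>

    enum class FieldType { Long, Double, String, Date, Time, Datetime };

    // Mirrors the intent of field_types_to_be_kept(): only temporal types
    // are worth preserving through a real temporary-table field.
    static bool keep_field_type(FieldType t)
    {
      switch (t)
      {
      case FieldType::Date:
      case FieldType::Time:
      case FieldType::Datetime:
        return true;
      default:
        return false;
      }
    }

    // Greatly simplified stand-in for Item_type_holder::join_types():
    // merge one branch's column type into the accumulated result type.
    static FieldType join_types(FieldType acc, FieldType branch)
    {
      if (keep_field_type(acc) && acc == branch)
        return acc;                  // both branches agree on a temporal type
      return FieldType::String;      // otherwise fall back to a generic type
    }

    int main()
    {
      FieldType t= join_types(FieldType::Date, FieldType::Date);
      std::printf("%s\n", t == FieldType::Date ? "date kept" : "collapsed");
      return 0;
    }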
--- mysql-test/r/func_group.result | 12 +++++++ mysql-test/r/union.result | 36 +++++++++++++++++++ mysql-test/t/func_group.test | 14 ++++++++ mysql-test/t/union.test | 35 ++++++++++++++++++ sql/field.cc | 35 ++++++++++++++++++ sql/field.h | 1 + sql/item.cc | 65 +++++++++++++++++++++++++++------- sql/item.h | 4 +-- sql/sql_union.cc | 14 ++++++-- 9 files changed, 200 insertions(+), 16 deletions(-) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 4bb79a1cb41..fa645700875 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -733,3 +733,15 @@ one 2 two 2 three 1 drop table t1; +create table t1(f1 datetime); +insert into t1 values (now()); +create table t2 select f2 from (select max(now()) f2 from t1) a; +show columns from t2; +Field Type Null Key Default Extra +f2 datetime 0000-00-00 00:00:00 +drop table t2; +create table t2 select f2 from (select now() f2 from t1) a; +show columns from t2; +Field Type Null Key Default Extra +f2 datetime 0000-00-00 00:00:00 +drop table t2, t1; diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index f07bdad9021..115ef6a47f9 100644 --- a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -1137,3 +1137,39 @@ t1 CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; drop table t2; +create table t1(a1 int, f1 char(10)); +create table t2 +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +union +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +order by f2, a1; +show columns from t2; +Field Type Null Key Default Extra +f2 date YES NULL +a1 int(11) YES NULL +drop table t1, t2; +create table t1 (f1 int); +create table t2 (f1 int, f2 int ,f3 date); +create table t3 (f1 int, f2 char(10)); +create table t4 +( +select t2.f3 as sdate +from t1 +left outer join t2 on (t1.f1 = t2.f1) +inner join t3 on (t2.f2 = t3.f1) +order by t1.f1, t3.f1, t2.f3 +) +union +( +select cast('2004-12-31' as date) as sdate +from t1 +left outer join t2 on (t1.f1 = t2.f1) +inner join t3 on (t2.f2 = t3.f1) +group by t1.f1 +order by t1.f1, t3.f1, t2.f3 +) +order by sdate; +show columns from t4; +Field Type Null Key Default Extra +sdate date YES NULL +drop table t1, t2, t3, t4; diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 79d6112e6de..465611a5ebb 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -473,3 +473,17 @@ INSERT INTO t1 VALUES select val, count(*) from t1 group by val; drop table t1; + + +# +# Bug 7833: Wrong datatype of aggregate column is returned +# + +create table t1(f1 datetime); +insert into t1 values (now()); +create table t2 select f2 from (select max(now()) f2 from t1) a; +show columns from t2; +drop table t2; +create table t2 select f2 from (select now() f2 from t1) a; +show columns from t2; +drop table t2, t1; diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index 8682808f3f3..90b2197603b 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -664,3 +664,38 @@ show create table t1; drop table t1; drop table t2; +# +# Bug 6931: Date Type column problem when using UNION-Table. 
+# +create table t1(a1 int, f1 char(10)); +create table t2 +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +union +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +order by f2, a1; +show columns from t2; +drop table t1, t2; + +create table t1 (f1 int); +create table t2 (f1 int, f2 int ,f3 date); +create table t3 (f1 int, f2 char(10)); +create table t4 +( + select t2.f3 as sdate + from t1 + left outer join t2 on (t1.f1 = t2.f1) + inner join t3 on (t2.f2 = t3.f1) + order by t1.f1, t3.f1, t2.f3 +) +union +( + select cast('2004-12-31' as date) as sdate + from t1 + left outer join t2 on (t1.f1 = t2.f1) + inner join t3 on (t2.f2 = t3.f1) + group by t1.f1 + order by t1.f1, t3.f1, t2.f3 +) +order by sdate; +show columns from t4; +drop table t1, t2, t3, t4; diff --git a/sql/field.cc b/sql/field.cc index 7357bc06f11..9965cb792be 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -245,6 +245,7 @@ static Field::field_cast_enum field_cast_date[]= Field::FIELD_CAST_BLOB, Field::FIELD_CAST_STOP}; static Field::field_cast_enum field_cast_newdate[]= {Field::FIELD_CAST_NEWDATE, + Field::FIELD_CAST_DATE, Field::FIELD_CAST_DATETIME, Field::FIELD_CAST_STRING, Field::FIELD_CAST_VARSTRING, Field::FIELD_CAST_BLOB, Field::FIELD_CAST_STOP}; @@ -6024,6 +6025,40 @@ Field *make_field(char *ptr, uint32 field_length, } +/* + Check if field_type is appropriate field type + to create field for tmp table using + item->tmp_table_field() method + + SYNOPSIS + field_types_to_be_kept() + field_type - field type + + NOTE + it is used in function get_holder_example_field() + from item.cc + + RETURN + 1 - can use item->tmp_table_field() method + 0 - can not use item->tmp_table_field() method + +*/ + +bool field_types_to_be_kept(enum_field_types field_type) +{ + switch (field_type) + { + case FIELD_TYPE_DATE: + case FIELD_TYPE_NEWDATE: + case FIELD_TYPE_TIME: + case FIELD_TYPE_DATETIME: + return 1; + default: + return 0; + } +} + + /* Create a field suitable for create of table */ create_field::create_field(Field *old_field,Field *orig_field) diff --git a/sql/field.h b/sql/field.h index 27a01a69273..fd0f2f9c2f1 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1265,6 +1265,7 @@ int set_field_to_null(Field *field); int set_field_to_null_with_conversions(Field *field, bool no_conversions); bool test_if_int(const char *str, int length, const char *int_end, CHARSET_INFO *cs); +bool field_types_to_be_kept(enum_field_types field_type); /* The following are for the interface with the .frm file diff --git a/sql/item.cc b/sql/item.cc index ab29c147dfb..d61d628e8fa 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -2639,7 +2639,53 @@ void Item_cache_row::bring_value() } -Item_type_holder::Item_type_holder(THD *thd, Item *item) +/* + Returns field for temporary table dependind on item type + + SYNOPSIS + get_holder_example_field() + thd - thread handler + item - pointer to item + table - empty table object + + NOTE + It is possible to return field for Item_func + items only if field type of this item is + date or time or datetime type. 
+ also see function field_types_to_be_kept() from + field.cc + + RETURN + # - field + 0 - no field +*/ + +Field *get_holder_example_field(THD *thd, Item *item, TABLE *table) +{ + DBUG_ASSERT(table); + + Item_func *tmp_item= 0; + if (item->type() == Item::FIELD_ITEM) + return (((Item_field*) item)->field); + if (item->type() == Item::FUNC_ITEM) + tmp_item= (Item_func *) item; + else if (item->type() == Item::SUM_FUNC_ITEM) + { + Item_sum *item_sum= (Item_sum *) item; + if (item_sum->keep_field_type()) + { + if (item_sum->args[0]->type() == Item::FIELD_ITEM) + return (((Item_field*) item_sum->args[0])->field); + if (item_sum->args[0]->type() == Item::FUNC_ITEM) + tmp_item= (Item_func *) item_sum->args[0]; + } + } + return (tmp_item && field_types_to_be_kept(tmp_item->field_type()) ? + tmp_item->tmp_table_field(table) : 0); +} + + +Item_type_holder::Item_type_holder(THD *thd, Item *item, TABLE *table) :Item(thd, item), item_type(item->result_type()), orig_type(item_type) { @@ -2649,10 +2695,7 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item) It is safe assign pointer on field, because it will be used just after all JOIN::prepare calls and before any SELECT execution */ - if (item->type() == Item::FIELD_ITEM) - field_example= ((Item_field*) item)->field; - else - field_example= 0; + field_example= get_holder_example_field(thd, item, table); max_length= real_length(item); maybe_null= item->maybe_null; collation.set(item->collation); @@ -2692,25 +2735,23 @@ inline bool is_attr_compatible(Item *from, Item *to) (to->maybe_null || !from->maybe_null) && (to->result_type() != STRING_RESULT || from->result_type() != STRING_RESULT || - my_charset_same(from->collation.collation, - to->collation.collation))); + (from->collation.collation == to->collation.collation))); } -bool Item_type_holder::join_types(THD *thd, Item *item) +bool Item_type_holder::join_types(THD *thd, Item *item, TABLE *table) { uint32 new_length= real_length(item); bool use_new_field= 0, use_expression_type= 0; Item_result new_result_type= type_convertor[item_type][item->result_type()]; - bool item_is_a_field= item->type() == Item::FIELD_ITEM; - + Field *field= get_holder_example_field(thd, item, table); + bool item_is_a_field= field; /* Check if both items point to fields: in this case we can adjust column types of result table in the union smartly. */ if (field_example && item_is_a_field) { - Field *field= ((Item_field *)item)->field; /* Can 'field_example' field store data of the column? */ if ((use_new_field= (!field->field_cast_compatible(field_example->field_cast_type()) || @@ -2751,7 +2792,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item) It is safe to assign a pointer to field here, because it will be used before any table is closed. 
*/ - field_example= ((Item_field*) item)->field; + field_example= field; } old_cs= collation.collation->name; diff --git a/sql/item.h b/sql/item.h index 237a8f7efac..e0de7452eec 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1321,14 +1321,14 @@ protected: Item_result orig_type; Field *field_example; public: - Item_type_holder(THD*, Item*); + Item_type_holder(THD*, Item*, TABLE *); Item_result result_type () const { return item_type; } enum Type type() const { return TYPE_HOLDER; } double val(); longlong val_int(); String *val_str(String*); - bool join_types(THD *thd, Item *); + bool join_types(THD *thd, Item *, TABLE *); Field *example() { return field_example; } static uint32 real_length(Item *item); void cleanup() diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 027a21db7ac..882316d57d7 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -148,6 +148,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, SELECT_LEX *sl, *first_select; select_result *tmp_result; bool is_union; + TABLE *empty_table= 0; DBUG_ENTER("st_select_lex_unit::prepare"); describe= test(additional_options & SELECT_DESCRIBE); @@ -239,13 +240,21 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, goto err; if (sl == first_select) { + /* + We need to create an empty table object. It is used + to create tmp_table fields in Item_type_holder. + The main reason of this is that we can't create + field object without table. + */ + DBUG_ASSERT(!empty_table); + empty_table= (TABLE*) thd->calloc(sizeof(TABLE)); types.empty(); List_iterator_fast it(sl->item_list); Item *item_tmp; while ((item_tmp= it++)) { /* Error's in 'new' will be detected after loop */ - types.push_back(new Item_type_holder(thd_arg, item_tmp)); + types.push_back(new Item_type_holder(thd_arg, item_tmp, empty_table)); } if (thd_arg->is_fatal_error) @@ -264,7 +273,8 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, Item *type, *item_tmp; while ((type= tp++, item_tmp= it++)) { - if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp)) + if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp, + empty_table)) DBUG_RETURN(-1); } } From e5e7cd8ea8d0cee5e3d1f6f50b6846403b0a7f24 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 13:53:16 +0100 Subject: [PATCH 25/45] Proposal to fix this problem: when using libmysqlclient, you must call mysql_server_end() to nicely free memory at the end of your program; it however sounds weird to call a function named *SERVER_end* when you're the CLIENT (you're not ending the server, you're ending your ability to talk to servers). So here I add two defines which should be more generic names. This was longly discussed with Konstantin, Serg, Brian. The problem started from a post on valgrind-users list: http://sourceforge.net/mailarchive/forum.php?thread_id=5778035&forum_id=32038 ; our manual mentions these functions only for libmysqld API so needs some fixing, and then we can close BUG#8099 and BUG#6149. 
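For reference, a client program would use the new names as in the sketch below; everything around the two library calls is illustrative, only the init/end pairing itself is what the aliases are meant to encourage:

    #include <mysql.h>
    #include <stdio.h>

    int main(void)
    {
      /*
        Explicit init is optional for a client (mysql_init() triggers it),
        but calling it up front keeps init and cleanup symmetric.
      */
      if (mysql_library_init(0, NULL, NULL))
      {
        fprintf(stderr, "could not initialize the client library\n");
        return 1;
      }

      MYSQL *mysql= mysql_init(NULL);
      if (mysql)
      {
        /* ... mysql_real_connect(), queries ... */
        mysql_close(mysql);
      }

      /*
        Frees memory held by the library; without this call tools such as
        valgrind report the library's global allocations as still reachable.
      */
      mysql_library_end();
      return 0;
    }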
include/mysql.h: Creating synonyms (defines): mysql_library_init for mysql_server_init, mysql_library_end for mysql_server_end; these new names are more generic, so suitable when using libmysqlclient as well as libmysqld.c --- include/mysql.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/mysql.h b/include/mysql.h index 58c314207c1..b87b865608e 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -337,6 +337,17 @@ typedef struct st_mysql_parameters */ int STDCALL mysql_server_init(int argc, char **argv, char **groups); void STDCALL mysql_server_end(void); +/* + mysql_server_init/end need to be called when using libmysqld or + libmysqlclient (exactly, mysql_server_init() is called by mysql_init() so + you don't need to call it explicitely; but you need to call + mysql_server_end() to free memory). The names are a bit misleading + (mysql_SERVER* to be used when using libmysqlCLIENT). So we add more general + names which suit well whether you're using libmysqld or libmysqlclient. We + intend to promote these aliases over the mysql_server* ones. +*/ +#define mysql_library_init mysql_server_init +#define mysql_library_end mysql_server_end MYSQL_PARAMETERS *STDCALL mysql_get_parameters(void); From 48e2d224047ddb5a70dcca3abd7f4f828ee0b5bd Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 14:25:08 +0100 Subject: [PATCH 26/45] added test to trigger drifferent fragmentations in ndb corrected documentation on fragmentation set "fragmentation medium" to mean 2 fragments per node instead of 1 set default fragmentation to small instead of medium bug#8284 adjust fragmentation to max_rows mysql-test/r/ndb_basic.result: added test to trigger drifferent fragmentations in ndb mysql-test/t/ndb_basic.test: added test to trigger drifferent fragmentations in ndb ndb/include/ndbapi/NdbDictionary.hpp: corrected documentation on fragmentation ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: set "fragmentation medium" to mean 2 fragments per node instead of 1 ndb/src/ndbapi/NdbDictionaryImpl.cpp: set default fragmentation to small instead of medium sql/ha_ndbcluster.cc: bug#8284 adjust fragmentation to max_rows --- mysql-test/r/ndb_basic.result | 34 +++++++++++++++++ mysql-test/t/ndb_basic.test | 38 +++++++++++++++++++ ndb/include/ndbapi/NdbDictionary.hpp | 6 +-- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 2 +- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 2 +- sql/ha_ndbcluster.cc | 45 ++++++++++++++++++++++- 6 files changed, 121 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index 6ec5338acbe..a6396080ef0 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -573,3 +573,37 @@ select * from t1 where a12345678901234567890123456789a1234567890=2; a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890 5 2 drop table t1; +create table t1 +(a bigint, b bigint, c bigint, d bigint, +primary key (a,b,c,d)) +engine=ndb +max_rows=200000000; +Warnings: +Warning 1105 Ndb might have problems storing the max amount of rows specified +insert into t1 values +(1,2,3,4),(2,3,4,5),(3,4,5,6), +(3,2,3,4),(1,3,4,5),(2,4,5,6), +(1,2,3,5),(2,3,4,8),(3,4,5,9), +(3,2,3,5),(1,3,4,8),(2,4,5,9), +(1,2,3,6),(2,3,4,6),(3,4,5,7), +(3,2,3,6),(1,3,4,6),(2,4,5,7), +(1,2,3,7),(2,3,4,7),(3,4,5,8), +(3,2,3,7),(1,3,4,7),(2,4,5,8), +(1,3,3,4),(2,4,4,5),(3,5,5,6), +(3,3,3,4),(1,4,4,5),(2,5,5,6), +(1,3,3,5),(2,4,4,8),(3,5,5,9), +(3,3,3,5),(1,4,4,8),(2,5,5,9), +(1,3,3,6),(2,4,4,6),(3,5,5,7), +(3,3,3,6),(1,4,4,6),(2,5,5,7), 
+(1,3,3,7),(2,4,4,7),(3,5,5,8), +(3,3,3,7),(1,4,4,7),(2,5,5,8); +select count(*) from t1; +count(*) +48 +drop table t1; +create table t1 +(a bigint, b bigint, c bigint, d bigint, +primary key (a)) +engine=ndb +max_rows=1; +drop table t1; diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 2671223ada8..f460c573a9d 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -539,3 +539,41 @@ insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1); explain select * from t1 where a12345678901234567890123456789a1234567890=2; select * from t1 where a12345678901234567890123456789a1234567890=2; drop table t1; + +# +# test fragment creation +# +# first a table with _many_ fragments per node group +# then a table with just one fragment per node group +# +create table t1 + (a bigint, b bigint, c bigint, d bigint, + primary key (a,b,c,d)) + engine=ndb + max_rows=200000000; +insert into t1 values + (1,2,3,4),(2,3,4,5),(3,4,5,6), + (3,2,3,4),(1,3,4,5),(2,4,5,6), + (1,2,3,5),(2,3,4,8),(3,4,5,9), + (3,2,3,5),(1,3,4,8),(2,4,5,9), + (1,2,3,6),(2,3,4,6),(3,4,5,7), + (3,2,3,6),(1,3,4,6),(2,4,5,7), + (1,2,3,7),(2,3,4,7),(3,4,5,8), + (3,2,3,7),(1,3,4,7),(2,4,5,8), + (1,3,3,4),(2,4,4,5),(3,5,5,6), + (3,3,3,4),(1,4,4,5),(2,5,5,6), + (1,3,3,5),(2,4,4,8),(3,5,5,9), + (3,3,3,5),(1,4,4,8),(2,5,5,9), + (1,3,3,6),(2,4,4,6),(3,5,5,7), + (3,3,3,6),(1,4,4,6),(2,5,5,7), + (1,3,3,7),(2,4,4,7),(3,5,5,8), + (3,3,3,7),(1,4,4,7),(2,5,5,8); +select count(*) from t1; +drop table t1; + +create table t1 + (a bigint, b bigint, c bigint, d bigint, + primary key (a)) + engine=ndb + max_rows=1; +drop table t1; diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 0dca1c0f106..49afbd695c9 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -141,9 +141,9 @@ public: enum FragmentType { FragUndefined = 0, ///< Fragmentation type undefined or default FragSingle = 1, ///< Only one fragment - FragAllSmall = 2, ///< One fragment per node group - FragAllMedium = 3, ///< Default value. Two fragments per node group. - FragAllLarge = 4 ///< Eight fragments per node group. + FragAllSmall = 2, ///< One fragment per node, default + FragAllMedium = 3, ///< two fragments per node + FragAllLarge = 4 ///< Four fragments per node. 
}; }; diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index dba1efbba9a..0bc8351a9db 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -6178,7 +6178,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){ break; case DictTabInfo::AllNodesMediumTable: jam(); - noOfFragments = csystemnodes; + noOfFragments = 2 * csystemnodes; break; case DictTabInfo::AllNodesLargeTable: jam(); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 9f6ed144fb0..530f15d3a2e 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -284,7 +284,7 @@ void NdbTableImpl::init(){ clearNewProperties(); m_frm.clear(); - m_fragmentType = NdbDictionary::Object::FragAllMedium; + m_fragmentType = NdbDictionary::Object::FragAllSmall; m_logging = true; m_kvalue = 6; m_minLoadFactor = 78; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a959cbaf434..9f0da616289 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3503,6 +3503,47 @@ static int create_ndb_column(NDBCOL &col, Create a table in NDB Cluster */ +static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) +{ + if (form->max_rows == 0) /* default setting, don't set fragmentation */ + return; + /** + * get the number of fragments right + */ + uint no_fragments; + { +#if MYSQL_VERSION_ID >= 50000 + uint acc_row_size= 25+2; +#else + uint acc_row_size= pk_length*4; + /* add acc overhead */ + if (pk_length <= 8) + acc_row_size+= 25+2; /* main page will set the limit */ + else + acc_row_size+= 4+4; /* overflow page will set the limit */ +#endif + ulonglong acc_fragment_size= 512*1024*1024; + ulonglong max_rows= form->max_rows; + no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; + } + { + uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); + NDBTAB::FragmentType ftype; + if (no_fragments > 2*no_nodes) + { + ftype= NDBTAB::FragAllLarge; + if (no_fragments > 4*no_nodes) + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount of rows specified"); + } + else if (no_fragments > no_nodes) + ftype= NDBTAB::FragAllMedium; + else + ftype= NDBTAB::FragAllSmall; + tab.setFragmentType(ftype); + } +} + int ha_ndbcluster::create(const char *name, TABLE *form, HA_CREATE_INFO *info) @@ -3605,7 +3646,9 @@ int ha_ndbcluster::create(const char *name, break; } } - + + ndb_set_fragmentation(tab, form, pk_length); + if ((my_errno= check_ndb_connection())) DBUG_RETURN(my_errno); From 58fd4d94ce16eb473ccf56134bb51e1cd38c9c49 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 14:33:08 +0100 Subject: [PATCH 27/45] ha_ndbcluster.cc: fixed change in struct in 4.1->5.0 merge sql/ha_ndbcluster.cc: fixed change in struct in 4.1->5.0 merge --- sql/ha_ndbcluster.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 3911ad920fa..7cfb6501948 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3652,7 +3652,7 @@ static int create_ndb_column(NDBCOL &col, static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) { - if (form->max_rows == 0) /* default setting, don't set fragmentation */ + if (form->s->max_rows == 0) /* default setting, don't set fragmentation */ return; /** * get the number of fragments right @@ -3670,7 +3670,7 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, 
uint pk_length) acc_row_size+= 4+4; /* overflow page will set the limit */ #endif ulonglong acc_fragment_size= 512*1024*1024; - ulonglong max_rows= form->max_rows; + ulonglong max_rows= form->s->max_rows; no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; } { From e16b1b4a79210476229302f107dfa1c4c473bab2 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:44:09 +0200 Subject: [PATCH 28/45] InnoDB: A small cleanup: remove two duplicate rec_get_offsets() calls innobase/page/page0cur.c: Remove two duplicate rec_get_offsets() calls --- innobase/page/page0cur.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/innobase/page/page0cur.c b/innobase/page/page0cur.c index fc94fc4b1e0..488d2757115 100644 --- a/innobase/page/page0cur.c +++ b/innobase/page/page0cur.c @@ -311,9 +311,6 @@ page_cur_search_with_match( low_matched_bytes = cur_matched_bytes; } else if (cmp == -1) { - offsets = rec_get_offsets(mid_rec, index, offsets, - dtuple_get_n_fields_cmp(tuple), &heap); - if (mode == PAGE_CUR_LE_OR_EXTENDS && page_cur_rec_field_extends(tuple, mid_rec, offsets, cur_matched_fields)) { @@ -366,9 +363,6 @@ page_cur_search_with_match( low_matched_bytes = cur_matched_bytes; } else if (cmp == -1) { - offsets = rec_get_offsets(mid_rec, index, offsets, - dtuple_get_n_fields_cmp(tuple), &heap); - if (mode == PAGE_CUR_LE_OR_EXTENDS && page_cur_rec_field_extends(tuple, mid_rec, offsets, cur_matched_fields)) { From 9e92b63a0bc55356e42c169827b5cd2d631a92f9 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:08:08 +0100 Subject: [PATCH 29/45] Updated after review. sql/ha_ndbcluster.cc: Fixing spaces in parameter list. --- sql/ha_ndbcluster.cc | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d146e55f798..5c44479aa12 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4537,13 +4537,13 @@ bool ndbcluster_init() (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, (hash_get_key) ndbcluster_get_key,0,0); pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); - pthread_mutex_init(&LOCK_ndb_util_thread,MY_MUTEX_INIT_FAST); - pthread_cond_init(&COND_ndb_util_thread,NULL); + pthread_mutex_init(&LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST); + pthread_cond_init(&COND_ndb_util_thread, NULL); // Create utility thread pthread_t tmp; - if (pthread_create(&tmp,&connection_attrib,ndb_util_thread_func,0)) + if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0)) { DBUG_PRINT("error", ("Could not create ndb utility thread")); goto ndbcluster_init_error; @@ -4570,7 +4570,7 @@ bool ndbcluster_end() // Kill ndb utility thread (void) pthread_mutex_lock(&LOCK_ndb_util_thread); - DBUG_PRINT("exit",("killing ndb util thread: %lx",ndb_util_thread)); + DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread)); (void) pthread_cond_signal(&COND_ndb_util_thread); (void) pthread_mutex_unlock(&LOCK_ndb_util_thread); @@ -5597,7 +5597,8 @@ ha_ndbcluster::update_table_comment( // Utility thread main loop -extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused))) +extern "C" pthread_handler_decl(ndb_util_thread_func, + arg __attribute__((unused))) { THD *thd; // needs to be first for thread_stack int error = 0; @@ -5628,8 +5629,8 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused)) pthread_mutex_lock(&LOCK_ndb_util_thread); error= pthread_cond_timedwait(&COND_ndb_util_thread, - &LOCK_ndb_util_thread, - &abstime); + 
&LOCK_ndb_util_thread, + &abstime); pthread_mutex_unlock(&LOCK_ndb_util_thread); DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %d", From 8c750c466b0109723233e93aea85a39673409b40 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:24:06 +0100 Subject: [PATCH 30/45] indexless boolean fulltext search was depending on default_charset_info - Bug#8159 ftbw->off wasn't cleared on reinit - Bug#8234 include/ft_global.h: get rid of default_charset_info in indexless fulltext searches myisam/ft_boolean_search.c: get rid of default_charset_info in indexless fulltext searches clear ftbw->off on reinits myisam/ft_static.c: get rid of default_charset_info in indexless fulltext searches myisam/ftdefs.h: get rid of default_charset_info in indexless fulltext searches sql/ha_myisam.h: get rid of default_charset_info in indexless fulltext searches sql/handler.h: get rid of default_charset_info in indexless fulltext searches sql/item_func.cc: get rid of default_charset_info in indexless fulltext searches --- include/ft_global.h | 2 +- myisam/ft_boolean_search.c | 7 ++++--- myisam/ft_static.c | 5 +++-- myisam/ftdefs.h | 2 +- sql/ha_myisam.h | 8 ++++++-- sql/handler.h | 3 +-- sql/item_func.cc | 8 +++----- 7 files changed, 19 insertions(+), 16 deletions(-) diff --git a/include/ft_global.h b/include/ft_global.h index 94f6ad9ef51..c3f60d13a7a 100644 --- a/include/ft_global.h +++ b/include/ft_global.h @@ -62,7 +62,7 @@ void ft_free_stopwords(void); #define FT_SORTED 2 #define FT_EXPAND 4 /* query expansion */ -FT_INFO *ft_init_search(uint,void *, uint, byte *, uint, byte *); +FT_INFO *ft_init_search(uint,void *, uint, byte *, uint,CHARSET_INFO *, byte *); my_bool ft_boolean_check_syntax_string(const byte *); #ifdef __cplusplus diff --git a/myisam/ft_boolean_search.c b/myisam/ft_boolean_search.c index aab3854dd34..4253b5ff96f 100644 --- a/myisam/ft_boolean_search.c +++ b/myisam/ft_boolean_search.c @@ -365,6 +365,7 @@ static void _ftb_init_index_search(FT_INFO *ftb) reset_tree(& ftb->no_dupes); } + ftbw->off=0; /* in case of reinit */ if (_ft2_search(ftb, ftbw, 1)) return; } @@ -373,7 +374,7 @@ static void _ftb_init_index_search(FT_INFO *ftb) FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query, - uint query_len) + uint query_len, CHARSET_INFO *cs) { FTB *ftb; FTB_EXPR *ftbe; @@ -385,8 +386,8 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query, ftb->state=UNINITIALIZED; ftb->info=info; ftb->keynr=keynr; - ftb->charset= ((keynr==NO_SUCH_KEY) ? 
- default_charset_info : info->s->keyinfo[keynr].seg->charset); + ftb->charset=cs; + DBUG_ASSERT(keynr==NO_SUCH_KEY || cs == info->s->keyinfo[keynr].seg->charset); ftb->with_scan=0; ftb->lastpos=HA_OFFSET_ERROR; bzero(& ftb->no_dupes, sizeof(TREE)); diff --git a/myisam/ft_static.c b/myisam/ft_static.c index 7168406d027..994a94d0c49 100644 --- a/myisam/ft_static.c +++ b/myisam/ft_static.c @@ -55,11 +55,12 @@ const struct _ft_vft _ft_vft_boolean = { FT_INFO *ft_init_search(uint flags, void *info, uint keynr, - byte *query, uint query_len, byte *record) + byte *query, uint query_len, CHARSET_INFO *cs, + byte *record) { FT_INFO *res; if (flags & FT_BOOL) - res= ft_init_boolean_search((MI_INFO *)info, keynr, query, query_len); + res= ft_init_boolean_search((MI_INFO *)info, keynr, query, query_len,cs); else res= ft_init_nlq_search((MI_INFO *)info, keynr, query, query_len, flags, record); diff --git a/myisam/ftdefs.h b/myisam/ftdefs.h index e7a0829e140..ddb9fbfead2 100644 --- a/myisam/ftdefs.h +++ b/myisam/ftdefs.h @@ -131,7 +131,7 @@ FT_WORD * _mi_ft_parserecord(MI_INFO *, uint, const byte *); uint _mi_ft_parse(TREE *, MI_INFO *, uint, const byte *, my_bool); FT_INFO *ft_init_nlq_search(MI_INFO *, uint, byte *, uint, uint, byte *); -FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint); +FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint, CHARSET_INFO *); extern const struct _ft_vft _ft_vft_nlq; int ft_nlq_read_next(FT_INFO *, char *); diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index 972d6b18e19..1e6cf2f4ada 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -88,8 +88,12 @@ class ha_myisam: public handler ft_handler->please->reinit_search(ft_handler); return 0; } - FT_INFO *ft_init_ext(uint flags, uint inx,const byte *key, uint keylen) - { return ft_init_search(flags,file,inx,(byte*) key,keylen, table->record[0]); } + FT_INFO *ft_init_ext(uint flags, uint inx,String *key) + { + return ft_init_search(flags,file,inx, + (byte *)key->ptr(), key->length(), key->charset(), + table->record[0]); + } int ft_read(byte *buf); int rnd_init(bool scan); int rnd_next(byte *buf); diff --git a/sql/handler.h b/sql/handler.h index 245defe61e0..0426312f404 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -373,8 +373,7 @@ public: int compare_key(key_range *range); virtual int ft_init() { return HA_ERR_WRONG_COMMAND; } void ft_end() { ft_handler=NULL; } - virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key, - uint keylen) + virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key) { return NULL; } virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; } virtual int rnd_next(byte *buf)=0; diff --git a/sql/item_func.cc b/sql/item_func.cc index bff49541252..85cd1c693b7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -3047,9 +3047,7 @@ void Item_func_match::init_search(bool no_order) if (join_key && !no_order) flags|=FT_SORTED; - ft_handler=table->file->ft_init_ext(flags, key, - (byte*) ft_tmp->ptr(), - ft_tmp->length()); + ft_handler=table->file->ft_init_ext(flags, key, ft_tmp); if (join_key) table->file->ft_handler=ft_handler; @@ -3091,12 +3089,12 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) } /* Check that all columns come from the same table. - We've already checked that columns in MATCH are fields so + We've already checked that columns in MATCH are fields so PARAM_TABLE_BIT can only appear from AGAINST argument. 
*/ if ((used_tables_cache & ~PARAM_TABLE_BIT) != item->used_tables()) key=NO_SUCH_KEY; - + if (key == NO_SUCH_KEY && !(flags & FT_BOOL)) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH"); From 8ed40c4b09bb4c56f17a1432fc9024a5dfc4e04a Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 19:12:15 +0400 Subject: [PATCH 31/45] Embedded version of test fixed mysql-test/r/insert_select.result.es: Test.es fixed --- mysql-test/r/insert_select.result.es | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mysql-test/r/insert_select.result.es b/mysql-test/r/insert_select.result.es index 9e11402733d..9cac6d31b8f 100644 --- a/mysql-test/r/insert_select.result.es +++ b/mysql-test/r/insert_select.result.es @@ -633,3 +633,15 @@ No Field Count 0 1 100 0 2 100 drop table t1, t2; +CREATE TABLE t1 ( +ID int(11) NOT NULL auto_increment, +NO int(11) NOT NULL default '0', +SEQ int(11) NOT NULL default '0', +PRIMARY KEY (ID), +KEY t1$NO (SEQ,NO) +) ENGINE=MyISAM; +INSERT INTO t1 (SEQ, NO) SELECT "1" AS SEQ, IF(MAX(NO) IS NULL, 0, MAX(NO)) + 1 AS NO FROM t1 WHERE (SEQ = 1); +select SQL_BUFFER_RESULT * from t1 WHERE (SEQ = 1); +ID NO SEQ +1 1 1 +drop table t1; From 5d16b7a93a553b7ec7678d6c0e83a345667895d2 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 17:24:18 +0100 Subject: [PATCH 32/45] mysql-test-run.sh: USE_RUNNING_SERVER should be set to 0 or 1 mysql-test/mysql-test-run.sh: USE_RUNNING_SERVER should be set to 0 or 1 --- mysql-test/mysql-test-run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index fadc14d4e1b..42f96789eef 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -259,13 +259,13 @@ while test $# -gt 0; do --embedded-server) USE_EMBEDDED_SERVER=1 USE_MANAGER=0 NO_SLAVE=1 - USE_RUNNING_SERVER="" + USE_RUNNING_SERVER=0 RESULT_EXT=".es" TEST_MODE="$TEST_MODE embedded" ;; --purify) USE_PURIFY=1 USE_MANAGER=0 - USE_RUNNING_SERVER="" + USE_RUNNING_SERVER=0 TEST_MODE="$TEST_MODE purify" ;; --user=*) DBUSER=`$ECHO "$1" | $SED -e "s;--user=;;"` ;; --force) FORCE=1 ;; From 12c522515f505854850cf3e64721499492da4d53 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 12:01:30 -0800 Subject: [PATCH 33/45] Merge --- sql/field.h | 240 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 154 insertions(+), 86 deletions(-) diff --git a/sql/field.h b/sql/field.h index fd0f2f9c2f1..756fa713707 100644 --- a/sql/field.h +++ b/sql/field.h @@ -37,11 +37,7 @@ class Field void operator=(Field &); public: static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } - static void operator delete(void *ptr_arg, size_t size) { -#ifdef SAFEMALLOC - bfill(ptr_arg, size, 0x8F); -#endif - } + static void operator delete(void *ptr_arg, size_t size) { TRASH(ptr_arg, size); } char *ptr; // Position to field in record uchar *null_ptr; // Byte where null_bit is @@ -51,7 +47,7 @@ public: */ struct st_table *table; // Pointer for table struct st_table *orig_table; // Pointer to original table - const char *table_name,*field_name; + const char **table_name, *field_name; LEX_STRING comment; ulong query_id; // For quick test of used fields /* Field is part of the following keys */ @@ -84,7 +80,7 @@ public: FIELD_CAST_TIMESTAMP, FIELD_CAST_YEAR, FIELD_CAST_DATE, FIELD_CAST_NEWDATE, FIELD_CAST_TIME, FIELD_CAST_DATETIME, FIELD_CAST_STRING, FIELD_CAST_VARSTRING, FIELD_CAST_BLOB, - FIELD_CAST_GEOM, FIELD_CAST_ENUM, FIELD_CAST_SET + FIELD_CAST_GEOM, 
FIELD_CAST_ENUM, FIELD_CAST_SET, FIELD_CAST_BIT }; utype unireg_check; @@ -100,7 +96,7 @@ public: virtual int store(const char *to,uint length,CHARSET_INFO *cs)=0; virtual int store(double nr)=0; virtual int store(longlong nr)=0; - virtual void store_time(TIME *ltime,timestamp_type t_type); + virtual int store_time(TIME *ltime, timestamp_type t_type); virtual double val_real(void)=0; virtual longlong val_int(void)=0; inline String *val_str(String *str) { return val_str(str, str); } @@ -117,16 +113,22 @@ public: This trickery is used to decrease a number of malloc calls. */ virtual String *val_str(String*,String *)=0; + String *val_int_as_str(String *val_buffer, my_bool unsigned_flag); virtual Item_result result_type () const=0; virtual Item_result cmp_type () const { return result_type(); } - bool eq(Field *field) { return ptr == field->ptr && null_ptr == field->null_ptr; } + bool eq(Field *field) + { + return (ptr == field->ptr && null_ptr == field->null_ptr && + null_bit == field->null_bit); + } virtual bool eq_def(Field *field); virtual uint32 pack_length() const { return (uint32) field_length; } + virtual uint32 pack_length_in_rec() const { return pack_length(); } virtual void reset(void) { bzero(ptr,pack_length()); } virtual void reset_fields() {} virtual void set_default() { - my_ptrdiff_t offset = (my_ptrdiff_t) (table->default_values - + my_ptrdiff_t offset = (my_ptrdiff_t) (table->s->default_values - table->record[0]); memcpy(ptr, ptr + offset, pack_length()); if (null_ptr) @@ -143,10 +145,9 @@ public: virtual int cmp(const char *,const char *)=0; virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L) { return memcmp(a,b,pack_length()); } - virtual int cmp_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } - virtual int cmp_binary_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } + int cmp_offset(uint row_offset) { return cmp(ptr,ptr+row_offset); } + int cmp_binary_offset(uint row_offset) + { return cmp_binary(ptr, ptr+row_offset); }; virtual int key_cmp(const byte *a,const byte *b) { return cmp((char*) a,(char*) b); } virtual int key_cmp(const byte *str, uint length) @@ -176,7 +177,7 @@ public: { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } - virtual void make_field(Send_field *)=0; + virtual void make_field(Send_field *); virtual void sort_string(char *buff,uint length)=0; virtual bool optimize_range(uint idx, uint part); /* @@ -188,28 +189,11 @@ public: */ virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} - Field *new_field(MEM_ROOT *root, struct st_table *new_table) - { - Field *tmp= (Field*) memdup_root(root,(char*) this,size_of()); - if (tmp) - { - if (tmp->table->maybe_null) - tmp->flags&= ~NOT_NULL_FLAG; - tmp->table= new_table; - tmp->key_start.init(0); - tmp->part_of_key.init(0); - tmp->part_of_sortkey.init(0); - tmp->unireg_check=Field::NONE; - tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | - ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); -#ifdef PROBABLY_WRONG - tmp->table_name= new_table->table_name; -#endif - tmp->reset_fields(); - } - return tmp; - } - inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) + virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table); + virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, 
uchar *new_null_ptr, + uint new_null_bit); + virtual void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) { ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg; } @@ -224,11 +208,10 @@ public: { memcpy(buff,ptr,length); } inline void set_image(char *buff,uint length, CHARSET_INFO *cs) { memcpy(ptr,buff,length); } - virtual void get_key_image(char *buff,uint length, CHARSET_INFO *cs, - imagetype type) - { get_image(buff,length,cs); } - virtual void set_key_image(char *buff,uint length, CHARSET_INFO *cs) - { set_image(buff,length,cs); } + virtual void get_key_image(char *buff, uint length, imagetype type) + { get_image(buff,length, &my_charset_bin); } + virtual void set_key_image(char *buff,uint length) + { set_image(buff,length, &my_charset_bin); } inline longlong val_int_offset(uint row_offset) { ptr+=row_offset; @@ -236,6 +219,17 @@ public: ptr-=row_offset; return tmp; } + + inline String *val_str(String *str, char *new_ptr) + { + char *old_ptr= ptr; + ptr= new_ptr; + val_str(str); + ptr= old_ptr; + return str; + } + bool quote_data(String *unquoted_string); + bool needs_quotes(void); virtual bool send_binary(Protocol *protocol); virtual char *pack(char* to, const char *from, uint max_length=~(uint) 0) { @@ -267,9 +261,11 @@ public: virtual uint max_packed_col_length(uint max_length) { return max_length;} - virtual int pack_cmp(const char *a,const char *b, uint key_length_arg) + virtual int pack_cmp(const char *a,const char *b, uint key_length_arg, + my_bool insert_or_update) { return cmp(a,b); } - virtual int pack_cmp(const char *b, uint key_length_arg) + virtual int pack_cmp(const char *b, uint key_length_arg, + my_bool insert_or_update) { return cmp(ptr,b); } uint offset(); // Should be inline ... void copy_from_tmp(int offset); @@ -281,6 +277,8 @@ public: virtual void set_charset(CHARSET_INFO *charset) { } bool set_warning(const unsigned int level, const unsigned int code, int cuted_increment); + bool check_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs); void set_datetime_warning(const uint level, const uint code, const char *str, uint str_len, timestamp_type ts_type, int cuted_increment); @@ -360,7 +358,6 @@ public: int store(double nr); int store(longlong nr)=0; int store(const char *to,uint length,CHARSET_INFO *cs)=0; - void make_field(Send_field *); uint size_of() const { return sizeof(*this); } CHARSET_INFO *charset(void) const { return field_charset; } void set_charset(CHARSET_INFO *charset) { field_charset=charset; } @@ -719,7 +716,7 @@ public: if ((*null_value= is_null())) return 0; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) return sint4korr(ptr); #endif long tmp; @@ -803,7 +800,7 @@ public: int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); - void store_time(TIME *ltime,timestamp_type type); + int store_time(TIME *ltime, timestamp_type type); void reset(void) { ptr[0]=ptr[1]=ptr[2]=0; } double val_real(void); longlong val_int(void); @@ -836,6 +833,7 @@ public: enum_field_types type() const { return FIELD_TYPE_TIME;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; } enum Item_result cmp_type () const { return INT_RESULT; } + int store_time(TIME *ltime, timestamp_type type); int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); @@ -876,7 +874,7 @@ public: int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong 
nr); - void store_time(TIME *ltime,timestamp_type type); + int store_time(TIME *ltime, timestamp_type type); void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; } double val_real(void); longlong val_int(void); @@ -909,9 +907,11 @@ public: enum_field_types type() const { - return ((table && table->db_create_options & HA_OPTION_PACK_RECORD && - field_length >= 4) ? - FIELD_TYPE_VAR_STRING : FIELD_TYPE_STRING); + return ((orig_table && + orig_table->s->db_create_options & HA_OPTION_PACK_RECORD && + field_length >= 4) && + orig_table->s->frm_version < FRM_VER_TRUE_VARCHAR ? + MYSQL_TYPE_VAR_STRING : MYSQL_TYPE_STRING); } enum ha_base_keytype key_type() const { return binary() ? HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT; } @@ -928,8 +928,9 @@ public: void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); const char *unpack(char* to, const char *from); - int pack_cmp(const char *a,const char *b,uint key_length); - int pack_cmp(const char *b,uint key_length); + int pack_cmp(const char *a,const char *b,uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b,uint key_length,my_bool insert_or_update); uint packed_col_length(const char *to, uint length); uint max_packed_col_length(uint max_length); uint size_of() const { return sizeof(*this); } @@ -937,31 +938,43 @@ public: bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } field_cast_enum field_cast_type() { return FIELD_CAST_STRING; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table); }; class Field_varstring :public Field_str { public: - Field_varstring(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, + /* Store number of bytes used to store length (1 or 2) */ + uint32 length_bytes; + Field_varstring(char *ptr_arg, + uint32 len_arg, uint length_bytes_arg, + uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, cs) - {} + unireg_check_arg, field_name_arg, table_arg, cs), + length_bytes(length_bytes_arg) + { + if (table) + table->s->varchar_fields++; + } Field_varstring(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg, cs) - {} + NONE, field_name_arg, table_arg, cs), + length_bytes(len_arg < 256 ? 1 :2) + { + if (table) + table->s->varchar_fields++; + } - enum_field_types type() const { return FIELD_TYPE_VAR_STRING; } - enum ha_base_keytype key_type() const - { return binary() ? 
HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + enum_field_types type() const { return MYSQL_TYPE_VARCHAR; } + enum ha_base_keytype key_type() const; bool zero_pack() const { return 0; } - void reset(void) { bzero(ptr,field_length+2); } - uint32 pack_length() const { return (uint32) field_length+2; } + void reset(void) { bzero(ptr,field_length+length_bytes); } + uint32 pack_length() const { return (uint32) field_length+length_bytes; } uint32 key_length() const { return (uint32) field_length; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(longlong nr); @@ -971,21 +984,31 @@ public: String *val_str(String*,String *); int cmp(const char *,const char*); void sort_string(char *buff,uint length); - void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length, imagetype type); + void set_key_image(char *buff,uint length); void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); char *pack_key(char *to, const char *from, uint max_length); + char *pack_key_from_key_image(char* to, const char *from, uint max_length); const char *unpack(char* to, const char *from); - int pack_cmp(const char *a, const char *b, uint key_length); - int pack_cmp(const char *b, uint key_length); + const char *unpack_key(char* to, const char *from, uint max_length); + int pack_cmp(const char *a, const char *b, uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); + int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); + int key_cmp(const byte *,const byte*); + int key_cmp(const byte *str, uint length); uint packed_col_length(const char *to, uint length); uint max_packed_col_length(uint max_length); uint size_of() const { return sizeof(*this); } - enum_field_types real_type() const { return FIELD_TYPE_VAR_STRING; } + enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } field_cast_enum field_cast_type() { return FIELD_CAST_VARSTRING; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); }; @@ -1008,7 +1031,7 @@ public: } enum_field_types type() const { return FIELD_TYPE_BLOB;} enum ha_base_keytype key_type() const - { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + { return binary() ? 
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); @@ -1017,15 +1040,13 @@ public: String *val_str(String*,String *); int cmp(const char *,const char*); int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length); - int cmp_offset(uint offset); int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); - int cmp_binary_offset(uint row_offset); int key_cmp(const byte *,const byte*); int key_cmp(const byte *str, uint length); uint32 key_length() const { return 0; } void sort_string(char *buff,uint length); uint32 pack_length() const - { return (uint32) (packlength+table->blob_ptr_size); } + { return (uint32) (packlength+table->s->blob_ptr_size); } inline uint32 max_data_length() const { return (uint32) (((ulonglong) 1 << (packlength*8)) -1); @@ -1051,8 +1072,8 @@ public: store_length(length); memcpy_fixed(ptr+packlength,&data,sizeof(char*)); } - void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length, imagetype type); + void set_key_image(char *buff,uint length); void sql_type(String &str) const; inline bool copy() { char *tmp; @@ -1066,12 +1087,13 @@ public: return 0; } char *pack(char *to, const char *from, uint max_length= ~(uint) 0); - const char *unpack(char *to, const char *from); char *pack_key(char *to, const char *from, uint max_length); char *pack_key_from_key_image(char* to, const char *from, uint max_length); + const char *unpack(char *to, const char *from); const char *unpack_key(char* to, const char *from, uint max_length); - int pack_cmp(const char *a, const char *b, uint key_length); - int pack_cmp(const char *b, uint key_length); + int pack_cmp(const char *a, const char *b, uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); uint packed_col_length(const char *col_ptr, uint length); uint max_packed_col_length(uint max_length); void free() { value.free(); } @@ -1084,6 +1106,7 @@ public: uint32 max_length(); }; + #ifdef HAVE_SPATIAL class Field_geom :public Field_blob { public: @@ -1101,19 +1124,19 @@ public: :Field_blob(len_arg, maybe_null_arg, field_name_arg, table_arg, &my_charset_bin) { geom_type= geom_type_arg; } - enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; } enum_field_types type() const { return FIELD_TYPE_GEOMETRY; } void sql_type(String &str) const; int store(const char *to, uint length, CHARSET_INFO *charset); int store(double nr) { return 1; } int store(longlong nr) { return 1; } - void get_key_image(char *buff,uint length, CHARSET_INFO *cs,imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length,imagetype type); field_cast_enum field_cast_type() { return FIELD_CAST_GEOM; } }; #endif /*HAVE_SPATIAL*/ + class Field_enum :public Field_str { protected: uint packlength; @@ -1182,6 +1205,52 @@ public: }; +class Field_bit :public Field { +public: + uchar *bit_ptr; // position in record where 'uneven' bits store + uchar bit_ofs; // offset to 'uneven' high bits + uint bit_len; // number of 'uneven' high bits + Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + 
struct st_table *table_arg); + enum_field_types type() const { return FIELD_TYPE_BIT; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; } + uint32 key_length() const { return (uint32) field_length + (bit_len > 0); } + uint32 max_length() { return (uint32) field_length + (bit_len > 0); } + uint size_of() const { return sizeof(*this); } + Item_result result_type () const { return INT_RESULT; } + void reset(void) { bzero(ptr, field_length); } + int store(const char *to, uint length, CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); + double val_real(void); + longlong val_int(void); + String *val_str(String*, String *); + int cmp(const char *a, const char *b) + { return cmp_binary(a, b); } + int key_cmp(const byte *a, const byte *b) + { return cmp_binary((char *) a, (char *) b); } + int key_cmp(const byte *str, uint length); + int cmp_offset(uint row_offset); + void get_key_image(char *buff, uint length, imagetype type); + void set_key_image(char *buff, uint length) + { Field_bit::store(buff, length, &my_charset_bin); } + void sort_string(char *buff, uint length) + { get_key_image(buff, length, itRAW); } + uint32 pack_length() const + { return (uint32) field_length + (bit_len > 0); } + uint32 pack_length_in_rec() const { return field_length; } + void sql_type(String &str) const; + field_cast_enum field_cast_type() { return FIELD_CAST_BIT; } + char *pack(char *to, const char *from, uint max_length=~(uint) 0); + const char *unpack(char* to, const char *from); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); +}; + + /* Create field class for CREATE TABLE */ @@ -1194,8 +1263,8 @@ public: LEX_STRING comment; // Comment for field Item *def; // Default value enum enum_field_types sql_type; - uint32 length; - uint decimals,flags,pack_length; + ulong length; + uint decimals, flags, pack_length, key_length; Field::utype unireg_check; TYPELIB *interval; // Which interval to use List interval_list; @@ -1260,11 +1329,10 @@ Field *make_field(char *ptr, uint32 field_length, TYPELIB *interval, const char *field_name, struct st_table *table); uint pack_length_to_packflag(uint type); +enum_field_types get_blob_type_from_length(ulong length); uint32 calc_pack_length(enum_field_types type,uint32 length); int set_field_to_null(Field *field); int set_field_to_null_with_conversions(Field *field, bool no_conversions); -bool test_if_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs); bool field_types_to_be_kept(enum_field_types field_type); /* @@ -1284,6 +1352,7 @@ bool field_types_to_be_kept(enum_field_types field_type); #define FIELDFLAG_LEFT_FULLSCREEN 8192 #define FIELDFLAG_RIGHT_FULLSCREEN 16384 #define FIELDFLAG_FORMAT_NUMBER 16384 // predit: ###,,## in output +#define FIELDFLAG_NO_DEFAULT 16384 /* sql */ #define FIELDFLAG_SUM ((uint) 32768)// predit: +#fieldflag #define FIELDFLAG_MAYBE_NULL ((uint) 32768)// sql #define FIELDFLAG_PACK_SHIFT 3 @@ -1292,8 +1361,6 @@ bool field_types_to_be_kept(enum_field_types field_type); #define FIELDFLAG_NUM_SCREEN_TYPE 0x7F01 #define FIELDFLAG_ALFA_SCREEN_TYPE 0x7800 -#define FIELD_SORT_REVERSE 16384 - #define MTYP_TYPENR(type) (type & 127) /* Remove bits from type */ #define f_is_dec(x) ((x) & FIELDFLAG_DECIMAL) @@ -1311,3 +1378,4 @@ bool field_types_to_be_kept(enum_field_types field_type); #define f_is_equ(x) ((x) & (1+2+FIELDFLAG_PACK+31*256)) #define f_settype(x) (((int) x) << FIELDFLAG_PACK_SHIFT) #define f_maybe_null(x) (x & 
FIELDFLAG_MAYBE_NULL) +#define f_no_default(x) (x & FIELDFLAG_NO_DEFAULT) From 7a3c7a7f7c89031be489524715666bf232ef0c5c Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 12:59:02 -0800 Subject: [PATCH 34/45] Update test results mysql-test/r/func_group.result: Update results --- mysql-test/r/func_group.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 706bf4c5c6b..fdadd378ceb 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -742,10 +742,10 @@ insert into t1 values (now()); create table t2 select f2 from (select max(now()) f2 from t1) a; show columns from t2; Field Type Null Key Default Extra -f2 datetime 0000-00-00 00:00:00 +f2 datetime NO 0000-00-00 00:00:00 drop table t2; create table t2 select f2 from (select now() f2 from t1) a; show columns from t2; Field Type Null Key Default Extra -f2 datetime 0000-00-00 00:00:00 +f2 datetime NO 0000-00-00 00:00:00 drop table t2, t1; From ededf83143df115f5feaabbbcfeea2494d7d65d5 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 22:43:54 +0100 Subject: [PATCH 35/45] Fix for BUG#8055 "Trouble with replication from temporary tables and ignores": when we close the session's temp tables at session end, we automatically write to binlog *one* DROP TEMPORARY TABLE *per tmp table*. mysql-test/r/drop_temp_table.result: result update (note: one DROP TEMPORARY TABLE per tmp table) mysql-test/t/drop_temp_table.test: checking that we have one DROP TEMPORARY TABLE per tmp table now, not one multi-table DROP. Hiding columns Log_pos/End_log_pos per Monty's request. sql/sql_base.cc: When we close the session's temp tables at session end, we automatically write to binlog one DROP TEMPORARY TABLE per tmp table, not one single multi-table DROP TEMPORARY TABLE (because it causes problems if slave has --replicate*table rules). 
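For illustration, a sketch of the resulting binlog contents (hypothetical table names; assumes the slave runs with --replicate-ignore-table=test.tmp_ignored):

create temporary table tmp_kept (a int);
create temporary table tmp_ignored (a int);
-- on disconnect the master now logs one event per table, e.g.
--   DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`tmp_ignored`
--   DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`tmp_kept`
-- so the slave can apply its --replicate-*-table rules to each DROP
-- separately, which a single multi-table DROP TEMPORARY TABLE does not allow.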
--- mysql-test/r/drop_temp_table.result | 16 ++++--- mysql-test/t/drop_temp_table.test | 3 ++ sql/sql_base.cc | 66 ++++++++++++++--------------- 3 files changed, 45 insertions(+), 40 deletions(-) diff --git a/mysql-test/r/drop_temp_table.result b/mysql-test/r/drop_temp_table.result index 266196877c8..a486964feb2 100644 --- a/mysql-test/r/drop_temp_table.result +++ b/mysql-test/r/drop_temp_table.result @@ -1,7 +1,9 @@ reset master; create database `drop-temp+table-test`; use `drop-temp+table-test`; +create temporary table shortn1 (a int); create temporary table `table:name` (a int); +create temporary table shortn2 (a int); select get_lock("a",10); get_lock("a",10) 1 @@ -10,9 +12,13 @@ get_lock("a",10) 1 show binlog events; Log_name Pos Event_type Server_id Orig_log_pos Info -master-bin.000001 4 Start 1 4 Server ver: VERSION, Binlog ver: 3 -master-bin.000001 79 Query 1 79 create database `drop-temp+table-test` -master-bin.000001 168 Query 1 168 use `drop-temp+table-test`; create temporary table `table:name` (a int) -master-bin.000001 262 Query 1 262 use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`table:name` -master-bin.000001 391 Query 1 391 use `drop-temp+table-test`; DO RELEASE_LOCK("a") +master-bin.000001 # Start 1 # Server ver: VERSION, Binlog ver: 3 +master-bin.000001 # Query 1 # create database `drop-temp+table-test` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn1 (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table `table:name` (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn2 (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn2` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`table:name` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn1` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DO RELEASE_LOCK("a") drop database `drop-temp+table-test`; diff --git a/mysql-test/t/drop_temp_table.test b/mysql-test/t/drop_temp_table.test index 1a7d8796bb3..dcd95721179 100644 --- a/mysql-test/t/drop_temp_table.test +++ b/mysql-test/t/drop_temp_table.test @@ -4,7 +4,9 @@ connection con1; reset master; create database `drop-temp+table-test`; use `drop-temp+table-test`; +create temporary table shortn1 (a int); create temporary table `table:name` (a int); +create temporary table shortn2 (a int); select get_lock("a",10); disconnect con1; @@ -15,5 +17,6 @@ connection con2; select get_lock("a",10); let $VERSION=`select version()`; --replace_result $VERSION VERSION +--replace_column 2 # 5 # show binlog events; drop database `drop-temp+table-test`; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 7434897ab90..fe1f268e277 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -485,62 +485,58 @@ void close_temporary(TABLE *table,bool delete_table) void close_temporary_tables(THD *thd) { TABLE *table,*next; - char *query, *end; - uint query_buf_size; - bool found_user_tables = 0; + char *query, *name_in_query, *end; + uint greatest_key_length= 0; if (!thd->temporary_tables) return; + /* + We write a DROP TEMPORARY TABLE for each temp table left, so that our + replication slave can clean them up. 
Not one multi-table DROP TABLE binlog + event: this would cause problems if slave uses --replicate-*-table. + */ LINT_INIT(end); - query_buf_size= 50; // Enough for DROP ... TABLE IF EXISTS + /* We'll re-use always same buffer so make it big enough for longest name */ for (table=thd->temporary_tables ; table ; table=table->next) - /* - We are going to add 4 ` around the db/table names, so 1 does not look - enough; indeed it is enough, because table->key_length is greater (by 8, - because of server_id and thread_id) than db||table. - */ - query_buf_size+= table->key_length+1; + greatest_key_length= max(greatest_key_length, table->key_length); - if ((query = alloc_root(thd->mem_root, query_buf_size))) + if ((query = alloc_root(thd->mem_root, greatest_key_length+50))) // Better add "if exists", in case a RESET MASTER has been done - end=strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "); + name_in_query= strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `"); for (table=thd->temporary_tables ; table ; table=next) { - if (query) // we might be out of memory, but this is not fatal + /* + In we are OOM for 'query' this is not fatal. We skip temporary tables + not created directly by the user. + */ + if (query && mysql_bin_log.is_open() && (table->real_name[0] != '#')) { - // skip temporary tables not created directly by the user - if (table->real_name[0] != '#') - found_user_tables = 1; /* Here we assume table_cache_key always starts with \0 terminated db name */ - end = strxmov(end,"`",table->table_cache_key,"`.`", - table->real_name,"`,", NullS); + end = strxmov(name_in_query, table->table_cache_key, "`.`", + table->real_name, "`", NullS); + Query_log_event qinfo(thd, query, (ulong)(end-query), 0, FALSE); + /* + Imagine the thread had created a temp table, then was doing a SELECT, and + the SELECT was killed. Then it's not clever to mark the statement above as + "killed", because it's not really a statement updating data, and there + are 99.99% chances it will succeed on slave. And, if thread is + killed now, it's not clever either. + If a real update (one updating a persistent table) was killed on the + master, then this real update will be logged with error_code=killed, + rightfully causing the slave to stop. + */ + qinfo.error_code= 0; + mysql_bin_log.write(&qinfo); } next=table->next; close_temporary(table); } - if (query && found_user_tables && mysql_bin_log.is_open()) - { - /* The -1 is to remove last ',' */ - thd->clear_error(); - Query_log_event qinfo(thd, query, (ulong)(end-query)-1, 0, FALSE); - /* - Imagine the thread had created a temp table, then was doing a SELECT, and - the SELECT was killed. Then it's not clever to mark the statement above as - "killed", because it's not really a statement updating data, and there - are 99.99% chances it will succeed on slave. - If a real update (one updating a persistent table) was killed on the - master, then this real update will be logged with error_code=killed, - rightfully causing the slave to stop. - */ - qinfo.error_code= 0; - mysql_bin_log.write(&qinfo); - } thd->temporary_tables=0; } From 987e620d6376ae3b9047d1185dca7141585173ac Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 23:07:10 +0100 Subject: [PATCH 36/45] Backport of ChangeSet 1.1845 05/02/04 13:53:16 guilhem@mysql.com +1 -0 from 5.0. 
Proposal to fix this problem: when using libmysqlclient, you must call mysql_server_end() to nicely free memory at the end of your program; it however sounds weird to call a function named *SERVER_end* when you're the CLIENT (you're not ending the server, you're ending your ability to talk to servers). So here I add two defines which should be more generic names. Our manual mentions these functions only for libmysqld API so needs some fixing, and then we can close BUG#8099 and BUG#6149. include/mysql.h: Creating synonyms (defines): mysql_library_init for mysql_server_init, mysql_library_end for mysql_server_end; these new names are more generic, so suitable when using libmysqlclient as well as libmysqld. --- include/mysql.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/mysql.h b/include/mysql.h index 2c0197e2300..d8a56126756 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -334,6 +334,17 @@ typedef struct st_mysql_parameters */ int STDCALL mysql_server_init(int argc, char **argv, char **groups); void STDCALL mysql_server_end(void); +/* + mysql_server_init/end need to be called when using libmysqld or + libmysqlclient (exactly, mysql_server_init() is called by mysql_init() so + you don't need to call it explicitely; but you need to call + mysql_server_end() to free memory). The names are a bit misleading + (mysql_SERVER* to be used when using libmysqlCLIENT). So we add more general + names which suit well whether you're using libmysqld or libmysqlclient. We + intend to promote these aliases over the mysql_server* ones. +*/ +#define mysql_library_init mysql_server_init +#define mysql_library_end mysql_server_end MYSQL_PARAMETERS *STDCALL mysql_get_parameters(void); From fe83a1938d51c2fe780631911282902aeae1c0a9 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 5 Feb 2005 01:21:16 +0300 Subject: [PATCH 37/45] A fix for Bug#6273 "building fails on link": we should not use CLIENT_LIBS in mysql_config as CLIENT_LIBS point to builddir when we use the bundled zlib. acinclude.m4: Extend MYSQL_CHECK_ZLIB_WITH_COMPRESS m4 macro to substitute ZLIB_DEPS - this is a special version of ZLIB_LIBS to use in mysql_config configure.in: Remove NON_THREADED_CLIENT_LIBS which weren't really NON_THREADED_CLIENT_LIBS and use NON_THREADED_LIBS instead. AC_SUBST NON_THREADED_LIBS and STATIC_NSS_FLAGS as they're now needed inside mysql_config.sh scripts/Makefile.am: Add STATIC_NSS_FLAGS, NON_THREADED_LIBS and ZLIB_DEPS to sed substitution list. scripts/mysql_config.sh: We can't use CLIENT_LIBS as in case when we use the bundled zlib it has a reference to $(top_builddir)/zlib. libs and libs_r now need to be specified explicitly. zlib/Makefile.am: Install libz.la in case it's used by MySQL: this way we guarantee that paths printed by mysql_config are valid in all cases. --- acinclude.m4 | 16 +++++++++++++--- configure.in | 16 ++++++++++------ scripts/Makefile.am | 3 +++ scripts/mysql_config.sh | 7 ++++--- zlib/Makefile.am | 2 +- 5 files changed, 31 insertions(+), 13 deletions(-) diff --git a/acinclude.m4 b/acinclude.m4 index d7e22332655..5ddd8952c42 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -194,6 +194,8 @@ dnl Define zlib paths to point at bundled zlib AC_DEFUN([MYSQL_USE_BUNDLED_ZLIB], [ ZLIB_INCLUDES="-I\$(top_srcdir)/zlib" ZLIB_LIBS="\$(top_builddir)/zlib/libz.la" +dnl Omit -L$pkglibdir as it's always in the list of mysql_config deps. +ZLIB_DEPS="-lz" zlib_dir="zlib" AC_SUBST([zlib_dir]) mysql_cv_compress="yes" @@ -235,8 +237,13 @@ dnl $prefix/lib. 
If zlib headers or binaries weren't found at $prefix, the dnl macro bails out with error. dnl dnl If the library was found, this function #defines HAVE_COMPRESS -dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include) and -dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz). +dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include), +dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz) and ZLIB_DEPS which is +dnl used in mysql_config and is always the same as ZLIB_LIBS except to +dnl when we use the bundled zlib. In the latter case ZLIB_LIBS points to the +dnl build dir ($top_builddir/zlib), while mysql_config must point to the +dnl installation dir ($pkglibdir), so ZLIB_DEPS is set to point to +dnl $pkglibdir. AC_DEFUN([MYSQL_CHECK_ZLIB_WITH_COMPRESS], [ AC_MSG_CHECKING([for zlib compression library]) @@ -285,7 +292,11 @@ case $SYSTEM_TYPE in ;; esac if test "$mysql_cv_compress" = "yes"; then + if test "x$ZLIB_DEPS" = "x"; then + ZLIB_DEPS="$ZLIB_LIBS" + fi AC_SUBST([ZLIB_LIBS]) + AC_SUBST([ZLIB_DEPS]) AC_SUBST([ZLIB_INCLUDES]) AC_DEFINE([HAVE_COMPRESS], [1], [Define to enable compression support]) fi @@ -1039,7 +1050,6 @@ AC_MSG_CHECKING(for OpenSSL) echo "You can't use the --all-static link option when using openssl." exit 1 fi - NON_THREADED_CLIENT_LIBS="$NON_THREADED_CLIENT_LIBS $openssl_libs" else AC_MSG_RESULT(no) if test ! -z "$openssl_includes" diff --git a/configure.in b/configure.in index caa42004736..665029accb3 100644 --- a/configure.in +++ b/configure.in @@ -924,9 +924,11 @@ if test "$ac_cv_header_termio_h" = "no" -a "$ac_cv_header_termios_h" = "no" then AC_CHECK_FUNC(gtty, , AC_CHECK_LIB(compat, gtty)) fi -# We make a special variable for client library's to avoid including -# thread libs in the client. -NON_THREADED_CLIENT_LIBS="$LIBS $ZLIB_LIBS" + +# We make a special variable for non-threaded version of LIBS to avoid +# including thread libs into non-threaded version of MySQL client library. +# Later in this script LIBS will be augmented with a threads library. +NON_THREADED_LIBS="$LIBS" AC_MSG_CHECKING([for int8]) case $SYSTEM_TYPE in @@ -1502,7 +1504,7 @@ then if test -f /usr/lib/libxnet.so -a "$SYSTEM_TYPE" = "sni-sysv4" then LIBS="-lxnet $LIBS" - NON_THREADED_CLIENT_LIBS="$NON_THREADED_CLIENT_LIBS -lxnet" + NON_THREADED_LIBS="-lxnet $NON_THREADED_LIBS" with_named_thread="-Kthread $LDFLAGS -lxnet" LD_FLAGS="" CFLAGS="-Kthread $CFLAGS" @@ -2826,7 +2828,7 @@ dnl This probably should be cleaned up more - for now the threaded dnl client is just using plain-old libs. sql_client_dirs="libmysql strings regex client" linked_client_targets="linked_libmysql_sources" -CLIENT_LIBS=$NON_THREADED_CLIENT_LIBS + if test "$THREAD_SAFE_CLIENT" != "no" then sql_client_dirs="libmysql_r $sql_client_dirs" @@ -2834,9 +2836,11 @@ then AC_DEFINE([THREAD_SAFE_CLIENT], [1], [Should be client be thread safe]) fi -CLIENT_LIBS="$CLIENT_LIBS $STATIC_NSS_FLAGS" +CLIENT_LIBS="$NON_THREADED_LIBS $openssl_libs $ZLIB_LIBS $STATIC_NSS_FLAGS" AC_SUBST(CLIENT_LIBS) +AC_SUBST(NON_THREADED_LIBS) +AC_SUBST(STATIC_NSS_FLAGS) AC_SUBST(sql_client_dirs) AC_SUBST(linked_client_targets) diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 71b70fc0e4a..d5337df35b1 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -140,6 +140,9 @@ SUFFIXES = .sh -e 's!@''IS_LINUX''@!@IS_LINUX@!' \ -e "s!@""CONF_COMMAND""@!@CONF_COMMAND@!" \ -e 's!@''MYSQLD_USER''@!@MYSQLD_USER@!' \ + -e 's!@''STATIC_NSS_FLAGS''@!@STATIC_NSS_FLAGS@!' \ + -e 's!@''NON_THREADED_LIBS''@!@NON_THREADED_LIBS@!' 
\ -e 's!@''ZLIB_DEPS''@!@ZLIB_DEPS@!' \ -e "s!@MAKE@!$(MAKE)!" \ $< > $@-t @CHMOD@ +x $@-t diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh index 90418de3d1d..a5c8af5ecb2 100644 --- a/scripts/mysql_config.sh +++ b/scripts/mysql_config.sh @@ -82,13 +82,14 @@ version='@VERSION@' socket='@MYSQL_UNIX_ADDR@' port='@MYSQL_TCP_PORT@' ldflags='@LDFLAGS@' -client_libs='@CLIENT_LIBS@' # Create options -libs="$ldflags -L$pkglibdir -lmysqlclient $client_libs" +libs="$ldflags -L$pkglibdir -lmysqlclient @ZLIB_DEPS@ @NON_THREADED_LIBS@" +libs="$libs @openssl_libs@ @STATIC_NSS_FLAGS@" libs=`echo "$libs" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'` -libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @LIBS@ @ZLIB_LIBS@ @openssl_libs@" + +libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @ZLIB_DEPS@ @LIBS@ @openssl_libs@" libs_r=`echo "$libs_r" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'` cflags="-I$pkgincludedir @CFLAGS@ " #note: end space! include="-I$pkgincludedir" diff --git a/zlib/Makefile.am b/zlib/Makefile.am index 58d3811cd7c..e94d184a841 100644 --- a/zlib/Makefile.am +++ b/zlib/Makefile.am @@ -16,7 +16,7 @@ # Process this file with automake to create Makefile.in -noinst_LTLIBRARIES=libz.la +pkglib_LTLIBRARIES=libz.la noinst_HEADERS= crc32.h deflate.h inffast.h inffixed.h inflate.h \ inftrees.h trees.h zconf.h zlib.h zutil.h From 5cf29b3b609f7c3789cb433fc4819b350aad8409 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 5 Feb 2005 16:05:46 +0200 Subject: [PATCH 38/45] Fixed bug detected by sp-tests Cleanup during reviews of new pushed code BUILD/compile-pentium-debug-max: Use --debug=full as default BUILD/compile-pentium-debug: Use --debug=full as default mysys/my_alloc.c: More debugging sql/item_func.cc: Cleanup new code Don't call insert_id() for last_insert_id(value) to avoid side effects sql/item_subselect.cc: Fixed DBUG output sql/sp_head.cc: Simple cleanup sql/sql_lex.cc: Moved usage of arguments first in lex_start to make their usage clearer Remove sl->expr_list.delete_elements() because: - It didn't do anything (delete_elements on a list of list is a no-op operation) - The deleted for loop used SELECT_LEX elements that were allocated in mysql_new_select() in sp-head, but freed when sphead->mem_root was freed. (delete sphead doesn't remove used SELECT_LEX elements from the global all_selects_list) sql/sql_parse.cc: More DBUG entries --- BUILD/compile-pentium-debug | 2 +- BUILD/compile-pentium-debug-max | 2 +- mysys/my_alloc.c | 6 ++++-- sql/item_func.cc | 13 +++++++------ sql/item_subselect.cc | 2 +- sql/sp_head.cc | 4 ++-- sql/sql_lex.cc | 14 +++++++------- sql/sql_parse.cc | 14 ++++++++------ 8 files changed, 31 insertions(+), 26 deletions(-) diff --git a/BUILD/compile-pentium-debug b/BUILD/compile-pentium-debug index 4a9d0e74599..7957caead29 100755 --- a/BUILD/compile-pentium-debug +++ b/BUILD/compile-pentium-debug @@ -1,7 +1,7 @@ #! /bin/sh path=`dirname $0` -. "$path/SETUP.sh" +.
"$path/SETUP.sh" $@ --with-debug=full extra_flags="$pentium_cflags $debug_cflags $max_cflags" c_warnings="$c_warnings $debug_extra_warnings" diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c index c14b2899b4b..e0d6288f76b 100644 --- a/mysys/my_alloc.c +++ b/mysys/my_alloc.c @@ -166,7 +166,8 @@ gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size) gptr point; reg1 USED_MEM *next= 0; reg2 USED_MEM **prev; - + DBUG_ENTER("alloc_root"); + DBUG_PRINT("enter",("root: 0x%lx", mem_root)); DBUG_ASSERT(alloc_root_inited(mem_root)); Size= ALIGN_SIZE(Size); @@ -213,7 +214,8 @@ gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size) mem_root->used= next; mem_root->first_block_usage= 0; } - return(point); + DBUG_PRINT("exit",("ptr: 0x%lx", (ulong) point)); + DBUG_RETURN(point); #endif } diff --git a/sql/item_func.cc b/sql/item_func.cc index 8ee1891eafd..134fb3be0e6 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2377,16 +2377,17 @@ longlong Item_func_release_lock::val_int() longlong Item_func_last_insert_id::val_int() { + THD *thd= current_thd; DBUG_ASSERT(fixed == 1); if (arg_count) { - longlong value=args[0]->val_int(); - current_thd->insert_id(value); - null_value=args[0]->null_value; + longlong value= args[0]->val_int(); + thd->insert_id(value); + null_value= args[0]->null_value; + return value; // Avoid side effect of insert_id() } - else - current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - return current_thd->insert_id(); + thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + return thd->insert_id(); } /* This function is just used to test speed of different functions */ diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 3ac75bfdd30..b5cb01494fa 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -53,7 +53,7 @@ void Item_subselect::init(st_select_lex *select_lex, { DBUG_ENTER("Item_subselect::init"); - DBUG_PRINT("subs", ("select_lex 0x%xl", (ulong) select_lex)); + DBUG_PRINT("enter", ("select_lex: 0x%x", (ulong) select_lex)); unit= select_lex->master_unit(); if (unit->item) diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 3f2969768c5..d52474998a8 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -424,10 +424,10 @@ sp_head::~sp_head() void sp_head::destroy() { - DBUG_ENTER("sp_head::destroy"); - DBUG_PRINT("info", ("name: %s", m_name.str)); sp_instr *i; LEX *lex; + DBUG_ENTER("sp_head::destroy"); + DBUG_PRINT("info", ("name: %s", m_name.str)); for (uint ip = 0 ; (i = get_instr(ip)) ; ip++) delete i; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 06e271333bf..0644ca5af68 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -115,9 +115,14 @@ void lex_free(void) void lex_start(THD *thd, uchar *buf,uint length) { LEX *lex= thd->lex; + DBUG_ENTER("lex_start"); + + lex->thd= lex->unit.thd= thd; + lex->buf= lex->ptr= buf; + lex->end_of_query= buf+length; + lex->unit.init_query(); lex->unit.init_select(); - lex->thd= lex->unit.thd= thd; lex->select_lex.init_query(); lex->value_list.empty(); lex->update_list.empty(); @@ -150,8 +155,6 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->empty_field_list_on_rset= 0; lex->select_lex.select_number= 1; lex->next_state=MY_LEX_START; - lex->buf= lex->ptr= buf; - lex->end_of_query=buf+length; lex->yylineno = 1; lex->in_comment=0; lex->length=0; @@ -173,14 +176,11 @@ void lex_start(THD *thd, uchar *buf,uint length) if (lex->spfuns.records) my_hash_reset(&lex->spfuns); + DBUG_VOID_RETURN; } void lex_end(LEX *lex) { - for (SELECT_LEX *sl= lex->all_selects_list; - sl; - sl= sl->next_select_in_list()) - 
sl->expr_list.delete_elements(); // If error when parsing sql-varargs x_free(lex->yacc_yyss); x_free(lex->yacc_yyvs); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index d309f58a37c..2fb90502863 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -4719,19 +4719,21 @@ bool mysql_new_select(LEX *lex, bool move_down) { SELECT_LEX *select_lex; + DBUG_ENTER("mysql_new_select"); + if (!(select_lex= new(lex->thd->mem_root) SELECT_LEX())) - return 1; + DBUG_RETURN(1); select_lex->select_number= ++lex->thd->select_number; select_lex->init_query(); select_lex->init_select(); select_lex->parent_lex= lex; if (move_down) { + SELECT_LEX_UNIT *unit; lex->subqueries= TRUE; /* first select_lex of subselect or derived table */ - SELECT_LEX_UNIT *unit; if (!(unit= new(lex->thd->mem_root) SELECT_LEX_UNIT())) - return 1; + DBUG_RETURN(1); unit->init_query(); unit->init_select(); @@ -4748,7 +4750,7 @@ mysql_new_select(LEX *lex, bool move_down) if (lex->current_select->order_list.first && !lex->current_select->braces) { my_error(ER_WRONG_USAGE, MYF(0), "UNION", "ORDER BY"); - return 1; + DBUG_RETURN(1); } select_lex->include_neighbour(lex->current_select); SELECT_LEX_UNIT *unit= select_lex->master_unit(); @@ -4760,7 +4762,7 @@ mysql_new_select(LEX *lex, bool move_down) fake SELECT_LEX for UNION processing */ if (!(fake= unit->fake_select_lex= new(lex->thd->mem_root) SELECT_LEX())) - return 1; + DBUG_RETURN(1); fake->include_standalone(unit, (SELECT_LEX_NODE**)&unit->fake_select_lex); fake->select_number= INT_MAX; @@ -4774,7 +4776,7 @@ mysql_new_select(LEX *lex, bool move_down) select_lex->include_global((st_select_lex_node**)&lex->all_selects_list); lex->current_select= select_lex; select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; - return 0; + DBUG_RETURN(0); } /* From 13e47c5eb9f371626adb43e0bb27b942dde96855 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 5 Feb 2005 15:47:06 +0100 Subject: [PATCH 39/45] Fix for bug#8315 NdbScanFilter cmp method only works for strings of exact word boundry length --- ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index 9778c938e0f..bc1700c12ad 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -1865,6 +1865,11 @@ int Dbtup::interpreterNextLab(Signal* signal, } else { + /* --------------------------------------------------------- */ + // If length of argument rounded to nearest word is + // the same as attribute size, use that as argument size + /* --------------------------------------------------------- */ + if ((((argLen + 3) >> 2) << 2) == attrLen) argLen= attrLen; res = (*sqlType.m_cmp)(cs, s1, attrLen, s2, argLen, true); } From d52afba56aed1623f971cd74c34c0014a0303ebc Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 5 Feb 2005 18:16:29 +0300 Subject: [PATCH 40/45] Outer joins cleanup: Remove TABLE::outer_join and use TABLE::maybe_null only (2nd patch after Monty's comments). sql/mysql_priv.h: Outer joins cleanup: Remove TABLE::outer_join and use TABLE::maybe_null only. sql/opt_range.cc: Outer joins cleanup: Remove TABLE::outer_join and use TABLE::maybe_null only. sql/sql_base.cc: Outer joins cleanup: Remove TABLE::outer_join and use TABLE::maybe_null only. sql/sql_select.cc: Outer joins cleanup: Remove TABLE::outer_join and use TABLE::maybe_null only. sql/table.h: Outer joins cleanup: * Remove TABLE::outer_join and use TABLE::maybe_null only. 
* Added comments. --- sql/mysql_priv.h | 2 +- sql/opt_range.cc | 2 +- sql/sql_base.cc | 5 ++--- sql/sql_select.cc | 4 ++-- sql/table.h | 14 +++++++++----- 5 files changed, 15 insertions(+), 12 deletions(-) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 6311b2d5f52..397f1ad4fb6 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1329,7 +1329,7 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->null_row= 0; table->status= STATUS_NO_RECORD; table->keys_in_use_for_query= table->s->keys_in_use; - table->maybe_null= test(table->outer_join= table_list->outer_join); + table->maybe_null= table_list->outer_join; table->tablenr= tablenr; table->map= (table_map) 1 << tablenr; table->force_index= table_list->force_index; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index e5799bfd509..ceb9f97bbbc 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3611,7 +3611,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, if (!value) // IS NULL or IS NOT NULL { - if (field->table->outer_join) // Can't use a key on this + if (field->table->maybe_null) // Can't use a key on this DBUG_RETURN(0); if (!maybe_null) // Not null field DBUG_RETURN(type == Item_func::ISNULL_FUNC ? &null_element : 0); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 8a7ae2dffc3..44e575858b8 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -854,7 +854,7 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) table->tablenr=thd->current_tablenr++; table->used_fields=0; table->const_table=0; - table->outer_join= table->null_row= table->maybe_null= table->force_index= 0; + table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; table->keys_in_use_for_query= share->keys_in_use; table->used_keys= share->keys_for_keyread; @@ -1078,7 +1078,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->tablenr=thd->current_tablenr++; table->used_fields=0; table->const_table=0; - table->outer_join= table->null_row= table->maybe_null= table->force_index= 0; + table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; table->keys_in_use_for_query= table->s->keys_in_use; table->insert_values= 0; @@ -1150,7 +1150,6 @@ bool reopen_table(TABLE *table,bool locked) tmp.tablenr= table->tablenr; tmp.used_fields= table->used_fields; tmp.const_table= table->const_table; - tmp.outer_join= table->outer_join; tmp.null_row= table->null_row; tmp.maybe_null= table->maybe_null; tmp.status= table->status; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 89b84f40eb6..68438f7a785 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -9181,7 +9181,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) tab->info="const row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; - if (!table->outer_join || error > 0) + if (!table->maybe_null || error > 0) DBUG_RETURN(error); } } @@ -9200,7 +9200,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) tab->info="unique row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; - if (!table->outer_join || error > 0) + if (!table->maybe_null || error > 0) DBUG_RETURN(error); } if (table->key_read) diff --git a/sql/table.h b/sql/table.h index 8240a3445ec..5d804a7837e 100644 --- a/sql/table.h +++ b/sql/table.h @@ -217,14 +217,18 @@ struct st_table { uint derived_select_number; int current_lock; /* Type of lock on table */ my_bool copy_blobs; /* copy_blobs when 
storing */ + + /* + 0 or JOIN_TYPE_{LEFT|RIGHT}. Currently this is only compared to 0. + If maybe_null !=0, this table is inner w.r.t. some outer join operation, + and null_row may be true. + */ + uint maybe_null; /* - Used in outer joins: if true, all columns are considered to have NULL - values, including columns declared as "not null". + If true, the current table row is considered to have all columns set to + NULL, including columns declared as "not null" (see maybe_null). */ my_bool null_row; - /* 0 or JOIN_TYPE_{LEFT|RIGHT}, same as TABLE_LIST::outer_join */ - my_bool outer_join; - my_bool maybe_null; /* true if (outer_join != 0) */ my_bool force_index; my_bool distinct,const_table,no_rows; my_bool key_read, no_keyread; From ac1e5aba61901d457bab083a4b40928f506637d0 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 5 Feb 2005 08:54:13 -0800 Subject: [PATCH 41/45] Update test result mysql-test/r/type_date.result: Update results --- mysql-test/r/type_date.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result index 3428b5969d9..e88eebffb55 100644 --- a/mysql-test/r/type_date.result +++ b/mysql-test/r/type_date.result @@ -99,7 +99,7 @@ DROP TABLE t1, t2, t3; CREATE TABLE t1 (y YEAR); INSERT INTO t1 VALUES ('abc'); Warnings: -Warning 1265 Data truncated for column 'y' at row 1 +Warning 1264 Out of range value adjusted for column 'y' at row 1 SELECT * FROM t1; y 0000 From 2c0acb32d0769dadf68bda4fd59f6dc70adcc422 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 5 Feb 2005 19:49:02 +0100 Subject: [PATCH 42/45] result update after 4.1->5.0 merge mysql-test/r/drop_temp_table.result: result update --- mysql-test/r/drop_temp_table.result | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/drop_temp_table.result b/mysql-test/r/drop_temp_table.result index 8a2a75f1723..163fc845e88 100644 --- a/mysql-test/r/drop_temp_table.result +++ b/mysql-test/r/drop_temp_table.result @@ -1,7 +1,9 @@ reset master; create database `drop-temp+table-test`; use `drop-temp+table-test`; +create temporary table shortn1 (a int); create temporary table `table:name` (a int); +create temporary table shortn2 (a int); select get_lock("a",10); get_lock("a",10) 1 @@ -10,9 +12,13 @@ get_lock("a",10) 1 show binlog events; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 4 Format_desc 1 95 Server ver: VERSION, Binlog ver: 4 -master-bin.000001 95 Query 1 213 create database `drop-temp+table-test` -master-bin.000001 213 Query 1 336 use `drop-temp+table-test`; create temporary table `table:name` (a int) -master-bin.000001 336 Query 1 494 use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`table:name` -master-bin.000001 494 Query 1 594 use `drop-temp+table-test`; DO RELEASE_LOCK("a") +master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4 +master-bin.000001 # Query 1 # create database `drop-temp+table-test` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn1 (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table `table:name` (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn2 (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn2` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF 
EXISTS `drop-temp+table-test`.`table:name` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn1` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DO RELEASE_LOCK("a") drop database `drop-temp+table-test`; From 3455bc53988cad19cabf204e2d0f4d3477f67b35 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 6 Feb 2005 13:06:12 +0200 Subject: [PATCH 43/45] fixed test 'subselect' in case when innodb is not compiled in (thanks HF who niticed it) mysql-test/r/subselect.result: test depends on innodb moved from 'subselect' to 'subselect_innodb' mysql-test/r/subselect_innodb.result: test depends on innodb moved from 'subselect' to 'subselect_innodb' mysql-test/t/subselect.test: test depends on innodb moved from 'subselect' to 'subselect_innodb' mysql-test/t/subselect_innodb.test: test depends on innodb moved from 'subselect' to 'subselect_innodb' --- mysql-test/r/subselect.result | 12 ------------ mysql-test/r/subselect_innodb.result | 12 ++++++++++++ mysql-test/t/subselect.test | 14 -------------- mysql-test/t/subselect_innodb.test | 14 ++++++++++++++ 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 437fd624ae1..03dcc23c919 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -2196,15 +2196,3 @@ ERROR 42S22: Reference 'xx' not supported (forward reference in item list) select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx from DUAL; ERROR 42S22: Reference 'xx' not supported (forward reference in item list) drop table t1; -CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; -CREATE TABLE t2 LIKE t1; -INSERT INTO t1 VALUES (1,1,1); -INSERT INTO t2 VALUES (1,1,1); -PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having -count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; -EXECUTE my_stmt; -b count(*) -EXECUTE my_stmt; -b count(*) -deallocate prepare my_stmt; -drop table t1,t2; diff --git a/mysql-test/r/subselect_innodb.result b/mysql-test/r/subselect_innodb.result index 0b813a07a1d..0666fd76661 100644 --- a/mysql-test/r/subselect_innodb.result +++ b/mysql-test/r/subselect_innodb.result @@ -140,3 +140,15 @@ id date1 coworkerid description sum_used sum_remaining comments 6 2004-01-01 1 test 22 33 comment 7 2004-01-01 1 test 22 33 comment drop table t1; +CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; +CREATE TABLE t2 LIKE t1; +INSERT INTO t1 VALUES (1,1,1); +INSERT INTO t2 VALUES (1,1,1); +PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having +count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; +EXECUTE my_stmt; +b count(*) +EXECUTE my_stmt; +b count(*) +deallocate prepare my_stmt; +drop table t1,t2; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index cdec080611d..55400dae0be 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -1465,17 +1465,3 @@ select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx; -- error 1247 select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx from DUAL; drop table t1; - -# -# cleaning up of results of subselects (BUG#8125) -# -CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL 
default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; -CREATE TABLE t2 LIKE t1; -INSERT INTO t1 VALUES (1,1,1); -INSERT INTO t2 VALUES (1,1,1); -PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having -count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; -EXECUTE my_stmt; -EXECUTE my_stmt; -deallocate prepare my_stmt; -drop table t1,t2; diff --git a/mysql-test/t/subselect_innodb.test b/mysql-test/t/subselect_innodb.test index aa7fe138876..5d796988178 100644 --- a/mysql-test/t/subselect_innodb.test +++ b/mysql-test/t/subselect_innodb.test @@ -145,3 +145,17 @@ SELECT DISTINCT FROM t1; select * from t1; drop table t1; + +# +# cleaning up of results of subselects (BUG#8125) +# +CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; +CREATE TABLE t2 LIKE t1; +INSERT INTO t1 VALUES (1,1,1); +INSERT INTO t2 VALUES (1,1,1); +PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having +count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; +EXECUTE my_stmt; +EXECUTE my_stmt; +deallocate prepare my_stmt; +drop table t1,t2; From b19ff40dda17c7ee6cfd3c7019c59cdb77fdbd6f Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 00:12:46 -0600 Subject: [PATCH 44/45] Do-solaris-pkg: Perl script to create Solaris installation packages. --- Build-tools/Do-solaris-pkg | 180 +++++++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 Build-tools/Do-solaris-pkg diff --git a/Build-tools/Do-solaris-pkg b/Build-tools/Do-solaris-pkg new file mode 100644 index 00000000000..5b7326f78e2 --- /dev/null +++ b/Build-tools/Do-solaris-pkg @@ -0,0 +1,180 @@ +#!/usr/bin/perl +# +# Script to create Solaris packages +# +$INTERACTIVE= 0; +$find = "/usr/bin/find"; +$pkgproto = "/usr/bin/pkgproto"; +$pkgmk = "/usr/bin/pkgmk -o"; +$pkgtrans = "/usr/bin/pkgtrans"; +$temp = "/tmp/prototype$$"; +$prototype = "prototype"; +$pkginfo = "pkginfo"; +($gid ,$pkg ,$uid ,$userInfo ,$email ,$quota ,$group ,$passwd +,$category ,$userHome ,$vendor ,$loginShell ,$pstamp ,$basedir)=(); + +$fullname = shift @ARGV; +$fullname or die "No package name was specified"; +-d $fullname or die "That directory is not present!"; + +$fullname =~ s,/+$,,; # Remove ending slash if any + +$pkgdir = `cd ../pkgs; pwd`; +$pwd = `pwd`; +if ($pwd =~ '\/usr\/local') { + $pwd = $`; +} +die "Wrong location, please cd to /usr/local/ and run again.\n" + if ($pwd eq ""); + +system ("$find . 
-print | $pkgproto > $temp"); +open (PREPROTO,"<$temp") or die "Unable to read prototype information ($!)\n"; +open (PROTO,">$prototype") or die "Unable to write file prototype ($!)\n"; +print PROTO "i pkginfo=./$pkginfo\n"; +while () { + # Read the prototype information from /tmp/prototype$$ + chomp; + $thisline = $_; + if ($thisline =~ " prototype " + or $thisline =~ " pkginfo ") { + # We don't need that line + } elsif ($thisline =~ "^[fd] ") { + # Change the ownership for files and directories + ($dir, $none, $file, $mode, $user, $group) = split / /,$thisline; + print PROTO "$dir $none $file $mode bin bin\n"; + } else { + # Symlinks and other stuff should be printed as well ofcourse + print PROTO "$thisline\n"; + } +} +close PROTO; +close PREPROTO; + +# Clean up +unlink $temp or warn "Unable to remove tempfile ($!)\n"; + +# Now we can start building the package +# +# First get some info + +$fullname =~ /^((mysql)-.+)-([\d\.]+)-.+$/ + or die "This name is not what I expected - \"$fullname\""; + +$default{"name"}= $2; +$default{"version"}= $3; +$default{"pkg"}= $1; +$default{"arch"} = `uname -m`; +chomp $default{"arch"}; +$default{"category"}= "application"; +$default{"vendor"}= "MySQL AB"; +$default{"email"}= "build\@mysql.com"; +$default{"pstamp"}= "MySQL AB Build Engineers"; +$os = `uname -r`; +$os =~ '\.'; +$os = "sol$'"; +chomp $os; +$default{"basedir"}= "/usr/local"; +$default{"packagename"}= $fullname; + +# Check for correctness of guessed values by userinput + +%questions = ( + pkg => "Please give the name for this package", + name => "Now enter the real name for this package", + arch => "What architecture did you build the package on?", + version => "Enter the version number of the package", + category => "What category does this package belong to?", + vendor => "Who is the vendor of this package?", + email => "Enter the email adress for contact", + pstamp => "Enter your own name", + basedir => "What is the basedir this package will install into?", + packagename => "How should I call the packagefile?", +); + +@vars = qw(pkg name arch version category vendor email pstamp basedir + packagename); +foreach $varname (@vars) { + getvar_noq($varname); +} + +if ($INTERACTIVE) { + while (!&chkvar()) { + print "\n"; + foreach $varname (@vars) { + getvar($varname); + } + @vars = qw(pkg name arch version category vendor email pstamp basedir + packagename); + } +} +$classes = "none"; + +# Create the pkginfo file + +print "\nNow creating $pkginfo file\n"; +open (PKGINFO,">$pkginfo") || die "Unable to open $pkginfo for writing ($!)\n"; +print PKGINFO "PKG=\"$pkg\"\n"; +print PKGINFO "NAME=\"$name\"\n"; +print PKGINFO "ARCH=\"$arch\"\n"; +print PKGINFO "VERSION=\"$version\"\n"; +print PKGINFO "CATEGORY=\"$category\"\n"; +print PKGINFO "VENDOR=\"$vendor\"\n"; +print PKGINFO "EMAIL=\"$email\"\n"; +print PKGINFO "PSTAMP=\"$pstamp\"\n"; +print PKGINFO "BASEDIR=\"$basedir\"\n"; +print PKGINFO "CLASSES=\"$classes\"\n"; +close PKGINFO; +print "Done.\n"; + +# Build and zip the package + +print "Building package\n"; +system ("$pkgmk -r `pwd`"); +system ("(cd /var/spool/pkg; $pkgtrans -s -o `pwd` /tmp/$packagename $pkg)"); +system ("gzip /tmp/$packagename"); + +# Clean-up the spool area +system ("(cd /var/spool/pkg; rm -rf $pkg)"); +unlink $pkginfo; +unlink $prototype; +system ("mv /tmp/${packagename}.gz $pkgdir"); +print "Done. 
(~/packaging/pkgs/$packagename.gz)\n"; +# The subroutines +sub chkvar { + print "\n"; + + print "PKG=$pkg\n"; + print "NAME=$name\n"; + print "ARCH=$arch\n"; + print "VERSION=$version\n"; + print "CATEGORY=$category\n"; + print "VENDOR=$vendor\n"; + print "EMAIL=$email\n"; + print "PSTAMP=$pstamp\n"; + print "BASEDIR=$basedir\n"; + print "PACKAGENAME=$packagename\n"; + + + print "\nIs this information correct? [Y/n]: "; + my $answer= ; + chomp $answer; + $answer= 'Y' if ($answer eq ""); + $answer= uc $answer; + my $res= ($answer eq 'Y')? 1 : 0; + return($res); +} + +sub getvar_noq { + my $questionname = "@_"; + $$questionname = $default{$questionname}; +} + +sub getvar { + my $questionname = "@_"; + my $ucquestionname= uc $questionname; + print "$ucquestionname: $questions{$questionname} [$default{\"$questionname\"}]: "; + my $answer = ; + chomp $answer; + $$questionname = $answer; + $$questionname = $default{$questionname} if ($$questionname eq ""); +} From d3a6f130e212d3a306c624ee777704b30f9ceef8 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 01:27:58 -0600 Subject: [PATCH 45/45] Do-solaris-pkg: Deposit the new .pkg.gz into the ~/$hostname/ directory Build-tools/Do-solaris-pkg: Deposit the new .pkg.gz into the ~/$hostname/ directory --- Build-tools/Do-solaris-pkg | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Build-tools/Do-solaris-pkg b/Build-tools/Do-solaris-pkg index 5b7326f78e2..374113b28a1 100644 --- a/Build-tools/Do-solaris-pkg +++ b/Build-tools/Do-solaris-pkg @@ -3,6 +3,7 @@ # Script to create Solaris packages # $INTERACTIVE= 0; +$hostname= `hostname`; $find = "/usr/bin/find"; $pkgproto = "/usr/bin/pkgproto"; $pkgmk = "/usr/bin/pkgmk -o"; @@ -19,7 +20,7 @@ $fullname or die "No package name was specified"; $fullname =~ s,/+$,,; # Remove ending slash if any -$pkgdir = `cd ../pkgs; pwd`; +$pkgdir= `cd ../$hostname; pwd`; $pwd = `pwd`; if ($pwd =~ '\/usr\/local') { $pwd = $`; @@ -74,7 +75,7 @@ $os =~ '\.'; $os = "sol$'"; chomp $os; $default{"basedir"}= "/usr/local"; -$default{"packagename"}= $fullname; +$default{"packagename"}= $fullname . '.pkg'; # Check for correctness of guessed values by userinput @@ -138,7 +139,7 @@ system ("(cd /var/spool/pkg; rm -rf $pkg)"); unlink $pkginfo; unlink $prototype; system ("mv /tmp/${packagename}.gz $pkgdir"); -print "Done. (~/packaging/pkgs/$packagename.gz)\n"; +print "Done. (~/$hostname/$packagename.gz)\n"; # The subroutines sub chkvar { print "\n";
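
Editor's note: as a minimal, standalone sketch of the per-host destination-directory logic that the final patch (45/45) adds to Build-tools/Do-solaris-pkg, the snippet below shows one way the ~/$hostname/ drop path could be derived. It is not part of the patch series itself; the chomp of the captured hostname and the -d existence check are assumptions added here for illustration (the patch only assigns $hostname, repoints $pkgdir, appends '.pkg' to the package name, and updates the final "Done." message).

    #!/usr/bin/perl
    # Illustrative sketch only -- not part of the patch series above.
    use strict;
    use warnings;

    # Backticks keep the trailing newline, so strip it before building paths.
    my $hostname = `hostname`;
    chomp $hostname;

    # Resolve the per-host package directory next to the build tree,
    # mirroring the `cd ../$hostname; pwd` idiom used by the script.
    my $pkgdir = `cd ../$hostname 2>/dev/null && pwd`;
    chomp $pkgdir;
    die "no per-host package directory ../$hostname\n" unless $pkgdir and -d $pkgdir;

    # A finished package would then end up as ~/$hostname/<packagename>.gz,
    # which is what the updated "Done." message reports.
    print "packages for this host land in $pkgdir\n";

Chomping the backtick output mirrors what the script already does with `uname -m` and `uname -r` before using them in pkginfo fields.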