diff --git a/.bzrignore b/.bzrignore index f32b85ce6af..6abf5d3eb8b 100644 --- a/.bzrignore +++ b/.bzrignore @@ -34,6 +34,8 @@ *.vcproj */*.dir/* */*_pure_*warnings +*/.deps +*/.libs/* */.pure */debug/* */release/* @@ -1240,6 +1242,7 @@ mysql-test/gmon.out mysql-test/install_test_db mysql-test/mtr mysql-test/mysql-test-run +mysql-test/mysql-test-run-shell mysql-test/mysql-test-run.log mysql-test/mysql_test_run_new mysql-test/ndb/ndbcluster @@ -1731,6 +1734,7 @@ netware/.deps/my_manage.Po netware/.deps/mysql_install_db.Po netware/.deps/mysql_test_run.Po netware/.deps/mysqld_safe.Po +netware/libmysql.imp pack_isam/*.ds? perror/*.ds? perror/*.vcproj diff --git a/BUILD/compile-solaris-sparc b/BUILD/compile-solaris-sparc index 0c05bf8a101..db46fc4db27 100755 --- a/BUILD/compile-solaris-sparc +++ b/BUILD/compile-solaris-sparc @@ -1,11 +1,7 @@ -#! /bin/sh - -gmake -k clean || true -/bin/rm -f */.deps/*.P config.cache - +#!/usr/bin/bash path=`dirname $0` -. "$path/autorun.sh" +. "$path/SETUP.sh" +extra_flags="" +extra_configs="$max_configs" -CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa" CXX=gcc CXXFLAGS="-Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa -g" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client - -gmake -j 4 +. "$path/FINISH.sh" diff --git a/BUILD/compile-solaris-sparc-debug b/BUILD/compile-solaris-sparc-debug index 3384b623ccb..e92f7fecc84 100755 --- a/BUILD/compile-solaris-sparc-debug +++ b/BUILD/compile-solaris-sparc-debug @@ -1,11 +1,7 @@ -#! /bin/sh - -gmake -k clean || true -/bin/rm -f */.deps/*.P config.cache - +#!/usr/bin/bash path=`dirname $0` -. "$path/autorun.sh" - -CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa" CXX=gcc CXXFLAGS="-Wimplicit -Wreturn-type -Wid-clash-51 -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -O3 -fno-omit-frame-pointer -mcpu=v8 -Wa,-xarch=v8plusa -g" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-debug +. "$path/SETUP.sh" +extra_flags="$debug_cflags" +extra_configs="$debug_configs $max_configs" -gmake -j 4 +. 
"$path/FINISH.sh" diff --git a/BitKeeper/etc/collapsed b/BitKeeper/etc/collapsed index d4d681937a2..79836bb030e 100644 --- a/BitKeeper/etc/collapsed +++ b/BitKeeper/etc/collapsed @@ -2,3 +2,8 @@ 44ec850ac2k4y2Omgr92GiWPBAVKGQ 44edb86b1iE5knJ97MbliK_3lCiAXA 44f33f3aj5KW5qweQeekY1LU0E9ZCg +45001f7c3b2hhCXDKfUvzkX9TNe6VA +45002051rHJfMEXAIMiAZV0clxvKSA +4513d8e4Af4dQWuk13sArwofRgFDQw +4519a6c5BVUxEHTf5iJnjZkixMBs8g +451ab499rgdjXyOnUDqHu-wBDoS-OQ diff --git a/BitKeeper/etc/config b/BitKeeper/etc/config index 6d06edd193e..1a027813ff4 100644 --- a/BitKeeper/etc/config +++ b/BitKeeper/etc/config @@ -75,5 +75,6 @@ hours: [tomas:]checkout:get [guilhem:]checkout:get [pekka:]checkout:get +[msvensson:]checkout:get checkout:edit eoln:unix diff --git a/CMakeLists.txt b/CMakeLists.txt index 9814b80793c..3ea2dc1fbe6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -146,5 +146,7 @@ ADD_SUBDIRECTORY(sql) ADD_SUBDIRECTORY(server-tools/instance-manager) ADD_SUBDIRECTORY(libmysql) ADD_SUBDIRECTORY(tests) -ADD_SUBDIRECTORY(libmysqld) -ADD_SUBDIRECTORY(libmysqld/examples) + +# disable libmysqld until it's fixed, so we can use Cmake 2.2 and 2.4 +#ADD_SUBDIRECTORY(libmysqld) +#ADD_SUBDIRECTORY(libmysqld/examples) diff --git a/Docs/Makefile.am b/Docs/Makefile.am index 3e81cc8b7e1..40e987bb254 100644 --- a/Docs/Makefile.am +++ b/Docs/Makefile.am @@ -16,7 +16,7 @@ noinst_SCRIPTS = generate-text-files.pl -EXTRA_DIST = $(noinst_SCRIPTS) mysql.info INSTALL-BINARY +EXTRA_DIST = $(noinst_SCRIPTS) manual.chm mysql.info INSTALL-BINARY TXT_FILES= ../INSTALL-SOURCE ../INSTALL-WIN-SOURCE ../EXCEPTIONS-CLIENT \ INSTALL-BINARY ../support-files/MacOSX/ReadMe.txt diff --git a/Docs/manual.chm b/Docs/manual.chm new file mode 100644 index 00000000000..28c3e1b5a86 --- /dev/null +++ b/Docs/manual.chm @@ -0,0 +1,14 @@ + +********************************************************* + +This is a dummy placeholder file for "manual.chm" in the +MySQL source trees. + +Note, that the documentation has been moved into a separate +BitKeeper source tree named "mysqldoc" - do not attempt to edit this +file! All changes to it should be done in the mysqldoc tree. + +This dummy file is being replaced with the actual file from the +mysqldoc tree when building the official source distribution. + +********************************************************* diff --git a/Makefile.am b/Makefile.am index 4adc06acad7..a0d19e8271d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -24,11 +24,11 @@ EXTRA_DIST = INSTALL-SOURCE INSTALL-WIN-SOURCE \ SUBDIRS = . 
include @docs_dirs@ @zlib_dir@ \ @readline_topdir@ sql-common \ @thread_dirs@ pstack \ - @sql_union_dirs@ storage plugin \ + @sql_union_dirs@ unittest storage plugin \ @sql_server@ scripts @man_dirs@ tests \ netware @libmysqld_dirs@ \ mysql-test support-files @tools_dirs@ \ - unittest win + win DIST_SUBDIRS = $(SUBDIRS) BUILD @@ -114,7 +114,7 @@ test-unit: test-ps: cd mysql-test ; \ - ./mysql-test-run.pl $(force) --ps-protocol --mysqld=--binlog-format=statement + ./mysql-test-run.pl $(force) --ps-protocol --mysqld=--binlog-format=mixed test-nr: cd mysql-test ; \ @@ -125,6 +125,10 @@ test-pr: ./mysql-test-run.pl $(force) --ps-protocol --mysqld=--binlog-format=row test-ns: + cd mysql-test ; \ + ./mysql-test-run.pl $(force) --mysqld=--binlog-format=mixed + +test-binlog-statement: cd mysql-test ; \ ./mysql-test-run.pl $(force) --mysqld=--binlog-format=statement diff --git a/bdb/CMakeLists.txt b/bdb/CMakeLists.txt deleted file mode 100755 index c5dd60852d4..00000000000 --- a/bdb/CMakeLists.txt +++ /dev/null @@ -1,44 +0,0 @@ -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/bdb/build_win32 - ${CMAKE_SOURCE_DIR}/bdb/dbinc - ${CMAKE_SOURCE_DIR}/bdb) - -# BDB needs a number of source files that are auto-generated by the unix -# configure. So to build BDB, it is necessary to copy these over to the Windows -# bitkeeper tree, or to use a source .tar.gz package which already has these -# files. -ADD_LIBRARY(bdb btree/bt_compare.c btree/bt_conv.c btree/bt_curadj.c btree/bt_cursor.c - btree/bt_delete.c btree/bt_method.c btree/bt_open.c btree/bt_put.c btree/bt_rec.c - btree/bt_reclaim.c btree/bt_recno.c btree/bt_rsearch.c btree/bt_search.c - btree/bt_split.c btree/bt_stat.c btree/bt_upgrade.c btree/bt_verify.c btree/btree_auto.c - db/crdel_auto.c db/crdel_rec.c db/db.c db/db_am.c db/db_auto.c common/db_byteorder.c - db/db_cam.c db/db_conv.c db/db_dispatch.c db/db_dup.c common/db_err.c common/db_getlong.c - common/db_idspace.c db/db_iface.c db/db_join.c common/db_log2.c db/db_meta.c - db/db_method.c db/db_open.c db/db_overflow.c db/db_pr.c db/db_rec.c db/db_reclaim.c - db/db_remove.c db/db_rename.c db/db_ret.c env/db_salloc.c env/db_shash.c db/db_truncate.c - db/db_upg.c db/db_upg_opd.c db/db_vrfy.c db/db_vrfyutil.c dbm/dbm.c dbreg/dbreg.c - dbreg/dbreg_auto.c dbreg/dbreg_rec.c dbreg/dbreg_util.c env/env_file.c env/env_method.c - env/env_open.c env/env_recover.c env/env_region.c fileops/fileops_auto.c fileops/fop_basic.c - fileops/fop_rec.c fileops/fop_util.c hash/hash.c hash/hash_auto.c hash/hash_conv.c - hash/hash_dup.c hash/hash_func.c hash/hash_meta.c hash/hash_method.c hash/hash_open.c - hash/hash_page.c hash/hash_rec.c hash/hash_reclaim.c hash/hash_stat.c hash/hash_upgrade.c - hash/hash_verify.c hmac/hmac.c hsearch/hsearch.c lock/lock.c lock/lock_deadlock.c - lock/lock_method.c lock/lock_region.c lock/lock_stat.c lock/lock_util.c log/log.c - log/log_archive.c log/log_compare.c log/log_get.c log/log_method.c log/log_put.c - mp/mp_alloc.c mp/mp_bh.c mp/mp_fget.c mp/mp_fopen.c mp/mp_fput.c - mp/mp_fset.c mp/mp_method.c mp/mp_region.c mp/mp_register.c mp/mp_stat.c mp/mp_sync.c - mp/mp_trickle.c mutex/mut_tas.c mutex/mut_win32.c mutex/mutex.c os_win32/os_abs.c - os/os_alloc.c os_win32/os_clock.c os_win32/os_config.c os_win32/os_dir.c os_win32/os_errno.c - os_win32/os_fid.c os_win32/os_fsync.c os_win32/os_handle.c os/os_id.c os_win32/os_map.c - 
os/os_method.c os/os_oflags.c os_win32/os_open.c os/os_region.c os_win32/os_rename.c - os/os_root.c os/os_rpath.c os_win32/os_rw.c os_win32/os_seek.c os_win32/os_sleep.c - os_win32/os_spin.c os_win32/os_stat.c os/os_tmpdir.c os_win32/os_type.c os/os_unlink.c - qam/qam.c qam/qam_auto.c qam/qam_conv.c qam/qam_files.c qam/qam_method.c qam/qam_open.c - qam/qam_rec.c qam/qam_stat.c qam/qam_upgrade.c qam/qam_verify.c rep/rep_method.c - rep/rep_record.c rep/rep_region.c rep/rep_util.c hmac/sha1.c - clib/strcasecmp.c txn/txn.c txn/txn_auto.c txn/txn_method.c txn/txn_rec.c - txn/txn_recover.c txn/txn_region.c txn/txn_stat.c txn/txn_util.c common/util_log.c - common/util_sig.c xa/xa.c xa/xa_db.c xa/xa_map.c) - diff --git a/client/mysql.cc b/client/mysql.cc index b5f28090283..1dbdeb8be97 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -49,6 +49,9 @@ const char *VER= "14.12"; /* Don't try to make a nice table if the data is too big */ #define MAX_COLUMN_LENGTH 1024 +/* Buffer to hold 'version' and 'version_comment' */ +#define MAX_SERVER_VERSION_LENGTH 128 + gptr sql_alloc(unsigned size); // Don't use mysqld alloc for these void sql_element_free(void *ptr); #include "sql_string.h" @@ -207,6 +210,7 @@ static int com_nopager(String *str, char*), com_pager(String *str, char*), static int read_and_execute(bool interactive); static int sql_connect(char *host,char *database,char *user,char *password, uint silent); +static const char *server_version_string(MYSQL *mysql); static int put_info(const char *str,INFO_TYPE info,uint error=0, const char *sql_state=0); static int put_error(MYSQL *mysql); @@ -430,8 +434,8 @@ int main(int argc,char *argv[]) put_info("Welcome to the MySQL monitor. Commands end with ; or \\g.", INFO_INFO); sprintf((char*) glob_buffer.ptr(), - "Your MySQL connection id is %lu to server version: %s\n", - mysql_thread_id(&mysql),mysql_get_server_info(&mysql)); + "Your MySQL connection id is %lu\nServer version: %s\n", + mysql_thread_id(&mysql), server_version_string(&mysql)); put_info((char*) glob_buffer.ptr(),INFO_INFO); #ifdef HAVE_READLINE @@ -2493,9 +2497,14 @@ print_table_data_xml(MYSQL_RES *result) { tee_fprintf(PAGER, "\t"); - xmlencode_print(cur[i], lengths[i]); - tee_fprintf(PAGER, "\n"); + if (cur[i]) + { + tee_fprintf(PAGER, "\">"); + xmlencode_print(cur[i], lengths[i]); + tee_fprintf(PAGER, "\n"); + } + else + tee_fprintf(PAGER, "\" xsi:nil=\"true\" />\n"); } (void) tee_fputs(" \n", PAGER); } @@ -2896,7 +2905,7 @@ com_connect(String *buffer, char *line) bzero(buff, sizeof(buff)); if (buffer) { - strmake(buff, line, sizeof(buff)); + strmake(buff, line, sizeof(buff) - 1); tmp= get_arg(buff, 0); if (tmp && *tmp) { @@ -3011,7 +3020,7 @@ com_use(String *buffer __attribute__((unused)), char *line) int select_db; bzero(buff, sizeof(buff)); - strmov(buff, line); + strmake(buff, line, sizeof(buff) - 1); tmp= get_arg(buff, 0); if (!tmp || !*tmp) { @@ -3321,16 +3330,13 @@ com_status(String *buffer __attribute__((unused)), tee_fprintf(stdout, "Using outfile:\t\t'%s'\n", opt_outfile ? 
outfile : ""); #endif tee_fprintf(stdout, "Using delimiter:\t%s\n", delimiter); - tee_fprintf(stdout, "Server version:\t\t%s\n", mysql_get_server_info(&mysql)); + tee_fprintf(stdout, "Server version:\t\t%s\n", server_version_string(&mysql)); tee_fprintf(stdout, "Protocol version:\t%d\n", mysql_get_proto_info(&mysql)); tee_fprintf(stdout, "Connection:\t\t%s\n", mysql_get_host_info(&mysql)); if ((id= mysql_insert_id(&mysql))) tee_fprintf(stdout, "Insert id:\t\t%s\n", llstr(id, buff)); - /* - Don't remove "limit 1", - it is protection againts SQL_SELECT_LIMIT=0 - */ + /* "limit 1" is protection against SQL_SELECT_LIMIT=0 */ if (!mysql_query(&mysql,"select @@character_set_client, @@character_set_connection, @@character_set_server, @@character_set_database limit 1") && (result=mysql_use_result(&mysql))) { @@ -3395,6 +3401,39 @@ select_limit, max_join_size); return 0; } +static const char * +server_version_string(MYSQL *mysql) +{ + static char buf[MAX_SERVER_VERSION_LENGTH] = ""; + + /* Only one thread calls this, so no synchronization is needed */ + if (buf[0] == '\0') + { + char *bufp = buf; + MYSQL_RES *result; + MYSQL_ROW cur; + + bufp = strnmov(buf, mysql_get_server_info(mysql), sizeof buf); + + /* "limit 1" is protection against SQL_SELECT_LIMIT=0 */ + if (!mysql_query(mysql, "select @@version_comment limit 1") && + (result = mysql_use_result(mysql))) + { + MYSQL_ROW cur = mysql_fetch_row(result); + if (cur && cur[0]) + { + bufp = strxnmov(bufp, sizeof buf - (bufp - buf), " ", cur[0], NullS); + } + mysql_free_result(result); + } + + /* str*nmov doesn't guarantee NUL-termination */ + if (bufp == buf + sizeof buf) + buf[sizeof buf - 1] = '\0'; + } + + return buf; +} static int put_info(const char *str,INFO_TYPE info_type, uint error, const char *sqlstate) @@ -3516,11 +3555,11 @@ void tee_puts(const char *s, FILE *file) { NETWARE_YIELD; fputs(s, file); - fputs("\n", file); + fputc('\n', file); if (opt_outfile) { fputs(s, OUTFILE); - fputs("\n", OUTFILE); + fputc('\n', OUTFILE); } } diff --git a/client/mysqldump.c b/client/mysqldump.c index f1e1d549f6a..2d2961e465c 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -671,13 +671,13 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), tty_password=1; break; case 'r': - if (!(md_result_file = my_fopen(argument, O_WRONLY | FILE_BINARY, + if (!(md_result_file= my_fopen(argument, O_WRONLY | FILE_BINARY, MYF(MY_WME)))) exit(1); break; case 'W': #ifdef __WIN__ - opt_protocol = MYSQL_PROTOCOL_PIPE; + opt_protocol= MYSQL_PROTOCOL_PIPE; #endif break; case 'N': @@ -692,7 +692,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), #include case 'V': print_version(); exit(0); case 'X': - opt_xml = 1; + opt_xml= 1; extended_insert= opt_drop= opt_lock= opt_disable_keys= opt_autocommit= opt_create_db= 0; break; @@ -1582,7 +1582,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, const char *insert_option; char name_buff[NAME_LEN+3],table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3], query_buff[QUERY_LENGTH]; - FILE *sql_file = md_result_file; + FILE *sql_file= md_result_file; int len; MYSQL_RES *result; MYSQL_ROW row; @@ -1626,7 +1626,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, opt_quoted_table= quote_name(table, table_buff2, 0); if (opt_order_by_primary) - order_by = primary_key_fields(result_table); + order_by= primary_key_fields(result_table); if (!opt_xml && !mysql_query_with_error_report(mysql, 0, query_buff)) { @@ -1678,7 
+1678,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, field= mysql_fetch_field_direct(result, 0); if (strcmp(field->name, "View") == 0) { - char *scv_buff = NULL; + char *scv_buff= NULL; verbose_msg("-- It's a view, create dummy table for view\n"); @@ -1715,7 +1715,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR)); safe_exit(EX_MYSQLERR); - DBUG_RETURN(0); + DBUG_RETURN(0); } else my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR)); @@ -2085,7 +2085,7 @@ static void dump_triggers_for_table (char *table, char *db) char name_buff[NAME_LEN*4+3], table_buff[NAME_LEN*2+3]; char query_buff[QUERY_LENGTH]; uint old_opt_compatible_mode=opt_compatible_mode; - FILE *sql_file = md_result_file; + FILE *sql_file= md_result_file; MYSQL_RES *result; MYSQL_ROW row; @@ -2329,15 +2329,15 @@ static void dump_table(char *table, char *db) end= strmov(end,buff); if (where || order_by) { - query = alloc_query_str((ulong) ((end - query) + 1 + + query= alloc_query_str((ulong) ((end - query) + 1 + (where ? strlen(where) + 7 : 0) + (order_by ? strlen(order_by) + 10 : 0))); - end = strmov(query, query_buf); + end= strmov(query, query_buf); if (where) - end = strxmov(end, " WHERE ", where, NullS); + end= strxmov(end, " WHERE ", where, NullS); if (order_by) - end = strxmov(end, " ORDER BY ", order_by, NullS); + end= strxmov(end, " ORDER BY ", order_by, NullS); } if (mysql_real_query(mysql, query, (uint) (end - query))) { @@ -2358,10 +2358,10 @@ static void dump_table(char *table, char *db) result_table); if (where || order_by) { - query = alloc_query_str((ulong) (strlen(query) + 1 + + query= alloc_query_str((ulong) (strlen(query) + 1 + (where ? strlen(where) + 7 : 0) + (order_by ? strlen(order_by) + 10 : 0))); - end = strmov(query, query_buf); + end= strmov(query, query_buf); if (where) { @@ -2370,7 +2370,7 @@ static void dump_table(char *table, char *db) fprintf(md_result_file, "-- WHERE: %s\n", where); check_io(md_result_file); } - end = strxmov(end, " WHERE ", where, NullS); + end= strxmov(end, " WHERE ", where, NullS); } if (order_by) { @@ -2379,7 +2379,7 @@ static void dump_table(char *table, char *db) fprintf(md_result_file, "-- ORDER BY: %s\n", order_by); check_io(md_result_file); } - end = strxmov(end, " ORDER BY ", order_by, NullS); + end= strxmov(end, " ORDER BY ", order_by, NullS); } } if (!opt_xml && !opt_compact) @@ -2455,12 +2455,12 @@ static void dump_table(char *table, char *db) check_io(md_result_file); } - for (i = 0; i < mysql_num_fields(res); i++) + for (i= 0; i < mysql_num_fields(res); i++) { int is_blob; ulong length= lengths[i]; - if (!(field = mysql_fetch_field(res))) + if (!(field= mysql_fetch_field(res))) { my_snprintf(query, QUERY_LENGTH, "%s: Not enough fields from table %s! 
Aborting.\n", @@ -2532,7 +2532,7 @@ static void dump_table(char *table, char *db) else { /* change any strings ("inf", "-inf", "nan") into NULL */ - char *ptr = row[i]; + char *ptr= row[i]; if (my_isalpha(charset_info, *ptr) || (*ptr == '-' && my_isalpha(charset_info, ptr[1]))) dynstr_append(&extended_row, "NULL"); @@ -2592,7 +2592,7 @@ static void dump_table(char *table, char *db) else { /* change any strings ("inf", "-inf", "nan") into NULL */ - char *ptr = row[i]; + char *ptr= row[i]; if (opt_xml) { print_xml_tag1(md_result_file, "\t\t", "field name=", @@ -2638,10 +2638,10 @@ static void dump_table(char *table, char *db) { ulong row_length; dynstr_append(&extended_row,")"); - row_length = 2 + extended_row.length; + row_length= 2 + extended_row.length; if (total_length + row_length < opt_net_buffer_length) { - total_length += row_length; + total_length+= row_length; fputc(',',md_result_file); /* Always row break */ fputs(extended_row.str,md_result_file); } @@ -2653,7 +2653,7 @@ static void dump_table(char *table, char *db) fputs(insert_pat.str,md_result_file); fputs(extended_row.str,md_result_file); - total_length = row_length+init_length; + total_length= row_length+init_length; } check_io(md_result_file); } @@ -2718,15 +2718,15 @@ err: static char *getTableName(int reset) { - static MYSQL_RES *res = NULL; + static MYSQL_RES *res= NULL; MYSQL_ROW row; if (!res) { - if (!(res = mysql_list_tables(mysql,NullS))) + if (!(res= mysql_list_tables(mysql,NullS))) return(NULL); } - if ((row = mysql_fetch_row(res))) + if ((row= mysql_fetch_row(res))) return((char*) row[0]); if (reset) @@ -2734,7 +2734,7 @@ static char *getTableName(int reset) else { mysql_free_result(res); - res = NULL; + res= NULL; } return(NULL); } /* getTableName */ @@ -2889,7 +2889,7 @@ static int dump_all_databases() if (mysql_query_with_error_report(mysql, &tableres, "SHOW DATABASES")) return 1; - while ((row = mysql_fetch_row(tableres))) + while ((row= mysql_fetch_row(tableres))) { if (dump_all_tables_in_db(row[0])) result=1; @@ -2897,13 +2897,13 @@ static int dump_all_databases() if (seen_views) { if (mysql_query(mysql, "SHOW DATABASES") || - !(tableres = mysql_store_result(mysql))) + !(tableres= mysql_store_result(mysql))) { my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s", MYF(0), mysql_error(mysql)); return 1; } - while ((row = mysql_fetch_row(tableres))) + while ((row= mysql_fetch_row(tableres))) { if (dump_all_views_in_db(row[0])) result=1; @@ -3349,7 +3349,7 @@ static int do_show_master_status(MYSQL *mysql_con) } else { - row = mysql_fetch_row(master); + row= mysql_fetch_row(master); if (row && row[0] && row[1]) { /* SHOW MASTER STATUS reports file and position */ @@ -3476,7 +3476,7 @@ static void print_value(FILE *file, MYSQL_RES *result, MYSQL_ROW row, MYSQL_FIELD *field; mysql_field_seek(result, 0); - for ( ; (field = mysql_fetch_field(result)) ; row++) + for ( ; (field= mysql_fetch_field(result)) ; row++) { if (!strcmp(field->name,name)) { @@ -3604,17 +3604,19 @@ char check_if_ignore_table(const char *table_name, char *table_type) static char *primary_key_fields(const char *table_name) { - MYSQL_RES *res = NULL; + MYSQL_RES *res= NULL; MYSQL_ROW row; /* SHOW KEYS FROM + table name * 2 (escaped) + 2 quotes + \0 */ char show_keys_buff[15 + NAME_LEN * 2 + 3]; - uint result_length = 0; - char *result = 0; + uint result_length= 0; + char *result= 0; + char buff[NAME_LEN * 2 + 3]; + char *quoted_field; my_snprintf(show_keys_buff, sizeof(show_keys_buff), "SHOW KEYS FROM %s", table_name); if 
(mysql_query(mysql, show_keys_buff) || - !(res = mysql_store_result(mysql))) + !(res= mysql_store_result(mysql))) { fprintf(stderr, "Warning: Couldn't read keys from table %s;" " records are NOT sorted (%s)\n", @@ -3629,12 +3631,14 @@ static char *primary_key_fields(const char *table_name) * row, and UNIQUE keys come before others. So we only need to check * the first key, not all keys. */ - if ((row = mysql_fetch_row(res)) && atoi(row[1]) == 0) + if ((row= mysql_fetch_row(res)) && atoi(row[1]) == 0) { /* Key is unique */ do - result_length += strlen(row[4]) + 1; /* + 1 for ',' or \0 */ - while ((row = mysql_fetch_row(res)) && atoi(row[3]) > 1); + { + quoted_field= quote_name(row[4], buff, 0); + result_length+= strlen(quoted_field) + 1; /* + 1 for ',' or \0 */ + } while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1); } /* Build the ORDER BY clause result */ @@ -3642,17 +3646,21 @@ static char *primary_key_fields(const char *table_name) { char *end; /* result (terminating \0 is already in result_length) */ - result = my_malloc(result_length + 10, MYF(MY_WME)); + result= my_malloc(result_length + 10, MYF(MY_WME)); if (!result) { fprintf(stderr, "Error: Not enough memory to store ORDER BY clause\n"); goto cleanup; } mysql_data_seek(res, 0); - row = mysql_fetch_row(res); - end = strmov(result, row[4]); - while ((row = mysql_fetch_row(res)) && atoi(row[3]) > 1) - end = strxmov(end, ",", row[4], NullS); + row= mysql_fetch_row(res); + quoted_field= quote_name(row[4], buff, 0); + end= strmov(result, quoted_field); + while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1) + { + quoted_field= quote_name(row[4], buff, 0); + end= strxmov(end, ",", quoted_field, NullS); + } } cleanup: @@ -3720,7 +3728,7 @@ static my_bool get_view_structure(char *table, char* db) char table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3]; char query[QUERY_LENGTH]; - FILE *sql_file = md_result_file; + FILE *sql_file= md_result_file; DBUG_ENTER("get_view_structure"); if (opt_no_create_info) /* Don't write table creation info */ diff --git a/client/mysqltest.c b/client/mysqltest.c index 27104282dd4..f9c4ae617fd 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -1784,8 +1784,12 @@ int do_save_master_pos() int do_let(struct st_query *query) { + int ret; char *p= query->first_argument; - char *var_name, *var_name_end, *var_val_start; + char *var_name, *var_name_end; + DYNAMIC_STRING let_rhs_expr; + + init_dynamic_string(&let_rhs_expr, "", 512, 2048); /* Find */ if (!*p) @@ -1805,10 +1809,16 @@ int do_let(struct st_query *query) /* Find start of */ while (*p && my_isspace(charset_info,*p)) p++; - var_val_start= p; + + do_eval(&let_rhs_expr, p, FALSE); + query->last_argument= query->end; /* Assign var_val to var_name */ - return var_set(var_name, var_name_end, var_val_start, query->end); + ret= var_set(var_name, var_name_end, let_rhs_expr.str, + (let_rhs_expr.str + let_rhs_expr.length)); + dynstr_free(&let_rhs_expr); + + return(ret); } diff --git a/config/ac-macros/plugins.m4 b/config/ac-macros/plugins.m4 index cfc5f8dbcbe..87f057e696a 100644 --- a/config/ac-macros/plugins.m4 +++ b/config/ac-macros/plugins.m4 @@ -280,6 +280,8 @@ AC_DEFUN([MYSQL_CONFIGURE_PLUGINS],[ _MYSQL_EMIT_PLUGIN_ACTIONS(m4_bpatsubst(__mysql_plugin_list__, :, [,])) AC_SUBST([mysql_se_dirs]) AC_SUBST([mysql_pg_dirs]) + AC_SUBST([mysql_se_unittest_dirs]) + AC_SUBST([mysql_pg_unittest_dirs]) ]) ]) ]) @@ -410,9 +412,15 @@ dnl Although this is "pretty", it breaks libmysqld build [AC_CONFIG_FILES($6/Makefile)] ) ifelse(m4_substr($6, 0, 8), 
[storage/], - [mysql_se_dirs="$mysql_se_dirs ]m4_substr($6, 8)", + [ + [mysql_se_dirs="$mysql_se_dirs ]m4_substr($6, 8)" + mysql_se_unittest_dirs="$mysql_se_unittest_dirs ../$6" + ], m4_substr($6, 0, 7), [plugin/], - [mysql_pg_dirs="$mysql_pg_dirs ]m4_substr($6, 7)", + [ + [mysql_pg_dirs="$mysql_pg_dirs ]m4_substr($6, 7)" + mysql_pg_unittest_dirs="$mysql_pg_unittest_dirs ../$6" + ], [AC_FATAL([don't know how to handle plugin dir ]$6)]) fi ]) diff --git a/configure.in b/configure.in index 46d27beade3..fbd12e2707e 100644 --- a/configure.in +++ b/configure.in @@ -420,7 +420,7 @@ AC_PATH_PROG(PS, ps, ps) AC_MSG_CHECKING("how to check if pid exists") PS=$ac_cv_path_PS # Linux style -if $PS p $$ 2> /dev/null | grep $0 > /dev/null +if $PS p $$ 2> /dev/null | grep `echo $0 | sed s/\-//` > /dev/null then FIND_PROC="$PS p \$\$PID | grep -v grep | grep \$\$MYSQLD > /dev/null" # Solaris @@ -1086,6 +1086,7 @@ EOF # echo -n "making sure specific build files are writable... " for file in \ + Docs/manual.chm \ Docs/mysql.info \ Docs/INSTALL-BINARY \ INSTALL-SOURCE \ @@ -1861,7 +1862,7 @@ esac # isinf() could be a function or a macro (HPUX) AC_MSG_CHECKING(for isinf with ) -AC_TRY_LINK([#include ], [float f = 0.0; isinf(f)], +AC_TRY_LINK([#include ], [float f = 0.0; int r = isinf(f); return r], AC_MSG_RESULT(yes) AC_DEFINE(HAVE_ISINF, [1], [isinf() macro or function]), AC_MSG_RESULT(no)) diff --git a/extra/Makefile.am b/extra/Makefile.am index de8e519e641..81a6f5f5cb9 100644 --- a/extra/Makefile.am +++ b/extra/Makefile.am @@ -22,11 +22,15 @@ BUILT_SOURCES= $(top_builddir)/include/mysqld_error.h \ $(top_builddir)/include/sql_state.h \ $(top_builddir)/include/mysqld_ername.h pkginclude_HEADERS= $(BUILT_SOURCES) -CLEANFILES = $(BUILT_SOURCES) +DISTCLEANFILES = $(BUILT_SOURCES) SUBDIRS = @yassl_dir@ -# This will build mysqld_error.h and sql_state.h -$(top_builddir)/include/mysqld_error.h: comp_err$(EXEEXT) +# This will build mysqld_error.h, mysqld_ername.h and sql_state.h +# NOTE Built files should depend on their sources to avoid +# the built files being rebuilt in source dist +$(top_builddir)/include/mysqld_error.h: comp_err.c \ + $(top_srcdir)/sql/share/errmsg.txt + $(MAKE) $(AM_MAKEFLAGS) comp_err$(EXEEXT) $(top_builddir)/extra/comp_err$(EXEEXT) \ --charset=$(top_srcdir)/sql/share/charsets \ --out-dir=$(top_builddir)/sql/share/ \ @@ -37,9 +41,10 @@ $(top_builddir)/include/mysqld_error.h: comp_err$(EXEEXT) $(top_builddir)/include/mysqld_ername.h: $(top_builddir)/include/mysqld_error.h $(top_builddir)/include/sql_state.h: $(top_builddir)/include/mysqld_error.h -bin_PROGRAMS = replace comp_err perror resolveip my_print_defaults \ +bin_PROGRAMS = replace perror resolveip my_print_defaults \ resolve_stack_dump mysql_waitpid innochecksum noinst_PROGRAMS = charset2html +EXTRA_PROGRAMS = comp_err EXTRA_DIST = CMakeLists.txt perror.o: perror.c diff --git a/extra/yassl/CMakeLists.txt b/extra/yassl/CMakeLists.txt index e5429876072..09bd2b046da 100644 --- a/extra/yassl/CMakeLists.txt +++ b/extra/yassl/CMakeLists.txt @@ -1,6 +1,6 @@ ADD_DEFINITIONS("-DWIN32 -D_LIB -DYASSL_PREFIX") -INCLUDE_DIRECTORIES(include taocrypt/include mySTL) +INCLUDE_DIRECTORIES(include taocrypt/include taocrypt/mySTL) ADD_LIBRARY(yassl src/buffer.cpp src/cert_wrapper.cpp src/crypto_wrapper.cpp src/handshake.cpp src/lock.cpp src/log.cpp src/socket_wrapper.cpp src/ssl.cpp src/timer.cpp src/yassl_error.cpp src/yassl_imp.cpp src/yassl_int.cpp) diff --git a/extra/yassl/FLOSS-EXCEPTIONS b/extra/yassl/FLOSS-EXCEPTIONS index 344083b0114..47f86ff65f2 
100644 --- a/extra/yassl/FLOSS-EXCEPTIONS +++ b/extra/yassl/FLOSS-EXCEPTIONS @@ -1,7 +1,7 @@ yaSSL FLOSS License Exception **************************************** -Version 0.1, 26 June 2006 +Version 0.2, 31 August 2006 The Sawtooth Consulting Ltd. Exception for Free/Libre and Open Source Software-only Applications Using yaSSL Libraries (the "FLOSS Exception"). @@ -81,6 +81,7 @@ the GPL: Python license (CNRI Python License) - Python Software Foundation License 2.1.1 Sleepycat License "1999" + University of Illinois/NCSA Open Source License - W3C License "2001" X11 License "2001" Zlib/libpng License - diff --git a/extra/yassl/Makefile.am b/extra/yassl/Makefile.am index e4a81019cd1..95efa656907 100644 --- a/extra/yassl/Makefile.am +++ b/extra/yassl/Makefile.am @@ -1,3 +1,3 @@ SUBDIRS = taocrypt src testsuite -EXTRA_DIST = yassl.dsp yassl.dsw yassl.vcproj $(wildcard mySTL/*.hpp) \ +EXTRA_DIST = yassl.dsp yassl.dsw yassl.vcproj \ CMakeLists.txt diff --git a/extra/yassl/README b/extra/yassl/README index 25d4d94c306..2af4e98fe4c 100644 --- a/extra/yassl/README +++ b/extra/yassl/README @@ -1,4 +1,15 @@ -yaSSL Release notes, version 1.3.7 (06/26/06) +yaSSL Release notes, version 1.4.0 (08/13/06) + + + This release of yaSSL contains bug fixes, portability enhancements, + nonblocking connect and accept, better OpenSSL error mapping, and + certificate caching for session resumption. + +See normal build instructions below under 1.0.6. +See libcurl build instructions below under 1.3.0. + + +********************yaSSL Release notes, version 1.3.7 (06/26/06) This release of yaSSL contains bug fixes, portability enhancements, diff --git a/extra/yassl/examples/client/client.cpp b/extra/yassl/examples/client/client.cpp index 94bf753210b..d655011deb6 100644 --- a/extra/yassl/examples/client/client.cpp +++ b/extra/yassl/examples/client/client.cpp @@ -27,7 +27,13 @@ void client_test(void* args) SSL_set_fd(ssl, sockfd); - if (SSL_connect(ssl) != SSL_SUCCESS) err_sys("SSL_connect failed"); + if (SSL_connect(ssl) != SSL_SUCCESS) + { + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(sockfd); + err_sys("SSL_connect failed"); + } showPeer(ssl); const char* cipher = 0; @@ -39,11 +45,16 @@ void client_test(void* args) strncat(list, cipher, strlen(cipher) + 1); } printf("%s\n", list); - printf("Using Cipher Suite %s\n", SSL_get_cipher(ssl)); + printf("Using Cipher Suite: %s\n", SSL_get_cipher(ssl)); char msg[] = "hello yassl!"; if (SSL_write(ssl, msg, sizeof(msg)) != sizeof(msg)) + { + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(sockfd); err_sys("SSL_write failed"); + } char reply[1024]; reply[SSL_read(ssl, reply, sizeof(reply))] = 0; @@ -56,22 +67,36 @@ void client_test(void* args) SSL_shutdown(ssl); SSL_free(ssl); + tcp_close(sockfd); #ifdef TEST_RESUME tcp_connect(sockfd); SSL_set_fd(sslResume, sockfd); SSL_set_session(sslResume, session); - if (SSL_connect(sslResume) != SSL_SUCCESS) err_sys("SSL resume failed"); + if (SSL_connect(sslResume) != SSL_SUCCESS) + { + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(sockfd); + err_sys("SSL resume failed"); + } + showPeer(sslResume); if (SSL_write(sslResume, msg, sizeof(msg)) != sizeof(msg)) + { + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(sockfd); err_sys("SSL_write failed"); + } reply[SSL_read(sslResume, reply, sizeof(reply))] = 0; printf("Server response: %s\n", reply); SSL_shutdown(sslResume); SSL_free(sslResume); + tcp_close(sockfd); #endif // TEST_RESUME SSL_CTX_free(ctx); diff --git a/extra/yassl/examples/echoclient/echoclient.cpp 
b/extra/yassl/examples/echoclient/echoclient.cpp index fd3f7dd48a3..983254bf8a7 100644 --- a/extra/yassl/examples/echoclient/echoclient.cpp +++ b/extra/yassl/examples/echoclient/echoclient.cpp @@ -41,7 +41,14 @@ void echoclient_test(void* args) SSL* ssl = SSL_new(ctx); SSL_set_fd(ssl, sockfd); - if (SSL_connect(ssl) != SSL_SUCCESS) err_sys("SSL_connect failed"); + + if (SSL_connect(ssl) != SSL_SUCCESS) + { + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(sockfd); + err_sys("SSL_connect failed"); + } char send[1024]; char reply[1024]; @@ -50,7 +57,12 @@ void echoclient_test(void* args) int sendSz = strlen(send) + 1; if (SSL_write(ssl, send, sendSz) != sendSz) + { + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(sockfd); err_sys("SSL_write failed"); + } if (strncmp(send, "quit", 4) == 0) { fputs("sending server shutdown command: quit!\n", fout); @@ -63,6 +75,7 @@ void echoclient_test(void* args) SSL_CTX_free(ctx); SSL_free(ssl); + tcp_close(sockfd); fflush(fout); if (inCreated) fclose(fin); diff --git a/extra/yassl/examples/echoserver/echoserver.cpp b/extra/yassl/examples/echoserver/echoserver.cpp index 8e23ead20ab..cd31fedddd8 100644 --- a/extra/yassl/examples/echoserver/echoserver.cpp +++ b/extra/yassl/examples/echoserver/echoserver.cpp @@ -23,6 +23,18 @@ #endif // NO_MAIN_DRIVER + +void EchoError(SSL_CTX* ctx, SSL* ssl, SOCKET_T& s1, SOCKET_T& s2, + const char* msg) +{ + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(s1); + tcp_close(s2); + err_sys(msg); +} + + THREAD_RETURN YASSL_API echoserver_test(void* args) { #ifdef _WIN32 @@ -65,13 +77,18 @@ THREAD_RETURN YASSL_API echoserver_test(void* args) while (!shutdown) { sockaddr_in client; socklen_t client_len = sizeof(client); - int clientfd = accept(sockfd, (sockaddr*)&client, + SOCKET_T clientfd = accept(sockfd, (sockaddr*)&client, (ACCEPT_THIRD_T)&client_len); - if (clientfd == -1) err_sys("tcp accept failed"); + if (clientfd == -1) { + SSL_CTX_free(ctx); + tcp_close(sockfd); + err_sys("tcp accept failed"); + } SSL* ssl = SSL_new(ctx); SSL_set_fd(ssl, clientfd); - if (SSL_accept(ssl) != SSL_SUCCESS) err_sys("SSL_accept failed"); + if (SSL_accept(ssl) != SSL_SUCCESS) + EchoError(ctx, ssl, sockfd, clientfd, "SSL_accept failed"); char command[1024]; int echoSz(0); @@ -100,7 +117,8 @@ THREAD_RETURN YASSL_API echoserver_test(void* args) echoSz += sizeof(footer); if (SSL_write(ssl, command, echoSz) != echoSz) - err_sys("SSL_write failed"); + EchoError(ctx, ssl, sockfd, clientfd, "SSL_write failed"); + break; } command[echoSz] = 0; @@ -110,16 +128,13 @@ THREAD_RETURN YASSL_API echoserver_test(void* args) #endif if (SSL_write(ssl, command, echoSz) != echoSz) - err_sys("SSL_write failed"); + EchoError(ctx, ssl, sockfd, clientfd, "SSL_write failed"); } SSL_free(ssl); + tcp_close(clientfd); } -#ifdef _WIN32 - closesocket(sockfd); -#else - close(sockfd); -#endif + tcp_close(sockfd); DH_free(dh); SSL_CTX_free(ctx); diff --git a/extra/yassl/examples/server/server.cpp b/extra/yassl/examples/server/server.cpp index 73cff19e371..d0bf70cd634 100644 --- a/extra/yassl/examples/server/server.cpp +++ b/extra/yassl/examples/server/server.cpp @@ -4,6 +4,15 @@ #include "../../testsuite/test.hpp" +void ServerError(SSL_CTX* ctx, SSL* ssl, SOCKET_T& sockfd, const char* msg) +{ + SSL_CTX_free(ctx); + SSL_free(ssl); + tcp_close(sockfd); + err_sys(msg); +} + + THREAD_RETURN YASSL_API server_test(void* args) { #ifdef _WIN32 @@ -12,18 +21,14 @@ THREAD_RETURN YASSL_API server_test(void* args) #endif SOCKET_T sockfd = 0; - int clientfd = 0; + SOCKET_T clientfd = 0; 
int argc = 0; char** argv = 0; set_args(argc, argv, *static_cast(args)); tcp_accept(sockfd, clientfd, *static_cast(args)); -#ifdef _WIN32 - closesocket(sockfd); -#else - close(sockfd); -#endif + tcp_close(sockfd); SSL_METHOD* method = TLSv1_server_method(); SSL_CTX* ctx = SSL_CTX_new(method); @@ -36,9 +41,11 @@ THREAD_RETURN YASSL_API server_test(void* args) SSL* ssl = SSL_new(ctx); SSL_set_fd(ssl, clientfd); - if (SSL_accept(ssl) != SSL_SUCCESS) err_sys("SSL_accept failed"); + if (SSL_accept(ssl) != SSL_SUCCESS) + ServerError(ctx, ssl, clientfd, "SSL_accept failed"); + showPeer(ssl); - printf("Using Cipher Suite %s\n", SSL_get_cipher(ssl)); + printf("Using Cipher Suite: %s\n", SSL_get_cipher(ssl)); char command[1024]; command[SSL_read(ssl, command, sizeof(command))] = 0; @@ -46,12 +53,14 @@ THREAD_RETURN YASSL_API server_test(void* args) char msg[] = "I hear you, fa shizzle!"; if (SSL_write(ssl, msg, sizeof(msg)) != sizeof(msg)) - err_sys("SSL_write failed"); + ServerError(ctx, ssl, clientfd, "SSL_write failed"); DH_free(dh); SSL_CTX_free(ctx); SSL_free(ssl); + tcp_close(clientfd); + ((func_args*)args)->return_code = 0; return 0; } diff --git a/extra/yassl/include/buffer.hpp b/extra/yassl/include/buffer.hpp index 4816f79a9bc..c2709a8c847 100644 --- a/extra/yassl/include/buffer.hpp +++ b/extra/yassl/include/buffer.hpp @@ -34,7 +34,10 @@ #include // assert #include "yassl_types.hpp" // ysDelete #include "memory.hpp" // mySTL::auto_ptr -#include "algorithm.hpp" // mySTL::swap +#include STL_ALGORITHM_FILE + + +namespace STL = STL_NAMESPACE; #ifdef _MSC_VER @@ -199,7 +202,7 @@ struct del_ptr_zero void operator()(T*& p) const { T* tmp = 0; - mySTL::swap(tmp, p); + STL::swap(tmp, p); checked_delete(tmp); } }; diff --git a/extra/yassl/include/cert_wrapper.hpp b/extra/yassl/include/cert_wrapper.hpp index 8b5b7491772..761be0e9b04 100644 --- a/extra/yassl/include/cert_wrapper.hpp +++ b/extra/yassl/include/cert_wrapper.hpp @@ -41,8 +41,12 @@ #include "yassl_types.hpp" // SignatureAlgorithm #include "buffer.hpp" // input_buffer #include "asn.hpp" // SignerList -#include "list.hpp" // mySTL::list -#include "algorithm.hpp" // mySTL::for_each +#include STL_LIST_FILE +#include STL_ALGORITHM_FILE + + +namespace STL = STL_NAMESPACE; + namespace yaSSL { @@ -72,7 +76,7 @@ private: // Certificate Manager keeps a list of the cert chain and public key class CertManager { - typedef mySTL::list CertList; + typedef STL::list CertList; CertList list_; // self input_buffer privateKey_; @@ -120,6 +124,7 @@ public: void setVerifyNone(); void setFailNoCert(); void setSendVerify(); + void setPeerX509(X509*); private: CertManager(const CertManager&); // hide copy CertManager& operator=(const CertManager&); // and assign diff --git a/extra/yassl/include/crypto_wrapper.hpp b/extra/yassl/include/crypto_wrapper.hpp index 4c4e4d5da5b..83cf3d26398 100644 --- a/extra/yassl/include/crypto_wrapper.hpp +++ b/extra/yassl/include/crypto_wrapper.hpp @@ -416,7 +416,17 @@ private: class x509; -x509* PemToDer(FILE*, CertType); +struct EncryptedInfo { + enum { IV_SZ = 32, NAME_SZ = 80 }; + char name[NAME_SZ]; // max one line + byte iv[IV_SZ]; // in base16 rep + uint ivSz; + bool set; + + EncryptedInfo() : ivSz(0), set(false) {} +}; + +x509* PemToDer(FILE*, CertType, EncryptedInfo* info = 0); } // naemspace diff --git a/extra/yassl/include/factory.hpp b/extra/yassl/include/factory.hpp index 5619e90cd62..04d742431dc 100644 --- a/extra/yassl/include/factory.hpp +++ b/extra/yassl/include/factory.hpp @@ -35,10 +35,12 @@ #ifndef 
yaSSL_FACTORY_HPP #define yaSSL_FACTORY_HPP -#include "vector.hpp" -#include "pair.hpp" +#include STL_VECTOR_FILE +#include STL_PAIR_FILE +namespace STL = STL_NAMESPACE; + // VC60 workaround: it doesn't allow typename in some places #if defined(_MSC_VER) && (_MSC_VER < 1300) @@ -58,8 +60,8 @@ template class Factory { - typedef mySTL::pair CallBack; - typedef mySTL::vector CallBackVector; + typedef STL::pair CallBack; + typedef STL::vector CallBackVector; CallBackVector callbacks_; public: @@ -79,14 +81,16 @@ public: // register callback void Register(const IdentifierType& id, ProductCreator pc) { - callbacks_.push_back(mySTL::make_pair(id, pc)); + callbacks_.push_back(STL::make_pair(id, pc)); } // THE Creator, returns a new object of the proper type or 0 AbstractProduct* CreateObject(const IdentifierType& id) const { - const CallBack* first = callbacks_.begin(); - const CallBack* last = callbacks_.end(); + typedef typename STL::vector::const_iterator cIter; + + cIter first = callbacks_.begin(); + cIter last = callbacks_.end(); while (first != last) { if (first->first == id) diff --git a/extra/yassl/include/openssl/ssl.h b/extra/yassl/include/openssl/ssl.h index 47b4d075894..5e7290d2a7a 100644 --- a/extra/yassl/include/openssl/ssl.h +++ b/extra/yassl/include/openssl/ssl.h @@ -41,7 +41,7 @@ #include "rsa.h" -#define YASSL_VERSION "1.3.7" +#define YASSL_VERSION "1.4.3" #if defined(__cplusplus) @@ -505,6 +505,8 @@ ASN1_TIME* X509_get_notAfter(X509* x); #define V_ASN1_UTF8STRING 12 #define GEN_DNS 2 +#define CERTFICATE_ERROR 0x14090086 /* SSLv3 error */ + typedef struct MD4_CTX { int buffer[32]; /* big enough to hold, check size in Init */ diff --git a/extra/yassl/include/socket_wrapper.hpp b/extra/yassl/include/socket_wrapper.hpp index 1dd61b63148..9fc0d62f90e 100644 --- a/extra/yassl/include/socket_wrapper.hpp +++ b/extra/yassl/include/socket_wrapper.hpp @@ -71,6 +71,7 @@ typedef unsigned char byte; class Socket { socket_t socket_; // underlying socket descriptor bool wouldBlock_; // for non-blocking data + bool blocking_; // is option set public: explicit Socket(socket_t s = INVALID_SOCKET); ~Socket(); @@ -84,6 +85,7 @@ public: bool wait(); bool WouldBlock() const; + bool IsBlocking() const; void closeSocket(); void shutDown(int how = SD_SEND); diff --git a/extra/yassl/include/yassl_error.hpp b/extra/yassl/include/yassl_error.hpp index 3c3d5fa5231..72b79b05dbd 100644 --- a/extra/yassl/include/yassl_error.hpp +++ b/extra/yassl/include/yassl_error.hpp @@ -54,7 +54,11 @@ enum YasslError { verify_error = 112, send_error = 113, receive_error = 114, - certificate_error = 115 + certificate_error = 115, + privateKey_error = 116, + badVersion_error = 117 + + // !!!! add error message to .cpp !!!! 
// 1000+ from TaoCrypt error.hpp diff --git a/extra/yassl/include/yassl_imp.hpp b/extra/yassl/include/yassl_imp.hpp index 6e475c23db8..180d7fe7fe1 100644 --- a/extra/yassl/include/yassl_imp.hpp +++ b/extra/yassl/include/yassl_imp.hpp @@ -39,7 +39,10 @@ #include "yassl_types.hpp" #include "factory.hpp" -#include "list.hpp" // mySTL::list +#include STL_LIST_FILE + + +namespace STL = STL_NAMESPACE; namespace yaSSL { @@ -427,7 +430,7 @@ private: class CertificateRequest : public HandShakeBase { ClientCertificateType certificate_types_[CERT_TYPES]; int typeTotal_; - mySTL::list certificate_authorities_; + STL::list certificate_authorities_; public: CertificateRequest(); ~CertificateRequest(); diff --git a/extra/yassl/include/yassl_int.hpp b/extra/yassl/include/yassl_int.hpp index 26900aed3af..28e9d237622 100644 --- a/extra/yassl/include/yassl_int.hpp +++ b/extra/yassl/include/yassl_int.hpp @@ -40,6 +40,13 @@ #include "lock.hpp" #include "openssl/ssl.h" // ASN1_STRING and DH +#ifdef _POSIX_THREADS + #include +#endif + + +namespace STL = STL_NAMESPACE; + namespace yaSSL { @@ -80,12 +87,35 @@ enum ServerState { }; +// client connect state for nonblocking restart +enum ConnectState { + CONNECT_BEGIN = 0, + CLIENT_HELLO_SENT, + FIRST_REPLY_DONE, + FINISHED_DONE, + SECOND_REPLY_DONE +}; + + +// server accpet state for nonblocking restart +enum AcceptState { + ACCEPT_BEGIN = 0, + ACCEPT_FIRST_REPLY_DONE, + SERVER_HELLO_DONE, + ACCEPT_SECOND_REPLY_DONE, + ACCEPT_FINISHED_DONE, + ACCEPT_THIRD_REPLY_DONE +}; + + // combines all states class States { RecordLayerState recordLayer_; HandShakeState handshakeLayer_; ClientState clientState_; ServerState serverState_; + ConnectState connectState_; + AcceptState acceptState_; char errorString_[MAX_ERROR_SZ]; YasslError what_; public: @@ -95,6 +125,8 @@ public: const HandShakeState& getHandShake() const; const ClientState& getClient() const; const ServerState& getServer() const; + const ConnectState& GetConnect() const; + const AcceptState& GetAccept() const; const char* getString() const; YasslError What() const; @@ -102,6 +134,8 @@ public: HandShakeState& useHandShake(); ClientState& useClient(); ServerState& useServer(); + ConnectState& UseConnect(); + AcceptState& UseAccept(); char* useString(); void SetError(YasslError); private: @@ -142,8 +176,9 @@ public: X509_NAME(const char*, size_t sz); ~X509_NAME(); - char* GetName(); + const char* GetName() const; ASN1_STRING* GetEntry(int i); + size_t GetLength() const; private: X509_NAME(const X509_NAME&); // hide copy X509_NAME& operator=(const X509_NAME&); // and assign @@ -157,6 +192,9 @@ public: ~StringHolder(); ASN1_STRING* GetString(); +private: + StringHolder(const StringHolder&); // hide copy + StringHolder& operator=(const StringHolder&); // and assign }; @@ -176,6 +214,7 @@ public: ASN1_STRING* GetBefore(); ASN1_STRING* GetAfter(); + private: X509(const X509&); // hide copy X509& operator=(const X509&); // and assign @@ -202,6 +241,7 @@ class SSL_SESSION { uint bornOn_; // create time in seconds uint timeout_; // timeout in seconds RandomPool& random_; // will clean master secret + X509* peerX509_; public: explicit SSL_SESSION(RandomPool&); SSL_SESSION(const SSL&, RandomPool&); @@ -212,17 +252,20 @@ public: const Cipher* GetSuite() const; uint GetBornOn() const; uint GetTimeOut() const; + X509* GetPeerX509() const; void SetTimeOut(uint); SSL_SESSION& operator=(const SSL_SESSION&); // allow assign for resumption private: SSL_SESSION(const SSL_SESSION&); // hide copy + + void CopyX509(X509*); }; // holds 
all sessions class Sessions { - mySTL::list list_; + STL::list list_; RandomPool random_; // for session cleaning Mutex mutex_; // no-op for single threaded @@ -241,8 +284,42 @@ private: }; +#ifdef _POSIX_THREADS + typedef pthread_t THREAD_ID_T; +#else + typedef DWORD THREAD_ID_T; +#endif + +// thread error data +struct ThreadError { + THREAD_ID_T threadID_; + int errorID_; +}; + + +// holds all errors +class Errors { + STL::list list_; + Mutex mutex_; + + Errors() {} // only GetErrors can create +public: + int Lookup(bool peek); // self lookup + void Add(int); + void Remove(); // remove self + + ~Errors() {} + + friend Errors& GetErrors(); // singleton creator +private: + Errors(const Errors&); // hide copy + Errors& operator=(const Errors); // and assign +}; + + Sessions& GetSessions(); // forward singletons sslFactory& GetSSL_Factory(); +Errors& GetErrors(); // openSSL method and context types @@ -252,8 +329,10 @@ class SSL_METHOD { bool verifyPeer_; // request or send certificate bool verifyNone_; // whether to verify certificate bool failNoCert_; + bool multipleProtocol_; // for SSLv23 compatibility public: - explicit SSL_METHOD(ConnectionEnd ce, ProtocolVersion pv); + SSL_METHOD(ConnectionEnd ce, ProtocolVersion pv, + bool multipleProtocol = false); ProtocolVersion getVersion() const; ConnectionEnd getSide() const; @@ -265,6 +344,7 @@ public: bool verifyPeer() const; bool verifyNone() const; bool failNoCert() const; + bool multipleProtocol() const; private: SSL_METHOD(const SSL_METHOD&); // hide copy SSL_METHOD& operator=(const SSL_METHOD&); // and assign @@ -334,7 +414,7 @@ private: // the SSL context class SSL_CTX { public: - typedef mySTL::list CertList; + typedef STL::list CertList; private: SSL_METHOD* method_; x509* certificate_; @@ -342,6 +422,8 @@ private: CertList caList_; Ciphers ciphers_; DH_Parms dhParms_; + pem_password_cb passwordCb_; + void* userData_; Stats stats_; Mutex mutex_; // for Stats public: @@ -354,12 +436,16 @@ public: const Ciphers& GetCiphers() const; const DH_Parms& GetDH_Parms() const; const Stats& GetStats() const; + pem_password_cb GetPasswordCb() const; + void* GetUserData() const; void setVerifyPeer(); void setVerifyNone(); void setFailNoCert(); bool SetCipherList(const char*); bool SetDH(const DH&); + void SetPasswordCb(pem_password_cb cb); + void SetUserData(void*); void IncrementStats(StatsField); void AddCA(x509* ca); @@ -434,13 +520,14 @@ private: // holds input and output buffers class Buffers { public: - typedef mySTL::list inputList; - typedef mySTL::list outputList; + typedef STL::list inputList; + typedef STL::list outputList; private: inputList dataList_; // list of users app data / handshake outputList handShakeList_; // buffered handshake msgs + input_buffer* rawInput_; // buffered raw input yet to process public: - Buffers() {} + Buffers(); ~Buffers(); const inputList& getData() const; @@ -448,6 +535,9 @@ public: inputList& useData(); outputList& useHandShake(); + + void SetRawInput(input_buffer*); // takes ownership + input_buffer* TakeRawInput(); // takes ownership private: Buffers(const Buffers&); // hide copy Buffers& operator=(const Buffers&); // and assign @@ -502,6 +592,7 @@ public: const sslFactory& getFactory() const; const Socket& getSocket() const; YasslError GetError() const; + bool GetMultiProtocol() const; Crypto& useCrypto(); Security& useSecurity(); @@ -509,6 +600,7 @@ public: sslHashes& useHashes(); Socket& useSocket(); Log& useLog(); + Buffers& useBuffers(); // sets void set_pending(Cipher suite); diff --git 
a/extra/yassl/include/yassl_types.hpp b/extra/yassl/include/yassl_types.hpp index b75a2a45302..e602ee180bf 100644 --- a/extra/yassl/include/yassl_types.hpp +++ b/extra/yassl/include/yassl_types.hpp @@ -38,6 +38,8 @@ namespace yaSSL { +#define YASSL_LIB + #ifdef YASSL_PURE_C @@ -76,7 +78,7 @@ namespace yaSSL { ::operator delete[](ptr, yaSSL::ys); } - #define NEW_YS new (ys) + #define NEW_YS new (yaSSL::ys) // to resolve compiler generated operator delete on base classes with // virtual destructors (when on stack), make sure doesn't get called @@ -122,6 +124,39 @@ typedef opaque byte; typedef unsigned int uint; +#ifdef USE_SYS_STL + // use system STL + #define STL_VECTOR_FILE + #define STL_LIST_FILE + #define STL_ALGORITHM_FILE + #define STL_MEMORY_FILE + #define STL_PAIR_FILE + + #define STL_NAMESPACE std +#else + // use mySTL + #define STL_VECTOR_FILE "vector.hpp" + #define STL_LIST_FILE "list.hpp" + #define STL_ALGORITHM_FILE "algorithm.hpp" + #define STL_MEMORY_FILE "memory.hpp" + #define STL_PAIR_FILE "pair.hpp" + + #define STL_NAMESPACE mySTL +#endif + + +#ifdef min + #undef min +#endif + +template +T min(T a, T b) +{ + return a < b ? a : b; +} + + + // all length constants in bytes const int ID_LEN = 32; // session id length const int SUITE_LEN = 2; // cipher suite length @@ -163,6 +198,7 @@ const int DES_BLOCK = 8; // DES is always fixed block size 8 const int DES_IV_SZ = DES_BLOCK; // Init Vector length for DES const int RC4_KEY_SZ = 16; // RC4 Key length const int AES_128_KEY_SZ = 16; // AES 128bit Key length +const int AES_192_KEY_SZ = 24; // AES 192bit Key length const int AES_256_KEY_SZ = 32; // AES 256bit Key length const int AES_BLOCK_SZ = 16; // AES 128bit block size, rfc 3268 const int AES_IV_SZ = AES_BLOCK_SZ; // AES Init Vector length diff --git a/extra/yassl/src/Makefile.am b/extra/yassl/src/Makefile.am index 2b3e1aea5f5..910bbbdd13f 100644 --- a/extra/yassl/src/Makefile.am +++ b/extra/yassl/src/Makefile.am @@ -1,4 +1,4 @@ -INCLUDES = -I../include -I../taocrypt/include -I../mySTL +INCLUDES = -I../include -I../taocrypt/include -I../taocrypt/mySTL noinst_LTLIBRARIES = libyassl.la libyassl_la_SOURCES = buffer.cpp cert_wrapper.cpp crypto_wrapper.cpp \ @@ -6,3 +6,7 @@ libyassl_la_SOURCES = buffer.cpp cert_wrapper.cpp crypto_wrapper.cpp \ template_instnt.cpp timer.cpp yassl_imp.cpp yassl_error.cpp yassl_int.cpp EXTRA_DIST = $(wildcard ../include/*.hpp) $(wildcard ../include/openssl/*.h) AM_CXXFLAGS = -DYASSL_PURE_C -DYASSL_PREFIX + +# Don't update the files from bitkeeper +%::SCCS/s.% + diff --git a/extra/yassl/src/cert_wrapper.cpp b/extra/yassl/src/cert_wrapper.cpp index 6ad0aa568ed..c3ae9c0c561 100644 --- a/extra/yassl/src/cert_wrapper.cpp +++ b/extra/yassl/src/cert_wrapper.cpp @@ -63,8 +63,8 @@ x509::x509(const x509& that) : length_(that.length_), void x509::Swap(x509& that) { - mySTL::swap(length_, that.length_); - mySTL::swap(buffer_, that.buffer_); + STL::swap(length_, that.length_); + STL::swap(buffer_, that.buffer_); } @@ -105,11 +105,11 @@ CertManager::~CertManager() { ysDelete(peerX509_); - mySTL::for_each(signers_.begin(), signers_.end(), del_ptr_zero()) ; + STL::for_each(signers_.begin(), signers_.end(), del_ptr_zero()) ; - mySTL::for_each(peerList_.begin(), peerList_.end(), del_ptr_zero()) ; + STL::for_each(peerList_.begin(), peerList_.end(), del_ptr_zero()) ; - mySTL::for_each(list_.begin(), list_.end(), del_ptr_zero()) ; + STL::for_each(list_.begin(), list_.end(), del_ptr_zero()) ; } @@ -242,7 +242,7 @@ uint CertManager::get_privateKeyLength() const // Validate 
the peer's certificate list, from root to peer (last to first) int CertManager::Validate() { - CertList::iterator last = peerList_.rbegin(); // fix this + CertList::reverse_iterator last = peerList_.rbegin(); int count = peerList_.size(); while ( count > 1 ) { @@ -255,7 +255,7 @@ int CertManager::Validate() const TaoCrypt::PublicKey& key = cert.GetPublicKey(); signers_.push_back(NEW_YS TaoCrypt::Signer(key.GetKey(), key.size(), cert.GetCommonName(), cert.GetHash())); - --last; + ++last; --count; } @@ -310,6 +310,23 @@ int CertManager::SetPrivateKey(const x509& key) } +// Store OpenSSL type peer's cert +void CertManager::setPeerX509(X509* x) +{ + assert(peerX509_ == 0); + if (x == 0) return; + + X509_NAME* issuer = x->GetIssuer(); + X509_NAME* subject = x->GetSubject(); + ASN1_STRING* before = x->GetBefore(); + ASN1_STRING* after = x->GetAfter(); + + peerX509_ = NEW_YS X509(issuer->GetName(), issuer->GetLength(), + subject->GetName(), subject->GetLength(), (const char*) before->data, + before->length, (const char*) after->data, after->length); +} + + #if defined(USE_CML_LIB) // Get the peer's certificate, extract and save public key diff --git a/extra/yassl/src/crypto_wrapper.cpp b/extra/yassl/src/crypto_wrapper.cpp index 799106ec7c0..7344a70b367 100644 --- a/extra/yassl/src/crypto_wrapper.cpp +++ b/extra/yassl/src/crypto_wrapper.cpp @@ -908,7 +908,7 @@ void DiffieHellman::get_parms(byte* bp, byte* bg, byte* bpub) const // convert PEM file to DER x509 type -x509* PemToDer(FILE* file, CertType type) +x509* PemToDer(FILE* file, CertType type, EncryptedInfo* info) { using namespace TaoCrypt; @@ -935,6 +935,37 @@ x509* PemToDer(FILE* file, CertType type) break; } + // remove encrypted header if there + if (fgets(line, sizeof(line), file)) { + char encHeader[] = "Proc-Type"; + if (strncmp(encHeader, line, strlen(encHeader)) == 0 && + fgets(line,sizeof(line), file)) { + + char* start = strstr(line, "DES"); + char* finish = strstr(line, ","); + if (!start) + start = strstr(line, "AES"); + + if (!info) return 0; + + if ( start && finish && (start < finish)) { + memcpy(info->name, start, finish - start); + info->name[finish - start] = 0; + memcpy(info->iv, finish + 1, sizeof(info->iv)); + + char* newline = strstr(line, "\r"); + if (!newline) newline = strstr(line, "\n"); + if (newline && (newline > finish)) { + info->ivSz = newline - (finish + 1); + info->set = true; + } + } + fgets(line,sizeof(line), file); // get blank line + begin = ftell(file); + } + + } + while(fgets(line, sizeof(line), file)) if (strncmp(footer, line, strlen(footer)) == 0) { foundEnd = true; @@ -956,7 +987,7 @@ x509* PemToDer(FILE* file, CertType type) Base64Decoder b64Dec(der); uint sz = der.size(); - mySTL::auto_ptr x(NEW_YS x509(sz), ysDelete); + mySTL::auto_ptr x(NEW_YS x509(sz)); memcpy(x->use_buffer(), der.get_buffer(), sz); return x.release(); diff --git a/extra/yassl/src/handshake.cpp b/extra/yassl/src/handshake.cpp index e93f5385b3d..25f36c4ea8c 100644 --- a/extra/yassl/src/handshake.cpp +++ b/extra/yassl/src/handshake.cpp @@ -37,7 +37,6 @@ namespace yaSSL { -using mySTL::min; // Build a client hello message from cipher suites and compression method @@ -363,7 +362,7 @@ void p_hash(output_buffer& result, const output_buffer& secret, uint lastLen = result.get_capacity() % len; opaque previous[SHA_LEN]; // max size opaque current[SHA_LEN]; // max size - mySTL::auto_ptr hmac(ysDelete); + mySTL::auto_ptr hmac; if (lastLen) times += 1; @@ -582,7 +581,7 @@ void hmac(SSL& ssl, byte* digest, const byte* buffer, uint sz, void 
TLS_hmac(SSL& ssl, byte* digest, const byte* buffer, uint sz, ContentType content, bool verify) { - mySTL::auto_ptr hmac(ysDelete); + mySTL::auto_ptr hmac; opaque seq[SEQ_SZ] = { 0x00, 0x00, 0x00, 0x00 }; opaque length[LENGTH_SZ]; opaque inner[SIZEOF_ENUM + VERSION_SZ + LENGTH_SZ]; // type + version + len @@ -660,25 +659,25 @@ void build_certHashes(SSL& ssl, Hashes& hashes) -// do process input requests -mySTL::auto_ptr -DoProcessReply(SSL& ssl, mySTL::auto_ptr buffered) +// do process input requests, return 0 is done, 1 is call again to complete +int DoProcessReply(SSL& ssl) { // wait for input if blocking if (!ssl.useSocket().wait()) { ssl.SetError(receive_error); - buffered.reset(0); - return buffered; + return 0; } uint ready = ssl.getSocket().get_ready(); - if (!ready) return buffered; + if (!ready) return 1; // add buffered data if its there - uint buffSz = buffered.get() ? buffered.get()->get_size() : 0; + input_buffer* buffered = ssl.useBuffers().TakeRawInput(); + uint buffSz = buffered ? buffered->get_size() : 0; input_buffer buffer(buffSz + ready); if (buffSz) { - buffer.assign(buffered.get()->get_buffer(), buffSz); - buffered.reset(0); + buffer.assign(buffered->get_buffer(), buffSz); + ysDelete(buffered); + buffered = 0; } // add new data @@ -692,10 +691,8 @@ DoProcessReply(SSL& ssl, mySTL::auto_ptr buffered) ssl.getStates().getServer() == clientNull) if (buffer.peek() != handshake) { ProcessOldClientHello(buffer, ssl); - if (ssl.GetError()) { - buffered.reset(0); - return buffered; - } + if (ssl.GetError()) + return 0; } while(!buffer.eof()) { @@ -715,31 +712,28 @@ DoProcessReply(SSL& ssl, mySTL::auto_ptr buffered) // put header in front for next time processing uint extra = needHdr ? 0 : RECORD_HEADER; uint sz = buffer.get_remaining() + extra; - buffered.reset(NEW_YS input_buffer(sz, buffer.get_buffer() + - buffer.get_current() - extra, sz)); - break; + ssl.useBuffers().SetRawInput(NEW_YS input_buffer(sz, + buffer.get_buffer() + buffer.get_current() - extra, sz)); + return 1; } while (buffer.get_current() < hdr.length_ + RECORD_HEADER + offset) { // each message in record, can be more than 1 if not encrypted if (ssl.getSecurity().get_parms().pending_ == false) // cipher on decrypt_message(ssl, buffer, hdr.length_); - mySTL::auto_ptr msg(mf.CreateObject(hdr.type_), ysDelete); + mySTL::auto_ptr msg(mf.CreateObject(hdr.type_)); if (!msg.get()) { ssl.SetError(factory_error); - buffered.reset(0); - return buffered; + return 0; } buffer >> *msg; msg->Process(buffer, ssl); - if (ssl.GetError()) { - buffered.reset(0); - return buffered; - } + if (ssl.GetError()) + return 0; } offset += hdr.length_ + RECORD_HEADER; } - return buffered; + return 0; } @@ -747,16 +741,17 @@ DoProcessReply(SSL& ssl, mySTL::auto_ptr buffered) void processReply(SSL& ssl) { if (ssl.GetError()) return; - mySTL::auto_ptr buffered(ysDelete); - for (;;) { - mySTL::auto_ptr tmp(DoProcessReply(ssl, buffered)); - if (tmp.get()) // had only part of a record's data, call again - buffered = tmp; - else - break; - if (ssl.GetError()) return; + if (DoProcessReply(ssl)) + // didn't complete process + if (!ssl.getSocket().IsBlocking()) { + // keep trying now + while (!ssl.GetError()) + if (DoProcessReply(ssl) == 0) break; } + else + // user will have try again later + ssl.SetError(YasslError(SSL_ERROR_WANT_READ)); } @@ -793,7 +788,7 @@ void sendClientKeyExchange(SSL& ssl, BufferOutput buffer) RecordLayerHeader rlHeader; HandShakeHeader hsHeader; - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr 
out(NEW_YS output_buffer); buildHeaders(ssl, hsHeader, rlHeader, ck); buildOutput(*out.get(), rlHeader, hsHeader, ck); hashHandShake(ssl, *out.get()); @@ -814,7 +809,7 @@ void sendServerKeyExchange(SSL& ssl, BufferOutput buffer) RecordLayerHeader rlHeader; HandShakeHeader hsHeader; - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); buildHeaders(ssl, hsHeader, rlHeader, sk); buildOutput(*out.get(), rlHeader, hsHeader, sk); hashHandShake(ssl, *out.get()); @@ -839,7 +834,7 @@ void sendChangeCipher(SSL& ssl, BufferOutput buffer) ChangeCipherSpec ccs; RecordLayerHeader rlHeader; buildHeader(ssl, rlHeader, ccs); - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); buildOutput(*out.get(), rlHeader, ccs); if (buffer == buffered) @@ -856,7 +851,7 @@ void sendFinished(SSL& ssl, ConnectionEnd side, BufferOutput buffer) Finished fin; buildFinished(ssl, fin, side == client_end ? client : server); - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); cipherFinished(ssl, fin, *out.get()); // hashes handshake if (ssl.getSecurity().get_resuming()) { @@ -955,7 +950,7 @@ void sendServerHello(SSL& ssl, BufferOutput buffer) ServerHello sh(ssl.getSecurity().get_connection().version_); RecordLayerHeader rlHeader; HandShakeHeader hsHeader; - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); buildServerHello(ssl, sh); ssl.set_random(sh.get_random(), server_end); @@ -978,7 +973,7 @@ void sendServerHelloDone(SSL& ssl, BufferOutput buffer) ServerHelloDone shd; RecordLayerHeader rlHeader; HandShakeHeader hsHeader; - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); buildHeaders(ssl, hsHeader, rlHeader, shd); buildOutput(*out.get(), rlHeader, hsHeader, shd); @@ -999,7 +994,7 @@ void sendCertificate(SSL& ssl, BufferOutput buffer) Certificate cert(ssl.getCrypto().get_certManager().get_cert()); RecordLayerHeader rlHeader; HandShakeHeader hsHeader; - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); buildHeaders(ssl, hsHeader, rlHeader, cert); buildOutput(*out.get(), rlHeader, hsHeader, cert); @@ -1021,7 +1016,7 @@ void sendCertificateRequest(SSL& ssl, BufferOutput buffer) request.Build(); RecordLayerHeader rlHeader; HandShakeHeader hsHeader; - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); buildHeaders(ssl, hsHeader, rlHeader, request); buildOutput(*out.get(), rlHeader, hsHeader, request); @@ -1043,7 +1038,7 @@ void sendCertificateVerify(SSL& ssl, BufferOutput buffer) verify.Build(ssl); RecordLayerHeader rlHeader; HandShakeHeader hsHeader; - mySTL::auto_ptr out(NEW_YS output_buffer, ysDelete); + mySTL::auto_ptr out(NEW_YS output_buffer); buildHeaders(ssl, hsHeader, rlHeader, verify); buildOutput(*out.get(), rlHeader, hsHeader, verify); diff --git a/extra/yassl/src/socket_wrapper.cpp b/extra/yassl/src/socket_wrapper.cpp index 7790001fc2d..70944831884 100644 --- a/extra/yassl/src/socket_wrapper.cpp +++ b/extra/yassl/src/socket_wrapper.cpp @@ -41,9 +41,10 @@ #include #include #include + #include #endif // _WIN32 -#if defined(__sun) || defined(__SCO_VERSION__) +#if defined(__sun) || defined(__SCO_VERSION__) || defined(__NETWARE__) #include #endif @@ -62,7 +63,7 @@ namespace yaSSL { Socket::Socket(socket_t s) - : socket_(s), wouldBlock_(false) + : socket_(s), wouldBlock_(false), blocking_(false) 
{} @@ -148,6 +149,7 @@ uint Socket::receive(byte* buf, unsigned int sz, int flags) if (get_lastError() == SOCKET_EWOULDBLOCK || get_lastError() == SOCKET_EAGAIN) { wouldBlock_ = true; + blocking_ = true; // socket can block, only way to tell for win32 return 0; } } @@ -189,6 +191,12 @@ bool Socket::WouldBlock() const } +bool Socket::IsBlocking() const +{ + return blocking_; +} + + void Socket::set_lastError(int errorCode) { #ifdef _WIN32 diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp index 81e585ff735..a008ea7228b 100644 --- a/extra/yassl/src/ssl.cpp +++ b/extra/yassl/src/ssl.cpp @@ -42,6 +42,9 @@ #include "yassl_int.hpp" #include "md5.hpp" // for TaoCrypt MD5 size assert #include "md4.hpp" // for TaoCrypt MD4 size assert +#include "file.hpp" // for TaoCrypt Source +#include "coding.hpp" // HexDecoder +#include "helpers.hpp" // for placement new hack #include #ifdef _WIN32 @@ -55,7 +58,6 @@ namespace yaSSL { -using mySTL::min; int read_file(SSL_CTX* ctx, const char* file, int format, CertType type) @@ -93,11 +95,55 @@ int read_file(SSL_CTX* ctx, const char* file, int format, CertType type) } } else { - x = PemToDer(input, type); + EncryptedInfo info; + x = PemToDer(input, type, &info); if (!x) { fclose(input); return SSL_BAD_FILE; } + if (info.set) { + // decrypt + char password[80]; + pem_password_cb cb = ctx->GetPasswordCb(); + if (!cb) { + fclose(input); + return SSL_BAD_FILE; + } + int passwordSz = cb(password, sizeof(password), 0, + ctx->GetUserData()); + byte key[AES_256_KEY_SZ]; // max sizes + byte iv[AES_IV_SZ]; + + // use file's salt for key derivation, but not real iv + TaoCrypt::Source source(info.iv, info.ivSz); + TaoCrypt::HexDecoder dec(source); + memcpy(info.iv, source.get_buffer(), min((uint)sizeof(info.iv), + source.size())); + EVP_BytesToKey(info.name, "MD5", info.iv, (byte*)password, + passwordSz, 1, key, iv); + + STL::auto_ptr cipher; + if (strncmp(info.name, "DES-CBC", 7) == 0) + cipher.reset(NEW_YS DES); + else if (strncmp(info.name, "DES-EDE3-CBC", 13) == 0) + cipher.reset(NEW_YS DES_EDE); + else if (strncmp(info.name, "AES-128-CBC", 13) == 0) + cipher.reset(NEW_YS AES(AES_128_KEY_SZ)); + else if (strncmp(info.name, "AES-192-CBC", 13) == 0) + cipher.reset(NEW_YS AES(AES_192_KEY_SZ)); + else if (strncmp(info.name, "AES-256-CBC", 13) == 0) + cipher.reset(NEW_YS AES(AES_256_KEY_SZ)); + else { + fclose(input); + return SSL_BAD_FILE; + } + cipher->set_decryptKey(key, info.iv); + STL::auto_ptr newx(NEW_YS x509(x->get_length())); + cipher->decrypt(newx->use_buffer(), x->get_buffer(), + x->get_length()); + ysDelete(x); + x = newx.release(); + } } } fclose(input); @@ -140,8 +186,17 @@ SSL_METHOD* TLSv1_client_method() SSL_METHOD* SSLv23_server_method() { - // compatibility only, no version 2 support - return SSLv3_server_method(); + // compatibility only, no version 2 support, but does SSL 3 and TLS 1 + return NEW_YS SSL_METHOD(server_end, ProtocolVersion(3,1), true); +} + + +SSL_METHOD* SSLv23_client_method() +{ + // compatibility only, no version 2 support, but does SSL 3 and TLS 1 + // though it sends TLS1 hello not SSLv2 so SSLv3 only servers will decline + // TODO: maybe add support to send SSLv2 hello ??? 
+ return NEW_YS SSL_METHOD(client_end, ProtocolVersion(3,1), true); } @@ -178,14 +233,29 @@ int SSL_set_fd(SSL* ssl, int fd) int SSL_connect(SSL* ssl) { + if (ssl->GetError() == YasslError(SSL_ERROR_WANT_READ)) + ssl->SetError(no_error); + + ClientState neededState; + + switch (ssl->getStates().GetConnect()) { + + case CONNECT_BEGIN : sendClientHello(*ssl); - ClientState neededState = ssl->getSecurity().get_resuming() ? + if (!ssl->GetError()) + ssl->useStates().UseConnect() = CLIENT_HELLO_SENT; + + case CLIENT_HELLO_SENT : + neededState = ssl->getSecurity().get_resuming() ? serverFinishedComplete : serverHelloDoneComplete; while (ssl->getStates().getClient() < neededState) { if (ssl->GetError()) break; processReply(*ssl); } + if (!ssl->GetError()) + ssl->useStates().UseConnect() = FIRST_REPLY_DONE; + case FIRST_REPLY_DONE : if(ssl->getCrypto().get_certManager().sendVerify()) sendCertificate(*ssl); @@ -198,18 +268,32 @@ int SSL_connect(SSL* ssl) sendChangeCipher(*ssl); sendFinished(*ssl, client_end); ssl->flushBuffer(); + + if (!ssl->GetError()) + ssl->useStates().UseConnect() = FINISHED_DONE; + + case FINISHED_DONE : if (!ssl->getSecurity().get_resuming()) while (ssl->getStates().getClient() < serverFinishedComplete) { if (ssl->GetError()) break; processReply(*ssl); } + if (!ssl->GetError()) + ssl->useStates().UseConnect() = SECOND_REPLY_DONE; + case SECOND_REPLY_DONE : ssl->verifyState(serverFinishedComplete); ssl->useLog().ShowTCP(ssl->getSocket().get_fd()); - if (ssl->GetError()) + if (ssl->GetError()) { + GetErrors().Add(ssl->GetError()); return SSL_FATAL_ERROR; + } return SSL_SUCCESS; + + default : + return SSL_FATAL_ERROR; // unkown state + } } @@ -228,7 +312,17 @@ int SSL_read(SSL* ssl, void* buffer, int sz) int SSL_accept(SSL* ssl) { + if (ssl->GetError() == YasslError(SSL_ERROR_WANT_READ)) + ssl->SetError(no_error); + + switch (ssl->getStates().GetAccept()) { + + case ACCEPT_BEGIN : processReply(*ssl); + if (!ssl->GetError()) + ssl->useStates().UseAccept() = ACCEPT_FIRST_REPLY_DONE; + + case ACCEPT_FIRST_REPLY_DONE : sendServerHello(*ssl); if (!ssl->getSecurity().get_resuming()) { @@ -242,27 +336,51 @@ int SSL_accept(SSL* ssl) sendServerHelloDone(*ssl); ssl->flushBuffer(); + } + + if (!ssl->GetError()) + ssl->useStates().UseAccept() = SERVER_HELLO_DONE; + case SERVER_HELLO_DONE : + if (!ssl->getSecurity().get_resuming()) { while (ssl->getStates().getServer() < clientFinishedComplete) { if (ssl->GetError()) break; processReply(*ssl); } } + if (!ssl->GetError()) + ssl->useStates().UseAccept() = ACCEPT_SECOND_REPLY_DONE; + + case ACCEPT_SECOND_REPLY_DONE : sendChangeCipher(*ssl); sendFinished(*ssl, server_end); ssl->flushBuffer(); + + if (!ssl->GetError()) + ssl->useStates().UseAccept() = ACCEPT_FINISHED_DONE; + + case ACCEPT_FINISHED_DONE : if (ssl->getSecurity().get_resuming()) { while (ssl->getStates().getServer() < clientFinishedComplete) { if (ssl->GetError()) break; processReply(*ssl); } } + if (!ssl->GetError()) + ssl->useStates().UseAccept() = ACCEPT_THIRD_REPLY_DONE; + case ACCEPT_THIRD_REPLY_DONE : ssl->useLog().ShowTCP(ssl->getSocket().get_fd()); - if (ssl->GetError()) + if (ssl->GetError()) { + GetErrors().Add(ssl->GetError()); return SSL_FATAL_ERROR; + } return SSL_SUCCESS; + + default: + return SSL_FATAL_ERROR; // unknown state + } } @@ -278,6 +396,8 @@ int SSL_do_handshake(SSL* ssl) int SSL_clear(SSL* ssl) { ssl->useSocket().closeSocket(); + GetErrors().Remove(); + return SSL_SUCCESS; } @@ -289,6 +409,8 @@ int SSL_shutdown(SSL* ssl) 
ssl->useLog().ShowTCP(ssl->getSocket().get_fd(), true); ssl->useSocket().closeSocket(); + GetErrors().Remove(); + return SSL_SUCCESS; } @@ -762,9 +884,8 @@ void DH_free(DH* dh) // be created BIGNUM* BN_bin2bn(const unsigned char* num, int sz, BIGNUM* retVal) { - using mySTL::auto_ptr; bool created = false; - auto_ptr bn(ysDelete); + mySTL::auto_ptr bn; if (!retVal) { created = true; @@ -825,7 +946,7 @@ const EVP_MD* EVP_md5(void) const EVP_CIPHER* EVP_des_ede3_cbc(void) { - static const char* type = "DES_EDE3_CBC"; + static const char* type = "DES-EDE3-CBC"; return type; } @@ -836,16 +957,37 @@ int EVP_BytesToKey(const EVP_CIPHER* type, const EVP_MD* md, const byte* salt, // only support MD5 for now if (strncmp(md, "MD5", 3)) return 0; - // only support DES_EDE3_CBC for now - if (strncmp(type, "DES_EDE3_CBC", 12)) return 0; + int keyLen = 0; + int ivLen = 0; + + // only support CBC DES and AES for now + if (strncmp(type, "DES-CBC", 7) == 0) { + keyLen = DES_KEY_SZ; + ivLen = DES_IV_SZ; + } + else if (strncmp(type, "DES-EDE3-CBC", 12) == 0) { + keyLen = DES_EDE_KEY_SZ; + ivLen = DES_IV_SZ; + } + else if (strncmp(type, "AES-128-CBC", 11) == 0) { + keyLen = AES_128_KEY_SZ; + ivLen = AES_IV_SZ; + } + else if (strncmp(type, "AES-192-CBC", 11) == 0) { + keyLen = AES_192_KEY_SZ; + ivLen = AES_IV_SZ; + } + else if (strncmp(type, "AES-256-CBC", 11) == 0) { + keyLen = AES_256_KEY_SZ; + ivLen = AES_IV_SZ; + } + else + return 0; yaSSL::MD5 myMD; uint digestSz = myMD.get_digestSize(); byte digest[SHA_LEN]; // max size - yaSSL::DES_EDE cipher; - int keyLen = cipher.get_keySize(); - int ivLen = cipher.get_ivSize(); int keyLeft = keyLen; int ivLeft = ivLen; int keyOutput = 0; @@ -878,7 +1020,7 @@ int EVP_BytesToKey(const EVP_CIPHER* type, const EVP_MD* md, const byte* salt, if (ivLeft && digestLeft) { int store = min(ivLeft, digestLeft); - memcpy(&iv[ivLen - ivLeft], digest, store); + memcpy(&iv[ivLen - ivLeft], &digest[digestSz - digestLeft], store); keyOutput += store; ivLeft -= store; @@ -954,10 +1096,9 @@ void DES_ecb_encrypt(DES_cblock* input, DES_cblock* output, } -void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX*, void* userdata) +void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX* ctx, void* userdata) { - // yaSSL doesn't support yet, unencrypt your PEM file with userdata - // before handing off to yaSSL + ctx->SetUserData(userdata); } @@ -1034,12 +1175,6 @@ ASN1_TIME* X509_get_notAfter(X509* x) } -SSL_METHOD* SSLv23_client_method(void) /* doesn't actually roll back */ -{ - return SSLv3_client_method(); -} - - SSL_METHOD* SSLv2_client_method(void) /* will never work, no v 2 */ { return 0; @@ -1363,9 +1498,9 @@ int SSL_pending(SSL* ssl) } - void SSL_CTX_set_default_passwd_cb(SSL_CTX*, pem_password_cb) + void SSL_CTX_set_default_passwd_cb(SSL_CTX* ctx, pem_password_cb cb) { - // TDOD: + ctx->SetPasswordCb(cb); } @@ -1428,7 +1563,7 @@ int SSL_pending(SSL* ssl) void ERR_remove_state(unsigned long) { - // TODO: + GetErrors().Remove(); } @@ -1437,16 +1572,30 @@ int SSL_pending(SSL* ssl) return l & 0xfff; } + unsigned long err_helper(bool peek = false) + { + int ysError = GetErrors().Lookup(peek); + + // translate cert error for libcurl, it uses OpenSSL hex code + switch (ysError) { + case TaoCrypt::SIG_OTHER_E: + return CERTFICATE_ERROR; + break; + default : + return 0; + } + } + unsigned long ERR_peek_error() { - return 0; // TODO: + return err_helper(true); } unsigned long ERR_get_error() { - return ERR_peek_error(); + return err_helper(); } diff --git a/extra/yassl/src/template_instnt.cpp 
b/extra/yassl/src/template_instnt.cpp index c5fc23dabdb..fb488e47672 100644 --- a/extra/yassl/src/template_instnt.cpp +++ b/extra/yassl/src/template_instnt.cpp @@ -65,6 +65,19 @@ template yaSSL::del_ptr_zero for_each::iterat template yaSSL::del_ptr_zero for_each::iterator, yaSSL::del_ptr_zero>(mySTL::list::iterator, mySTL::list::iterator, yaSSL::del_ptr_zero); template yaSSL::del_ptr_zero for_each::iterator, yaSSL::del_ptr_zero>(mySTL::list::iterator, mySTL::list::iterator, yaSSL::del_ptr_zero); template yaSSL::del_ptr_zero for_each::iterator, yaSSL::del_ptr_zero>(mySTL::list::iterator, mySTL::list::iterator, yaSSL::del_ptr_zero); +template bool list::erase(list::iterator); +template void list::push_back(yaSSL::ThreadError); +template void list::pop_front(); +template void list::pop_back(); +template list::~list(); +template pair* GetArrayMemory >(size_t); +template void FreeArrayMemory >(pair*); +template pair* GetArrayMemory >(size_t); +template void FreeArrayMemory >(pair*); +template pair* GetArrayMemory >(size_t); +template void FreeArrayMemory >(pair*); +template pair* GetArrayMemory >(size_t); +template void FreeArrayMemory >(pair*); } namespace yaSSL { @@ -90,8 +103,13 @@ template void ysDelete(X509*); template void ysDelete(Message*); template void ysDelete(sslFactory*); template void ysDelete(Sessions*); +template void ysDelete(Errors*); template void ysArrayDelete(unsigned char*); template void ysArrayDelete(char*); + +template int min(int, int); +template unsigned int min(unsigned int, unsigned int); +template unsigned long min(unsigned long, unsigned long); } #endif // HAVE_EXPLICIT_TEMPLATE_INSTANTIATION diff --git a/extra/yassl/src/yassl.cpp b/extra/yassl/src/yassl.cpp index 5bc8bad8bbc..e253ef84bb5 100644 --- a/extra/yassl/src/yassl.cpp +++ b/extra/yassl/src/yassl.cpp @@ -36,21 +36,9 @@ #include "openssl/ssl.h" // get rid of this -// yaSSL overloads hide these -void* operator new[](size_t sz) -{ - return ::operator new(sz); -} - -void operator delete[](void* ptr) -{ - ::operator delete(ptr); -} - namespace yaSSL { -using mySTL::min; struct Base { diff --git a/extra/yassl/src/yassl_error.cpp b/extra/yassl/src/yassl_error.cpp index 4f75de34a98..3531c0a2c74 100644 --- a/extra/yassl/src/yassl_error.cpp +++ b/extra/yassl/src/yassl_error.cpp @@ -125,13 +125,21 @@ void SetErrorString(YasslError error, char* buffer) strncpy(buffer, "unable to proccess cerificate", max); break; + case privateKey_error : + strncpy(buffer, "unable to proccess private key, bad format", max); + break; + + case badVersion_error : + strncpy(buffer, "protocl version mismatch", max); + break; + // openssl errors case SSL_ERROR_WANT_READ : strncpy(buffer, "the read operation would block", max); break; // TaoCrypt errors - case NO_ERROR : + case NO_ERROR_E : strncpy(buffer, "not in error state", max); break; @@ -235,6 +243,10 @@ void SetErrorString(YasslError error, char* buffer) strncpy(buffer, "ASN: bad other signature confirmation", max); break; + case CERTFICATE_ERROR : + strncpy(buffer, "Unable to verify certificate", max); + break; + default : strncpy(buffer, "unknown error number", max); } diff --git a/extra/yassl/src/yassl_imp.cpp b/extra/yassl/src/yassl_imp.cpp index 98f8035732e..bd07f8b70f2 100644 --- a/extra/yassl/src/yassl_imp.cpp +++ b/extra/yassl/src/yassl_imp.cpp @@ -139,7 +139,7 @@ void DH_Server::build(SSL& ssl) parms_.alloc_pub(pubSz)); short sigSz = 0; - mySTL::auto_ptr auth(ysDelete); + mySTL::auto_ptr auth; const CertManager& cert = ssl.getCrypto().get_certManager(); if 
(ssl.getSecurity().get_parms().sig_algo_ == rsa_sa_algo) @@ -151,9 +151,11 @@ void DH_Server::build(SSL& ssl) sigSz += DSS_ENCODED_EXTRA; } - sigSz += auth->get_signatureLength(); - + if (!sigSz) { + ssl.SetError(privateKey_error); + return; + } length_ = 8; // pLen + gLen + YsLen + SigLen length_ += pSz + gSz + pubSz + sigSz; @@ -612,7 +614,7 @@ void HandShakeHeader::Process(input_buffer& input, SSL& ssl) { ssl.verifyState(*this); const HandShakeFactory& hsf = ssl.getFactory().getHandShake(); - mySTL::auto_ptr hs(hsf.CreateObject(type_), ysDelete); + mySTL::auto_ptr hs(hsf.CreateObject(type_)); if (!hs.get()) { ssl.SetError(factory_error); return; @@ -1214,6 +1216,20 @@ output_buffer& operator<<(output_buffer& output, const ServerHello& hello) // Server Hello processing handler void ServerHello::Process(input_buffer&, SSL& ssl) { + if (ssl.GetMultiProtocol()) { // SSLv23 support + if (ssl.isTLS() && server_version_.minor_ < 1) + // downgrade to SSLv3 + ssl.useSecurity().use_connection().TurnOffTLS(); + } + else if (ssl.isTLS() && server_version_.minor_ < 1) { + ssl.SetError(badVersion_error); + return; + } + else if (!ssl.isTLS() && (server_version_.major_ == 3 && + server_version_.minor_ >= 1)) { + ssl.SetError(badVersion_error); + return; + } ssl.set_pending(cipher_suite_[1]); ssl.set_random(random_, server_end); if (id_len_) @@ -1384,11 +1400,23 @@ output_buffer& operator<<(output_buffer& output, const ClientHello& hello) // Client Hello processing handler void ClientHello::Process(input_buffer&, SSL& ssl) { - if (ssl.isTLS() && client_version_.minor_ == 0) { + if (ssl.GetMultiProtocol()) { // SSLv23 support + if (ssl.isTLS() && client_version_.minor_ < 1) { + // downgrade to SSLv3 ssl.useSecurity().use_connection().TurnOffTLS(); ProtocolVersion pv = ssl.getSecurity().get_connection().version_; ssl.useSecurity().use_parms().SetSuites(pv); // reset w/ SSL suites } + } + else if (ssl.isTLS() && client_version_.minor_ < 1) { + ssl.SetError(badVersion_error); + return; + } + else if (!ssl.isTLS() && (client_version_.major_ == 3 && + client_version_.minor_ >= 1)) { + ssl.SetError(badVersion_error); + return; + } ssl.set_random(random_, client_end); while (id_len_) { // trying to resume @@ -1541,7 +1569,7 @@ CertificateRequest::CertificateRequest() CertificateRequest::~CertificateRequest() { - mySTL::for_each(certificate_authorities_.begin(), + STL::for_each(certificate_authorities_.begin(), certificate_authorities_.end(), del_ptr_zero()) ; } @@ -1634,9 +1662,9 @@ output_buffer& operator<<(output_buffer& output, request.typeTotal_ - REQUEST_HEADER, tmp); output.write(tmp, sizeof(tmp)); - mySTL::list::const_iterator first = + STL::list::const_iterator first = request.certificate_authorities_.begin(); - mySTL::list::const_iterator last = + STL::list::const_iterator last = request.certificate_authorities_.end(); while (first != last) { uint16 sz; @@ -1684,7 +1712,7 @@ void CertificateVerify::Build(SSL& ssl) uint16 sz = 0; byte len[VERIFY_HEADER]; - mySTL::auto_ptr sig(ysArrayDelete); + mySTL::auto_array sig; // sign const CertManager& cert = ssl.getCrypto().get_certManager(); diff --git a/extra/yassl/src/yassl_int.cpp b/extra/yassl/src/yassl_int.cpp index 9b83f964348..5288acb2bcd 100644 --- a/extra/yassl/src/yassl_int.cpp +++ b/extra/yassl/src/yassl_int.cpp @@ -33,6 +33,10 @@ #include "handshake.hpp" #include "timer.hpp" +#ifdef _POSIX_THREADS + #include "pthread.h" +#endif + #ifdef YASSL_PURE_C @@ -74,7 +78,6 @@ namespace yaSSL { -using mySTL::min; @@ -155,6 +158,7 @@ void c32toa(uint32 u32, 
opaque* c) States::States() : recordLayer_(recordReady), handshakeLayer_(preHandshake), clientState_(serverNull), serverState_(clientNull), + connectState_(CONNECT_BEGIN), acceptState_(ACCEPT_BEGIN), what_(no_error) {} const RecordLayerState& States::getRecord() const @@ -181,6 +185,18 @@ const ServerState& States::getServer() const } +const ConnectState& States::GetConnect() const +{ + return connectState_; +} + + +const AcceptState& States::GetAccept() const +{ + return acceptState_; +} + + const char* States::getString() const { return errorString_; @@ -217,6 +233,18 @@ ServerState& States::useServer() } +ConnectState& States::UseConnect() +{ + return connectState_; +} + + +AcceptState& States::UseAccept() +{ + return acceptState_; +} + + char* States::useString() { return errorString_; @@ -722,6 +750,12 @@ void SSL::SetError(YasslError ye) } +Buffers& SSL::useBuffers() +{ + return buffers_; +} + + // locals namespace { @@ -959,7 +993,7 @@ using namespace yassl_int_cpp_local1; uint SSL::bufferedData() { - return mySTL::for_each(buffers_.getData().begin(),buffers_.getData().end(), + return STL::for_each(buffers_.getData().begin(),buffers_.getData().end(), SumData()).total_; } @@ -1002,7 +1036,7 @@ void SSL::PeekData(Data& data) data.set_length(0); // output, actual data filled dataSz = min(dataSz, bufferedData()); - Buffers::inputList::iterator front = buffers_.getData().begin(); + Buffers::inputList::iterator front = buffers_.useData().begin(); while (elements) { uint frontSz = (*front)->get_remaining(); @@ -1027,7 +1061,7 @@ void SSL::flushBuffer() { if (GetError()) return; - uint sz = mySTL::for_each(buffers_.getHandShake().begin(), + uint sz = STL::for_each(buffers_.getHandShake().begin(), buffers_.getHandShake().end(), SumBuffer()).total_; output_buffer out(sz); @@ -1213,8 +1247,10 @@ void SSL::matchSuite(const opaque* peer, uint length) void SSL::set_session(SSL_SESSION* s) { - if (s && GetSessions().lookup(s->GetID(), &secure_.use_resume())) + if (s && GetSessions().lookup(s->GetID(), &secure_.use_resume())) { secure_.set_resuming(true); + crypto_.use_certManager().setPeerX509(s->GetPeerX509()); + } } @@ -1260,6 +1296,12 @@ YasslError SSL::GetError() const } +bool SSL::GetMultiProtocol() const +{ + return secure_.GetContext()->getMethod()->multipleProtocol(); +} + + Crypto& SSL::useCrypto() { return crypto_; @@ -1314,9 +1356,25 @@ void SSL::addBuffer(output_buffer* b) } +void SSL_SESSION::CopyX509(X509* x) +{ + assert(peerX509_ == 0); + if (x == 0) return; + + X509_NAME* issuer = x->GetIssuer(); + X509_NAME* subject = x->GetSubject(); + ASN1_STRING* before = x->GetBefore(); + ASN1_STRING* after = x->GetAfter(); + + peerX509_ = NEW_YS X509(issuer->GetName(), issuer->GetLength(), + subject->GetName(), subject->GetLength(), (const char*) before->data, + before->length, (const char*) after->data, after->length); +} + + // store connection parameters SSL_SESSION::SSL_SESSION(const SSL& ssl, RandomPool& ran) - : timeout_(DEFAULT_TIMEOUT), random_(ran) + : timeout_(DEFAULT_TIMEOUT), random_(ran), peerX509_(0) { const Connection& conn = ssl.getSecurity().get_connection(); @@ -1325,12 +1383,14 @@ SSL_SESSION::SSL_SESSION(const SSL& ssl, RandomPool& ran) memcpy(suite_, ssl.getSecurity().get_parms().suite_, SUITE_LEN); bornOn_ = lowResTimer(); + + CopyX509(ssl.getCrypto().get_certManager().get_peerX509()); } // for resumption copy in ssl::parameters SSL_SESSION::SSL_SESSION(RandomPool& ran) - : bornOn_(0), timeout_(0), random_(ran) + : bornOn_(0), timeout_(0), random_(ran), peerX509_(0) { 
memset(sessionID_, 0, ID_LEN); memset(master_secret_, 0, SECRET_LEN); @@ -1347,6 +1407,12 @@ SSL_SESSION& SSL_SESSION::operator=(const SSL_SESSION& that) bornOn_ = that.bornOn_; timeout_ = that.timeout_; + if (peerX509_) { + ysDelete(peerX509_); + peerX509_ = 0; + } + CopyX509(that.peerX509_); + return *this; } @@ -1369,6 +1435,12 @@ const Cipher* SSL_SESSION::GetSuite() const } +X509* SSL_SESSION::GetPeerX509() const +{ + return peerX509_; +} + + uint SSL_SESSION::GetBornOn() const { return bornOn_; @@ -1395,6 +1467,8 @@ SSL_SESSION::~SSL_SESSION() { volatile opaque* p = master_secret_; clean(p, SECRET_LEN, random_); + + ysDelete(peerX509_); } @@ -1418,6 +1492,15 @@ sslFactory& GetSSL_Factory() } +static Errors* errorsInstance = 0; + +Errors& GetErrors() +{ + if (!errorsInstance) + errorsInstance = NEW_YS Errors; + return *errorsInstance; +} + typedef Mutex::Lock Lock; @@ -1433,14 +1516,15 @@ void Sessions::add(const SSL& ssl) Sessions::~Sessions() { - mySTL::for_each(list_.begin(), list_.end(), del_ptr_zero()); + STL::for_each(list_.begin(), list_.end(), del_ptr_zero()); } // locals namespace yassl_int_cpp_local2 { // for explicit templates -typedef mySTL::list::iterator iterator; +typedef STL::list::iterator sess_iterator; +typedef STL::list::iterator thr_iterator; struct sess_match { const opaque* id_; @@ -1455,6 +1539,28 @@ struct sess_match { }; +THREAD_ID_T GetSelf() +{ +#ifndef _POSIX_THREADS + return GetCurrentThreadId(); +#else + return pthread_self(); +#endif +} + +struct thr_match { + THREAD_ID_T id_; + explicit thr_match() : id_(GetSelf()) {} + + bool operator()(ThreadError thr) + { + if (thr.threadID_ == id_) + return true; + return false; + } +}; + + } // local namespace using namespace yassl_int_cpp_local2; @@ -1463,8 +1569,8 @@ using namespace yassl_int_cpp_local2; SSL_SESSION* Sessions::lookup(const opaque* id, SSL_SESSION* copy) { Lock guard(mutex_); - iterator find = mySTL::find_if(list_.begin(), list_.end(), sess_match(id)); - + sess_iterator find = STL::find_if(list_.begin(), list_.end(), + sess_match(id)); if (find != list_.end()) { uint current = lowResTimer(); if ( ((*find)->GetBornOn() + (*find)->GetTimeOut()) < current) { @@ -1484,8 +1590,8 @@ SSL_SESSION* Sessions::lookup(const opaque* id, SSL_SESSION* copy) void Sessions::remove(const opaque* id) { Lock guard(mutex_); - iterator find = mySTL::find_if(list_.begin(), list_.end(), sess_match(id)); - + sess_iterator find = STL::find_if(list_.begin(), list_.end(), + sess_match(id)); if (find != list_.end()) { del_ptr_zero()(*find); list_.erase(find); @@ -1493,9 +1599,51 @@ void Sessions::remove(const opaque* id) } -SSL_METHOD::SSL_METHOD(ConnectionEnd ce, ProtocolVersion pv) +// remove a self thread error +void Errors::Remove() +{ + Lock guard(mutex_); + thr_iterator find = STL::find_if(list_.begin(), list_.end(), + thr_match()); + if (find != list_.end()) + list_.erase(find); +} + + +// lookup self error code +int Errors::Lookup(bool peek) +{ + Lock guard(mutex_); + thr_iterator find = STL::find_if(list_.begin(), list_.end(), + thr_match()); + if (find != list_.end()) { + int ret = find->errorID_; + if (!peek) + list_.erase(find); + return ret; + } + else + return 0; +} + + +// add a new error code for self +void Errors::Add(int error) +{ + ThreadError add; + add.errorID_ = error; + add.threadID_ = GetSelf(); + + Remove(); // may have old error + + Lock guard(mutex_); + list_.push_back(add); +} + + +SSL_METHOD::SSL_METHOD(ConnectionEnd ce, ProtocolVersion pv, bool multiProto) : version_(pv), side_(ce), 
verifyPeer_(false), verifyNone_(false), - failNoCert_(false) + failNoCert_(false), multipleProtocol_(multiProto) {} @@ -1547,8 +1695,15 @@ bool SSL_METHOD::failNoCert() const } +bool SSL_METHOD::multipleProtocol() const +{ + return multipleProtocol_; +} + + SSL_CTX::SSL_CTX(SSL_METHOD* meth) - : method_(meth), certificate_(0), privateKey_(0) + : method_(meth), certificate_(0), privateKey_(0), passwordCb_(0), + userData_(0) {} @@ -1558,7 +1713,7 @@ SSL_CTX::~SSL_CTX() ysDelete(certificate_); ysDelete(privateKey_); - mySTL::for_each(caList_.begin(), caList_.end(), del_ptr_zero()); + STL::for_each(caList_.begin(), caList_.end(), del_ptr_zero()); } @@ -1611,6 +1766,30 @@ const Stats& SSL_CTX::GetStats() const } +pem_password_cb SSL_CTX::GetPasswordCb() const +{ + return passwordCb_; +} + + +void SSL_CTX::SetPasswordCb(pem_password_cb cb) +{ + passwordCb_ = cb; +} + + +void* SSL_CTX::GetUserData() const +{ + return userData_; +} + + +void SSL_CTX::SetUserData(void* data) +{ + userData_ = data; +} + + void SSL_CTX::setVerifyPeer() { method_->setVerifyPeer(); @@ -1914,12 +2093,33 @@ Hashes& sslHashes::use_certVerify() } +Buffers::Buffers() : rawInput_(0) +{} + + Buffers::~Buffers() { - mySTL::for_each(handShakeList_.begin(), handShakeList_.end(), + STL::for_each(handShakeList_.begin(), handShakeList_.end(), del_ptr_zero()) ; - mySTL::for_each(dataList_.begin(), dataList_.end(), + STL::for_each(dataList_.begin(), dataList_.end(), del_ptr_zero()) ; + ysDelete(rawInput_); +} + + +void Buffers::SetRawInput(input_buffer* ib) +{ + assert(rawInput_ == 0); + rawInput_ = ib; +} + + +input_buffer* Buffers::TakeRawInput() +{ + input_buffer* ret = rawInput_; + rawInput_ = 0; + + return ret; } @@ -2026,12 +2226,18 @@ X509_NAME::~X509_NAME() } -char* X509_NAME::GetName() +const char* X509_NAME::GetName() const { return name_; } +size_t X509_NAME::GetLength() const +{ + return sz_; +} + + X509::X509(const char* i, size_t iSz, const char* s, size_t sSz, const char* b, int bSz, const char* a, int aSz) : issuer_(i, iSz), subject_(s, sSz), @@ -2114,10 +2320,12 @@ extern "C" void yaSSL_CleanUp() TaoCrypt::CleanUp(); yaSSL::ysDelete(yaSSL::sslFactoryInstance); yaSSL::ysDelete(yaSSL::sessionsInstance); + yaSSL::ysDelete(yaSSL::errorsInstance); // In case user calls more than once, prevent seg fault yaSSL::sslFactoryInstance = 0; yaSSL::sessionsInstance = 0; + yaSSL::errorsInstance = 0; } @@ -2126,6 +2334,7 @@ namespace mySTL { template yaSSL::yassl_int_cpp_local1::SumData for_each::iterator, yaSSL::yassl_int_cpp_local1::SumData>(mySTL::list::iterator, mySTL::list::iterator, yaSSL::yassl_int_cpp_local1::SumData); template yaSSL::yassl_int_cpp_local1::SumBuffer for_each::iterator, yaSSL::yassl_int_cpp_local1::SumBuffer>(mySTL::list::iterator, mySTL::list::iterator, yaSSL::yassl_int_cpp_local1::SumBuffer); template mySTL::list::iterator find_if::iterator, yaSSL::yassl_int_cpp_local2::sess_match>(mySTL::list::iterator, mySTL::list::iterator, yaSSL::yassl_int_cpp_local2::sess_match); +template mySTL::list::iterator find_if::iterator, yaSSL::yassl_int_cpp_local2::thr_match>(mySTL::list::iterator, mySTL::list::iterator, yaSSL::yassl_int_cpp_local2::thr_match); } #endif diff --git a/extra/yassl/taocrypt/CMakeLists.txt b/extra/yassl/taocrypt/CMakeLists.txt index 0af0a242e5d..540827954d0 100644 --- a/extra/yassl/taocrypt/CMakeLists.txt +++ b/extra/yassl/taocrypt/CMakeLists.txt @@ -1,4 +1,4 @@ -INCLUDE_DIRECTORIES(../mySTL include) +INCLUDE_DIRECTORIES(mySTL include) ADD_LIBRARY(taocrypt src/aes.cpp src/aestables.cpp 
src/algebra.cpp src/arc4.cpp src/asn.cpp src/coding.cpp src/des.cpp src/dh.cpp src/dsa.cpp src/file.cpp src/hash.cpp src/integer.cpp src/md2.cpp diff --git a/extra/yassl/taocrypt/COPYING b/extra/yassl/taocrypt/COPYING new file mode 100644 index 00000000000..d60c31a97a5 --- /dev/null +++ b/extra/yassl/taocrypt/COPYING @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. 
Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. 
+ +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/extra/yassl/taocrypt/INSTALL b/extra/yassl/taocrypt/INSTALL new file mode 100644 index 00000000000..54caf7c190f --- /dev/null +++ b/extra/yassl/taocrypt/INSTALL @@ -0,0 +1,229 @@ +Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002 Free Software +Foundation, Inc. + + This file is free documentation; the Free Software Foundation gives +unlimited permission to copy, distribute and modify it. + +Basic Installation +================== + + These are generic installation instructions. + + The `configure' shell script attempts to guess correct values for +various system-dependent variables used during compilation. It uses +those values to create a `Makefile' in each directory of the package. +It may also create one or more `.h' files containing system-dependent +definitions. Finally, it creates a shell script `config.status' that +you can run in the future to recreate the current configuration, and a +file `config.log' containing compiler output (useful mainly for +debugging `configure'). + + It can also use an optional file (typically called `config.cache' +and enabled with `--cache-file=config.cache' or simply `-C') that saves +the results of its tests to speed up reconfiguring. (Caching is +disabled by default to prevent problems with accidental use of stale +cache files.) + + If you need to do unusual things to compile the package, please try +to figure out how `configure' could check whether to do them, and mail +diffs or instructions to the address given in the `README' so they can +be considered for the next release. If you are using the cache, and at +some point `config.cache' contains results you don't want to keep, you +may remove or edit it. + + The file `configure.ac' (or `configure.in') is used to create +`configure' by a program called `autoconf'. You only need +`configure.ac' if you want to change it or regenerate `configure' using +a newer version of `autoconf'. + +The simplest way to compile this package is: + + 1. `cd' to the directory containing the package's source code and type + `./configure' to configure the package for your system. If you're + using `csh' on an old version of System V, you might need to type + `sh ./configure' instead to prevent `csh' from trying to execute + `configure' itself. + + Running `configure' takes awhile. While running, it prints some + messages telling which features it is checking for. + + 2. Type `make' to compile the package. + + 3. Optionally, type `make check' to run any self-tests that come with + the package. + + 4. 
Type `make install' to install the programs and any data files and + documentation. + + 5. You can remove the program binaries and object files from the + source code directory by typing `make clean'. To also remove the + files that `configure' created (so you can compile the package for + a different kind of computer), type `make distclean'. There is + also a `make maintainer-clean' target, but that is intended mainly + for the package's developers. If you use it, you may have to get + all sorts of other programs in order to regenerate files that came + with the distribution. + +Compilers and Options +===================== + + Some systems require unusual options for compilation or linking that +the `configure' script does not know about. Run `./configure --help' +for details on some of the pertinent environment variables. + + You can give `configure' initial values for configuration parameters +by setting variables in the command line or in the environment. Here +is an example: + + ./configure CC=c89 CFLAGS=-O2 LIBS=-lposix + + *Note Defining Variables::, for more details. + +Compiling For Multiple Architectures +==================================== + + You can compile the package for more than one kind of computer at the +same time, by placing the object files for each architecture in their +own directory. To do this, you must use a version of `make' that +supports the `VPATH' variable, such as GNU `make'. `cd' to the +directory where you want the object files and executables to go and run +the `configure' script. `configure' automatically checks for the +source code in the directory that `configure' is in and in `..'. + + If you have to use a `make' that does not support the `VPATH' +variable, you have to compile the package for one architecture at a +time in the source code directory. After you have installed the +package for one architecture, use `make distclean' before reconfiguring +for another architecture. + +Installation Names +================== + + By default, `make install' will install the package's files in +`/usr/local/bin', `/usr/local/man', etc. You can specify an +installation prefix other than `/usr/local' by giving `configure' the +option `--prefix=PATH'. + + You can specify separate installation prefixes for +architecture-specific files and architecture-independent files. If you +give `configure' the option `--exec-prefix=PATH', the package will use +PATH as the prefix for installing programs and libraries. +Documentation and other data files will still use the regular prefix. + + In addition, if you use an unusual directory layout you can give +options like `--bindir=PATH' to specify different values for particular +kinds of files. Run `configure --help' for a list of the directories +you can set and what kinds of files go in them. + + If the package supports it, you can cause programs to be installed +with an extra prefix or suffix on their names by giving `configure' the +option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. + +Optional Features +================= + + Some packages pay attention to `--enable-FEATURE' options to +`configure', where FEATURE indicates an optional part of the package. +They may also pay attention to `--with-PACKAGE' options, where PACKAGE +is something like `gnu-as' or `x' (for the X Window System). The +`README' should mention any `--enable-' and `--with-' options that the +package recognizes. 
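For example, as an illustrative invocation only (`FEATURE' and `PACKAGE' here
are placeholders for whatever optional parts a given package defines; run
`./configure --help' to see the flags this package actually recognizes):

     ./configure --prefix=/usr/local --enable-FEATURE --with-PACKAGE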
+ + For packages that use the X Window System, `configure' can usually +find the X include and library files automatically, but if it doesn't, +you can use the `configure' options `--x-includes=DIR' and +`--x-libraries=DIR' to specify their locations. + +Specifying the System Type +========================== + + There may be some features `configure' cannot figure out +automatically, but needs to determine by the type of machine the package +will run on. Usually, assuming the package is built to be run on the +_same_ architectures, `configure' can figure that out, but if it prints +a message saying it cannot guess the machine type, give it the +`--build=TYPE' option. TYPE can either be a short name for the system +type, such as `sun4', or a canonical name which has the form: + + CPU-COMPANY-SYSTEM + +where SYSTEM can have one of these forms: + + OS KERNEL-OS + + See the file `config.sub' for the possible values of each field. If +`config.sub' isn't included in this package, then this package doesn't +need to know the machine type. + + If you are _building_ compiler tools for cross-compiling, you should +use the `--target=TYPE' option to select the type of system they will +produce code for. + + If you want to _use_ a cross compiler, that generates code for a +platform different from the build platform, you should specify the +"host" platform (i.e., that on which the generated programs will +eventually be run) with `--host=TYPE'. + +Sharing Defaults +================ + + If you want to set default values for `configure' scripts to share, +you can create a site shell script called `config.site' that gives +default values for variables like `CC', `cache_file', and `prefix'. +`configure' looks for `PREFIX/share/config.site' if it exists, then +`PREFIX/etc/config.site' if it exists. Or, you can set the +`CONFIG_SITE' environment variable to the location of the site script. +A warning: not all `configure' scripts look for a site script. + +Defining Variables +================== + + Variables not defined in a site shell script can be set in the +environment passed to `configure'. However, some packages may run +configure again during the build, and the customized values of these +variables may be lost. In order to avoid this problem, you should set +them in the `configure' command line, using `VAR=value'. For example: + + ./configure CC=/usr/local2/bin/gcc + +will cause the specified gcc to be used as the C compiler (unless it is +overridden in the site shell script). + +`configure' Invocation +====================== + + `configure' recognizes the following options to control how it +operates. + +`--help' +`-h' + Print a summary of the options to `configure', and exit. + +`--version' +`-V' + Print the version of Autoconf used to generate the `configure' + script, and exit. + +`--cache-file=FILE' + Enable the cache: use and save the results of the tests in FILE, + traditionally `config.cache'. FILE defaults to `/dev/null' to + disable caching. + +`--config-cache' +`-C' + Alias for `--cache-file=config.cache'. + +`--quiet' +`--silent' +`-q' + Do not print messages saying which checks are being made. To + suppress all normal output, redirect it to `/dev/null' (any error + messages will still be shown). + +`--srcdir=DIR' + Look for the package's source code in directory DIR. Usually + `configure' can determine that directory automatically. + +`configure' also accepts some other, not widely useful, options. Run +`configure --help' for more details. 
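As a sketch only, combining the options described above (none of them are
required), a cached, quiet configure followed by a build and self-test might
be run as:

     ./configure --cache-file=config.cache --quiet
     make
     make check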
+ diff --git a/extra/yassl/taocrypt/Makefile.am b/extra/yassl/taocrypt/Makefile.am index c242696b82f..d22cdcebf67 100644 --- a/extra/yassl/taocrypt/Makefile.am +++ b/extra/yassl/taocrypt/Makefile.am @@ -1,2 +1,2 @@ SUBDIRS = src test benchmark -EXTRA_DIST = taocrypt.dsw taocrypt.dsp taocrypt.vcproj CMakeLists.txt +EXTRA_DIST = taocrypt.dsw taocrypt.dsp taocrypt.vcproj CMakeLists.txt $(wildcard mySTL/*.hpp) diff --git a/extra/yassl/taocrypt/README b/extra/yassl/taocrypt/README new file mode 100644 index 00000000000..34e1744492e --- /dev/null +++ b/extra/yassl/taocrypt/README @@ -0,0 +1,37 @@ +TaoCrypt release 0.9.0 09/18/2006 + +This is the first release of TaoCrypt, it was previously only included with +yaSSL. TaoCrypt is highly portable and fast, its features include: + +One way hash functions: SHA-1, MD2, MD4, MD5, RIPEMD-160 +Message authentication codes: HMAC +Block Ciphers: DES, Triple-DES, AES, Blowfish, Twofish +Stream Ciphers: ARC4 +Public Key Crypto: RSA, DSA, Diffie-Hellman +Password based key derivation: PBKDF2 from PKCS #5 +Pseudo Random Number Generators +Lare Integer Support +Base 16/64 encoding/decoding +DER encoding/decoding +X.509 processing +SSE2 and ia32 asm for the right processors and compilers + + +To build on Unix + + ./configure + make + + To test the build, from the ./test directory run ./test + + +On Windows + + Open the taocrypt project workspace + Choose (Re)Build All + + To test the build, run the test executable + + +Please send any questions or comments to todd@yassl.com. + diff --git a/extra/yassl/taocrypt/benchmark/Makefile.am b/extra/yassl/taocrypt/benchmark/Makefile.am index 81200ff7e6a..a5b1713427c 100644 --- a/extra/yassl/taocrypt/benchmark/Makefile.am +++ b/extra/yassl/taocrypt/benchmark/Makefile.am @@ -1,8 +1,6 @@ -INCLUDES = -I../include -I../../mySTL +INCLUDES = -I../include -I../mySTL bin_PROGRAMS = benchmark benchmark_SOURCES = benchmark.cpp -benchmark_LDFLAGS = -L../src -benchmark_LDADD = -ltaocrypt +benchmark_LDADD = $(top_builddir)/extra/yassl/taocrypt/src/libtaocrypt.la benchmark_CXXFLAGS = -DYASSL_PURE_C -benchmark_DEPENDENCIES = ../src/libtaocrypt.la EXTRA_DIST = benchmark.dsp rsa1024.der dh1024.der dsa1024.der make.bat diff --git a/extra/yassl/taocrypt/include/asn.hpp b/extra/yassl/taocrypt/include/asn.hpp index 8bea2ae780b..dbee54be6f1 100644 --- a/extra/yassl/taocrypt/include/asn.hpp +++ b/extra/yassl/taocrypt/include/asn.hpp @@ -33,10 +33,12 @@ #include "misc.hpp" #include "block.hpp" -#include "list.hpp" #include "error.hpp" +#include STL_LIST_FILE +namespace STL = STL_NAMESPACE; + namespace TaoCrypt { @@ -232,7 +234,7 @@ private: }; -typedef mySTL::list SignerList; +typedef STL::list SignerList; enum SigType { SHAwDSA = 517, MD2wRSA = 646, MD5wRSA = 648, SHAwRSA =649}; diff --git a/extra/yassl/taocrypt/include/block.hpp b/extra/yassl/taocrypt/include/block.hpp index 88cb06f62f1..a931158a83d 100644 --- a/extra/yassl/taocrypt/include/block.hpp +++ b/extra/yassl/taocrypt/include/block.hpp @@ -31,12 +31,14 @@ #ifndef TAO_CRYPT_BLOCK_HPP #define TAO_CRYPT_BLOCK_HPP -#include "algorithm.hpp" // mySTL::swap #include "misc.hpp" #include // memcpy #include // ptrdiff_t +#include STL_ALGORITHM_FILE +namespace STL = STL_NAMESPACE; + namespace TaoCrypt { @@ -80,7 +82,7 @@ typename A::pointer StdReallocate(A& a, T* p, typename A::size_type oldSize, typename A::pointer newPointer = b.allocate(newSize, 0); memcpy(newPointer, p, sizeof(T) * min(oldSize, newSize)); a.deallocate(p, oldSize); - mySTL::swap(a, b); + STL::swap(a, b); return newPointer; } 
else { @@ -183,9 +185,9 @@ public: } void Swap(Block& other) { - mySTL::swap(sz_, other.sz_); - mySTL::swap(buffer_, other.buffer_); - mySTL::swap(allocator_, other.allocator_); + STL::swap(sz_, other.sz_); + STL::swap(buffer_, other.buffer_); + STL::swap(allocator_, other.allocator_); } ~Block() { allocator_.deallocate(buffer_, sz_); } diff --git a/extra/yassl/taocrypt/include/blowfish.hpp b/extra/yassl/taocrypt/include/blowfish.hpp index fbc4f223702..40953624232 100644 --- a/extra/yassl/taocrypt/include/blowfish.hpp +++ b/extra/yassl/taocrypt/include/blowfish.hpp @@ -32,7 +32,11 @@ #include "misc.hpp" #include "modes.hpp" -#include "algorithm.hpp" +#include STL_ALGORITHM_FILE + + +namespace STL = STL_NAMESPACE; + namespace TaoCrypt { diff --git a/extra/yassl/taocrypt/include/error.hpp b/extra/yassl/taocrypt/include/error.hpp index b0ff9b280ba..1a93056db45 100644 --- a/extra/yassl/taocrypt/include/error.hpp +++ b/extra/yassl/taocrypt/include/error.hpp @@ -37,7 +37,7 @@ namespace TaoCrypt { enum ErrorNumber { -NO_ERROR = 0, // "not in error state" +NO_ERROR_E = 0, // "not in error state" // RandomNumberGenerator WINCRYPT_E = 1001, // "bad wincrypt acquire" @@ -78,7 +78,7 @@ SIG_OTHER_E = 1039 // "bad other signature confirmation" struct Error { ErrorNumber what_; // description number, 0 for no error - explicit Error(ErrorNumber w = NO_ERROR) : what_(w) {} + explicit Error(ErrorNumber w = NO_ERROR_E) : what_(w) {} ErrorNumber What() const { return what_; } void SetError(ErrorNumber w) { what_ = w; } diff --git a/extra/yassl/taocrypt/include/file.hpp b/extra/yassl/taocrypt/include/file.hpp index 87fc6139f8f..c12b5c73bac 100644 --- a/extra/yassl/taocrypt/include/file.hpp +++ b/extra/yassl/taocrypt/include/file.hpp @@ -83,7 +83,7 @@ private: void Swap(Source& other) { buffer_.Swap(other.buffer_); - mySTL::swap(current_, other.current_); + STL::swap(current_, other.current_); } }; diff --git a/extra/yassl/taocrypt/include/integer.hpp b/extra/yassl/taocrypt/include/integer.hpp index 7e4f6450316..70b4dc79e73 100644 --- a/extra/yassl/taocrypt/include/integer.hpp +++ b/extra/yassl/taocrypt/include/integer.hpp @@ -44,8 +44,8 @@ #include "block.hpp" #include "random.hpp" #include "file.hpp" -#include "algorithm.hpp" // mySTL::swap #include +#include STL_ALGORITHM_FILE #ifdef TAOCRYPT_X86ASM_AVAILABLE diff --git a/extra/yassl/taocrypt/include/misc.hpp b/extra/yassl/taocrypt/include/misc.hpp index 48604620706..3d2d4c62466 100644 --- a/extra/yassl/taocrypt/include/misc.hpp +++ b/extra/yassl/taocrypt/include/misc.hpp @@ -198,6 +198,23 @@ void CleanUp(); #endif +#ifdef USE_SYS_STL + // use system STL + #define STL_VECTOR_FILE + #define STL_LIST_FILE + #define STL_ALGORITHM_FILE + #define STL_MEMORY_FILE + #define STL_NAMESPACE std +#else + // use mySTL + #define STL_VECTOR_FILE "vector.hpp" + #define STL_LIST_FILE "list.hpp" + #define STL_ALGORITHM_FILE "algorithm.hpp" + #define STL_MEMORY_FILE "memory.hpp" + #define STL_NAMESPACE mySTL +#endif + + // ***************** DLL related ******************** #ifdef TAOCRYPT_WIN32_AVAILABLE diff --git a/extra/yassl/taocrypt/include/pwdbased.hpp b/extra/yassl/taocrypt/include/pwdbased.hpp index c3e916e3d83..78df7ede02e 100644 --- a/extra/yassl/taocrypt/include/pwdbased.hpp +++ b/extra/yassl/taocrypt/include/pwdbased.hpp @@ -74,7 +74,7 @@ word32 PBKDF2_HMAC::DeriveKey(byte* derived, word32 dLen, const byte* pwd, } hmac.Final(buffer.get_buffer()); - word32 segmentLen = mySTL::min(dLen, buffer.size()); + word32 segmentLen = min(dLen, buffer.size()); memcpy(derived, 
buffer.get_buffer(), segmentLen); for (j = 1; j < iterations; j++) { diff --git a/extra/yassl/taocrypt/include/twofish.hpp b/extra/yassl/taocrypt/include/twofish.hpp index 0529c37d6c5..ba144d2defb 100644 --- a/extra/yassl/taocrypt/include/twofish.hpp +++ b/extra/yassl/taocrypt/include/twofish.hpp @@ -32,7 +32,11 @@ #include "misc.hpp" #include "modes.hpp" -#include "algorithm.hpp" +#include STL_ALGORITHM_FILE + + +namespace STL = STL_NAMESPACE; + namespace TaoCrypt { diff --git a/extra/yassl/mySTL/algorithm.hpp b/extra/yassl/taocrypt/mySTL/algorithm.hpp similarity index 99% rename from extra/yassl/mySTL/algorithm.hpp rename to extra/yassl/taocrypt/mySTL/algorithm.hpp index efc7aa21a07..9a51d1f2281 100644 --- a/extra/yassl/mySTL/algorithm.hpp +++ b/extra/yassl/taocrypt/mySTL/algorithm.hpp @@ -8,7 +8,7 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * There are special exceptions to the terms and conditions of the GPL as it * is applied to yaSSL. View the full text of the exception in the file * FLOSS-EXCEPTIONS in the directory of this software distribution. diff --git a/extra/yassl/mySTL/helpers.hpp b/extra/yassl/taocrypt/mySTL/helpers.hpp similarity index 80% rename from extra/yassl/mySTL/helpers.hpp rename to extra/yassl/taocrypt/mySTL/helpers.hpp index c4449519db3..43381c0f6ea 100644 --- a/extra/yassl/mySTL/helpers.hpp +++ b/extra/yassl/taocrypt/mySTL/helpers.hpp @@ -113,6 +113,47 @@ PlaceIter uninit_fill_n(PlaceIter place, Size n, const T& value) } +template +T* GetArrayMemory(size_t items) +{ + unsigned char* ret; + + #ifdef YASSL_LIB + ret = NEW_YS unsigned char[sizeof(T) * items]; + #else + ret = NEW_TC unsigned char[sizeof(T) * items]; + #endif + + return reinterpret_cast(ret); +} + + +template +void FreeArrayMemory(T* ptr) +{ + unsigned char* p = reinterpret_cast(ptr); + + #ifdef YASSL_LIB + yaSSL::ysArrayDelete(p); + #else + TaoCrypt::tcArrayDelete(p); + #endif +} + + + +inline void* GetMemory(size_t bytes) +{ + return GetArrayMemory(bytes); +} + + +inline void FreeMemory(void* ptr) +{ + FreeArrayMemory(ptr); +} + + } // namespace mySTL diff --git a/extra/yassl/mySTL/list.hpp b/extra/yassl/taocrypt/mySTL/list.hpp similarity index 76% rename from extra/yassl/mySTL/list.hpp rename to extra/yassl/taocrypt/mySTL/list.hpp index 11a1a914868..831c5bbbbbd 100644 --- a/extra/yassl/mySTL/list.hpp +++ b/extra/yassl/taocrypt/mySTL/list.hpp @@ -33,7 +33,6 @@ #include "helpers.hpp" -#include namespace mySTL { @@ -75,8 +74,7 @@ public: class iterator { node* current_; public: - iterator() : current_(0) {} - explicit iterator(node* p) : current_(p) {} + explicit iterator(node* p = 0) : current_(p) {} T& operator*() const { @@ -127,11 +125,67 @@ public: friend class list; }; + + class reverse_iterator { + node* current_; + public: + explicit reverse_iterator(node* p = 0) : current_(p) {} + + T& operator*() const + { + return current_->value_; + } + + T* operator->() const + { + return &(operator*()); + } + + reverse_iterator& operator++() + { + current_ = current_->prev_; + return *this; + } + + reverse_iterator& operator--() + { + current_ = current_->next_; + return *this; + } + + reverse_iterator operator++(int) + { + reverse_iterator tmp = *this; + current_ = current_->prev_; + return tmp; + } + + reverse_iterator operator--(int) + { + reverse_iterator tmp = *this; + current_ = current_->next_; + return tmp; + } + + bool operator==(const reverse_iterator& 
other) const + { + return current_ == other.current_; + } + + bool operator!=(const reverse_iterator& other) const + { + return current_ != other.current_; + } + + friend class list; + }; + bool erase(iterator); - iterator begin() const { return iterator(head_); } - iterator rbegin() const { return iterator(tail_); } - iterator end() const { return iterator(); } + iterator begin() const { return iterator(head_); } + reverse_iterator rbegin() const { return reverse_iterator(tail_); } + iterator end() const { return iterator(); } + reverse_iterator rend() const { return reverse_iterator(); } typedef iterator const_iterator; // for now @@ -158,7 +212,7 @@ list::~list() for (; start; start = next_) { next_ = start->next_; destroy(start); - free(start); + FreeMemory(start); } } @@ -166,8 +220,7 @@ list::~list() template void list::push_front(T t) { - void* mem = malloc(sizeof(node)); - if (!mem) abort(); + void* mem = GetMemory(sizeof(node)); node* add = new (reinterpret_cast(mem)) node(t); if (head_) { @@ -196,7 +249,7 @@ void list::pop_front() head_->prev_ = 0; } destroy(front); - free(front); + FreeMemory(front); --sz_; } @@ -204,7 +257,7 @@ void list::pop_front() template T list::front() const { - if (head_ == 0) return 0; + if (head_ == 0) return T(); return head_->value_; } @@ -212,8 +265,7 @@ T list::front() const template void list::push_back(T t) { - void* mem = malloc(sizeof(node)); - if (!mem) abort(); + void* mem = GetMemory(sizeof(node)); node* add = new (reinterpret_cast(mem)) node(t); if (tail_) { @@ -242,7 +294,7 @@ void list::pop_back() tail_->next_ = 0; } destroy(rear); - free(rear); + FreeMemory(rear); --sz_; } @@ -250,7 +302,7 @@ void list::pop_back() template T list::back() const { - if (tail_ == 0) return 0; + if (tail_ == 0) return T(); return tail_->value_; } @@ -286,7 +338,7 @@ bool list::remove(T t) del->next_->prev_ = del->prev_; destroy(del); - free(del); + FreeMemory(del); --sz_; } return true; @@ -309,78 +361,13 @@ bool list::erase(iterator iter) del->next_->prev_ = del->prev_; destroy(del); - free(del); + FreeMemory(del); --sz_; } return true; } -/* MSVC can't handle ?? 
- -template -T& list::iterator::operator*() const -{ - return current_->value_; -} - - -template -T* list::iterator::operator->() const -{ - return &(operator*()); -} - - -template -typename list::iterator& list::iterator::operator++() -{ - current_ = current_->next_; - return *this; -} - - -template -typename list::iterator& list::iterator::operator--() -{ - current_ = current_->prev_; - return *this; -} - - -template -typename list::iterator& list::iterator::operator++(int) -{ - iterator tmp = *this; - current_ = current_->next_; - return tmp; -} - - -template -typename list::iterator& list::iterator::operator--(int) -{ - iterator tmp = *this; - current_ = current_->prev_; - return tmp; -} - - -template -bool list::iterator::operator==(const iterator& other) const -{ - return current_ == other.current_; -} - - -template -bool list::iterator::operator!=(const iterator& other) const -{ - return current_ != other.current_; -} -*/ // end MSVC 6 can't handle - - } // namespace mySTL diff --git a/extra/yassl/mySTL/memory.hpp b/extra/yassl/taocrypt/mySTL/memory.hpp similarity index 79% rename from extra/yassl/mySTL/memory.hpp rename to extra/yassl/taocrypt/mySTL/memory.hpp index f480af12316..5855423c6fc 100644 --- a/extra/yassl/mySTL/memory.hpp +++ b/extra/yassl/taocrypt/mySTL/memory.hpp @@ -31,6 +31,7 @@ #ifndef mySTL_MEMORY_HPP #define mySTL_MEMORY_HPP +#include "memory_array.hpp" // for auto_array #ifdef _MSC_VER // disable operator-> warning for builtins @@ -43,27 +44,25 @@ namespace mySTL { template struct auto_ptr_ref { - typedef void (*Deletor)(T*); - T* ptr_; - Deletor del_; - auto_ptr_ref(T* p, Deletor d) : ptr_(p), del_(d) {} + T* ptr_; + explicit auto_ptr_ref(T* p) : ptr_(p) {} }; template class auto_ptr { - typedef void (*Deletor)(T*); T* ptr_; - Deletor del_; void Destroy() { - del_(ptr_); + #ifdef YASSL_LIB + yaSSL::ysDelete(ptr_); + #else + TaoCrypt::tcDelete(ptr_); + #endif } public: - auto_ptr(T* p, Deletor d) : ptr_(p), del_(d) {} - - explicit auto_ptr(Deletor d) : ptr_(0), del_(d) {} + explicit auto_ptr(T* p = 0) : ptr_(p) {} ~auto_ptr() { @@ -71,14 +70,13 @@ public: } - auto_ptr(auto_ptr& other) : ptr_(other.release()), del_(other.del_) {} + auto_ptr(auto_ptr& other) : ptr_(other.release()) {} auto_ptr& operator=(auto_ptr& that) { if (this != &that) { Destroy(); ptr_ = that.release(); - del_ = that.del_; } return *this; } @@ -115,14 +113,13 @@ public: } // auto_ptr_ref conversions - auto_ptr(auto_ptr_ref ref) : ptr_(ref.ptr_), del_(ref.del_) {} + auto_ptr(auto_ptr_ref ref) : ptr_(ref.ptr_) {} auto_ptr& operator=(auto_ptr_ref ref) { if (this->ptr_ != ref.ptr_) { Destroy(); ptr_ = ref.ptr_; - del_ = ref.del_; } return *this; } @@ -130,13 +127,13 @@ public: template operator auto_ptr() { - return auto_ptr(this->release(), this->del_); + return auto_ptr(this->release()); } template operator auto_ptr_ref() { - return auto_ptr_ref(this->release(), this->del_); + return auto_ptr_ref(this->release()); } }; diff --git a/extra/yassl/taocrypt/mySTL/memory_array.hpp b/extra/yassl/taocrypt/mySTL/memory_array.hpp new file mode 100644 index 00000000000..f089c69f815 --- /dev/null +++ b/extra/yassl/taocrypt/mySTL/memory_array.hpp @@ -0,0 +1,142 @@ +/* mySTL memory_array.hpp + * + * Copyright (C) 2003 Sawtooth Consulting Ltd. + * + * This file is part of yaSSL. 
+ * + * yaSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * There are special exceptions to the terms and conditions of the GPL as it + * is applied to yaSSL. View the full text of the exception in the file + * FLOSS-EXCEPTIONS in the directory of this software distribution. + * + * yaSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + + +/* mySTL memory_arry implements auto_array + * + */ + +#ifndef mySTL_MEMORY_ARRAY_HPP +#define mySTL_MEMORY_ARRAY_HPP + + +#ifdef _MSC_VER + // disable operator-> warning for builtins + #pragma warning(disable:4284) +#endif + + +namespace mySTL { + + +template +struct auto_array_ref { + T* ptr_; + explicit auto_array_ref(T* p) : ptr_(p) {} +}; + + +template +class auto_array { + T* ptr_; + + void Destroy() + { + #ifdef YASSL_LIB + yaSSL::ysArrayDelete(ptr_); + #else + TaoCrypt::tcArrayDelete(ptr_); + #endif + } +public: + explicit auto_array(T* p = 0) : ptr_(p) {} + + ~auto_array() + { + Destroy(); + } + + + auto_array(auto_array& other) : ptr_(other.release()) {} + + auto_array& operator=(auto_array& that) + { + if (this != &that) { + Destroy(); + ptr_ = that.release(); + } + return *this; + } + + + T* operator->() const + { + return ptr_; + } + + T& operator*() const + { + return *ptr_; + } + + T* get() const + { + return ptr_; + } + + T* release() + { + T* tmp = ptr_; + ptr_ = 0; + return tmp; + } + + void reset(T* p = 0) + { + if (ptr_ != p) { + Destroy(); + ptr_ = p; + } + } + + // auto_array_ref conversions + auto_array(auto_array_ref ref) : ptr_(ref.ptr_) {} + + auto_array& operator=(auto_array_ref ref) + { + if (this->ptr_ != ref.ptr_) { + Destroy(); + ptr_ = ref.ptr_; + } + return *this; + } + + template + operator auto_array() + { + return auto_array(this->release()); + } + + template + operator auto_array_ref() + { + return auto_array_ref(this->release()); + } +}; + + +} // namespace mySTL + +#endif // mySTL_MEMORY_ARRAY_HPP diff --git a/extra/yassl/mySTL/pair.hpp b/extra/yassl/taocrypt/mySTL/pair.hpp similarity index 100% rename from extra/yassl/mySTL/pair.hpp rename to extra/yassl/taocrypt/mySTL/pair.hpp diff --git a/extra/yassl/mySTL/stdexcept.hpp b/extra/yassl/taocrypt/mySTL/stdexcept.hpp similarity index 100% rename from extra/yassl/mySTL/stdexcept.hpp rename to extra/yassl/taocrypt/mySTL/stdexcept.hpp diff --git a/extra/yassl/mySTL/vector.hpp b/extra/yassl/taocrypt/mySTL/vector.hpp similarity index 94% rename from extra/yassl/mySTL/vector.hpp rename to extra/yassl/taocrypt/mySTL/vector.hpp index 6a412447b91..902e014f75b 100644 --- a/extra/yassl/mySTL/vector.hpp +++ b/extra/yassl/taocrypt/mySTL/vector.hpp @@ -34,7 +34,6 @@ #include "helpers.hpp" // construct, destory, fill, etc. 
#include "algorithm.hpp" // swap #include // assert -#include // malloc namespace mySTL { @@ -49,14 +48,15 @@ struct vector_base { vector_base() : start_(0), finish_(0), end_of_storage_(0) {} vector_base(size_t n) { - // Don't allow malloc(0), if n is 0 use 1 - start_ = static_cast(malloc((n ? n : 1) * sizeof(T))); - if (!start_) abort(); + start_ = GetArrayMemory(n); finish_ = start_; end_of_storage_ = start_ + n; } - ~vector_base() { if (start_) free(start_); } + ~vector_base() + { + FreeArrayMemory(start_); + } void Swap(vector_base& that) { @@ -71,6 +71,9 @@ struct vector_base { template class vector { public: + typedef T* iterator; + typedef const T* const_iterator; + vector() {} explicit vector(size_t n) : vec_(n) { diff --git a/extra/yassl/taocrypt/src/Makefile.am b/extra/yassl/taocrypt/src/Makefile.am index 1110ed335b8..6d02a625275 100644 --- a/extra/yassl/taocrypt/src/Makefile.am +++ b/extra/yassl/taocrypt/src/Makefile.am @@ -1,4 +1,4 @@ -INCLUDES = -I../include -I../../mySTL +INCLUDES = -I../include -I../mySTL noinst_LTLIBRARIES = libtaocrypt.la @@ -11,3 +11,7 @@ libtaocrypt_la_SOURCES = aes.cpp aestables.cpp algebra.cpp arc4.cpp \ libtaocrypt_la_CXXFLAGS = @yassl_taocrypt_extra_cxxflags@ -DYASSL_PURE_C EXTRA_DIST = $(wildcard ../include/*.hpp) + +# Don't update the files from bitkeeper +%::SCCS/s.% + diff --git a/extra/yassl/taocrypt/src/algebra.cpp b/extra/yassl/taocrypt/src/algebra.cpp index e9bc3fceac0..375cd6cd524 100644 --- a/extra/yassl/taocrypt/src/algebra.cpp +++ b/extra/yassl/taocrypt/src/algebra.cpp @@ -29,7 +29,10 @@ #include "runtime.hpp" #include "algebra.hpp" -#include "vector.hpp" // mySTL::vector (simple) +#include STL_VECTOR_FILE + + +namespace STL = STL_NAMESPACE; namespace TaoCrypt { @@ -82,7 +85,7 @@ const Integer& AbstractEuclideanDomain::Mod(const Element &a, const Integer& AbstractEuclideanDomain::Gcd(const Element &a, const Element &b) const { - mySTL::vector g(3); + STL::vector g(3); g[0]= b; g[1]= a; unsigned int i0=0, i1=1, i2=2; @@ -115,7 +118,7 @@ Integer AbstractGroup::CascadeScalarMultiply(const Element &x, const unsigned w = (expLen <= 46 ? 1 : (expLen <= 260 ? 
2 : 3)); const unsigned tableSize = 1< powerTable(tableSize << w); + STL::vector powerTable(tableSize << w); powerTable[1] = x; powerTable[tableSize] = y; @@ -240,8 +243,8 @@ struct WindowSlider void AbstractGroup::SimultaneousMultiply(Integer *results, const Integer &base, const Integer *expBegin, unsigned int expCount) const { - mySTL::vector > buckets(expCount); - mySTL::vector exponents; + STL::vector > buckets(expCount); + STL::vector exponents; exponents.reserve(expCount); unsigned int i; @@ -332,6 +335,8 @@ void AbstractRing::SimultaneousExponentiate(Integer *results, namespace mySTL { template TaoCrypt::WindowSlider* uninit_copy(TaoCrypt::WindowSlider*, TaoCrypt::WindowSlider*, TaoCrypt::WindowSlider*); template void destroy(TaoCrypt::WindowSlider*, TaoCrypt::WindowSlider*); +template TaoCrypt::WindowSlider* GetArrayMemory(size_t); +template void FreeArrayMemory(TaoCrypt::WindowSlider*); } #endif diff --git a/extra/yassl/taocrypt/src/asn.cpp b/extra/yassl/taocrypt/src/asn.cpp index 45fb1e60a0c..3dc3638d85f 100644 --- a/extra/yassl/taocrypt/src/asn.cpp +++ b/extra/yassl/taocrypt/src/asn.cpp @@ -38,7 +38,8 @@ #include "sha.hpp" #include "coding.hpp" #include // gmtime(); -#include "memory.hpp" // mySTL::auto_ptr +#include "memory.hpp" // some auto_ptr don't have reset, also need auto_array + namespace TaoCrypt { @@ -202,13 +203,13 @@ void PublicKey::SetKey(const byte* k) void PublicKey::AddToEnd(const byte* data, word32 len) { - mySTL::auto_ptr tmp(NEW_TC byte[sz_ + len], tcArrayDelete); + mySTL::auto_array tmp(NEW_TC byte[sz_ + len]); memcpy(tmp.get(), key_, sz_); memcpy(tmp.get() + sz_, data, len); byte* del = 0; - mySTL::swap(del, key_); + STL::swap(del, key_); tcArrayDelete(del); key_ = tmp.release(); @@ -856,7 +857,7 @@ bool CertDecoder::ValidateSignature(SignerList* signers) bool CertDecoder::ConfirmSignature(Source& pub) { HashType ht; - mySTL::auto_ptr hasher(tcDelete); + mySTL::auto_ptr hasher; if (signatureOID_ == MD5wRSA) { hasher.reset(NEW_TC MD5); diff --git a/extra/yassl/taocrypt/src/blowfish.cpp b/extra/yassl/taocrypt/src/blowfish.cpp index cc929cd7d41..40ae1a17e6c 100644 --- a/extra/yassl/taocrypt/src/blowfish.cpp +++ b/extra/yassl/taocrypt/src/blowfish.cpp @@ -133,7 +133,7 @@ void Blowfish::SetKey(const byte* key_string, word32 keylength, CipherDir dir) if (dir==DECRYPTION) for (i=0; i<(ROUNDS+2)/2; i++) - mySTL::swap(pbox_[i], pbox_[ROUNDS+1-i]); + STL::swap(pbox_[i], pbox_[ROUNDS+1-i]); } diff --git a/extra/yassl/taocrypt/src/crypto.cpp b/extra/yassl/taocrypt/src/crypto.cpp new file mode 100644 index 00000000000..95238100f5d --- /dev/null +++ b/extra/yassl/taocrypt/src/crypto.cpp @@ -0,0 +1,39 @@ +/* crypto.cpp + * + * Copyright (C) 2003 Sawtooth Consulting Ltd. + * + * This file is part of yaSSL. + * + * yaSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * There are special exceptions to the terms and conditions of the GPL as it + * is applied to yaSSL. View the full text of the exception in the file + * FLOSS-EXCEPTIONS in the directory of this software distribution. + * + * yaSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + +/* put features that other apps expect from OpenSSL type crypto */ + + + +extern "C" { + + // for libcurl configure test, these are the signatures they use + // locking handled internally by library + char CRYPTO_lock() { return 0;} + char CRYPTO_add_lock() { return 0;} +} // extern "C" + + + diff --git a/extra/yassl/taocrypt/src/des.cpp b/extra/yassl/taocrypt/src/des.cpp index 054c8c2eb78..2628e142bae 100644 --- a/extra/yassl/taocrypt/src/des.cpp +++ b/extra/yassl/taocrypt/src/des.cpp @@ -34,7 +34,10 @@ #include "runtime.hpp" #include "des.hpp" -#include "algorithm.hpp" // mySTL::swap +#include STL_ALGORITHM_FILE + + +namespace STL = STL_NAMESPACE; #if defined(TAOCRYPT_X86ASM_AVAILABLE) && defined(TAO_ASM) @@ -265,8 +268,8 @@ void BasicDES::SetKey(const byte* key, word32 /*length*/, CipherDir dir) // reverse key schedule order if (dir == DECRYPTION) for (i = 0; i < 16; i += 2) { - mySTL::swap(k_[i], k_[32 - 2 - i]); - mySTL::swap(k_[i+1], k_[32 - 1 - i]); + STL::swap(k_[i], k_[32 - 2 - i]); + STL::swap(k_[i+1], k_[32 - 1 - i]); } } diff --git a/extra/yassl/taocrypt/src/dh.cpp b/extra/yassl/taocrypt/src/dh.cpp index aec7122b70b..671a20d0d78 100644 --- a/extra/yassl/taocrypt/src/dh.cpp +++ b/extra/yassl/taocrypt/src/dh.cpp @@ -61,7 +61,7 @@ void DH::GenerateKeyPair(RandomNumberGenerator& rng, byte* priv, byte* pub) // Generate private value void DH::GeneratePrivate(RandomNumberGenerator& rng, byte* priv) { - Integer x(rng, Integer::One(), mySTL::min(p_ - 1, + Integer x(rng, Integer::One(), min(p_ - 1, Integer::Power2(2*DiscreteLogWorkFactor(p_.BitCount())) ) ); x.Encode(priv, p_.ByteCount()); } diff --git a/extra/yassl/taocrypt/src/integer.cpp b/extra/yassl/taocrypt/src/integer.cpp index 823c0c5e193..500160cfe37 100644 --- a/extra/yassl/taocrypt/src/integer.cpp +++ b/extra/yassl/taocrypt/src/integer.cpp @@ -1094,7 +1094,7 @@ static bool IsP4() word32 cpuid[4]; CpuId(0, cpuid); - mySTL::swap(cpuid[2], cpuid[3]); + STL::swap(cpuid[2], cpuid[3]); if (memcmp(cpuid+1, "GenuineIntel", 12) != 0) return false; @@ -2384,8 +2384,8 @@ void AsymmetricMultiply(word *R, word *T, const word *A, unsigned int NA, if (NA > NB) { - mySTL::swap(A, B); - mySTL::swap(NA, NB); + STL::swap(A, B); + STL::swap(NA, NB); } assert(NB % NA == 0); @@ -2521,8 +2521,8 @@ unsigned int AlmostInverse(word *R, word *T, const word *A, unsigned int NA, if (Compare(f, g, fgLen)==-1) { - mySTL::swap(f, g); - mySTL::swap(b, c); + STL::swap(f, g); + STL::swap(b, c); s++; } @@ -3162,7 +3162,7 @@ signed long Integer::ConvertToLong() const void Integer::Swap(Integer& a) { reg_.Swap(a.reg_); - mySTL::swap(sign_, a.sign_); + STL::swap(sign_, a.sign_); } diff --git a/extra/yassl/taocrypt/src/md4.cpp b/extra/yassl/taocrypt/src/md4.cpp index 6012330cba3..0dee8bf40cb 100644 --- a/extra/yassl/taocrypt/src/md4.cpp +++ b/extra/yassl/taocrypt/src/md4.cpp @@ -28,9 +28,11 @@ #include "runtime.hpp" #include "md4.hpp" -#include "algorithm.hpp" // mySTL::swap +#include STL_ALGORITHM_FILE +namespace STL = STL_NAMESPACE; + namespace TaoCrypt { @@ -69,9 +71,9 @@ MD4& MD4::operator= (const MD4& that) void MD4::Swap(MD4& other) { - mySTL::swap(loLen_, other.loLen_); - mySTL::swap(hiLen_, other.hiLen_); - mySTL::swap(buffLen_, other.buffLen_); + STL::swap(loLen_, other.loLen_); + STL::swap(hiLen_, other.hiLen_); + STL::swap(buffLen_, 
other.buffLen_); memcpy(digest_, other.digest_, DIGEST_SIZE); memcpy(buffer_, other.buffer_, BLOCK_SIZE); diff --git a/extra/yassl/taocrypt/src/md5.cpp b/extra/yassl/taocrypt/src/md5.cpp index f7b0b1ee2dc..2bddc7fe308 100644 --- a/extra/yassl/taocrypt/src/md5.cpp +++ b/extra/yassl/taocrypt/src/md5.cpp @@ -28,7 +28,10 @@ #include "runtime.hpp" #include "md5.hpp" -#include "algorithm.hpp" // mySTL::swap +#include STL_ALGORITHM_FILE + + +namespace STL = STL_NAMESPACE; #if defined(TAOCRYPT_X86ASM_AVAILABLE) && defined(TAO_ASM) @@ -72,9 +75,9 @@ MD5& MD5::operator= (const MD5& that) void MD5::Swap(MD5& other) { - mySTL::swap(loLen_, other.loLen_); - mySTL::swap(hiLen_, other.hiLen_); - mySTL::swap(buffLen_, other.buffLen_); + STL::swap(loLen_, other.loLen_); + STL::swap(hiLen_, other.hiLen_); + STL::swap(buffLen_, other.buffLen_); memcpy(digest_, other.digest_, DIGEST_SIZE); memcpy(buffer_, other.buffer_, BLOCK_SIZE); diff --git a/extra/yassl/taocrypt/src/misc.cpp b/extra/yassl/taocrypt/src/misc.cpp index a33ca4fa432..084a263a4ae 100644 --- a/extra/yassl/taocrypt/src/misc.cpp +++ b/extra/yassl/taocrypt/src/misc.cpp @@ -29,15 +29,6 @@ #include "runtime.hpp" #include "misc.hpp" -#if !defined(YASSL_MYSQL_COMPATIBLE) -extern "C" { - - // for libcurl configure test, these are the signatures they use - // locking handled internally by library - char CRYPTO_lock() { return 0;} - char CRYPTO_add_lock() { return 0;} -} // extern "C" -#endif #ifdef YASSL_PURE_C diff --git a/extra/yassl/taocrypt/src/random.cpp b/extra/yassl/taocrypt/src/random.cpp index 2ee1e57a663..c7bb6ae9549 100644 --- a/extra/yassl/taocrypt/src/random.cpp +++ b/extra/yassl/taocrypt/src/random.cpp @@ -31,7 +31,7 @@ #include "runtime.hpp" #include "random.hpp" #include - +#include #if defined(_WIN32) #define _WIN32_WINNT 0x0400 @@ -74,6 +74,8 @@ byte RandomNumberGenerator::GenerateByte() #if defined(_WIN32) +/* The OS_Seed implementation for windows */ + OS_Seed::OS_Seed() { if(!CryptAcquireContext(&handle_, 0, 0, PROV_RSA_FULL, @@ -95,8 +97,70 @@ void OS_Seed::GenerateSeed(byte* output, word32 sz) } -#else // _WIN32 +#elif defined(__NETWARE__) +/* The OS_Seed implementation for Netware */ + +#include +#include + +// Loop on high resulution Read Time Stamp Counter +static void NetwareSeed(byte* output, word32 sz) +{ + word32 tscResult; + + for (word32 i = 0; i < sz; i += sizeof(tscResult)) { + #if defined(__GNUC__) + asm volatile("rdtsc" : "=A" (tscResult)); + #else + #ifdef __MWERKS__ + asm { + #else + __asm { + #endif + rdtsc + mov tscResult, eax + } + #endif + + memcpy(output, &tscResult, sizeof(tscResult)); + output += sizeof(tscResult); + + NXThreadYield(); // induce more variance + } +} + + +OS_Seed::OS_Seed() +{ +} + + +OS_Seed::~OS_Seed() +{ +} + + +void OS_Seed::GenerateSeed(byte* output, word32 sz) +{ + /* + Try to use NXSeedRandom as it will generate a strong + seed using the onboard 82802 chip + + As it's not always supported, fallback to default + implementation if an error is returned + */ + + if (NXSeedRandom(sz, output) != 0) + { + NetwareSeed(output, sz); + } +} + + +#else + +/* The default OS_Seed implementation */ OS_Seed::OS_Seed() { diff --git a/extra/yassl/taocrypt/src/ripemd.cpp b/extra/yassl/taocrypt/src/ripemd.cpp index c791189544f..03c09edde84 100644 --- a/extra/yassl/taocrypt/src/ripemd.cpp +++ b/extra/yassl/taocrypt/src/ripemd.cpp @@ -28,9 +28,11 @@ #include "runtime.hpp" #include "ripemd.hpp" -#include "algorithm.hpp" // mySTL::swap +#include STL_ALGORITHM_FILE +namespace STL = STL_NAMESPACE; + #if 
defined(TAOCRYPT_X86ASM_AVAILABLE) && defined(TAO_ASM) #define DO_RIPEMD_ASM @@ -75,9 +77,9 @@ RIPEMD160& RIPEMD160::operator= (const RIPEMD160& that) void RIPEMD160::Swap(RIPEMD160& other) { - mySTL::swap(loLen_, other.loLen_); - mySTL::swap(hiLen_, other.hiLen_); - mySTL::swap(buffLen_, other.buffLen_); + STL::swap(loLen_, other.loLen_); + STL::swap(hiLen_, other.hiLen_); + STL::swap(buffLen_, other.buffLen_); memcpy(digest_, other.digest_, DIGEST_SIZE); memcpy(buffer_, other.buffer_, BLOCK_SIZE); diff --git a/extra/yassl/taocrypt/src/sha.cpp b/extra/yassl/taocrypt/src/sha.cpp index b877e2b7857..280d42fb3d4 100644 --- a/extra/yassl/taocrypt/src/sha.cpp +++ b/extra/yassl/taocrypt/src/sha.cpp @@ -27,8 +27,11 @@ #include "runtime.hpp" #include -#include "algorithm.hpp" // mySTL::swap #include "sha.hpp" +#include STL_ALGORITHM_FILE + + +namespace STL = STL_NAMESPACE; #if defined(TAOCRYPT_X86ASM_AVAILABLE) && defined(TAO_ASM) @@ -96,9 +99,9 @@ SHA& SHA::operator= (const SHA& that) void SHA::Swap(SHA& other) { - mySTL::swap(loLen_, other.loLen_); - mySTL::swap(hiLen_, other.hiLen_); - mySTL::swap(buffLen_, other.buffLen_); + STL::swap(loLen_, other.loLen_); + STL::swap(hiLen_, other.hiLen_); + STL::swap(buffLen_, other.buffLen_); memcpy(digest_, other.digest_, DIGEST_SIZE); memcpy(buffer_, other.buffer_, BLOCK_SIZE); diff --git a/extra/yassl/taocrypt/src/template_instnt.cpp b/extra/yassl/taocrypt/src/template_instnt.cpp index 512c5cf9dce..db1ae645e09 100644 --- a/extra/yassl/taocrypt/src/template_instnt.cpp +++ b/extra/yassl/taocrypt/src/template_instnt.cpp @@ -77,6 +77,13 @@ template void destroy*>(vector*, ve template TaoCrypt::Integer* uninit_copy(TaoCrypt::Integer*, TaoCrypt::Integer*, TaoCrypt::Integer*); template TaoCrypt::Integer* uninit_fill_n(TaoCrypt::Integer*, size_t, TaoCrypt::Integer const&); template void destroy(TaoCrypt::Integer*, TaoCrypt::Integer*); +template TaoCrypt::byte* GetArrayMemory(size_t); +template void FreeArrayMemory(TaoCrypt::byte*); +template TaoCrypt::Integer* GetArrayMemory(size_t); +template void FreeArrayMemory(TaoCrypt::Integer*); +template vector* GetArrayMemory >(size_t); +template void FreeArrayMemory >(vector*); +template void FreeArrayMemory(void*); } #endif diff --git a/extra/yassl/taocrypt/test/Makefile.am b/extra/yassl/taocrypt/test/Makefile.am index 0b238f1e057..988d00c7bef 100644 --- a/extra/yassl/taocrypt/test/Makefile.am +++ b/extra/yassl/taocrypt/test/Makefile.am @@ -1,8 +1,6 @@ -INCLUDES = -I../include -I../../mySTL +INCLUDES = -I../include -I../mySTL bin_PROGRAMS = test test_SOURCES = test.cpp -test_LDFLAGS = -L../src -test_LDADD = -ltaocrypt -test_DEPENDENCIES = ../src/libtaocrypt.la +test_LDADD = $(top_builddir)/extra/yassl/taocrypt/src/libtaocrypt.la test_CXXFLAGS = -DYASSL_PURE_C EXTRA_DIST = make.bat diff --git a/extra/yassl/testsuite/Makefile.am b/extra/yassl/testsuite/Makefile.am index 2ae46a1b409..e8abffd6bb0 100644 --- a/extra/yassl/testsuite/Makefile.am +++ b/extra/yassl/testsuite/Makefile.am @@ -1,11 +1,14 @@ -INCLUDES = -I../include -I../taocrypt/include -I../mySTL +INCLUDES = -I../include -I../taocrypt/include -I../taocrypt/mySTL bin_PROGRAMS = testsuite testsuite_SOURCES = testsuite.cpp ../taocrypt/test/test.cpp \ ../examples/client/client.cpp ../examples/server/server.cpp \ ../examples/echoclient/echoclient.cpp \ ../examples/echoserver/echoserver.cpp -testsuite_LDFLAGS = -L../src/ -L../taocrypt/src testsuite_CXXFLAGS = -DYASSL_PURE_C -DYASSL_PREFIX -DNO_MAIN_DRIVER -testsuite_LDADD = -lyassl -ltaocrypt -testsuite_DEPENDENCIES = 
../src/libyassl.la ../taocrypt/src/libtaocrypt.la +testsuite_LDADD = $(top_builddir)/extra/yassl/src/libyassl.la \ + $(top_builddir)/extra/yassl/taocrypt/src/libtaocrypt.la EXTRA_DIST = testsuite.dsp test.hpp input quit make.bat + +# Don't update the files from bitkeeper +%::SCCS/s.% + diff --git a/extra/yassl/testsuite/test.hpp b/extra/yassl/testsuite/test.hpp index c80e3ad23da..0266c802657 100644 --- a/extra/yassl/testsuite/test.hpp +++ b/extra/yassl/testsuite/test.hpp @@ -27,22 +27,25 @@ #endif /* _WIN32 */ -#if !defined(_SOCKLEN_T) && (defined(__MACH__) || defined(_WIN32)) +#if !defined(_SOCKLEN_T) && (defined(_WIN32) || defined(__NETWARE__)) typedef int socklen_t; #endif +// Check type of third arg to accept +#if defined(__hpux) // HPUX doesn't use socklent_t for third parameter to accept -#if !defined(__hpux) - typedef socklen_t* ACCEPT_THIRD_T; -#else typedef int* ACCEPT_THIRD_T; - -// HPUX does not define _POSIX_THREADS as it's not _fully_ implemented -#ifndef _POSIX_THREADS -#define _POSIX_THREADS +#else + typedef socklen_t* ACCEPT_THIRD_T; #endif + +// Check if _POSIX_THREADS should be forced +#if !defined(_POSIX_THREADS) && (defined(__NETWARE__) || defined(__hpux)) +// HPUX does not define _POSIX_THREADS as it's not _fully_ implemented +// Netware supports pthreads but does not announce it +#define _POSIX_THREADS #endif @@ -148,6 +151,13 @@ inline void err_sys(const char* msg) } +static int PasswordCallBack(char* passwd, int sz, int rw, void* userdata) +{ + strncpy(passwd, "12345678", sz); + return 8; +} + + inline void store_ca(SSL_CTX* ctx) { // To allow testing from serveral dirs @@ -168,6 +178,7 @@ inline void store_ca(SSL_CTX* ctx) inline void set_certs(SSL_CTX* ctx) { store_ca(ctx); + SSL_CTX_set_default_passwd_cb(ctx, PasswordCallBack); // To allow testing from serveral dirs if (SSL_CTX_use_certificate_file(ctx, cert, SSL_FILETYPE_PEM) @@ -193,6 +204,7 @@ inline void set_certs(SSL_CTX* ctx) inline void set_serverCerts(SSL_CTX* ctx) { store_ca(ctx); + SSL_CTX_set_default_passwd_cb(ctx, PasswordCallBack); // To allow testing from serveral dirs if (SSL_CTX_use_certificate_file(ctx, svrCert, SSL_FILETYPE_PEM) @@ -258,13 +270,27 @@ inline void tcp_socket(SOCKET_T& sockfd, sockaddr_in& addr) } +inline void tcp_close(SOCKET_T& sockfd) +{ +#ifdef _WIN32 + closesocket(sockfd); +#else + close(sockfd); +#endif + sockfd = -1; +} + + inline void tcp_connect(SOCKET_T& sockfd) { sockaddr_in addr; tcp_socket(sockfd, addr); if (connect(sockfd, (const sockaddr*)&addr, sizeof(addr)) != 0) + { + tcp_close(sockfd); err_sys("tcp connect failed"); + } } @@ -274,13 +300,19 @@ inline void tcp_listen(SOCKET_T& sockfd) tcp_socket(sockfd, addr); if (bind(sockfd, (const sockaddr*)&addr, sizeof(addr)) != 0) + { + tcp_close(sockfd); err_sys("tcp bind failed"); + } if (listen(sockfd, 3) != 0) + { + tcp_close(sockfd); err_sys("tcp listen failed"); + } } -inline void tcp_accept(SOCKET_T& sockfd, int& clientfd, func_args& args) +inline void tcp_accept(SOCKET_T& sockfd, SOCKET_T& clientfd, func_args& args) { tcp_listen(sockfd); @@ -299,7 +331,10 @@ inline void tcp_accept(SOCKET_T& sockfd, int& clientfd, func_args& args) clientfd = accept(sockfd, (sockaddr*)&client, (ACCEPT_THIRD_T)&client_len); if (clientfd == -1) + { + tcp_close(sockfd); err_sys("tcp accept failed"); + } } diff --git a/heap/CMakeLists.txt b/heap/CMakeLists.txt deleted file mode 100755 index db5fb8b2981..00000000000 --- a/heap/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC 
-DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include) -ADD_LIBRARY(heap _check.c _rectest.c hp_block.c hp_clear.c hp_close.c hp_create.c - hp_delete.c hp_extra.c hp_hash.c hp_info.c hp_open.c hp_panic.c - hp_rename.c hp_rfirst.c hp_rkey.c hp_rlast.c hp_rnext.c hp_rprev.c - hp_rrnd.c hp_rsame.c hp_scan.c hp_static.c hp_update.c hp_write.c) diff --git a/include/config-win.h b/include/config-win.h index 00d2fa0dea6..5290cf83896 100644 --- a/include/config-win.h +++ b/include/config-win.h @@ -25,7 +25,6 @@ functions */ #if defined(_MSC_VER) && _MSC_VER >= 1400 /* Avoid endless warnings about sprintf() etc. being unsafe. */ #define _CRT_SECURE_NO_DEPRECATE 1 -#define _USE_32BIT_TIME_T 1 /* force time_t to be 32 bit */ #endif #include diff --git a/include/heap.h b/include/heap.h index 985b20f9dc9..69d1e441a4b 100644 --- a/include/heap.h +++ b/include/heap.h @@ -225,6 +225,7 @@ extern int heap_indexes_are_disabled(HP_INFO *info); extern void heap_update_auto_increment(HP_INFO *info, const byte *record); ha_rows hp_rb_records_in_range(HP_INFO *info, int inx, key_range *min_key, key_range *max_key); +int hp_panic(enum ha_panic_function flag); int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, uint key_len, enum ha_rkey_function find_flag); extern gptr heap_find(HP_INFO *info,int inx,const byte *key); diff --git a/include/m_ctype.h b/include/m_ctype.h index 40cadad0017..deecf560170 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -196,7 +196,7 @@ typedef struct my_charset_handler_st /* Charset dependant snprintf() */ int (*snprintf)(struct charset_info_st *, char *to, uint n, const char *fmt, - ...); + ...) ATTRIBUTE_FORMAT(printf, 4, 5); int (*long10_to_str)(struct charset_info_st *, char *to, uint n, int radix, long int val); int (*longlong10_to_str)(struct charset_info_st *, char *to, uint n, @@ -217,6 +217,9 @@ typedef struct my_charset_handler_st int *err); longlong (*strtoll10)(struct charset_info_st *cs, const char *nptr, char **endptr, int *error); + ulonglong (*strntoull10rnd)(struct charset_info_st *cs, + const char *str, uint length, int unsigned_fl, + char **endptr, int *error); ulong (*scan)(struct charset_info_st *, const char *b, const char *e, int sq); } MY_CHARSET_HANDLER; @@ -335,7 +338,8 @@ int my_mb_ctype_mb(CHARSET_INFO *,int *, const uchar *,const uchar *); ulong my_scan_8bit(CHARSET_INFO *cs, const char *b, const char *e, int sq); int my_snprintf_8bit(struct charset_info_st *, char *to, uint n, - const char *fmt, ...); + const char *fmt, ...) 
+ ATTRIBUTE_FORMAT(printf, 4, 5); long my_strntol_8bit(CHARSET_INFO *, const char *s, uint l, int base, char **e, int *err); @@ -357,6 +361,13 @@ longlong my_strtoll10_8bit(CHARSET_INFO *cs, longlong my_strtoll10_ucs2(CHARSET_INFO *cs, const char *nptr, char **endptr, int *error); +ulonglong my_strntoull10rnd_8bit(CHARSET_INFO *cs, + const char *str, uint length, int unsigned_fl, + char **endptr, int *error); +ulonglong my_strntoull10rnd_ucs2(CHARSET_INFO *cs, + const char *str, uint length, int unsigned_fl, + char **endptr, int *error); + void my_fill_8bit(CHARSET_INFO *cs, char* to, uint l, int fill); my_bool my_like_range_simple(CHARSET_INFO *cs, diff --git a/include/m_string.h b/include/m_string.h index e009447c192..caccc59abd5 100644 --- a/include/m_string.h +++ b/include/m_string.h @@ -235,7 +235,8 @@ extern ulonglong strtoull(const char *str, char **ptr, int base); extern int my_vsnprintf( char *str, size_t n, const char *format, va_list ap ); -extern int my_snprintf(char* to, size_t n, const char* fmt, ...); +extern int my_snprintf(char *to, size_t n, const char *fmt, ...) + ATTRIBUTE_FORMAT(printf, 3, 4); #if defined(__cplusplus) } diff --git a/include/my_base.h b/include/my_base.h index 3d1afed3949..774ff4c8cd5 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -372,7 +372,9 @@ enum ha_base_keytype { #define HA_ERR_TABLE_NEEDS_UPGRADE 164 /* The table changed in storage engine */ #define HA_ERR_TABLE_READONLY 165 /* The table is not writable */ -#define HA_ERR_LAST 165 /*Copy last error nr.*/ +#define HA_ERR_AUTOINC_READ_FAILED 166/* Failed to get the next autoinc value */ +#define HA_ERR_AUTOINC_ERANGE 167 /* Failed to set the row autoinc value */ +#define HA_ERR_LAST 167 /*Copy last error nr.*/ /* Add error numbers before HA_ERR_LAST and change it accordingly. */ #define HA_ERR_ERRORS (HA_ERR_LAST - HA_ERR_FIRST + 1) diff --git a/include/my_global.h b/include/my_global.h index 539a2ea644f..579379e7506 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -210,7 +210,7 @@ /* Fix problem with S_ISLNK() on Linux */ -#if defined(TARGET_OS_LINUX) +#if defined(TARGET_OS_LINUX) || defined(__GLIBC__) #undef _GNU_SOURCE #define _GNU_SOURCE 1 #endif @@ -546,12 +546,40 @@ typedef unsigned short ushort; #define function_volatile volatile #define my_reinterpret_cast(A) reinterpret_cast #define my_const_cast(A) const_cast +# ifndef GCC_VERSION +# define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) +# endif #elif !defined(my_reinterpret_cast) #define my_reinterpret_cast(A) (A) #define my_const_cast(A) (A) #endif -#if !defined(__attribute__) && (defined(__cplusplus) || !defined(__GNUC__) || __GNUC__ == 2 && __GNUC_MINOR__ < 8) -#define __attribute__(A) + +/* + Disable __attribute__() on gcc < 2.7, g++ < 3.4, and non-gcc compilers. + Some forms of __attribute__ are actually supported in earlier versions of + g++, but we just disable them all because we only use them to generate + compilation warnings. +*/ +#ifndef __attribute__ +# if !defined(__GNUC__) +# define __attribute__(A) +# elif defined (__QNXNTO__) + /* qcc defines GNUC */ +# define __attribute__(A) +# elif GCC_VERSION < 2008 +# define __attribute__(A) +# elif defined(__cplusplus) && GCC_VERSION < 3004 +# define __attribute__(A) +# endif +#endif + +/* + __attribute__((format(...))) is only supported in gcc >= 2.8 and g++ >= 3.4 + But that's already covered by the __attribute__ tests above, so this is + just a convenience macro. 
+*/ +#ifndef ATTRIBUTE_FORMAT +# define ATTRIBUTE_FORMAT(style, m, n) __attribute__((format(style, m, n))) #endif /* diff --git a/include/my_net.h b/include/my_net.h index b26e525016b..678025b31f4 100644 --- a/include/my_net.h +++ b/include/my_net.h @@ -55,6 +55,14 @@ C_MODE_START #if defined(__WIN__) #define O_NONBLOCK 1 /* For emulation of fcntl() */ + +/* + SHUT_RDWR is called SD_BOTH in windows and + is defined to 2 in winsock2.h + #define SD_BOTH 0x02 +*/ +#define SHUT_RDWR 0x02 + #endif /* diff --git a/include/my_sys.h b/include/my_sys.h index 4b31f6bcd2b..115183bf9c2 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -623,8 +623,8 @@ extern int my_chsize(File fd,my_off_t newlength, int filler, myf MyFlags); extern int my_sync(File fd, myf my_flags); extern int my_error _VARARGS((int nr,myf MyFlags, ...)); extern int my_printf_error _VARARGS((uint my_err, const char *format, - myf MyFlags, ...) - __attribute__ ((format (printf, 2, 4)))); + myf MyFlags, ...)) + ATTRIBUTE_FORMAT(printf, 2, 4); extern int my_error_register(const char **errmsgs, int first, int last); extern const char **my_error_unregister(int first, int last); extern int my_message(uint my_err, const char *str,myf MyFlags); diff --git a/include/my_time.h b/include/my_time.h index 3025b98a9c4..d0f2fc323d8 100644 --- a/include/my_time.h +++ b/include/my_time.h @@ -33,7 +33,12 @@ extern uchar days_in_month[]; Portable time_t replacement. Should be signed and hold seconds for 1902-2038 range. */ +#if defined(_WIN64) || defined(WIN64) +/* on Win64 long is still 4 bytes (not 8!) */ +typedef LONG64 my_time_t; +#else typedef long my_time_t; +#endif #define MY_TIME_T_MAX LONG_MAX #define MY_TIME_T_MIN LONG_MIN diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index 417e949e83f..22cd64a52c4 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -31,6 +31,15 @@ #define MYSQL_FTPARSER_PLUGIN 2 /* Full-text parser plugin */ #define MYSQL_MAX_PLUGIN_TYPE_NUM 3 /* The number of plugin types */ +/* We use the following strings to define licenses for plugins */ +#define PLUGIN_LICENSE_PROPRIETARY 0 +#define PLUGIN_LICENSE_GPL 1 +#define PLUGIN_LICENSE_BSD 2 + +#define PLUGIN_LICENSE_PROPRIETARY_STRING "PROPRIETARY" +#define PLUGIN_LICENSE_GPL_STRING "GPL" +#define PLUGIN_LICENSE_BSD_STRING "BSD" + /* Macros for beginning and ending plugin declarations. 
Between mysql_declare_plugin and mysql_declare_plugin_end there should @@ -88,8 +97,9 @@ struct st_mysql_plugin const char *name; /* plugin name */ const char *author; /* plugin author (for SHOW PLUGINS) */ const char *descr; /* general descriptive text (for SHOW PLUGINS ) */ - int (*init)(void); /* the function to invoke when plugin is loaded */ - int (*deinit)(void); /* the function to invoke when plugin is unloaded */ + int license; /* the plugin type (a MYSQL_XXX_PLUGIN value) */ + int (*init)(void *); /* the function to invoke when plugin is loaded */ + int (*deinit)(void *);/* the function to invoke when plugin is unloaded */ unsigned int version; /* plugin version (for SHOW PLUGINS) */ struct st_mysql_show_var *status_vars; void * __reserved1; /* placeholder for system variables */ @@ -301,7 +311,6 @@ struct st_mysql_ftparser struct st_mysql_storage_engine { int interface_version; - struct handlerton *handlerton; }; #endif diff --git a/include/mysql_com.h b/include/mysql_com.h index d0bf1b1fec0..156d68efdec 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -26,9 +26,6 @@ #define USERNAME_LENGTH 16 #define SERVER_VERSION_LENGTH 60 #define SQLSTATE_LENGTH 5 -#define SYSTEM_CHARSET_MBMAXLEN 3 -#define NAME_BYTE_LEN NAME_LEN*SYSTEM_CHARSET_MBMAXLEN -#define USERNAME_BYTE_LENGTH USERNAME_LENGTH*SYSTEM_CHARSET_MBMAXLEN /* USER_HOST_BUFF_SIZE -- length of string buffer, that is enough to contain @@ -36,7 +33,7 @@ MySQL standard format: user_name_part@host_name_part\0 */ -#define USER_HOST_BUFF_SIZE HOSTNAME_LENGTH + USERNAME_BYTE_LENGTH + 2 +#define USER_HOST_BUFF_SIZE HOSTNAME_LENGTH + USERNAME_LENGTH + 2 #define LOCAL_HOST "localhost" #define LOCAL_HOST_NAMEDPIPE "." @@ -141,11 +138,11 @@ enum enum_server_command #define CLIENT_TRANSACTIONS 8192 /* Client knows about transactions */ #define CLIENT_RESERVED 16384 /* Old flag for 4.1 protocol */ #define CLIENT_SECURE_CONNECTION 32768 /* New 4.1 authentication */ -#define CLIENT_MULTI_STATEMENTS (((ulong) 1) << 16) /* Enable/disable multi-stmt support */ -#define CLIENT_MULTI_RESULTS (((ulong) 1) << 17) /* Enable/disable multi-results */ +#define CLIENT_MULTI_STATEMENTS (1UL << 16) /* Enable/disable multi-stmt support */ +#define CLIENT_MULTI_RESULTS (1UL << 17) /* Enable/disable multi-results */ -#define CLIENT_SSL_VERIFY_SERVER_CERT (((ulong) 1) << 30) -#define CLIENT_REMEMBER_OPTIONS (((ulong) 1) << 31) +#define CLIENT_SSL_VERIFY_SERVER_CERT (1UL << 30) +#define CLIENT_REMEMBER_OPTIONS (1UL << 31) #define SERVER_STATUS_IN_TRANS 1 /* Transaction has started */ #define SERVER_STATUS_AUTOCOMMIT 2 /* Server in auto_commit mode */ @@ -182,7 +179,7 @@ typedef struct st_vio Vio; #define MAX_INT_WIDTH 10 /* Max width for a LONG w.o. 
sign */ #define MAX_BIGINT_WIDTH 20 /* Max width for a LONGLONG */ #define MAX_CHAR_WIDTH 255 /* Max length for a CHAR colum */ -#define MAX_BLOB_WIDTH 8192 /* Default width for blob */ +#define MAX_BLOB_WIDTH 16777216 /* Default width for blob */ typedef struct st_net { #if !defined(CHECK_EMBEDDED_DIFFERENCES) || !defined(EMBEDDED_LIBRARY) diff --git a/innobase/CMakeLists.txt b/innobase/CMakeLists.txt deleted file mode 100755 index f9661963d56..00000000000 --- a/innobase/CMakeLists.txt +++ /dev/null @@ -1,35 +0,0 @@ -#SET(CMAKE_CXX_FLAGS_DEBUG "-DSAFEMALLOC -DSAFE_MUTEX") -#SET(CMAKE_C_FLAGS_DEBUG "-DSAFEMALLOC -DSAFE_MUTEX") -ADD_DEFINITIONS(-DMYSQL_SERVER -D_WIN32 -DWIN32 -D_LIB) - -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include include) -ADD_LIBRARY(innobase btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c - buf/buf0buf.c buf/buf0flu.c buf/buf0lru.c buf/buf0rea.c - data/data0data.c data/data0type.c - dict/dict0boot.c dict/dict0crea.c dict/dict0dict.c dict/dict0load.c dict/dict0mem.c - dyn/dyn0dyn.c - eval/eval0eval.c eval/eval0proc.c - fil/fil0fil.c - fsp/fsp0fsp.c - fut/fut0fut.c fut/fut0lst.c - ha/ha0ha.c ha/hash0hash.c - ibuf/ibuf0ibuf.c - pars/lexyy.c pars/pars0grm.c pars/pars0opt.c pars/pars0pars.c pars/pars0sym.c - lock/lock0lock.c - log/log0log.c log/log0recv.c - mach/mach0data.c - mem/mem0mem.c mem/mem0pool.c - mtr/mtr0log.c mtr/mtr0mtr.c - os/os0file.c os/os0proc.c os/os0sync.c os/os0thread.c - page/page0cur.c page/page0page.c - que/que0que.c - read/read0read.c - rem/rem0cmp.c rem/rem0rec.c - row/row0ins.c row/row0mysql.c row/row0purge.c row/row0row.c row/row0sel.c row/row0uins.c - row/row0umod.c row/row0undo.c row/row0upd.c row/row0vers.c - srv/srv0que.c srv/srv0srv.c srv/srv0start.c - sync/sync0arr.c sync/sync0rw.c sync/sync0sync.c - thr/thr0loc.c - trx/trx0purge.c trx/trx0rec.c trx/trx0roll.c trx/trx0rseg.c trx/trx0sys.c trx/trx0trx.c trx/trx0undo.c - usr/usr0sess.c - ut/ut0byte.c ut/ut0dbg.c ut/ut0mem.c ut/ut0rnd.c ut/ut0ut.c ) diff --git a/myisam/CMakeLists.txt b/myisam/CMakeLists.txt deleted file mode 100755 index 3ba7aba4555..00000000000 --- a/myisam/CMakeLists.txt +++ /dev/null @@ -1,26 +0,0 @@ -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include) -ADD_LIBRARY(myisam ft_boolean_search.c ft_nlq_search.c ft_parser.c ft_static.c ft_stem.c - ft_stopwords.c ft_update.c mi_cache.c mi_changed.c mi_check.c - mi_checksum.c mi_close.c mi_create.c mi_dbug.c mi_delete.c - mi_delete_all.c mi_delete_table.c mi_dynrec.c mi_extra.c mi_info.c - mi_key.c mi_keycache.c mi_locking.c mi_log.c mi_open.c - mi_packrec.c mi_page.c mi_panic.c mi_preload.c mi_range.c mi_rename.c - mi_rfirst.c mi_rlast.c mi_rnext.c mi_rnext_same.c mi_rprev.c mi_rrnd.c - mi_rsame.c mi_rsamepos.c mi_scan.c mi_search.c mi_static.c mi_statrec.c - mi_unique.c mi_update.c mi_write.c rt_index.c rt_key.c rt_mbr.c - rt_split.c sort.c sp_key.c ft_eval.h myisamdef.h rt_index.h mi_rkey.c) - -ADD_EXECUTABLE(myisam_ftdump myisam_ftdump.c) -TARGET_LINK_LIBRARIES(myisam_ftdump myisam mysys dbug strings zlib wsock32) - -ADD_EXECUTABLE(myisamchk myisamchk.c) -TARGET_LINK_LIBRARIES(myisamchk myisam mysys dbug strings zlib wsock32) - -ADD_EXECUTABLE(myisamlog myisamlog.c) -TARGET_LINK_LIBRARIES(myisamlog myisam mysys dbug strings zlib wsock32) - -ADD_EXECUTABLE(myisampack myisampack.c) -TARGET_LINK_LIBRARIES(myisampack myisam mysys dbug strings zlib wsock32) diff --git 
a/myisammrg/CMakeLists.txt b/myisammrg/CMakeLists.txt deleted file mode 100755 index 83168f6c60c..00000000000 --- a/myisammrg/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include) -ADD_LIBRARY(myisammrg myrg_close.c myrg_create.c myrg_delete.c myrg_extra.c myrg_info.c - myrg_locking.c myrg_open.c myrg_panic.c myrg_queue.c myrg_range.c - myrg_rfirst.c myrg_rkey.c myrg_rlast.c myrg_rnext.c myrg_rnext_same.c - myrg_rprev.c myrg_rrnd.c myrg_rsame.c myrg_static.c myrg_update.c - myrg_write.c) diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am index 61bac05b224..f0bc1e9b20a 100644 --- a/mysql-test/Makefile.am +++ b/mysql-test/Makefile.am @@ -22,14 +22,15 @@ DIST_SUBDIRS=ndb benchdir_root= $(prefix) testdir = $(benchdir_root)/mysql-test -EXTRA_SCRIPTS = mysql-test-run.sh install_test_db.sh valgrind.supp $(PRESCRIPTS) -EXTRA_DIST = $(EXTRA_SCRIPTS) -GENSCRIPTS = mysql-test-run install_test_db mtr +EXTRA_SCRIPTS = mysql-test-run-shell.sh install_test_db.sh \ + valgrind.supp $(PRESCRIPTS) +EXTRA_DIST = $(EXTRA_SCRIPTS) +GENSCRIPTS = mysql-test-run-shell install_test_db mtr mysql-test-run PRESCRIPTS = mysql-test-run.pl test_SCRIPTS = $(GENSCRIPTS) $(PRESCRIPTS) test_DATA = std_data/client-key.pem std_data/client-cert.pem \ std_data/cacert.pem std_data/server-cert.pem \ - std_data/server-key.pem + std_data/server-key.pem CLEANFILES = $(GENSCRIPTS) INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include -I.. @@ -118,6 +119,11 @@ mtr: $(RM) -f mtr $(LN_S) mysql-test-run.pl mtr +# mysql-test-run - a shortcut for executing mysql-test-run.pl +mysql-test-run: + $(RM) -f mysql-test-run + $(LN_S) mysql-test-run.pl mysql-test-run + SUFFIXES = .sh .sh: diff --git a/mysql-test/extra/binlog_tests/binlog.test b/mysql-test/extra/binlog_tests/binlog.test index fe66647ec68..81c813e78d0 100644 --- a/mysql-test/extra/binlog_tests/binlog.test +++ b/mysql-test/extra/binlog_tests/binlog.test @@ -59,24 +59,4 @@ insert into t1 values(null); select * from t1; drop table t1; -# Test of binlogging of INSERT_ID with INSERT DELAYED -create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; -# First, avoid BUG#20627: -set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; -# Verify that only one INSERT_ID event is binlogged. -insert delayed into t1 values (207); - -# We use sleeps between statements, that's the only way to get a -# repeatable binlog in a normal test run and under Valgrind. -# It may be that the "binlog missing rows" of BUG#20821 shows up -# here. 
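[Editor's note] On the mysql-test/Makefile.am hunk above: after the build, both `mtr' and the newly added `mysql-test-run' entry are symlinks to mysql-test-run.pl, so the Perl driver answers to any of the three names. A usage sketch (the test names are examples only, not prescribed by this patch):

     cd mysql-test
     ./mysql-test-run.pl rpl_insert_id    # run one named test with the Perl driver
     ./mtr rpl_insert_id                  # same driver, invoked through the mtr symlink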
-sleep 2; -insert delayed into t1 values (null); -sleep 2; -insert delayed into t1 values (300); -sleep 2; # time for the delayed queries to reach disk -select * from t1; ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; -drop table t1; +-- source extra/binlog_tests/binlog_insert_delayed.test diff --git a/mysql-test/extra/binlog_tests/binlog_insert_delayed.test b/mysql-test/extra/binlog_tests/binlog_insert_delayed.test new file mode 100644 index 00000000000..4da883b9e60 --- /dev/null +++ b/mysql-test/extra/binlog_tests/binlog_insert_delayed.test @@ -0,0 +1,42 @@ +# Test of binlogging of INSERT_ID with INSERT DELAYED +create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; +# First, avoid BUG#20627: +set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; +# Verify that only one INSERT_ID event is binlogged. +# Note, that because of WL#3368 mixed mode binlog records RBR events for the delayed +let $table=t1; +let $rows_inserted=11; # total number of inserted rows in this test +insert delayed into t1 values (207); +let $count=1; + +# use this macro instead of sleeps. + +--source include/wait_until_rows_count.inc +insert delayed into t1 values (null); +inc $count; +--source include/wait_until_rows_count.inc + +insert delayed into t1 values (300); +inc $count; +--source include/wait_until_rows_count.inc + +# moving binlog check affront of multi-rows queries which work is indeterministic (extra table_maps) +# todo: better check is to substitute SHOW BINLOG with reading from binlog, probably bug#19459 is in +# the way +--replace_column 2 # 5 # +--replace_regex /table_id: [0-9]+/table_id: #/ +show binlog events from 102; + +insert delayed into t1 values (null),(null),(null),(null); +inc $count; inc $count; inc $count; inc $count; +--source include/wait_until_rows_count.inc + +insert delayed into t1 values (null),(null),(400),(null); +inc $count; inc $count; inc $count; inc $count; +--source include/wait_until_rows_count.inc + +#check this assertion about $count calculation +--echo $count == $rows_inserted + +select * from t1; +drop table t1; diff --git a/mysql-test/extra/rpl_tests/rpl_foreign_key.test b/mysql-test/extra/rpl_tests/rpl_foreign_key.test index d5589d4b5ea..0f4cd856db6 100644 --- a/mysql-test/extra/rpl_tests/rpl_foreign_key.test +++ b/mysql-test/extra/rpl_tests/rpl_foreign_key.test @@ -31,4 +31,4 @@ connection master; SET FOREIGN_KEY_CHECKS=0; DROP TABLE IF EXISTS t1,t2,t3; SET FOREIGN_KEY_CHECKS=1; - +sync_slave_with_master; diff --git a/mysql-test/extra/rpl_tests/rpl_insert_id.test b/mysql-test/extra/rpl_tests/rpl_insert_id.test index 79f8c39e152..33194270d37 100644 --- a/mysql-test/extra/rpl_tests/rpl_insert_id.test +++ b/mysql-test/extra/rpl_tests/rpl_insert_id.test @@ -9,8 +9,21 @@ # column and index but without primary key. 
############################################################## +--echo # +--echo # Setup +--echo # -# We also check how the foreign_key_check variable is replicated +use test; +--disable_warnings +drop table if exists t1, t2, t3; +--enable_warnings + +--echo # +--echo # See if queries that use both auto_increment and LAST_INSERT_ID() +--echo # are replicated well +--echo # +--echo # We also check how the foreign_key_check variable is replicated +--echo # -- source include/master-slave.inc #should work for both SBR and RBR @@ -49,7 +62,9 @@ select * from t1; select * from t2; connection master; -# check if INSERT SELECT in auto_increment is well replicated (bug #490) +--echo # +--echo # check if INSERT SELECT in auto_increment is well replicated (bug #490) +--echo # drop table t2; drop table t1; @@ -72,10 +87,11 @@ save_master_pos; connection slave; sync_with_master; -# -# Bug#8412: Error codes reported in binary log for CHARACTER SET, -# FOREIGN_KEY_CHECKS -# +--echo # +--echo # Bug#8412: Error codes reported in binary log for CHARACTER SET, +--echo # FOREIGN_KEY_CHECKS +--echo # + connection master; SET TIMESTAMP=1000000000; CREATE TABLE t1 ( a INT UNIQUE ); @@ -88,11 +104,10 @@ connection master; drop table t1; sync_slave_with_master; -# End of 4.1 tests - -# -# Bug#14553: NULL in WHERE resets LAST_INSERT_ID -# +--echo # +--echo # Bug#14553: NULL in WHERE resets LAST_INSERT_ID +--echo # + connection master; create table t1(a int auto_increment, key(a)); create table t2(a int); @@ -107,11 +122,16 @@ connection master; drop table t1; drop table t2; -# -# BUG#15728: LAST_INSERT_ID function inside a stored function returns 0 -# -# The solution is not to reset last_insert_id on enter to sub-statement. -# +--echo # +--echo # End of 4.1 tests +--echo # + +--echo # +--echo # BUG#15728: LAST_INSERT_ID function inside a stored function returns 0 +--echo # +--echo # The solution is not to reset last_insert_id on enter to sub-statement. +--echo # + connection master; --disable_warnings drop function if exists bug15728; @@ -254,12 +274,162 @@ select * from t1 order by n; connection master; drop table t1; +sync_slave_with_master; + + +# +# BUG#20339: stored procedure using LAST_INSERT_ID() does not +# replicate statement-based. +# +# There is another version of the test for bug#20339 above that is +# actually originates in 5.1, and this is the version that is merged +# from 5.0. +# +connection master; + +--disable_warnings +DROP PROCEDURE IF EXISTS p1; +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +# Reset result of LAST_INSERT_ID(). 
+SELECT LAST_INSERT_ID(0); + +CREATE TABLE t1 ( + id INT NOT NULL DEFAULT 0, + last_id INT, + PRIMARY KEY (id) +); + +CREATE TABLE t2 ( + id INT NOT NULL AUTO_INCREMENT, + last_id INT, + PRIMARY KEY (id) +); + +delimiter |; +CREATE PROCEDURE p1() +BEGIN + INSERT INTO t2 (last_id) VALUES (LAST_INSERT_ID()); + INSERT INTO t1 (last_id) VALUES (LAST_INSERT_ID()); +END| +delimiter ;| + +CALL p1(); +SELECT * FROM t1; +SELECT * FROM t2; + +sync_slave_with_master; +SELECT * FROM t1; +SELECT * FROM t2; + +connection master; + +DROP PROCEDURE p1; +DROP TABLE t1, t2; + + +# +# BUG#21726: Incorrect result with multiple invocations of +# LAST_INSERT_ID +# +--disable_warnings +DROP PROCEDURE IF EXISTS p1; +DROP FUNCTION IF EXISTS f1; +DROP FUNCTION IF EXISTS f2; +DROP FUNCTION IF EXISTS f3; +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 ( + i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + j INT DEFAULT 0 +); +CREATE TABLE t2 (i INT); + +delimiter |; +CREATE PROCEDURE p1() +BEGIN + INSERT INTO t1 (i) VALUES (NULL); + INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); + INSERT INTO t1 (i) VALUES (NULL), (NULL); + INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); +END | + +CREATE FUNCTION f1() RETURNS INT MODIFIES SQL DATA +BEGIN + INSERT INTO t1 (i) VALUES (NULL); + INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); + INSERT INTO t1 (i) VALUES (NULL), (NULL); + INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); + RETURN 0; +END | + +CREATE FUNCTION f2() RETURNS INT NOT DETERMINISTIC + RETURN LAST_INSERT_ID() | + +CREATE FUNCTION f3() RETURNS INT MODIFIES SQL DATA +BEGIN + INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); + RETURN 0; +END | +delimiter ;| + +INSERT INTO t1 VALUES (NULL, -1); +CALL p1(); +SELECT f1(); +INSERT INTO t1 VALUES (NULL, f2()), (NULL, LAST_INSERT_ID()), + (NULL, LAST_INSERT_ID()), (NULL, f2()), (NULL, f2()); +INSERT INTO t1 VALUES (NULL, f2()); +INSERT INTO t1 VALUES (NULL, LAST_INSERT_ID()), (NULL, LAST_INSERT_ID(5)), + (NULL, @@LAST_INSERT_ID); +# Test replication of substitution "IS NULL" -> "= LAST_INSERT_ID". +INSERT INTO t1 VALUES (NULL, 0), (NULL, LAST_INSERT_ID()); +UPDATE t1 SET j= -1 WHERE i IS NULL; + +# Test statement-based replication of function calls. +INSERT INTO t1 (i) VALUES (NULL); + +connection master1; +INSERT INTO t1 (i) VALUES (NULL); + +connection master; +SELECT f3(); + +SELECT * FROM t1; +SELECT * FROM t2; + +sync_slave_with_master; +SELECT * FROM t1; +SELECT * FROM t2; + +connection master; +DROP PROCEDURE p1; +DROP FUNCTION f1; +DROP FUNCTION f2; +DROP FUNCTION f3; +DROP TABLE t1, t2; + + +sync_slave_with_master; + +--echo # +--echo # End of 5.0 tests +--echo # + +# Tests in this file are tightly bound together. Recreate t2. 
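The BUG#21726 block above relies on the documented one-argument form of LAST_INSERT_ID(): LAST_INSERT_ID(expr) returns expr and stores it as the value that later LAST_INSERT_ID() calls report, independently of any new auto_increment row. A quick illustration, separate from the tables above:

SELECT LAST_INSERT_ID(5);   # returns 5 and remembers it
SELECT LAST_INSERT_ID();    # still 5; nothing has been inserted since

The "IS NULL" UPDATE above exercises the related SQL_AUTO_IS_NULL substitution, where "WHERE auto_col IS NULL" right after an insert is treated as "auto_col = LAST_INSERT_ID()".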
+connection master; +create table t2 ( + id int not null auto_increment, + last_id int, + primary key (id) +); -# End of 5.0 tests # Test for BUG#20341 "stored function inserting into one # auto_increment puts bad data in slave" +connection master; truncate table t2; create table t1 (id tinyint primary key); # no auto_increment @@ -289,8 +459,30 @@ select * from t1; select * from t2; connection master; -drop table t1, t2; +drop table t1; drop function insid; -sync_slave_with_master; +truncate table t2; +create table t1 (n int primary key auto_increment not null, +b int, unique(b)); +delimiter |; +create procedure foo() +begin + insert into t1 values(null,10); + insert ignore into t1 values(null,10); + insert ignore into t1 values(null,10); + insert into t2 values(null,3); +end| +delimiter ;| +call foo(); +select * from t1; +select * from t2; +sync_slave_with_master; +select * from t1; +select * from t2; + +connection master; +drop table t1, t2; +drop procedure foo; +sync_slave_with_master; diff --git a/mysql-test/extra/rpl_tests/rpl_loaddata.test b/mysql-test/extra/rpl_tests/rpl_loaddata.test index 08e89c20973..e58908ec7e9 100644 --- a/mysql-test/extra/rpl_tests/rpl_loaddata.test +++ b/mysql-test/extra/rpl_tests/rpl_loaddata.test @@ -1,5 +1,5 @@ # Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc # See if replication of a "LOAD DATA in an autoincrement column" # Honours autoincrement values diff --git a/mysql-test/extra/rpl_tests/rpl_max_relay_size.test b/mysql-test/extra/rpl_tests/rpl_max_relay_size.test index 14223392224..50325f83358 100644 --- a/mysql-test/extra/rpl_tests/rpl_max_relay_size.test +++ b/mysql-test/extra/rpl_tests/rpl_max_relay_size.test @@ -9,7 +9,10 @@ connection slave; stop slave; connection master; -# Generate a big enough master's binlog to cause relay log rotations + +--echo # +--echo # Generate a big enough master's binlog to cause relay log rotations +--echo # create table t1 (a int); let $1=800; @@ -26,6 +29,11 @@ drop table t1; save_master_pos; connection slave; reset slave; + +--echo # +--echo # Test 1 +--echo # + set global max_binlog_size=8192; set global max_relay_log_size=8192-1; # mapped to 4096 select @@global.max_relay_log_size; @@ -33,7 +41,13 @@ start slave; sync_with_master; --replace_result $MASTER_MYPORT MASTER_PORT --replace_column 1 # 8 # 9 # 16 # 23 # 33 # +--vertical_results show slave status; + +--echo # +--echo # Test 2 +--echo # + stop slave; reset slave; set global max_relay_log_size=(5*4096); @@ -42,7 +56,13 @@ start slave; sync_with_master; --replace_result $MASTER_MYPORT MASTER_PORT --replace_column 1 # 8 # 9 # 16 # 23 # 33 # +--vertical_results show slave status; + +--echo # +--echo # Test 3: max_relay_log_size = 0 +--echo # + stop slave; reset slave; set global max_relay_log_size=0; @@ -51,9 +71,12 @@ start slave; sync_with_master; --replace_result $MASTER_MYPORT MASTER_PORT --replace_column 1 # 8 # 9 # 16 # 23 # 33 # +--vertical_results show slave status; -# Tests below are mainly to ensure that we have not coded with wrong assumptions +--echo # +--echo # Test 4: Tests below are mainly to ensure that we have not coded with wrong assumptions +--echo # stop slave; reset slave; @@ -62,8 +85,13 @@ reset slave; flush logs; --replace_result $MASTER_MYPORT MASTER_PORT --replace_column 1 # 8 # 9 # 16 # 23 # 33 # +--vertical_results show slave status; +--echo # +--echo # Test 5 +--echo # + reset slave; start slave; sync_with_master; @@ -78,8 +106,13 @@ 
connection slave; sync_with_master; --replace_result $MASTER_MYPORT MASTER_PORT --replace_column 1 # 8 # 9 # 16 # 23 # 33 # +--vertical_results show slave status; -# one more rotation, to be sure Relay_Log_Space is correctly updated + +--echo # +--echo # Test 6: one more rotation, to be sure Relay_Log_Space is correctly updated +--echo # + flush logs; connection master; drop table t1; @@ -88,6 +121,7 @@ connection slave; sync_with_master; --replace_result $MASTER_MYPORT MASTER_PORT --replace_column 1 # 8 # 9 # 16 # 23 # 33 # +--vertical_results show slave status; connection master; @@ -95,3 +129,7 @@ connection master; flush logs; -- replace_column 3 show master status; + +--echo # +--echo # End of 4.1 tests +--echo # diff --git a/mysql-test/include/rpl_row_basic.inc b/mysql-test/extra/rpl_tests/rpl_row_basic.test similarity index 100% rename from mysql-test/include/rpl_row_basic.inc rename to mysql-test/extra/rpl_tests/rpl_row_basic.test diff --git a/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test b/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test index 94a3af87ecd..54c14594cf4 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test +++ b/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test @@ -3,27 +3,51 @@ # Consider making these part of the basic RBR tests. --- source include/have_binlog_format_row.inc --- source include/master-slave.inc +connection master; +--disable_warnings +--disable_query_log +DROP TABLE IF EXISTS t1_int,t1_bit,t1_char,t1_nodef; +DROP TABLE IF EXISTS t2,t3,t4,t5,t6,t9; +--enable_query_log +--enable_warnings +sync_slave_with_master; +STOP SLAVE; +SET GLOBAL SQL_MODE='STRICT_ALL_TABLES'; +START SLAVE; connection master; -eval CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=$engine_type; +eval CREATE TABLE t1_int (a INT PRIMARY KEY, b INT) ENGINE=$engine_type; +eval CREATE TABLE t1_bit (a INT PRIMARY KEY, b INT) ENGINE=$engine_type; +eval CREATE TABLE t1_char (a INT PRIMARY KEY, b INT) ENGINE=$engine_type; +eval CREATE TABLE t1_nodef (a INT PRIMARY KEY, b INT) ENGINE=$engine_type; eval CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=$engine_type; eval CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=$engine_type; eval CREATE TABLE t4 (a INT) ENGINE=$engine_type; eval CREATE TABLE t5 (a INT, b INT, c INT) ENGINE=$engine_type; eval CREATE TABLE t6 (a INT, b INT, c INT) ENGINE=$engine_type; +eval CREATE TABLE t7 (a INT NOT NULL) ENGINE=$engine_type; +eval CREATE TABLE t8 (a INT NOT NULL) ENGINE=$engine_type; # Table used to detect that slave is running -eval CREATE TABLE t9 (a INT PRIMARY KEY) ENGINE=$engine_type; +eval CREATE TABLE t9 (a INT) ENGINE=$engine_type; sync_slave_with_master; -# On the slave, we add one column last in table 't1', -ALTER TABLE t1 ADD x INT DEFAULT 42; -# ... add one column in the middle of table 't2', and -ALTER TABLE t2 ADD x INT DEFAULT 42 AFTER a; -# ... add one column first in table 't3'. -ALTER TABLE t3 ADD x INT DEFAULT 42 FIRST; + +# On the slave, we add one INT column last in table 't1_int', +ALTER TABLE t1_int ADD x INT DEFAULT 42; +# ... and add BIT columns last in table 't1_bit' to ensure that we +# have at least one extra null byte on the slave, +ALTER TABLE t1_bit + ADD x BIT(3) DEFAULT b'011', + ADD y BIT(5) DEFAULT b'10101', + ADD z BIT(2) DEFAULT b'10'; +# ... and add one CHAR column last in table 't1_char', +ALTER TABLE t1_char ADD x CHAR(20) DEFAULT 'Just a test', +# ... and add one non-nullable INT column last in table 't1_nodef' +# with no default, +ALTER TABLE t1_nodef ADD x INT NOT NULL; +# ...
and remove the last column in t2 +ALTER TABLE t2 DROP b; # ... change the type of the single column in table 't4' ALTER TABLE t4 MODIFY a FLOAT; # ... change the type of the middle column of table 't5' @@ -31,19 +55,67 @@ ALTER TABLE t5 MODIFY b FLOAT; # ... change the type of the last column of table 't6' ALTER TABLE t6 MODIFY c FLOAT; -# Each of these should generate an error and stop the slave +# ... add one byte worth of null bytes to the table on the slave +ALTER TABLE t7 ADD e1 INT, ADD e2 INT, ADD e3 INT, ADD e4 INT, + ADD e5 INT, ADD e6 INT, ADD e7 INT, ADD e8 INT; + +# ... add 8 columns that are nullable: t8 will not be entirely +# nullable and have no null bits (just an X bit) +ALTER TABLE t8 ADD e1 INT NOT NULL DEFAULT 0, ADD e2 INT NOT NULL DEFAULT 0, + ADD e3 INT NOT NULL DEFAULT 0, ADD e4 INT NOT NULL DEFAULT 0, + ADD e5 INT NOT NULL DEFAULT 0, ADD e6 INT NOT NULL DEFAULT 0, + ADD e7 INT NOT NULL DEFAULT 0, ADD e8 INT NOT NULL DEFAULT 0; + +# Insert some values for tables on slave side. These should not be +# modified when the row from the master is applied. +INSERT INTO t1_int VALUES (2, 4, 4711); +INSERT INTO t1_char VALUES (2, 4, 'Foo is a bar'); +INSERT INTO t1_bit VALUES (2, 4, b'101', b'11100', b'01'); + +--echo **** On Master **** connection master; -INSERT INTO t9 VALUES (1); +INSERT INTO t1_int VALUES (1,2); +INSERT INTO t1_int VALUES (2,5); +INSERT INTO t1_bit VALUES (1,2); +INSERT INTO t1_bit VALUES (2,5); +INSERT INTO t1_char VALUES (1,2); +INSERT INTO t1_char VALUES (2,5); +SELECT * FROM t1_int; +SELECT * FROM t1_bit; +SELECT * FROM t1_char; +--echo **** On Slave **** +sync_slave_with_master; +SELECT a,b,x FROM t1_int; +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +SELECT a,b,x FROM t1_char; + +--echo **** On Master **** +connection master; +UPDATE t1_int SET b=2*b WHERE a=2; +UPDATE t1_char SET b=2*b WHERE a=2; +UPDATE t1_bit SET b=2*b WHERE a=2; +SELECT * FROM t1_int; +SELECT * FROM t1_bit; +SELECT * FROM t1_char; +--echo **** On Slave **** +sync_slave_with_master; +SELECT a,b,x FROM t1_int; +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +SELECT a,b,x FROM t1_char; + +# Each of these inserts should generate an error and stop the slave + +connection master; +INSERT INTO t9 VALUES (2); sync_slave_with_master; # Now slave is guaranteed to be running connection master; -INSERT INTO t1 VALUES (1,2); +INSERT INTO t1_nodef VALUES (1,2); connection slave; wait_for_slave_to_stop; --replace_result $MASTER_MYPORT MASTER_PORT ---replace_column 1 # 8 # 9 # 23 # 33 # ---vertical_results -SHOW SLAVE STATUS; +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; @@ -56,24 +128,8 @@ INSERT INTO t2 VALUES (2,4); connection slave; wait_for_slave_to_stop; --replace_result $MASTER_MYPORT MASTER_PORT ---replace_column 1 # 8 # 9 # 23 # 33 # ---vertical_results -SHOW SLAVE STATUS; -SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; -START SLAVE; - -connection master; -INSERT INTO t9 VALUES (3); -sync_slave_with_master; -# Now slave is guaranteed to be running -connection master; -INSERT INTO t3 VALUES (3,6); -connection slave; -wait_for_slave_to_stop; ---replace_result $MASTER_MYPORT MASTER_PORT ---replace_column 1 # 8 # 9 # 23 # 33 # ---vertical_results -SHOW SLAVE STATUS; +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; @@ -86,9 +142,8 @@ INSERT INTO t4 VALUES (4); connection slave; wait_for_slave_to_stop; --replace_result 
$MASTER_MYPORT MASTER_PORT ---replace_column 1 # 8 # 9 # 23 # 33 # ---vertical_results -SHOW SLAVE STATUS; +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; @@ -101,9 +156,8 @@ INSERT INTO t5 VALUES (5,10,25); connection slave; wait_for_slave_to_stop; --replace_result $MASTER_MYPORT MASTER_PORT ---replace_column 1 # 8 # 9 # 23 # 33 # ---vertical_results -SHOW SLAVE STATUS; +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; @@ -116,14 +170,71 @@ INSERT INTO t6 VALUES (6,12,36); connection slave; wait_for_slave_to_stop; --replace_result $MASTER_MYPORT MASTER_PORT ---replace_column 1 # 8 # 9 # 23 # 33 # ---vertical_results -SHOW SLAVE STATUS; +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; +connection master; +INSERT INTO t9 VALUES (6); +sync_slave_with_master; +--replace_result $MASTER_MYPORT MASTER_PORT +--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # +--query_vertical SHOW SLAVE STATUS + +# Testing some tables extra field that can be null and cannot be null +# (but have default values) + +connection master; +INSERT INTO t7 VALUES (1),(2),(3); +INSERT INTO t8 VALUES (1),(2),(3); +SELECT * FROM t7; +SELECT * FROM t8; +sync_slave_with_master; +SELECT * FROM t7; +SELECT * FROM t8; + +# We will now try to update and then delete a row on the master where +# the extra field on the slave does not have a default value. This +# update should not generate an error even though there is no default +# for the extra column. + +--echo **** On Master **** +connection master; +TRUNCATE t1_nodef; +SET SQL_LOG_BIN=0; +INSERT INTO t1_nodef VALUES (1,2); +INSERT INTO t1_nodef VALUES (2,4); +SET SQL_LOG_BIN=1; +sync_slave_with_master; + +--echo **** On Slave **** +connection slave; +INSERT INTO t1_nodef VALUES (1,2,3); +INSERT INTO t1_nodef VALUES (2,4,6); + +--echo **** On Master **** +connection master; +UPDATE t1_nodef SET b=2*b WHERE a=1; +SELECT * FROM t1_nodef; + +--echo **** On Slave **** +sync_slave_with_master; +SELECT * FROM t1_nodef; + +--echo **** On Master **** +connection master; +DELETE FROM t1_nodef WHERE a=2; +SELECT * FROM t1_nodef; + +--echo **** On Slave **** +sync_slave_with_master; +SELECT * FROM t1_nodef; + +--echo **** Cleanup **** connection master; --disable_warnings -DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t9; +DROP TABLE IF EXISTS t1_int,t1_bit,t1_char,t1_nodef; +DROP TABLE IF EXISTS t2,t3,t4,t5,t6,t7,t8,t9; --enable_warnings sync_slave_with_master; diff --git a/mysql-test/extra/rpl_tests/rpl_stm_000001.test b/mysql-test/extra/rpl_tests/rpl_stm_000001.test index 443ed27053a..8673ae31305 100644 --- a/mysql-test/extra/rpl_tests/rpl_stm_000001.test +++ b/mysql-test/extra/rpl_tests/rpl_stm_000001.test @@ -1,4 +1,4 @@ --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source include/master-slave.inc create table t1 (word char(20) not null); diff --git a/mysql-test/extra/rpl_tests/rpl_stm_charset.test b/mysql-test/extra/rpl_tests/rpl_stm_charset.test index d29a82cfd31..10b4310127f 100644 --- a/mysql-test/extra/rpl_tests/rpl_stm_charset.test +++ b/mysql-test/extra/rpl_tests/rpl_stm_charset.test @@ -2,7 +2,7 @@ # This test will fail if the server/client does not support enough charsets. 
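The SHOW SLAVE STATUS checks in the rpl_row_tabledefs hunks above were switched from the session-wide --vertical_results toggle to --query_vertical, which applies vertical output to a single statement only. Assuming current mysqltest behaviour, the two forms are roughly equivalent:

--query_vertical SHOW SLAVE STATUS
# is roughly the same as:
--vertical_results
SHOW SLAVE STATUS;
--horizontal_results

Scoping the vertical output to one statement avoids accidentally leaving the session in vertical mode for later queries.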
# Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc source include/master-slave.inc; --disable_warnings diff --git a/mysql-test/t/innodb_concurrent.test b/mysql-test/include/concurrent.inc similarity index 95% rename from mysql-test/t/innodb_concurrent.test rename to mysql-test/include/concurrent.inc index 957276a44c7..7eee50c52e6 100644 --- a/mysql-test/t/innodb_concurrent.test +++ b/mysql-test/include/concurrent.inc @@ -1,19 +1,30 @@ +# include/concurrent.inc # -# Concurrent InnoDB tests, mainly in UPDATE's +# Concurrent tests for transactional storage engines, mainly in UPDATE's # Bug#3300 # Designed and tested by Sinisa Milivojevic, sinisa@mysql.com # # two non-interfering UPDATE's not changing result set # - -# test takes circa 5 minutes to run, so it's big --- source include/big_test.inc +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. +# +# Last update: +# 2006-08-02 ML test refactored +# old name was t/innodb_concurrent.test +# main code went into include/concurrent.inc +# connection default; +eval SET SESSION STORAGE_ENGINE = $engine_type; +--disable_warnings drop table if exists t1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) engine=innodb; +--enable_warnings +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); connect (thread1, localhost, mysqltest,,); connection thread1; +eval SET SESSION STORAGE_ENGINE = $engine_type; insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -57,7 +68,7 @@ drop table t1; # #connect (thread1, localhost, mysqltest,,); connection thread1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) engine=innodb; +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -103,7 +114,7 @@ drop table t1; #connect (thread1, localhost, mysqltest,,); connection thread1; -create table t1 (a int not null, b int not null) engine=innodb; +create table t1 (a int not null, b int not null); insert into t1 values (1,1),(2,1),(3,1),(4,1); select get_lock("hello2",1000); #connect (thread2, localhost, mysqltest,,); @@ -132,7 +143,7 @@ drop table t1; # #connect (thread1, localhost, mysqltest,,); connection thread1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) engine=innodb; +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -175,7 +186,7 @@ drop table t1; # #connect (thread1, localhost, mysqltest,,); connection thread1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) engine=innodb; +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values 
(10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -218,7 +229,7 @@ drop table t1; # #connect (thread1, localhost, mysqltest,,); connection thread1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) engine=innodb; +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -261,7 +272,7 @@ drop table t1; # #connect (thread1, localhost, mysqltest,,); connection thread1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) engine=innodb; +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -305,7 +316,7 @@ drop table t1; # #connect (thread1, localhost, mysqltest,,); connection thread1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) engine=innodb; +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); diff --git a/mysql-test/include/deadlock.inc b/mysql-test/include/deadlock.inc new file mode 100644 index 00000000000..41c68f39320 --- /dev/null +++ b/mysql-test/include/deadlock.inc @@ -0,0 +1,147 @@ +# include/deadlock.inc +# +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. 
+# +# Last update: +# 2006-07-26 ML refactoring + print when connection is switched +# old name was t/innodb-deadlock.test +# main code went into include/deadlock.inc +# + +--echo # Establish connection con1 (user=root) +connect (con1,localhost,root,,); +--echo # Establish connection con2 (user=root) +connect (con2,localhost,root,,); + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + +# +# Testing of FOR UPDATE +# + +--echo # Switch to connection con1 +connection con1; +eval create table t1 (id integer, x integer) engine = $engine_type; +insert into t1 values(0, 0); +set autocommit=0; +SELECT * from t1 where id = 0 FOR UPDATE; + +--echo # Switch to connection con2 +connection con2; +set autocommit=0; + +# The following query should hang because con1 is locking the record +--send +update t1 set x=2 where id = 0; +--sleep 2 + +--echo # Switch to connection con1 +connection con1; +update t1 set x=1 where id = 0; +select * from t1; +commit; + +--echo # Switch to connection con2 +connection con2; +reap; +commit; + +--echo # Switch to connection con1 +connection con1; +select * from t1; +commit; + +drop table t1; +# +# Testing of FOR UPDATE +# + +--echo # Switch to connection con1 +connection con1; +eval create table t1 (id integer, x integer) engine = $engine_type; +eval create table t2 (b integer, a integer) engine = $engine_type; +insert into t1 values(0, 0), (300, 300); +insert into t2 values(0, 10), (1, 20), (2, 30); +commit; +set autocommit=0; +select * from t2; +update t2 set a=100 where b=(SELECT x from t1 where id = b FOR UPDATE); +select * from t2; +select * from t1; + +--echo # Switch to connection con2 +connection con2; +set autocommit=0; + +# The following query should hang because con1 is locking the record +--send +update t1 set x=2 where id = 0; +--sleep 2 + +--echo # Switch to connection con1 +connection con1; +update t1 set x=1 where id = 0; +select * from t1; +commit; + +--echo # Switch to connection con2 +connection con2; +reap; +commit; + +--echo # Switch to connection con1 +connection con1; +select * from t1; +commit; + +drop table t1, t2; +eval create table t1 (id integer, x integer) engine = $engine_type; +eval create table t2 (b integer, a integer) engine = $engine_type; +insert into t1 values(0, 0), (300, 300); +insert into t2 values(0, 0), (1, 20), (2, 30); +commit; + +--echo # Switch to connection con1 +connection con1; +select a,b from t2 UNION SELECT id, x from t1 FOR UPDATE; +select * from t2; +select * from t1; + +--echo # Switch to connection con2 +connection con2; + +# The following query should hang because con1 is locking the record +update t2 set a=2 where b = 0; +select * from t2; +--send +update t1 set x=2 where id = 0; +--sleep 2 + +--echo # Switch to connection con1 +connection con1; +update t1 set x=1 where id = 0; +select * from t1; +commit; + +--echo # Switch to connection con2 +connection con2; +reap; +commit; + +--echo # Switch to connection con1 +connection con1; +select * from t1; +commit; + +# Cleanup +--echo # Switch to connection default + disconnect con1 and con2 +connection default; +disconnect con1; +disconnect con2; +drop table t1, t2; + +# End of 4.1 tests diff --git a/mysql-test/t/handler.test b/mysql-test/include/handler.inc similarity index 84% rename from mysql-test/t/handler.test rename to mysql-test/include/handler.inc index bf18b8da941..67fff16c577 100644 --- a/mysql-test/t/handler.test +++ b/mysql-test/include/handler.inc @@ -1,10 +1,25 @@ +# include/handler.inc +# +# The variables +# $engine_type -- storage 
engine to be tested +# $other_engine_type -- storage engine <> $engine_type +# $other_handler_engine_type -- storage engine <> $engine_type, if possible +# 1. $other_handler_engine_type must support handler +# 2. $other_handler_engine_type must point to an all +# time available storage engine +# 2006-08 MySQL 5.1 MyISAM and MEMORY only +# have to be set before sourcing this script. -- source include/not_embedded.inc # # test of HANDLER ... # +# Last update: +# 2006-07-31 ML test refactored (MySQL 5.1) +# code of t/handler.test and t/innodb_handler.test united +# main testing code put into include/handler.inc +# -# should work in embedded server after mysqltest is fixed --- source include/not_embedded.inc +eval SET SESSION STORAGE_ENGINE = $engine_type; --disable_warnings drop table if exists t1,t3,t4,t5; @@ -74,6 +89,22 @@ handler t2 read next; handler t2 read last; handler t2 close; +handler t1 open; +handler t1 read a next; # this used to crash as a bug#5373 +handler t1 read a next; +handler t1 close; + +handler t1 open; +handler t1 read a prev; # this used to crash as a bug#5373 +handler t1 read a prev; +handler t1 close; + +handler t1 open as t2; +handler t2 read first; +eval alter table t1 engine = $engine_type; +--error 1109 +handler t2 read first; + # # DROP TABLE / ALTER TABLE # @@ -84,7 +115,7 @@ insert into t1 values (17); --error 1109 handler t2 read first; handler t1 open as t2; -alter table t1 engine=MyISAM; +eval alter table t1 engine=$other_engine_type; --error 1109 handler t2 read first; drop table t1; @@ -303,7 +334,7 @@ insert into t5 values ("t5"); handler t5 open as h5; handler h5 read first limit 9; # close first -alter table t1 engine=MyISAM; +eval alter table t1 engine=$other_handler_engine_type; --error 1109 handler h1 read first limit 9; handler h2 read first limit 9; @@ -311,7 +342,7 @@ handler h3 read first limit 9; handler h4 read first limit 9; handler h5 read first limit 9; # close last -alter table t5 engine=MyISAM; +eval alter table t5 engine=$other_handler_engine_type; --error 1109 handler h1 read first limit 9; handler h2 read first limit 9; @@ -320,7 +351,7 @@ handler h4 read first limit 9; --error 1109 handler h5 read first limit 9; # close middle -alter table t3 engine=MyISAM; +eval alter table t3 engine=$other_handler_engine_type; --error 1109 handler h1 read first limit 9; handler h2 read first limit 9; @@ -338,7 +369,7 @@ handler t1 open as h1_3; handler h1_1 read first limit 9; handler h1_2 read first limit 9; handler h1_3 read first limit 9; -alter table t1 engine=MyISAM; +eval alter table t1 engine=$engine_type; --error 1109 handler h1_1 read first limit 9; --error 1109 @@ -379,6 +410,13 @@ reap; connection default; drop table t1; +CREATE TABLE t1 ( no1 smallint(5) NOT NULL default '0', no2 int(10) NOT NULL default '0', PRIMARY KEY (no1,no2)); +INSERT INTO t1 VALUES (1,274),(1,275),(2,6),(2,8),(4,1),(4,2); +HANDLER t1 OPEN; +HANDLER t1 READ `primary` = (1, 1000); +HANDLER t1 READ `primary` PREV; +DROP TABLE t1; + # End of 4.1 tests # diff --git a/mysql-test/include/have_archive.inc b/mysql-test/include/have_archive.inc index 262f66076a8..9f0038db97a 100644 --- a/mysql-test/include/have_archive.inc +++ b/mysql-test/include/have_archive.inc @@ -1,4 +1,4 @@ ---require r/have_archive.require --disable_query_log -show variables like "have_archive"; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'archive'; --enable_query_log diff --git a/mysql-test/include/have_binlog_format_mixed.inc 
b/mysql-test/include/have_binlog_format_mixed.inc new file mode 100644 index 00000000000..fc5ca61c5a0 --- /dev/null +++ b/mysql-test/include/have_binlog_format_mixed.inc @@ -0,0 +1,4 @@ +-- require r/have_binlog_format_mixed.require +disable_query_log; +show variables like "binlog_format"; +enable_query_log; diff --git a/mysql-test/include/have_binlog_format_mixed_or_statement.inc b/mysql-test/include/have_binlog_format_mixed_or_statement.inc new file mode 100644 index 00000000000..8ee6f2cc030 --- /dev/null +++ b/mysql-test/include/have_binlog_format_mixed_or_statement.inc @@ -0,0 +1,5 @@ +--require r/have_binlog_format_statement.require +--disable_query_log +--replace_result MIXED STATEMENT +show variables like "binlog_format"; +--enable_query_log diff --git a/mysql-test/include/have_blackhole.inc b/mysql-test/include/have_blackhole.inc index c2b6ea18830..e13cff52094 100644 --- a/mysql-test/include/have_blackhole.inc +++ b/mysql-test/include/have_blackhole.inc @@ -1,4 +1,4 @@ --- require r/have_blackhole.require disable_query_log; -show variables like "have_blackhole_engine"; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'blackhole'; enable_query_log; diff --git a/mysql-test/include/have_csv.inc b/mysql-test/include/have_csv.inc index d28199831b8..3175fc16fe7 100644 --- a/mysql-test/include/have_csv.inc +++ b/mysql-test/include/have_csv.inc @@ -1,4 +1,4 @@ --- require r/have_csv.require disable_query_log; -show variables like "have_csv"; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'csv'; enable_query_log; diff --git a/mysql-test/include/have_exampledb.inc b/mysql-test/include/have_exampledb.inc index 7ddd15c48b3..e3fd068b485 100644 --- a/mysql-test/include/have_exampledb.inc +++ b/mysql-test/include/have_exampledb.inc @@ -1,4 +1,4 @@ --- require r/have_exampledb.require disable_query_log; -show variables like "have_example_engine"; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'example'; enable_query_log; diff --git a/mysql-test/include/have_federated_db.inc b/mysql-test/include/have_federated_db.inc index e4cf1366fda..abef5a64d30 100644 --- a/mysql-test/include/have_federated_db.inc +++ b/mysql-test/include/have_federated_db.inc @@ -1,4 +1,4 @@ --- require r/have_federated_db.require disable_query_log; -show variables like "have_federated_engine"; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'federated'; enable_query_log; diff --git a/mysql-test/include/have_innodb.inc b/mysql-test/include/have_innodb.inc index 4f83d378cbc..be8850725e5 100644 --- a/mysql-test/include/have_innodb.inc +++ b/mysql-test/include/have_innodb.inc @@ -1,4 +1,4 @@ --- require r/have_innodb.require disable_query_log; -show variables like "have_innodb"; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'innodb'; enable_query_log; diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc index 45a551274f7..218a6852c41 100644 --- a/mysql-test/include/have_multi_ndb.inc +++ b/mysql-test/include/have_multi_ndb.inc @@ -9,8 +9,8 @@ disable_query_log; drop table if exists t1, t2; --enable_warnings flush tables; ---require r/have_ndb.require -show variables like "have_ndbcluster"; +--require r/true.require +select support = 'Enabled' as `TRUE` from 
information_schema.engines where engine = 'ndbcluster'; enable_query_log; # Check that server2 has NDB support @@ -20,8 +20,8 @@ disable_query_log; drop table if exists t1, t2; --enable_warnings flush tables; ---require r/have_ndb.require -show variables like "have_ndbcluster"; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'ndbcluster'; enable_query_log; # Set the default connection to 'server1' diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc index 8c277ea82a0..8cbeab07a4f 100644 --- a/mysql-test/include/have_ndb.inc +++ b/mysql-test/include/have_ndb.inc @@ -1,15 +1,7 @@ # Check that server is compiled and started with support for NDB --- require r/have_ndb.require disable_query_log; -show variables like "have_ndbcluster"; -enable_query_log; - -# Check that NDB is installed and known to be working -# This will disable ndb from the shell script 'mysql-test-run' - --- require r/have_ndb_status_ok.require -disable_query_log; -eval select "$NDB_STATUS_OK" as ndb_status_ok; +--require r/true.require +select support = 'Enabled' as `TRUE` from information_schema.engines where engine = 'ndbcluster'; enable_query_log; diff --git a/mysql-test/t/index_merge.test b/mysql-test/include/index_merge1.inc similarity index 73% rename from mysql-test/t/index_merge.test rename to mysql-test/include/index_merge1.inc index 3da5711bf7a..a3a874a8ed2 100644 --- a/mysql-test/t/index_merge.test +++ b/mysql-test/include/index_merge1.inc @@ -1,6 +1,26 @@ +# include/index_merge1.inc # # Index merge tests # +# The variables +# $engine_type -- storage engine to be tested +# $merge_table_support -- 1 storage engine supports merge tables +# -- 0 storage engine does not support merge tables +# have to be set before sourcing this script. +# +# Note: The comments/expectations refer to MyISAM. +# They might be not valid for other storage engines. +# +# Last update: +# 2006-08-02 ML test refactored +# old name was t/index_merge.test +# main code went into include/index_merge1.inc +# + +--echo #---------------- Index merge test 1 ------------------------------------------- + +eval SET SESSION STORAGE_ENGINE = $engine_type; + --disable_warnings drop table if exists t0, t1, t2, t3, t4; --enable_warnings @@ -8,7 +28,7 @@ drop table if exists t0, t1, t2, t3, t4; # Create and fill a table with simple keys create table t0 ( - key1 int not null, + key1 int not null, INDEX i1(key1) ); @@ -36,11 +56,11 @@ alter table t0 add key8 int not null, add index i8(key8); update t0 set key2=key1,key3=key1,key4=key1,key5=key1,key6=key1,key7=key1,key8=1024-key1; analyze table t0; -# 1. One index +# 1. One index explain select * from t0 where key1 < 3 or key1 > 1020; # 2. Simple cases -explain +explain select * from t0 where key1 < 3 or key2 > 1020; select * from t0 where key1 < 3 or key2 > 1020; @@ -48,6 +68,7 @@ explain select * from t0 where key1 < 3 or key2 <4; explain select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); +# Bug#21277: InnoDB, wrong result set, index_merge strategy, second index not evaluated select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); # 3. Check that index_merge doesn't break "ignore/force/use index" @@ -60,8 +81,8 @@ explain select * from t0 force index (i1,i2) where (key1 > 1 or key2 > 2); # 4. 
Check if conjuncts are grouped by keyuse -explain - select * from t0 where key1<3 or key2<3 or (key1>5 and key1<8) or +explain + select * from t0 where key1<3 or key2<3 or (key1>5 and key1<8) or (key1>10 and key1<12) or (key2>100 and key2<110); # 5. Check index_merge with conjuncts that are always true/false @@ -78,13 +99,13 @@ explain select * from t0 where key2=10 or key3=3 or key4 <=> null; explain select * from t0 where key2=10 or key3=3 or key4 is null; # some more complicated cases -explain select key1 from t0 where (key1 <=> null) or (key2 < 5) or +explain select key1 from t0 where (key1 <=> null) or (key2 < 5) or (key3=10) or (key4 <=> null); -explain select key1 from t0 where (key1 <=> null) or (key1 < 5) or +explain select key1 from t0 where (key1 <=> null) or (key1 < 5) or (key3=10) or (key4 <=> null); # 6.Several ways to do index_merge, (ignored) index_merge vs. range -explain select * from t0 where +explain select * from t0 where (key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 5 or key6 < 5); explain @@ -93,23 +114,23 @@ select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4); select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4); -explain select * from t0 where +explain select * from t0 where (key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 2 or key6 < 2); - + # now index_merge is not used at all when "range" is possible -explain select * from t0 where +explain select * from t0 where (key1 < 3 or key2 < 3) and (key3 < 100); # this even can cause "all" scan: explain select * from t0 where (key1 < 3 or key2 < 3) and (key3 < 1000); - + # 7. Complex cases # tree_or(List, range SEL_TREE). -explain select * from t0 where - ((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) - or +explain select * from t0 where + ((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) + or key2 > 5; explain select * from t0 where @@ -121,18 +142,18 @@ select * from t0 where ((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) or key1 < 7; - -# tree_or(List, List). -explain select * from t0 where - ((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4)) - or + +# tree_or(List, List). +explain select * from t0 where + ((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4)) + or ((key5 < 5 or key6 < 6) and (key7 <7 or key8 < 4)); explain select * from t0 where ((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) or ((key7 <7 or key8 < 4) and (key5 < 5 or key6 < 6)); - + explain select * from t0 where ((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) or @@ -152,7 +173,7 @@ explain select * from t0 force index(i1, i2, i3, i4, i5, i6 ) where ((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) or ((key3 >=5 or key5 < 2) and (key5 < 5 or key6 < 6)); - + # 8. 
Verify that "order by" after index merge uses filesort select * from t0 where key1 < 5 or key8 < 4 order by key1; @@ -186,10 +207,8 @@ create table t4 ( key2_1 int not null, key2_2 int not null, key3 int not null, - index i1a (key1a, key1b), index i1b (key1b, key1a), - index i2_1(key2, key2_1), index i2_2(key2, key2_1) ); @@ -197,8 +216,8 @@ create table t4 ( insert into t4 select key1,key1,key1 div 10, key1 % 10, key1 % 10, key1 from t0; # the following will be handled by index_merge: -select * from t4 where key1a = 3 or key1b = 4; -explain select * from t4 where key1a = 3 or key1b = 4; +select * from t4 where key1a = 3 or key1b = 4; +explain select * from t4 where key1a = 3 or key1b = 4; # and the following will not explain select * from t4 where key2 = 1 and (key2_1 = 1 or key3 = 5); @@ -213,27 +232,27 @@ create table t1 like t0; insert into t1 select * from t0; # index_merge on first table in join -explain select * from t0 left join t1 on (t0.key1=t1.key1) - where t0.key1=3 or t0.key2=4; +explain select * from t0 left join t1 on (t0.key1=t1.key1) + where t0.key1=3 or t0.key2=4; select * from t0 left join t1 on (t0.key1=t1.key1) where t0.key1=3 or t0.key2=4; -explain +explain select * from t0,t1 where (t0.key1=t1.key1) and ( t0.key1=3 or t0.key2=4); # index_merge vs. ref -explain -select * from t0,t1 where (t0.key1=t1.key1) and +explain +select * from t0,t1 where (t0.key1=t1.key1) and (t0.key1=3 or t0.key2=4) and t1.key1<200; # index_merge vs. ref -explain -select * from t0,t1 where (t0.key1=t1.key1) and +explain +select * from t0,t1 where (t0.key1=t1.key1) and (t0.key1=3 or t0.key2<4) and t1.key1=2; # index_merge on second table in join -explain select * from t0,t1 where t0.key1 = 5 and +explain select * from t0,t1 where t0.key1 = 5 and (t1.key1 = t0.key1 or t1.key8 = t0.key1); # Fix for bug#1974 @@ -241,7 +260,7 @@ explain select * from t0,t1 where t0.key1 < 3 and (t1.key1 = t0.key1 or t1.key8 = t0.key1); # index_merge inside union -explain select * from t1 where key1=3 or key2=4 +explain select * from t1 where key1=3 or key2=4 union select * from t1 where key1<4 or key3=5; # index merge in subselect @@ -256,21 +275,23 @@ alter table t3 add keyB int not null, add index iB(keyB); alter table t3 add keyC int not null, add index iC(keyC); update t3 set key9=key1,keyA=key1,keyB=key1,keyC=key1; -explain select * from t3 where +explain select * from t3 where key1=1 or key2=2 or key3=3 or key4=4 or key5=5 or key6=6 or key7=7 or key8=8 or key9=9 or keyA=10 or keyB=11 or keyC=12; - + select * from t3 where key1=1 or key2=2 or key3=3 or key4=4 or key5=5 or key6=6 or key7=7 or key8=8 or - key9=9 or keyA=10 or keyB=11 or keyC=12; + key9=9 or keyA=10 or keyB=11 or keyC=12; # Test for Bug#3183 explain select * from t0 where key1 < 3 or key2 < 4; +# Bug#21277: InnoDB, wrong result set, index_merge strategy, second index not evaluated select * from t0 where key1 < 3 or key2 < 4; update t0 set key8=123 where key1 < 3 or key2 < 4; +# Bug#21277: InnoDB, wrong result set, index_merge strategy, second index not evaluated select * from t0 where key1 < 3 or key2 < 4; delete from t0 where key1 < 3 or key2 < 4; @@ -283,47 +304,47 @@ create table t4 (a int); insert into t4 values (1),(4),(3); set @save_join_buffer_size=@@join_buffer_size; set join_buffer_size= 4000; -explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) - from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + 
A.key4 + B.key4 + A.key5 + B.key5) + from t0 as A force index(i1,i2), t0 as B force index (i1,i2) where (A.key1 < 500000 or A.key2 < 3) and (B.key1 < 500000 or B.key2 < 3); -select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) - from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) + from t0 as A force index(i1,i2), t0 as B force index (i1,i2) where (A.key1 < 500000 or A.key2 < 3) and (B.key1 < 500000 or B.key2 < 3); update t0 set key1=1; -explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) - from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) + from t0 as A force index(i1,i2), t0 as B force index (i1,i2) where (A.key1 = 1 or A.key2 = 1) and (B.key1 = 1 or B.key2 = 1); -select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) - from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) + from t0 as A force index(i1,i2), t0 as B force index (i1,i2) where (A.key1 = 1 or A.key2 = 1) and (B.key1 = 1 or B.key2 = 1); alter table t0 add filler1 char(200), add filler2 char(200), add filler3 char(200); update t0 set key2=1, key3=1, key4=1, key5=1,key6=1,key7=1 where key7 < 500; -# The next query will not use index i7 in intersection if the OS doesn't +# The next query will not use index i7 in intersection if the OS doesn't # support file sizes > 2GB. (ha_myisam::ref_length depends on this and index # scan cost estimates depend on ha_myisam::ref_length) --replace_column 9 # --replace_result "4,4,4,4,4,4,4" X "4,4,4,4,4,4" X "i6,i7" "i6,i7?" "i6" "i6,i7?" 
-explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) - from t0 as A, t0 as B +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) + from t0 as A, t0 as B where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1) and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1); -select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) - from t0 as A, t0 as B +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) + from t0 as A, t0 as B where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1) and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1); set join_buffer_size= @save_join_buffer_size; -# Test for BUG#4177 ends +# Test for BUG#4177 ends drop table t0, t1, t2, t3, t4; @@ -357,13 +378,15 @@ explain select * from t1 WHERE cola = 'foo' AND colb = 'bar'; explain select * from t1 force index(cola,colb) WHERE cola = 'foo' AND colb = 'bar'; drop table t1; +if ($merge_table_support) +{ # # BUG#17314: Index_merge/intersection not choosen by the optimizer for MERGE tables # create table t0 (a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); create table t1 ( - a int, b int, + a int, b int, filler1 char(200), filler2 char(200), key(a),key(b) ); @@ -371,7 +394,7 @@ insert into t1 select @v:= A.a, @v, 't1', 'filler2' from t0 A, t0 B, t0 C; create table t2 like t1; create table t3 ( - a int, b int, + a int, b int, filler1 char(200), filler2 char(200), key(a),key(b) ) engine=merge union=(t1,t2); @@ -383,3 +406,36 @@ explain select * from t3 where a=1 and b=1; drop table t3; drop table t0, t1, t2; +} + +# +# BUG#20256 - LOCK WRITE - MyISAM +# +CREATE TABLE t1(a INT); +INSERT INTO t1 VALUES(1); +CREATE TABLE t2(a INT, b INT, dummy CHAR(16) DEFAULT '', KEY(a), KEY(b)); +INSERT INTO t2(a,b) VALUES +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(1,2); +LOCK TABLES t1 WRITE, t2 WRITE; +INSERT INTO t2(a,b) VALUES(1,2); +SELECT t2.a FROM t1,t2 WHERE t2.b=2 AND t2.a=1; +UNLOCK TABLES; +DROP TABLE t1, t2; diff --git a/mysql-test/include/index_merge2.inc b/mysql-test/include/index_merge2.inc new file mode 100644 index 
00000000000..d65115eac0f --- /dev/null +++ b/mysql-test/include/index_merge2.inc @@ -0,0 +1,345 @@ +# include/index_merge2.inc +# +# Index merge tests +# +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. +# +# Note: The comments/expectations refer to InnoDB. +# They might be not valid for other storage engines. +# +# Last update: +# 2006-08-02 ML test refactored +# old name was t/index_merge_innodb.test +# main code went into include/index_merge2.inc +# + +--echo #---------------- Index merge test 2 ------------------------------------------- + +eval SET SESSION STORAGE_ENGINE = $engine_type; + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + +create table t1 +( + key1 int not null, + key2 int not null, + + INDEX i1(key1), + INDEX i2(key2) +); + +--disable_query_log +let $1=200; +while ($1) +{ + eval insert into t1 values (200-$1, $1); + dec $1; +} +--enable_query_log + +# No primary key +explain select * from t1 where key1 < 5 or key2 > 197; + +select * from t1 where key1 < 5 or key2 > 197; + +explain select * from t1 where key1 < 3 or key2 > 195; +select * from t1 where key1 < 3 or key2 > 195; + +# Primary key as case-sensitive string with \0s. +# also make primary key be longer then max. index length of MyISAM. +alter table t1 add str1 char (255) not null, + add zeroval int not null default 0, + add str2 char (255) not null, + add str3 char (255) not null; + +update t1 set str1='aaa', str2='bbb', str3=concat(key2, '-', key1 div 2, '_' ,if(key1 mod 2 = 0, 'a', 'A')); + +alter table t1 add primary key (str1, zeroval, str2, str3); + +explain select * from t1 where key1 < 5 or key2 > 197; + +select * from t1 where key1 < 5 or key2 > 197; + +explain select * from t1 where key1 < 3 or key2 > 195; +select * from t1 where key1 < 3 or key2 > 195; + +# Test for BUG#5401 +drop table t1; +create table t1 ( + pk integer not null auto_increment primary key, + key1 integer, + key2 integer not null, + filler char (200), + index (key1), + index (key2) +); +show warnings; +--disable_query_log +let $1=30; +while ($1) +{ + eval insert into t1 (key1, key2, filler) values ($1/4, $1/8, 'filler-data'); + dec $1; +} +--enable_query_log +explain select pk from t1 where key1 = 1 and key2 = 1; +select pk from t1 where key2 = 1 and key1 = 1; +select pk from t1 ignore index(key1,key2) where key2 = 1 and key1 = 1; + +# More tests for BUG#5401. 
+drop table t1; +create table t1 ( + pk int primary key auto_increment, + key1a int, + key2a int, + key1b int, + key2b int, + dummy1 int, + dummy2 int, + dummy3 int, + dummy4 int, + key3a int, + key3b int, + filler1 char (200), + index i1(key1a, key1b), + index i2(key2a, key2b), + index i3(key3a, key3b) +); + +create table t2 (a int); +insert into t2 values (0),(1),(2),(3),(4),(NULL); + +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) + select A.a, B.a, C.a, D.a, C.a, D.a from t2 A,t2 B,t2 C, t2 D; +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) + select key1a, key1b, key2a, key2b, key3a, key3b from t1; +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) + select key1a, key1b, key2a, key2b, key3a, key3b from t1; +analyze table t1; +select count(*) from t1; + +explain select count(*) from t1 where + key1a = 2 and key1b is null and key2a = 2 and key2b is null; + +select count(*) from t1 where + key1a = 2 and key1b is null and key2a = 2 and key2b is null; + +explain select count(*) from t1 where + key1a = 2 and key1b is null and key3a = 2 and key3b is null; + +select count(*) from t1 where + key1a = 2 and key1b is null and key3a = 2 and key3b is null; + +drop table t1,t2; + +# Test for BUG#8441 +create table t1 ( + id1 int, + id2 date , + index idx2 (id1,id2), + index idx1 (id2) +); +insert into t1 values(1,'20040101'), (2,'20040102'); +select * from t1 where id1 = 1 and id2= '20040101'; +drop table t1; + +# Test for BUG#12720 +--disable_warnings +drop view if exists v1; +--enable_warnings +CREATE TABLE t1 ( + `oid` int(11) unsigned NOT NULL auto_increment, + `fk_bbk_niederlassung` int(11) unsigned NOT NULL, + `fk_wochentag` int(11) unsigned NOT NULL, + `uhrzeit_von` time NOT NULL COMMENT 'HH:MM', + `uhrzeit_bis` time NOT NULL COMMENT 'HH:MM', + `geloescht` tinyint(4) NOT NULL, + `version` int(5) NOT NULL, + PRIMARY KEY (`oid`), + KEY `fk_bbk_niederlassung` (`fk_bbk_niederlassung`), + KEY `fk_wochentag` (`fk_wochentag`), + KEY `ix_version` (`version`) +) DEFAULT CHARSET=latin1; + +insert into t1 values +(1, 38, 1, '08:00:00', '13:00:00', 0, 1), +(2, 38, 2, '08:00:00', '13:00:00', 0, 1), +(3, 38, 3, '08:00:00', '13:00:00', 0, 1), +(4, 38, 4, '08:00:00', '13:00:00', 0, 1), +(5, 38, 5, '08:00:00', '13:00:00', 0, 1), +(6, 38, 5, '08:00:00', '13:00:00', 1, 2), +(7, 38, 3, '08:00:00', '13:00:00', 1, 2), +(8, 38, 1, '08:00:00', '13:00:00', 1, 2), +(9, 38, 2, '08:00:00', '13:00:00', 1, 2), +(10, 38, 4, '08:00:00', '13:00:00', 1, 2), +(11, 38, 1, '08:00:00', '13:00:00', 0, 3), +(12, 38, 2, '08:00:00', '13:00:00', 0, 3), +(13, 38, 3, '08:00:00', '13:00:00', 0, 3), +(14, 38, 4, '08:00:00', '13:00:00', 0, 3), +(15, 38, 5, '08:00:00', '13:00:00', 0, 3), +(16, 38, 4, '08:00:00', '13:00:00', 0, 4), +(17, 38, 5, '08:00:00', '13:00:00', 0, 4), +(18, 38, 1, '08:00:00', '13:00:00', 0, 4), +(19, 38, 2, '08:00:00', '13:00:00', 0, 4), +(20, 38, 3, '08:00:00', '13:00:00', 0, 4), +(21, 7, 1, '08:00:00', '13:00:00', 0, 1), +(22, 7, 2, '08:00:00', '13:00:00', 0, 1), +(23, 7, 3, '08:00:00', '13:00:00', 0, 1), +(24, 7, 4, '08:00:00', '13:00:00', 0, 1), +(25, 7, 5, '08:00:00', '13:00:00', 0, 1); + +create view v1 as +select + zeit1.oid AS oid, + zeit1.fk_bbk_niederlassung AS fk_bbk_niederlassung, + zeit1.fk_wochentag AS fk_wochentag, + zeit1.uhrzeit_von AS uhrzeit_von, + zeit1.uhrzeit_bis AS uhrzeit_bis, + zeit1.geloescht AS geloescht, + zeit1.version AS version +from + t1 zeit1 +where +(zeit1.version = + (select max(zeit2.version) AS `max(version)` + from t1 zeit2 + where + 
((zeit1.fk_bbk_niederlassung = zeit2.fk_bbk_niederlassung) and + (zeit1.fk_wochentag = zeit2.fk_wochentag) and + (zeit1.uhrzeit_von = zeit2.uhrzeit_von) and + (zeit1.uhrzeit_bis = zeit2.uhrzeit_bis) + ) + ) +) +and (zeit1.geloescht = 0); + +select * from v1 where oid = 21; +drop view v1; +drop table t1; +## +CREATE TABLE t1( + t_cpac varchar(2) NOT NULL, + t_vers varchar(4) NOT NULL, + t_rele varchar(2) NOT NULL, + t_cust varchar(4) NOT NULL, + filler1 char(250) default NULL, + filler2 char(250) default NULL, + PRIMARY KEY (t_cpac,t_vers,t_rele,t_cust), + UNIQUE KEY IX_4 (t_cust,t_cpac,t_vers,t_rele), + KEY IX_5 (t_vers,t_rele,t_cust) +); + +insert into t1 values +('tm','2.5 ','a ',' ','',''), ('tm','2.5U','a ','stnd','',''), +('da','3.3 ','b ',' ','',''), ('da','3.3U','b ','stnd','',''), +('tl','7.6 ','a ',' ','',''), ('tt','7.6 ','a ',' ','',''), +('bc','B61 ','a ',' ','',''), ('bp','B61 ','a ',' ','',''), +('ca','B61 ','a ',' ','',''), ('ci','B61 ','a ',' ','',''), +('cp','B61 ','a ',' ','',''), ('dm','B61 ','a ',' ','',''), +('ec','B61 ','a ',' ','',''), ('ed','B61 ','a ',' ','',''), +('fm','B61 ','a ',' ','',''), ('nt','B61 ','a ',' ','',''), +('qm','B61 ','a ',' ','',''), ('tc','B61 ','a ',' ','',''), +('td','B61 ','a ',' ','',''), ('tf','B61 ','a ',' ','',''), +('tg','B61 ','a ',' ','',''), ('ti','B61 ','a ',' ','',''), +('tp','B61 ','a ',' ','',''), ('ts','B61 ','a ',' ','',''), +('wh','B61 ','a ',' ','',''), ('bc','B61U','a ','stnd','',''), +('bp','B61U','a ','stnd','',''), ('ca','B61U','a ','stnd','',''), +('ci','B61U','a ','stnd','',''), ('cp','B61U','a ','stnd','',''), +('dm','B61U','a ','stnd','',''), ('ec','B61U','a ','stnd','',''), +('fm','B61U','a ','stnd','',''), ('nt','B61U','a ','stnd','',''), +('qm','B61U','a ','stnd','',''), ('tc','B61U','a ','stnd','',''), +('td','B61U','a ','stnd','',''), ('tf','B61U','a ','stnd','',''), +('tg','B61U','a ','stnd','',''), ('ti','B61U','a ','stnd','',''), +('tp','B61U','a ','stnd','',''), ('ts','B61U','a ','stnd','',''), +('wh','B61U','a ','stnd','',''); +show create table t1; + +select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6'; +select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6' + and t_rele='a' and t_cust = ' '; + +drop table t1; + +# BUG#19021: Crash in index_merge/ROR-intersection optimizer under +# specific circumstances. 
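The BUG#19021 regression test below loads its fixed data set by preparing a statement from a compressed, hex-encoded blob (set @fill= uncompress(unhex(...)); prepare x from @fill). The same trick, shown here on a trivial payload so it stays self-contained, round-trips a statement through COMPRESS():

# illustration only: prepare and run a statement held in a user variable
set @stmt= uncompress(unhex(hex(compress('select 1 as dummy'))));
prepare s from @stmt;
execute s;
deallocate prepare s;

This keeps a very large INSERT out of the test file while still producing deterministic rows.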
+create table t1 ( + pk int(11) not null auto_increment, + a int(11) not null default '0', + b int(11) not null default '0', + c int(11) not null default '0', + + filler1 datetime, filler2 varchar(15), + filler3 longtext, + + kp1 varchar(4), kp2 varchar(7), + kp3 varchar(2), kp4 varchar(4), + kp5 varchar(7), + filler4 char(1), + + primary key (pk), + key idx1(a,b,c), + key idx2(c), + key idx3(kp1,kp2,kp3,kp4,kp5) +) default charset=latin1; +--disable_query_log +set @fill= uncompress(unhex(concat( +'F91D0000789CDD993D6FDB301086F7FE0A6D4E0105B8E3F1335D5BA028DA0EEDE28E1D320408', +'52A0713BF4D7571FB62C51A475924839080307B603E77DEE787C8FA41F9E9EEF7F1F8A87A7C3', +'AFE280C5DF9F8F7FEE9F8B1B2CB114D6902E918455245DB91300FA16E42D5201FA4EE29DA05D', +'B9FB3718A33718A3FA8C30AEFAFDE1F317D016AA67BA7A60FDE45BF5F8BA7B5BDE8812AA9F1A', +'069DB03C9804346644F3A3A6A1338DB572756A3C4D1BCC804CABF912C654AE9BB855A2B85962', +'3A479259CAE6A86C0411D01AE5483581EDCBD9A39C45252D532E533979EB9F82E971D979BDB4', +'8531105670740AFBFD1E34AAB0029E4AD0A1D46A6D0946A21A16038A5CD965CD2D524673F712', +'20C304477315CE18405EAF9BD0AFFEAC74FDA14F1FBF5BD34C769D73FBBEDF4750ADD4E5A99C', +'5C8DC04934AFA275D483D536D174C11B12AF27F8F888B41B6FC9DBA569E1FD7BD72D698130B7', +'91B23A98803512B3D31881E8DCDA2AC1754E3644C4BB3A8466750B911681274A39E35E8624B7', +'444A42AC1213F354758E3CF1A4CDD5A688C767CF1B11ABC5867CB15D8A18E0B91E9EC275BB94', +'58F33C2936F64690D55BC29E4A293D95A798D84217736CEAAA538CE1354269EE2162053FBC66', +'496D90CB53323CB279D3A6AF651B4B22B9E430743D83BE48E995A09D4FC9871C22D8D189B945', +'706911BCB8C3C774B9C08D2FC6ED853ADACA37A14A4CB2E027630E5B80ECACD939431B1CDF62', +'7D71487536EA2C678F59685E91F4B6C144BCCB94C1EBA9FA6F5552DDCA4E4539BE326A2720CB', +'45ED028EB3616AC93C46E775FEA9FA6DA7CFCEC6DEBA5FCD1F915EED4D983BDDB881528AD9AB', +'43C1576F29AAB35BDFBC21D422F52B307D350589D45225A887AC46C8EDD72D99EC3ED2E1BCEF', +'7AF26FC4C74097B6768A5EDAFA660CC64278F7E63F99AC954B'))); +prepare x from @fill; +execute x; +deallocate prepare x; +--enable_query_log +set @fill=NULL; +SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND + kp1='279' AND kp2='ELM0678' AND kp3='6' AND kp4='10' AND kp5 = 'R '; + +drop table t1; + +# BUG#21277: Index Merge/sort_union: wrong query results +create table t1 +( + key1 int not null, + key2 int not null default 0, + key3 int not null default 0 +); + +insert into t1(key1) values (1),(2),(3),(4),(5),(6),(7),(8); + +let $1=7; +set @d=8; +while ($1) +{ + eval insert into t1 (key1) select key1+@d from t1; + eval set @d=@d*2; + dec $1; +} + +alter table t1 add index i2(key2); +alter table t1 add index i3(key3); +update t1 set key2=key1,key3=key1; + +# to test the bug, the following must use "sort_union": +explain select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); +select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); +drop table t1; + diff --git a/mysql-test/t/index_merge_innodb2.test b/mysql-test/include/index_merge_2sweeps.inc similarity index 58% rename from mysql-test/t/index_merge_innodb2.test rename to mysql-test/include/index_merge_2sweeps.inc index ec4ea672bc1..3ae7e5b3c09 100644 --- a/mysql-test/t/index_merge_innodb2.test +++ b/mysql-test/include/index_merge_2sweeps.inc @@ -1,7 +1,20 @@ +# include/index_merge_2sweeps.inc # # 2-sweeps read Index_merge test # --- source include/have_innodb.inc +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. 
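# For example, a top-level wrapper test would set the variable and then source
# this file (a minimal sketch; the wrapper file name and the engine chosen here
# are illustrative only and are not defined by this include file):
#   # t/index_merge_innodb.test
#   --source include/have_innodb.inc
#   let $engine_type= InnoDB;
#   --source include/index_merge_2sweeps.inc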
+# +# Last update: +# 2006-08-02 ML test refactored +# old name was index_merge_innodb2.test +# main code went into include/index_merge_2sweeps.inc +# + +--echo #---------------- 2-sweeps read Index merge test 2 ------------------------------- + +eval SET SESSION STORAGE_ENGINE = $engine_type; --disable_warnings drop table if exists t1; @@ -10,12 +23,12 @@ drop table if exists t1; create table t1 ( pk int primary key, key1 int, - key2 int, + key2 int, filler char(200), - filler2 char(200), + filler2 char(200), index(key1), index(key2) -) engine=innodb; +); --disable_query_log @@ -31,20 +44,20 @@ select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 ); set @maxv=1000; -select * from t1 where - (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +select * from t1 where + (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) or key1=18 or key1=60; -select * from t1 where - (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +select * from t1 where + (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) or key1 < 3 or key1 > @maxv-11; -select * from t1 where - (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +select * from t1 where + (pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) or (key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10); -select * from t1 where +select * from t1 where (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (key1 < 5) or (key1 > @maxv-10); diff --git a/mysql-test/t/index_merge_ror.test b/mysql-test/include/index_merge_ror.inc similarity index 88% rename from mysql-test/t/index_merge_ror.test rename to mysql-test/include/index_merge_ror.inc index 474d8e3dbcd..b9f5c7b95da 100644 --- a/mysql-test/t/index_merge_ror.test +++ b/mysql-test/include/index_merge_ror.inc @@ -1,10 +1,27 @@ +# include/index_merge_ror.inc # -# ROR-index_merge tests. +# ROR-index_merge tests. # +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. +# +# Note: The comments/expectations refer to MyISAM. +# They might be not valid for other storage engines. +# +# Last update: +# 2006-08-02 ML test refactored +# old name was t/index_merge_ror.test +# main code went into include/index_merge_ror.inc +# + +--echo #---------------- ROR-index_merge tests ----------------------- + +eval SET SESSION STORAGE_ENGINE = $engine_type; + --disable_warnings drop table if exists t0,t1,t2; --enable_warnings ---disable_query_log create table t1 ( /* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */ @@ -29,7 +46,7 @@ create table t1 filler4 char (200), filler5 char (200), filler6 char (200), - + /* order of keys is important */ key sta_swt12a(st_a,swt1a,swt2a), key sta_swt1a(st_a,swt1a), @@ -47,21 +64,26 @@ create table t1 key(key4) ) ; -# Fill table +# Fill table create table t0 as select * from t1; +--disable_query_log +--echo # Printing of many insert into t0 values (....) disabled. let $cnt=1000; while ($cnt) { eval insert into t0 values (1, 2, 3, 1, 2, 3, 0, 0, 0, 0, 'data1', 'data2', 'data3', 'data4', 'data5', 'data6'); dec $cnt; } +--enable_query_log alter table t1 disable keys; +--disable_query_log +--echo # Printing of many insert into t1 select .... from t0 disabled. 
let $1=4; while ($1) { let $2=4; - while ($2) + while ($2) { let $3=4; while ($3) @@ -74,6 +96,7 @@ while ($1) dec $1; } +--echo # Printing of many insert into t1 (...) values (....) disabled. # Row retrieval tests # -1 is used for values 'out of any range we are using' # insert enough rows for index intersection to be used for (key1,key2) @@ -90,17 +113,17 @@ while ($cnt) eval insert into t1 (key1, key2, key3, key4, filler1) values (-1, 100, -1, 100,'key2-key4'); dec $cnt; } -alter table t1 enable keys; --enable_query_log +alter table t1 enable keys; select count(*) from t1; -# One row results tests for cases where a single row matches all conditions +# One row results tests for cases where a single row matches all conditions explain select key1,key2 from t1 where key1=100 and key2=100; select key1,key2 from t1 where key1=100 and key2=100; explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -# Several-rows results +# Several-rows results insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2'); insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3'); @@ -150,7 +173,7 @@ explain select key1,key2 from t1 where key1=100 and key2=100; select key1,key2 from t1 where key1=100 and key2=100; # ROR-union tests with various cases. -# All scans returning duplicate rows: +# All scans returning duplicate rows: insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-1'); insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-2'); insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-3'); @@ -169,7 +192,7 @@ explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 a select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; ## -## Optimizer tests +## Optimizer tests ## # Check that the shortest key is used for ROR-intersection, covering and non-covering. @@ -179,9 +202,9 @@ explain select st_a,st_b from t1 where st_a=1 and st_b=1; # Check if "ingore index" syntax works explain select st_a from t1 ignore index (st_a) where st_a=1 and st_b=1; -# Do many tests +# Do many tests # Check that keys that don't improve selectivity are skipped. 
-# +# # Different value on 32 and 64 bit --replace_result sta_swt12a sta_swt21a sta_swt12a, sta_swt12a, @@ -191,25 +214,25 @@ explain select * from t1 where st_b=1 and swt1b=1 and swt2b=1; explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -explain select * from t1 ignore index (sta_swt21a, stb_swt1a_2b) +explain select * from t1 ignore index (sta_swt21a, stb_swt1a_2b) where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b) +explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b) where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b, stb_swt1b) +explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b, stb_swt1b) where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -explain select * from t1 +explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1; -explain select * from t1 +explain select * from t1 where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; -explain select st_a from t1 +explain select st_a from t1 where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; -explain select st_a from t1 +explain select st_a from t1 where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; drop table t0,t1; @@ -240,7 +263,7 @@ insert into t2 select * from t2; select count(a) from t2 where a='BBBBBBBB'; select count(a) from t2 where b='BBBBBBBB'; -# BUG#1: +# BUG#1: --replace_result a a_or_b b a_or_b explain select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; diff --git a/mysql-test/t/index_merge_ror_cpk.test b/mysql-test/include/index_merge_ror_cpk.inc similarity index 67% rename from mysql-test/t/index_merge_ror_cpk.test rename to mysql-test/include/index_merge_ror_cpk.inc index d58d0b46dd8..cfc2ed3885e 100644 --- a/mysql-test/t/index_merge_ror_cpk.test +++ b/mysql-test/include/index_merge_ror_cpk.inc @@ -1,7 +1,23 @@ +# include/index_merge_ror_cpk.inc # # Clustered PK ROR-index_merge tests # --- source include/have_innodb.inc +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. +# +# Note: The comments/expectations refer to InnoDB. +# They might be not valid for other storage engines. 
+# +# Last update: +# 2006-08-02 ML test refactored +# old name was t/index_merge_ror_cpk.test +# main code went into include/index_merge_ror_cpk.inc +# + +--echo #---------------- Clustered PK ROR-index_merge tests ----------------------------- + +eval SET SESSION STORAGE_ENGINE = $engine_type; --disable_warnings drop table if exists t1; @@ -9,14 +25,14 @@ drop table if exists t1; create table t1 ( - pk1 int not null, + pk1 int not null, pk2 int not null, - + key1 int not null, key2 int not null, - pktail1ok int not null, - pktail2ok int not null, + pktail1ok int not null, + pktail2ok int not null, pktail3bad int not null, pktail4bad int not null, pktail5bad int not null, @@ -37,7 +53,7 @@ create table t1 key (pktail5bad, pk1, pk2, pk2copy), primary key (pk1, pk2) -) engine=innodb; +); --disable_query_log set autocommit=0; @@ -50,13 +66,13 @@ while ($1) set autocommit=1; --enable_query_log -# Verify that range scan on CPK is ROR -# (use index_intersection because it is impossible to check that for index union) +# Verify that range scan on CPK is ROR +# (use index_intersection because it is impossible to check that for index union) explain select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; # CPK scan + 1 ROR range scan is a special case select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; -# Verify that CPK fields are considered to be covered by index scans +# Verify that CPK fields are considered to be covered by index scans explain select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; @@ -65,12 +81,13 @@ select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; explain select * from t1 where badkey=1 and key1=10; --replace_column 9 ROWS explain select * from t1 where pk1 < 7500 and key1 = 10; - + # Verify that keys with 'tails' of PK members are ok. explain select * from t1 where pktail1ok=1 and key1=10; explain select * from t1 where pktail2ok=1 and key1=10; -select ' The following is actually a deficiency, it uses sort_union currently:' as 'note:'; +# Note: The following is actually a deficiency, it uses sort_union currently. +# This comment refers to InnoDB and is probably not valid for other engines. 
explain select * from t1 where (pktail2ok=1 and pk1< 50000) or key1=10; # The expected rows differs a bit from platform to platform @@ -79,33 +96,33 @@ explain select * from t1 where pktail3bad=1 and key1=10; explain select * from t1 where pktail4bad=1 and key1=10; explain select * from t1 where pktail5bad=1 and key1=10; -# Test for problem with innodb key values prefetch buffer: +# Test for problem with innodb key values prefetch buffer: explain select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; drop table t1; # Testcase for BUG#4984 create table t1 -( - RUNID varchar(22), - SUBMITNR varchar(5), - ORDERNR char(1) , - PROGRAMM varchar(8), - TESTID varchar(4), - UCCHECK char(1), - ETEXT varchar(80), +( + RUNID varchar(22), + SUBMITNR varchar(5), + ORDERNR char(1), + PROGRAMM varchar(8), + TESTID varchar(4), + UCCHECK char(1), + ETEXT varchar(80), ETEXT_TYPE char(1), - INFO char(1), - SEVERITY tinyint(3), - TADIRFLAG char(1), - PRIMARY KEY (RUNID,SUBMITNR,ORDERNR,PROGRAMM,TESTID,UCCHECK), + INFO char(1), + SEVERITY tinyint(3), + TADIRFLAG char(1), + PRIMARY KEY (RUNID,SUBMITNR,ORDERNR,PROGRAMM,TESTID,UCCHECK), KEY `TVERM~KEY` (PROGRAMM,TESTID,UCCHECK) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -update t1 set `ETEXT` = '', `ETEXT_TYPE`='', `INFO`='', `SEVERITY`='', `TADIRFLAG`='' -WHERE - `RUNID`= '' AND `SUBMITNR`= '' AND `ORDERNR`='' AND `PROGRAMM`='' AND - `TESTID`='' AND `UCCHECK`=''; +) DEFAULT CHARSET=latin1; + +update t1 set `ETEXT` = '', `ETEXT_TYPE`='', `INFO`='', `SEVERITY`='', `TADIRFLAG`='' +WHERE + `RUNID`= '' AND `SUBMITNR`= '' AND `ORDERNR`='' AND `PROGRAMM`='' AND + `TESTID`='' AND `UCCHECK`=''; drop table t1; diff --git a/mysql-test/include/mix1.inc b/mysql-test/include/mix1.inc new file mode 100644 index 00000000000..fb4d095ed6d --- /dev/null +++ b/mysql-test/include/mix1.inc @@ -0,0 +1,430 @@ +# include/mix1.inc +# +# The variables +# $engine_type -- storage engine to be tested +# $other_engine_type -- storage engine <> $engine_type +# $other_engine_type must point to an all +# time available storage engine +# 2006-08 MySQL 5.1 MyISAM and MEMORY only +# $test_foreign_keys -- 0, skip foreign key tests +# -- 1, do not skip foreign key tests +# have to be set before sourcing this script. +# +# Note: The comments/expectations refer to InnoDB. +# They might be not valid for other storage engines. +# +# Last update: +# 2006-08-15 ML refactoring of t/innodb_mysql.test +# - shift main code of t/innodb_mysql.test to include/mix1.inc +# - replace hardcoded assignment of storage engine by +# use of $engine_type and $other_engine_type variables +# - remove redundant replay testcase of +# Bug#12882 min/max inconsistent on empty table +# - corrected analyze table t1; to analyze table t4; +# Much older versions of this test show that the table +# where just some indexes have been created must be used. +# + +eval SET SESSION STORAGE_ENGINE = $engine_type; + +--disable_warnings +drop table if exists t1,t2,t1m,t1i,t2m,t2i,t4; +--enable_warnings + +# +# Bug#17530: Incorrect key truncation on table creation caused server crash. +# +create table t1(f1 varchar(800) binary not null, key(f1)) + character set utf8 collate utf8_general_ci; +insert into t1 values('aaa'); +drop table t1; + +# BUG#16798: Uninitialized row buffer reads in ref-or-null optimizer +# (repeatable only w/innodb). 
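# Background on the access method named above (a general note, not an exact
# query from this test): ref_or_null works like ref but additionally looks up
# NULL entries in the index, and is chosen for conditions of the shape
#   key_col = <expr> OR key_col IS NULL
# which is the kind of lookup the nullable, indexed columns below make possible.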
+create table t1 ( + c_id int(11) not null default '0', + org_id int(11) default null, + unique key contacts$c_id (c_id), + key contacts$org_id (org_id) +); +insert into t1 values + (2,null),(120,null),(141,null),(218,7), (128,1), + (151,2),(234,2),(236,2),(243,2),(255,2),(259,2),(232,3),(235,3),(238,3), + (246,3),(253,3),(269,3),(285,3),(291,3),(293,3),(131,4),(230,4),(231,4); + +create table t2 ( + slai_id int(11) not null default '0', + owner_tbl int(11) default null, + owner_id int(11) default null, + sla_id int(11) default null, + inc_web int(11) default null, + inc_email int(11) default null, + inc_chat int(11) default null, + inc_csr int(11) default null, + inc_total int(11) default null, + time_billed int(11) default null, + activedate timestamp null default null, + expiredate timestamp null default null, + state int(11) default null, + sla_set int(11) default null, + unique key t2$slai_id (slai_id), + key t2$owner_id (owner_id), + key t2$sla_id (sla_id) +); +insert into t2(slai_id, owner_tbl, owner_id, sla_id) values + (1,3,1,1), (3,3,10,2), (4,3,3,6), (5,3,2,5), (6,3,8,3), (7,3,9,7), + (8,3,6,8), (9,3,4,9), (10,3,5,10), (11,3,11,11), (12,3,7,12); + +flush tables; +select si.slai_id +from t1 c join t2 si on + ((si.owner_tbl = 3 and si.owner_id = c.org_id) or + ( si.owner_tbl = 2 and si.owner_id = c.c_id)) +where + c.c_id = 218 and expiredate is null; + +select * from t1 where org_id is null; +select si.slai_id +from t1 c join t2 si on + ((si.owner_tbl = 3 and si.owner_id = c.org_id) or + ( si.owner_tbl = 2 and si.owner_id = c.c_id)) +where + c.c_id = 218 and expiredate is null; + +drop table t1, t2; + +# +# Bug#17212: results not sorted correctly by ORDER BY when using index +# (repeatable only w/innodb because of index props) +# +CREATE TABLE t1 (a int, b int, KEY b (b)); +CREATE TABLE t2 (a int, b int, PRIMARY KEY (a,b)); +CREATE TABLE t3 (a int, b int, c int, PRIMARY KEY (a), + UNIQUE KEY b (b,c), KEY a (a,b,c)); + +INSERT INTO t1 VALUES (1, 1); +INSERT INTO t1 SELECT a + 1, b + 1 FROM t1; +INSERT INTO t1 SELECT a + 2, b + 2 FROM t1; + +INSERT INTO t2 VALUES (1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8); +INSERT INTO t2 SELECT a + 1, b FROM t2; +DELETE FROM t2 WHERE a = 1 AND b < 2; + +INSERT INTO t3 VALUES (1,1,1),(2,1,2); +INSERT INTO t3 SELECT a + 2, a + 2, 3 FROM t3; +INSERT INTO t3 SELECT a + 4, a + 4, 3 FROM t3; + +# demonstrate a problem when a must-use-sort table flag +# (sort_by_table=1) is being neglected. +SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE + t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) + ORDER BY t1.b LIMIT 2; + +# demonstrate the problem described in the bug report +SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE + t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) + ORDER BY t1.b LIMIT 5; +DROP TABLE t1, t2, t3; + + +# BUG#21077 (The testcase is not deterministic so correct execution doesn't +# prove anything) For proof one should track if sequence of ha_innodb::* func +# calls is correct. 
+CREATE TABLE `t1` (`id1` INT) ; +INSERT INTO `t1` (`id1`) VALUES (1),(5),(2); + +CREATE TABLE `t2` ( + `id1` INT, + `id2` INT NOT NULL, + `id3` INT, + `id4` INT NOT NULL, + UNIQUE (`id2`,`id4`), + KEY (`id1`) +); + +INSERT INTO `t2`(`id1`,`id2`,`id3`,`id4`) VALUES +(1,1,1,0), +(1,1,2,1), +(5,1,2,2), +(6,1,2,3), +(1,2,2,2), +(1,2,1,1); + +SELECT `id1` FROM `t1` WHERE `id1` NOT IN (SELECT `id1` FROM `t2` WHERE `id2` = 1 AND `id3` = 2); +DROP TABLE t1, t2; + +# +# Bug #12882 min/max inconsistent on empty table +# + +--disable_warnings +eval create table t1m (a int) engine = $other_engine_type; +create table t1i (a int); +eval create table t2m (a int) engine = $other_engine_type; +create table t2i (a int); +--enable_warnings +insert into t2m values (5); +insert into t2i values (5); + +# test with $engine_type +select min(a) from t1i; +select min(7) from t1i; +select min(7) from DUAL; +explain select min(7) from t2i join t1i; +select min(7) from t2i join t1i; + +select max(a) from t1i; +select max(7) from t1i; +select max(7) from DUAL; +explain select max(7) from t2i join t1i; +select max(7) from t2i join t1i; + +select 1, min(a) from t1i where a=99; +select 1, min(a) from t1i where 1=99; +select 1, min(1) from t1i where a=99; +select 1, min(1) from t1i where 1=99; + +select 1, max(a) from t1i where a=99; +select 1, max(a) from t1i where 1=99; +select 1, max(1) from t1i where a=99; +select 1, max(1) from t1i where 1=99; + +# mixed $engine_type/$other_engine_type test +explain select count(*), min(7), max(7) from t1m, t1i; +select count(*), min(7), max(7) from t1m, t1i; + +explain select count(*), min(7), max(7) from t1m, t2i; +select count(*), min(7), max(7) from t1m, t2i; + +explain select count(*), min(7), max(7) from t2m, t1i; +select count(*), min(7), max(7) from t2m, t1i; + +drop table t1m, t1i, t2m, t2i; + +# +# Bug #12882: primary key implcitly included in every innodb index +# (was part of group_min_max.test) +# + +eval create table t1 ( + a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' ' +) ENGINE = $other_engine_type; + +insert into t1 (a1, a2, b, c, d) values +('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'), +('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'), +('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'), +('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'), +('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'), +('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'), +('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'), +('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'), +('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'), +('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'), +('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'), +('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'), 
+('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'), +('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'), +('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'), +('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'), +('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'), +('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'), +('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'), +('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'), +('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'), +('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'), +('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'), +('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'), +('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'), +('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'), +('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'), +('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'), +('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'), +('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'), +('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'), +('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'); +--disable_warnings +create table t4 ( + pk_col int auto_increment primary key, a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' ' +); +--enable_warnings +insert into t4 (a1, a2, b, c, d, dummy) select * from t1; + +create index idx12672_0 on t4 (a1); +create index idx12672_1 on t4 (a1,a2,b,c); +create index idx12672_2 on t4 (a1,a2,b); +analyze table t4; + +select distinct a1 from t4 where pk_col not in (1,2,3,4); + +drop table t1,t4; + +# +# Bug #6142: a problem with the empty innodb table +# (was part of group_min_max.test) +# + +--disable_warnings +create table t1 ( + a varchar(30), b varchar(30), primary key(a), key(b) +); +--enable_warnings +select distinct a from t1; +drop table t1; + +# +# Bug #9798: group by with rollup +# (was part of group_min_max.test) +# + +--disable_warnings +create table t1(a int, key(a)); +--enable_warnings +insert into t1 values(1); +select a, count(a) from t1 group by a with rollup; +drop table t1; + +# +# Bug #13293 Wrongly used index results in endless loop. 
+# (was part of group_min_max.test) +# +create table t1 (f1 int, f2 char(1), primary key(f1,f2)); +insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d"); +alter table t1 drop primary key, add primary key (f2, f1); +explain select distinct f1 a, f1 b from t1; +explain select distinct f1, f2 from t1; +drop table t1; + +# +# Test for bug #17164: ORed FALSE blocked conversion of outer join into join +# + +CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20), + INDEX (name)); +CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11)); +# CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11), +# FOREIGN KEY (fkey) REFERENCES t2(id)); +if ($test_foreign_keys) +{ + ALTER TABLE t2 ADD FOREIGN KEY (fkey) REFERENCES t2(id); +} +INSERT INTO t1 VALUES (1,'A1'),(2,'A2'),(3,'B'); +INSERT INTO t2 VALUES (1,1),(2,2),(3,2),(4,3),(5,3); + +EXPLAIN +SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id + WHERE t1.name LIKE 'A%'; + +EXPLAIN +SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id + WHERE t1.name LIKE 'A%' OR FALSE; + +DROP TABLE t1,t2; + +# +# Test of behaviour with CREATE ... SELECT +# + +CREATE TABLE t1 (a int, b int); +insert into t1 values (1,1),(1,2); +--error 1062 +CREATE TABLE t2 (primary key (a)) select * from t1; +# This should give warning +drop table if exists t2; +--error 1062 +CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1; +# This should give warning +drop table if exists t2; +CREATE TABLE t2 (a int, b int, primary key (a)); +BEGIN; +INSERT INTO t2 values(100,100); +--error 1062 +CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1; +SELECT * from t2; +ROLLBACK; +SELECT * from t2; +TRUNCATE table t2; +--error 1062 +INSERT INTO t2 select * from t1; +SELECT * from t2; +drop table t2; + +CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)); +BEGIN; +INSERT INTO t2 values(100,100); +--error 1062 +CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1; +SELECT * from t2; +COMMIT; +BEGIN; +INSERT INTO t2 values(101,101); +--error 1062 +CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1; +SELECT * from t2; +ROLLBACK; +SELECT * from t2; +TRUNCATE table t2; +--error 1062 +INSERT INTO t2 select * from t1; +SELECT * from t2; +drop table t1,t2; + +# +# Bug#17530: Incorrect key truncation on table creation caused server crash. +# +create table t1(f1 varchar(800) binary not null, key(f1)) + character set utf8 collate utf8_general_ci; +insert into t1 values('aaa'); +drop table t1; + +# Fix for BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY +# UPDATE": if the row is updated, it's like a regular UPDATE: +# LAST_INSERT_ID() is not affected. 
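# A compact illustration of the behaviour exercised below (the values are only
# an example, not the test's expected output):
#   insert into t2 (a) values (6) on duplicate key update c = ifnull(c,0) + 1;
#     -- new row inserted: LAST_INSERT_ID() becomes that row's auto-increment k
#   insert into t2 (a) values (6) on duplicate key update c = ifnull(c,0) + 1;
#     -- duplicate on a=6, only the UPDATE part runs: LAST_INSERT_ID() is unchanged
#   insert into t2 (a) values (6) on duplicate key update c = ifnull(c,0) + 1, k = last_insert_id(k);
#     -- workaround: LAST_INSERT_ID() now reports the updated row's k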
+CREATE TABLE `t2` ( + `k` int(11) NOT NULL auto_increment, + `a` int(11) default NULL, + `c` int(11) default NULL, + PRIMARY KEY (`k`), + UNIQUE KEY `idx_1` (`a`) +); +insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +insert into t2 ( a ) values ( 7 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +select last_insert_id(); +select * from t2; +insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +select last_insert_id(); +# test again when last_insert_id() is 0 initially +select last_insert_id(0); +insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +select last_insert_id(); +select * from t2; + +# Test of LAST_INSERT_ID() when autogenerated will fail: +# last_insert_id() should not change +insert ignore into t2 values (null,6,1),(10,8,1); +select last_insert_id(); +# First and second autogenerated will fail, last_insert_id() should +# point to third +insert ignore into t2 values (null,6,1),(null,8,1),(null,15,1),(null,20,1); +select last_insert_id(); +select * from t2; + +# Test of the workaround which enables people to know the id of the +# updated row in INSERT ON DUPLICATE KEY UPDATE, by using +# LAST_INSERT_ID(autoinc_col) in the UPDATE clause. + +insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1, k=last_insert_id(k); +select last_insert_id(); +select * from t2; + +drop table t2; diff --git a/mysql-test/include/mix2.inc b/mysql-test/include/mix2.inc new file mode 100644 index 00000000000..d3980b17d91 --- /dev/null +++ b/mysql-test/include/mix2.inc @@ -0,0 +1,2699 @@ +################################################################################ +# # +# include/mix2.inc # +# # +# This is a derivate of t/innodb.test and has to be maintained by MySQL # +# guys only. # +# # +# Please, DO NOT create a toplevel testcase mix2_innodb.test, because # +# innodb.test does already these tests. # +# # +# Variables which have to be set before calling this script: # +# $engine_type -- Storage engine to be tested # +# $other_engine_type -- storage engine <> $engine_type # +# $other_engine_type1 -- storage engine <> $engine_type # +# storage engine <> $other_engine_type, if possible # +# $other_non_trans_engine_type -- storage engine <> $engine_type # +# $other_non_trans_engine_type must be a non # +# transactional storage engine # +# $other_non_live_chks_engine_type # +# -- storage engine <> $engine_type, if possible # +# storage engine must not support live checksum # +# $other_live_chks_engine_type # +# -- storage engine <> $engine_type, if possible # +# storage engine must support live checksum # +# General Note: The $other_*_engine_type variables must point to all # +# time available storage engines # +# 2006-08 MySQL 5.1 MyISAM and MEMORY only # +# $test_transactions -- 0, skip transactional tests # +# -- 1, do not skip transactional tests # +# $test_foreign_keys -- 0, skip foreign key tests # +# -- 1, do not skip foreign key tests # +# $fulltext_query_unsupported -- 0, execute fulltext_query tests # +# -- 1, skip fulltext query tests # +# $no_autoinc_update -- 0, skip tests where it is expected that an update # +# does not update the internal auto-increment value# +# -- 1, do not skip these tests # +# $no_spatial_key -- 0, skip tests where it is expected that keys on # +# spatial data type are not allowed # +# -- 1, do not skip these tests # +# # +# The comments/expectations refer to InnoDB. # +# They might be not valid for other storage engines. 
# +# # +# # +# Last update: # +# 2006-08-15 ML - introduce several $variables # +# - correct some storage engine assignments # +# - minor improvements like correct wrong table after analyze # +# - let checksum testcase meet all table variants with/without # +# live checksum feature exiting and/or enabled # +# 2006-07-26 ML create script by using t/innodb.test and introduce $variables # +# # +################################################################################ + +# Set the SESSION DEFAULT STORAGE ENGINE to a value <> storage engine +# to be tested. This must not affect any CREATE TABLE statement, where +# the storage engine is assigned explicitely, +eval SET SESSION STORAGE_ENGINE = $other_engine_type; + +# +# Small basic test with ignore +# + +--disable_warnings +drop table if exists t1,t2,t3,t4; +drop database if exists mysqltest; +--enable_warnings + +eval create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=$engine_type; + +insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt'); +select id, code, name from t1 order by id; + +update ignore t1 set id = 8, name = 'Sinisa' where id < 3; +select id, code, name from t1 order by id; +update ignore t1 set id = id + 10, name = 'Ralph' where id < 4; +select id, code, name from t1 order by id; + +drop table t1; + +# +# A bit bigger test +# The 'replace_column' statements are needed because the cardinality calculated +# by innodb is not always the same between runs +# + +eval CREATE TABLE t1 ( + id int(11) NOT NULL auto_increment, + parent_id int(11) DEFAULT '0' NOT NULL, + level tinyint(4) DEFAULT '0' NOT NULL, + PRIMARY KEY (id), + KEY parent_id (parent_id), + KEY level (level) +) engine=$engine_type; +INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1),(179,5,2); +update t1 set parent_id=parent_id+100; +select * from t1 where parent_id=102; +update t1 set id=id+1000; +-- error 1062,1022 +update t1 set id=1024 where id=1009; +select * from t1; +update ignore t1 set id=id+1; # This will change all rows +select * from t1; +update ignore t1 set id=1023 where id=1010; +select * from t1 where parent_id=102; +--replace_column 9 # +explain select level from t1 where level=1; +--replace_column 9 # +explain select level,id from t1 where level=1; +--replace_column 9 # +explain select level,id,parent_id from t1 where level=1; +select level,id from t1 where level=1; +select level,id,parent_id from t1 where level=1; +optimize table t1; +--replace_column 7 # +show keys from t1; +drop table t1; + +# +# Test replace +# + +eval CREATE TABLE t1 ( + gesuchnr int(11) DEFAULT '0' NOT NULL, + benutzer_id int(11) DEFAULT '0' NOT NULL, + PRIMARY KEY (gesuchnr,benutzer_id) +) engine=$engine_type; + +replace into t1 (gesuchnr,benutzer_id) values (2,1); +replace into t1 (gesuchnr,benutzer_id) values (1,1); +replace into t1 (gesuchnr,benutzer_id) values (1,1); +select * from t1; +drop table t1; + +# +# test delete using hidden_primary_key +# + +eval create table t1 (a int) engine=$engine_type; +insert into t1 values (1), (2); +optimize table t1; +delete from t1 where a = 1; 
+select * from t1; +check table t1; +drop table t1; + +eval create table t1 (a int,b varchar(20)) engine=$engine_type; +insert into t1 values (1,""), (2,"testing"); +delete from t1 where a = 1; +select * from t1; +create index skr on t1 (a); +insert into t1 values (3,""), (4,"testing"); +analyze table t1; +--replace_column 7 # +show keys from t1; +drop table t1; + + +# Test of reading on secondary key with may be null + +eval create table t1 (a int,b varchar(20),key(a)) engine=$engine_type; +insert into t1 values (1,""), (2,"testing"); +select * from t1 where a = 1; +drop table t1; + +if ($test_transactions) +{ +# +# Test rollback +# + +eval create table t1 (n int not null primary key) engine=$engine_type; +set autocommit=0; +insert into t1 values (4); +rollback; +select n, "after rollback" from t1; +insert into t1 values (4); +commit; +select n, "after commit" from t1; +commit; +insert into t1 values (5); +-- error 1062 +insert into t1 values (4); +commit; +select n, "after commit" from t1; +set autocommit=1; +insert into t1 values (6); +-- error 1062 +insert into t1 values (4); +select n from t1; +set autocommit=0; +# +# savepoints +# +begin; +savepoint `my_savepoint`; +insert into t1 values (7); +savepoint `savept2`; +insert into t1 values (3); +select n from t1; +savepoint savept3; +rollback to savepoint savept2; +--error 1305 +rollback to savepoint savept3; +rollback to savepoint savept2; +release savepoint `my_savepoint`; +select n from t1; +-- error 1305 +rollback to savepoint `my_savepoint`; +--error 1305 +rollback to savepoint savept2; +insert into t1 values (8); +savepoint sv; +commit; +savepoint sv; +set autocommit=1; +# nop +rollback; +drop table t1; + +# +# Test for commit and FLUSH TABLES WITH READ LOCK +# + +eval create table t1 (n int not null primary key) engine=$engine_type; +start transaction; +insert into t1 values (4); +flush tables with read lock; +# +# Current code can't handle a read lock in middle of transaction +#--error 1223; +commit; +unlock tables; +commit; +select * from t1; +drop table t1; + +# +# Testing transactions +# + +eval create table t1 ( id int NOT NULL PRIMARY KEY, nom varchar(64)) engine=$engine_type; +begin; +insert into t1 values(1,'hamdouni'); +select id as afterbegin_id,nom as afterbegin_nom from t1; +rollback; +select id as afterrollback_id,nom as afterrollback_nom from t1; +set autocommit=0; +insert into t1 values(2,'mysql'); +select id as afterautocommit0_id,nom as afterautocommit0_nom from t1; +rollback; +select id as afterrollback_id,nom as afterrollback_nom from t1; +set autocommit=1; +drop table t1; + +# +# Simple not autocommit test +# + +eval CREATE TABLE t1 (id char(8) not null primary key, val int not null) engine=$engine_type; +insert into t1 values ('pippo', 12); +-- error 1062 +insert into t1 values ('pippo', 12); # Gives error +delete from t1; +delete from t1 where id = 'pippo'; +select * from t1; + +insert into t1 values ('pippo', 12); +set autocommit=0; +delete from t1; +rollback; +select * from t1; +delete from t1; +commit; +select * from t1; +drop table t1; + +# +# Test of active transactions +# + +eval create table t1 (a integer) engine=$engine_type; +start transaction; +rename table t1 to t2; +eval create table t1 (b integer) engine=$engine_type; +insert into t1 values (1); +rollback; +drop table t1; +rename table t2 to t1; +drop table t1; +set autocommit=1; + +# +# The following simple tests failed at some point +# + +eval CREATE TABLE t1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR(64)) ENGINE=$engine_type; +INSERT INTO 
t1 VALUES (1, 'Jochen'); +select * from t1; +drop table t1; + +eval CREATE TABLE t1 ( _userid VARCHAR(60) NOT NULL PRIMARY KEY) ENGINE=$engine_type; +set autocommit=0; +INSERT INTO t1 SET _userid='marc@anyware.co.uk'; +COMMIT; +SELECT * FROM t1; +SELECT _userid FROM t1 WHERE _userid='marc@anyware.co.uk'; +drop table t1; +set autocommit=1; + +} +# End of transactional tests + +# +# Test when reading on part of unique key +# +eval CREATE TABLE t1 ( + user_id int(10) DEFAULT '0' NOT NULL, + name varchar(100), + phone varchar(100), + ref_email varchar(100) DEFAULT '' NOT NULL, + detail varchar(200), + PRIMARY KEY (user_id,ref_email) +)engine=$engine_type; + +INSERT INTO t1 VALUES (10292,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10292,'shirish','2333604','shirish@yahoo.com','ddsds'),(10292,'sonali','323232','sonali@bolly.com','filmstar'); +select * from t1 where user_id=10292; +INSERT INTO t1 VALUES (10291,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10293,'shirish','2333604','shirish@yahoo.com','ddsds'); +select * from t1 where user_id=10292; +select * from t1 where user_id>=10292; +select * from t1 where user_id>10292; +select * from t1 where user_id<10292; +drop table t1; + +# +# Test that keys are created in right order +# + +eval CREATE TABLE t1 (a int not null, b int not null,c int not null, +key(a),primary key(a,b), unique(c),key(a),unique(b)) ENGINE = $engine_type; +--replace_column 7 # +show index from t1; +drop table t1; + +# +# Test of ALTER TABLE and innodb tables +# + +eval create table t1 (col1 int not null, col2 char(4) not null, primary key(col1)) ENGINE = $other_engine_type; +eval alter table t1 engine=$engine_type; +insert into t1 values ('1','1'),('5','2'),('2','3'),('3','4'),('4','4'); +select * from t1; +update t1 set col2='7' where col1='4'; +select * from t1; +alter table t1 add co3 int not null; +select * from t1; +update t1 set col2='9' where col1='2'; +select * from t1; +drop table t1; + +# +# INSERT INTO innodb tables +# + +eval create table t1 (a int not null , b int, primary key (a)) engine = $engine_type; +eval create table t2 (a int not null , b int, primary key (a)) engine = $other_engine_type; +insert into t1 VALUES (1,3) , (2,3), (3,3); +select * from t1; +insert into t2 select * from t1; +select * from t2; +delete from t1 where b = 3; +select * from t1; +insert into t1 select * from t2; +select * from t1; +select * from t2; +drop table t1,t2; + +# +# Search on unique key +# + +eval CREATE TABLE t1 ( + id int(11) NOT NULL auto_increment, + ggid varchar(32) binary DEFAULT '' NOT NULL, + email varchar(64) DEFAULT '' NOT NULL, + passwd varchar(32) binary DEFAULT '' NOT NULL, + PRIMARY KEY (id), + UNIQUE ggid (ggid) +) ENGINE=$engine_type; + +insert into t1 (ggid,passwd) values ('test1','xxx'); +insert into t1 (ggid,passwd) values ('test2','yyy'); +-- error 1062 +insert into t1 (ggid,passwd) values ('test2','this will fail'); +-- error 1062 +insert into t1 (ggid,id) values ('this will fail',1); + +select * from t1 where ggid='test1'; +select * from t1 where passwd='xxx'; +select * from t1 where id=2; + +replace into t1 (ggid,id) values ('this will work',1); +replace into t1 (ggid,passwd) values ('test2','this will work'); +-- error 1062 +update t1 set id=100,ggid='test2' where id=1; +select * from t1; +select * from t1 where id=1; +select * from t1 where id=999; +drop table t1; + +# +# ORDER BY on not primary key +# + +eval CREATE TABLE t1 ( + user_name varchar(12), + password text, + subscribed char(1), + user_id int(11) DEFAULT '0' NOT NULL, + 
quota bigint(20), + weight double, + access_date date, + access_time time, + approved datetime, + dummy_primary_key int(11) NOT NULL auto_increment, + PRIMARY KEY (dummy_primary_key) +) ENGINE=$engine_type; +INSERT INTO t1 VALUES ('user_0','somepassword','N',0,0,0,'2000-09-07','23:06:59','2000-09-07 23:06:59',1); +INSERT INTO t1 VALUES ('user_1','somepassword','Y',1,1,1,'2000-09-07','23:06:59','2000-09-07 23:06:59',2); +INSERT INTO t1 VALUES ('user_2','somepassword','N',2,2,1.4142135623731,'2000-09-07','23:06:59','2000-09-07 23:06:59',3); +INSERT INTO t1 VALUES ('user_3','somepassword','Y',3,3,1.7320508075689,'2000-09-07','23:06:59','2000-09-07 23:06:59',4); +INSERT INTO t1 VALUES ('user_4','somepassword','N',4,4,2,'2000-09-07','23:06:59','2000-09-07 23:06:59',5); +select user_name, password , subscribed, user_id, quota, weight, access_date, access_time, approved, dummy_primary_key from t1 order by user_name; +drop table t1; + +# +# Testing of tables without primary keys +# + +eval CREATE TABLE t1 ( + id int(11) NOT NULL auto_increment, + parent_id int(11) DEFAULT '0' NOT NULL, + level tinyint(4) DEFAULT '0' NOT NULL, + KEY (id), + KEY parent_id (parent_id), + KEY level (level) +) engine=$engine_type; +INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1); +INSERT INTO t1 values (179,5,2); +update t1 set parent_id=parent_id+100; +select * from t1 where parent_id=102; +update t1 set id=id+1000; +update t1 set id=1024 where id=1009; +select * from t1; +update ignore t1 set id=id+1; # This will change all rows +select * from t1; +update ignore t1 set id=1023 where id=1010; +select * from t1 where parent_id=102; +--replace_column 9 # +explain select level from t1 where level=1; +select level,id from t1 where level=1; +select level,id,parent_id from t1 where level=1; +select level,id from t1 where level=1 order by id; +delete from t1 where level=1; +select * from t1; +drop table t1; + +# +# Test of index only reads +# +eval CREATE TABLE t1 ( + sca_code char(6) NOT NULL, + cat_code char(6) NOT NULL, + sca_desc varchar(50), + lan_code char(2) NOT NULL, + sca_pic varchar(100), + sca_sdesc varchar(50), + sca_sch_desc varchar(16), + PRIMARY KEY (sca_code, cat_code, lan_code), + INDEX sca_pic (sca_pic) +) engine = $engine_type ; + +INSERT INTO t1 ( sca_code, cat_code, sca_desc, lan_code, sca_pic, sca_sdesc, sca_sch_desc) VALUES ( 'PD', 'J', 'PENDANT', 'EN', NULL, NULL, 'PENDANT'),( 'RI', 'J', 'RING', 'EN', NULL, NULL, 'RING'),( 'QQ', 'N', 'RING', 'EN', 'not null', NULL, 'RING'); +select count(*) from t1 where sca_code = 'PD'; +select count(*) from t1 where sca_code <= 'PD'; +select count(*) from t1 where sca_pic is null; +alter table t1 drop index sca_pic, add index sca_pic (cat_code, sca_pic); +select count(*) from t1 where sca_code='PD' and sca_pic is null; +select count(*) from t1 where cat_code='E'; + +alter table t1 drop index sca_pic, add index (sca_pic, cat_code); +select count(*) from t1 where sca_code='PD' and sca_pic is null; +select count(*) from t1 where sca_pic >= 'n'; +select sca_pic from t1 where sca_pic is null; +update t1 set sca_pic="test" where sca_pic is null; +delete from t1 where sca_code='pd'; +drop table t1; + +# +# Test of opening table twice and timestamps +# +set @a:=now(); 
+eval CREATE TABLE t1 (a int not null, b timestamp not null, primary key (a)) engine=$engine_type; +insert into t1 (a) values(1),(2),(3); +select t1.a from t1 natural join t1 as t2 where t1.b >= @a order by t1.a; +select a from t1 natural join t1 as t2 where b >= @a order by a; +update t1 set a=5 where a=1; +select a from t1; +drop table t1; + +# +# Test with variable length primary key +# +eval create table t1 (a varchar(100) not null, primary key(a), b int not null) engine=$engine_type; +insert into t1 values("hello",1),("world",2); +select * from t1 order by b desc; +optimize table t1; +--replace_column 7 # +show keys from t1; +drop table t1; + +# +# Test of create index with NULL columns +# +eval create table t1 (i int, j int ) ENGINE=$engine_type; +insert into t1 values (1,2); +select * from t1 where i=1 and j=2; +create index ax1 on t1 (i,j); +select * from t1 where i=1 and j=2; +drop table t1; + +# +# Test min-max optimization +# + +eval CREATE TABLE t1 ( + a int3 unsigned NOT NULL, + b int1 unsigned NOT NULL, + UNIQUE (a, b) +) ENGINE = $engine_type; + +INSERT INTO t1 VALUES (1, 1); +SELECT MIN(B),MAX(b) FROM t1 WHERE t1.a = 1; +drop table t1; + +# +# Test INSERT DELAYED +# + +eval CREATE TABLE t1 (a int unsigned NOT NULL) engine=$engine_type; +# Can't test this in 3.23 +# INSERT DELAYED INTO t1 VALUES (1); +INSERT INTO t1 VALUES (1); +SELECT * FROM t1; +DROP TABLE t1; + + +# +# Crash when using many tables (Test case by Jeremy D Zawodny) +# + +eval create table t1 (a int primary key,b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int, r int, s int, t int, u int, v int, w int, x int, y int, z int, a1 int, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, b1 int, b2 int, b3 int, b4 int, b5 int, b6 int) engine = $engine_type; +insert into t1 values (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1); +--replace_column 9 # +explain select * from t1 where a > 0 and a < 50; +drop table t1; + +# +# Test lock tables +# + +eval create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=$engine_type; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +--error 1062 +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +select id from t1; +select id from t1; +UNLOCK TABLES; +DROP TABLE t1; + +eval create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=$engine_type; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +begin; +--error 1062 +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +select id from t1; +insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D'); +commit; +select id,id3 from t1; +UNLOCK TABLES; +DROP TABLE t1; + +# +# Test prefix key +# +eval create table t1 (a char(20), unique (a(5))) engine=$engine_type; +drop table t1; +eval create table t1 (a char(20), index (a(5))) engine=$engine_type; +show create table t1; +drop table t1; + +# +# Test using temporary table and auto_increment +# + +eval create temporary table t1 (a int not null auto_increment, primary key(a)) engine=$engine_type; +insert into t1 values (NULL),(NULL),(NULL); +delete from t1 where a=3; +insert into t1 values (NULL); +select * from t1; +alter table t1 add b int; +select * from t1; +drop table t1; + +#Slashdot bug +eval create table t1 + ( + id 
int auto_increment primary key, + name varchar(32) not null, + value text not null, + uid int not null, + unique key(name,uid) + ) engine=$engine_type; +insert into t1 values (1,'one','one value',101), + (2,'two','two value',102),(3,'three','three value',103); +set insert_id=5; +replace into t1 (value,name,uid) values ('other value','two',102); +delete from t1 where uid=102; +set insert_id=5; +replace into t1 (value,name,uid) values ('other value','two',102); +set insert_id=6; +replace into t1 (value,name,uid) values ('other value','two',102); +select * from t1; +drop table t1; + +# +# Test DROP DATABASE +# +# ML: Test logics +# Check that the creation of a table with engine = $engine_type does +# in a certain database (already containing some tables using other +# storage engines) not prevent the dropping of this database. + +create database mysqltest; +eval create table mysqltest.t1 (a int not null) engine= $engine_type; +insert into mysqltest.t1 values(1); +eval create table mysqltest.t2 (a int not null) engine= $other_engine_type; +insert into mysqltest.t2 values(1); +eval create table mysqltest.t3 (a int not null) engine= $other_engine_type1; +insert into mysqltest.t3 values(1); +commit; +drop database mysqltest; +# Don't check error message +--error 1049 +show tables from mysqltest; + +# +# Test truncate table with and without auto_commit +# + +set autocommit=0; +eval create table t1 (a int not null) engine= $engine_type; +insert into t1 values(1),(2); +truncate table t1; +commit; +truncate table t1; +truncate table t1; +select * from t1; +insert into t1 values(1),(2); +delete from t1; +select * from t1; +commit; +drop table t1; +set autocommit=1; + +eval create table t1 (a int not null) engine= $engine_type; +insert into t1 values(1),(2); +truncate table t1; +insert into t1 values(1),(2); +select * from t1; +truncate table t1; +insert into t1 values(1),(2); +delete from t1; +select * from t1; +drop table t1; + +# +# Test of how ORDER BY works when doing it on the whole table +# + +eval create table t1 (a int not null, b int not null, c int not null, primary key (a),key(b)) engine=$engine_type; +insert into t1 values (3,3,3),(1,1,1),(2,2,2),(4,4,4); +--replace_column 9 # +explain select * from t1 order by a; +--replace_column 9 # +explain select * from t1 order by b; +--replace_column 9 # +explain select * from t1 order by c; +--replace_column 9 # +explain select a from t1 order by a; +--replace_column 9 # +explain select b from t1 order by b; +--replace_column 9 # +explain select a,b from t1 order by b; +--replace_column 9 # +explain select a,b from t1; +--replace_column 9 # +explain select a,b,c from t1; +drop table t1; + +# +# Check describe +# + +eval create table t1 (t int not null default 1, key (t)) engine=$engine_type; +desc t1; +drop table t1; + +# +# Test of multi-table-delete +# + +eval CREATE TABLE t1 ( + number bigint(20) NOT NULL default '0', + cname char(15) NOT NULL default '', + carrier_id smallint(6) NOT NULL default '0', + privacy tinyint(4) NOT NULL default '0', + last_mod_date timestamp NOT NULL, + last_mod_id smallint(6) NOT NULL default '0', + last_app_date timestamp NOT NULL, + last_app_id smallint(6) default '-1', + version smallint(6) NOT NULL default '0', + assigned_scps int(11) default '0', + status tinyint(4) default '0' +) ENGINE=$engine_type; +INSERT INTO t1 VALUES (4077711111,'SeanWheeler',90,2,20020111112846,500,00000000000000,-1,2,3,1); +INSERT INTO t1 VALUES (9197722223,'berry',90,3,20020111112809,500,20020102114532,501,4,10,0); +INSERT INTO t1 
VALUES (650,'San Francisco',0,0,20011227111336,342,00000000000000,-1,1,24,1); +INSERT INTO t1 VALUES (302467,'Sue\'s Subshop',90,3,20020109113241,500,20020102115111,501,7,24,0); +INSERT INTO t1 VALUES (6014911113,'SudzCarwash',520,1,20020102115234,500,20020102115259,501,33,32768,0); +INSERT INTO t1 VALUES (333,'tubs',99,2,20020109113440,501,20020109113440,500,3,10,0); +eval CREATE TABLE t2 ( + number bigint(20) NOT NULL default '0', + cname char(15) NOT NULL default '', + carrier_id smallint(6) NOT NULL default '0', + privacy tinyint(4) NOT NULL default '0', + last_mod_date timestamp NOT NULL, + last_mod_id smallint(6) NOT NULL default '0', + last_app_date timestamp NOT NULL, + last_app_id smallint(6) default '-1', + version smallint(6) NOT NULL default '0', + assigned_scps int(11) default '0', + status tinyint(4) default '0' +) ENGINE=$engine_type; +INSERT INTO t2 VALUES (4077711111,'SeanWheeler',0,2,20020111112853,500,00000000000000,-1,2,3,1); +INSERT INTO t2 VALUES (9197722223,'berry',90,3,20020111112818,500,20020102114532,501,4,10,0); +INSERT INTO t2 VALUES (650,'San Francisco',90,0,20020109113158,342,00000000000000,-1,1,24,1); +INSERT INTO t2 VALUES (333,'tubs',99,2,20020109113453,501,20020109113453,500,3,10,0); +select * from t1; +select * from t2; +delete t1, t2 from t1 left join t2 on t1.number=t2.number where (t1.carrier_id=90 and t1.number=t2.number) or (t2.carrier_id=90 and t1.number=t2.number) or (t1.carrier_id=90 and t2.number is null); +select * from t1; +select * from t2; +select * from t2; +drop table t1,t2; + +# +# A simple test with some isolation levels +# TODO: Make this into a test using replication to really test how +# this works. +# + +eval create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=$engine_type; + +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +SELECT @@tx_isolation,@@global.tx_isolation; +insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'); +select id, code, name from t1 order by id; +COMMIT; + +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +insert into t1 (code, name) values (2, 'Erik'), (3, 'Sasha'); +select id, code, name from t1 order by id; +COMMIT; + +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +insert into t1 (code, name) values (3, 'Jeremy'), (4, 'Matt'); +select id, code, name from t1 order by id; +COMMIT; +DROP TABLE t1; + +# +# Test of multi-table-update +# +eval create table t1 (n int(10), d int(10)) engine=$engine_type; +eval create table t2 (n int(10), d int(10)) engine=$engine_type; +insert into t1 values(1,1),(1,2); +insert into t2 values(1,10),(2,20); +UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n; +select * from t1; +select * from t2; +drop table t1,t2; + +# +# Testing of IFNULL +# +eval create table t1 (a int, b int) engine=$engine_type; +insert into t1 values(20,null); +select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on +t2.b=t3.a; +select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on +t2.b=t3.a order by 1; +insert into t1 values(10,null); +select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on +t2.b=t3.a order by 1; +drop table t1; + +# +# Test of read_through not existing const_table +# + +eval create table t1 (a varchar(10) not null) engine = $other_engine_type; +eval create table t2 (b varchar(10) not null unique) engine=$engine_type; +select t1.a from t1,t2 where 
t1.a=t2.b; +drop table t1,t2; +eval create table t1 (a int not null, b int, primary key (a)) engine = $engine_type; +eval create table t2 (a int not null, b int, primary key (a)) engine = $engine_type; +insert into t1 values (10, 20); +insert into t2 values (10, 20); +update t1, t2 set t1.b = 150, t2.b = t1.b where t2.a = t1.a and t1.a = 10; +drop table t1,t2; + +if ($test_foreign_keys) +{ +# +# Test of multi-table-delete with foreign key constraints +# + +eval CREATE TABLE t1 (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=$engine_type; +eval CREATE TABLE t2 (id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id), FOREIGN KEY (t1_id) REFERENCES t1(id) ON DELETE CASCADE ) ENGINE=$engine_type; +insert into t1 set id=1; +insert into t2 set id=1, t1_id=1; +delete t1,t2 from t1,t2 where t1.id=t2.t1_id; +select * from t1; +select * from t2; +drop table t2,t1; +eval CREATE TABLE t1(id INT NOT NULL, PRIMARY KEY (id)) ENGINE=$engine_type; +eval CREATE TABLE t2(id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id) ) ENGINE=$engine_type; +INSERT INTO t1 VALUES(1); +INSERT INTO t2 VALUES(1, 1); +SELECT * from t1; +UPDATE t1,t2 SET t1.id=t1.id+1, t2.t1_id=t1.id+1; +SELECT * from t1; +UPDATE t1,t2 SET t1.id=t1.id+1 where t1.id!=t2.id; +SELECT * from t1; +DROP TABLE t1,t2; +} + +if ($test_transactions) +{ +# +# Test of range_optimizer +# + +set autocommit=0; + +eval CREATE TABLE t1 (id CHAR(15) NOT NULL, value CHAR(40) NOT NULL, PRIMARY KEY(id)) ENGINE=$engine_type; + +eval CREATE TABLE t2 (id CHAR(15) NOT NULL, value CHAR(40) NOT NULL, PRIMARY KEY(id)) ENGINE=$engine_type; + +eval CREATE TABLE t3 (id1 CHAR(15) NOT NULL, id2 CHAR(15) NOT NULL, PRIMARY KEY(id1, id2)) ENGINE=$engine_type; + +INSERT INTO t3 VALUES("my-test-1", "my-test-2"); +COMMIT; + +INSERT INTO t1 VALUES("this-key", "will disappear"); +INSERT INTO t2 VALUES("this-key", "will also disappear"); +DELETE FROM t3 WHERE id1="my-test-1"; + +SELECT * FROM t1; +SELECT * FROM t2; +SELECT * FROM t3; +ROLLBACK; + +SELECT * FROM t1; +SELECT * FROM t2; +SELECT * FROM t3; +SELECT * FROM t3 WHERE id1="my-test-1" LOCK IN SHARE MODE; +COMMIT; +set autocommit=1; +DROP TABLE t1,t2,t3; +} + +# +# Check update with conflicting key +# + +eval CREATE TABLE t1 (a int not null primary key, b int not null, unique (b)) engine=$engine_type; +INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9); +# We need the a < 1000 test here to quard against the halloween problems +UPDATE t1 set a=a+100 where b between 2 and 3 and a < 1000; +SELECT * from t1; +drop table t1; + +# +# Test multi update with different join methods +# + +eval CREATE TABLE t1 (a int not null primary key, b int not null, key (b)) engine=$engine_type; +eval CREATE TABLE t2 (a int not null primary key, b int not null, key (b)) engine=$engine_type; +INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12); +INSERT INTO t2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9); + +# Full join, without key +update t1,t2 set t1.a=t1.a+100; +select * from t1; + +# unique key +update t1,t2 set t1.a=t1.a+100 where t1.a=101; +select * from t1; + +# ref key +update t1,t2 set t1.b=t1.b+10 where t1.b=2; +select * from t1; + +# Range key (in t1) +update t1,t2 set t1.b=t1.b+2,t2.b=t1.b+10 where t1.b between 3 and 5 and t1.a=t2.a+100; +select * from t1; +select * from t2; + +drop table t1,t2; +eval CREATE TABLE t2 ( NEXT_T BIGINT NOT NULL PRIMARY KEY) ENGINE=$other_non_trans_engine_type; +eval CREATE TABLE t1 ( B_ID INTEGER NOT NULL PRIMARY KEY) 
ENGINE=$engine_type; +SET AUTOCOMMIT=0; +INSERT INTO t1 ( B_ID ) VALUES ( 1 ); +INSERT INTO t2 ( NEXT_T ) VALUES ( 1 ); +ROLLBACK; +SELECT * FROM t1; +drop table t1,t2; +eval create table t1 ( pk int primary key, parent int not null, child int not null, index (parent) ) engine = $engine_type; +insert into t1 values (1,0,4), (2,1,3), (3,2,1), (4,1,2); +select distinct parent,child from t1 order by parent; +drop table t1; + +# +# Test that MySQL priorities clustered indexes +# +eval create table t1 (a int not null auto_increment primary key, b int, c int, key(c)) engine=$engine_type; +eval create table t2 (a int not null auto_increment primary key, b int) ENGINE = $other_engine_type; +insert into t1 (b) values (null),(null),(null),(null),(null),(null),(null); +insert into t2 (a) select b from t1; +insert into t1 (b) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +select count(*) from t1; +--replace_column 9 # +explain select * from t1 where c between 1 and 2500; +update t1 set c=a; +--replace_column 9 # +explain select * from t1 where c between 1 and 2500; +drop table t1,t2; + +# +# Test of UPDATE ... ORDER BY +# + +eval create table t1 (id int primary key auto_increment, fk int, index index_fk (fk)) engine=$engine_type; + +insert into t1 (id) values (null),(null),(null),(null),(null); +update t1 set fk=69 where fk is null order by id limit 1; +SELECT * from t1; +drop table t1; + +eval create table t1 (a int not null, b int not null, key (a)) engine=$engine_type; +insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3); +SET @tmp=0; +update t1 set b=(@tmp:=@tmp+1) order by a; +update t1 set b=99 where a=1 order by b asc limit 1; +update t1 set b=100 where a=1 order by b desc limit 2; +update t1 set a=a+10+b where a=1 order by b; +select * from t1 order by a,b; +drop table t1; + +# +# Test of multi-table-updates (bug #1980). 
+# + +eval create table t1 ( c char(8) not null ) engine=$engine_type; +insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); +insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F'); + +alter table t1 add b char(8) not null; +alter table t1 add a char(8) not null; +alter table t1 add primary key (a,b,c); +update t1 set a=c, b=c; + +eval create table t2 (c char(8) not null, b char(8) not null, a char(8) not null, primary key(a,b,c)) engine=$engine_type; +insert into t2 select * from t1; + +delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; +drop table t1,t2; + +# +# test autoincrement with TRUNCATE +# + +SET AUTOCOMMIT=1; +eval create table t1 (a integer auto_increment primary key) engine=$engine_type; +insert into t1 (a) values (NULL),(NULL); +truncate table t1; +insert into t1 (a) values (NULL),(NULL); +SELECT * from t1; +drop table t1; + + +if ($test_foreign_keys) +{ +# +# Test dictionary handling with spaceand quoting +# + +eval CREATE TABLE t1 (`id 1` INT NOT NULL, PRIMARY KEY (`id 1`)) ENGINE=$engine_type; +eval CREATE TABLE t2 (id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id), FOREIGN KEY (`t1_id`) REFERENCES `t1`(`id 1`) ON DELETE CASCADE ) ENGINE=$engine_type; +#show create table t2; +drop table t2,t1; + +# +# Test of multi updated and foreign keys +# + +eval create table `t1` (`id` int( 11 ) not null ,primary key ( `id` )) engine = $engine_type; +insert into `t1`values ( 1 ) ; +eval create table `t2` (`id` int( 11 ) not null default '0',unique key `id` ( `id` ) ,constraint `t1_id_fk` foreign key ( `id` ) references `t1` (`id` )) engine = $engine_type; +insert into `t2`values ( 1 ) ; +eval create table `t3` (`id` int( 11 ) not null default '0',key `id` ( `id` ) ,constraint `t2_id_fk` foreign key ( `id` ) references `t2` (`id` )) engine = $engine_type; +insert into `t3`values ( 1 ) ; +--error 1451 +delete t3,t2,t1 from t1,t2,t3 where t1.id =1 and t2.id = t1.id and t3.id = t2.id; +--error 1451 +update t1,t2,t3 set t3.id=5, t2.id=6, t1.id=7 where t1.id =1 and t2.id = t1.id and t3.id = t2.id; +--error 1054 +update t3 set t3.id=7 where t1.id =1 and t2.id = t1.id and t3.id = t2.id; +drop table t3,t2,t1; + +# +# test for recursion depth limit +# +eval create table t1( + id int primary key, + pid int, + index(pid), + foreign key(pid) references t1(id) on delete cascade) engine=$engine_type; +insert into t1 values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), + (8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13),(15,14); +-- error 1451 +delete from t1 where id=0; +delete from t1 where id=15; +delete from t1 where id=0; + +drop table t1; +} +# End of FOREIGN KEY tests + +# +# Test timestamps +# + +eval CREATE TABLE t1 (col1 int(1))ENGINE=$engine_type; +eval CREATE TABLE t2 (col1 int(1),stamp TIMESTAMP,INDEX stamp_idx +(stamp))ENGINE=$engine_type; +insert into t1 values (1),(2),(3); +# Note that timestamp 3 is wrong +insert into t2 values (1, 20020204130000),(2, 20020204130000),(4,20020204310000 ),(5,20020204230000); +SELECT col1 FROM t1 UNION SELECT col1 FROM t2 WHERE stamp < +'20020204120000' GROUP BY col1; +drop table t1,t2; + +# +# Test by Francois MASUREL +# + +eval CREATE TABLE t1 ( + `id` int(10) unsigned NOT NULL auto_increment, + `id_object` int(10) unsigned default '0', + `id_version` int(10) unsigned NOT NULL default '1', + `label` varchar(100) NOT NULL default '', + `description` text, + PRIMARY KEY (`id`), + KEY `id_object` (`id_object`), + KEY `id_version` (`id_version`) +) ENGINE=$engine_type; + +INSERT INTO t1 VALUES("6", "3382", "9", 
"Test", NULL), ("7", "102", "5", "Le Pekin (Test)", NULL),("584", "1794", "4", "Test de resto", NULL),("837", "1822", "6", "Test 3", NULL),("1119", "3524", "1", "Societe Test", NULL),("1122", "3525", "1", "Fournisseur Test", NULL); + +eval CREATE TABLE t2 ( + `id` int(10) unsigned NOT NULL auto_increment, + `id_version` int(10) unsigned NOT NULL default '1', + PRIMARY KEY (`id`), + KEY `id_version` (`id_version`) +) ENGINE=$engine_type; + +INSERT INTO t2 VALUES("3524", "1"),("3525", "1"),("1794", "4"),("102", "5"),("1822", "6"),("3382", "9"); + +SELECT t2.id, t1.`label` FROM t2 INNER JOIN +(SELECT t1.id_object as id_object FROM t1 WHERE t1.`label` LIKE '%test%') AS lbl +ON (t2.id = lbl.id_object) INNER JOIN t1 ON (t2.id = t1.id_object); +drop table t1,t2; + +# Live checksum feature available + enabled +eval create table t1 (a int, b varchar(200), c text not null) checksum=1 engine=$other_live_chks_engine_type; +# Live checksum feature available + disabled +eval create table t2 (a int, b varchar(200), c text not null) checksum=0 engine=$other_live_chks_engine_type; +# +# Live checksum feature not available + enabled +eval create table t3 (a int, b varchar(200), c varchar(200) not null) checksum=1 engine=$other_non_live_chks_engine_type; +# Live checksum feature not available + disabled +eval create table t4 (a int, b varchar(200), c varchar(200) not null) checksum=0 engine=$other_non_live_chks_engine_type; +# +# Live checksum feature probably available + enabled +eval create table t5 (a int, b varchar(200), c text not null) checksum=1 engine=$engine_type; +# Live checksum feature probably available + disabled +eval create table t6 (a int, b varchar(200), c text not null) checksum=0 engine=$engine_type; +# +insert t1 values (1, "aaa", "bbb"), (NULL, "", "ccccc"), (0, NULL, ""); +insert t2 select * from t1; +insert t3 select * from t1; +insert t4 select * from t1; +insert t5 select * from t1; +insert t6 select * from t1; +checksum table t1, t2, t3, t4, t5, t6, t7 quick; +checksum table t1, t2, t3, t4, t5, t6, t7; +checksum table t1, t2, t3, t4, t5, t6, t7 extended; +# #show table status; +drop table t1,t2,t3, t4, t5, t6; + +# +# Test problem with refering to different fields in same table in UNION +# (Bug#2552: UNION returns NULL instead of expected value (innoDB only tables)) +# +eval create table t1 (id int, name char(10) not null, name2 char(10) not null) engine=$engine_type; +insert into t1 values(1,'first','fff'),(2,'second','sss'),(3,'third','ttt'); +select trim(name2) from t1 union all select trim(name) from t1 union all select trim(id) from t1; +drop table t1; + +# +# Bug#2160: Extra error message for CREATE TABLE LIKE with InnoDB +# +eval create table t1 (a int) engine=$engine_type; +create table t2 like t1; +show create table t2; +drop table t1,t2; + +if ($test_foreign_keys) +{ +# +# Test of automaticly created foreign keys +# + +eval create table t1 (id int(11) not null, id2 int(11) not null, unique (id,id2)) engine=$engine_type; +eval create table t2 (id int(11) not null, constraint t1_id_fk foreign key ( id ) references t1 (id)) engine = $engine_type; +show create table t1; +show create table t2; +create index id on t2 (id); +show create table t2; +create index id2 on t2 (id); +show create table t2; +drop index id2 on t2; +--error 1025,1025 +drop index id on t2; +show create table t2; +drop table t2; + +eval create table t2 (id int(11) not null, id2 int(11) not null, constraint t1_id_fk foreign key (id,id2) references t1 (id,id2)) engine = $engine_type; +show create table t2; 
+create unique index id on t2 (id,id2); +show create table t2; +drop table t2; + +# Check foreign key columns created in different order than key columns +eval create table t2 (id int(11) not null, id2 int(11) not null, unique (id,id2),constraint t1_id_fk foreign key (id2,id) references t1 (id,id2)) engine = $engine_type; +show create table t2; +drop table t2; + +eval create table t2 (id int(11) not null, id2 int(11) not null, unique (id,id2), constraint t1_id_fk foreign key (id) references t1 (id)) engine = $engine_type; +show create table t2; +drop table t2; + +eval create table t2 (id int(11) not null, id2 int(11) not null, unique (id,id2),constraint t1_id_fk foreign key (id2,id) references t1 (id,id2)) engine = $engine_type; +show create table t2; +drop table t2; + +eval create table t2 (id int(11) not null auto_increment, id2 int(11) not null, constraint t1_id_fk foreign key (id) references t1 (id), primary key (id), index (id,id2)) engine = $engine_type; +show create table t2; +drop table t2; + +eval create table t2 (id int(11) not null auto_increment, id2 int(11) not null, constraint t1_id_fk foreign key (id) references t1 (id)) engine= $engine_type; +show create table t2; +alter table t2 add index id_test (id), add index id_test2 (id,id2); +show create table t2; +drop table t2; + +# Test error handling + +# Clean up filename -- embedded server reports whole path without .frm, +# regular server reports relative path with .frm (argh!) +--replace_result \\ / $MYSQL_TEST_DIR . /var/master-data/ / t2.frm t2 +--error 1005 +eval create table t2 (id int(11) not null, id2 int(11) not null, constraint t1_id_fk foreign key (id2,id) references t1 (id)) engine = $engine_type; + +# bug#3749 + +eval create table t2 (a int auto_increment primary key, b int, index(b), foreign key (b) references t1(id), unique(b)) engine=$engine_type; +show create table t2; +drop table t2; +eval create table t2 (a int auto_increment primary key, b int, foreign key (b) references t1(id), foreign key (b) references t1(id), unique(b)) engine=$engine_type; +show create table t2; +drop table t2, t1; +} +# End of FOREIGN KEY tests + + +# +# Let us test binlog_cache_use and binlog_cache_disk_use status vars. +# Actually this test has nothing to do with innodb per se, it just requires +# transactional table. +# +flush status; +show status like "binlog_cache_use"; +show status like "binlog_cache_disk_use"; + +eval create table t1 (a int) engine=$engine_type; + +# Now we are going to create transaction which is long enough so its +# transaction binlog will be flushed to disk... +let $1=2000; +disable_query_log; +begin; +while ($1) +{ + eval insert into t1 values( $1 ); + dec $1; +} +commit; +enable_query_log; +show status like "binlog_cache_use"; +show status like "binlog_cache_disk_use"; + +# Transaction which should not be flushed to disk and so should not +# increase binlog_cache_disk_use. 
+begin; +delete from t1; +commit; +show status like "binlog_cache_use"; +show status like "binlog_cache_disk_use"; +drop table t1; + +# +# Bug #6126: Duplicate columns in keys gives misleading error message +# +--error 1060 +eval create table t1 (c char(10), index (c,c)) engine=$engine_type; +--error 1060 +eval create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)) engine=$engine_type; +--error 1060 +eval create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)) engine=$engine_type; +--error 1060 +eval create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)) engine=$engine_type; +eval create table t1 (c1 char(10), c2 char(10)) engine=$engine_type; +--error 1060 +alter table t1 add key (c1,c1); +--error 1060 +alter table t1 add key (c2,c1,c1); +--error 1060 +alter table t1 add key (c1,c2,c1); +--error 1060 +alter table t1 add key (c1,c1,c2); +drop table t1; + +# +# Bug #4082: integer truncation +# + +eval create table t1(a int(1) , b int(1)) engine=$engine_type; +insert into t1 values ('1111', '3333'); +select distinct concat(a, b) from t1; +drop table t1; + +if ($fulltext_query_unsupported) +{ +# +# BUG#7709 test case - Boolean fulltext query against unsupported +# engines does not fail +# + +eval CREATE TABLE t1 ( a char(10) ) ENGINE=$engine_type; +--error 1214 +SELECT a FROM t1 WHERE MATCH (a) AGAINST ('test' IN BOOLEAN MODE); +DROP TABLE t1; +} + +if ($test_foreign_keys) +{ +# +# check null values #1 +# + +--disable_warnings +eval CREATE TABLE t1 (a_id tinyint(4) NOT NULL default '0', PRIMARY KEY (a_id)) ENGINE=$engine_type DEFAULT CHARSET=latin1; +INSERT INTO t1 VALUES (1),(2),(3); +eval CREATE TABLE t2 (b_id tinyint(4) NOT NULL default '0',b_a tinyint(4) NOT NULL default '0', PRIMARY KEY (b_id), KEY (b_a), + CONSTRAINT fk_b_a FOREIGN KEY (b_a) REFERENCES t1 (a_id) ON DELETE CASCADE ON UPDATE NO ACTION) ENGINE=$engine_type DEFAULT CHARSET=latin1; +--enable_warnings +INSERT INTO t2 VALUES (1,1),(2,1),(3,1),(4,2),(5,2); +SELECT * FROM (SELECT t1.*,GROUP_CONCAT(t2.b_id SEPARATOR ',') as b_list FROM (t1 LEFT JOIN (t2) on t1.a_id = t2.b_a) GROUP BY t1.a_id ) AS xyz; +DROP TABLE t2; +DROP TABLE t1; +} + +# +# Bug#11816 - Truncate table doesn't work with temporary innodb tables +# This is not an innodb bug, but we test it using innodb. +# +eval create temporary table t1 (a int) engine=$engine_type; +insert into t1 values (4711); +truncate t1; +insert into t1 values (42); +select * from t1; +drop table t1; +# Show that it works with permanent tables too. 
+eval create table t1 (a int) engine=$engine_type; +insert into t1 values (4711); +truncate t1; +insert into t1 values (42); +select * from t1; +drop table t1; + +# +# Bug #13025 Server crash during filesort +# + +eval create table t1 (a int not null, b int not null, c blob not null, d int not null, e int, primary key (a,b,c(255),d)) engine=$engine_type; +insert into t1 values (2,2,"b",2,2),(1,1,"a",1,1),(3,3,"ab",3,3); +select * from t1 order by a,b,c,d; +explain select * from t1 order by a,b,c,d; +drop table t1; + +# +# BUG#11039,#13218 Wrong key length in min() +# + +eval create table t1 (a char(1), b char(1), key(a, b)) engine=$engine_type; +insert into t1 values ('8', '6'), ('4', '7'); +select min(a) from t1; +select min(b) from t1 where a='8'; +drop table t1; + +# End of 4.1 tests + +# +# range optimizer problem +# + +eval create table t1 (x bigint unsigned not null primary key) engine=$engine_type; +insert into t1(x) values (0xfffffffffffffff0),(0xfffffffffffffff1); +select * from t1; +select count(*) from t1 where x>0; +select count(*) from t1 where x=0; +select count(*) from t1 where x<0; +select count(*) from t1 where x < -16; +select count(*) from t1 where x = -16; +explain select count(*) from t1 where x > -16; +select count(*) from t1 where x > -16; +select * from t1 where x > -16; +select count(*) from t1 where x = 18446744073709551601; +drop table t1; + +# Please do not remove the following skipped InnoDB specific tests. +# They make the synchronization with innodb.test easier and give +# an idea what to test on other storage engines. +if (0) +{ + +# Test for testable InnoDB status variables. This test +# uses previous ones(pages_created, rows_deleted, ...). +show status like "Innodb_buffer_pool_pages_total"; +show status like "Innodb_page_size"; +show status like "Innodb_rows_deleted"; +show status like "Innodb_rows_inserted"; +show status like "Innodb_rows_updated"; + +# Test for row locks InnoDB status variables. 
+show status like "Innodb_row_lock_waits"; +show status like "Innodb_row_lock_current_waits"; +show status like "Innodb_row_lock_time"; +show status like "Innodb_row_lock_time_max"; +show status like "Innodb_row_lock_time_avg"; + +# Test for innodb_sync_spin_loops variable +show variables like "innodb_sync_spin_loops"; +set global innodb_sync_spin_loops=1000; +show variables like "innodb_sync_spin_loops"; +set global innodb_sync_spin_loops=0; +show variables like "innodb_sync_spin_loops"; +set global innodb_sync_spin_loops=20; +show variables like "innodb_sync_spin_loops"; + +# Test for innodb_thread_concurrency variable +show variables like "innodb_thread_concurrency"; +set global innodb_thread_concurrency=1001; +show variables like "innodb_thread_concurrency"; +set global innodb_thread_concurrency=0; +show variables like "innodb_thread_concurrency"; +set global innodb_thread_concurrency=16; +show variables like "innodb_thread_concurrency"; + +# Test for innodb_concurrency_tickets variable +show variables like "innodb_concurrency_tickets"; +set global innodb_concurrency_tickets=1000; +show variables like "innodb_concurrency_tickets"; +set global innodb_concurrency_tickets=0; +show variables like "innodb_concurrency_tickets"; +set global innodb_concurrency_tickets=500; +show variables like "innodb_concurrency_tickets"; + +# Test for innodb_thread_sleep_delay variable +show variables like "innodb_thread_sleep_delay"; +set global innodb_thread_sleep_delay=100000; +show variables like "innodb_thread_sleep_delay"; +set global innodb_thread_sleep_delay=0; +show variables like "innodb_thread_sleep_delay"; +set global innodb_thread_sleep_delay=10000; +show variables like "innodb_thread_sleep_delay"; + +} + + +# +# Test varchar +# + +let $default=`select @@storage_engine`; +eval set storage_engine=$engine_type; +source include/varchar.inc; + +# +# Some errors/warnings on create +# + +# Clean up filename -- embedded server reports whole path without .frm, +# regular server reports relative path with .frm (argh!) +--replace_result \\ / $MYSQL_TEST_DIR . 
/var/master-data/ / t1.frm t1 +create table t1 (v varchar(65530), key(v)); +drop table t1; +create table t1 (v varchar(65536)); +show create table t1; +drop table t1; +create table t1 (v varchar(65530) character set utf8); +show create table t1; +drop table t1; + +eval set storage_engine=$default; + +# InnoDB specific varchar tests +eval create table t1 (v varchar(16384)) engine=$engine_type; +drop table t1; + +# +# BUG#11039 Wrong key length in min() +# + +eval create table t1 (a char(1), b char(1), key(a, b)) engine=$engine_type; +insert into t1 values ('8', '6'), ('4', '7'); +select min(a) from t1; +select min(b) from t1 where a='8'; +drop table t1; + +# +# Bug #11080 & #11005 Multi-row REPLACE fails on a duplicate key error +# + +eval CREATE TABLE t1 ( `a` int(11) NOT NULL auto_increment, `b` int(11) default NULL,PRIMARY KEY (`a`),UNIQUE KEY `b` (`b`)) ENGINE=$engine_type; +insert into t1 (b) values (1); +replace into t1 (b) values (2), (1), (3); +select * from t1; +truncate table t1; +insert into t1 (b) values (1); +replace into t1 (b) values (2); +replace into t1 (b) values (1); +replace into t1 (b) values (3); +select * from t1; +drop table t1; + +eval create table t1 (rowid int not null auto_increment, val int not null,primary +key (rowid), unique(val)) engine=$engine_type; +replace into t1 (val) values ('1'),('2'); +replace into t1 (val) values ('1'),('2'); +--error 1062 +insert into t1 (val) values ('1'),('2'); +select * from t1; +drop table t1; + +if ($no_autoinc_update) +{ +# +# Test that update does not change internal auto-increment value +# + +eval create table t1 (a int not null auto_increment primary key, val int) engine=$engine_type; +insert into t1 (val) values (1); +update t1 set a=2 where a=1; +# We should get the following error because InnoDB does not update the counter +--error 1062 +insert into t1 (val) values (1); +select * from t1; +drop table t1; +} + + +# +# Bug#10465: DECIMAL, crash on DELETE (InnoDB only) +# + +--disable_warnings +eval CREATE TABLE t1 (GRADE DECIMAL(4) NOT NULL, PRIMARY KEY (GRADE)) ENGINE=$engine_type; +--enable_warnings +INSERT INTO t1 (GRADE) VALUES (151),(252),(343); +SELECT GRADE FROM t1 WHERE GRADE > 160 AND GRADE < 300; +SELECT GRADE FROM t1 WHERE GRADE= 151; +DROP TABLE t1; + +# +# Bug #12340 multitable delete deletes only one record +# +eval create table t1 (f1 varchar(10), f2 varchar(10), primary key (f1,f2)) engine=$engine_type; +eval create table t2 (f3 varchar(10), f4 varchar(10), key (f4)) engine=$engine_type; +insert into t2 values ('aa','cc'); +insert into t1 values ('aa','bb'),('aa','cc'); +delete t1 from t1,t2 where f1=f3 and f4='cc'; +select * from t1; +drop table t1,t2; + +if ($test_foreign_keys) +{ +# +# Test that the slow TRUNCATE implementation resets autoincrement columns +# (bug #11946) +# + +eval CREATE TABLE t1 ( +id INTEGER NOT NULL AUTO_INCREMENT, PRIMARY KEY (id) +) ENGINE=$engine_type; + +eval CREATE TABLE t2 ( +id INTEGER NOT NULL, +FOREIGN KEY (id) REFERENCES t1 (id) +) ENGINE=$engine_type; + +INSERT INTO t1 (id) VALUES (NULL); +SELECT * FROM t1; +TRUNCATE t1; +INSERT INTO t1 (id) VALUES (NULL); +SELECT * FROM t1; + +# continued from above; test that doing a slow TRUNCATE on a table with 0 +# rows resets autoincrement columns +DELETE FROM t1; +TRUNCATE t1; +INSERT INTO t1 (id) VALUES (NULL); +SELECT * FROM t1; +DROP TABLE t2, t1; + +-- Test that foreign keys in temporary tables are not accepted (bug #12084) +eval CREATE TABLE t1 +( + id INT PRIMARY KEY +) ENGINE=$engine_type; + +--error 1005,1005 +eval CREATE 
TEMPORARY TABLE t2 +( + id INT NOT NULL PRIMARY KEY, + b INT, + FOREIGN KEY (b) REFERENCES test.t1(id) +) ENGINE=$engine_type; +DROP TABLE t1; +} +# End of FOREIGN KEY test + +# Please do not remove the following skipped InnoDB specific tests. +# They make the synchronization with innodb.test easier and give +# an idea what to test on other storage engines. +if (0) +{ + +# +# Test that index column max sizes are honored (bug #13315) +# + +# prefix index +eval create table t1 (col1 varchar(2000), index (col1(767))) + character set = latin1 engine = $engine_type; + +# normal indexes +eval create table t2 (col1 char(255), index (col1)) + character set = latin1 engine = $engine_type; +eval create table t3 (col1 binary(255), index (col1)) + character set = latin1 engine = $engine_type; +eval create table t4 (col1 varchar(767), index (col1)) + character set = latin1 engine = $engine_type; +eval create table t5 (col1 varchar(767) primary key) + character set = latin1 engine = $engine_type; +eval create table t6 (col1 varbinary(767) primary key) + character set = latin1 engine = $engine_type; +eval create table t7 (col1 text, index(col1(767))) + character set = latin1 engine = $engine_type; +eval create table t8 (col1 blob, index(col1(767))) + character set = latin1 engine = $engine_type; + + +# multi-column indexes are allowed to be longer +eval create table t9 (col1 varchar(512), col2 varchar(512), index(col1, col2)) + character set = latin1 engine = $engine_type; + +show create table t9; + +drop table t1, t2, t3, t4, t5, t6, t7, t8, t9; + +# these should have their index length trimmed +eval create table t1 (col1 varchar(768), index(col1)) + character set = latin1 engine = $engine_type; +eval create table t2 (col1 varbinary(768), index(col1)) + character set = latin1 engine = $engine_type; +eval create table t3 (col1 text, index(col1(768))) + character set = latin1 engine = $engine_type; +eval create table t4 (col1 blob, index(col1(768))) + character set = latin1 engine = $engine_type; + +show create table t1; + +drop table t1, t2, t3, t4; + +} +# End of skipped test + +# Please do not remove the following skipped InnoDB specific tests. +# They make the synchronization with innodb.test easier and give +# an idea what to test on other storage engines. 
+if (0) +{ + +# these should be refused +--error 1071 +eval create table t1 (col1 varchar(768) primary key) + character set = latin1 engine = $engine_type; +--error 1071 +eval create table t2 (col1 varbinary(768) primary key) + character set = latin1 engine = $engine_type; +--error 1071 +eval create table t3 (col1 text, primary key(col1(768))) + character set = latin1 engine = $engine_type; +--error 1071 +eval create table t4 (col1 blob, primary key(col1(768))) + character set = latin1 engine = $engine_type; + +} + +if ($test_foreign_keys) +{ +# +# Test improved foreign key error messages (bug #3443) +# + +eval CREATE TABLE t1 +( + id INT PRIMARY KEY +) ENGINE=$engine_type; + +eval CREATE TABLE t2 +( + v INT, + CONSTRAINT c1 FOREIGN KEY (v) REFERENCES t1(id) +) ENGINE=$engine_type; + +--error 1452 +INSERT INTO t2 VALUES(2); + +INSERT INTO t1 VALUES(1); +INSERT INTO t2 VALUES(1); + +--error 1451 +DELETE FROM t1 WHERE id = 1; + +--error 1217 +DROP TABLE t1; + +SET FOREIGN_KEY_CHECKS=0; +DROP TABLE t1; +SET FOREIGN_KEY_CHECKS=1; + +--error 1452 +INSERT INTO t2 VALUES(3); + +DROP TABLE t2; +} +# End of FOREIGN tests + +if ($test_transactions) +{ +# +# Test that checksum table uses a consistent read Bug #12669 +# +connect (a,localhost,root,,); +connect (b,localhost,root,,); +connection a; +eval create table t1(a int not null) engine=$engine_type DEFAULT CHARSET=latin1; +insert into t1 values (1),(2); +set autocommit=0; +checksum table t1; +connection b; +insert into t1 values(3); +connection a; +# +# Here checksum should not see insert +# +checksum table t1; +connection a; +commit; +checksum table t1; +commit; +drop table t1; +# +# autocommit = 1 +# +connection a; +eval create table t1(a int not null) engine=$engine_type DEFAULT CHARSET=latin1; +insert into t1 values (1),(2); +set autocommit=1; +checksum table t1; +connection b; +set autocommit=1; +insert into t1 values(3); +connection a; +# +# Here checksum sees insert +# +checksum table t1; +drop table t1; + +connection default; +disconnect a; +disconnect b; +} + +# +# BUG 14056 Column prefix index on UTF-8 primary key column causes: Can't find record.. 
+# + +eval create table t1 ( + a int, b char(10), c char(10), filler char(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = $engine_type; +eval create table t2 ( + a int, b char(10), c char(10), filler char(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = $engine_type; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xe880bde880bd,_utf8 0xe880bde880bd,'six'); +insert into t1 values (4,_utf8 0xe880bdD0B1e880bd,_utf8 0xe880bdD0B1e880bd,'seven'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven'); +insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update t2 set filler ='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +select a,hex(b),hex(c),filler from t2 order by filler; +drop table t1; +drop table t2; + +eval create table t1 ( + a int, b varchar(10), c varchar(10), filler varchar(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = $engine_type; +eval create table t2 ( + a int, b varchar(10), c varchar(10), filler varchar(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = $engine_type; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xe880bde880bd,_utf8 0xe880bde880bd,'six'); +insert into t1 values (4,_utf8 0xe880bdD0B1e880bd,_utf8 0xe880bdD0B1e880bd,'seven'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven'); +insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update t2 set filler 
='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +select a,hex(b),hex(c),filler from t2 order by filler; +drop table t1; +drop table t2; + +eval create table t1 ( + a int, b text(10), c text(10), filler text(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = $engine_type; +eval create table t2 ( + a int, b text(10), c text(10), filler text(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = $engine_type; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xe880bde880bd,_utf8 0xe880bde880bd,'six'); +insert into t1 values (4,_utf8 0xe880bdD0B1e880bd,_utf8 0xe880bdD0B1e880bd,'seven'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven'); +insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update t2 set filler ='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +select a,hex(b),hex(c),filler from t2 order by filler; +drop table t1; +drop table t2; + +eval create table t1 ( + a int, b blob(10), c blob(10), filler blob(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = $engine_type; +eval create table t2 ( + a int, b blob(10), c blob(10), filler blob(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = $engine_type; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update t2 set filler ='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +select a,hex(b),hex(c),filler from t2 order by filler; +drop table t1; +drop table t2; +commit; 
+ +# tests for bugs #9802 and #13778 + +if ($test_foreign_keys) +{ +# test that FKs between invalid types are not accepted + +set foreign_key_checks=0; +eval create table t2 (a int primary key, b int, foreign key (b) references t1(a)) engine = $engine_type; +--replace_result $MYSQLTEST_VARDIR . master-data/ '' +-- error 1005 +eval create table t1(a char(10) primary key, b varchar(20)) engine = $engine_type; +set foreign_key_checks=1; +drop table t2; + +# test that FKs between different charsets are not accepted in CREATE even +# when f_k_c is 0 + +set foreign_key_checks=0; +eval create table t1(a varchar(10) primary key) engine = $engine_type DEFAULT CHARSET=latin1; +--replace_result $MYSQLTEST_VARDIR . master-data/ '' +-- error 1005 +eval create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = $engine_type DEFAULT CHARSET=utf8; +set foreign_key_checks=1; +drop table t1; + +# test that invalid datatype conversions with ALTER are not allowed + +set foreign_key_checks=0; +eval create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = $engine_type; +eval create table t1(a varchar(10) primary key) engine = $engine_type; +-- error 1025,1025 +alter table t1 modify column a int; +set foreign_key_checks=1; +drop table t2,t1; + +# test that charset conversions with ALTER are allowed when f_k_c is 0 + +set foreign_key_checks=0; +eval create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = $engine_type DEFAULT CHARSET=latin1; +eval create table t1(a varchar(10) primary key) engine = $engine_type DEFAULT CHARSET=latin1; +alter table t1 convert to character set utf8; +set foreign_key_checks=1; +drop table t2,t1; + +# test that RENAME does not allow invalid charsets when f_k_c is 0 + +set foreign_key_checks=0; +eval create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = $engine_type DEFAULT CHARSET=latin1; +eval create table t3(a varchar(10) primary key) engine = $engine_type DEFAULT CHARSET=utf8; +--replace_result $MYSQLTEST_VARDIR . master-data/ '' +-- error 1025 +rename table t3 to t1; +set foreign_key_checks=1; +drop table t2,t3; + +# test that foreign key errors are reported correctly (Bug #15550) + +eval create table t1(a int primary key) row_format=redundant engine=$engine_type; +eval create table t2(a int primary key,constraint foreign key(a)references t1(a)) row_format=compact engine=$engine_type; +eval create table t3(a int primary key) row_format=compact engine=$engine_type; +eval create table t4(a int primary key,constraint foreign key(a)references t3(a)) row_format=redundant engine=$engine_type; + +insert into t1 values(1); +insert into t3 values(1); +-- error 1452 +insert into t2 values(2); +-- error 1452 +insert into t4 values(2); +insert into t2 values(1); +insert into t4 values(1); +-- error 1451 +update t1 set a=2; +-- error 1452 +update t2 set a=2; +-- error 1451 +update t3 set a=2; +-- error 1452 +update t4 set a=2; +-- error 1451 +truncate t1; +-- error 1451 +truncate t3; +truncate t2; +truncate t4; +truncate t1; +truncate t3; + +drop table t4,t3,t2,t1; +} +# End of FOREIGN KEY tests + + +# Please do not remove the following skipped InnoDB specific tests. +# They make the synchronization with innodb.test easier and give +# an idea what to test on other storage engines. 
+if (0) +{ + +# +# Test that we can create a large (>1K) key +# +eval create table t1 (a varchar(255) character set utf8, + b varchar(255) character set utf8, + c varchar(255) character set utf8, + d varchar(255) character set utf8, + key (a,b,c,d)) engine=$engine_type; +drop table t1; +--error ER_TOO_LONG_KEY +eval create table t1 (a varchar(255) character set utf8, + b varchar(255) character set utf8, + c varchar(255) character set utf8, + d varchar(255) character set utf8, + e varchar(255) character set utf8, + key (a,b,c,d,e)) engine=$engine_type; + + +# test the padding of BINARY types and collations (Bug #14189) + +eval create table t1 (s1 varbinary(2),primary key (s1)) engine=$engine_type; +eval create table t2 (s1 binary(2),primary key (s1)) engine=$engine_type; +eval create table t3 (s1 varchar(2) binary,primary key (s1)) engine=$engine_type; +eval create table t4 (s1 char(2) binary,primary key (s1)) engine=$engine_type; + +insert into t1 values (0x41),(0x4120),(0x4100); +-- error 1062 +insert into t2 values (0x41),(0x4120),(0x4100); +insert into t2 values (0x41),(0x4120); +-- error 1062 +insert into t3 values (0x41),(0x4120),(0x4100); +insert into t3 values (0x41),(0x4100); +-- error 1062 +insert into t4 values (0x41),(0x4120),(0x4100); +insert into t4 values (0x41),(0x4100); +select hex(s1) from t1; +select hex(s1) from t2; +select hex(s1) from t3; +select hex(s1) from t4; +drop table t1,t2,t3,t4; +} + +if (test_foreign_keys) +{ +eval create table t1 (a int primary key,s1 varbinary(3) not null unique) engine=$engine_type; +eval create table t2 (s1 binary(2) not null, constraint c foreign key(s1) references t1(s1) on update cascade) engine=$engine_type; + +insert into t1 values(1,0x4100),(2,0x41),(3,0x4120),(4,0x42); +-- error 1452 +insert into t2 values(0x42); +insert into t2 values(0x41); +select hex(s1) from t2; +update t1 set s1=0x123456 where a=2; +select hex(s1) from t2; +-- error 1451 +update t1 set s1=0x12 where a=1; +-- error 1451 +update t1 set s1=0x12345678 where a=1; +-- error 1451 +update t1 set s1=0x123457 where a=1; +update t1 set s1=0x1220 where a=1; +select hex(s1) from t2; +update t1 set s1=0x1200 where a=1; +select hex(s1) from t2; +update t1 set s1=0x4200 where a=1; +select hex(s1) from t2; +-- error 1451 +delete from t1 where a=1; +delete from t1 where a=2; +update t2 set s1=0x4120; +-- error 1451 +delete from t1; +delete from t1 where a!=3; +select a,hex(s1) from t1; +select hex(s1) from t2; + +drop table t2,t1; + +eval create table t1 (a int primary key,s1 varchar(2) binary not null unique) engine=$engine_type; +eval create table t2 (s1 char(2) binary not null, constraint c foreign key(s1) references t1(s1) on update cascade) engine=$engine_type; + +insert into t1 values(1,0x4100),(2,0x41); +insert into t2 values(0x41); +select hex(s1) from t2; +update t1 set s1=0x1234 where a=1; +select hex(s1) from t2; +update t1 set s1=0x12 where a=2; +select hex(s1) from t2; +delete from t1 where a=1; +-- error 1451 +delete from t1 where a=2; +select a,hex(s1) from t1; +select hex(s1) from t2; + +drop table t2,t1; +} +# End FOREIGN KEY tests + +# +# Test cases for bug #15308 Problem of Order with Enum Column in Primary Key +# +eval CREATE TABLE t1 ( + ind enum('0','1','2') NOT NULL default '0', + string1 varchar(250) NOT NULL, + PRIMARY KEY (ind) +) ENGINE=$engine_type DEFAULT CHARSET=utf8; +eval CREATE TABLE t2 ( + ind enum('0','1','2') NOT NULL default '0', + string1 varchar(250) NOT NULL, + PRIMARY KEY (ind) +) ENGINE=$engine_type DEFAULT CHARSET=ucs2; + +INSERT INTO 
t1 VALUES ('1', ''),('2', ''); +INSERT INTO t2 VALUES ('1', ''),('2', ''); +SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1; +SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1; +drop table t1,t2; + +eval CREATE TABLE t1 ( + ind set('0','1','2') NOT NULL default '0', + string1 varchar(250) NOT NULL, + PRIMARY KEY (ind) +) ENGINE=$engine_type DEFAULT CHARSET=utf8; +eval CREATE TABLE t2 ( + ind set('0','1','2') NOT NULL default '0', + string1 varchar(250) NOT NULL, + PRIMARY KEY (ind) +) ENGINE=$engine_type DEFAULT CHARSET=ucs2; + +INSERT INTO t1 VALUES ('1', ''),('2', ''); +INSERT INTO t2 VALUES ('1', ''),('2', ''); +SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1; +SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1; +drop table t1,t2; + +eval CREATE TABLE t1 ( + ind bit not null, + string1 varchar(250) NOT NULL, + PRIMARY KEY (ind) +) ENGINE=$engine_type DEFAULT CHARSET=utf8; +eval CREATE TABLE t2 ( + ind bit not null, + string1 varchar(250) NOT NULL, + PRIMARY KEY (ind) +) ENGINE=$engine_type DEFAULT CHARSET=ucs2; +insert into t1 values(0,''),(1,''); +insert into t2 values(0,''),(1,''); +select hex(ind),hex(string1) from t1 order by string1; +select hex(ind),hex(string1) from t2 order by string1; +drop table t1,t2; + +# tests for bug #14056 Column prefix index on UTF-8 primary key column causes 'Can't find record..' + +eval create table t2 ( + a int, b char(10), filler char(10), primary key(a, b(2)) +) character set utf8 engine = $engine_type; + +insert into t2 values (1,'abcdefg','one'); +insert into t2 values (2,'ijkilmn','two'); +insert into t2 values (3, 'qrstuvw','three'); +update t2 set a=5, filler='booo' where a=1; +drop table t2; +eval create table t2 ( + a int, b char(10), filler char(10), primary key(a, b(2)) +) character set ucs2 engine = $engine_type; + +insert into t2 values (1,'abcdefg','one'); +insert into t2 values (2,'ijkilmn','two'); +insert into t2 values (3, 'qrstuvw','three'); +update t2 set a=5, filler='booo' where a=1; +drop table t2; + +eval create table t1(a int not null, b char(110),primary key(a,b(100))) engine=$engine_type default charset=utf8; +insert into t1 values(1,'abcdefg'),(2,'defghijk'); +insert into t1 values(6,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1); +insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2); +select a,hex(b) from t1 order by b; +update t1 set b = 'three' where a = 6; +drop table t1; +eval create table t1(a int not null, b text(110),primary key(a,b(100))) engine=$engine_type default charset=utf8; +insert into t1 values(1,'abcdefg'),(2,'defghijk'); +insert into t1 values(6,_utf8 
0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1); +insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2); +select a,hex(b) from t1 order by b; +update t1 set b = 'three' where a = 6; +drop table t1; + +if ($test_foreign_keys) +{ +# Ensure that _ibfk_0 is not mistreated as a +# generated foreign key identifier. (Bug #16387) + +eval CREATE TABLE t1(a INT, PRIMARY KEY(a)) ENGINE=$engine_type; +eval CREATE TABLE t2(a INT) ENGINE=$engine_type; +ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1(a); +ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_1; +ALTER TABLE t2 ADD CONSTRAINT t2_ibfk_0 FOREIGN KEY (a) REFERENCES t1(a); +ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_0; +SHOW CREATE TABLE t2; +DROP TABLE t2,t1; +} + +# +# Test case for bug #16229: MySQL/InnoDB uses full explicit table locks in trigger processing +# + +connect (a,localhost,root,,); +connect (b,localhost,root,,); +connection a; +eval create table t1(a int not null, b int, c int, d int, primary key(a)) engine=$engine_type; +insert into t1(a) values (1),(2),(3); +commit; +connection b; +set autocommit = 0; +update t1 set b = 5 where a = 2; +connection a; +delimiter |; +create trigger t1t before insert on t1 for each row begin set NEW.b = NEW.a * 10 + 5, NEW.c = NEW.a / 10; end | +delimiter ;| +set autocommit = 0; +connection a; +insert into t1(a) values (10),(20),(30),(40),(50),(60),(70),(80),(90),(100), +(11),(21),(31),(41),(51),(61),(71),(81),(91),(101), +(12),(22),(32),(42),(52),(62),(72),(82),(92),(102), +(13),(23),(33),(43),(53),(63),(73),(83),(93),(103), +(14),(24),(34),(44),(54),(64),(74),(84),(94),(104); +connection b; +commit; +connection a; +commit; +drop trigger t1t; +drop table t1; +disconnect a; +disconnect b; +# +# Another trigger test +# +connect (a,localhost,root,,); +connect (b,localhost,root,,); +connection a; +eval create table t1(a int not null, b int, c int, d int, primary key(a)) engine=$engine_type; +eval create table t2(a int not null, b int, c int, d int, primary key(a)) engine=$engine_type; +eval create table t3(a int not null, b int, c int, d int, primary key(a)) engine=$engine_type; +eval create table t4(a int not null, b int, c int, d int, primary key(a)) engine=$engine_type; +eval create table t5(a int not null, b int, c int, d int, primary key(a)) engine=$engine_type; +insert into t1(a) values (1),(2),(3); +insert into t2(a) values (1),(2),(3); +insert into t3(a) values (1),(2),(3); +insert into t4(a) values (1),(2),(3); +insert into t3(a) values (5),(7),(8); +insert into t4(a) values (5),(7),(8); +insert into t5(a) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12); + +delimiter |; +create trigger t1t before insert on t1 for each row begin + INSERT INTO t2 SET a = NEW.a; +end | + +create trigger t2t before insert on t2 for each row begin + DELETE FROM t3 WHERE a = NEW.a; +end | + 
+create trigger t3t before delete on t3 for each row begin + UPDATE t4 SET b = b + 1 WHERE a = OLD.a; +end | + +create trigger t4t before update on t4 for each row begin + UPDATE t5 SET b = b + 1 where a = NEW.a; +end | +delimiter ;| +commit; +set autocommit = 0; +update t1 set b = b + 5 where a = 1; +update t2 set b = b + 5 where a = 1; +update t3 set b = b + 5 where a = 1; +update t4 set b = b + 5 where a = 1; +insert into t5(a) values(20); +connection b; +set autocommit = 0; +insert into t1(a) values(7); +insert into t2(a) values(8); +delete from t2 where a = 3; +update t4 set b = b + 1 where a = 3; +commit; +drop trigger t1t; +drop trigger t2t; +drop trigger t3t; +drop trigger t4t; +drop table t1, t2, t3, t4, t5; +connection default; +disconnect a; +disconnect b; + +if ($test_foreign_keys) +{ +# +# Test that cascading updates leading to duplicate keys give the correct +# error message (bug #9680) +# + +eval CREATE TABLE t1 ( + field1 varchar(8) NOT NULL DEFAULT '', + field2 varchar(8) NOT NULL DEFAULT '', + PRIMARY KEY (field1, field2) +) ENGINE=$engine_type; + +eval CREATE TABLE t2 ( + field1 varchar(8) NOT NULL DEFAULT '' PRIMARY KEY, + FOREIGN KEY (field1) REFERENCES t1 (field1) + ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=$engine_type; + +INSERT INTO t1 VALUES ('old', 'somevalu'); +INSERT INTO t1 VALUES ('other', 'anyvalue'); + +INSERT INTO t2 VALUES ('old'); +INSERT INTO t2 VALUES ('other'); + +--error ER_FOREIGN_DUPLICATE_KEY +UPDATE t1 SET field1 = 'other' WHERE field2 = 'somevalu'; + +DROP TABLE t2; +DROP TABLE t1; + +# +# Bug#18477 - MySQL/InnoDB Ignoring Foreign Keys in ALTER TABLE +# +eval create table t1 ( + c1 bigint not null, + c2 bigint not null, + primary key (c1), + unique key (c2) +) engine=$engine_type; +# +eval create table t2 ( + c1 bigint not null, + primary key (c1) +) engine=$engine_type; +# +alter table t1 add constraint c2_fk foreign key (c2) + references t2(c1) on delete cascade; +show create table t1; +# +alter table t1 drop foreign key c2_fk; +show create table t1; +# +drop table t1, t2; +} +# End FOREIGN KEY test + +# +# Bug #14360: problem with intervals +# + +eval create table t1(a date) engine=$engine_type; +eval create table t2(a date, key(a)) engine=$engine_type; +insert into t1 values('2005-10-01'); +insert into t2 values('2005-10-01'); +select * from t1, t2 + where t2.a between t1.a - interval 2 day and t1.a + interval 2 day; +drop table t1, t2; + +eval create table t1 (id int not null, f_id int not null, f int not null, +primary key(f_id, id)) engine=$engine_type; +eval create table t2 (id int not null,s_id int not null,s varchar(200), +primary key(id)) engine=$engine_type; +INSERT INTO t1 VALUES (8, 1, 3); +INSERT INTO t1 VALUES (1, 2, 1); +INSERT INTO t2 VALUES (1, 0, ''); +INSERT INTO t2 VALUES (8, 1, ''); +commit; +DELETE ml.* FROM t1 AS ml LEFT JOIN t2 AS mm ON (mm.id=ml.id) +WHERE mm.id IS NULL; +select ml.* from t1 as ml left join t2 as mm on (mm.id=ml.id) +where mm.id is null lock in share mode; +drop table t1,t2; + +# +# Test case where X-locks on unused rows should be released in a +# update (because READ COMMITTED isolation level) +# + +connect (a,localhost,root,,); +connect (b,localhost,root,,); +connection a; +eval create table t1(a int not null, b int, primary key(a)) engine=$engine_type; +insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3); +commit; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +update t1 set b = 5 where b = 1; +connection b; +set autocommit = 0; +SET SESSION TRANSACTION 
ISOLATION LEVEL READ COMMITTED; +# +# X-lock to record (7,3) should be released in a update +# +select * from t1 where a = 7 and b = 3 for update; +connection a; +commit; +connection b; +commit; +drop table t1; +connection default; +disconnect a; +disconnect b; + +if ($test_transactions) +{ +# +# Test case where no locks should be released (because we are not +# using READ COMMITTED isolation level) +# + +connect (a,localhost,root,,); +connect (b,localhost,root,,); +connection a; +eval create table t1(a int not null, b int, primary key(a)) engine=$engine_type; +insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2); +commit; +set autocommit = 0; +select * from t1 lock in share mode; +update t1 set b = 5 where b = 1; +connection b; +set autocommit = 0; +# +# S-lock to records (2,2),(4,2), and (6,2) should not be released in a update +# +--error 1205 +select * from t1 where a = 2 and b = 2 for update; +# +# X-lock to record (1,1),(3,1),(5,1) should not be released in a update +# +--error 1205 +connection a; +commit; +connection b; +commit; +connection default; +disconnect a; +disconnect b; +drop table t1; + +# +# Consistent read should be used in following selects +# +# 1) INSERT INTO ... SELECT +# 2) UPDATE ... = ( SELECT ...) +# 3) CREATE ... SELECT + +connect (a,localhost,root,,); +connect (b,localhost,root,,); +connection a; +eval create table t1(a int not null, b int, primary key(a)) engine=$engine_type; +insert into t1 values (1,2),(5,3),(4,2); +eval create table t2(d int not null, e int, primary key(d)) engine=$engine_type; +insert into t2 values (8,6),(12,1),(3,1); +commit; +set autocommit = 0; +select * from t2 for update; +connection b; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +insert into t1 select * from t2; +update t1 set b = (select e from t2 where a = d); +eval create table t3(d int not null, e int, primary key(d)) engine=$engine_type +select * from t2; +commit; +connection a; +commit; +connection default; +disconnect a; +disconnect b; +drop table t1, t2, t3; + +# +# Consistent read should not be used if +# +# (a) isolation level is serializable OR +# (b) select ... lock in share mode OR +# (c) select ... for update +# +# in following queries: +# +# 1) INSERT INTO ... SELECT +# 2) UPDATE ... = ( SELECT ...) +# 3) CREATE ... 
SELECT + +connect (a,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (b,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (c,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (d,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (e,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (f,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (g,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (h,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (i,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connect (j,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; +connection a; +create table t1(a int not null, b int, primary key(a)); +insert into t1 values (1,2),(5,3),(4,2); +create table t2(a int not null, b int, primary key(a)); +insert into t2 values (8,6),(12,1),(3,1); +create table t3(d int not null, b int, primary key(d)); +insert into t3 values (8,6),(12,1),(3,1); +create table t5(a int not null, b int, primary key(a)); +insert into t5 values (1,2),(5,3),(4,2); +create table t6(d int not null, e int, primary key(d)); +insert into t6 values (8,6),(12,1),(3,1); +create table t8(a int not null, b int, primary key(a)); +insert into t8 values (1,2),(5,3),(4,2); +create table t9(d int not null, e int, primary key(d)); +insert into t9 values (8,6),(12,1),(3,1); +commit; +set autocommit = 0; +select * from t2 for update; +connection b; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +--send +insert into t1 select * from t2; +connection c; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +--send +update t3 set b = (select b from t2 where a = d); +connection d; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +--send +create table t4(a int not null, b int, primary key(a)) select * from t2; +connection e; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +--send +insert into t5 (select * from t2 lock in share mode); +connection f; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +--send +update t6 set e = (select b from t2 where a = d lock in share mode); +connection g; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +--send +create table t7(a int not null, b int, primary key(a)) select * from t2 lock in share mode; +connection h; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +--send +insert into t8 (select * from t2 for update); +connection i; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +--send +update t9 set e = (select b from t2 where a = d for update); +connection j; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +--send +create table t10(a int not null, b int, primary key(a)) select * from t2 for update; + +connection b; +--error 1205 +reap; + +connection c; +--error 1205 +reap; + +connection d; +--error 1205 +reap; + +connection e; +--error 1205 +reap; + +connection f; +--error 1205 +reap; + +connection g; +--error 1205 +reap; + +connection h; +--error 1205 +reap; + +connection i; +--error 1205 +reap; + +connection j; +--error 1205 +reap; + +connection a; +commit; + +connection default; +disconnect a; +disconnect b; +disconnect c; +disconnect d; +disconnect e; +disconnect f; +disconnect g; 
+disconnect h; +disconnect i; +disconnect j; +drop table t1, t2, t3, t5, t6, t8, t9; +} +# End transactional tests + +if (test_foreign_keys) +{ +# bug 18934, "InnoDB crashes when table uses column names like DB_ROW_ID" +--error 1005 +eval CREATE TABLE t1 (DB_ROW_ID int) engine=$engine_type; + +# +# Bug #17152: Wrong result with BINARY comparison on aliased column +# + +eval CREATE TABLE t1 ( + a BIGINT(20) NOT NULL, + PRIMARY KEY (a) + ) ENGINE=$engine_type DEFAULT CHARSET=UTF8; + +eval CREATE TABLE t2 ( + a BIGINT(20) NOT NULL, + b VARCHAR(128) NOT NULL, + c TEXT NOT NULL, + PRIMARY KEY (a,b), + KEY idx_t2_b_c (b,c(200)), + CONSTRAINT t_fk FOREIGN KEY (a) REFERENCES t1 (a) + ON DELETE CASCADE + ) ENGINE=$engine_type DEFAULT CHARSET=UTF8; + +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (1, 'bar', 'vbar'); +INSERT INTO t2 VALUES (1, 'BAR2', 'VBAR'); +INSERT INTO t2 VALUES (1, 'bar_bar', 'bibi'); +INSERT INTO t2 VALUES (1, 'customer_over', '1'); + +SELECT * FROM t2 WHERE b = 'customer_over'; +SELECT * FROM t2 WHERE BINARY b = 'customer_over'; +SELECT DISTINCT p0.a FROM t2 p0 WHERE p0.b = 'customer_over'; +/* Bang: Empty result set, above was expected: */ +SELECT DISTINCT p0.a FROM t2 p0 WHERE BINARY p0.b = 'customer_over'; +SELECT p0.a FROM t2 p0 WHERE BINARY p0.b = 'customer_over'; + +drop table t2, t1; +} + +if ($no_spatial_key) +{ +# +# Bug #15680 (SPATIAL key in innodb) +# +--error ER_TABLE_CANT_HANDLE_SPKEYS +eval create table t1 (g geometry not null, spatial gk(g)) engine=$engine_type; +} + +# +# Test optimize on table with open transaction +# + +eval CREATE TABLE t1 ( a int ) ENGINE=$engine_type; +BEGIN; +INSERT INTO t1 VALUES (1); +OPTIMIZE TABLE t1; +DROP TABLE t1; + +####################################################################### +# # +# This is derivate of t/innodb.test and has to be maintained by # +# MySQL guys only. # +# # +# Please synchronize this file from time to time with t/innodb.test. # +# Please, DO NOT create a toplevel testcase innodb-mix2.test, because # +# innodb.test does already these tests. # +# # +####################################################################### diff --git a/mysql-test/t/innodb_cache.test b/mysql-test/include/query_cache.inc similarity index 74% rename from mysql-test/t/innodb_cache.test rename to mysql-test/include/query_cache.inc index 8ed2853e4f7..3b63167a737 100644 --- a/mysql-test/t/innodb_cache.test +++ b/mysql-test/include/query_cache.inc @@ -1,5 +1,18 @@ --- source include/have_innodb.inc --- source include/have_query_cache.inc +# include/query_cache.inc +# +# The variables +# $engine_type -- storage engine to be tested +# $test_foreign_keys -- 0, skip foreign key tests +# -- 1, do not skip foreign key tests +# have to be set before sourcing this script. 
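+#
+# Example usage (illustrative only -- the actual wrapper test that sets
+# these variables is not part of this hunk):
+#   let $engine_type= InnoDB;
+#   let $test_foreign_keys= 1;
+#   --source include/query_cache.inc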
+# +# Last update: +# 2006-08-02 ML test refactored +# old name was innodb_cache.test +# main code went into include/query_cache.inc +# + +eval SET SESSION STORAGE_ENGINE = $engine_type; # Initialise --disable_warnings @@ -11,7 +24,7 @@ drop table if exists t1,t2,t3; # flush status; set autocommit=0; -create table t1 (a int not null) engine=innodb; +create table t1 (a int not null); insert into t1 values (1),(2),(3); select * from t1; show status like "Qcache_queries_in_cache"; @@ -19,15 +32,15 @@ drop table t1; commit; set autocommit=1; begin; -create table t1 (a int not null) engine=innodb; +create table t1 (a int not null); insert into t1 values (1),(2),(3); select * from t1; show status like "Qcache_queries_in_cache"; drop table t1; commit; -create table t1 (a int not null) engine=innodb; -create table t2 (a int not null) engine=innodb; -create table t3 (a int not null) engine=innodb; +create table t1 (a int not null); +create table t2 (a int not null); +create table t3 (a int not null); insert into t1 values (1),(2); insert into t2 values (1),(2); insert into t3 values (1),(2); @@ -54,20 +67,22 @@ commit; show status like "Qcache_queries_in_cache"; drop table t3,t2,t1; -CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id)) ENGINE=InnoDB; +CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id)); select count(*) from t1; insert into t1 (id) values (0); select count(*) from t1; drop table t1; +if ($test_foreign_keys) +{ # # one statement roll back inside transation # let $save_query_cache_size=`select @@global.query_cache_size`; set GLOBAL query_cache_size=1355776; -CREATE TABLE t1 ( id int(10) NOT NULL auto_increment, a varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY a (a)) ENGINE=innodb; -CREATE TABLE t2 ( id int(10) NOT NULL auto_increment, b varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY b (b)) ENGINE=innodb; -CREATE TABLE t3 ( id int(10) NOT NULL auto_increment, t1_id int(10) NOT NULL default '0', t2_id int(10) NOT NULL default '0', state int(11) default NULL, PRIMARY KEY (id), UNIQUE KEY t1_id (t1_id,t2_id), KEY t2_id (t2_id,t1_id), CONSTRAINT `t3_ibfk_1` FOREIGN KEY (`t1_id`) REFERENCES `t1` (`id`), CONSTRAINT `t3_ibfk_2` FOREIGN KEY (`t2_id`) REFERENCES `t2` (`id`)) ENGINE=innodb; +CREATE TABLE t1 ( id int(10) NOT NULL auto_increment, a varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY a (a)); +CREATE TABLE t2 ( id int(10) NOT NULL auto_increment, b varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY b (b)); +CREATE TABLE t3 ( id int(10) NOT NULL auto_increment, t1_id int(10) NOT NULL default '0', t2_id int(10) NOT NULL default '0', state int(11) default NULL, PRIMARY KEY (id), UNIQUE KEY t1_id (t1_id,t2_id), KEY t2_id (t2_id,t1_id), CONSTRAINT `t3_ibfk_1` FOREIGN KEY (`t1_id`) REFERENCES `t1` (`id`), CONSTRAINT `t3_ibfk_2` FOREIGN KEY (`t2_id`) REFERENCES `t2` (`id`)); INSERT INTO t1 VALUES (1,'me'); INSERT INTO t2 VALUES (1,'you'); INSERT INTO t3 VALUES (2,1,1,2); @@ -83,5 +98,6 @@ drop table t3,t2,t1; --disable_query_log eval set GLOBAL query_cache_size=$save_query_cache_size; --enable_query_log +} # End of 4.1 tests diff --git a/mysql-test/include/read_many_rows.inc b/mysql-test/include/read_many_rows.inc new file mode 100644 index 00000000000..e69f5a05cd6 --- /dev/null +++ b/mysql-test/include/read_many_rows.inc @@ -0,0 +1,61 @@ +# include/read_many_rows.inc +# +# Test how filesort and buffered-record-reads works +# This test needs a lot of time. 
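+#
+# Example usage (values are illustrative; see the variable documentation
+# below for the exact requirements on $other_engine_type):
+#   let $engine_type= InnoDB;
+#   let $other_engine_type= MyISAM;
+#   --source include/read_many_rows.inc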
+# +# The variables +# $engine_type -- storage engine to be tested +# $other_engine_type -- storage engine <> $engine_type, if possible +# 1. $other_engine_type must allow to store many rows +# without using non standard server options +# (does not need a t/read_many_rows_*-master.opt file) +# 2. $other_engine_type must point to an all time +# available storage engine +# 2006-08 MySQL 5.1 MyISAM and MEMORY only +# have to be set before sourcing this script. +# +# Last update: +# 2006-08-03 ML test refactored (MySQL 5.1) +# main code t/innodb-big.test --> include/read_many_rows.inc +# + +eval SET SESSION STORAGE_ENGINE = $engine_type; + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3, t4; +--enable_warnings + +eval CREATE TABLE t1 (id INTEGER) ENGINE=$other_engine_type; +CREATE TABLE t2 (id INTEGER PRIMARY KEY); +CREATE TABLE t3 (a CHAR(32) PRIMARY KEY,id INTEGER); +eval CREATE TABLE t4 (a CHAR(32) PRIMARY KEY,id INTEGER) ENGINE=$other_engine_type; + +INSERT INTO t1 (id) VALUES (1); +INSERT INTO t1 SELECT id+1 FROM t1; +INSERT INTO t1 SELECT id+2 FROM t1; +INSERT INTO t1 SELECT id+4 FROM t1; +INSERT INTO t1 SELECT id+8 FROM t1; +INSERT INTO t1 SELECT id+16 FROM t1; +INSERT INTO t1 SELECT id+32 FROM t1; +INSERT INTO t1 SELECT id+64 FROM t1; +INSERT INTO t1 SELECT id+128 FROM t1; +INSERT INTO t1 SELECT id+256 FROM t1; +INSERT INTO t1 SELECT id+512 FROM t1; +INSERT INTO t1 SELECT id+1024 FROM t1; +INSERT INTO t1 SELECT id+2048 FROM t1; +INSERT INTO t1 SELECT id+4096 FROM t1; +INSERT INTO t1 SELECT id+8192 FROM t1; +INSERT INTO t1 SELECT id+16384 FROM t1; +INSERT INTO t1 SELECT id+32768 FROM t1; +INSERT INTO t1 SELECT id+65536 FROM t1; +INSERT INTO t1 SELECT id+131072 FROM t1; +INSERT INTO t1 SELECT id+262144 FROM t1; +INSERT INTO t1 SELECT id+524288 FROM t1; +INSERT INTO t1 SELECT id+1048576 FROM t1; + +INSERT INTO t2 SELECT * FROM t1; +INSERT INTO t3 SELECT CONCAT(id),id FROM t2 ORDER BY -id; +INSERT INTO t4 SELECT * FROM t3 ORDER BY CONCAT(a); +SELECT SUM(id) FROM t3; + +DROP TABLE t1,t2,t3,t4; diff --git a/mysql-test/include/rowid_order.inc b/mysql-test/include/rowid_order.inc new file mode 100644 index 00000000000..b05bad45cde --- /dev/null +++ b/mysql-test/include/rowid_order.inc @@ -0,0 +1,121 @@ +# include/rowid_order.inc +# +# Test for rowid ordering (and comparison) functions. +# do index_merge select for tables with PK of various types. +# +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. +# +# Note: The comments/expections refer to InnoDB. +# They might be not valid for other storage engines. 
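+#
+# Example usage (illustrative only -- the real wrapper test is not shown
+# in this hunk):
+#   let $engine_type= InnoDB;
+#   --source include/rowid_order.inc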
+# +# Last update: +# 2006-08-02 ML test refactored +# old name was t/rowid_order.test +# main code went into include/rowid_order.inc +# + +eval SET SESSION STORAGE_ENGINE = $engine_type; + +--disable_warnings +drop table if exists t1, t2, t3,t4; +--enable_warnings + +# Signed number as rowid +create table t1 ( + pk1 int not NULL, + key1 int(11), + key2 int(11), + PRIMARY KEY (pk1), + KEY key1 (key1), + KEY key2 (key2) +); +insert into t1 values (-5, 1, 1), + (-100, 1, 1), + (3, 1, 1), + (0, 1, 1), + (10, 1, 1); +explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; +select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; +drop table t1; + +# Unsigned numbers as rowids +create table t1 ( + pk1 int unsigned not NULL, + key1 int(11), + key2 int(11), + PRIMARY KEY (pk1), + KEY key1 (key1), + KEY key2 (key2) +); +insert into t1 values (0, 1, 1), + (0xFFFFFFFF, 1, 1), + (0xFFFFFFFE, 1, 1), + (1, 1, 1), + (2, 1, 1); +select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; +drop table t1; + +# Case-insensitive char(N) +create table t1 ( + pk1 char(4) not NULL, + key1 int(11), + key2 int(11), + PRIMARY KEY (pk1), + KEY key1 (key1), + KEY key2 (key2) +) collate latin2_general_ci; +insert into t1 values ('a1', 1, 1), + ('b2', 1, 1), + ('A3', 1, 1), + ('B4', 1, 1); +select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; +drop table t1; + +# Multi-part PK +create table t1 ( + pk1 int not NULL, + pk2 char(4) not NULL collate latin1_german1_ci, + pk3 char(4) not NULL collate latin1_bin, + key1 int(11), + key2 int(11), + PRIMARY KEY (pk1,pk2,pk3), + KEY key1 (key1), + KEY key2 (key2) +); +insert into t1 values + (1, 'u', 'u', 1, 1), + (1, 'u', char(0xEC), 1, 1), + (1, 'u', 'x', 1, 1); +insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1; +insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u'; +insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1; +select * from t1; +select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; + +# Hidden PK +alter table t1 drop primary key; +select * from t1; +select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; +drop table t1; + +# Variable-length PK +# this is also test for Bug#2688 +create table t1 ( + pk1 varchar(8) NOT NULL default '', + pk2 varchar(4) NOT NULL default '', + key1 int(11), + key2 int(11), + primary key(pk1, pk2), + KEY key1 (key1), + KEY key2 (key2) +); +insert into t1 values ('','empt',2,2), + ('a','a--a',2,2), + ('bb','b--b',2,2), + ('ccc','c--c',2,2), + ('dddd','d--d',2,2); +select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; + +drop table t1; diff --git a/mysql-test/include/rpl_multi_engine3.inc b/mysql-test/include/rpl_multi_engine3.inc index 5d8f7e46409..e1a80df336c 100644 --- a/mysql-test/include/rpl_multi_engine3.inc +++ b/mysql-test/include/rpl_multi_engine3.inc @@ -28,7 +28,6 @@ INSERT INTO t1 VALUES(412,1,'Testing MySQL databases is a cool ', select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id; sync_slave_with_master; ---sleep 5 --echo --- Select from t1 on slave --- select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id; @@ -44,7 +43,6 @@ SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412; # into the binlog other wise we will miss the update. 
sync_slave_with_master; ---sleep 5 --echo --- Check Update on slave --- SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412; @@ -56,7 +54,6 @@ DELETE FROM t1 WHERE id = 42; SELECT COUNT(*) FROM t1; sync_slave_with_master; ---sleep 5 --echo --- Show current count on slave for t1 --- SELECT COUNT(*) FROM t1; diff --git a/mysql-test/include/safe_set_to_maybe_ro_var.inc b/mysql-test/include/safe_set_to_maybe_ro_var.inc new file mode 100644 index 00000000000..add7f2091b3 --- /dev/null +++ b/mysql-test/include/safe_set_to_maybe_ro_var.inc @@ -0,0 +1,23 @@ +# to mask out the error - never abort neither log in result file - in setting +# to read-only variable. +# It is assumed that the new value is equal to one the var was set to. +# Such situation happens particularily with binlog_format that becomes read-only +# with ndb default storage. +# +# when generate results always watch the file to find what is expected, +# the SET query may fail + +# script accepts $maybe_ro_var the var name and $val4var the value + +### USAGE: +### let $maybe_ro_var= ... +### let $val4var= ... +### include/safe_set_to_maybe_ro_var.inc + +--disable_result_log +--disable_abort_on_error +eval SET $maybe_ro_var = $val4var; +--enable_abort_on_error +--enable_result_log + +eval SELECT $maybe_ro_var; diff --git a/mysql-test/include/strict_autoinc.inc b/mysql-test/include/strict_autoinc.inc new file mode 100644 index 00000000000..6960440f3a7 --- /dev/null +++ b/mysql-test/include/strict_autoinc.inc @@ -0,0 +1,28 @@ +# +# Test for strict-mode autoincrement +# + +set @org_mode=@@sql_mode; +eval create table t1 +( + `a` tinyint(4) NOT NULL auto_increment, + primary key (`a`) +) engine = $type ; +set @@sql_mode='strict_all_tables'; +--error ER_WARN_DATA_OUT_OF_RANGE +insert into t1 values(1000); +select count(*) from t1; + +set auto_increment_increment=1000; +set auto_increment_offset=700; +--error ER_WARN_DATA_OUT_OF_RANGE +insert into t1 values(null); +select count(*) from t1; + +set @@sql_mode=@org_mode; +insert into t1 values(null); +select * from t1; + +drop table t1; + +# End of test diff --git a/mysql-test/t/innodb_unsafe_binlog.test b/mysql-test/include/unsafe_binlog.inc similarity index 65% rename from mysql-test/t/innodb_unsafe_binlog.test rename to mysql-test/include/unsafe_binlog.inc index af1091e4421..6fbbdcb6d6c 100644 --- a/mysql-test/t/innodb_unsafe_binlog.test +++ b/mysql-test/include/unsafe_binlog.inc @@ -1,20 +1,34 @@ --- source include/have_innodb.inc +# include/unsafe_binlog.inc # -# Note that these tests uses options +# The variable +# $engine_type -- storage engine to be tested +# has to be set before sourcing this script. +# +# Notes: +# 1. This test uses at least in case of InnoDB options # innodb_locks_unsafe_for_binlog = true # innodb_lock_timeout = 5 +# 2. The comments/expectations refer to InnoDB. +# They might be not valid for other storage engines. 
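+#
+# Example usage (illustrative only -- the wrapper test and its *-master.opt
+# file carrying the server options listed above are not part of this hunk):
+#   let $engine_type= InnoDB;
+#   --source include/unsafe_binlog.inc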
+# +# Last update: +# 2006-08-02 ML test refactored +# old name was innodb_unsafe_binlog.test +# main code went into include/unsafe_binlog.inc +# # -# Test cases for a bug #15650 +# Test cases for bug#15650 +# DELETE with LEFT JOIN crashes server with innodb_locks_unsafe_for_binlog # --disable_warnings drop table if exists t1,t2; --enable_warnings -create table t1 (id int not null, f_id int not null, f int not null, -primary key(f_id, id)) engine=innodb; -create table t2 (id int not null,s_id int not null,s varchar(200), -primary key(id)) engine=innodb; +eval create table t1 (id int not null, f_id int not null, f int not null, +primary key(f_id, id)) engine = $engine_type; +eval create table t2 (id int not null,s_id int not null,s varchar(200), +primary key(id)) engine = $engine_type; INSERT INTO t1 VALUES (8, 1, 3); INSERT INTO t1 VALUES (1, 2, 1); INSERT INTO t2 VALUES (1, 0, ''); @@ -34,10 +48,10 @@ drop table t1,t2; connect (a,localhost,root,,); connect (b,localhost,root,,); connection a; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +eval create table t1(a int not null, b int, primary key(a)) engine = $engine_type; insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3); commit; -set autocommit = 0; +set autocommit = 0; select * from t1 lock in share mode; update t1 set b = 5 where b = 1; connection b; @@ -63,10 +77,10 @@ disconnect b; connect (a,localhost,root,,); connect (b,localhost,root,,); connection a; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +eval create table t1(a int not null, b int, primary key(a)) engine = $engine_type; insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3); commit; -set autocommit = 0; +set autocommit = 0; update t1 set b = 5 where b = 1; connection b; set autocommit = 0; @@ -93,9 +107,9 @@ disconnect b; connect (a,localhost,root,,); connect (b,localhost,root,,); connection a; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +eval create table t1(a int not null, b int, primary key(a)) engine = $engine_type; insert into t1 values (1,2),(5,3),(4,2); -create table t2(d int not null, e int, primary key(d)) engine=innodb; +eval create table t2(d int not null, e int, primary key(d)) engine = $engine_type; insert into t2 values (8,6),(12,1),(3,1); commit; set autocommit = 0; @@ -104,7 +118,7 @@ connection b; set autocommit = 0; insert into t1 select * from t2; update t1 set b = (select e from t2 where a = d); -create table t3(d int not null, e int, primary key(d)) engine=innodb +eval create table t3(d int not null, e int, primary key(d)) engine = $engine_type select * from t2; commit; connection a; @@ -115,7 +129,7 @@ disconnect b; drop table t1, t2, t3; # -# Consistent read should not be used if +# Consistent read should not be used if # # (a) isolation level is serializable OR # (b) select ... 
lock in share mode OR @@ -131,26 +145,29 @@ connect (a,localhost,root,,); connect (b,localhost,root,,); connect (c,localhost,root,,); connect (d,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; connect (e,localhost,root,,); connect (f,localhost,root,,); connect (g,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; connect (h,localhost,root,,); connect (i,localhost,root,,); connect (j,localhost,root,,); +eval SET SESSION STORAGE_ENGINE = $engine_type; connection a; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +eval create table t1(a int not null, b int, primary key(a)) engine = $engine_type; insert into t1 values (1,2),(5,3),(4,2); -create table t2(a int not null, b int, primary key(a)) engine=innodb; +eval create table t2(a int not null, b int, primary key(a)) engine = $engine_type; insert into t2 values (8,6),(12,1),(3,1); -create table t3(d int not null, b int, primary key(d)) engine=innodb; +eval create table t3(d int not null, b int, primary key(d)) engine = $engine_type; insert into t3 values (8,6),(12,1),(3,1); -create table t5(a int not null, b int, primary key(a)) engine=innodb; +eval create table t5(a int not null, b int, primary key(a)) engine = $engine_type; insert into t5 values (1,2),(5,3),(4,2); -create table t6(d int not null, e int, primary key(d)) engine=innodb; +eval create table t6(d int not null, e int, primary key(d)) engine = $engine_type; insert into t6 values (8,6),(12,1),(3,1); -create table t8(a int not null, b int, primary key(a)) engine=innodb; +eval create table t8(a int not null, b int, primary key(a)) engine = $engine_type; insert into t8 values (1,2),(5,3),(4,2); -create table t9(d int not null, e int, primary key(d)) engine=innodb; +eval create table t9(d int not null, e int, primary key(d)) engine = $engine_type; insert into t9 values (8,6),(12,1),(3,1); commit; set autocommit = 0; @@ -169,7 +186,7 @@ connection d; set autocommit = 0; SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; --send -create table t4(a int not null, b int, primary key(a)) engine=innodb select * from t2; +create table t4(a int not null, b int, primary key(a)) select * from t2; connection e; set autocommit = 0; --send @@ -181,7 +198,7 @@ update t6 set e = (select b from t2 where a = d lock in share mode); connection g; set autocommit = 0; --send -create table t7(a int not null, b int, primary key(a)) engine=innodb select * from t2 lock in share mode; +create table t7(a int not null, b int, primary key(a)) select * from t2 lock in share mode; connection h; set autocommit = 0; --send @@ -193,7 +210,7 @@ update t9 set e = (select b from t2 where a = d for update); connection j; set autocommit = 0; --send -create table t10(a int not null, b int, primary key(a)) engine=innodb select * from t2 for update; +create table t10(a int not null, b int, primary key(a)) select * from t2 for update; connection b; --error 1205 diff --git a/mysql-test/include/wait_until_connected_again.inc b/mysql-test/include/wait_until_connected_again.inc index 5ae722e6db7..dc96f646cb3 100644 --- a/mysql-test/include/wait_until_connected_again.inc +++ b/mysql-test/include/wait_until_connected_again.inc @@ -3,7 +3,7 @@ # server has been restored or timeout occurs --disable_result_log --disable_query_log -let $counter= 100; +let $counter= 500; while ($mysql_errno) { --error 0,2002,2006 diff --git a/mysql-test/include/wait_until_rows_count.inc b/mysql-test/include/wait_until_rows_count.inc new file mode 100644 index 00000000000..cf2a21d335a --- /dev/null 
+++ b/mysql-test/include/wait_until_rows_count.inc @@ -0,0 +1,52 @@ +# include/wait_until_rows_count.inc +# inspired by wait_for_slave_status by Matthias Leich +# +# SUMMARY +# +# Waits until SELECT count(*)-$count from $table returns zero +# +# USAGE +# +# Set vars like +# let $count=11; +# let $table=t1; +# # invoke the macro +# --include wait_until_rows_count.inc +# +# EXAMPLE +# extra/binlog/binlog_insert_delayed.test +# +# +# TODO: generalize up to wait_[until|while] with arbitrary select or even query and +# a condition to wait or get awakened +# It's impossible to implement such a "most" general macro without +# extending mysqltest. Just no way to pass a query as an argument and +# evaluate it here, like eval "$quuery". One is bound +# to specify it inside of the macro + +--disable_query_log + +let $wait_counter= 300; # max wait in 0.1 seconds +while ($wait_counter) +{ + eval select count(*)-$count from $table into @rez; + let $rez=`select @rez`; + let $success=`SELECT @rez = 0`; + let $no_success=1; + if ($success) + { + let $wait_counter= 1; # droppping counter to leave loop + let $no_success=0; + } + if ($no_success) + { + --sleep 0.1 + } + dec $wait_counter; +} + +--enable_query_log +if ($no_success) +{ + --die Timeout in wait_until_rows_count.inc, required table never had a prescribed number of rows. +} diff --git a/mysql-test/lib/mtr_cases.pl b/mysql-test/lib/mtr_cases.pl index cfd07abb9e0..bb92730444c 100644 --- a/mysql-test/lib/mtr_cases.pl +++ b/mysql-test/lib/mtr_cases.pl @@ -62,7 +62,7 @@ sub collect_test_cases ($) { foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort my $elem= undef; my $component_id= undef; - + # Get rid of directory part (path). Leave the extension since it is used # to understand type of the test. @@ -390,6 +390,7 @@ sub collect_one_test_case($$$$$$$) { { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "No tests with sh scripts on Windows"; + return; } else { @@ -404,6 +405,7 @@ sub collect_one_test_case($$$$$$$) { { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "No tests with sh scripts on Windows"; + return; } else { @@ -448,6 +450,7 @@ sub collect_one_test_case($$$$$$$) { { $tinfo->{'skip'}= 1; $tinfo->{'disable'}= 1; # Sub type of 'skip' + return; } } @@ -457,16 +460,19 @@ sub collect_one_test_case($$$$$$$) { { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "No IM with embedded server"; + return; } elsif ( $::opt_ps_protocol ) { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "No IM with --ps-protocol"; + return; } elsif ( $::opt_skip_im ) { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "No IM tests(--skip-im)"; + return; } } else @@ -477,18 +483,21 @@ sub collect_one_test_case($$$$$$$) { { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "Test need 'big-test' option"; + return; } if ( $tinfo->{'ndb_extra'} and ! $::opt_ndb_extra_test ) { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "Test need 'ndb_extra' option"; + return; } if ( $tinfo->{'require_manager'} ) { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "Test need the _old_ manager(to be removed)"; + return; } if ( defined $tinfo->{'binlog_format'} and @@ -496,12 +505,14 @@ sub collect_one_test_case($$$$$$$) { { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "Not running with binlog format '$tinfo->{'binlog_format'}'"; + return; } if ( $tinfo->{'need_debug'} && ! 
$::debug_compiled_binaries ) { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "Test need debug binaries"; + return; } } @@ -512,6 +523,7 @@ sub collect_one_test_case($$$$$$$) { { $tinfo->{'skip'}= 1; $tinfo->{'comment'}= "Can't restart a running server"; + return; } } @@ -538,8 +550,6 @@ sub mtr_options_from_test_file($$) { while ( my $line= <$F> ) { - chomp; - next if ( $line !~ /^--/ ); # Match this line against tag in "tags" array diff --git a/mysql-test/lib/mtr_im.pl b/mysql-test/lib/mtr_im.pl new file mode 100644 index 00000000000..ca17516278e --- /dev/null +++ b/mysql-test/lib/mtr_im.pl @@ -0,0 +1,761 @@ +# -*- cperl -*- + +# This is a library file used by the Perl version of mysql-test-run, +# and is part of the translation of the Bourne shell script with the +# same name. + +use strict; + +# Private IM-related operations. + +sub mtr_im_kill_process ($$$$); +sub mtr_im_load_pids ($); +sub mtr_im_terminate ($); +sub mtr_im_check_alive ($); +sub mtr_im_check_main_alive ($); +sub mtr_im_check_angel_alive ($); +sub mtr_im_check_mysqlds_alive ($); +sub mtr_im_check_mysqld_alive ($); +sub mtr_im_cleanup ($); +sub mtr_im_rm_file ($); +sub mtr_im_errlog ($); +sub mtr_im_kill ($); +sub mtr_im_wait_for_connection ($$$); +sub mtr_im_wait_for_mysqld($$$); + +# Public IM-related operations. + +sub mtr_im_start ($$); +sub mtr_im_stop ($); + +############################################################################## +# +# Private operations. +# +############################################################################## + +sub mtr_im_kill_process ($$$$) { + my $pid_lst= shift; + my $signal= shift; + my $total_retries= shift; + my $timeout= shift; + + my %pids; + + foreach my $pid ( @{$pid_lst} ) + { + $pids{$pid}= 1; + } + + for ( my $cur_attempt= 1; $cur_attempt <= $total_retries; ++$cur_attempt ) + { + foreach my $pid ( keys %pids ) + { + mtr_debug("Sending $signal to $pid..."); + + kill($signal, $pid); + + unless ( kill (0, $pid) ) + { + mtr_debug("Process $pid died."); + delete $pids{$pid}; + } + } + + return if scalar keys %pids == 0; + + mtr_debug("Sleeping $timeout second(s) waiting for processes to die..."); + + sleep($timeout); + } + + mtr_debug("Process(es) " . + join(' ', keys %pids) . + " is still alive after $total_retries " . + "of sending signal $signal."); +} + +########################################################################### + +sub mtr_im_load_pids($) { + my $im= shift; + + mtr_debug("Loading PID files..."); + + # Obtain mysqld-process pids. + + my $instances = $im->{'instances'}; + + for ( my $idx= 0; $idx < 2; ++$idx ) + { + mtr_debug("IM-guarded mysqld[$idx] PID file: '" . + $instances->[$idx]->{'path_pid'} . "'."); + + my $mysqld_pid; + + if ( -r $instances->[$idx]->{'path_pid'} ) + { + $mysqld_pid= mtr_get_pid_from_file($instances->[$idx]->{'path_pid'}); + mtr_debug("IM-guarded mysqld[$idx] PID: $mysqld_pid."); + } + else + { + $mysqld_pid= undef; + mtr_debug("IM-guarded mysqld[$idx]: no PID file."); + } + + $instances->[$idx]->{'pid'}= $mysqld_pid; + } + + # Re-read Instance Manager PIDs from the file, since during tests Instance + # Manager could have been restarted, so its PIDs could have been changed. 
+ + # - IM-main + + mtr_debug("IM-main PID file: '$im->{path_pid}'."); + + if ( -f $im->{'path_pid'} ) + { + $im->{'pid'} = + mtr_get_pid_from_file($im->{'path_pid'}); + + mtr_debug("IM-main PID: $im->{pid}."); + } + else + { + mtr_debug("IM-main: no PID file."); + $im->{'pid'}= undef; + } + + # - IM-angel + + mtr_debug("IM-angel PID file: '$im->{path_angel_pid}'."); + + if ( -f $im->{'path_angel_pid'} ) + { + $im->{'angel_pid'} = + mtr_get_pid_from_file($im->{'path_angel_pid'}); + + mtr_debug("IM-angel PID: $im->{'angel_pid'}."); + } + else + { + mtr_debug("IM-angel: no PID file."); + $im->{'angel_pid'} = undef; + } +} + +########################################################################### + +sub mtr_im_terminate($) { + my $im= shift; + + # Load pids from pid-files. We should do it first of all, because IM deletes + # them on shutdown. + + mtr_im_load_pids($im); + + mtr_debug("Shutting Instance Manager down..."); + + # Ignoring SIGCHLD so that all children could rest in peace. + + start_reap_all(); + + # Send SIGTERM to IM-main. + + if ( defined $im->{'pid'} ) + { + mtr_debug("IM-main pid: $im->{pid}."); + mtr_debug("Stopping IM-main..."); + + mtr_im_kill_process([ $im->{'pid'} ], 'TERM', 10, 1); + } + else + { + mtr_debug("IM-main pid: n/a."); + } + + # If IM-angel was alive, wait for it to die. + + if ( defined $im->{'angel_pid'} ) + { + mtr_debug("IM-angel pid: $im->{'angel_pid'}."); + mtr_debug("Waiting for IM-angel to die..."); + + my $total_attempts= 10; + + for ( my $cur_attempt=1; $cur_attempt <= $total_attempts; ++$cur_attempt ) + { + unless ( kill (0, $im->{'angel_pid'}) ) + { + mtr_debug("IM-angel died."); + last; + } + + sleep(1); + } + } + else + { + mtr_debug("IM-angel pid: n/a."); + } + + stop_reap_all(); + + # Re-load PIDs. + + mtr_im_load_pids($im); +} + +########################################################################### + +sub mtr_im_check_alive($) { + my $im= shift; + + mtr_debug("Checking whether IM-components are alive..."); + + return 1 if mtr_im_check_main_alive($im); + + return 1 if mtr_im_check_angel_alive($im); + + return 1 if mtr_im_check_mysqlds_alive($im); + + return 0; +} + +########################################################################### + +sub mtr_im_check_main_alive($) { + my $im= shift; + + # Check that the process, that we know to be IM's, is dead. + + if ( defined $im->{'pid'} ) + { + if ( kill (0, $im->{'pid'}) ) + { + mtr_debug("IM-main (PID: $im->{pid}) is alive."); + return 1; + } + else + { + mtr_debug("IM-main (PID: $im->{pid}) is dead."); + } + } + else + { + mtr_debug("No PID file for IM-main."); + } + + # Check that IM does not accept client connections. + + if ( mtr_ping_port($im->{'port'}) ) + { + mtr_debug("IM-main (port: $im->{port}) " . + "is accepting connections."); + + mtr_im_errlog("IM-main is accepting connections on port " . + "$im->{port}, but there is no " . + "process information."); + return 1; + } + else + { + mtr_debug("IM-main (port: $im->{port}) " . + "does not accept connections."); + return 0; + } +} + +########################################################################### + +sub mtr_im_check_angel_alive($) { + my $im= shift; + + # Check that the process, that we know to be the Angel, is dead. 
+ + if ( defined $im->{'angel_pid'} ) + { + if ( kill (0, $im->{'angel_pid'}) ) + { + mtr_debug("IM-angel (PID: $im->{angel_pid}) is alive."); + return 1; + } + else + { + mtr_debug("IM-angel (PID: $im->{angel_pid}) is dead."); + return 0; + } + } + else + { + mtr_debug("No PID file for IM-angel."); + return 0; + } +} + +########################################################################### + +sub mtr_im_check_mysqlds_alive($) { + my $im= shift; + + mtr_debug("Checking for IM-guarded mysqld instances..."); + + my $instances = $im->{'instances'}; + + for ( my $idx= 0; $idx < 2; ++$idx ) + { + mtr_debug("Checking mysqld[$idx]..."); + + return 1 + if mtr_im_check_mysqld_alive($instances->[$idx]); + } +} + +########################################################################### + +sub mtr_im_check_mysqld_alive($) { + my $mysqld_instance= shift; + + # Check that the process is dead. + + if ( defined $mysqld_instance->{'pid'} ) + { + if ( kill (0, $mysqld_instance->{'pid'}) ) + { + mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is alive."); + return 1; + } + else + { + mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is dead."); + } + } + else + { + mtr_debug("No PID file for mysqld instance."); + } + + # Check that mysqld does not accept client connections. + + if ( mtr_ping_port($mysqld_instance->{'port'}) ) + { + mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " . + "is accepting connections."); + + mtr_im_errlog("Mysqld is accepting connections on port " . + "$mysqld_instance->{port}, but there is no " . + "process information."); + return 1; + } + else + { + mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " . + "does not accept connections."); + return 0; + } +} + +########################################################################### + +sub mtr_im_cleanup($) { + my $im= shift; + + mtr_im_rm_file($im->{'path_pid'}); + mtr_im_rm_file($im->{'path_sock'}); + + mtr_im_rm_file($im->{'path_angel_pid'}); + + for ( my $idx= 0; $idx < 2; ++$idx ) + { + mtr_im_rm_file($im->{'instances'}->[$idx]->{'path_pid'}); + mtr_im_rm_file($im->{'instances'}->[$idx]->{'path_sock'}); + } +} + +########################################################################### + +sub mtr_im_rm_file($) +{ + my $file_path= shift; + + if ( -f $file_path ) + { + mtr_debug("Removing '$file_path'..."); + + unless ( unlink($file_path) ) + { + mtr_warning("Can not remove '$file_path'.") + } + } + else + { + mtr_debug("File '$file_path' does not exist already."); + } +} + +########################################################################### + +sub mtr_im_errlog($) { + my $msg= shift; + + # Complain in error log so that a warning will be shown. + # + # TODO: unless BUG#20761 is fixed, we will print the warning to stdout, so + # that it can be seen on console and does not produce pushbuild error. + + # my $errlog= "$opt_vardir/log/mysql-test-run.pl.err"; + # + # open (ERRLOG, ">>$errlog") || + # mtr_error("Can not open error log ($errlog)"); + # + # my $ts= localtime(); + # print ERRLOG + # "Warning: [$ts] $msg\n"; + # + # close ERRLOG; + + my $ts= localtime(); + print "Warning: [$ts] $msg\n"; +} + +########################################################################### + +sub mtr_im_kill($) { + my $im= shift; + + # Re-load PIDs. That can be useful because some processes could have been + # restarted. + + mtr_im_load_pids($im); + + # Ignoring SIGCHLD so that all children could rest in peace. + + start_reap_all(); + + # Kill IM-angel first of all. 
+ + if ( defined $im->{'angel_pid'} ) + { + mtr_debug("Killing IM-angel (PID: $im->{angel_pid})..."); + mtr_im_kill_process([ $im->{'angel_pid'} ], 'KILL', 10, 1) + } + else + { + mtr_debug("IM-angel is dead."); + } + + # Re-load PIDs again. + + mtr_im_load_pids($im); + + # Kill IM-main. + + if ( defined $im->{'pid'} ) + { + mtr_debug("Killing IM-main (PID: $im->pid})..."); + mtr_im_kill_process([ $im->{'pid'} ], 'KILL', 10, 1); + } + else + { + mtr_debug("IM-main is dead."); + } + + # Re-load PIDs again. + + mtr_im_load_pids($im); + + # Kill guarded mysqld instances. + + my @mysqld_pids; + + mtr_debug("Collecting PIDs of mysqld instances to kill..."); + + for ( my $idx= 0; $idx < 2; ++$idx ) + { + my $pid= $im->{'instances'}->[$idx]->{'pid'}; + + unless ( defined $pid ) + { + next; + } + + mtr_debug(" - IM-guarded mysqld[$idx] PID: $pid."); + + push (@mysqld_pids, $pid); + } + + if ( scalar @mysqld_pids > 0 ) + { + mtr_debug("Killing IM-guarded mysqld instances..."); + mtr_im_kill_process(\@mysqld_pids, 'KILL', 10, 1); + } + + # That's all. + + stop_reap_all(); +} + +############################################################################## + +sub mtr_im_wait_for_connection($$$) { + my $im= shift; + my $total_attempts= shift; + my $connect_timeout= shift; + + mtr_debug("Waiting for IM on port $im->{port} " . + "to start accepting connections..."); + + for ( my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt ) + { + mtr_debug("Trying to connect to IM ($cur_attempt of $total_attempts)..."); + + if ( mtr_ping_port($im->{'port'}) ) + { + mtr_debug("IM is accepting connections " . + "on port $im->{port}."); + return 1; + } + + mtr_debug("Sleeping $connect_timeout..."); + sleep($connect_timeout); + } + + mtr_debug("IM does not accept connections " . + "on port $im->{port} after " . + ($total_attempts * $connect_timeout) . " seconds."); + + return 0; +} + +############################################################################## + +sub mtr_im_wait_for_mysqld($$$) { + my $mysqld= shift; + my $total_attempts= shift; + my $connect_timeout= shift; + + mtr_debug("Waiting for IM-guarded mysqld on port $mysqld->{port} " . + "to start accepting connections..."); + + for ( my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt ) + { + mtr_debug("Trying to connect to mysqld " . + "($cur_attempt of $total_attempts)..."); + + if ( mtr_ping_port($mysqld->{'port'}) ) + { + mtr_debug("Mysqld is accepting connections " . + "on port $mysqld->{port}."); + return 1; + } + + mtr_debug("Sleeping $connect_timeout..."); + sleep($connect_timeout); + } + + mtr_debug("Mysqld does not accept connections " . + "on port $mysqld->{port} after " . + ($total_attempts * $connect_timeout) . " seconds."); + + return 0; +} + +############################################################################## +# +# Public operations. 
+# +############################################################################## + +sub mtr_im_start($$) { + my $im = shift; + my $opts = shift; + + mtr_debug("Starting Instance Manager..."); + + my $args; + mtr_init_args(\$args); + mtr_add_arg($args, "--defaults-file=%s", $im->{'defaults_file'}); + + foreach my $opt ( @{$opts} ) + { + mtr_add_arg($args, $opt); + } + + $im->{'pid'} = + mtr_spawn( + $::exe_im, # path to the executable + $args, # cmd-line args + '', # stdin + $im->{'path_log'}, # stdout + $im->{'path_err'}, # stderr + '', # pid file path (not used) + { append_log_file => 1 } # append log files + ); + + unless ( $im->{'pid'} ) + { + mtr_error('Could not start Instance Manager.') + } + + # Instance Manager can be run in daemon mode. In this case, it creates + # several processes and the parent process, created by mtr_spawn(), exits just + # after start. So, we have to obtain Instance Manager PID from the PID file. + + mtr_debug("Waiting for IM to create PID file (" . + "path: '$im->{path_pid}'; " . + "timeout: $im->{start_timeout})..."); + + unless ( sleep_until_file_created($im->{'path_pid'}, + $im->{'start_timeout'}, + -1) ) # real PID is still unknown + { + mtr_debug("IM has not created PID file in $im->{start_timeout} secs."); + mtr_debug("Aborting test suite..."); + + mtr_kill_leftovers(); + + mtr_report("IM has not created PID file in $im->{start_timeout} secs."); + return 0; + } + + $im->{'pid'}= mtr_get_pid_from_file($im->{'path_pid'}); + + mtr_debug("Instance Manager started. PID: $im->{pid}."); + + # Wait until we can connect to IM. + + my $IM_CONNECT_TIMEOUT= 30; + + unless ( mtr_im_wait_for_connection($im, + $IM_CONNECT_TIMEOUT, 1) ) + { + mtr_debug("Can not connect to Instance Manager " . + "in $IM_CONNECT_TIMEOUT seconds after start."); + mtr_debug("Aborting test suite..."); + + mtr_kill_leftovers(); + + mtr_report("Can not connect to Instance Manager " . + "in $IM_CONNECT_TIMEOUT seconds after start."); + return 0; + } + + # Wait for IM to start guarded instances: + # - wait for PID files; + + mtr_debug("Waiting for guarded mysqlds instances to create PID files..."); + + for ( my $idx= 0; $idx < 2; ++$idx ) + { + my $mysqld= $im->{'instances'}->[$idx]; + + if ( exists $mysqld->{'nonguarded'} ) + { + next; + } + + mtr_debug("Waiting for mysqld[$idx] to create PID file (" . + "path: '$mysqld->{path_pid}'; " . + "timeout: $mysqld->{start_timeout})..."); + + unless ( sleep_until_file_created($mysqld->{'path_pid'}, + $mysqld->{'start_timeout'}, + -1) ) # real PID is still unknown + { + mtr_debug("mysqld[$idx] has not created PID file in " . + "$mysqld->{start_timeout} secs."); + mtr_debug("Aborting test suite..."); + + mtr_kill_leftovers(); + + mtr_report("mysqld[$idx] has not created PID file in " . + "$mysqld->{start_timeout} secs."); + return 0; + } + + mtr_debug("PID file for mysqld[$idx] ($mysqld->{path_pid} created."); + } + + # Wait until we can connect to guarded mysqld-instances + # (in other words -- wait for IM to start guarded instances). + + mtr_debug("Waiting for guarded mysqlds to start accepting connections..."); + + for ( my $idx= 0; $idx < 2; ++$idx ) + { + my $mysqld= $im->{'instances'}->[$idx]; + + if ( exists $mysqld->{'nonguarded'} ) + { + next; + } + + mtr_debug("Waiting for mysqld[$idx] to accept connection..."); + + unless ( mtr_im_wait_for_mysqld($mysqld, 30, 1) ) + { + mtr_debug("Can not connect to mysqld[$idx] " . 
+ "in $IM_CONNECT_TIMEOUT seconds after start."); + mtr_debug("Aborting test suite..."); + + mtr_kill_leftovers(); + + mtr_report("Can not connect to mysqld[$idx] " . + "in $IM_CONNECT_TIMEOUT seconds after start."); + return 0; + } + + mtr_debug("mysqld[$idx] started."); + } + + mtr_debug("Instance Manager and its components are up and running."); + + return 1; +} + +############################################################################## + +sub mtr_im_stop($) { + my $im= shift; + + mtr_debug("Stopping Instance Manager..."); + + # Try graceful shutdown. + + mtr_im_terminate($im); + + # Check that all processes died. + + unless ( mtr_im_check_alive($im) ) + { + mtr_debug("Instance Manager has been stopped successfully."); + mtr_im_cleanup($im); + return 1; + } + + # Instance Manager don't want to die. We should kill it. + + mtr_im_errlog("Instance Manager did not shutdown gracefully."); + + mtr_im_kill($im); + + # Check again that all IM-related processes have been killed. + + my $im_is_alive= mtr_im_check_alive($im); + + mtr_im_cleanup($im); + + if ( $im_is_alive ) + { + mtr_debug("Can not kill Instance Manager or its children."); + return 0; + } + + mtr_debug("Instance Manager has been killed successfully."); + return 1; +} + +########################################################################### + +1; diff --git a/mysql-test/lib/mtr_process.pl b/mysql-test/lib/mtr_process.pl index ab2af8c4ccf..868b6d4f1ec 100644 --- a/mysql-test/lib/mtr_process.pl +++ b/mysql-test/lib/mtr_process.pl @@ -26,28 +26,6 @@ sub mtr_kill_processes ($); sub mtr_ping_with_timeout($); sub mtr_ping_port ($); -# Private IM-related operations. - -sub mtr_im_kill_process ($$$); -sub mtr_im_load_pids ($); -sub mtr_im_terminate ($); -sub mtr_im_check_alive ($); -sub mtr_im_check_main_alive ($); -sub mtr_im_check_angel_alive ($); -sub mtr_im_check_mysqlds_alive ($); -sub mtr_im_check_mysqld_alive ($$); -sub mtr_im_cleanup ($); -sub mtr_im_rm_file ($); -sub mtr_im_errlog ($); -sub mtr_im_kill ($); -sub mtr_im_wait_for_connection ($$$); -sub mtr_im_wait_for_mysqld($$$); - -# Public IM-related operations. - -sub mtr_im_start ($$); -sub mtr_im_stop ($$); - # static in C sub spawn_impl ($$$$$$$$); @@ -358,9 +336,16 @@ sub mtr_process_exit_status { # kill IM manager first, else it will restart the servers sub mtr_kill_leftovers () { + mtr_report("Killing Possible Leftover Processes"); mtr_debug("mtr_kill_leftovers(): started."); - mtr_im_stop($::instance_manager, 'mtr_kill_leftovers'); + mkpath("$::opt_vardir/log"); # Needed for mysqladmin log + + # Stop or kill Instance Manager and all its children. If we failed to do + # that, we can only abort -- there is nothing left to do. + + mtr_error("Failed to stop Instance Manager.") + unless mtr_im_stop($::instance_manager); # Start shutdown of masters and slaves. Don't touch IM-managed mysqld # instances -- they should be stopped by mtr_im_stop(). @@ -476,14 +461,6 @@ sub mtr_kill_leftovers () { mtr_debug("Got pid: $pid from file '$pidfile'"); - # Race, could have been removed between I tested with -f - # and the unlink() below, so I better check again with -f - - if ( ! 
unlink($pidfile) and -f $pidfile ) - { - mtr_error("can't remove $pidfile"); - } - if ( $::glob_cygwin_perl or kill(0, $pid) ) { mtr_debug("There is process with pid $pid -- scheduling for kill."); @@ -1103,7 +1080,7 @@ sub mtr_kill_processes ($) { { foreach my $sig (15, 9) { - last if mtr_im_kill_process([ $pid ], $sig, 10); + last if mtr_im_kill_process([ $pid ], $sig, 10, 1); } } } @@ -1138,679 +1115,6 @@ sub mtr_exit ($) { exit($code); } -############################################################################## -# -# Instance Manager management routines. -# -############################################################################## - -sub mtr_im_kill_process ($$$) { - my $pid_lst= shift; - my $signal= shift; - my $timeout= shift; - my $total_attempts= $timeout * 10; - - my %pids; - - foreach my $pid (@{$pid_lst}) - { - $pids{$pid}= 1; - } - - for (my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt) - { - foreach my $pid (keys %pids) - { - mtr_debug("Sending $signal to $pid..."); - - kill($signal, $pid); - - unless (kill (0, $pid)) - { - mtr_debug("Process $pid died."); - delete $pids{$pid}; - } - } - - return if scalar keys %pids == 0; - - mtr_debug("Sleeping 100ms waiting for processes to die..."); - - select(undef, undef, undef, 0.1); - } - - mtr_debug("Process(es) " . - join(' ', keys %pids) . - " is still alive after $total_attempts " . - "of sending signal $signal."); -} - -########################################################################### - -sub mtr_im_load_pids($) { - my $instance_manager= shift; - - mtr_debug("Loading PID files..."); - - # Obtain mysqld-process pids. - - my $instances = $instance_manager->{'instances'}; - - for (my $idx= 0; $idx < 2; ++$idx) - { - mtr_debug("IM-guarded mysqld[$idx] PID file: '" . - $instances->[$idx]->{'path_pid'} . "'."); - - my $mysqld_pid; - - if (-r $instances->[$idx]->{'path_pid'}) - { - $mysqld_pid= mtr_get_pid_from_file($instances->[$idx]->{'path_pid'}); - mtr_debug("IM-guarded mysqld[$idx] PID: $mysqld_pid."); - } - else - { - $mysqld_pid= undef; - mtr_debug("IM-guarded mysqld[$idx]: no PID file."); - } - - $instances->[$idx]->{'pid'}= $mysqld_pid; - } - - # Re-read Instance Manager PIDs from the file, since during tests Instance - # Manager could have been restarted, so its PIDs could have been changed. - - # - IM-main - - mtr_debug("IM-main PID file: '$instance_manager->{path_pid}'."); - - if (-f $instance_manager->{'path_pid'}) - { - $instance_manager->{'pid'} = - mtr_get_pid_from_file($instance_manager->{'path_pid'}); - - mtr_debug("IM-main PID: $instance_manager->{pid}."); - } - else - { - mtr_debug("IM-main: no PID file."); - $instance_manager->{'pid'}= undef; - } - - # - IM-angel - - mtr_debug("IM-angel PID file: '$instance_manager->{path_angel_pid}'."); - - if (-f $instance_manager->{'path_angel_pid'}) - { - $instance_manager->{'angel_pid'} = - mtr_get_pid_from_file($instance_manager->{'path_angel_pid'}); - - mtr_debug("IM-angel PID: $instance_manager->{'angel_pid'}."); - } - else - { - mtr_debug("IM-angel: no PID file."); - $instance_manager->{'angel_pid'} = undef; - } -} - -########################################################################### - -sub mtr_im_terminate($) { - my $instance_manager= shift; - - # Load pids from pid-files. We should do it first of all, because IM deletes - # them on shutdown. - - mtr_im_load_pids($instance_manager); - - mtr_debug("Shutting Instance Manager down..."); - - # Ignoring SIGCHLD so that all children could rest in peace. 
- - start_reap_all(); - - # Send SIGTERM to IM-main. - - if (defined $instance_manager->{'pid'}) - { - mtr_debug("IM-main pid: $instance_manager->{pid}."); - mtr_debug("Stopping IM-main..."); - - mtr_im_kill_process([ $instance_manager->{'pid'} ], 'TERM', 10); - } - else - { - mtr_debug("IM-main pid: n/a."); - } - - # If IM-angel was alive, wait for it to die. - - if (defined $instance_manager->{'angel_pid'}) - { - mtr_debug("IM-angel pid: $instance_manager->{'angel_pid'}."); - mtr_debug("Waiting for IM-angel to die..."); - - my $total_attempts= 10; - - for (my $cur_attempt=1; $cur_attempt <= $total_attempts; ++$cur_attempt) - { - unless (kill (0, $instance_manager->{'angel_pid'})) - { - mtr_debug("IM-angel died."); - last; - } - - sleep(1); - } - } - else - { - mtr_debug("IM-angel pid: n/a."); - } - - stop_reap_all(); - - # Re-load PIDs. - - mtr_im_load_pids($instance_manager); -} - -########################################################################### - -sub mtr_im_check_alive($) { - my $instance_manager= shift; - - mtr_debug("Checking whether IM-components are alive..."); - - return 1 if mtr_im_check_main_alive($instance_manager); - - return 1 if mtr_im_check_angel_alive($instance_manager); - - return 1 if mtr_im_check_mysqlds_alive($instance_manager); - - return 0; -} - -########################################################################### - -sub mtr_im_check_main_alive($) { - my $instance_manager= shift; - - # Check that the process, that we know to be IM's, is dead. - - if (defined $instance_manager->{'pid'}) - { - if (kill (0, $instance_manager->{'pid'})) - { - mtr_debug("IM-main (PID: $instance_manager->{pid}) is alive."); - return 1; - } - else - { - mtr_debug("IM-main (PID: $instance_manager->{pid}) is dead."); - } - } - else - { - mtr_debug("No PID file for IM-main."); - } - - # Check that IM does not accept client connections. - - if (mtr_ping_port($instance_manager->{'port'})) - { - mtr_debug("IM-main (port: $instance_manager->{port}) " . - "is accepting connections."); - - mtr_im_errlog("IM-main is accepting connections on port " . - "$instance_manager->{port}, but there is no " . - "process information."); - return 1; - } - else - { - mtr_debug("IM-main (port: $instance_manager->{port}) " . - "does not accept connections."); - return 0; - } -} - -########################################################################### - -sub mtr_im_check_angel_alive($) { - my $instance_manager= shift; - - # Check that the process, that we know to be the Angel, is dead. 
- - if (defined $instance_manager->{'angel_pid'}) - { - if (kill (0, $instance_manager->{'angel_pid'})) - { - mtr_debug("IM-angel (PID: $instance_manager->{angel_pid}) is alive."); - return 1; - } - else - { - mtr_debug("IM-angel (PID: $instance_manager->{angel_pid}) is dead."); - return 0; - } - } - else - { - mtr_debug("No PID file for IM-angel."); - return 0; - } -} - -########################################################################### - -sub mtr_im_check_mysqlds_alive($) { - my $instance_manager= shift; - - mtr_debug("Checking for IM-guarded mysqld instances..."); - - my $instances = $instance_manager->{'instances'}; - - for (my $idx= 0; $idx < 2; ++$idx) - { - mtr_debug("Checking mysqld[$idx]..."); - - return 1 - if mtr_im_check_mysqld_alive($instance_manager, $instances->[$idx]); - } -} - -########################################################################### - -sub mtr_im_check_mysqld_alive($$) { - my $instance_manager= shift; - my $mysqld_instance= shift; - - # Check that the process is dead. - - if (defined $instance_manager->{'pid'}) - { - if (kill (0, $instance_manager->{'pid'})) - { - mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is alive."); - return 1; - } - else - { - mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is dead."); - } - } - else - { - mtr_debug("No PID file for mysqld instance."); - } - - # Check that mysqld does not accept client connections. - - if (mtr_ping_port($mysqld_instance->{'port'})) - { - mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " . - "is accepting connections."); - - mtr_im_errlog("Mysqld is accepting connections on port " . - "$mysqld_instance->{port}, but there is no " . - "process information."); - return 1; - } - else - { - mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " . - "does not accept connections."); - return 0; - } -} - -########################################################################### - -sub mtr_im_cleanup($) { - my $instance_manager= shift; - - mtr_im_rm_file($instance_manager->{'path_pid'}); - mtr_im_rm_file($instance_manager->{'path_sock'}); - - mtr_im_rm_file($instance_manager->{'path_angel_pid'}); - - for (my $idx= 0; $idx < 2; ++$idx) - { - mtr_im_rm_file($instance_manager->{'instances'}->[$idx]->{'path_pid'}); - mtr_im_rm_file($instance_manager->{'instances'}->[$idx]->{'path_sock'}); - } -} - -########################################################################### - -sub mtr_im_rm_file($) -{ - my $file_path= shift; - - if (-f $file_path) - { - mtr_debug("Removing '$file_path'..."); - - mtr_warning("Can not remove '$file_path'.") - unless unlink($file_path); - } - else - { - mtr_debug("File '$file_path' does not exist already."); - } -} - -########################################################################### - -sub mtr_im_errlog($) { - my $msg= shift; - - # Complain in error log so that a warning will be shown. - # - # TODO: unless BUG#20761 is fixed, we will print the warning to stdout, so - # that it can be seen on console and does not produce pushbuild error. - - # my $errlog= "$opt_vardir/log/mysql-test-run.pl.err"; - # - # open (ERRLOG, ">>$errlog") || - # mtr_error("Can not open error log ($errlog)"); - # - # my $ts= localtime(); - # print ERRLOG - # "Warning: [$ts] $msg\n"; - # - # close ERRLOG; - - my $ts= localtime(); - print "Warning: [$ts] $msg\n"; -} - -########################################################################### - -sub mtr_im_kill($) { - my $instance_manager= shift; - - # Re-load PIDs. 
That can be useful because some processes could have been - # restarted. - - mtr_im_load_pids($instance_manager); - - # Ignoring SIGCHLD so that all children could rest in peace. - - start_reap_all(); - - # Kill IM-angel first of all. - - if (defined $instance_manager->{'angel_pid'}) - { - mtr_debug("Killing IM-angel (PID: $instance_manager->{angel_pid})..."); - mtr_im_kill_process([ $instance_manager->{'angel_pid'} ], 'KILL', 10); - } - else - { - mtr_debug("IM-angel is dead."); - } - - # Re-load PIDs again. - - mtr_im_load_pids($instance_manager); - - # Kill IM-main. - - if (defined $instance_manager->{'pid'}) - { - mtr_debug("Killing IM-main (PID: $instance_manager->pid})..."); - mtr_im_kill_process([ $instance_manager->{'pid'} ], 'KILL', 10); - } - else - { - mtr_debug("IM-main is dead."); - } - - # Re-load PIDs again. - - mtr_im_load_pids($instance_manager); - - # Kill guarded mysqld instances. - - my @mysqld_pids; - - mtr_debug("Collecting PIDs of mysqld instances to kill..."); - - for (my $idx= 0; $idx < 2; ++$idx) - { - my $pid= $instance_manager->{'instances'}->[$idx]->{'pid'}; - - next unless defined $pid; - - mtr_debug(" - IM-guarded mysqld[$idx] PID: $pid."); - - push (@mysqld_pids, $pid); - } - - if (scalar @mysqld_pids > 0) - { - mtr_debug("Killing IM-guarded mysqld instances..."); - mtr_im_kill_process(\@mysqld_pids, 'KILL', 10); - } - - # That's all. - - stop_reap_all(); -} - -############################################################################## - -sub mtr_im_wait_for_connection($$$) { - my $instance_manager= shift; - my $total_attempts= shift; - my $connect_timeout= shift; - - mtr_debug("Waiting for IM on port $instance_manager->{port} " . - "to start accepting connections..."); - - for (my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt) - { - mtr_debug("Trying to connect to IM ($cur_attempt of $total_attempts)..."); - - if (mtr_ping_port($instance_manager->{'port'})) - { - mtr_debug("IM is accepting connections " . - "on port $instance_manager->{port}."); - return 1; - } - - mtr_debug("Sleeping $connect_timeout..."); - sleep($connect_timeout); - } - - mtr_debug("IM does not accept connections " . - "on port $instance_manager->{port} after " . - ($total_attempts * $connect_timeout) . " seconds."); - - return 0; -} - -############################################################################## - -sub mtr_im_wait_for_mysqld($$$) { - my $mysqld= shift; - my $total_attempts= shift; - my $connect_timeout= shift; - - mtr_debug("Waiting for IM-guarded mysqld on port $mysqld->{port} " . - "to start accepting connections..."); - - for (my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt) - { - mtr_debug("Trying to connect to mysqld " . - "($cur_attempt of $total_attempts)..."); - - if (mtr_ping_port($mysqld->{'port'})) - { - mtr_debug("Mysqld is accepting connections " . - "on port $mysqld->{port}."); - return 1; - } - - mtr_debug("Sleeping $connect_timeout..."); - sleep($connect_timeout); - } - - mtr_debug("Mysqld does not accept connections " . - "on port $mysqld->{port} after " . - ($total_attempts * $connect_timeout) . 
" seconds."); - - return 0; -} - -############################################################################## - -sub mtr_im_start($$) { - my $instance_manager = shift; - my $opts = shift; - - mtr_debug("Starting Instance Manager..."); - - my $args; - mtr_init_args(\$args); - mtr_add_arg($args, "--defaults-file=%s", - $instance_manager->{'defaults_file'}); - - foreach my $opt (@{$opts}) - { - mtr_add_arg($args, $opt); - } - - $instance_manager->{'pid'} = - mtr_spawn( - $::exe_im, # path to the executable - $args, # cmd-line args - '', # stdin - $instance_manager->{'path_log'}, # stdout - $instance_manager->{'path_err'}, # stderr - '', # pid file path (not used) - { append_log_file => 1 } # append log files - ); - - if ( ! $instance_manager->{'pid'} ) - { - mtr_report('Could not start Instance Manager'); - return; - } - - # Instance Manager can be run in daemon mode. In this case, it creates - # several processes and the parent process, created by mtr_spawn(), exits just - # after start. So, we have to obtain Instance Manager PID from the PID file. - - if ( ! sleep_until_file_created( - $instance_manager->{'path_pid'}, - $instance_manager->{'start_timeout'}, - -1)) # real PID is still unknown - { - mtr_report("Instance Manager PID file is missing"); - return; - } - - $instance_manager->{'pid'} = - mtr_get_pid_from_file($instance_manager->{'path_pid'}); - - mtr_debug("Instance Manager started. PID: $instance_manager->{pid}."); - - # Wait until we can connect to IM. - - my $IM_CONNECT_TIMEOUT= 30; - - unless (mtr_im_wait_for_connection($instance_manager, - $IM_CONNECT_TIMEOUT, 1)) - { - mtr_debug("Can not connect to Instance Manager " . - "in $IM_CONNECT_TIMEOUT seconds after start."); - mtr_debug("Aborting test suite..."); - - mtr_kill_leftovers(); - - mtr_error("Can not connect to Instance Manager " . - "in $IM_CONNECT_TIMEOUT seconds after start."); - } - - # Wait until we can connect to guarded mysqld-instances - # (in other words -- wait for IM to start guarded instances). - - for (my $idx= 0; $idx < 2; ++$idx) - { - my $mysqld= $instance_manager->{'instances'}->[$idx]; - - next if exists $mysqld->{'nonguarded'}; - - mtr_debug("Waiting for mysqld[$idx] to start..."); - - unless (mtr_im_wait_for_mysqld($mysqld, 30, 1)) - { - mtr_debug("Can not connect to mysqld[$idx] " . - "in $IM_CONNECT_TIMEOUT seconds after start."); - mtr_debug("Aborting test suite..."); - - mtr_kill_leftovers(); - - mtr_error("Can not connect to mysqld[$idx] " . - "in $IM_CONNECT_TIMEOUT seconds after start."); - } - - mtr_debug("mysqld[$idx] started."); - } - - mtr_debug("Instance Manager started."); - - mtr_im_load_pids($instance_manager); -} - -############################################################################## - -sub mtr_im_stop($$) { - my $instance_manager= shift; - my $where= shift; - - mtr_debug("Stopping Instance Manager..."); - - # Try graceful shutdown. - - mtr_im_terminate($instance_manager); - - # Check that all processes died. - - unless (mtr_im_check_alive($instance_manager)) - { - mtr_debug("Instance Manager has been stopped successfully."); - mtr_im_cleanup($instance_manager); - return 1; - } - - # Instance Manager don't want to die. We should kill it. - - mtr_im_errlog("[$where] Instance Manager did not shutdown gracefully."); - - mtr_im_kill($instance_manager); - - # Check again that all IM-related processes have been killed. 
- - my $im_is_alive= mtr_im_check_alive($instance_manager); - - mtr_im_cleanup($instance_manager); - - if ($im_is_alive) - { - mtr_error("Can not kill Instance Manager or its children."); - return 0; - } - - mtr_debug("Instance Manager has been killed successfully."); - return 1; -} - ########################################################################### 1; diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run-shell.sh similarity index 100% rename from mysql-test/mysql-test-run.sh rename to mysql-test/mysql-test-run-shell.sh diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 4e3a8974002..e5ed8a99304 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -88,6 +88,7 @@ use strict; #use diagnostics; require "lib/mtr_cases.pl"; +require "lib/mtr_im.pl"; require "lib/mtr_process.pl"; require "lib/mtr_timer.pl"; require "lib/mtr_io.pl"; @@ -133,7 +134,6 @@ our @mysqld_src_dirs= our $glob_win32= 0; # OS and native Win32 executables our $glob_win32_perl= 0; # ActiveState Win32 Perl our $glob_cygwin_perl= 0; # Cygwin Perl -our $glob_cygwin_shell= undef; our $glob_mysql_test_dir= undef; our $glob_mysql_bench_dir= undef; our $glob_hostname= undef; @@ -257,7 +257,7 @@ our $opt_result_ext; our $opt_skip; our $opt_skip_rpl; -our $use_slaves; +our $max_slave_num= 0; our $use_innodb; our $opt_skip_test; our $opt_skip_im; @@ -269,7 +269,7 @@ our $opt_sleep_time_for_delete= 10; our $opt_testcase_timeout; our $opt_suite_timeout; my $default_testcase_timeout= 15; # 15 min max -my $default_suite_timeout= 120; # 2 hours max +my $default_suite_timeout= 180; # 3 hours max our $opt_socket; @@ -428,7 +428,13 @@ sub main () { { $need_ndbcluster||= $test->{ndb_test}; $need_im||= $test->{component_id} eq 'im'; - $use_slaves||= $test->{slave_num}; + + # Count max number of slaves used by a test case + if ( $test->{slave_num} > $max_slave_num) + { + $max_slave_num= $test->{slave_num}; + mtr_error("Too many slaves") if $max_slave_num > 3; + } $use_innodb||= $test->{'innodb_test'}; } $opt_skip_ndbcluster= $opt_skip_ndbcluster_slave= 1 @@ -484,10 +490,7 @@ sub initial_setup () { { # Windows programs like 'mysqld' needs Windows paths $glob_mysql_test_dir= `cygpath -m "$glob_mysql_test_dir"`; - my $shell= $ENV{'SHELL'} || "/bin/bash"; - $glob_cygwin_shell= `cygpath -w "$shell"`; # The Windows path c:\... chomp($glob_mysql_test_dir); - chomp($glob_cygwin_shell); } $glob_basedir= dirname($glob_mysql_test_dir); # Expect mysql-bench to be located adjacent to the source tree, by default @@ -909,13 +912,13 @@ sub command_line_setup () { if ( ! $opt_testcase_timeout ) { $opt_testcase_timeout= $default_testcase_timeout; - $opt_testcase_timeout*= 10 if defined $opt_valgrind; + $opt_testcase_timeout*= 10 if $opt_valgrind; } if ( ! 
$opt_suite_timeout ) { $opt_suite_timeout= $default_suite_timeout; - $opt_suite_timeout*= 4 if defined $opt_valgrind; + $opt_suite_timeout*= 6 if $opt_valgrind; } # Increase times to wait for executables to start if using valgrind @@ -947,6 +950,7 @@ sub command_line_setup () { $master->[0]= { + pid => 0, type => "master", idx => 0, path_myddir => "$opt_vardir/master-data", @@ -962,6 +966,7 @@ sub command_line_setup () { $master->[1]= { + pid => 0, type => "master", idx => 1, path_myddir => "$opt_vardir/master1-data", @@ -977,6 +982,7 @@ sub command_line_setup () { $slave->[0]= { + pid => 0, type => "slave", idx => 0, path_myddir => "$opt_vardir/slave-data", @@ -993,6 +999,7 @@ sub command_line_setup () { $slave->[1]= { + pid => 0, type => "slave", idx => 1, path_myddir => "$opt_vardir/slave1-data", @@ -1008,6 +1015,7 @@ sub command_line_setup () { $slave->[2]= { + pid => 0, type => "slave", idx => 2, path_myddir => "$opt_vardir/slave2-data", @@ -1044,6 +1052,7 @@ sub command_line_setup () { path_datadir => "$opt_vardir/im_mysqld_1.data", path_sock => "$sockdir/mysqld_1.sock", path_pid => "$opt_vardir/run/mysqld_1.pid", + start_timeout => 400, # enough time create innodb tables old_log_format => 1 }; @@ -1055,6 +1064,7 @@ sub command_line_setup () { path_sock => "$sockdir/mysqld_2.sock", path_pid => "$opt_vardir/run/mysqld_2.pid", nonguarded => 1, + start_timeout => 400, # enough time create innodb tables old_log_format => 1 }; @@ -1068,7 +1078,7 @@ sub command_line_setup () { connect_string => "$opt_ndbconnectstring", path_pid => "$data_dir/ndb_3.pid", # Nodes + 1 pid => 0, # pid of ndb_mgmd - installed_ok => 'NO', + installed_ok => 0, }; $data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port_slave"; @@ -1081,7 +1091,7 @@ sub command_line_setup () { connect_string => "$opt_ndbconnectstring_slave", path_pid => "$data_dir/ndb_2.pid", # Nodes + 1 pid => 0, # pid of ndb_mgmd - installed_ok => 'NO', + installed_ok => 0, }; # Init pids of ndbd's @@ -1121,11 +1131,9 @@ sub snapshot_setup () { $master->[0]->{'path_myddir'}, $master->[1]->{'path_myddir'}); - if ($use_slaves) + for (my $idx= 0; $idx < $max_slave_num; $idx++) { - push @data_dir_lst, ($slave->[0]->{'path_myddir'}, - $slave->[1]->{'path_myddir'}, - $slave->[2]->{'path_myddir'}); + push(@data_dir_lst, $slave->[$idx]->{'path_myddir'}); } unless ($opt_skip_im) @@ -1176,9 +1184,9 @@ sub executable_setup () { "$path_client_bindir/mysqld-nt", "$path_client_bindir/mysqld", "$path_client_bindir/mysqld-max", + "$path_client_bindir/mysqld-debug", "$glob_basedir/sql/release/mysqld", "$glob_basedir/sql/debug/mysqld"); - "$path_client_bindir/mysqld-debug", $path_language= mtr_path_exists("$glob_basedir/share/english/", "$glob_basedir/sql/share/english/"); $path_charsetsdir= mtr_path_exists("$glob_basedir/share/charsets", @@ -1244,7 +1252,9 @@ sub executable_setup () { $exe_ndbd= "$glob_basedir/storage/ndb/src/kernel/ndbd"; $exe_ndb_mgmd= "$glob_basedir/storage/ndb/src/mgmsrv/ndb_mgmd"; $lib_udf_example= - mtr_file_exists("$glob_basedir/sql/.libs/udf_example.so"); + mtr_file_exists("$glob_basedir/sql/.libs/udf_example.so", + "$glob_basedir/sql/release/udf_example.dll", + "$glob_basedir/sql/debug/udf_example.dll"); } else { @@ -1339,7 +1349,7 @@ sub environment_setup () { umask(022); - my $extra_ld_library_paths; + my @ld_library_paths; # -------------------------------------------------------------------------- # Setup LD_LIBRARY_PATH so the libraries from this distro/clone @@ -1347,34 +1357,50 @@ sub environment_setup () { # 
-------------------------------------------------------------------------- if ( $opt_source_dist ) { - $extra_ld_library_paths= "$glob_basedir/libmysql/.libs/" . - ":$glob_basedir/libmysql_r/.libs/"; + push(@ld_library_paths, "$glob_basedir/libmysql/.libs/", + "$glob_basedir/libmysql_r/.libs/"); } else { - $extra_ld_library_paths= "$glob_basedir/lib"; + push(@ld_library_paths, "$glob_basedir/lib"); + } + + # -------------------------------------------------------------------------- + # Add the path where libndbclient can be found + # -------------------------------------------------------------------------- + if ( $opt_ndbcluster_supported ) + { + push(@ld_library_paths, "$glob_basedir/storage/ndb/src/.libs"); } # -------------------------------------------------------------------------- # Add the path where mysqld will find udf_example.so # -------------------------------------------------------------------------- - $extra_ld_library_paths .= ":" . - ($lib_udf_example ? dirname($lib_udf_example) : ""); - - # -------------------------------------------------------------------------- - # Add the path where libndbclient can be found - # -------------------------------------------------------------------------- - if ( $opt_ndbcluster_supported ) + if ( $lib_udf_example ) { - $extra_ld_library_paths .= ":$glob_basedir/storage/ndb/src/.libs"; + push(@ld_library_paths, dirname($lib_udf_example)); } - $ENV{'LD_LIBRARY_PATH'}= - "$extra_ld_library_paths" . - ($ENV{'LD_LIBRARY_PATH'} ? ":$ENV{'LD_LIBRARY_PATH'}" : ""); - $ENV{'DYLD_LIBRARY_PATH'}= - "$extra_ld_library_paths" . - ($ENV{'DYLD_LIBRARY_PATH'} ? ":$ENV{'DYLD_LIBRARY_PATH'}" : ""); + # -------------------------------------------------------------------------- + # Valgrind need to be run with debug libraries otherwise it's almost + # impossible to add correct supressions, that means if "/usr/lib/debug" + # is available, it should be added to + # LD_LIBRARY_PATH + # -------------------------------------------------------------------------- + my $debug_libraries_path= "/usr/lib/debug"; + if ( $opt_valgrind and -d $debug_libraries_path ) + { + push(@ld_library_paths, $debug_libraries_path); + } + + $ENV{'LD_LIBRARY_PATH'}= join(":", @ld_library_paths, + split(':', $ENV{'LD_LIBRARY_PATH'})); + mtr_debug("LD_LIBRARY_PATH: $ENV{'LD_LIBRARY_PATH'}"); + + $ENV{'DYLD_LIBRARY_PATH'}= join(":", @ld_library_paths, + split(':', $ENV{'DYLD_LIBRARY_PATH'})); + mtr_debug("DYLD_LIBRARY_PATH: $ENV{'DYLD_LIBRARY_PATH'}"); + # -------------------------------------------------------------------------- # Also command lines in .opt files may contain env vars @@ -1541,7 +1567,8 @@ sub environment_setup () { my $cmdline_mysql= "$exe_mysql --no-defaults --host=localhost --user=root --password= " . "--port=$master->[0]->{'port'} " . - "--socket=$master->[0]->{'path_sock'}"; + "--socket=$master->[0]->{'path_sock'} ". + "--character-sets-dir=$path_charsetsdir"; $ENV{'MYSQL'}= $cmdline_mysql; @@ -1664,14 +1691,10 @@ sub kill_running_server () { { # Ensure that no old mysqld test servers are running # This is different from terminating processes we have - # started from ths run of the script, this is terminating + # started from this run of the script, this is terminating # leftovers from previous runs. 
- mtr_report("Killing Possible Leftover Processes"); - mkpath("$opt_vardir/log"); # Needed for mysqladmin log - mtr_kill_leftovers(); - } } @@ -2256,11 +2279,10 @@ sub mysql_install_db () { # FIXME check if testcase really is using second master copy_install_db('master', $master->[1]->{'path_myddir'}); - if ( $use_slaves ) + # Install the number of slave databses needed + for (my $idx= 0; $idx < $max_slave_num; $idx++) { - install_db('slave', $slave->[0]->{'path_myddir'}); - install_db('slave', $slave->[1]->{'path_myddir'}); - install_db('slave', $slave->[2]->{'path_myddir'}); + copy_install_db("slave".($idx+1), $slave->[$idx]->{'path_myddir'}); } if ( ! $opt_skip_im ) @@ -2271,7 +2293,7 @@ sub mysql_install_db () { my $cluster_started_ok= 1; # Assume it can be started if (ndbcluster_start_install($clusters->[0]) || - $use_slaves && ndbcluster_start_install($clusters->[1])) + $max_slave_num && ndbcluster_start_install($clusters->[1])) { mtr_warning("Failed to start install of cluster"); $cluster_started_ok= 0; @@ -2282,21 +2304,18 @@ sub mysql_install_db () { next if !$cluster->{'pid'}; - $cluster->{'installed_ok'}= "YES"; # Assume install suceeds + $cluster->{'installed_ok'}= 1; # Assume install suceeds if (ndbcluster_wait_started($cluster, "")) { # failed to install, disable usage and flag that its no ok mtr_report("ndbcluster_install of $cluster->{'name'} failed"); - $cluster->{"installed_ok"}= "NO"; + $cluster->{"installed_ok"}= 0; $cluster_started_ok= 0; } } - $ENV{'NDB_STATUS_OK'}= $clusters->[0]->{'installed_ok'}; - $ENV{'NDB_SLAVE_STATUS_OK'}= $clusters->[1]->{'installed_ok'};; - if ( ! $cluster_started_ok ) { if ( $opt_force) @@ -2374,6 +2393,12 @@ sub install_db ($$) { mtr_add_arg($args, "--skip-ndbcluster"); mtr_add_arg($args, "--tmpdir=."); + if ( $opt_debug ) + { + mtr_add_arg($args, "--debug=d:t:i:A,%s/log/bootstrap_%s.trace", + $opt_vardir_trace, $type); + } + if ( ! $opt_netware ) { mtr_add_arg($args, "--language=%s", $path_language); @@ -2488,6 +2513,28 @@ sub im_prepare_data_dir($) { } } + + +# +# Restore snapshot of the installed slave databases +# if the snapshot exists +# +sub restore_slave_databases ($) { + my ($num_slaves)= @_; + + if ( -d $path_snapshot) + { + for (my $idx= 0; $idx < $num_slaves; $idx++) + { + my $data_dir= $slave->[$idx]->{'path_myddir'}; + my $name= basename($data_dir); + rmtree($data_dir); + mtr_copy_dir("$path_snapshot/$name", $data_dir); + } + } +} + + sub run_testcase_check_skip_test($) { my ($tinfo)= @_; @@ -2507,7 +2554,7 @@ sub run_testcase_check_skip_test($) } # If test needs cluster, check that master installed ok - if ( $tinfo->{'ndb_test'} and $clusters->[0]->{'installed_ok'} eq "NO" ) + if ( $tinfo->{'ndb_test'} and !$clusters->[0]->{'installed_ok'} ) { mtr_report_test_name($tinfo); mtr_report_test_failed($tinfo); @@ -2516,7 +2563,7 @@ sub run_testcase_check_skip_test($) # If test needs slave cluster, check that it installed ok if ( $tinfo->{'ndb_test'} and $tinfo->{'slave_num'} and - $clusters->[1]->{'installed_ok'} eq "NO" ) + !$clusters->[1]->{'installed_ok'} ) { mtr_report_test_name($tinfo); mtr_report_test_failed($tinfo); @@ -2595,7 +2642,6 @@ sub run_testcase ($) { { run_testcase_stop_servers($tinfo, $master_restart, $slave_restart); } - my $died= mtr_record_dead_children(); if ($died or $master_restart or $slave_restart) { @@ -2659,7 +2705,10 @@ sub run_testcase ($) { if ( ! 
$glob_use_running_server and $tinfo->{'component_id'} eq 'im' ) { - mtr_im_stop($instance_manager, $tinfo->{'name'}); + unless ( mtr_im_stop($instance_manager, $tinfo->{'name'}) ) + { + mtr_error("Failed to stop Instance Manager.") + } } } @@ -2881,7 +2930,7 @@ sub mysqld_arguments ($$$$$) { mtr_add_arg($args, "%s--console", $prefix); mtr_add_arg($args, "%s--basedir=%s", $prefix, $path_my_basedir); mtr_add_arg($args, "%s--character-sets-dir=%s", $prefix, $path_charsetsdir); - mtr_add_arg($args, "%s--core", $prefix); + mtr_add_arg($args, "%s--log-bin-trust-function-creators", $prefix); mtr_add_arg($args, "%s--default-character-set=latin1", $prefix); mtr_add_arg($args, "%s--language=%s", $prefix, $path_language); @@ -2942,8 +2991,6 @@ sub mysqld_arguments ($$$$$) { mtr_add_arg($args, "%s--datadir=%s", $prefix, $slave->[$idx]->{'path_myddir'}); - # FIXME slave get this option twice?! - mtr_add_arg($args, "%s--exit-info=256", $prefix); mtr_add_arg($args, "%s--init-rpl-role=slave", $prefix); if (! $opt_skip_slave_binlog) { @@ -3061,9 +3108,22 @@ sub mysqld_arguments ($$$$$) { mtr_add_arg($args, "%s--user=root", $prefix); } + my $found_skip_core= 0; foreach my $arg ( @opt_extra_mysqld_opt, @$extra_opt ) { - mtr_add_arg($args, "%s%s", $prefix, $arg); + # Allow --skip-core-file to be set in master.opt file + if ($arg eq "--skip-core-file") + { + $found_skip_core= 1; + } + else + { + mtr_add_arg($args, "%s%s", $prefix, $arg); + } + } + if ( !$found_skip_core ) + { + mtr_add_arg($args, "%s%s", $prefix, "--core-file"); } if ( $opt_bench ) @@ -3073,7 +3133,6 @@ sub mysqld_arguments ($$$$$) { } elsif ( $type eq 'master' ) { - mtr_add_arg($args, "%s--exit-info=256", $prefix); mtr_add_arg($args, "%s--open-files-limit=1024", $prefix); mtr_add_arg($args, "%s--log=%s", $prefix, $master->[0]->{'path_mylog'}); } @@ -3105,13 +3164,13 @@ sub mysqld_start ($$$) { { $exe= $exe_master_mysqld; } - if ( $type eq 'slave' ) + elsif ( $type eq 'slave' ) { $exe= $exe_slave_mysqld; } else { - $exe= $exe_mysqld; + mtr_error("Unknown 'type' \"$type\" passed to mysqld_start"); } mtr_init_args(\$args); @@ -3197,7 +3256,10 @@ sub stop_all_servers () { print "Stopping All Servers\n"; print "Shutting-down Instance Manager\n"; - mtr_im_stop($instance_manager, "stop_all_servers"); + unless (mtr_im_stop($instance_manager, "stop_all_servers")) + { + mtr_error("Failed to stop Instance Manager.") + } my %admin_pids; # hash of admin processes that requests shutdown my @kill_pids; # list of processes to shutdown/kill @@ -3412,7 +3474,6 @@ sub run_testcase_stop_servers($$$) { my %admin_pids; # hash of admin processes that requests shutdown my @kill_pids; # list of processes to shutdown/kill - # Remember if we restarted for this test case $tinfo->{'restarted'}= $do_restart; @@ -3603,7 +3664,13 @@ sub run_testcase_start_servers($) { im_create_defaults_file($instance_manager); - mtr_im_start($instance_manager, $tinfo->{im_opts}); + unless ( mtr_im_start($instance_manager, $tinfo->{im_opts}) ) + { + report_failure_and_restart($tinfo); + mtr_report("Failed to start Instance Manager. " . + "The test '$tname' is marked as failed."); + return; + } } # ---------------------------------------------------------------------- @@ -3613,6 +3680,8 @@ sub run_testcase_start_servers($) { { mtr_tofile($slave->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n"); + restore_slave_databases($tinfo->{'slave_num'}); + do_before_start_slave($tname,$tinfo->{'slave_sh'}); if ( ! 
$opt_skip_ndbcluster_slave and @@ -3722,7 +3791,6 @@ sub run_check_testcase ($$) { sub run_mysqltest ($) { my ($tinfo)= @_; - my $exe= $exe_mysqltest; my $args; diff --git a/mysql-test/r/analyse.result b/mysql-test/r/analyse.result index ba56a98b4a9..b1ed1ea2bed 100644 --- a/mysql-test/r/analyse.result +++ b/mysql-test/r/analyse.result @@ -141,3 +141,16 @@ test.t1.product Computer TV 2 8 0 0 4.2500 NULL ENUM('Computer','Phone','TV') NO sum(profit) 10 6900 11 11 0 0 1946.2500 2867.6719 ENUM('10','275','600','6900') NOT NULL avg(profit) 10.0000 1380.0000 16 16 0 0 394.68750000 570.20033144 ENUM('10.0000','68.7500','120.0000','1380.0000') NOT NULL drop table t1,t2; +create table t1 (f1 double(10,5), f2 char(10), f3 double(10,5)); +insert into t1 values (5.999, "5.9999", 5.99999), (9.555, "9.5555", 9.55555); +select f1 from t1 procedure analyse(1, 1); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t1.f1 5.99900 9.55500 7 7 0 0 7.77700 1.77800 FLOAT(4,3) NOT NULL +select f2 from t1 procedure analyse(1, 1); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t1.f2 5.9999 9.5555 6 6 0 0 6.0000 NULL FLOAT(5,4) UNSIGNED NOT NULL +select f3 from t1 procedure analyse(1, 1); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t1.f3 5.99999 9.55555 7 7 0 0 7.77777 1.77778 FLOAT(6,5) NOT NULL +drop table t1; +End of 4.1 tests diff --git a/mysql-test/r/binlog_row_binlog.result b/mysql-test/r/binlog_row_binlog.result index 28f2284d3d2..7cbfa525798 100644 --- a/mysql-test/r/binlog_row_binlog.result +++ b/mysql-test/r/binlog_row_binlog.result @@ -250,11 +250,6 @@ set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; insert delayed into t1 values (207); insert delayed into t1 values (null); insert delayed into t1 values (300); -select * from t1; -a -207 -208 -300 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) @@ -268,4 +263,20 @@ master-bin.000001 # Table_map 1 # table_id: # (test.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Table_map 1 # table_id: # (test.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +insert delayed into t1 values (null),(null),(null),(null); +insert delayed into t1 values (null),(null),(400),(null); +11 == 11 +select * from t1; +a +207 +208 +300 +301 +302 +303 +304 +305 +306 +400 +401 drop table t1; diff --git a/mysql-test/r/binlog_statement_insert_delayed.result b/mysql-test/r/binlog_statement_insert_delayed.result new file mode 100644 index 00000000000..7a1b9a7ec9b --- /dev/null +++ b/mysql-test/r/binlog_statement_insert_delayed.result @@ -0,0 +1,29 @@ +create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; +set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; +insert delayed into t1 values (207); +insert delayed into t1 values (null); +insert delayed into t1 values (300); +show binlog events from 102; +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam +master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (207) +master-bin.000001 # Intvar 1 # 
INSERT_ID=208 +master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (null) +master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (300) +insert delayed into t1 values (null),(null),(null),(null); +insert delayed into t1 values (null),(null),(400),(null); +11 == 11 +select * from t1; +a +207 +208 +300 +301 +302 +303 +304 +305 +306 +400 +401 +drop table t1; diff --git a/mysql-test/r/binlog_stm_binlog.result b/mysql-test/r/binlog_stm_binlog.result index 60735be3ac6..eed2df28d1f 100644 --- a/mysql-test/r/binlog_stm_binlog.result +++ b/mysql-test/r/binlog_stm_binlog.result @@ -160,11 +160,6 @@ set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; insert delayed into t1 values (207); insert delayed into t1 values (null); insert delayed into t1 values (300); -select * from t1; -a -207 -208 -300 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) @@ -172,8 +167,26 @@ master-bin.000001 # Intvar 1 # INSERT_ID=127 master-bin.000001 # Query 1 # use `test`; insert into t1 values(null) master-bin.000001 # Query 1 # use `test`; drop table t1 master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam -master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (207) -master-bin.000001 # Intvar 1 # INSERT_ID=208 -master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (null) -master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (300) +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +insert delayed into t1 values (null),(null),(null),(null); +insert delayed into t1 values (null),(null),(400),(null); +11 == 11 +select * from t1; +a +207 +208 +300 +301 +302 +303 +304 +305 +306 +400 +401 drop table t1; diff --git a/mysql-test/r/innodb_cache.result b/mysql-test/r/cache_innodb.result similarity index 85% rename from mysql-test/r/innodb_cache.result rename to mysql-test/r/cache_innodb.result index 5c494814df9..7f9b3e279a9 100644 --- a/mysql-test/r/innodb_cache.result +++ b/mysql-test/r/cache_innodb.result @@ -1,7 +1,8 @@ +SET SESSION STORAGE_ENGINE = InnoDB; drop table if exists t1,t2,t3; flush status; set autocommit=0; -create table t1 (a int not null) engine=innodb; +create table t1 (a int not null); insert into t1 values (1),(2),(3); select * from t1; a @@ -15,7 +16,7 @@ drop table t1; commit; set autocommit=1; begin; -create table t1 (a int not null) engine=innodb; +create table t1 (a int not null); insert into t1 values (1),(2),(3); select * from t1; a @@ -27,9 +28,9 @@ Variable_name Value Qcache_queries_in_cache 1 drop table t1; commit; -create table t1 (a int not null) engine=innodb; -create table t2 (a int not null) engine=innodb; -create table t3 (a int not null) engine=innodb; +create table t1 (a int not null); +create table t2 (a int not null); +create table t3 (a int not null); insert into t1 values (1),(2); insert into t2 values (1),(2); insert into t3 values (1),(2); @@ -99,7 +100,7 @@ show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 1 drop table t3,t2,t1; 
-CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id)) ENGINE=InnoDB; +CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id)); select count(*) from t1; count(*) 0 @@ -109,9 +110,9 @@ count(*) 1 drop table t1; set GLOBAL query_cache_size=1355776; -CREATE TABLE t1 ( id int(10) NOT NULL auto_increment, a varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY a (a)) ENGINE=innodb; -CREATE TABLE t2 ( id int(10) NOT NULL auto_increment, b varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY b (b)) ENGINE=innodb; -CREATE TABLE t3 ( id int(10) NOT NULL auto_increment, t1_id int(10) NOT NULL default '0', t2_id int(10) NOT NULL default '0', state int(11) default NULL, PRIMARY KEY (id), UNIQUE KEY t1_id (t1_id,t2_id), KEY t2_id (t2_id,t1_id), CONSTRAINT `t3_ibfk_1` FOREIGN KEY (`t1_id`) REFERENCES `t1` (`id`), CONSTRAINT `t3_ibfk_2` FOREIGN KEY (`t2_id`) REFERENCES `t2` (`id`)) ENGINE=innodb; +CREATE TABLE t1 ( id int(10) NOT NULL auto_increment, a varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY a (a)); +CREATE TABLE t2 ( id int(10) NOT NULL auto_increment, b varchar(25) default NULL, PRIMARY KEY (id), UNIQUE KEY b (b)); +CREATE TABLE t3 ( id int(10) NOT NULL auto_increment, t1_id int(10) NOT NULL default '0', t2_id int(10) NOT NULL default '0', state int(11) default NULL, PRIMARY KEY (id), UNIQUE KEY t1_id (t1_id,t2_id), KEY t2_id (t2_id,t1_id), CONSTRAINT `t3_ibfk_1` FOREIGN KEY (`t1_id`) REFERENCES `t1` (`id`), CONSTRAINT `t3_ibfk_2` FOREIGN KEY (`t2_id`) REFERENCES `t2` (`id`)); INSERT INTO t1 VALUES (1,'me'); INSERT INTO t2 VALUES (1,'you'); INSERT INTO t3 VALUES (2,1,1,2); diff --git a/mysql-test/r/client_xml.result b/mysql-test/r/client_xml.result index 24c05c7f9d6..7395b2433e8 100644 --- a/mysql-test/r/client_xml.result +++ b/mysql-test/r/client_xml.result @@ -68,7 +68,7 @@ insert into t1 values (1, 2, 'a&b ab'); - NULL + drop table t1; diff --git a/mysql-test/r/innodb_concurrent.result b/mysql-test/r/concurrent_innodb.result similarity index 96% rename from mysql-test/r/innodb_concurrent.result rename to mysql-test/r/concurrent_innodb.result index 56adb812cb7..27e2cde077c 100644 --- a/mysql-test/r/innodb_concurrent.result +++ b/mysql-test/r/concurrent_innodb.result @@ -1,9 +1,7 @@ +SET SESSION STORAGE_ENGINE = InnoDB; drop table if exists t1; -Warnings: -Note 1051 Unknown table 't1' -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) ENGINE=innodb; -Warnings: -Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); +SET SESSION STORAGE_ENGINE = InnoDB; insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -106,9 +104,7 @@ eta tipo c 2 22 jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj 1 11 kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk drop table t1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) ENGINE=innodb; -Warnings: -Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -211,7 +207,7 @@ eta tipo c 80 22 
jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj 90 11 kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk drop table t1; -create table t1 (a int not null, b int not null) engine=innodb; +create table t1 (a int not null, b int not null); insert into t1 values (1,1),(2,1),(3,1),(4,1); select get_lock("hello2",1000); get_lock("hello2",1000) @@ -239,9 +235,7 @@ a b 1 1 commit; drop table t1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) ENGINE=innodb; -Warnings: -Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -323,9 +317,7 @@ eta tipo c 80 22 jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj 90 11 kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk drop table t1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) ENGINE=innodb; -Warnings: -Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -407,9 +399,7 @@ eta tipo c 80 22 jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj 90 11 kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk drop table t1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) ENGINE=innodb; -Warnings: -Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -478,9 +468,7 @@ eta tipo c 80 22 jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj 90 11 kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk drop table t1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) ENGINE=innodb; -Warnings: -Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); @@ -549,9 +537,7 @@ eta tipo c 80 22 jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjj 90 11 kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk drop table t1; -create table t1(eta int(11) not null, tipo int(11), c varchar(255)) ENGINE=innodb; -Warnings: -Warning 1287 'TYPE=storage_engine' is deprecated; use 'ENGINE=storage_engine' instead +create table t1(eta int(11) not null, tipo int(11), c varchar(255)); insert into t1 values (7,7, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); insert into t1 values (8,8, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); insert into t1 values (10,1,"ccccccccccccccccccccccccccccccccccccccccccc"); diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 9ecaaa66cc3..5bc28e9eee5 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -780,6 +780,8 
@@ t1 CREATE TABLE `t1` ( `i` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 MAX_ROWS=4294967295 drop table t1; +create table t1 (upgrade int); +drop table t1; CREATE TABLE t1 (a int, b int); insert into t1 values (1,1),(1,2); CREATE TABLE t2 (primary key (a)) select * from t1; diff --git a/mysql-test/r/csv.result b/mysql-test/r/csv.result index e342af32a80..222d4de8059 100644 --- a/mysql-test/r/csv.result +++ b/mysql-test/r/csv.result @@ -5205,3 +5205,22 @@ select * from bug15205; val drop table bug15205; drop table bug15205_2; +create table bug22080_1 (id int,string varchar(64)) Engine=CSV; +create table bug22080_2 (id int,string varchar(64)) Engine=CSV; +create table bug22080_3 (id int,string varchar(64)) Engine=CSV; +insert into bug22080_1 values(1,'string'); +insert into bug22080_1 values(2,'string'); +insert into bug22080_1 values(3,'string'); +"1","string" +2","string" +"3","string" +check table bug22080_2; +Table Op Msg_type Msg_text +test.bug22080_2 check error Corrupt +"1","string" +"2",string" +"3","string" +check table bug22080_3; +Table Op Msg_type Msg_text +test.bug22080_3 check error Corrupt +drop tables bug22080_1,bug22080_2,bug22080_3; diff --git a/mysql-test/r/ctype_gbk.result b/mysql-test/r/ctype_gbk.result index 241539ecf42..3f5d8b0d8c6 100644 --- a/mysql-test/r/ctype_gbk.result +++ b/mysql-test/r/ctype_gbk.result @@ -168,3 +168,13 @@ DROP TABLE t1; select hex(convert(_gbk 0xA14041 using ucs2)); hex(convert(_gbk 0xA14041 using ucs2)) 003F0041 +create table t1 (c1 text not null, c2 text not null) character set gbk; +alter table t1 change c1 c1 mediumtext character set gbk not null; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` mediumtext NOT NULL, + `c2` text NOT NULL +) ENGINE=MyISAM DEFAULT CHARSET=gbk +drop table t1; +End of 5.0 tests diff --git a/mysql-test/r/ctype_ucs.result b/mysql-test/r/ctype_ucs.result index 0d8975c94c7..4f08b97492f 100644 --- a/mysql-test/r/ctype_ucs.result +++ b/mysql-test/r/ctype_ucs.result @@ -755,6 +755,27 @@ select export_set(5, name, upper(name), ",", 5) from bug20536; export_set(5, name, upper(name), ",", 5) test1,TEST1,test1,TEST1,TEST1 'test\_2','TEST\_2','test\_2','TEST\_2','TEST\_2' +CREATE TABLE t1 ( +status enum('active','passive') collate latin1_general_ci +NOT NULL default 'passive' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `status` enum('active','passive') CHARACTER SET latin1 COLLATE latin1_general_ci NOT NULL DEFAULT 'passive' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t1 ADD a int NOT NULL AFTER status; +CREATE TABLE t2 ( +status enum('active','passive') collate ucs2_turkish_ci +NOT NULL default 'passive' +); +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `status` enum('active','passive') CHARACTER SET ucs2 COLLATE ucs2_turkish_ci NOT NULL DEFAULT 'passive' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t2 ADD a int NOT NULL AFTER status; +DROP TABLE t1,t2; select password(name) from bug20536; password(name) ???????????????????? 
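
For readers skimming the mysql-test-run.pl hunks earlier in this patch: the environment_setup() change drops the hand-built "$extra_ld_library_paths" string in favour of collecting candidate directories in @ld_library_paths and joining them once at the end, which avoids stray empty ":" segments and makes conditional entries (udf_example dir, ndb .libs, /usr/lib/debug under valgrind) one push() each. The following is a minimal standalone sketch of that pattern, not part of the patch itself; the directory names are placeholders rather than the real build-tree layout.

    #!/usr/bin/perl
    # Sketch of list-based LD_LIBRARY_PATH assembly (placeholder paths).
    use strict;
    use warnings;

    my @ld_library_paths;

    # Each push replaces one of the old string concatenations.
    push(@ld_library_paths, "/path/to/libmysql/.libs",
                            "/path/to/libmysql_r/.libs");

    # Conditional entries stay one-liners.
    push(@ld_library_paths, "/usr/lib/debug") if -d "/usr/lib/debug";

    # Prepend the collected paths, keeping whatever the caller already had.
    $ENV{'LD_LIBRARY_PATH'}= join(":", @ld_library_paths,
                                  split(':', $ENV{'LD_LIBRARY_PATH'} || ''));
    print "LD_LIBRARY_PATH: $ENV{'LD_LIBRARY_PATH'}\n";

Joining once at the end also means the same list can be reused for DYLD_LIBRARY_PATH on Mac OS X, as the patch does.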
diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index f5e4054a385..f3351a2973b 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -1340,19 +1340,18 @@ select a from t1 group by a; a e drop table t1; -set names utf8; -grant select on test.* to юзер_юзер@localhost; -user() -юзер_юзер@localhost -revoke all on test.* from юзер_юзер@localhost; -drop user юзер_юзер@localhost; -create database имÑ_базы_в_кодировке_утф8_длиной_больше_чем_45; -use имÑ_базы_в_кодировке_утф8_длиной_больше_чем_45; -select database(); -database() -имÑ_базы_в_кодировке_утф8_длиной_больше_чем_45 -drop database имÑ_базы_в_кодировке_утф8_длиной_больше_чем_45; -use test; +create table t1(a char(10)) default charset utf8; +insert into t1 values ('123'), ('456'); +explain +select substr(Z.a,-1), Z.a from t1 as Y join t1 as Z on Y.a=Z.a order by 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE Y ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +1 SIMPLE Z ALL NULL NULL NULL NULL 2 Using where +select substr(Z.a,-1), Z.a from t1 as Y join t1 as Z on Y.a=Z.a order by 1; +substr(Z.a,-1) a +3 123 +6 456 +drop table t1; CREATE TABLE t1(id varchar(20) NOT NULL) DEFAULT CHARSET=utf8; INSERT INTO t1 VALUES ('xxx'), ('aa'), ('yyy'), ('aa'); SELECT id FROM t1; @@ -1462,3 +1461,21 @@ set @a:=null; execute my_stmt using @a; a b drop table if exists t1; +CREATE TABLE t1 ( +colA int(11) NOT NULL, +colB varchar(255) character set utf8 NOT NULL, +PRIMARY KEY (colA) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO t1 (colA, colB) VALUES (1, 'foo'), (2, 'foo bar'); +CREATE TABLE t2 ( +colA int(11) NOT NULL, +colB varchar(255) character set utf8 NOT NULL, +KEY bad (colA,colB(3)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO t2 (colA, colB) VALUES (1, 'foo'),(2, 'foo bar'); +SELECT * FROM t1 JOIN t2 ON t1.colA=t2.colA AND t1.colB=t2.colB +WHERE t1.colA < 3; +colA colB colA colB +1 foo 1 foo +2 foo bar 2 foo bar +DROP TABLE t1, t2; diff --git a/mysql-test/r/innodb-deadlock.result b/mysql-test/r/deadlock_innodb.result similarity index 60% rename from mysql-test/r/innodb-deadlock.result rename to mysql-test/r/deadlock_innodb.result index 2ca82101fb4..a0686d1c844 100644 --- a/mysql-test/r/innodb-deadlock.result +++ b/mysql-test/r/deadlock_innodb.result @@ -1,25 +1,33 @@ +# Establish connection con1 (user=root) +# Establish connection con2 (user=root) drop table if exists t1,t2; -create table t1 (id integer, x integer) engine=INNODB; +# Switch to connection con1 +create table t1 (id integer, x integer) engine = InnoDB; insert into t1 values(0, 0); set autocommit=0; SELECT * from t1 where id = 0 FOR UPDATE; id x 0 0 +# Switch to connection con2 set autocommit=0; update t1 set x=2 where id = 0; +# Switch to connection con1 update t1 set x=1 where id = 0; select * from t1; id x 0 1 commit; +# Switch to connection con2 commit; +# Switch to connection con1 select * from t1; id x 0 2 commit; drop table t1; -create table t1 (id integer, x integer) engine=INNODB; -create table t2 (b integer, a integer) engine=INNODB; +# Switch to connection con1 +create table t1 (id integer, x integer) engine = InnoDB; +create table t2 (b integer, a integer) engine = InnoDB; insert into t1 values(0, 0), (300, 300); insert into t2 values(0, 10), (1, 20), (2, 30); commit; @@ -39,26 +47,31 @@ select * from t1; id x 0 0 300 300 +# Switch to connection con2 set autocommit=0; update t1 set x=2 where id = 0; +# Switch to connection con1 update t1 set x=1 where id = 0; 
select * from t1; id x 0 1 300 300 commit; +# Switch to connection con2 commit; +# Switch to connection con1 select * from t1; id x 0 2 300 300 commit; drop table t1, t2; -create table t1 (id integer, x integer) engine=INNODB; -create table t2 (b integer, a integer) engine=INNODB; +create table t1 (id integer, x integer) engine = InnoDB; +create table t2 (b integer, a integer) engine = InnoDB; insert into t1 values(0, 0), (300, 300); insert into t2 values(0, 0), (1, 20), (2, 30); commit; +# Switch to connection con1 select a,b from t2 UNION SELECT id, x from t1 FOR UPDATE; a b 0 0 @@ -74,6 +87,7 @@ select * from t1; id x 0 0 300 300 +# Switch to connection con2 update t2 set a=2 where b = 0; select * from t2; b a @@ -81,16 +95,20 @@ b a 1 20 2 30 update t1 set x=2 where id = 0; +# Switch to connection con1 update t1 set x=1 where id = 0; select * from t1; id x 0 1 300 300 commit; +# Switch to connection con2 commit; +# Switch to connection con1 select * from t1; id x 0 2 300 300 commit; +# Switch to connection default + disconnect con1 and con2 drop table t1, t2; diff --git a/mysql-test/r/delayed.result b/mysql-test/r/delayed.result index a336f3b4108..a514ffcddd2 100644 --- a/mysql-test/r/delayed.result +++ b/mysql-test/r/delayed.result @@ -7,6 +7,7 @@ insert delayed into t1 set a = 4; insert delayed into t1 set a = 5, tmsp = 19711006010203; insert delayed into t1 (a, tmsp) values (6, 19711006010203); insert delayed into t1 (a, tmsp) values (7, NULL); +FLUSH TABLE t1; insert into t1 set a = 8,tmsp=19711006010203; select * from t1 where tmsp=0; a tmsp @@ -22,6 +23,7 @@ insert delayed into t1 values (null,"c"); insert delayed into t1 values (3,"d"),(null,"e"); insert delayed into t1 values (3,"this will give an","error"); ERROR 21S01: Column count doesn't match value count at row 1 +FLUSH TABLE t1; show status like 'not_flushed_delayed_rows'; Variable_name Value Not_flushed_delayed_rows 0 @@ -54,6 +56,7 @@ insert delayed into t1 values(null); insert delayed into t1 values(null); insert delayed into t1 values(null); insert delayed into t1 values(null); +FLUSH TABLE t1; select * from t1 order by a; a 1 @@ -69,3 +72,174 @@ a 12 13 DROP TABLE t1; +SET @bug20627_old_auto_increment_offset= +@@auto_increment_offset= 2; +SET @bug20627_old_auto_increment_increment= +@@auto_increment_increment= 3; +SET @bug20627_old_session_auto_increment_offset= +@@session.auto_increment_offset= 4; +SET @bug20627_old_session_auto_increment_increment= +@@session.auto_increment_increment= 5; +SET @@auto_increment_offset= 2; +SET @@auto_increment_increment= 3; +SET @@session.auto_increment_offset= 4; +SET @@session.auto_increment_increment= 5; +CREATE TABLE t1 ( +c1 INT NOT NULL AUTO_INCREMENT, +PRIMARY KEY (c1) +); +INSERT INTO t1 VALUES (NULL),(NULL),(NULL); +SELECT * FROM t1; +c1 +4 +9 +14 +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT NOT NULL AUTO_INCREMENT, +PRIMARY KEY (c1) +); +INSERT DELAYED INTO t1 VALUES (NULL),(NULL),(NULL); +FLUSH TABLE t1; +SELECT * FROM t1; +c1 +4 +9 +14 +DROP TABLE t1; +SET @@auto_increment_offset= +@bug20627_old_auto_increment_offset; +SET @@auto_increment_increment= +@bug20627_old_auto_increment_increment; +SET @@session.auto_increment_offset= +@bug20627_old_session_auto_increment_offset; +SET @@session.auto_increment_increment= +@bug20627_old_session_auto_increment_increment; +SET @bug20830_old_auto_increment_offset= +@@auto_increment_offset= 2; +SET @bug20830_old_auto_increment_increment= +@@auto_increment_increment= 3; +SET @bug20830_old_session_auto_increment_offset= 
+@@session.auto_increment_offset= 4; +SET @bug20830_old_session_auto_increment_increment= +@@session.auto_increment_increment= 5; +SET @@auto_increment_offset= 2; +SET @@auto_increment_increment= 3; +SET @@session.auto_increment_offset= 4; +SET @@session.auto_increment_increment= 5; +CREATE TABLE t1 ( +c1 INT(11) NOT NULL AUTO_INCREMENT, +c2 INT(11) DEFAULT NULL, +PRIMARY KEY (c1) +); +SET insert_id= 14; +INSERT INTO t1 VALUES(NULL, 11), (NULL, 12), (NULL, 13); +INSERT INTO t1 VALUES(NULL, 21), (NULL, 22), (NULL, 23); +INSERT INTO t1 VALUES( 69, 31), (NULL, 32), (NULL, 33); +INSERT INTO t1 VALUES(NULL, 41), (NULL, 42), (NULL, 43); +SET insert_id= 114; +INSERT INTO t1 VALUES(NULL, 51), (NULL, 52), (NULL, 53); +INSERT INTO t1 VALUES(NULL, 61), (NULL, 62), (NULL, 63); +INSERT INTO t1 VALUES( 49, 71), (NULL, 72), (NULL, 73); +INSERT INTO t1 VALUES(NULL, 81), (NULL, 82), (NULL, 83); +SET insert_id= 114; +INSERT INTO t1 VALUES(NULL, 91); +ERROR 23000: Duplicate entry '114' for key 'PRIMARY' +INSERT INTO t1 VALUES (NULL, 92), (NULL, 93); +SELECT * FROM t1; +c1 c2 +14 11 +19 12 +24 13 +29 21 +34 22 +39 23 +69 31 +74 32 +79 33 +84 41 +89 42 +94 43 +114 51 +119 52 +124 53 +129 61 +134 62 +139 63 +49 71 +144 72 +149 73 +154 81 +159 82 +164 83 +169 92 +174 93 +SELECT COUNT(*) FROM t1; +COUNT(*) +26 +SELECT SUM(c1) FROM t1; +SUM(c1) +2569 +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT(11) NOT NULL AUTO_INCREMENT, +c2 INT(11) DEFAULT NULL, +PRIMARY KEY (c1) +); +SET insert_id= 14; +INSERT DELAYED INTO t1 VALUES(NULL, 11), (NULL, 12), (NULL, 13); +INSERT DELAYED INTO t1 VALUES(NULL, 21), (NULL, 22), (NULL, 23); +INSERT DELAYED INTO t1 VALUES( 69, 31), (NULL, 32), (NULL, 33); +INSERT DELAYED INTO t1 VALUES(NULL, 41), (NULL, 42), (NULL, 43); +SET insert_id= 114; +INSERT DELAYED INTO t1 VALUES(NULL, 51), (NULL, 52), (NULL, 53); +INSERT DELAYED INTO t1 VALUES(NULL, 61), (NULL, 62), (NULL, 63); +INSERT DELAYED INTO t1 VALUES( 49, 71), (NULL, 72), (NULL, 73); +INSERT DELAYED INTO t1 VALUES(NULL, 81), (NULL, 82), (NULL, 83); +SET insert_id= 114; +INSERT DELAYED INTO t1 VALUES(NULL, 91); +INSERT DELAYED INTO t1 VALUES (NULL, 92), (NULL, 93); +FLUSH TABLE t1; +SELECT * FROM t1; +c1 c2 +14 11 +19 12 +24 13 +29 21 +34 22 +39 23 +69 31 +74 32 +79 33 +84 41 +89 42 +94 43 +114 51 +119 52 +124 53 +129 61 +134 62 +139 63 +49 71 +144 72 +149 73 +154 81 +159 82 +164 83 +169 92 +174 93 +SELECT COUNT(*) FROM t1; +COUNT(*) +26 +SELECT SUM(c1) FROM t1; +SUM(c1) +2569 +DROP TABLE t1; +SET @@auto_increment_offset= +@bug20830_old_auto_increment_offset; +SET @@auto_increment_increment= +@bug20830_old_auto_increment_increment; +SET @@session.auto_increment_offset= +@bug20830_old_session_auto_increment_offset; +SET @@session.auto_increment_increment= +@bug20830_old_session_auto_increment_increment; diff --git a/mysql-test/r/delete.result b/mysql-test/r/delete.result index 0946dc8f809..9d337a1ed34 100644 --- a/mysql-test/r/delete.result +++ b/mysql-test/r/delete.result @@ -172,6 +172,10 @@ a 0 2 DROP TABLE t1; +create table t1 (a int); +delete `4.t1` from t1 as `4.t1` where `4.t1`.a = 5; +delete FROM `4.t1` USING t1 as `4.t1` where `4.t1`.a = 5; +drop table t1; CREATE TABLE t1 (a int not null,b int not null); CREATE TABLE t2 (a int not null, b int not null, primary key (a,b)); CREATE TABLE t3 (a int not null, b int not null, primary key (a,b)); diff --git a/mysql-test/r/events.result b/mysql-test/r/events.result index 146f46edc2b..abf6879fc3c 100644 --- a/mysql-test/r/events.result +++ b/mysql-test/r/events.result @@ -38,52 +38,37 @@ 
drop event event2; create event event2 on schedule every 2 second starts now() ends date_add(now(), interval 5 hour) comment "some" DO begin end; drop event event2; CREATE EVENT event_starts_test ON SCHEDULE EVERY 10 SECOND COMMENT "" DO SELECT 1; -SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status -events_test event_starts_test root@localhost RECURRING NULL 10 SECOND # # ENABLED -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; -starts IS NULL ends IS NULL comment -0 1 +SELECT interval_field, interval_value, body FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +interval_field interval_value body +SECOND 10 SELECT 1 +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +execute_at IS NULL starts IS NULL ends IS NULL comment +1 0 1 ALTER EVENT event_starts_test ON SCHEDULE AT '2020-02-02 20:00:02'; -SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status -events_test event_starts_test root@localhost ONE TIME 2020-02-02 17:00:02 NULL NULL # # ENABLED -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; -starts IS NULL ends IS NULL comment -1 1 +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +execute_at IS NULL starts IS NULL ends IS NULL comment +0 1 1 ALTER EVENT event_starts_test COMMENT "non-empty comment"; -SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status -events_test event_starts_test root@localhost ONE TIME 2020-02-02 17:00:02 NULL NULL # # ENABLED -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; -starts IS NULL ends IS NULL comment -1 1 non-empty comment +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +execute_at IS NULL starts IS NULL ends IS NULL comment +0 1 1 non-empty comment ALTER EVENT event_starts_test COMMENT ""; -SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status -events_test event_starts_test root@localhost ONE TIME 2020-02-02 17:00:02 NULL NULL # # ENABLED -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; -starts IS NULL ends IS NULL comment -1 1 +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +execute_at IS NULL starts IS NULL ends IS NULL comment +0 1 1 DROP EVENT event_starts_test; CREATE EVENT event_starts_test ON SCHEDULE EVERY 20 SECOND STARTS '2020-02-02 20:00:02' ENDS '2022-02-02 20:00:02' DO SELECT 2; -SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status -events_test event_starts_test root@localhost RECURRING NULL 20 SECOND # # ENABLED -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; -starts IS NULL ends IS NULL comment -0 0 +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +execute_at IS NULL starts IS NULL ends IS NULL comment +1 0 0 ALTER EVENT event_starts_test COMMENT "non-empty 
comment"; -SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status -events_test event_starts_test root@localhost RECURRING NULL 20 SECOND # # ENABLED -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; -starts IS NULL ends IS NULL comment -0 0 non-empty comment +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +execute_at IS NULL starts IS NULL ends IS NULL comment +1 0 0 non-empty comment ALTER EVENT event_starts_test COMMENT ""; -SHOW EVENTS; -Db Name Definer Type Execute at Interval value Interval field Starts Ends Status -events_test event_starts_test root@localhost RECURRING NULL 20 SECOND # # ENABLED +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +execute_at IS NULL starts IS NULL ends IS NULL comment +1 0 0 DROP EVENT event_starts_test; create table test_nested(a int); create event e_43 on schedule every 1 second do set @a = 5; diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result index 458fd151130..08be6924e03 100644 --- a/mysql-test/r/events_bugs.result +++ b/mysql-test/r/events_bugs.result @@ -213,4 +213,17 @@ create event e_53 on schedule every 5 second starts (select s1 from ttx) do drop ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'select s1 from ttx) do drop table t' at line 1 create event e_53 on schedule every 5 second ends (select s1 from ttx) do drop table t; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'select s1 from ttx) do drop table t' at line 1 +drop event if exists e_16; +drop procedure if exists p_16; +create event e_16 on schedule every 1 second do set @a=5; +create procedure p_16 () alter event e_16 on schedule every @a second; +set @a = null; +call p_16(); +ERROR HY000: Incorrect INTERVAL value: 'NULL' +call p_16(); +ERROR HY000: Incorrect INTERVAL value: 'NULL' +set @a= 6; +call p_16(); +drop procedure p_16; +drop event e_16; drop database events_test; diff --git a/mysql-test/r/events_restart_phase0.result b/mysql-test/r/events_restart_phase0.result new file mode 100644 index 00000000000..218b804a302 --- /dev/null +++ b/mysql-test/r/events_restart_phase0.result @@ -0,0 +1,22 @@ +SHOW VARIABLES LIKE 'event%'; +Variable_name Value +event_scheduler DISABLED +SELECT @@global.event_scheduler; +@@global.event_scheduler +DISABLED +SET GLOBAL event_scheduler=on; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=off; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=0; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=1; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=2; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of '2' +SET GLOBAL event_scheduler=SUSPEND; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'SUSPEND' +SET GLOBAL 
event_scheduler=SUSPENDED; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'SUSPENDED' +SET GLOBAL event_scheduler=disabled; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'disabled' diff --git a/mysql-test/r/execution_constants.result b/mysql-test/r/execution_constants.result new file mode 100644 index 00000000000..293c88dc506 --- /dev/null +++ b/mysql-test/r/execution_constants.result @@ -0,0 +1,12 @@ +CREATE TABLE `t_bug21476` ( +`ID_BOARD` smallint(5) unsigned NOT NULL default '0', +`ID_MEMBER` mediumint(8) unsigned NOT NULL default '0', +`logTime` int(10) unsigned NOT NULL default '0', +`ID_MSG` mediumint(8) unsigned NOT NULL default '0', +PRIMARY KEY (`ID_MEMBER`,`ID_BOARD`), +KEY `logTime` (`logTime`) +) ENGINE=MyISAM DEFAULT CHARSET=cp1251 COLLATE=cp1251_bulgarian_ci; +INSERT INTO `t_bug21476` VALUES (2,2,1154870939,0),(1,2,1154870957,0),(2,183,1154941362,0),(2,84,1154904301,0),(1,84,1154905867,0),(2,13,1154947484,10271),(3,84,1154880549,0),(1,6,1154892183,0),(2,25,1154947581,10271),(3,25,1154904760,0),(1,25,1154947373,10271),(1,179,1154899992,0),(2,179,1154899410,0),(5,25,1154901666,0),(2,329,1154902026,0),(3,329,1154902040,0),(1,329,1154902058,0),(1,13,1154930841,0),(3,85,1154904987,0),(1,183,1154929665,0),(3,13,1154931268,0),(1,85,1154936888,0),(1,169,1154937959,0),(2,169,1154941717,0),(3,183,1154939810,0),(3,169,1154941734,0); +Assertion: mysql_errno 1436 == 1436 +DROP TABLE `t_bug21476`; +End of 5.0 tests. diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result index 675e0faa1a0..43646bd3d0e 100644 --- a/mysql-test/r/func_gconcat.result +++ b/mysql-test/r/func_gconcat.result @@ -572,14 +572,14 @@ COUNT(*) GROUP_CONCAT(DISTINCT t2.somename SEPARATOR ' |') DROP TABLE t1,t2; select * from (select group_concat('c') from DUAL) t; group_concat('c') -NULL +c create table t1 ( a int not null default 0); select * from (select group_concat(a) from t1) t2; group_concat(a) NULL select group_concat('x') UNION ALL select 1; group_concat('x') -NULL +x 1 drop table t1; CREATE TABLE t1 (id int, a varchar(9)); @@ -660,3 +660,12 @@ CHAR_LENGTH( GROUP_CONCAT(b) ) 240001 SET GROUP_CONCAT_MAX_LEN = 1024; DROP TABLE t1; +CREATE TABLE t1 (a int, b int); +INSERT INTO t1 VALUES (2,1), (1,2), (2,2), (1,3); +SELECT GROUP_CONCAT(a), x +FROM (SELECT a, GROUP_CONCAT(b) x FROM t1 GROUP BY a) AS s +GROUP BY x; +GROUP_CONCAT(a) x +2 1,2 +1 2,3 +DROP TABLE t1; diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index c7d95a4a446..40ca0a38db2 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -847,6 +847,22 @@ EXPLAIN SELECT MAX(b) FROM t1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 4 DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 VALUES (1,1),(1,2),(2,3); +SELECT (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; +(SELECT COUNT(DISTINCT t1.b)) +2 +1 +SELECT (SELECT COUNT(DISTINCT 12)) FROM t1 GROUP BY t1.a; +(SELECT COUNT(DISTINCT 12)) +1 +1 +SELECT AVG(2), BIT_AND(2), BIT_OR(2), BIT_XOR(2), COUNT(*), COUNT(12), +COUNT(DISTINCT 12), MIN(2),MAX(2),STD(2), VARIANCE(2),SUM(2), +GROUP_CONCAT(2),GROUP_CONCAT(DISTINCT 2); +AVG(2) BIT_AND(2) BIT_OR(2) BIT_XOR(2) COUNT(*) COUNT(12) COUNT(DISTINCT 12) MIN(2) MAX(2) STD(2) VARIANCE(2) SUM(2) GROUP_CONCAT(2) GROUP_CONCAT(DISTINCT 2) +2.00000 2 2 2 1 1 1 2 2 0.00000 0.00000 2 2 2 +DROP TABLE t1; create table t2 (ff double); insert into t2 values (2.2); select 
cast(sum(distinct ff) as decimal(5,2)) from t2; @@ -988,7 +1004,7 @@ SELECT SQL_NO_CACHE WHERE ttt.a = ccc.b AND ttt.a = t.a GROUP BY ttt.a) AS minid FROM t1 t, t2 c WHERE t.a = c.b; minid -NULL +1 DROP TABLE t1,t2; create table t1 select variance(0); show create table t1; diff --git a/mysql-test/r/func_group_innodb.result b/mysql-test/r/func_group_innodb.result index 5f12a437eed..230f2a7633f 100644 --- a/mysql-test/r/func_group_innodb.result +++ b/mysql-test/r/func_group_innodb.result @@ -21,7 +21,7 @@ min(7) NULL select min(7) from DUAL; min(7) -NULL +7 explain select min(7) from t2m join t1m; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away @@ -36,7 +36,7 @@ max(7) NULL select max(7) from DUAL; max(7) -NULL +7 explain select max(7) from t2m join t1m; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away @@ -75,7 +75,7 @@ min(7) NULL select min(7) from DUAL; min(7) -NULL +7 explain select min(7) from t2i join t1i; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2i ALL NULL NULL NULL NULL 1 @@ -91,7 +91,7 @@ max(7) NULL select max(7) from DUAL; max(7) -NULL +7 explain select max(7) from t2i join t1i; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2i ALL NULL NULL NULL NULL 1 diff --git a/mysql-test/r/func_in.result b/mysql-test/r/func_in.result index 0236cbfe26f..b88e5a66f96 100644 --- a/mysql-test/r/func_in.result +++ b/mysql-test/r/func_in.result @@ -343,3 +343,71 @@ some_id 1 2 drop table t1; +create table t1(f1 char(1)); +insert into t1 values ('a'),('b'),('1'); +select f1 from t1 where f1 in ('a',1); +f1 +a +1 +select f1, case f1 when 'a' then '+' when 1 then '-' end from t1; +f1 case f1 when 'a' then '+' when 1 then '-' end +a + +b NULL +1 - +create index t1f1_idx on t1(f1); +select f1 from t1 where f1 in ('a',1); +f1 +1 +a +explain select f1 from t1 where f1 in ('a',1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL t1f1_idx 2 NULL 3 Using where; Using index +select f1 from t1 where f1 in ('a','b'); +f1 +a +b +explain select f1 from t1 where f1 in ('a','b'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index t1f1_idx t1f1_idx 2 NULL 3 Using where; Using index +select f1 from t1 where f1 in (2,1); +f1 +1 +explain select f1 from t1 where f1 in (2,1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index t1f1_idx t1f1_idx 2 NULL 3 Using where; Using index +create table t2(f2 int, index t2f2(f2)); +insert into t2 values(0),(1),(2); +select f2 from t2 where f2 in ('a',2); +f2 +0 +2 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'a' +explain select f2 from t2 where f2 in ('a',2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index NULL t2f2 5 NULL 3 Using where; Using index +select f2 from t2 where f2 in ('a','b'); +f2 +0 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'b' +explain select f2 from t2 where f2 in ('a','b'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index t2f2 t2f2 5 NULL 3 Using where; Using index +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 
Truncated incorrect DOUBLE value: 'b' +select f2 from t2 where f2 in (1,'b'); +f2 +0 +1 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'b' +Warning 1292 Truncated incorrect DOUBLE value: 'b' +explain select f2 from t2 where f2 in (1,'b'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index NULL t2f2 5 NULL 3 Using where; Using index +drop table t1, t2; diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result index 940ca71ebbc..4d31b2ba496 100644 --- a/mysql-test/r/func_str.result +++ b/mysql-test/r/func_str.result @@ -1113,4 +1113,39 @@ conv("18383815659218730760",10,10) + 0 select "18383815659218730760" + 0; "18383815659218730760" + 0 1.8383815659219e+19 +CREATE TABLE t1 (code varchar(10)); +INSERT INTO t1 VALUES ('a12'), ('A12'), ('a13'); +SELECT ASCII(code), code FROM t1 WHERE code='A12'; +ASCII(code) code +97 a12 +65 A12 +SELECT ASCII(code), code FROM t1 WHERE code='A12' AND ASCII(code)=65; +ASCII(code) code +65 A12 +INSERT INTO t1 VALUES ('a12 '), ('A12 '); +SELECT LENGTH(code), code FROM t1 WHERE code='A12'; +LENGTH(code) code +3 a12 +3 A12 +4 a12 +5 A12 +SELECT LENGTH(code), code FROM t1 WHERE code='A12' AND LENGTH(code)=5; +LENGTH(code) code +5 A12 +ALTER TABLE t1 ADD INDEX (code); +CREATE TABLE t2 (id varchar(10) PRIMARY KEY); +INSERT INTO t2 VALUES ('a11'), ('a12'), ('a13'), ('a14'); +SELECT * FROM t1 INNER JOIN t2 ON t1.code=t2.id +WHERE t2.id='a12' AND (LENGTH(code)=5 OR code < 'a00'); +code id +A12 a12 +EXPLAIN EXTENDED +SELECT * FROM t1 INNER JOIN t2 ON code=id +WHERE id='a12' AND (LENGTH(code)=5 OR code < 'a00'); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ref code code 13 const 3 100.00 Using where; Using index +1 SIMPLE t2 ref PRIMARY PRIMARY 12 const 1 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`code` AS `code`,`test`.`t2`.`id` AS `id` from `test`.`t1` join `test`.`t2` where ((`test`.`t1`.`code` = _latin1'a12') and (`test`.`t2`.`id` = _latin1'a12') and (length(`test`.`t1`.`code`) = 5)) +DROP TABLE t1,t2; End of 5.0 tests diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index cfb675652f2..82c361edd39 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -840,39 +840,38 @@ drop table t1; create table t1(f1 date, f2 time, f3 datetime); insert into t1 values ("2006-01-01", "12:01:01", "2006-01-01 12:01:01"); insert into t1 values ("2006-01-02", "12:01:02", "2006-01-02 12:01:02"); -select f1 from t1 where f1 between "2006-1-1" and 20060101; +select f1 from t1 where f1 between CAST("2006-1-1" as date) and CAST(20060101 as date); f1 2006-01-01 -select f1 from t1 where f1 between "2006-1-1" and "2006.1.1"; +select f1 from t1 where f1 between cast("2006-1-1" as date) and cast("2006.1.1" as date); f1 2006-01-01 -select f1 from t1 where date(f1) between "2006-1-1" and "2006.1.1"; +select f1 from t1 where date(f1) between cast("2006-1-1" as date) and cast("2006.1.1" as date); f1 2006-01-01 -select f2 from t1 where f2 between "12:1:2" and "12:2:2"; +select f2 from t1 where f2 between cast("12:1:2" as time) and cast("12:2:2" as time); f2 12:01:02 -select f2 from t1 where time(f2) between "12:1:2" and "12:2:2"; +select f2 from t1 where time(f2) between cast("12:1:2" as time) and cast("12:2:2" as time); f2 12:01:02 -select f3 from t1 where f3 between "2006-1-1 12:1:1" and "2006-1-1 12:1:2"; +select f3 from t1 where f3 between cast("2006-1-1 12:1:1" as datetime) and cast("2006-1-1 12:1:2" as 
datetime); f3 2006-01-01 12:01:01 -select f3 from t1 where timestamp(f3) between "2006-1-1 12:1:1" and "2006-1-1 12:1:2"; +select f3 from t1 where timestamp(f3) between cast("2006-1-1 12:1:1" as datetime) and cast("2006-1-1 12:1:2" as datetime); f3 2006-01-01 12:01:01 -select f1 from t1 where "2006-1-1" between f1 and f3; +select f1 from t1 where cast("2006-1-1" as date) between f1 and f3; f1 2006-01-01 -select f1 from t1 where "2006-1-1" between date(f1) and date(f3); +select f1 from t1 where cast("2006-1-1" as date) between date(f1) and date(f3); f1 2006-01-01 -select f1 from t1 where "2006-1-1" between f1 and 'zzz'; +select f1 from t1 where cast("2006-1-1" as date) between f1 and cast('zzz' as date); f1 Warnings: -Warning 1292 Incorrect date value: 'zzz' for column 'f1' at row 1 -Warning 1292 Truncated incorrect DOUBLE value: 'zzz' -Warning 1292 Truncated incorrect DOUBLE value: 'zzz' +Warning 1292 Incorrect datetime value: 'zzz' +Warning 1292 Incorrect datetime value: 'zzz' select f1 from t1 where makedate(2006,1) between date(f1) and date(f3); f1 2006-01-01 @@ -891,6 +890,18 @@ t1 CREATE TABLE `t1` ( `from_unixtime(1) + 0` double(23,6) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; +SET NAMES latin1; +SET character_set_results = NULL; +SHOW VARIABLES LIKE 'character_set_results'; +Variable_name Value +character_set_results +CREATE TABLE testBug8868 (field1 DATE, field2 VARCHAR(32) CHARACTER SET BINARY); +INSERT INTO testBug8868 VALUES ('2006-09-04', 'abcd'); +SELECT DATE_FORMAT(field1,'%b-%e %l:%i%p') as fmtddate, field2 FROM testBug8868; +fmtddate field2 +Sep-4 12:00AM abcd +DROP TABLE testBug8868; +SET NAMES DEFAULT; (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%H') As H) union (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%H') As H); diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 6b700d0c947..6a90ad27625 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -703,7 +703,7 @@ def test t1 t1 g g 255 4294967295 0 Y 144 0 63 g select asbinary(g) from t1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def asbinary(g) 252 8192 0 Y 128 0 63 +def asbinary(g) 252 16777216 0 Y 128 0 63 asbinary(g) drop table t1; create table t1 select GeomFromText('point(1 1)'); diff --git a/mysql-test/r/greedy_optimizer.result b/mysql-test/r/greedy_optimizer.result index 1da49fbedb0..b02ff04780b 100644 --- a/mysql-test/r/greedy_optimizer.result +++ b/mysql-test/r/greedy_optimizer.result @@ -233,7 +233,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -245,7 +245,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 
= t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -257,7 +257,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -269,7 +269,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 set optimizer_search_depth=1; select @@optimizer_search_depth; @@optimizer_search_depth @@ -385,7 +385,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -397,7 +397,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -409,7 +409,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 
and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -421,7 +421,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 set optimizer_prune_level=1; select @@optimizer_prune_level; @@optimizer_prune_level diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index 709d60b963f..80988bd8047 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -807,8 +807,8 @@ explain SELECT straight_join sql_no_cache v1.a, v1.b, v1.real_b from t2, v1 where t2.b=v1.a GROUP BY t2.b; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t2 index b b 2 NULL 10 Using index -1 PRIMARY t1 eq_ref PRIMARY PRIMARY 1 test.t2.b 1 +1 SIMPLE t2 index b b 2 NULL 10 Using index +1 SIMPLE t1 eq_ref PRIMARY PRIMARY 1 test.t2.b 1 SELECT straight_join sql_no_cache v1.a, v1.b, v1.real_b from t2, v1 where t2.b=v1.a GROUP BY t2.b; a b real_b diff --git a/mysql-test/r/handler_innodb.result b/mysql-test/r/handler_innodb.result new file mode 100644 index 00000000000..89569d918ca --- /dev/null +++ b/mysql-test/r/handler_innodb.result @@ -0,0 +1,517 @@ +SET SESSION STORAGE_ENGINE = InnoDB; +drop table if exists t1,t3,t4,t5; +create table t1 (a int, b char(10), key a(a), key b(a,b)); +insert into t1 values +(17,"ddd"),(18,"eee"),(19,"fff"),(19,"yyy"), +(14,"aaa"),(15,"bbb"),(16,"ccc"),(16,"xxx"), +(20,"ggg"),(21,"hhh"),(22,"iii"); +handler t1 open as t2; +handler t2 read a=(SELECT 1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'SELECT 1)' at line 1 +handler t2 read a first; +a b +14 aaa +handler t2 read a next; +a b +15 bbb +handler t2 read a next; +a b +16 ccc +handler t2 read a prev; +a b +15 bbb +handler t2 read a last; +a b +22 iii +handler t2 read a prev; +a b +21 hhh +handler t2 read a prev; +a b +20 ggg +handler t2 read a first; +a b +14 aaa +handler t2 read a prev; +a b +handler t2 read a last; +a b +22 iii +handler t2 read a prev; +a b +21 hhh +handler t2 read a next; +a b +22 iii +handler t2 read a next; +a b +handler t2 read a=(15); +a b +15 bbb +handler t2 read a=(16); +a b +16 ccc +handler t2 read a=(19,"fff"); +ERROR 42000: Too many key parts specified; max 1 parts allowed +handler t2 read b=(19,"fff"); +a b +19 fff +handler t2 read b=(19,"yyy"); +a b +19 yyy +handler t2 read b=(19); +a b +19 fff +handler t1 read a last; +ERROR 42S02: Unknown table 't1' in HANDLER +handler t2 read a=(11); +a b +handler t2 read a>=(11); +a b +14 aaa +handler t2 read a=(18); +a b +18 eee +handler t2 read a>=(18); +a b +18 eee +handler t2 read a>(18); +a b +19 fff +handler t2 read a<=(18); +a b +18 eee +handler t2 read a<(18); +a b +17 ddd +handler t2 read a first limit 5; +a b +14 aaa +15 bbb +16 ccc +16 xxx +17 ddd +handler t2 read a next limit 3; +a b +18 eee +19 fff +19 yyy +handler t2 read a prev limit 10; +a b +19 fff +18 eee +17 ddd +16 xxx +16 ccc +15 bbb +14 aaa +handler t2 read a>=(16) limit 4; +a b +16 ccc +16 xxx +17 ddd +18 eee +handler t2 read a>=(16) limit 2,2; +a b +17 ddd +18 eee +handler t2 read a last limit 3; +a b +22 iii +21 hhh 
+20 ggg +handler t2 read a=(19); +a b +19 fff +handler t2 read a=(19) where b="yyy"; +a b +19 yyy +handler t2 read first; +a b +17 ddd +handler t2 read next; +a b +18 eee +handler t2 read next; +a b +19 fff +handler t2 read last; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1 +handler t2 close; +handler t1 open; +handler t1 read a next; +a b +14 aaa +handler t1 read a next; +a b +15 bbb +handler t1 close; +handler t1 open; +handler t1 read a prev; +a b +22 iii +handler t1 read a prev; +a b +21 hhh +handler t1 close; +handler t1 open as t2; +handler t2 read first; +a b +17 ddd +alter table t1 engine = InnoDB; +handler t2 read first; +ERROR 42S02: Unknown table 't2' in HANDLER +handler t1 open as t2; +drop table t1; +create table t1 (a int); +insert into t1 values (17); +handler t2 read first; +ERROR 42S02: Unknown table 't2' in HANDLER +handler t1 open as t2; +alter table t1 engine=MEMORY; +handler t2 read first; +ERROR 42S02: Unknown table 't2' in HANDLER +drop table t1; +create table t1 (a int); +insert into t1 values (1),(2),(3),(4),(5),(6); +delete from t1 limit 2; +handler t1 open; +handler t1 read first; +a +3 +handler t1 read first limit 1,1; +a +4 +handler t1 read first limit 2,2; +a +5 +6 +delete from t1 limit 3; +handler t1 read first; +a +6 +drop table t1; +create table t1(a int, index(a)); +insert into t1 values (1), (2), (3); +handler t1 open; +handler t1 read a=(W); +ERROR 42S22: Unknown column 'W' in 'field list' +handler t1 read a=(a); +ERROR HY000: Incorrect arguments to HANDLER ... READ +drop table t1; +create table t1 (a char(5)); +insert into t1 values ("Ok"); +handler t1 open as t; +handler t read first; +a +Ok +use mysql; +handler t read first; +a +Ok +handler t close; +handler test.t1 open as t; +handler t read first; +a +Ok +handler t close; +use test; +drop table t1; +create table t1 ( a int, b int, INDEX a (a) ); +insert into t1 values (1,2), (2,1); +handler t1 open; +handler t1 read a=(1) where b=2; +a b +1 2 +handler t1 read a=(1) where b=3; +a b +handler t1 read a=(1) where b=1; +a b +handler t1 close; +drop table t1; +drop database if exists test_test; +create database test_test; +use test_test; +create table t1(table_id char(20) primary key); +insert into t1 values ('test_test.t1'); +insert into t1 values (''); +handler t1 open; +handler t1 read first limit 9; +table_id + +test_test.t1 +create table t2(table_id char(20) primary key); +insert into t2 values ('test_test.t2'); +insert into t2 values (''); +handler t2 open; +handler t2 read first limit 9; +table_id + +test_test.t2 +use test; +drop table if exists t1; +create table t1(table_id char(20) primary key); +insert into t1 values ('test.t1'); +insert into t1 values (''); +handler t1 open; +ERROR 42000: Not unique table/alias: 't1' +use test; +handler test.t1 read first limit 9; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'read first limit 9' at line 1 +handler test_test.t1 read first limit 9; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'read first limit 9' at line 1 +handler t1 read first limit 9; +table_id + +test_test.t1 +handler test_test.t2 read first limit 9; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right 
syntax to use near 'read first limit 9' at line 1 +handler t2 read first limit 9; +table_id + +test_test.t2 +handler test_test.t1 close; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1 +handler t1 close; +drop table test_test.t1; +handler test_test.t2 close; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1 +handler t2 close; +drop table test_test.t2; +drop database test_test; +use test; +handler test.t1 close; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1 +handler t1 close; +ERROR 42S02: Unknown table 't1' in HANDLER +drop table test.t1; +drop database if exists test_test; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create database test_test; +use test_test; +create table t1 (c1 char(20)); +insert into t1 values ('test_test.t1'); +create table t3 (c1 char(20)); +insert into t3 values ('test_test.t3'); +handler t1 open; +handler t1 read first limit 9; +c1 +test_test.t1 +handler t1 open h1; +handler h1 read first limit 9; +c1 +test_test.t1 +use test; +create table t1 (c1 char(20)); +create table t2 (c1 char(20)); +create table t3 (c1 char(20)); +insert into t1 values ('t1'); +insert into t2 values ('t2'); +insert into t3 values ('t3'); +handler t1 open; +ERROR 42000: Not unique table/alias: 't1' +handler t2 open t1; +ERROR 42000: Not unique table/alias: 't1' +handler t3 open t1; +ERROR 42000: Not unique table/alias: 't1' +handler t1 read first limit 9; +c1 +test_test.t1 +handler test.t1 close; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1 +handler test.t1 open h1; +ERROR 42000: Not unique table/alias: 'h1' +handler test_test.t1 open h1; +ERROR 42000: Not unique table/alias: 'h1' +handler test_test.t3 open h3; +handler test.t1 open h2; +handler t1 read first limit 9; +c1 +test_test.t1 +handler h1 read first limit 9; +c1 +test_test.t1 +handler h2 read first limit 9; +c1 +t1 +handler h3 read first limit 9; +c1 +test_test.t3 +handler h2 read first limit 9; +c1 +t1 +handler test.h1 close; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1 +handler t1 close; +handler h1 close; +handler h2 close; +handler t1 read first limit 9; +ERROR 42S02: Unknown table 't1' in HANDLER +handler h1 read first limit 9; +ERROR 42S02: Unknown table 'h1' in HANDLER +handler h2 read first limit 9; +ERROR 42S02: Unknown table 'h2' in HANDLER +handler h3 read first limit 9; +c1 +test_test.t3 +handler h3 read first limit 9; +c1 +test_test.t3 +use test_test; +handler h3 read first limit 9; +c1 +test_test.t3 +handler test.h3 read first limit 9; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'read first limit 9' at line 1 +handler h3 close; +use test; +drop table t3; +drop table t2; +drop table t1; +drop database test_test; +create table t1 (c1 char(20)); +insert into t1 values ("t1"); +handler t1 open as h1; +handler h1 read first limit 9; +c1 +t1 +create table t2 (c1 char(20)); +insert into t2 values ("t2"); 
+handler t2 open as h2; +handler h2 read first limit 9; +c1 +t2 +create table t3 (c1 char(20)); +insert into t3 values ("t3"); +handler t3 open as h3; +handler h3 read first limit 9; +c1 +t3 +create table t4 (c1 char(20)); +insert into t4 values ("t4"); +handler t4 open as h4; +handler h4 read first limit 9; +c1 +t4 +create table t5 (c1 char(20)); +insert into t5 values ("t5"); +handler t5 open as h5; +handler h5 read first limit 9; +c1 +t5 +alter table t1 engine=MyISAM; +handler h1 read first limit 9; +ERROR 42S02: Unknown table 'h1' in HANDLER +handler h2 read first limit 9; +c1 +t2 +handler h3 read first limit 9; +c1 +t3 +handler h4 read first limit 9; +c1 +t4 +handler h5 read first limit 9; +c1 +t5 +alter table t5 engine=MyISAM; +handler h1 read first limit 9; +ERROR 42S02: Unknown table 'h1' in HANDLER +handler h2 read first limit 9; +c1 +t2 +handler h3 read first limit 9; +c1 +t3 +handler h4 read first limit 9; +c1 +t4 +handler h5 read first limit 9; +ERROR 42S02: Unknown table 'h5' in HANDLER +alter table t3 engine=MyISAM; +handler h1 read first limit 9; +ERROR 42S02: Unknown table 'h1' in HANDLER +handler h2 read first limit 9; +c1 +t2 +handler h3 read first limit 9; +ERROR 42S02: Unknown table 'h3' in HANDLER +handler h4 read first limit 9; +c1 +t4 +handler h5 read first limit 9; +ERROR 42S02: Unknown table 'h5' in HANDLER +handler h2 close; +handler h4 close; +handler t1 open as h1_1; +handler t1 open as h1_2; +handler t1 open as h1_3; +handler h1_1 read first limit 9; +c1 +t1 +handler h1_2 read first limit 9; +c1 +t1 +handler h1_3 read first limit 9; +c1 +t1 +alter table t1 engine=InnoDB; +handler h1_1 read first limit 9; +ERROR 42S02: Unknown table 'h1_1' in HANDLER +handler h1_2 read first limit 9; +ERROR 42S02: Unknown table 'h1_2' in HANDLER +handler h1_3 read first limit 9; +ERROR 42S02: Unknown table 'h1_3' in HANDLER +drop table t1; +drop table t2; +drop table t3; +drop table t4; +drop table t5; +create table t1 (c1 int); +insert into t1 values (1); +handler t1 open; +handler t1 read first; +c1 +1 +send the below to another connection, do not wait for the result + optimize table t1; +proceed with the normal connection +handler t1 read next; +c1 +1 +handler t1 close; +read the result from the other connection +Table Op Msg_type Msg_text +test.t1 optimize status OK +proceed with the normal connection +drop table t1; +CREATE TABLE t1 ( no1 smallint(5) NOT NULL default '0', no2 int(10) NOT NULL default '0', PRIMARY KEY (no1,no2)); +INSERT INTO t1 VALUES (1,274),(1,275),(2,6),(2,8),(4,1),(4,2); +HANDLER t1 OPEN; +HANDLER t1 READ `primary` = (1, 1000); +no1 no2 +HANDLER t1 READ `primary` PREV; +no1 no2 +1 275 +DROP TABLE t1; +create table t1 (c1 int); +insert into t1 values (14397); +flush tables with read lock; +drop table t1; +ERROR HY000: Can't execute the query because you have a conflicting read lock +send the below to another connection, do not wait for the result + drop table t1; +proceed with the normal connection +select * from t1; +c1 +14397 +unlock tables; +read the result from the other connection +proceed with the normal connection +select * from t1; +ERROR 42S02: Table 'test.t1' doesn't exist +drop table if exists t1; +Warnings: +Note 1051 Unknown table 't1' diff --git a/mysql-test/r/handler.result b/mysql-test/r/handler_myisam.result similarity index 93% rename from mysql-test/r/handler.result rename to mysql-test/r/handler_myisam.result index 104025e83eb..8edf191b1b0 100644 --- a/mysql-test/r/handler.result +++ b/mysql-test/r/handler_myisam.result @@ -1,3 +1,4 
@@ +SET SESSION STORAGE_ENGINE = MyISAM; drop table if exists t1,t3,t4,t5; create table t1 (a int, b char(10), key a(a), key b(a,b)); insert into t1 values @@ -137,6 +138,29 @@ a b handler t2 read last; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1 handler t2 close; +handler t1 open; +handler t1 read a next; +a b +14 aaa +handler t1 read a next; +a b +15 bbb +handler t1 close; +handler t1 open; +handler t1 read a prev; +a b +22 iii +handler t1 read a prev; +a b +21 hhh +handler t1 close; +handler t1 open as t2; +handler t2 read first; +a b +17 ddd +alter table t1 engine = MyISAM; +handler t2 read first; +ERROR 42S02: Unknown table 't2' in HANDLER handler t1 open as t2; drop table t1; create table t1 (a int); @@ -144,7 +168,7 @@ insert into t1 values (17); handler t2 read first; ERROR 42S02: Unknown table 't2' in HANDLER handler t1 open as t2; -alter table t1 engine=MyISAM; +alter table t1 engine=MEMORY; handler t2 read first; ERROR 42S02: Unknown table 't2' in HANDLER drop table t1; @@ -463,6 +487,15 @@ Table Op Msg_type Msg_text test.t1 optimize status OK proceed with the normal connection drop table t1; +CREATE TABLE t1 ( no1 smallint(5) NOT NULL default '0', no2 int(10) NOT NULL default '0', PRIMARY KEY (no1,no2)); +INSERT INTO t1 VALUES (1,274),(1,275),(2,6),(2,8),(4,1),(4,2); +HANDLER t1 OPEN; +HANDLER t1 READ `primary` = (1, 1000); +no1 no2 +HANDLER t1 READ `primary` PREV; +no1 no2 +1 275 +DROP TABLE t1; create table t1 (c1 int); insert into t1 values (14397); flush tables with read lock; diff --git a/mysql-test/r/have_archive.require b/mysql-test/r/have_binlog_format_mixed.require similarity index 50% rename from mysql-test/r/have_archive.require rename to mysql-test/r/have_binlog_format_mixed.require index c4b4ba24fcd..4b752cbb314 100644 --- a/mysql-test/r/have_archive.require +++ b/mysql-test/r/have_binlog_format_mixed.require @@ -1,2 +1,2 @@ Variable_name Value -have_archive YES +binlog_format MIXED diff --git a/mysql-test/r/have_blackhole.require b/mysql-test/r/have_blackhole.require deleted file mode 100644 index 15029a460f6..00000000000 --- a/mysql-test/r/have_blackhole.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_blackhole_engine YES diff --git a/mysql-test/r/have_csv.require b/mysql-test/r/have_csv.require deleted file mode 100644 index cc2fb28289c..00000000000 --- a/mysql-test/r/have_csv.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_csv YES diff --git a/mysql-test/r/have_exampledb.require b/mysql-test/r/have_exampledb.require deleted file mode 100644 index 29d0cf8b1a6..00000000000 --- a/mysql-test/r/have_exampledb.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_example_engine YES diff --git a/mysql-test/r/have_federated_db.require b/mysql-test/r/have_federated_db.require deleted file mode 100644 index f4c521a8f35..00000000000 --- a/mysql-test/r/have_federated_db.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_federated_engine YES diff --git a/mysql-test/r/have_innodb.require b/mysql-test/r/have_innodb.require deleted file mode 100644 index 48a764a6c58..00000000000 --- a/mysql-test/r/have_innodb.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_innodb YES diff --git a/mysql-test/r/have_ndb.require b/mysql-test/r/have_ndb.require deleted file mode 100644 index f0402b72c6a..00000000000 --- a/mysql-test/r/have_ndb.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value 
-have_ndbcluster YES diff --git a/mysql-test/r/have_ndb_status_ok.require b/mysql-test/r/have_ndb_status_ok.require deleted file mode 100644 index 8a82871234b..00000000000 --- a/mysql-test/r/have_ndb_status_ok.require +++ /dev/null @@ -1,2 +0,0 @@ -ndb_status_ok -YES diff --git a/mysql-test/r/have_raid.require b/mysql-test/r/have_raid.require deleted file mode 100644 index 8390f4dbb85..00000000000 --- a/mysql-test/r/have_raid.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_raid YES diff --git a/mysql-test/r/heap_hash.result b/mysql-test/r/heap_hash.result index bf7c250cbcf..41f00fd2ad8 100644 --- a/mysql-test/r/heap_hash.result +++ b/mysql-test/r/heap_hash.result @@ -354,7 +354,7 @@ t3 1 a 2 b NULL 13 NULL NULL HASH explain select * from t1 ignore key(btree_idx), t3 where t1.name='matt' and t3.a = concat('',t1.name) and t3.b=t1.name; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref heap_idx heap_idx 22 const 7 Using where -1 SIMPLE t3 ref a a 44 const,const 7 Using where +1 SIMPLE t3 ref a a 44 func,const 7 Using where drop table t1, t2, t3; create temporary table t1 ( a int, index (a) ) engine=memory; insert into t1 values (1),(2),(3),(4),(5); diff --git a/mysql-test/r/im_daemon_life_cycle.result b/mysql-test/r/im_daemon_life_cycle.result index 86b196659bf..eee8479888a 100644 --- a/mysql-test/r/im_daemon_life_cycle.result +++ b/mysql-test/r/im_daemon_life_cycle.result @@ -8,3 +8,17 @@ mysqld2 offline Killing the process... Sleeping... Success: the process was restarted. + +-------------------------------------------------------------------- +-- Test for BUG#12751 +-------------------------------------------------------------------- +START INSTANCE mysqld2; +Success: the process has been started. +Killing the process... +Sleeping... +Success: the process was restarted. +SHOW INSTANCE STATUS mysqld1; +instance_name state version_number version mysqld_compatible +mysqld1 online VERSION_NUMBER VERSION no +STOP INSTANCE mysqld2; +Success: the process has been stopped. 
diff --git a/mysql-test/r/index_merge.result b/mysql-test/r/index_merge.result deleted file mode 100644 index 6365bd0a664..00000000000 --- a/mysql-test/r/index_merge.result +++ /dev/null @@ -1,426 +0,0 @@ -drop table if exists t0, t1, t2, t3, t4; -create table t0 -( -key1 int not null, -INDEX i1(key1) -); -alter table t0 add key2 int not null, add index i2(key2); -alter table t0 add key3 int not null, add index i3(key3); -alter table t0 add key4 int not null, add index i4(key4); -alter table t0 add key5 int not null, add index i5(key5); -alter table t0 add key6 int not null, add index i6(key6); -alter table t0 add key7 int not null, add index i7(key7); -alter table t0 add key8 int not null, add index i8(key8); -update t0 set key2=key1,key3=key1,key4=key1,key5=key1,key6=key1,key7=key1,key8=1024-key1; -analyze table t0; -Table Op Msg_type Msg_text -test.t0 analyze status OK -explain select * from t0 where key1 < 3 or key1 > 1020; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 range i1 i1 4 NULL 78 Using where -explain -select * from t0 where key1 < 3 or key2 > 1020; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 45 Using sort_union(i1,i2); Using where -select * from t0 where key1 < 3 or key2 > 1020; -key1 key2 key3 key4 key5 key6 key7 key8 -1 1 1 1 1 1 1 1023 -2 2 2 2 2 2 2 1022 -1021 1021 1021 1021 1021 1021 1021 3 -1022 1022 1022 1022 1022 1022 1022 2 -1023 1023 1023 1023 1023 1023 1023 1 -1024 1024 1024 1024 1024 1024 1024 0 -explain select * from t0 where key1 < 3 or key2 <4; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where -explain -select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 11 Using sort_union(i1,i2); Using where -select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); -key1 key2 key3 key4 key5 key6 key7 key8 -31 31 31 31 31 31 31 993 -32 32 32 32 32 32 32 992 -33 33 33 33 33 33 33 991 -34 34 34 34 34 34 34 990 -35 35 35 35 35 35 35 989 -36 36 36 36 36 36 36 988 -37 37 37 37 37 37 37 987 -38 38 38 38 38 38 38 986 -39 39 39 39 39 39 39 985 -explain select * from t0 ignore index (i2) where key1 < 3 or key2 <4; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ALL i1 NULL NULL NULL 1024 Using where -explain select * from t0 where (key1 < 3 or key2 <4) and key3 = 50; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ref i1,i2,i3 i3 4 const 1 Using where -explain select * from t0 use index (i1,i2) where (key1 < 3 or key2 <4) and key3 = 50; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where -explain select * from t0 where (key1 > 1 or key2 > 2); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where -explain select * from t0 force index (i1,i2) where (key1 > 1 or key2 > 2); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 1024 Using sort_union(i1,i2); Using where -explain -select * from t0 where key1<3 or key2<3 or (key1>5 and key1<8) or -(key1>10 and key1<12) or (key2>100 and key2<110); -id select_type table type possible_keys key 
key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 17 Using sort_union(i1,i2); Using where -explain select * from t0 where key2 = 45 or key1 <=> null; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 range i1,i2 i2 4 NULL 1 Using where -explain select * from t0 where key2 = 45 or key1 is not null; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where -explain select * from t0 where key2 = 45 or key1 is null; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ref i2 i2 4 const 1 -explain select * from t0 where key2=10 or key3=3 or key4 <=> null; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i2,i3,i4 i2,i3 4,4 NULL 2 Using union(i2,i3); Using where -explain select * from t0 where key2=10 or key3=3 or key4 is null; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i2,i3 i2,i3 4,4 NULL 2 Using union(i2,i3); Using where -explain select key1 from t0 where (key1 <=> null) or (key2 < 5) or -(key3=10) or (key4 <=> null); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i4 i2,i3 4,4 NULL 6 Using sort_union(i2,i3); Using where -explain select key1 from t0 where (key1 <=> null) or (key1 < 5) or -(key3=10) or (key4 <=> null); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i3,i4 i1,i3 4,4 NULL 6 Using sort_union(i1,i3); Using where -explain select * from t0 where -(key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 5 or key6 < 5); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where -explain -select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 9 Using sort_union(i1,i2); Using where -select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4); -key1 key2 key3 key4 key5 key6 key7 key8 -1 1 1 1 1 1 1 1023 -2 2 2 2 2 2 2 1022 -3 3 3 3 3 3 3 1021 -4 4 4 4 4 4 4 1020 -5 5 5 5 5 5 5 1019 -explain select * from t0 where -(key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 2 or key6 < 2); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where -explain select * from t0 where -(key1 < 3 or key2 < 3) and (key3 < 100); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 range i1,i2,i3 i3 4 NULL 95 Using where -explain select * from t0 where -(key1 < 3 or key2 < 3) and (key3 < 1000); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ALL i1,i2,i3 NULL NULL NULL 1024 Using where -explain select * from t0 where -((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) -or -key2 > 5; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ALL i1,i2,i3 NULL NULL NULL 1024 Using where -explain select * from t0 where -((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) -or -key1 < 7; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where -select * from t0 where -((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) -or -key1 < 7; 
-key1 key2 key3 key4 key5 key6 key7 key8 -1 1 1 1 1 1 1 1023 -2 2 2 2 2 2 2 1022 -3 3 3 3 3 3 3 1021 -4 4 4 4 4 4 4 1020 -5 5 5 5 5 5 5 1019 -6 6 6 6 6 6 6 1018 -explain select * from t0 where -((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4)) -or -((key5 < 5 or key6 < 6) and (key7 <7 or key8 < 4)); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i1,i2,i5,i6 4,4,4,4 NULL 19 Using sort_union(i1,i2,i5,i6); Using where -explain select * from t0 where -((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) -or -((key7 <7 or key8 < 4) and (key5 < 5 or key6 < 6)); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i3,i5,i7,i8 4,4,4,4 NULL 20 Using sort_union(i3,i5,i7,i8); Using where -explain select * from t0 where -((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) -or -((key3 <7 or key5 < 2) and (key5 < 5 or key6 < 6)); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 4,4 NULL 11 Using sort_union(i3,i5); Using where -explain select * from t0 where -((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) -or -(((key3 <7 and key7 < 6) or key5 < 2) and (key5 < 5 or key6 < 6)); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7 i3,i5 4,4 NULL 11 Using sort_union(i3,i5); Using where -explain select * from t0 where -((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) -or -((key3 >=5 or key5 < 2) and (key5 < 5 or key6 < 6)); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ALL i1,i2,i3,i5,i6 NULL NULL NULL 1024 Using where -explain select * from t0 force index(i1, i2, i3, i4, i5, i6 ) where -((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) -or -((key3 >=5 or key5 < 2) and (key5 < 5 or key6 < 6)); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 0,4 NULL 1024 Using sort_union(i3,i5); Using where -select * from t0 where key1 < 5 or key8 < 4 order by key1; -key1 key2 key3 key4 key5 key6 key7 key8 -1 1 1 1 1 1 1 1023 -2 2 2 2 2 2 2 1022 -3 3 3 3 3 3 3 1021 -4 4 4 4 4 4 4 1020 -1021 1021 1021 1021 1021 1021 1021 3 -1022 1022 1022 1022 1022 1022 1022 2 -1023 1023 1023 1023 1023 1023 1023 1 -1024 1024 1024 1024 1024 1024 1024 0 -explain -select * from t0 where key1 < 5 or key8 < 4 order by key1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i8 i1,i8 4,4 NULL 9 Using sort_union(i1,i8); Using where; Using filesort -create table t2 like t0; -insert into t2 select * from t0; -alter table t2 add index i1_3(key1, key3); -alter table t2 add index i2_3(key2, key3); -alter table t2 drop index i1; -alter table t2 drop index i2; -alter table t2 add index i321(key3, key2, key1); -explain select key3 from t2 where key1 = 100 or key2 = 100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL 2 Using sort_union(i1_3,i2_3); Using where -explain select key3 from t2 where key1 <100 or key2 < 100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 index i1_3,i2_3 i321 12 NULL 1024 Using where; Using index -explain select key7 from t2 where key1 <100 or key2 < 100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL i1_3,i2_3 NULL NULL NULL 1024 Using where -create table t4 ( -key1a int not null, -key1b int 
not null, -key2 int not null, -key2_1 int not null, -key2_2 int not null, -key3 int not null, -index i1a (key1a, key1b), -index i1b (key1b, key1a), -index i2_1(key2, key2_1), -index i2_2(key2, key2_1) -); -insert into t4 select key1,key1,key1 div 10, key1 % 10, key1 % 10, key1 from t0; -select * from t4 where key1a = 3 or key1b = 4; -key1a key1b key2 key2_1 key2_2 key3 -3 3 0 3 3 3 -4 4 0 4 4 4 -explain select * from t4 where key1a = 3 or key1b = 4; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 index_merge i1a,i1b i1a,i1b 4,4 NULL 2 Using sort_union(i1a,i1b); Using where -explain select * from t4 where key2 = 1 and (key2_1 = 1 or key3 = 5); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 10 Using where -explain select * from t4 where key2 = 1 and (key2_1 = 1 or key2_2 = 5); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 10 Using where -explain select * from t4 where key2_1 = 1 or key2_2 = 5; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 ALL NULL NULL NULL NULL 1024 Using where -create table t1 like t0; -insert into t1 select * from t0; -explain select * from t0 left join t1 on (t0.key1=t1.key1) -where t0.key1=3 or t0.key2=4; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where -1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 -select * from t0 left join t1 on (t0.key1=t1.key1) -where t0.key1=3 or t0.key2=4; -key1 key2 key3 key4 key5 key6 key7 key8 key1 key2 key3 key4 key5 key6 key7 key8 -3 3 3 3 3 3 3 1021 3 3 3 3 3 3 3 1021 -4 4 4 4 4 4 4 1020 4 4 4 4 4 4 4 1020 -explain -select * from t0,t1 where (t0.key1=t1.key1) and ( t0.key1=3 or t0.key2=4); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where -1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 -explain -select * from t0,t1 where (t0.key1=t1.key1) and -(t0.key1=3 or t0.key2=4) and t1.key1<200; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where -1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 -explain -select * from t0,t1 where (t0.key1=t1.key1) and -(t0.key1=3 or t0.key2<4) and t1.key1=2; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ref i1,i2 i1 4 const 1 Using where -1 SIMPLE t1 ref i1 i1 4 const 1 -explain select * from t0,t1 where t0.key1 = 5 and -(t1.key1 = t0.key1 or t1.key8 = t0.key1); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 ref i1 i1 4 const 1 -1 SIMPLE t1 index_merge i1,i8 i1,i8 4,4 NULL 2 Using union(i1,i8); Using where -explain select * from t0,t1 where t0.key1 < 3 and -(t1.key1 = t0.key1 or t1.key8 = t0.key1); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 range i1 i1 4 NULL 3 Using where -1 SIMPLE t1 ALL i1,i8 NULL NULL NULL 1024 Range checked for each record (index map: 0x81) -explain select * from t1 where key1=3 or key2=4 -union select * from t1 where key1<4 or key3=5; -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where -2 UNION t1 index_merge i1,i3 i1,i3 4,4 NULL 5 Using sort_union(i1,i3); Using where -NULL UNION RESULT ALL NULL NULL NULL NULL NULL -explain select * from (select * from t1 where key1 = 
3 or key2 =3) as Z where key8 >5; -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY system NULL NULL NULL NULL 1 -2 DERIVED t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where -create table t3 like t0; -insert into t3 select * from t0; -alter table t3 add key9 int not null, add index i9(key9); -alter table t3 add keyA int not null, add index iA(keyA); -alter table t3 add keyB int not null, add index iB(keyB); -alter table t3 add keyC int not null, add index iC(keyC); -update t3 set key9=key1,keyA=key1,keyB=key1,keyC=key1; -explain select * from t3 where -key1=1 or key2=2 or key3=3 or key4=4 or -key5=5 or key6=6 or key7=7 or key8=8 or -key9=9 or keyA=10 or keyB=11 or keyC=12; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t3 index_merge i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC 4,4,4,4,4,4,4,4,4,4,4,4 NULL 12 Using union(i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC); Using where -select * from t3 where -key1=1 or key2=2 or key3=3 or key4=4 or -key5=5 or key6=6 or key7=7 or key8=8 or -key9=9 or keyA=10 or keyB=11 or keyC=12; -key1 key2 key3 key4 key5 key6 key7 key8 key9 keyA keyB keyC -1 1 1 1 1 1 1 1023 1 1 1 1 -2 2 2 2 2 2 2 1022 2 2 2 2 -3 3 3 3 3 3 3 1021 3 3 3 3 -4 4 4 4 4 4 4 1020 4 4 4 4 -5 5 5 5 5 5 5 1019 5 5 5 5 -6 6 6 6 6 6 6 1018 6 6 6 6 -7 7 7 7 7 7 7 1017 7 7 7 7 -9 9 9 9 9 9 9 1015 9 9 9 9 -10 10 10 10 10 10 10 1014 10 10 10 10 -11 11 11 11 11 11 11 1013 11 11 11 11 -12 12 12 12 12 12 12 1012 12 12 12 12 -1016 1016 1016 1016 1016 1016 1016 8 1016 1016 1016 1016 -explain select * from t0 where key1 < 3 or key2 < 4; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where -select * from t0 where key1 < 3 or key2 < 4; -key1 key2 key3 key4 key5 key6 key7 key8 -1 1 1 1 1 1 1 1023 -2 2 2 2 2 2 2 1022 -3 3 3 3 3 3 3 1021 -update t0 set key8=123 where key1 < 3 or key2 < 4; -select * from t0 where key1 < 3 or key2 < 4; -key1 key2 key3 key4 key5 key6 key7 key8 -1 1 1 1 1 1 1 123 -2 2 2 2 2 2 2 123 -3 3 3 3 3 3 3 123 -delete from t0 where key1 < 3 or key2 < 4; -select * from t0 where key1 < 3 or key2 < 4; -key1 key2 key3 key4 key5 key6 key7 key8 -select count(*) from t0; -count(*) -1021 -drop table t4; -create table t4 (a int); -insert into t4 values (1),(4),(3); -set @save_join_buffer_size=@@join_buffer_size; -set join_buffer_size= 4000; -explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -from t0 as A force index(i1,i2), t0 as B force index (i1,i2) -where (A.key1 < 500000 or A.key2 < 3) -and (B.key1 < 500000 or B.key2 < 3); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1013 Using sort_union(i1,i2); Using where -1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1013 Using sort_union(i1,i2); Using where -select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -from t0 as A force index(i1,i2), t0 as B force index (i1,i2) -where (A.key1 < 500000 or A.key2 < 3) -and (B.key1 < 500000 or B.key2 < 3); -max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -10240 -update t0 set key1=1; -explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -from t0 as A force index(i1,i2), t0 as B force index (i1,i2) -where (A.key1 = 1 or A.key2 = 1) -and (B.key1 = 1 or 
B.key2 = 1); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1020 Using union(i1,i2); Using where -1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1020 Using union(i1,i2); Using where -select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -from t0 as A force index(i1,i2), t0 as B force index (i1,i2) -where (A.key1 = 1 or A.key2 = 1) -and (B.key1 = 1 or B.key2 = 1); -max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -8194 -alter table t0 add filler1 char(200), add filler2 char(200), add filler3 char(200); -update t0 set key2=1, key3=1, key4=1, key5=1,key6=1,key7=1 where key7 < 500; -explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -from t0 as A, t0 as B -where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1) -and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE A index_merge i1,i2,i3,i4,i5,i6,i7?,i8 i2,i3,i4,i5,i6,i7?,i8 X NULL # Using union(intersect(i2,i3,i4,i5,i6,i7?),i8); Using where -1 SIMPLE B index_merge i1,i2,i3,i4,i5,i6,i7?,i8 i2,i3,i4,i5,i6,i7?,i8 X NULL # Using union(intersect(i2,i3,i4,i5,i6,i7?),i8); Using where -select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -from t0 as A, t0 as B -where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1) -and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1); -max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) -8186 -set join_buffer_size= @save_join_buffer_size; -drop table t0, t1, t2, t3, t4; -CREATE TABLE t1 ( -cola char(3) not null, colb char(3) not null, filler char(200), -key(cola), key(colb) -); -INSERT INTO t1 VALUES ('foo','bar', 'ZZ'),('fuz','baz', 'ZZ'); -OPTIMIZE TABLE t1; -Table Op Msg_type Msg_text -test.t1 optimize status OK -select count(*) from t1; -count(*) -8704 -explain select * from t1 WHERE cola = 'foo' AND colb = 'bar'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge cola,colb cola,colb 3,3 NULL 32 Using intersect(cola,colb); Using where -explain select * from t1 force index(cola,colb) WHERE cola = 'foo' AND colb = 'bar'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge cola,colb cola,colb 3,3 NULL 32 Using intersect(cola,colb); Using where -drop table t1; -create table t0 (a int); -insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1 ( -a int, b int, -filler1 char(200), filler2 char(200), -key(a),key(b) -); -insert into t1 select @v:= A.a, @v, 't1', 'filler2' from t0 A, t0 B, t0 C; -create table t2 like t1; -create table t3 ( -a int, b int, -filler1 char(200), filler2 char(200), -key(a),key(b) -) engine=merge union=(t1,t2); -explain select * from t1 where a=1 and b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge a,b a,b 5,5 NULL # Using intersect(a,b); Using where -explain select * from t3 where a=1 and b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t3 index_merge a,b a,b 5,5 NULL # Using intersect(a,b); Using where 
-drop table t3; -drop table t0, t1, t2; diff --git a/mysql-test/r/index_merge_innodb.result b/mysql-test/r/index_merge_innodb.result index 5af3e291c87..588de70e6e5 100644 --- a/mysql-test/r/index_merge_innodb.result +++ b/mysql-test/r/index_merge_innodb.result @@ -1,11 +1,13 @@ +#---------------- Index merge test 2 ------------------------------------------- +SET SESSION STORAGE_ENGINE = InnoDB; drop table if exists t1,t2; create table t1 ( -key1 int not null, -key2 int not null, +key1 int not null, +key2 int not null, INDEX i1(key1), INDEX i2(key2) -) engine=innodb; +); explain select * from t1 where key1 < 5 or key2 > 197; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 8 Using sort_union(i1,i2); Using where @@ -26,7 +28,7 @@ key1 key2 2 198 3 197 4 196 -alter table t1 add str1 char (255) not null, +alter table t1 add str1 char (255) not null, add zeroval int not null default 0, add str2 char (255) not null, add str3 char (255) not null; @@ -60,7 +62,7 @@ key2 integer not null, filler char (200), index (key1), index (key2) -) engine=innodb; +); show warnings; Level Code Message explain select pk from t1 where key1 = 1 and key2 = 1; @@ -91,14 +93,14 @@ filler1 char (200), index i1(key1a, key1b), index i2(key2a, key2b), index i3(key3a, key3b) -) engine=innodb; +); create table t2 (a int); insert into t2 values (0),(1),(2),(3),(4),(NULL); -insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) select A.a, B.a, C.a, D.a, C.a, D.a from t2 A,t2 B,t2 C, t2 D; -insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) select key1a, key1b, key2a, key2b, key3a, key3b from t1; -insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) select key1a, key1b, key2a, key2b, key3a, key3b from t1; analyze table t1; Table Op Msg_type Msg_text @@ -106,19 +108,19 @@ test.t1 analyze status OK select count(*) from t1; count(*) 5184 -explain select count(*) from t1 where +explain select count(*) from t1 where key1a = 2 and key1b is null and key2a = 2 and key2b is null; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index_merge i1,i2 i1,i2 10,10 NULL 4 Using intersect(i1,i2); Using where; Using index -select count(*) from t1 where +select count(*) from t1 where key1a = 2 and key1b is null and key2a = 2 and key2b is null; count(*) 4 -explain select count(*) from t1 where +explain select count(*) from t1 where key1a = 2 and key1b is null and key3a = 2 and key3b is null; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index_merge i1,i3 i1,i3 10,10 NULL 4 Using intersect(i1,i3); Using where; Using index -select count(*) from t1 where +select count(*) from t1 where key1a = 2 and key1b is null and key3a = 2 and key3b is null; count(*) 4 @@ -127,8 +129,8 @@ create table t1 ( id1 int, id2 date , index idx2 (id1,id2), -index idx1 (id2) -) engine = innodb; +index idx1 (id2) +); insert into t1 values(1,'20040101'), (2,'20040102'); select * from t1 where id1 = 1 and id2= '20040101'; id1 id2 @@ -147,8 +149,8 @@ PRIMARY KEY (`oid`), KEY `fk_bbk_niederlassung` (`fk_bbk_niederlassung`), KEY `fk_wochentag` (`fk_wochentag`), KEY `ix_version` (`version`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -insert into t1 values +) DEFAULT CHARSET=latin1; +insert into t1 values (1, 38, 1, '08:00:00', '13:00:00', 0, 1), (2, 38, 2, '08:00:00', 
'13:00:00', 0, 1), (3, 38, 3, '08:00:00', '13:00:00', 0, 1), @@ -174,25 +176,25 @@ insert into t1 values (23, 7, 3, '08:00:00', '13:00:00', 0, 1), (24, 7, 4, '08:00:00', '13:00:00', 0, 1), (25, 7, 5, '08:00:00', '13:00:00', 0, 1); -create view v1 as -select -zeit1.oid AS oid, +create view v1 as +select +zeit1.oid AS oid, zeit1.fk_bbk_niederlassung AS fk_bbk_niederlassung, zeit1.fk_wochentag AS fk_wochentag, -zeit1.uhrzeit_von AS uhrzeit_von, -zeit1.uhrzeit_bis AS uhrzeit_bis, +zeit1.uhrzeit_von AS uhrzeit_von, +zeit1.uhrzeit_bis AS uhrzeit_bis, zeit1.geloescht AS geloescht, zeit1.version AS version -from +from t1 zeit1 -where -(zeit1.version = +where +(zeit1.version = (select max(zeit2.version) AS `max(version)` - from t1 zeit2 -where -((zeit1.fk_bbk_niederlassung = zeit2.fk_bbk_niederlassung) and -(zeit1.fk_wochentag = zeit2.fk_wochentag) and -(zeit1.uhrzeit_von = zeit2.uhrzeit_von) and + from t1 zeit2 +where +((zeit1.fk_bbk_niederlassung = zeit2.fk_bbk_niederlassung) and +(zeit1.fk_wochentag = zeit2.fk_wochentag) and +(zeit1.uhrzeit_von = zeit2.uhrzeit_von) and (zeit1.uhrzeit_bis = zeit2.uhrzeit_bis) ) ) @@ -213,7 +215,7 @@ filler2 char(250) default NULL, PRIMARY KEY (t_cpac,t_vers,t_rele,t_cust), UNIQUE KEY IX_4 (t_cust,t_cpac,t_vers,t_rele), KEY IX_5 (t_vers,t_rele,t_cust) -) ENGINE=InnoDB; +); insert into t1 values ('tm','2.5 ','a ',' ','',''), ('tm','2.5U','a ','stnd','',''), ('da','3.3 ','b ',' ','',''), ('da','3.3U','b ','stnd','',''), @@ -275,9 +277,9 @@ primary key (pk), key idx1(a,b,c), key idx2(c), key idx3(kp1,kp2,kp3,kp4,kp5) -) engine=innodb default charset=latin1; +) default charset=latin1; set @fill=NULL; -SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND +SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND kp1='279' AND kp2='ELM0678' AND kp3='6' AND kp4='10' AND kp5 = 'R '; COUNT(*) 1 @@ -309,7 +311,7 @@ alter table t1 add index i3(key3); update t1 set key2=key1,key3=key1; explain select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge i2,i3 i3,i2 4,4 NULL 11 Using sort_union(i3,i2); Using where +1 SIMPLE t1 index_merge i2,i3 i3,i2 4,4 NULL 9 Using sort_union(i3,i2); Using where select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); key1 key2 key3 31 31 31 @@ -322,3 +324,260 @@ key1 key2 key3 38 38 38 39 39 39 drop table t1; +#---------------- 2-sweeps read Index merge test 2 ------------------------------- +SET SESSION STORAGE_ENGINE = InnoDB; +drop table if exists t1; +create table t1 ( +pk int primary key, +key1 int, +key2 int, +filler char(200), +filler2 char(200), +index(key1), +index(key2) +); +select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 ); +pk key1 key2 filler filler2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +9 9 9 filler-data filler-data-2 +10 10 10 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +5 5 5 filler-data filler-data-2 +6 6 6 filler-data filler-data-2 +7 7 7 filler-data filler-data-2 +8 8 8 filler-data filler-data-2 +set @maxv=1000; +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or key1=18 or key1=60; +pk key1 key2 filler filler2 +18 18 18 filler-data filler-data-2 +60 60 60 filler-data filler-data-2 +1 1 1 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 
12 12 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or key1 < 3 or key1 > @maxv-11; +pk key1 key2 filler filler2 +990 990 990 filler-data filler-data-2 +1 1 1 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or +(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10); +pk key1 key2 filler filler2 +1 1 1 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +select * from t1 where +(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) +or +(key1 < 5) or (key1 > @maxv-10); +pk key1 key2 filler filler2 +1 1 1 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +13 
13 13 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +drop table t1; +#---------------- Clustered PK ROR-index_merge tests ----------------------------- +SET SESSION STORAGE_ENGINE = InnoDB; +drop table if exists t1; +create table t1 +( +pk1 int not null, +pk2 int not null, +key1 int not null, +key2 int not null, +pktail1ok int not null, +pktail2ok int not null, +pktail3bad int not null, +pktail4bad int not null, +pktail5bad int not null, +pk2copy int not null, +badkey int not null, +filler1 char (200), +filler2 char (200), +key (key1), +key (key2), +/* keys with tails from CPK members */ +key (pktail1ok, pk1), +key (pktail2ok, pk1, pk2), +key (pktail3bad, pk2, pk1), +key (pktail4bad, pk1, pk2copy), +key (pktail5bad, pk1, pk2, pk2copy), +primary key (pk1, pk2) +); +explain select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range PRIMARY,key1 PRIMARY 8 NULL 9 Using where +select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; +pk1 pk2 key1 key2 pktail1ok pktail2ok pktail3bad pktail4bad pktail5bad pk2copy badkey filler1 filler2 +1 10 0 0 0 0 0 0 0 10 0 filler-data-10 filler2 +1 11 0 0 0 0 0 0 0 11 0 filler-data-11 filler2 +1 12 0 0 0 0 0 0 0 12 0 filler-data-12 filler2 +1 13 0 0 0 0 0 0 0 13 0 filler-data-13 filler2 +1 14 0 0 0 0 0 0 0 14 0 filler-data-14 filler2 +1 15 0 0 0 0 0 0 0 15 0 filler-data-15 filler2 +1 16 0 0 0 0 0 0 0 16 0 filler-data-16 filler2 +1 17 0 0 0 0 0 0 0 17 0 filler-data-17 filler2 +1 18 0 0 0 0 0 0 0 18 0 filler-data-18 filler2 +1 19 0 0 0 0 0 0 0 19 0 filler-data-19 filler2 +explain select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 4,4 NULL 1 Using intersect(key1,key2); Using where; Using index +select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; +pk1 pk2 +95 50 +95 51 +95 52 +95 53 +95 54 +95 55 +95 56 +95 57 +95 58 +95 59 +explain select * from t1 where badkey=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 4 const 100 Using where +explain select * from t1 where pk1 < 7500 and key1 = 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge PRIMARY,key1 key1,PRIMARY 4,4 NULL ROWS Using intersect(key1,PRIMARY); Using where +explain select * from t1 where pktail1ok=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,pktail1ok key1,pktail1ok 4,4 NULL 1 Using intersect(key1,pktail1ok); Using where +explain select * from t1 where pktail2ok=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,pktail2ok key1,pktail2ok 4,4 NULL 1 Using intersect(key1,pktail2ok); Using where +explain select * from t1 where (pktail2ok=1 and pk1< 50000) or key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge PRIMARY,key1,pktail2ok pktail2ok,key1 8,4 NULL 199 Using sort_union(pktail2ok,key1); Using where +explain select * from t1 where pktail3bad=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail3bad key1 4 const 100 Using where +explain 
select * from t1 where pktail4bad=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail4bad key1 4 const 100 Using where +explain select * from t1 where pktail5bad=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail5bad key1 4 const 100 Using where +explain select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 4,4 NULL 1 Using intersect(key1,key2); Using where; Using index +select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; +pk1 pk2 key1 key2 +95 50 10 10 +95 51 10 10 +95 52 10 10 +95 53 10 10 +95 54 10 10 +95 55 10 10 +95 56 10 10 +95 57 10 10 +95 58 10 10 +95 59 10 10 +drop table t1; +create table t1 +( +RUNID varchar(22), +SUBMITNR varchar(5), +ORDERNR char(1), +PROGRAMM varchar(8), +TESTID varchar(4), +UCCHECK char(1), +ETEXT varchar(80), +ETEXT_TYPE char(1), +INFO char(1), +SEVERITY tinyint(3), +TADIRFLAG char(1), +PRIMARY KEY (RUNID,SUBMITNR,ORDERNR,PROGRAMM,TESTID,UCCHECK), +KEY `TVERM~KEY` (PROGRAMM,TESTID,UCCHECK) +) DEFAULT CHARSET=latin1; +update t1 set `ETEXT` = '', `ETEXT_TYPE`='', `INFO`='', `SEVERITY`='', `TADIRFLAG`='' +WHERE +`RUNID`= '' AND `SUBMITNR`= '' AND `ORDERNR`='' AND `PROGRAMM`='' AND +`TESTID`='' AND `UCCHECK`=''; +drop table t1; diff --git a/mysql-test/r/index_merge_innodb2.result b/mysql-test/r/index_merge_innodb2.result deleted file mode 100644 index 91dd989fe90..00000000000 --- a/mysql-test/r/index_merge_innodb2.result +++ /dev/null @@ -1,136 +0,0 @@ -drop table if exists t1; -create table t1 ( -pk int primary key, -key1 int, -key2 int, -filler char(200), -filler2 char(200), -index(key1), -index(key2) -) engine=innodb; -select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 ); -pk key1 key2 filler filler2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -9 9 9 filler-data filler-data-2 -10 10 10 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -5 5 5 filler-data filler-data-2 -6 6 6 filler-data filler-data-2 -7 7 7 filler-data filler-data-2 -8 8 8 filler-data filler-data-2 -set @maxv=1000; -select * from t1 where -(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) -or key1=18 or key1=60; -pk key1 key2 filler filler2 -18 18 18 filler-data filler-data-2 -60 60 60 filler-data filler-data-2 -1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -select * from t1 where -(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) -or key1 < 3 or key1 > @maxv-11; -pk key1 key2 filler filler2 -990 990 990 filler-data filler-data-2 
-1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -select * from t1 where -(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) -or -(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10); -pk key1 key2 filler filler2 -1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -select * from t1 where -(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) -or -(key1 < 5) or (key1 > @maxv-10); -pk key1 key2 filler filler2 -1 1 1 filler-data filler-data-2 -2 2 2 filler-data filler-data-2 -3 3 3 filler-data filler-data-2 -4 4 4 filler-data filler-data-2 -991 991 991 filler-data filler-data-2 -992 992 992 filler-data filler-data-2 -993 993 993 filler-data filler-data-2 -994 994 994 filler-data filler-data-2 -995 995 995 filler-data filler-data-2 -996 996 996 filler-data filler-data-2 -997 997 997 filler-data filler-data-2 -998 998 998 filler-data filler-data-2 -999 999 999 filler-data filler-data-2 -1000 1000 1000 filler-data filler-data-2 -11 11 11 filler-data filler-data-2 -12 12 12 filler-data filler-data-2 -13 13 13 filler-data filler-data-2 -14 14 14 filler-data filler-data-2 -50 50 50 filler-data filler-data-2 -51 51 51 filler-data filler-data-2 -52 52 52 filler-data filler-data-2 -53 53 53 filler-data filler-data-2 -54 54 54 filler-data filler-data-2 -drop table t1; diff --git a/mysql-test/r/index_merge_myisam.result b/mysql-test/r/index_merge_myisam.result new file mode 100644 index 00000000000..1c60e53a335 --- /dev/null +++ b/mysql-test/r/index_merge_myisam.result @@ -0,0 +1,1281 @@ +#---------------- Index merge test 1 ------------------------------------------- +SET SESSION STORAGE_ENGINE = MyISAM; +drop table if exists t0, t1, t2, t3, t4; +create table t0 +( +key1 int not null, +INDEX i1(key1) +); +alter table t0 add key2 int not null, add index i2(key2); +alter table t0 add key3 int not null, add index i3(key3); +alter table t0 add key4 int not null, add index 
i4(key4); +alter table t0 add key5 int not null, add index i5(key5); +alter table t0 add key6 int not null, add index i6(key6); +alter table t0 add key7 int not null, add index i7(key7); +alter table t0 add key8 int not null, add index i8(key8); +update t0 set key2=key1,key3=key1,key4=key1,key5=key1,key6=key1,key7=key1,key8=1024-key1; +analyze table t0; +Table Op Msg_type Msg_text +test.t0 analyze status OK +explain select * from t0 where key1 < 3 or key1 > 1020; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1 i1 4 NULL 78 Using where +explain +select * from t0 where key1 < 3 or key2 > 1020; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 45 Using sort_union(i1,i2); Using where +select * from t0 where key1 < 3 or key2 > 1020; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +1021 1021 1021 1021 1021 1021 1021 3 +1022 1022 1022 1022 1022 1022 1022 2 +1023 1023 1023 1023 1023 1023 1023 1 +1024 1024 1024 1024 1024 1024 1024 0 +explain select * from t0 where key1 < 3 or key2 <4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where +explain +select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 11 Using sort_union(i1,i2); Using where +select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); +key1 key2 key3 key4 key5 key6 key7 key8 +31 31 31 31 31 31 31 993 +32 32 32 32 32 32 32 992 +33 33 33 33 33 33 33 991 +34 34 34 34 34 34 34 990 +35 35 35 35 35 35 35 989 +36 36 36 36 36 36 36 988 +37 37 37 37 37 37 37 987 +38 38 38 38 38 38 38 986 +39 39 39 39 39 39 39 985 +explain select * from t0 ignore index (i2) where key1 < 3 or key2 <4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1 NULL NULL NULL 1024 Using where +explain select * from t0 where (key1 < 3 or key2 <4) and key3 = 50; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ref i1,i2,i3 i3 4 const 1 Using where +explain select * from t0 use index (i1,i2) where (key1 < 3 or key2 <4) and key3 = 50; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where +explain select * from t0 where (key1 > 1 or key2 > 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where +explain select * from t0 force index (i1,i2) where (key1 > 1 or key2 > 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 1024 Using sort_union(i1,i2); Using where +explain +select * from t0 where key1<3 or key2<3 or (key1>5 and key1<8) or +(key1>10 and key1<12) or (key2>100 and key2<110); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 17 Using sort_union(i1,i2); Using where +explain select * from t0 where key2 = 45 or key1 <=> null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1,i2 i2 4 NULL 1 Using where +explain select * from t0 where key2 = 45 or key1 is not null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where 
+explain select * from t0 where key2 = 45 or key1 is null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ref i2 i2 4 const 1 +explain select * from t0 where key2=10 or key3=3 or key4 <=> null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i2,i3,i4 i2,i3 4,4 NULL 2 Using union(i2,i3); Using where +explain select * from t0 where key2=10 or key3=3 or key4 is null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i2,i3 i2,i3 4,4 NULL 2 Using union(i2,i3); Using where +explain select key1 from t0 where (key1 <=> null) or (key2 < 5) or +(key3=10) or (key4 <=> null); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i4 i2,i3 4,4 NULL 6 Using sort_union(i2,i3); Using where +explain select key1 from t0 where (key1 <=> null) or (key1 < 5) or +(key3=10) or (key4 <=> null); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i3,i4 i1,i3 4,4 NULL 6 Using sort_union(i1,i3); Using where +explain select * from t0 where +(key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 5 or key6 < 5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where +explain +select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 9 Using sort_union(i1,i2); Using where +select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4); +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +3 3 3 3 3 3 3 1021 +4 4 4 4 4 4 4 1020 +5 5 5 5 5 5 5 1019 +explain select * from t0 where +(key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 2 or key6 < 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where +explain select * from t0 where +(key1 < 3 or key2 < 3) and (key3 < 100); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1,i2,i3 i3 4 NULL 95 Using where +explain select * from t0 where +(key1 < 3 or key2 < 3) and (key3 < 1000); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2,i3 NULL NULL NULL 1024 Using where +explain select * from t0 where +((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) +or +key2 > 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2,i3 NULL NULL NULL 1024 Using where +explain select * from t0 where +((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) +or +key1 < 7; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where +select * from t0 where +((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4)) +or +key1 < 7; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +3 3 3 3 3 3 3 1021 +4 4 4 4 4 4 4 1020 +5 5 5 5 5 5 5 1019 +6 6 6 6 6 6 6 1018 +explain select * from t0 where +((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4)) +or +((key5 < 5 or key6 < 6) and (key7 <7 or key8 < 4)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i1,i2,i5,i6 4,4,4,4 NULL 19 Using 
sort_union(i1,i2,i5,i6); Using where +explain select * from t0 where +((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) +or +((key7 <7 or key8 < 4) and (key5 < 5 or key6 < 6)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i3,i5,i7,i8 4,4,4,4 NULL 20 Using sort_union(i3,i5,i7,i8); Using where +explain select * from t0 where +((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) +or +((key3 <7 or key5 < 2) and (key5 < 5 or key6 < 6)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 4,4 NULL 11 Using sort_union(i3,i5); Using where +explain select * from t0 where +((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) +or +(((key3 <7 and key7 < 6) or key5 < 2) and (key5 < 5 or key6 < 6)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7 i3,i5 4,4 NULL 11 Using sort_union(i3,i5); Using where +explain select * from t0 where +((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) +or +((key3 >=5 or key5 < 2) and (key5 < 5 or key6 < 6)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2,i3,i5,i6 NULL NULL NULL 1024 Using where +explain select * from t0 force index(i1, i2, i3, i4, i5, i6 ) where +((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) +or +((key3 >=5 or key5 < 2) and (key5 < 5 or key6 < 6)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 0,4 NULL 1024 Using sort_union(i3,i5); Using where +select * from t0 where key1 < 5 or key8 < 4 order by key1; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +3 3 3 3 3 3 3 1021 +4 4 4 4 4 4 4 1020 +1021 1021 1021 1021 1021 1021 1021 3 +1022 1022 1022 1022 1022 1022 1022 2 +1023 1023 1023 1023 1023 1023 1023 1 +1024 1024 1024 1024 1024 1024 1024 0 +explain +select * from t0 where key1 < 5 or key8 < 4 order by key1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i8 i1,i8 4,4 NULL 9 Using sort_union(i1,i8); Using where; Using filesort +create table t2 like t0; +insert into t2 select * from t0; +alter table t2 add index i1_3(key1, key3); +alter table t2 add index i2_3(key2, key3); +alter table t2 drop index i1; +alter table t2 drop index i2; +alter table t2 add index i321(key3, key2, key1); +explain select key3 from t2 where key1 = 100 or key2 = 100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL 2 Using sort_union(i1_3,i2_3); Using where +explain select key3 from t2 where key1 <100 or key2 < 100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index i1_3,i2_3 i321 12 NULL 1024 Using where; Using index +explain select key7 from t2 where key1 <100 or key2 < 100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL i1_3,i2_3 NULL NULL NULL 1024 Using where +create table t4 ( +key1a int not null, +key1b int not null, +key2 int not null, +key2_1 int not null, +key2_2 int not null, +key3 int not null, +index i1a (key1a, key1b), +index i1b (key1b, key1a), +index i2_1(key2, key2_1), +index i2_2(key2, key2_1) +); +insert into t4 select key1,key1,key1 div 10, key1 % 10, key1 % 10, key1 from t0; +select * from t4 where key1a = 3 or key1b = 4; +key1a key1b key2 key2_1 key2_2 key3 +3 3 0 3 3 3 +4 4 0 4 4 4 +explain select * from t4 where key1a = 3 or key1b = 
4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 index_merge i1a,i1b i1a,i1b 4,4 NULL 2 Using sort_union(i1a,i1b); Using where +explain select * from t4 where key2 = 1 and (key2_1 = 1 or key3 = 5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 10 Using where +explain select * from t4 where key2 = 1 and (key2_1 = 1 or key2_2 = 5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 10 Using where +explain select * from t4 where key2_1 = 1 or key2_2 = 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ALL NULL NULL NULL NULL 1024 Using where +create table t1 like t0; +insert into t1 select * from t0; +explain select * from t0 left join t1 on (t0.key1=t1.key1) +where t0.key1=3 or t0.key2=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 +select * from t0 left join t1 on (t0.key1=t1.key1) +where t0.key1=3 or t0.key2=4; +key1 key2 key3 key4 key5 key6 key7 key8 key1 key2 key3 key4 key5 key6 key7 key8 +3 3 3 3 3 3 3 1021 3 3 3 3 3 3 3 1021 +4 4 4 4 4 4 4 1020 4 4 4 4 4 4 4 1020 +explain +select * from t0,t1 where (t0.key1=t1.key1) and ( t0.key1=3 or t0.key2=4); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 +explain +select * from t0,t1 where (t0.key1=t1.key1) and +(t0.key1=3 or t0.key2=4) and t1.key1<200; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where +1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 +explain +select * from t0,t1 where (t0.key1=t1.key1) and +(t0.key1=3 or t0.key2<4) and t1.key1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ref i1,i2 i1 4 const 1 Using where +1 SIMPLE t1 ref i1 i1 4 const 1 +explain select * from t0,t1 where t0.key1 = 5 and +(t1.key1 = t0.key1 or t1.key8 = t0.key1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ref i1 i1 4 const 1 +1 SIMPLE t1 index_merge i1,i8 i1,i8 4,4 NULL 2 Using union(i1,i8); Using where +explain select * from t0,t1 where t0.key1 < 3 and +(t1.key1 = t0.key1 or t1.key8 = t0.key1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1 i1 4 NULL 3 Using where +1 SIMPLE t1 ALL i1,i8 NULL NULL NULL 1024 Range checked for each record (index map: 0x81) +explain select * from t1 where key1=3 or key2=4 +union select * from t1 where key1<4 or key3=5; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +2 UNION t1 index_merge i1,i3 i1,i3 4,4 NULL 5 Using sort_union(i1,i3); Using where +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain select * from (select * from t1 where key1 = 3 or key2 =3) as Z where key8 >5; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY system NULL NULL NULL NULL 1 +2 DERIVED t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +create table t3 like t0; +insert into t3 select * from t0; +alter table t3 add key9 int not null, add index i9(key9); +alter table t3 add keyA int not null, add index iA(keyA); +alter table t3 add keyB int not null, add index 
iB(keyB); +alter table t3 add keyC int not null, add index iC(keyC); +update t3 set key9=key1,keyA=key1,keyB=key1,keyC=key1; +explain select * from t3 where +key1=1 or key2=2 or key3=3 or key4=4 or +key5=5 or key6=6 or key7=7 or key8=8 or +key9=9 or keyA=10 or keyB=11 or keyC=12; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 index_merge i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC 4,4,4,4,4,4,4,4,4,4,4,4 NULL 12 Using union(i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC); Using where +select * from t3 where +key1=1 or key2=2 or key3=3 or key4=4 or +key5=5 or key6=6 or key7=7 or key8=8 or +key9=9 or keyA=10 or keyB=11 or keyC=12; +key1 key2 key3 key4 key5 key6 key7 key8 key9 keyA keyB keyC +1 1 1 1 1 1 1 1023 1 1 1 1 +2 2 2 2 2 2 2 1022 2 2 2 2 +3 3 3 3 3 3 3 1021 3 3 3 3 +4 4 4 4 4 4 4 1020 4 4 4 4 +5 5 5 5 5 5 5 1019 5 5 5 5 +6 6 6 6 6 6 6 1018 6 6 6 6 +7 7 7 7 7 7 7 1017 7 7 7 7 +9 9 9 9 9 9 9 1015 9 9 9 9 +10 10 10 10 10 10 10 1014 10 10 10 10 +11 11 11 11 11 11 11 1013 11 11 11 11 +12 12 12 12 12 12 12 1012 12 12 12 12 +1016 1016 1016 1016 1016 1016 1016 8 1016 1016 1016 1016 +explain select * from t0 where key1 < 3 or key2 < 4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where +select * from t0 where key1 < 3 or key2 < 4; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +3 3 3 3 3 3 3 1021 +update t0 set key8=123 where key1 < 3 or key2 < 4; +select * from t0 where key1 < 3 or key2 < 4; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 123 +2 2 2 2 2 2 2 123 +3 3 3 3 3 3 3 123 +delete from t0 where key1 < 3 or key2 < 4; +select * from t0 where key1 < 3 or key2 < 4; +key1 key2 key3 key4 key5 key6 key7 key8 +select count(*) from t0; +count(*) +1021 +drop table t4; +create table t4 (a int); +insert into t4 values (1),(4),(3); +set @save_join_buffer_size=@@join_buffer_size; +set join_buffer_size= 4000; +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 < 500000 or A.key2 < 3) +and (B.key1 < 500000 or B.key2 < 3); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1013 Using sort_union(i1,i2); Using where +1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1013 Using sort_union(i1,i2); Using where +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 < 500000 or A.key2 < 3) +and (B.key1 < 500000 or B.key2 < 3); +max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +10240 +update t0 set key1=1; +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 = 1 or A.key2 = 1) +and (B.key1 = 1 or B.key2 = 1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1020 Using union(i1,i2); Using where +1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1020 Using union(i1,i2); Using where +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 = 1 or A.key2 = 1) +and 
(B.key1 = 1 or B.key2 = 1); +max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +8194 +alter table t0 add filler1 char(200), add filler2 char(200), add filler3 char(200); +update t0 set key2=1, key3=1, key4=1, key5=1,key6=1,key7=1 where key7 < 500; +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A, t0 as B +where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1) +and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE A index_merge i1,i2,i3,i4,i5,i6,i7?,i8 i2,i3,i4,i5,i6,i7?,i8 X NULL # Using union(intersect(i2,i3,i4,i5,i6,i7?),i8); Using where +1 SIMPLE B index_merge i1,i2,i3,i4,i5,i6,i7?,i8 i2,i3,i4,i5,i6,i7?,i8 X NULL # Using union(intersect(i2,i3,i4,i5,i6,i7?),i8); Using where +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A, t0 as B +where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1) +and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1); +max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +8186 +set join_buffer_size= @save_join_buffer_size; +drop table t0, t1, t2, t3, t4; +CREATE TABLE t1 ( +cola char(3) not null, colb char(3) not null, filler char(200), +key(cola), key(colb) +); +INSERT INTO t1 VALUES ('foo','bar', 'ZZ'),('fuz','baz', 'ZZ'); +OPTIMIZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +select count(*) from t1; +count(*) +8704 +explain select * from t1 WHERE cola = 'foo' AND colb = 'bar'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge cola,colb cola,colb 3,3 NULL 32 Using intersect(cola,colb); Using where +explain select * from t1 force index(cola,colb) WHERE cola = 'foo' AND colb = 'bar'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge cola,colb cola,colb 3,3 NULL 32 Using intersect(cola,colb); Using where +drop table t1; +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +a int, b int, +filler1 char(200), filler2 char(200), +key(a),key(b) +); +insert into t1 select @v:= A.a, @v, 't1', 'filler2' from t0 A, t0 B, t0 C; +create table t2 like t1; +create table t3 ( +a int, b int, +filler1 char(200), filler2 char(200), +key(a),key(b) +) engine=merge union=(t1,t2); +explain select * from t1 where a=1 and b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge a,b a,b 5,5 NULL # Using intersect(a,b); Using where +explain select * from t3 where a=1 and b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 index_merge a,b a,b 5,5 NULL # Using intersect(a,b); Using where +drop table t3; +drop table t0, t1, t2; +CREATE TABLE t1(a INT); +INSERT INTO t1 VALUES(1); +CREATE TABLE t2(a INT, b INT, dummy CHAR(16) DEFAULT '', KEY(a), KEY(b)); +INSERT INTO t2(a,b) VALUES +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), 
+(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(1,2); +LOCK TABLES t1 WRITE, t2 WRITE; +INSERT INTO t2(a,b) VALUES(1,2); +SELECT t2.a FROM t1,t2 WHERE t2.b=2 AND t2.a=1; +a +1 +1 +UNLOCK TABLES; +DROP TABLE t1, t2; +#---------------- ROR-index_merge tests ----------------------- +SET SESSION STORAGE_ENGINE = MyISAM; +drop table if exists t0,t1,t2; +create table t1 +( +/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */ +st_a int not null default 0, +swt1a int not null default 0, +swt2a int not null default 0, +st_b int not null default 0, +swt1b int not null default 0, +swt2b int not null default 0, +/* fields/keys for row retrieval tests */ +key1 int, +key2 int, +key3 int, +key4 int, +/* make rows much bigger then keys */ +filler1 char (200), +filler2 char (200), +filler3 char (200), +filler4 char (200), +filler5 char (200), +filler6 char (200), +/* order of keys is important */ +key sta_swt12a(st_a,swt1a,swt2a), +key sta_swt1a(st_a,swt1a), +key sta_swt2a(st_a,swt2a), +key sta_swt21a(st_a,swt2a,swt1a), +key st_a(st_a), +key stb_swt1a_2b(st_b,swt1b,swt2a), +key stb_swt1b(st_b,swt1b), +key st_b(st_b), +key(key1), +key(key2), +key(key3), +key(key4) +) ; +create table t0 as select * from t1; +# Printing of many insert into t0 values (....) disabled. +alter table t1 disable keys; +# Printing of many insert into t1 select .... from t0 disabled. +# Printing of many insert into t1 (...) values (....) disabled. 
+alter table t1 enable keys; +select count(*) from t1; +count(*) +64801 +explain select key1,key2 from t1 where key1=100 and key2=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where; Using index +select key1,key2 from t1 where key1=100 and key2=100; +key1 key2 +100 100 +explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +100 100 100 100 key1-key2-key3-key4 +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2'); +insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3'); +explain select key1,key2,filler1 from t1 where key1=100 and key2=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where +select key1,key2,filler1 from t1 where key1=100 and key2=100; +key1 key2 filler1 +100 100 key1-key2-key3-key4 +100 100 key1-key2 +explain select key1,key2 from t1 where key1=100 and key2=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where; Using index +select key1,key2 from t1 where key1=100 and key2=100; +key1 key2 +100 100 +100 100 +explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where +select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 +100 100 100 100 +100 100 -1 -1 +-1 -1 100 100 +explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +100 100 100 100 key1-key2-key3-key4 +100 100 -1 -1 key1-key2 +-1 -1 100 100 key4-key3 +explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index +select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100; +key1 key2 key3 +100 100 100 +insert into t1 (key1,key2,key3,key4,filler1) values (101,101,101,101, 'key1234-101'); +explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=101; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 83 Using union(intersect(key1,key2),key3); Using where +select key1,key2,key3,key4,filler1 from t1 where 
key1=100 and key2=100 or key3=101; +key1 key2 key3 key4 filler1 +100 100 100 100 key1-key2-key3-key4 +100 100 -1 -1 key1-key2 +101 101 101 101 key1234-101 +select key1,key2, filler1 from t1 where key1=100 and key2=100; +key1 key2 filler1 +100 100 key1-key2-key3-key4 +100 100 key1-key2 +update t1 set filler1='to be deleted' where key1=100 and key2=100; +update t1 set key1=200,key2=200 where key1=100 and key2=100; +delete from t1 where key1=200 and key2=200; +select key1,key2,filler1 from t1 where key2=100 and key2=200; +key1 key2 filler1 +explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 152 Using union(intersect(key1,key2),intersect(key3,key4)); Using where +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +-1 -1 100 100 key4-key3 +delete from t1 where key3=100 and key4=100; +explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 152 Using union(intersect(key1,key2),intersect(key3,key4)); Using where +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +explain select key1,key2 from t1 where key1=100 and key2=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 76 Using intersect(key1,key2); Using where; Using index +select key1,key2 from t1 where key1=100 and key2=100; +key1 key2 +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-1'); +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-2'); +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-3'); +explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 136 Using union(key3,intersect(key1,key2),key4); Using where +select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +key1 key2 key3 key4 filler1 +100 100 200 200 key1-key2-key3-key4-3 +100 100 200 200 key1-key2-key3-key4-2 +100 100 200 200 key1-key2-key3-key4-1 +insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, -1, 200,'key4'); +explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 146 Using union(key3,intersect(key1,key2),key4); Using where +select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +key1 key2 key3 key4 filler1 +100 100 200 200 key1-key2-key3-key4-3 +100 100 200 200 key1-key2-key3-key4-2 +100 100 200 200 key1-key2-key3-key4-1 +-1 -1 -1 200 key4 +insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 200, -1,'key3'); +explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +id 
select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 156 Using union(key3,intersect(key1,key2),key4); Using where +select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +key1 key2 key3 key4 filler1 +100 100 200 200 key1-key2-key3-key4-3 +100 100 200 200 key1-key2-key3-key4-2 +100 100 200 200 key1-key2-key3-key4-1 +-1 -1 -1 200 key4 +-1 -1 200 -1 key3 +explain select * from t1 where st_a=1 and st_b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b st_a,st_b 4,4 NULL 3515 Using intersect(st_a,st_b); Using where +explain select st_a,st_b from t1 where st_a=1 and st_b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b st_a,st_b 4,4 NULL 3515 Using intersect(st_a,st_b); Using where; Using index +explain select st_a from t1 ignore index (st_a) where st_a=1 and st_b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,stb_swt1a_2b,stb_swt1b,st_b st_b 4 const 15093 Using where +explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a sta_swt21a 12 const,const,const 971 +explain select * from t1 where st_b=1 and swt1b=1 and swt2b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref stb_swt1a_2b,stb_swt1b,st_b stb_swt1a_2b 8 const,const 3879 Using where +explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt12a,stb_swt1a_2b 12,12 NULL 58 Using intersect(sta_swt12a,stb_swt1a_2b); Using where +explain select * from t1 ignore index (sta_swt21a, stb_swt1a_2b) +where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,st_a,stb_swt1b,st_b sta_swt12a,stb_swt1b 12,8 NULL 58 Using intersect(sta_swt12a,stb_swt1b); Using where +explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b) +where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt1a,sta_swt2a,st_a,stb_swt1b,st_b sta_swt1a,sta_swt2a,stb_swt1b 8,8,8 NULL 57 Using intersect(sta_swt1a,sta_swt2a,stb_swt1b); Using where +explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b, stb_swt1b) +where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt1a,sta_swt2a,st_a,st_b sta_swt1a,sta_swt2a,st_b 8,8,4 NULL 223 Using intersect(sta_swt1a,sta_swt2a,st_b); Using where +explain select * from t1 +where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b 
sta_swt12a,stb_swt1a_2b 12,12 NULL 58 Using intersect(sta_swt12a,stb_swt1a_2b); Using where +explain select * from t1 +where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt1a,stb_swt1b 8,8 NULL 232 Using intersect(sta_swt1a,stb_swt1b); Using where +explain select st_a from t1 +where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt1a,stb_swt1b 8,8 NULL 232 Using intersect(sta_swt1a,stb_swt1b); Using where; Using index +explain select st_a from t1 +where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt1a,stb_swt1b 8,8 NULL 232 Using intersect(sta_swt1a,stb_swt1b); Using where; Using index +drop table t0,t1; +create table t2 ( +a char(10), +b char(10), +filler1 char(255), +filler2 char(255), +key(a(5)), +key(b(5)) +); +select count(a) from t2 where a='BBBBBBBB'; +count(a) +4 +select count(a) from t2 where b='BBBBBBBB'; +count(a) +4 +expla_or_bin select count(a_or_b) from t2 where a_or_b='AAAAAAAA' a_or_bnd a_or_b='AAAAAAAA'; +id select_type ta_or_ba_or_ble type possia_or_ble_keys key key_len ref rows Extra_or_b +1 SIMPLE t2 ref a_or_b,a_or_b a_or_b 6 const 4 Using where +select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; +count(a) +4 +select count(a) from t2 ignore index(a,b) where a='AAAAAAAA' and b='AAAAAAAA'; +count(a) +4 +insert into t2 values ('ab', 'ab', 'uh', 'oh'); +explain select a from t2 where a='ab'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 6 const 1 Using where +drop table t2; +#---------------- Index merge test 2 ------------------------------------------- +SET SESSION STORAGE_ENGINE = MyISAM; +drop table if exists t1,t2; +create table t1 +( +key1 int not null, +key2 int not null, +INDEX i1(key1), +INDEX i2(key2) +); +explain select * from t1 where key1 < 5 or key2 > 197; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where +select * from t1 where key1 < 5 or key2 > 197; +key1 key2 +0 200 +1 199 +2 198 +3 197 +4 196 +explain select * from t1 where key1 < 3 or key2 > 195; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where +select * from t1 where key1 < 3 or key2 > 195; +key1 key2 +0 200 +1 199 +2 198 +3 197 +4 196 +alter table t1 add str1 char (255) not null, +add zeroval int not null default 0, +add str2 char (255) not null, +add str3 char (255) not null; +update t1 set str1='aaa', str2='bbb', str3=concat(key2, '-', key1 div 2, '_' ,if(key1 mod 2 = 0, 'a', 'A')); +alter table t1 add primary key (str1, zeroval, str2, str3); +explain select * from t1 where key1 < 5 or key2 > 197; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where +select * from t1 where key1 < 5 or key2 > 197; +key1 key2 str1 zeroval str2 str3 +0 200 aaa 0 bbb 200-0_a +1 199 aaa 0 bbb 199-0_A +2 198 aaa 0 bbb 198-1_a +3 
197 aaa 0 bbb 197-1_A +4 196 aaa 0 bbb 196-2_a +explain select * from t1 where key1 < 3 or key2 > 195; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where +select * from t1 where key1 < 3 or key2 > 195; +key1 key2 str1 zeroval str2 str3 +0 200 aaa 0 bbb 200-0_a +1 199 aaa 0 bbb 199-0_A +2 198 aaa 0 bbb 198-1_a +3 197 aaa 0 bbb 197-1_A +4 196 aaa 0 bbb 196-2_a +drop table t1; +create table t1 ( +pk integer not null auto_increment primary key, +key1 integer, +key2 integer not null, +filler char (200), +index (key1), +index (key2) +); +show warnings; +Level Code Message +explain select pk from t1 where key1 = 1 and key2 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,key2 key1 5 const 4 Using where +select pk from t1 where key2 = 1 and key1 = 1; +pk +26 +27 +select pk from t1 ignore index(key1,key2) where key2 = 1 and key1 = 1; +pk +26 +27 +drop table t1; +create table t1 ( +pk int primary key auto_increment, +key1a int, +key2a int, +key1b int, +key2b int, +dummy1 int, +dummy2 int, +dummy3 int, +dummy4 int, +key3a int, +key3b int, +filler1 char (200), +index i1(key1a, key1b), +index i2(key2a, key2b), +index i3(key3a, key3b) +); +create table t2 (a int); +insert into t2 values (0),(1),(2),(3),(4),(NULL); +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +select A.a, B.a, C.a, D.a, C.a, D.a from t2 A,t2 B,t2 C, t2 D; +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +select key1a, key1b, key2a, key2b, key3a, key3b from t1; +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +select key1a, key1b, key2a, key2b, key3a, key3b from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +select count(*) from t1; +count(*) +5184 +explain select count(*) from t1 where +key1a = 2 and key1b is null and key2a = 2 and key2b is null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i2 i1,i2 10,10 NULL 2 Using intersect(i1,i2); Using where; Using index +select count(*) from t1 where +key1a = 2 and key1b is null and key2a = 2 and key2b is null; +count(*) +4 +explain select count(*) from t1 where +key1a = 2 and key1b is null and key3a = 2 and key3b is null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i3 i1,i3 10,10 NULL 2 Using intersect(i1,i3); Using where; Using index +select count(*) from t1 where +key1a = 2 and key1b is null and key3a = 2 and key3b is null; +count(*) +4 +drop table t1,t2; +create table t1 ( +id1 int, +id2 date , +index idx2 (id1,id2), +index idx1 (id2) +); +insert into t1 values(1,'20040101'), (2,'20040102'); +select * from t1 where id1 = 1 and id2= '20040101'; +id1 id2 +1 2004-01-01 +drop table t1; +drop view if exists v1; +CREATE TABLE t1 ( +`oid` int(11) unsigned NOT NULL auto_increment, +`fk_bbk_niederlassung` int(11) unsigned NOT NULL, +`fk_wochentag` int(11) unsigned NOT NULL, +`uhrzeit_von` time NOT NULL COMMENT 'HH:MM', +`uhrzeit_bis` time NOT NULL COMMENT 'HH:MM', +`geloescht` tinyint(4) NOT NULL, +`version` int(5) NOT NULL, +PRIMARY KEY (`oid`), +KEY `fk_bbk_niederlassung` (`fk_bbk_niederlassung`), +KEY `fk_wochentag` (`fk_wochentag`), +KEY `ix_version` (`version`) +) DEFAULT CHARSET=latin1; +insert into t1 values +(1, 38, 1, '08:00:00', '13:00:00', 0, 1), +(2, 38, 2, '08:00:00', '13:00:00', 0, 1), +(3, 38, 3, '08:00:00', '13:00:00', 0, 1), +(4, 38, 4, '08:00:00', '13:00:00', 0, 1), +(5, 
38, 5, '08:00:00', '13:00:00', 0, 1), +(6, 38, 5, '08:00:00', '13:00:00', 1, 2), +(7, 38, 3, '08:00:00', '13:00:00', 1, 2), +(8, 38, 1, '08:00:00', '13:00:00', 1, 2), +(9, 38, 2, '08:00:00', '13:00:00', 1, 2), +(10, 38, 4, '08:00:00', '13:00:00', 1, 2), +(11, 38, 1, '08:00:00', '13:00:00', 0, 3), +(12, 38, 2, '08:00:00', '13:00:00', 0, 3), +(13, 38, 3, '08:00:00', '13:00:00', 0, 3), +(14, 38, 4, '08:00:00', '13:00:00', 0, 3), +(15, 38, 5, '08:00:00', '13:00:00', 0, 3), +(16, 38, 4, '08:00:00', '13:00:00', 0, 4), +(17, 38, 5, '08:00:00', '13:00:00', 0, 4), +(18, 38, 1, '08:00:00', '13:00:00', 0, 4), +(19, 38, 2, '08:00:00', '13:00:00', 0, 4), +(20, 38, 3, '08:00:00', '13:00:00', 0, 4), +(21, 7, 1, '08:00:00', '13:00:00', 0, 1), +(22, 7, 2, '08:00:00', '13:00:00', 0, 1), +(23, 7, 3, '08:00:00', '13:00:00', 0, 1), +(24, 7, 4, '08:00:00', '13:00:00', 0, 1), +(25, 7, 5, '08:00:00', '13:00:00', 0, 1); +create view v1 as +select +zeit1.oid AS oid, +zeit1.fk_bbk_niederlassung AS fk_bbk_niederlassung, +zeit1.fk_wochentag AS fk_wochentag, +zeit1.uhrzeit_von AS uhrzeit_von, +zeit1.uhrzeit_bis AS uhrzeit_bis, +zeit1.geloescht AS geloescht, +zeit1.version AS version +from +t1 zeit1 +where +(zeit1.version = +(select max(zeit2.version) AS `max(version)` + from t1 zeit2 +where +((zeit1.fk_bbk_niederlassung = zeit2.fk_bbk_niederlassung) and +(zeit1.fk_wochentag = zeit2.fk_wochentag) and +(zeit1.uhrzeit_von = zeit2.uhrzeit_von) and +(zeit1.uhrzeit_bis = zeit2.uhrzeit_bis) +) +) +) +and (zeit1.geloescht = 0); +select * from v1 where oid = 21; +oid fk_bbk_niederlassung fk_wochentag uhrzeit_von uhrzeit_bis geloescht version +21 7 1 08:00:00 13:00:00 0 1 +drop view v1; +drop table t1; +CREATE TABLE t1( +t_cpac varchar(2) NOT NULL, +t_vers varchar(4) NOT NULL, +t_rele varchar(2) NOT NULL, +t_cust varchar(4) NOT NULL, +filler1 char(250) default NULL, +filler2 char(250) default NULL, +PRIMARY KEY (t_cpac,t_vers,t_rele,t_cust), +UNIQUE KEY IX_4 (t_cust,t_cpac,t_vers,t_rele), +KEY IX_5 (t_vers,t_rele,t_cust) +); +insert into t1 values +('tm','2.5 ','a ',' ','',''), ('tm','2.5U','a ','stnd','',''), +('da','3.3 ','b ',' ','',''), ('da','3.3U','b ','stnd','',''), +('tl','7.6 ','a ',' ','',''), ('tt','7.6 ','a ',' ','',''), +('bc','B61 ','a ',' ','',''), ('bp','B61 ','a ',' ','',''), +('ca','B61 ','a ',' ','',''), ('ci','B61 ','a ',' ','',''), +('cp','B61 ','a ',' ','',''), ('dm','B61 ','a ',' ','',''), +('ec','B61 ','a ',' ','',''), ('ed','B61 ','a ',' ','',''), +('fm','B61 ','a ',' ','',''), ('nt','B61 ','a ',' ','',''), +('qm','B61 ','a ',' ','',''), ('tc','B61 ','a ',' ','',''), +('td','B61 ','a ',' ','',''), ('tf','B61 ','a ',' ','',''), +('tg','B61 ','a ',' ','',''), ('ti','B61 ','a ',' ','',''), +('tp','B61 ','a ',' ','',''), ('ts','B61 ','a ',' ','',''), +('wh','B61 ','a ',' ','',''), ('bc','B61U','a ','stnd','',''), +('bp','B61U','a ','stnd','',''), ('ca','B61U','a ','stnd','',''), +('ci','B61U','a ','stnd','',''), ('cp','B61U','a ','stnd','',''), +('dm','B61U','a ','stnd','',''), ('ec','B61U','a ','stnd','',''), +('fm','B61U','a ','stnd','',''), ('nt','B61U','a ','stnd','',''), +('qm','B61U','a ','stnd','',''), ('tc','B61U','a ','stnd','',''), +('td','B61U','a ','stnd','',''), ('tf','B61U','a ','stnd','',''), +('tg','B61U','a ','stnd','',''), ('ti','B61U','a ','stnd','',''), +('tp','B61U','a ','stnd','',''), ('ts','B61U','a ','stnd','',''), +('wh','B61U','a ','stnd','',''); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `t_cpac` varchar(2) NOT NULL, + `t_vers` varchar(4) NOT NULL, + 
`t_rele` varchar(2) NOT NULL, + `t_cust` varchar(4) NOT NULL, + `filler1` char(250) DEFAULT NULL, + `filler2` char(250) DEFAULT NULL, + PRIMARY KEY (`t_cpac`,`t_vers`,`t_rele`,`t_cust`), + UNIQUE KEY `IX_4` (`t_cust`,`t_cpac`,`t_vers`,`t_rele`), + KEY `IX_5` (`t_vers`,`t_rele`,`t_cust`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6'; +t_vers t_rele t_cust filler1 +7.6 a +7.6 a +select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6' + and t_rele='a' and t_cust = ' '; +t_vers t_rele t_cust filler1 +7.6 a +7.6 a +drop table t1; +create table t1 ( +pk int(11) not null auto_increment, +a int(11) not null default '0', +b int(11) not null default '0', +c int(11) not null default '0', +filler1 datetime, filler2 varchar(15), +filler3 longtext, +kp1 varchar(4), kp2 varchar(7), +kp3 varchar(2), kp4 varchar(4), +kp5 varchar(7), +filler4 char(1), +primary key (pk), +key idx1(a,b,c), +key idx2(c), +key idx3(kp1,kp2,kp3,kp4,kp5) +) default charset=latin1; +set @fill=NULL; +SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND +kp1='279' AND kp2='ELM0678' AND kp3='6' AND kp4='10' AND kp5 = 'R '; +COUNT(*) +1 +drop table t1; +create table t1 +( +key1 int not null, +key2 int not null default 0, +key3 int not null default 0 +); +insert into t1(key1) values (1),(2),(3),(4),(5),(6),(7),(8); +set @d=8; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +alter table t1 add index i2(key2); +alter table t1 add index i3(key3); +update t1 set key2=key1,key3=key1; +explain select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i2,i3 i3,i2 4,4 NULL 11 Using sort_union(i3,i2); Using where +select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); +key1 key2 key3 +31 31 31 +32 32 32 +33 33 33 +34 34 34 +35 35 35 +36 36 36 +37 37 37 +38 38 38 +39 39 39 +drop table t1; +#---------------- 2-sweeps read Index merge test 2 ------------------------------- +SET SESSION STORAGE_ENGINE = MyISAM; +drop table if exists t1; +create table t1 ( +pk int primary key, +key1 int, +key2 int, +filler char(200), +filler2 char(200), +index(key1), +index(key2) +); +select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 ); +pk key1 key2 filler filler2 +10 10 10 filler-data filler-data-2 +9 9 9 filler-data filler-data-2 +8 8 8 filler-data filler-data-2 +7 7 7 filler-data filler-data-2 +6 6 6 filler-data filler-data-2 +5 5 5 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +set @maxv=1000; +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or key1=18 or key1=60; +pk key1 key2 filler filler2 +1000 1000 1000 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +992 992 992 
filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +60 60 60 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +18 18 18 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +1 1 1 filler-data filler-data-2 +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or key1 < 3 or key1 > @maxv-11; +pk key1 key2 filler filler2 +1000 1000 1000 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +990 990 990 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +1 1 1 filler-data filler-data-2 +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or +(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10); +pk key1 key2 filler filler2 +1000 1000 1000 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +1 1 1 filler-data filler-data-2 +select * from t1 where +(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) +or +(key1 < 5) or (key1 > @maxv-10); +pk key1 key2 filler filler2 +1000 1000 1000 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +11 
11 11 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +1 1 1 filler-data filler-data-2 +drop table t1; +#---------------- Clustered PK ROR-index_merge tests ----------------------------- +SET SESSION STORAGE_ENGINE = MyISAM; +drop table if exists t1; +create table t1 +( +pk1 int not null, +pk2 int not null, +key1 int not null, +key2 int not null, +pktail1ok int not null, +pktail2ok int not null, +pktail3bad int not null, +pktail4bad int not null, +pktail5bad int not null, +pk2copy int not null, +badkey int not null, +filler1 char (200), +filler2 char (200), +key (key1), +key (key2), +/* keys with tails from CPK members */ +key (pktail1ok, pk1), +key (pktail2ok, pk1, pk2), +key (pktail3bad, pk2, pk1), +key (pktail4bad, pk1, pk2copy), +key (pktail5bad, pk1, pk2, pk2copy), +primary key (pk1, pk2) +); +explain select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range PRIMARY,key1 PRIMARY 8 NULL 7 Using where +select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; +pk1 pk2 key1 key2 pktail1ok pktail2ok pktail3bad pktail4bad pktail5bad pk2copy badkey filler1 filler2 +1 10 0 0 0 0 0 0 0 10 0 filler-data-10 filler2 +1 11 0 0 0 0 0 0 0 11 0 filler-data-11 filler2 +1 12 0 0 0 0 0 0 0 12 0 filler-data-12 filler2 +1 13 0 0 0 0 0 0 0 13 0 filler-data-13 filler2 +1 14 0 0 0 0 0 0 0 14 0 filler-data-14 filler2 +1 15 0 0 0 0 0 0 0 15 0 filler-data-15 filler2 +1 16 0 0 0 0 0 0 0 16 0 filler-data-16 filler2 +1 17 0 0 0 0 0 0 0 17 0 filler-data-17 filler2 +1 18 0 0 0 0 0 0 0 18 0 filler-data-18 filler2 +1 19 0 0 0 0 0 0 0 19 0 filler-data-19 filler2 +explain select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 4,4 NULL 1 Using intersect(key1,key2); Using where +select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; +pk1 pk2 +95 59 +95 58 +95 57 +95 56 +95 55 +95 54 +95 53 +95 52 +95 51 +95 50 +explain select * from t1 where badkey=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 4 const 91 Using where +explain select * from t1 where pk1 < 7500 and key1 = 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref PRIMARY,key1 key1 4 const ROWS Using where +explain select * from t1 where pktail1ok=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail1ok pktail1ok 4 const 76 Using where +explain select * from t1 where pktail2ok=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail2ok pktail2ok 4 const 82 Using where +explain select * from t1 where (pktail2ok=1 and pk1< 50000) or key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge PRIMARY,key1,pktail2ok pktail2ok,key1 8,4 NULL 173 Using sort_union(pktail2ok,key1); Using where +explain select * from t1 where pktail3bad=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail3bad pktail3bad 4 const 73 Using where +explain select * from t1 where pktail4bad=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail4bad pktail4bad 4 const 82 Using where +explain select * from t1 where pktail5bad=1 and 
key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail5bad pktail5bad 4 const 70 Using where +explain select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 4,4 NULL 1 Using intersect(key1,key2); Using where +select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; +pk1 pk2 key1 key2 +104 49 10 10 +104 48 10 10 +104 47 10 10 +104 46 10 10 +104 45 10 10 +104 44 10 10 +104 43 10 10 +104 42 10 10 +104 41 10 10 +104 40 10 10 +drop table t1; +create table t1 +( +RUNID varchar(22), +SUBMITNR varchar(5), +ORDERNR char(1), +PROGRAMM varchar(8), +TESTID varchar(4), +UCCHECK char(1), +ETEXT varchar(80), +ETEXT_TYPE char(1), +INFO char(1), +SEVERITY tinyint(3), +TADIRFLAG char(1), +PRIMARY KEY (RUNID,SUBMITNR,ORDERNR,PROGRAMM,TESTID,UCCHECK), +KEY `TVERM~KEY` (PROGRAMM,TESTID,UCCHECK) +) DEFAULT CHARSET=latin1; +update t1 set `ETEXT` = '', `ETEXT_TYPE`='', `INFO`='', `SEVERITY`='', `TADIRFLAG`='' +WHERE +`RUNID`= '' AND `SUBMITNR`= '' AND `ORDERNR`='' AND `PROGRAMM`='' AND +`TESTID`='' AND `UCCHECK`=''; +drop table t1; diff --git a/mysql-test/r/index_merge_ror.result b/mysql-test/r/index_merge_ror.result deleted file mode 100644 index bbf46b2d7e0..00000000000 --- a/mysql-test/r/index_merge_ror.result +++ /dev/null @@ -1,196 +0,0 @@ -drop table if exists t0,t1,t2; -select count(*) from t1; -count(*) -64801 -explain select key1,key2 from t1 where key1=100 and key2=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where; Using index -select key1,key2 from t1 where key1=100 and key2=100; -key1 key2 -100 100 -explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where -select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -key1 key2 key3 key4 filler1 -100 100 100 100 key1-key2-key3-key4 -insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2'); -insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3'); -explain select key1,key2,filler1 from t1 where key1=100 and key2=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where -select key1,key2,filler1 from t1 where key1=100 and key2=100; -key1 key2 filler1 -100 100 key1-key2-key3-key4 -100 100 key1-key2 -explain select key1,key2 from t1 where key1=100 and key2=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where; Using index -select key1,key2 from t1 where key1=100 and key2=100; -key1 key2 -100 100 -100 100 -explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where -select key1,key2,key3,key4 from t1 where key1=100 
and key2=100 or key3=100 and key4=100; -key1 key2 key3 key4 -100 100 100 100 -100 100 -1 -1 --1 -1 100 100 -explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where -select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -key1 key2 key3 key4 filler1 -100 100 100 100 key1-key2-key3-key4 -100 100 -1 -1 key1-key2 --1 -1 100 100 key4-key3 -explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index -select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100; -key1 key2 key3 -100 100 100 -insert into t1 (key1,key2,key3,key4,filler1) values (101,101,101,101, 'key1234-101'); -explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=101; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 83 Using union(intersect(key1,key2),key3); Using where -select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=101; -key1 key2 key3 key4 filler1 -100 100 100 100 key1-key2-key3-key4 -100 100 -1 -1 key1-key2 -101 101 101 101 key1234-101 -select key1,key2, filler1 from t1 where key1=100 and key2=100; -key1 key2 filler1 -100 100 key1-key2-key3-key4 -100 100 key1-key2 -update t1 set filler1='to be deleted' where key1=100 and key2=100; -update t1 set key1=200,key2=200 where key1=100 and key2=100; -delete from t1 where key1=200 and key2=200; -select key1,key2,filler1 from t1 where key2=100 and key2=200; -key1 key2 filler1 -explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 152 Using union(intersect(key1,key2),intersect(key3,key4)); Using where -select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -key1 key2 key3 key4 filler1 --1 -1 100 100 key4-key3 -delete from t1 where key3=100 and key4=100; -explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 152 Using union(intersect(key1,key2),intersect(key3,key4)); Using where -select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; -key1 key2 key3 key4 filler1 -explain select key1,key2 from t1 where key1=100 and key2=100; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 76 Using intersect(key1,key2); Using where; Using index -select key1,key2 from t1 where key1=100 and key2=100; -key1 key2 -insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-1'); -insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-2'); -insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 
200,'key1-key2-key3-key4-3'); -explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 136 Using union(key3,intersect(key1,key2),key4); Using where -select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; -key1 key2 key3 key4 filler1 -100 100 200 200 key1-key2-key3-key4-3 -100 100 200 200 key1-key2-key3-key4-2 -100 100 200 200 key1-key2-key3-key4-1 -insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, -1, 200,'key4'); -explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 146 Using union(key3,intersect(key1,key2),key4); Using where -select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; -key1 key2 key3 key4 filler1 -100 100 200 200 key1-key2-key3-key4-3 -100 100 200 200 key1-key2-key3-key4-2 -100 100 200 200 key1-key2-key3-key4-1 --1 -1 -1 200 key4 -insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 200, -1,'key3'); -explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 156 Using union(key3,intersect(key1,key2),key4); Using where -select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; -key1 key2 key3 key4 filler1 -100 100 200 200 key1-key2-key3-key4-3 -100 100 200 200 key1-key2-key3-key4-2 -100 100 200 200 key1-key2-key3-key4-1 --1 -1 -1 200 key4 --1 -1 200 -1 key3 -explain select * from t1 where st_a=1 and st_b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b st_a,st_b 4,4 NULL 3515 Using intersect(st_a,st_b); Using where -explain select st_a,st_b from t1 where st_a=1 and st_b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b st_a,st_b 4,4 NULL 3515 Using intersect(st_a,st_b); Using where; Using index -explain select st_a from t1 ignore index (st_a) where st_a=1 and st_b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,stb_swt1a_2b,stb_swt1b,st_b st_b 4 const 15093 Using where -explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a sta_swt21a 12 const,const,const 971 -explain select * from t1 where st_b=1 and swt1b=1 and swt2b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref stb_swt1a_2b,stb_swt1b,st_b stb_swt1a_2b 8 const,const 3879 Using where -explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt12a,stb_swt1a_2b 12,12 NULL 58 Using 
intersect(sta_swt12a,stb_swt1a_2b); Using where -explain select * from t1 ignore index (sta_swt21a, stb_swt1a_2b) -where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,st_a,stb_swt1b,st_b sta_swt12a,stb_swt1b 12,8 NULL 58 Using intersect(sta_swt12a,stb_swt1b); Using where -explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b) -where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt1a,sta_swt2a,st_a,stb_swt1b,st_b sta_swt1a,sta_swt2a,stb_swt1b 8,8,8 NULL 57 Using intersect(sta_swt1a,sta_swt2a,stb_swt1b); Using where -explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b, stb_swt1b) -where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt1a,sta_swt2a,st_a,st_b sta_swt1a,sta_swt2a,st_b 8,8,4 NULL 223 Using intersect(sta_swt1a,sta_swt2a,st_b); Using where -explain select * from t1 -where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt12a,stb_swt1a_2b 12,12 NULL 58 Using intersect(sta_swt12a,stb_swt1a_2b); Using where -explain select * from t1 -where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt1a,stb_swt1b 8,8 NULL 232 Using intersect(sta_swt1a,stb_swt1b); Using where -explain select st_a from t1 -where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt1a,stb_swt1b 8,8 NULL 232 Using intersect(sta_swt1a,stb_swt1b); Using where; Using index -explain select st_a from t1 -where st_a=1 and swt1a=1 and st_b=1 and swt1b=1 and swt1b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b sta_swt1a,stb_swt1b 8,8 NULL 232 Using intersect(sta_swt1a,stb_swt1b); Using where; Using index -drop table t0,t1; -create table t2 ( -a char(10), -b char(10), -filler1 char(255), -filler2 char(255), -key(a(5)), -key(b(5)) -); -select count(a) from t2 where a='BBBBBBBB'; -count(a) -4 -select count(a) from t2 where b='BBBBBBBB'; -count(a) -4 -expla_or_bin select count(a_or_b) from t2 where a_or_b='AAAAAAAA' a_or_bnd a_or_b='AAAAAAAA'; -id select_type ta_or_ba_or_ble type possia_or_ble_keys key key_len ref rows Extra_or_b -1 SIMPLE t2 ref a_or_b,a_or_b a_or_b 6 const 4 Using where -select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; -count(a) -4 -select count(a) from t2 ignore index(a,b) where a='AAAAAAAA' and b='AAAAAAAA'; -count(a) -4 -insert into t2 values ('ab', 'ab', 'uh', 'oh'); -explain select a from t2 where a='ab'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ref a a 6 const 1 Using where -drop table t2; diff --git a/mysql-test/r/index_merge_ror_cpk.result b/mysql-test/r/index_merge_ror_cpk.result 
deleted file mode 100644 index 79bb1297abf..00000000000 --- a/mysql-test/r/index_merge_ror_cpk.result +++ /dev/null @@ -1,120 +0,0 @@ -drop table if exists t1; -create table t1 -( -pk1 int not null, -pk2 int not null, -key1 int not null, -key2 int not null, -pktail1ok int not null, -pktail2ok int not null, -pktail3bad int not null, -pktail4bad int not null, -pktail5bad int not null, -pk2copy int not null, -badkey int not null, -filler1 char (200), -filler2 char (200), -key (key1), -key (key2), -/* keys with tails from CPK members */ -key (pktail1ok, pk1), -key (pktail2ok, pk1, pk2), -key (pktail3bad, pk2, pk1), -key (pktail4bad, pk1, pk2copy), -key (pktail5bad, pk1, pk2, pk2copy), -primary key (pk1, pk2) -) engine=innodb; -explain select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range PRIMARY,key1 PRIMARY 8 NULL 9 Using where -select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; -pk1 pk2 key1 key2 pktail1ok pktail2ok pktail3bad pktail4bad pktail5bad pk2copy badkey filler1 filler2 -1 10 0 0 0 0 0 0 0 10 0 filler-data-10 filler2 -1 11 0 0 0 0 0 0 0 11 0 filler-data-11 filler2 -1 12 0 0 0 0 0 0 0 12 0 filler-data-12 filler2 -1 13 0 0 0 0 0 0 0 13 0 filler-data-13 filler2 -1 14 0 0 0 0 0 0 0 14 0 filler-data-14 filler2 -1 15 0 0 0 0 0 0 0 15 0 filler-data-15 filler2 -1 16 0 0 0 0 0 0 0 16 0 filler-data-16 filler2 -1 17 0 0 0 0 0 0 0 17 0 filler-data-17 filler2 -1 18 0 0 0 0 0 0 0 18 0 filler-data-18 filler2 -1 19 0 0 0 0 0 0 0 19 0 filler-data-19 filler2 -explain select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2 key1,key2 4,4 NULL 1 Using intersect(key1,key2); Using where; Using index -select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; -pk1 pk2 -95 50 -95 51 -95 52 -95 53 -95 54 -95 55 -95 56 -95 57 -95 58 -95 59 -explain select * from t1 where badkey=1 and key1=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref key1 key1 4 const 100 Using where -explain select * from t1 where pk1 < 7500 and key1 = 10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge PRIMARY,key1 key1,PRIMARY 4,4 NULL ROWS Using intersect(key1,PRIMARY); Using where -explain select * from t1 where pktail1ok=1 and key1=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,pktail1ok key1,pktail1ok 4,4 NULL 1 Using intersect(key1,pktail1ok); Using where -explain select * from t1 where pktail2ok=1 and key1=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,pktail2ok key1,pktail2ok 4,4 NULL 1 Using intersect(key1,pktail2ok); Using where -select ' The following is actually a deficiency, it uses sort_union currently:' as 'note:'; -note: - The following is actually a deficiency, it uses sort_union currently: -explain select * from t1 where (pktail2ok=1 and pk1< 50000) or key1=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge PRIMARY,key1,pktail2ok pktail2ok,key1 8,4 NULL 199 Using sort_union(pktail2ok,key1); Using where -explain select * from t1 where pktail3bad=1 and key1=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref key1,pktail3bad key1 4 const 100 Using where -explain select * from t1 where pktail4bad=1 and key1=10; -id select_type 
table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref key1,pktail4bad key1 4 const 100 Using where -explain select * from t1 where pktail5bad=1 and key1=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref key1,pktail5bad key1 4 const 100 Using where -explain select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge key1,key2 key1,key2 4,4 NULL 1 Using intersect(key1,key2); Using where; Using index -select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; -pk1 pk2 key1 key2 -95 50 10 10 -95 51 10 10 -95 52 10 10 -95 53 10 10 -95 54 10 10 -95 55 10 10 -95 56 10 10 -95 57 10 10 -95 58 10 10 -95 59 10 10 -drop table t1; -create table t1 -( -RUNID varchar(22), -SUBMITNR varchar(5), -ORDERNR char(1) , -PROGRAMM varchar(8), -TESTID varchar(4), -UCCHECK char(1), -ETEXT varchar(80), -ETEXT_TYPE char(1), -INFO char(1), -SEVERITY tinyint(3), -TADIRFLAG char(1), -PRIMARY KEY (RUNID,SUBMITNR,ORDERNR,PROGRAMM,TESTID,UCCHECK), -KEY `TVERM~KEY` (PROGRAMM,TESTID,UCCHECK) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -update t1 set `ETEXT` = '', `ETEXT_TYPE`='', `INFO`='', `SEVERITY`='', `TADIRFLAG`='' -WHERE -`RUNID`= '' AND `SUBMITNR`= '' AND `ORDERNR`='' AND `PROGRAMM`='' AND -`TESTID`='' AND `UCCHECK`=''; -drop table t1; diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 077fa0f2376..b73b59a433a 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -46,6 +46,8 @@ COLUMN_PRIVILEGES ENGINES EVENTS FILES +GLOBAL_STATUS +GLOBAL_VARIABLES KEY_COLUMN_USAGE PARTITIONS PLUGINS @@ -54,6 +56,8 @@ REFERENTIAL_CONSTRAINTS ROUTINES SCHEMATA SCHEMA_PRIVILEGES +SESSION_STATUS +SESSION_VARIABLES STATISTICS TABLES TABLE_CONSTRAINTS @@ -353,7 +357,7 @@ mysql test explain select * from v0; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY # ALL NULL NULL NULL NULL 2 +1 SIMPLE # ALL NULL NULL NULL NULL 2 create view v1 (c) as select table_name from information_schema.tables where table_name="v1"; select * from v1; @@ -758,6 +762,7 @@ table_schema table_name column_name information_schema COLUMNS COLUMN_TYPE information_schema EVENTS EVENT_DEFINITION information_schema EVENTS SQL_MODE +information_schema GLOBAL_VARIABLES VARIABLE_VALUE information_schema PARTITIONS PARTITION_EXPRESSION information_schema PARTITIONS SUBPARTITION_EXPRESSION information_schema PARTITIONS PARTITION_DESCRIPTION @@ -765,6 +770,7 @@ information_schema PLUGINS PLUGIN_DESCRIPTION information_schema PROCESSLIST INFO information_schema ROUTINES ROUTINE_DEFINITION information_schema ROUTINES SQL_MODE +information_schema SESSION_VARIABLES VARIABLE_VALUE information_schema TRIGGERS ACTION_CONDITION information_schema TRIGGERS ACTION_STATEMENT information_schema TRIGGERS SQL_MODE @@ -847,7 +853,7 @@ delete from mysql.db where user='mysqltest_4'; flush privileges; SELECT table_schema, count(*) FROM information_schema.TABLES where TABLE_SCHEMA!='cluster' GROUP BY TABLE_SCHEMA; table_schema count(*) -information_schema 23 +information_schema 27 mysql 21 create table t1 (i int, j int); create trigger trg1 before insert on t1 for each row @@ -1240,6 +1246,8 @@ COLUMN_PRIVILEGES TABLE_SCHEMA ENGINES ENGINE EVENTS EVENT_SCHEMA FILES TABLE_SCHEMA +GLOBAL_STATUS VARIABLE_NAME +GLOBAL_VARIABLES VARIABLE_NAME KEY_COLUMN_USAGE CONSTRAINT_SCHEMA PARTITIONS TABLE_SCHEMA PLUGINS 
PLUGIN_NAME @@ -1248,6 +1256,8 @@ REFERENTIAL_CONSTRAINTS CONSTRAINT_SCHEMA ROUTINES ROUTINE_SCHEMA SCHEMATA SCHEMA_NAME SCHEMA_PRIVILEGES TABLE_SCHEMA +SESSION_STATUS VARIABLE_NAME +SESSION_VARIABLES VARIABLE_NAME STATISTICS TABLE_SCHEMA TABLES TABLE_SCHEMA TABLE_CONSTRAINTS CONSTRAINT_SCHEMA @@ -1278,6 +1288,8 @@ COLUMN_PRIVILEGES TABLE_SCHEMA ENGINES ENGINE EVENTS EVENT_SCHEMA FILES TABLE_SCHEMA +GLOBAL_STATUS VARIABLE_NAME +GLOBAL_VARIABLES VARIABLE_NAME KEY_COLUMN_USAGE CONSTRAINT_SCHEMA PARTITIONS TABLE_SCHEMA PLUGINS PLUGIN_NAME @@ -1286,6 +1298,8 @@ REFERENTIAL_CONSTRAINTS CONSTRAINT_SCHEMA ROUTINES ROUTINE_SCHEMA SCHEMATA SCHEMA_NAME SCHEMA_PRIVILEGES TABLE_SCHEMA +SESSION_STATUS VARIABLE_NAME +SESSION_VARIABLES VARIABLE_NAME STATISTICS TABLE_SCHEMA TABLES TABLE_SCHEMA TABLE_CONSTRAINTS CONSTRAINT_SCHEMA diff --git a/mysql-test/r/information_schema_db.result b/mysql-test/r/information_schema_db.result index faba5e8d83c..db14b7b6600 100644 --- a/mysql-test/r/information_schema_db.result +++ b/mysql-test/r/information_schema_db.result @@ -13,6 +13,8 @@ COLUMN_PRIVILEGES ENGINES EVENTS FILES +GLOBAL_STATUS +GLOBAL_VARIABLES KEY_COLUMN_USAGE PARTITIONS PLUGINS @@ -21,6 +23,8 @@ REFERENTIAL_CONSTRAINTS ROUTINES SCHEMATA SCHEMA_PRIVILEGES +SESSION_STATUS +SESSION_VARIABLES STATISTICS TABLES TABLE_CONSTRAINTS diff --git a/mysql-test/r/innodb_handler.result b/mysql-test/r/innodb_handler.result deleted file mode 100644 index 7e853a55e37..00000000000 --- a/mysql-test/r/innodb_handler.result +++ /dev/null @@ -1,167 +0,0 @@ -drop table if exists t1,t2; -create table t1 (a int, b char(10), key a(a), key b(a,b)) engine=innodb; -insert into t1 values -(17,"ddd"),(18,"eee"),(19,"fff"),(19,"yyy"), -(14,"aaa"),(15,"bbb"),(16,"ccc"),(16,"xxx"), -(20,"ggg"),(21,"hhh"),(22,"iii"); -handler t1 open as t2; -handler t2 read a first; -a b -14 aaa -handler t2 read a next; -a b -15 bbb -handler t2 read a next; -a b -16 ccc -handler t2 read a prev; -a b -15 bbb -handler t2 read a last; -a b -22 iii -handler t2 read a prev; -a b -21 hhh -handler t2 read a prev; -a b -20 ggg -handler t2 read a first; -a b -14 aaa -handler t2 read a prev; -a b -handler t2 read a last; -a b -22 iii -handler t2 read a prev; -a b -21 hhh -handler t2 read a next; -a b -22 iii -handler t2 read a next; -a b -handler t2 read a=(15); -a b -15 bbb -handler t2 read a=(16); -a b -16 ccc -handler t2 read a=(19,"fff"); -ERROR 42000: Too many key parts specified; max 1 parts allowed -handler t2 read b=(19,"fff"); -a b -19 fff -handler t2 read b=(19,"yyy"); -a b -19 yyy -handler t2 read b=(19); -a b -19 fff -handler t1 read a last; -ERROR 42S02: Unknown table 't1' in HANDLER -handler t2 read a=(11); -a b -handler t2 read a>=(11); -a b -14 aaa -handler t2 read a=(18); -a b -18 eee -handler t2 read a>=(18); -a b -18 eee -handler t2 read a>(18); -a b -19 fff -handler t2 read a<=(18); -a b -18 eee -handler t2 read a<(18); -a b -17 ddd -handler t2 read a first limit 5; -a b -14 aaa -15 bbb -16 ccc -16 xxx -17 ddd -handler t2 read a next limit 3; -a b -18 eee -19 fff -19 yyy -handler t2 read a prev limit 10; -a b -19 fff -18 eee -17 ddd -16 xxx -16 ccc -15 bbb -14 aaa -handler t2 read a>=(16) limit 4; -a b -16 ccc -16 xxx -17 ddd -18 eee -handler t2 read a>=(16) limit 2,2; -a b -17 ddd -18 eee -handler t2 read a last limit 3; -a b -22 iii -21 hhh -20 ggg -handler t2 read a=(19); -a b -19 fff -handler t2 read a=(19) where b="yyy"; -a b -19 yyy -handler t2 read first; -a b -17 ddd -handler t2 read next; -a b -18 eee -handler t2 read last; -ERROR 
42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1 -handler t2 close; -handler t1 open; -handler t1 read a next; -a b -14 aaa -handler t1 read a next; -a b -15 bbb -handler t1 close; -handler t1 open; -handler t1 read a prev; -a b -22 iii -handler t1 read a prev; -a b -21 hhh -handler t1 close; -handler t1 open as t2; -handler t2 read first; -a b -17 ddd -alter table t1 engine=innodb; -handler t2 read first; -ERROR 42S02: Unknown table 't2' in HANDLER -drop table t1; -CREATE TABLE t1 ( no1 smallint(5) NOT NULL default '0', no2 int(10) NOT NULL default '0', PRIMARY KEY (no1,no2)) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1,274),(1,275),(2,6),(2,8),(4,1),(4,2); -HANDLER t1 OPEN; -HANDLER t1 READ `primary` = (1, 1000); -no1 no2 -HANDLER t1 READ `primary` PREV; -no1 no2 -1 275 -DROP TABLE t1; diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result index 98e03239567..690ebeb5607 100644 --- a/mysql-test/r/innodb_mysql.result +++ b/mysql-test/r/innodb_mysql.result @@ -1,5 +1,6 @@ +SET SESSION STORAGE_ENGINE = InnoDB; drop table if exists t1,t2,t1m,t1i,t2m,t2i,t4; -create table t1(f1 varchar(800) binary not null, key(f1)) engine = innodb +create table t1(f1 varchar(800) binary not null, key(f1)) character set utf8 collate utf8_general_ci; Warnings: Warning 1071 Specified key was too long; max key length is 765 bytes @@ -10,8 +11,8 @@ c_id int(11) not null default '0', org_id int(11) default null, unique key contacts$c_id (c_id), key contacts$org_id (org_id) -) engine=innodb; -insert into t1 values +); +insert into t1 values (2,null),(120,null),(141,null),(218,7), (128,1), (151,2),(234,2),(236,2),(243,2),(255,2),(259,2),(232,3),(235,3),(238,3), (246,3),(253,3),(269,3),(285,3),(291,3),(293,3),(131,4),(230,4),(231,4); @@ -33,16 +34,16 @@ sla_set int(11) default null, unique key t2$slai_id (slai_id), key t2$owner_id (owner_id), key t2$sla_id (sla_id) -) engine=innodb; +); insert into t2(slai_id, owner_tbl, owner_id, sla_id) values (1,3,1,1), (3,3,10,2), (4,3,3,6), (5,3,2,5), (6,3,8,3), (7,3,9,7), (8,3,6,8), (9,3,4,9), (10,3,5,10), (11,3,11,11), (12,3,7,12); flush tables; select si.slai_id from t1 c join t2 si on -((si.owner_tbl = 3 and si.owner_id = c.org_id) or -( si.owner_tbl = 2 and si.owner_id = c.c_id)) -where +((si.owner_tbl = 3 and si.owner_id = c.org_id) or +( si.owner_tbl = 2 and si.owner_id = c.c_id)) +where c.c_id = 218 and expiredate is null; slai_id 12 @@ -53,17 +54,17 @@ c_id org_id 141 NULL select si.slai_id from t1 c join t2 si on -((si.owner_tbl = 3 and si.owner_id = c.org_id) or -( si.owner_tbl = 2 and si.owner_id = c.c_id)) -where +((si.owner_tbl = 3 and si.owner_id = c.org_id) or +( si.owner_tbl = 2 and si.owner_id = c.c_id)) +where c.c_id = 218 and expiredate is null; slai_id 12 drop table t1, t2; -CREATE TABLE t1 (a int, b int, KEY b (b)) Engine=InnoDB; -CREATE TABLE t2 (a int, b int, PRIMARY KEY (a,b)) Engine=InnoDB; -CREATE TABLE t3 (a int, b int, c int, PRIMARY KEY (a), -UNIQUE KEY b (b,c), KEY a (a,b,c)) Engine=InnoDB; +CREATE TABLE t1 (a int, b int, KEY b (b)); +CREATE TABLE t2 (a int, b int, PRIMARY KEY (a,b)); +CREATE TABLE t3 (a int, b int, c int, PRIMARY KEY (a), +UNIQUE KEY b (b,c), KEY a (a,b,c)); INSERT INTO t1 VALUES (1, 1); INSERT INTO t1 SELECT a + 1, b + 1 FROM t1; INSERT INTO t1 SELECT a + 2, b + 2 FROM t1; @@ -73,14 +74,14 @@ DELETE FROM t2 WHERE a = 1 AND b < 2; INSERT INTO t3 VALUES (1,1,1),(2,1,2); INSERT INTO t3 SELECT a + 2, a + 2, 3 
FROM t3; INSERT INTO t3 SELECT a + 4, a + 4, 3 FROM t3; -SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE -t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) +SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE +t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) ORDER BY t1.b LIMIT 2; b a 1 1 2 2 -SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE -t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) +SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE +t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) ORDER BY t1.b LIMIT 5; b a 1 1 @@ -98,8 +99,8 @@ CREATE TABLE `t2` ( `id4` INT NOT NULL, UNIQUE (`id2`,`id4`), KEY (`id1`) -) ENGINE=InnoDB; -INSERT INTO `t2`(`id1`,`id2`,`id3`,`id4`) VALUES +); +INSERT INTO `t2`(`id1`,`id2`,`id3`,`id4`) VALUES (1,1,1,0), (1,1,2,1), (5,1,2,2), @@ -110,66 +111,12 @@ SELECT `id1` FROM `t1` WHERE `id1` NOT IN (SELECT `id1` FROM `t2` WHERE `id2` = id1 2 DROP TABLE t1, t2; -create table t1m (a int) engine=myisam; -create table t1i (a int) engine=innodb; -create table t2m (a int) engine=myisam; -create table t2i (a int) engine=innodb; +create table t1m (a int) engine = MEMORY; +create table t1i (a int); +create table t2m (a int) engine = MEMORY; +create table t2i (a int); insert into t2m values (5); insert into t2i values (5); -select min(a) from t1m; -min(a) -NULL -select min(7) from t1m; -min(7) -NULL -select min(7) from DUAL; -min(7) -NULL -explain select min(7) from t2m join t1m; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away -select min(7) from t2m join t1m; -min(7) -NULL -select max(a) from t1m; -max(a) -NULL -select max(7) from t1m; -max(7) -NULL -select max(7) from DUAL; -max(7) -NULL -explain select max(7) from t2m join t1m; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away -select max(7) from t2m join t1m; -max(7) -NULL -select 1, min(a) from t1m where a=99; -1 min(a) -1 NULL -select 1, min(a) from t1m where 1=99; -1 min(a) -1 NULL -select 1, min(1) from t1m where a=99; -1 min(1) -1 NULL -select 1, min(1) from t1m where 1=99; -1 min(1) -1 NULL -select 1, max(a) from t1m where a=99; -1 max(a) -1 NULL -select 1, max(a) from t1m where 1=99; -1 max(a) -1 NULL -select 1, max(1) from t1m where a=99; -1 max(1) -1 NULL -select 1, max(1) from t1m where 1=99; -1 max(1) -1 NULL select min(a) from t1i; min(a) NULL @@ -178,7 +125,7 @@ min(7) NULL select min(7) from DUAL; min(7) -NULL +7 explain select min(7) from t2i join t1i; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2i ALL NULL NULL NULL NULL 1 @@ -194,7 +141,7 @@ max(7) NULL select max(7) from DUAL; max(7) -NULL +7 explain select max(7) from t2i join t1i; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2i ALL NULL NULL NULL NULL 1 @@ -250,7 +197,7 @@ count(*) min(7) max(7) drop table t1m, t1i, t2m, t2i; create table t1 ( a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' ' -); +) ENGINE = MEMORY; insert into t1 (a1, a2, b, c, d) values ('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'), ('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'), @@ -286,14 +233,14 @@ insert into t1 (a1, a2, b, c, d) values 
('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'); create table t4 ( pk_col int auto_increment primary key, a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' ' -) engine=innodb; +); insert into t4 (a1, a2, b, c, d, dummy) select * from t1; create index idx12672_0 on t4 (a1); create index idx12672_1 on t4 (a1,a2,b,c); create index idx12672_2 on t4 (a1,a2,b); -analyze table t1; +analyze table t4; Table Op Msg_type Msg_text -test.t1 analyze status OK +test.t4 analyze status OK select distinct a1 from t4 where pk_col not in (1,2,3,4); a1 a @@ -303,18 +250,18 @@ d drop table t1,t4; create table t1 ( a varchar(30), b varchar(30), primary key(a), key(b) -) engine=innodb; +); select distinct a from t1; a drop table t1; -create table t1(a int, key(a)) engine=innodb; +create table t1(a int, key(a)); insert into t1 values(1); select a, count(a) from t1 group by a with rollup; a count(a) 1 1 NULL 1 drop table t1; -create table t1 (f1 int, f2 char(1), primary key(f1,f2)) engine=innodb; +create table t1 (f1 int, f2 char(1), primary key(f1,f2)); insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d"); alter table t1 drop primary key, add primary key (f2, f1); explain select distinct f1 a, f1 b from t1; @@ -325,25 +272,24 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range NULL PRIMARY 5 NULL 3 Using index for group-by; Using temporary drop table t1; CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20), -INDEX (name)) ENGINE=InnoDB; -CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11), -FOREIGN KEY (fkey) REFERENCES t2(id)) ENGINE=InnoDB; +INDEX (name)); +CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11)); +ALTER TABLE t2 ADD FOREIGN KEY (fkey) REFERENCES t2(id); INSERT INTO t1 VALUES (1,'A1'),(2,'A2'),(3,'B'); INSERT INTO t2 VALUES (1,1),(2,2),(3,2),(4,3),(5,3); EXPLAIN -SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id +SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id WHERE t1.name LIKE 'A%'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index PRIMARY,name name 23 NULL 3 Using where; Using index 1 SIMPLE t2 ref fkey fkey 5 test.t1.id 1 Using where; Using index EXPLAIN -SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id +SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id WHERE t1.name LIKE 'A%' OR FALSE; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 index NULL fkey 5 NULL 5 Using index 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.fkey 1 Using where DROP TABLE t1,t2; -set storage_engine=innodb; CREATE TABLE t1 (a int, b int); insert into t1 values (1,1),(1,2); CREATE TABLE t2 (primary key (a)) select * from t1; @@ -401,7 +347,7 @@ ERROR 23000: Duplicate entry '1' for key 'PRIMARY' SELECT * from t2; a b drop table t1,t2; -create table t1(f1 varchar(800) binary not null, key(f1)) engine = innodb +create table t1(f1 varchar(800) binary not null, key(f1)) character set utf8 collate utf8_general_ci; Warnings: Warning 1071 Specified key was too long; max key length is 765 bytes @@ -413,7 +359,7 @@ CREATE TABLE `t2` ( `c` int(11) default NULL, PRIMARY KEY (`k`), UNIQUE KEY `idx_1` (`a`) -) ENGINE=InnoDB; +); insert into t2 ( a ) values ( 6 ) on duplicate key update c = ifnull( c, 0 ) + 1; diff --git a/mysql-test/r/insert_update.result b/mysql-test/r/insert_update.result index 030c1976e20..baf3f71c4f7 100644 --- a/mysql-test/r/insert_update.result +++ 
b/mysql-test/r/insert_update.result @@ -63,9 +63,9 @@ Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c`,values(`test`.`t1`.`a`) AS `VALUES(a)` from `test`.`t1` explain extended select * from t1 where values(a); id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00 Using where Warnings: -Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c` from `test`.`t1` +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c` from `test`.`t1` where values(`test`.`t1`.`a`) DROP TABLE t1; create table t1(a int primary key, b int); insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5); @@ -197,3 +197,25 @@ PRIMARY KEY (a) ) ENGINE=MyISAM; INSERT INTO t1 ( a ) SELECT 0 ON DUPLICATE KEY UPDATE a = a + VALUES (a) ; DROP TABLE t1; +CREATE TABLE t1 +( +a BIGINT UNSIGNED, +b BIGINT UNSIGNED, +PRIMARY KEY (a) +); +INSERT INTO t1 VALUES (45, 1) ON DUPLICATE KEY UPDATE b = +IF(VALUES(b) > t1.b, VALUES(b), t1.b); +SELECT * FROM t1; +a b +45 1 +INSERT INTO t1 VALUES (45, 2) ON DUPLICATE KEY UPDATE b = +IF(VALUES(b) > t1.b, VALUES(b), t1.b); +SELECT * FROM t1; +a b +45 2 +INSERT INTO t1 VALUES (45, 1) ON DUPLICATE KEY UPDATE b = +IF(VALUES(b) > t1.b, VALUES(b), t1.b); +SELECT * FROM t1; +a b +45 2 +DROP TABLE t1; diff --git a/mysql-test/r/join.result b/mysql-test/r/join.result index 75daa0fd46d..897ec4f7a6a 100644 --- a/mysql-test/r/join.result +++ b/mysql-test/r/join.result @@ -776,3 +776,31 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL a,b NULL NULL NULL 1000 Using where 1 SIMPLE t3 ref b b 5 test.t2.b 1 Using where drop table t1, t2, t3; +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (a int, b int, primary key(a)); +insert into t2 select @v:=A.a+10*B.a, @v from t1 A, t1 B; +explain select * from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 +show status like '%cost%'; +Variable_name Value +Last_query_cost 4.016090 +select 'The cost of accessing t1 (dont care if it changes' '^'; +The cost of accessing t1 (dont care if it changes +The cost of accessing t1 (dont care if it changes^ +select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z; +Z +vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv +explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 +1 SIMPLE A eq_ref PRIMARY PRIMARY 4 test.t1.a 1 +1 SIMPLE B eq_ref PRIMARY PRIMARY 4 test.A.b 1 +show status like '%cost%'; +Variable_name Value +Last_query_cost 24.016090 +select '^^: The above should be ~= 20 + cost(select * from t1). Value less than 20 is an error' Z; +Z +^^: The above should be ~= 20 + cost(select * from t1). 
Value less than 20 is an error +drop table t1, t2; diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result index 0a309371725..4d58d035f21 100644 --- a/mysql-test/r/join_outer.result +++ b/mysql-test/r/join_outer.result @@ -1126,7 +1126,7 @@ a b a b 7 8 7 5 EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.a WHERE t1.a = t2.a OR t1.a = t2.b; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 4 Using where +1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 4 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.a WHERE t1.a IN(t2.a, t2.b); id select_type table type possible_keys key key_len ref rows Extra diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result index 72beee4b2e3..481b3de9f02 100644 --- a/mysql-test/r/loaddata.result +++ b/mysql-test/r/loaddata.result @@ -43,9 +43,9 @@ drop table t1; create table t1 (a int, b char(10)); load data infile '../std_data_ln/loaddata3.dat' into table t1 fields terminated by '' enclosed by '' ignore 1 lines; Warnings: -Warning 1264 Out of range value for column 'a' at row 3 +Warning 1366 Incorrect integer value: 'error ' for column 'a' at row 3 Warning 1262 Row 3 was truncated; it contained more data than there were input columns -Warning 1264 Out of range value for column 'a' at row 5 +Warning 1366 Incorrect integer value: 'wrong end ' for column 'a' at row 5 Warning 1262 Row 5 was truncated; it contained more data than there were input columns select * from t1; a b @@ -57,7 +57,8 @@ a b truncate table t1; load data infile '../std_data_ln/loaddata4.dat' into table t1 fields terminated by '' enclosed by '' lines terminated by '' ignore 1 lines; Warnings: -Warning 1264 Out of range value for column 'a' at row 4 +Warning 1366 Incorrect integer value: ' +' for column 'a' at row 4 Warning 1261 Row 4 doesn't contain data for all columns select * from t1; a b diff --git a/mysql-test/r/log_tables.result b/mysql-test/r/log_tables.result index 638c05dd712..a9683cc7c56 100644 --- a/mysql-test/r/log_tables.result +++ b/mysql-test/r/log_tables.result @@ -73,16 +73,16 @@ select * from mysql.slow_log; start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text TIMESTAMP USER_HOST QUERY_TIME 00:00:00 1 0 test 0 0 1 select sleep(2) alter table mysql.general_log engine=myisam; -ERROR HY000: You can't alter a log table if logging is enabled +ERROR HY000: You cannot 'ALTER' a log table if logging is enabled alter table mysql.slow_log engine=myisam; -ERROR HY000: You can't alter a log table if logging is enabled +ERROR HY000: You cannot 'ALTER' a log table if logging is enabled drop table mysql.general_log; -ERROR HY000: Cannot drop log table if log is enabled +ERROR HY000: You cannot 'DROP' a log table if logging is enabled drop table mysql.slow_log; -ERROR HY000: Cannot drop log table if log is enabled +ERROR HY000: You cannot 'DROP' a log table if logging is enabled set global general_log='OFF'; alter table mysql.slow_log engine=myisam; -ERROR HY000: You can't alter a log table if logging is enabled +ERROR HY000: You cannot 'ALTER' a log table if logging is enabled set global slow_query_log='OFF'; show create table mysql.general_log; Table Create Table @@ -112,8 +112,8 @@ slow_log CREATE TABLE `slow_log` ( alter table mysql.general_log engine=myisam; alter table mysql.slow_log engine=myisam; Warnings: -Warning 1264 Out of range value for column 'last_insert_id' at row 0 -Warning 1264 Out 
of range value for column 'insert_id' at row 0 +Warning 1366 Incorrect integer value: '' for column 'last_insert_id' at row 0 +Warning 1366 Incorrect integer value: '' for column 'insert_id' at row 0 show create table mysql.general_log; Table Create Table general_log CREATE TABLE `general_log` ( @@ -173,13 +173,13 @@ unlock tables; set global general_log='OFF'; set global slow_query_log='OFF'; alter table mysql.slow_log engine=ndb; -ERROR HY000: One can use only CSV and MyISAM engines for the log tables +ERROR HY000: This storage engine cannot be used for log tables" alter table mysql.slow_log engine=innodb; -ERROR HY000: One can use only CSV and MyISAM engines for the log tables +ERROR HY000: This storage engine cannot be used for log tables" alter table mysql.slow_log engine=archive; -ERROR HY000: One can use only CSV and MyISAM engines for the log tables +ERROR HY000: This storage engine cannot be used for log tables" alter table mysql.slow_log engine=blackhole; -ERROR HY000: One can use only CSV and MyISAM engines for the log tables +ERROR HY000: This storage engine cannot be used for log tables" drop table mysql.slow_log; drop table mysql.general_log; drop table mysql.general_log; diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result index db2c462398f..1e0207683ee 100644 --- a/mysql-test/r/merge.result +++ b/mysql-test/r/merge.result @@ -178,12 +178,12 @@ t3 CREATE TABLE `t3` ( ) ENGINE=MRG_MyISAM DEFAULT CHARSET=latin1 UNION=(`t1`,`t2`) create table t4 (a int not null, b char(10), key(a)) engine=MERGE UNION=(t1,t2); select * from t4; -ERROR HY000: All tables in the MERGE table are not identically defined +ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist alter table t4 add column c int; -ERROR HY000: All tables in the MERGE table are not identically defined +ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist flush tables; select * from t4; -ERROR HY000: All tables in the MERGE table are not identically defined +ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist create database mysqltest; create table mysqltest.t6 (a int not null primary key auto_increment, message char(20)); create table t5 (a int not null, b char(20), key(a)) engine=MERGE UNION=(test.t1,mysqltest.t6); @@ -277,7 +277,7 @@ t3 CREATE TABLE `t3` ( drop table t3,t2,t1; create table t1 (a int not null, key(a)) engine=merge; select * from t1; -a +ERROR HY000: Got error 124 from storage engine drop table t1; create table t1 (a int not null, b int not null, key(a,b)); create table t2 (a int not null, b int not null, key(a,b)); @@ -771,6 +771,21 @@ Table Op Msg_type Msg_text test.t1 check status OK test.t2 check status OK drop table t1, t2, t3; +CREATE TABLE t1(a INT); +INSERT INTO t1 VALUES(2),(1); +CREATE TABLE t2(a INT, KEY(a)) ENGINE=MERGE UNION=(t1); +SELECT * FROM t2 WHERE a=2; +ERROR HY000: Got error 124 from storage engine +DROP TABLE t1, t2; +CREATE TABLE t1(a INT) ENGINE=MEMORY; +CREATE TABLE t2(a INT) ENGINE=MERGE UNION=(t1); +SELECT * FROM t2; +ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist +DROP TABLE t1, t2; +CREATE TABLE t2(a INT) ENGINE=MERGE UNION=(t3); +SELECT * FROM t2; +ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist +DROP TABLE t2; create table t1 (b bit(1)); create table t2 (b 
bit(1)); create table tm (b bit(1)) engine = merge union = (t1,t2); diff --git a/mysql-test/r/mix2_myisam.result b/mysql-test/r/mix2_myisam.result new file mode 100644 index 00000000000..9a7d71820f8 --- /dev/null +++ b/mysql-test/r/mix2_myisam.result @@ -0,0 +1,2475 @@ +SET SESSION STORAGE_ENGINE = MEMORY; +drop table if exists t1,t2,t3,t4; +drop database if exists mysqltest; +create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=MyISAM; +insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt'); +select id, code, name from t1 order by id; +id code name +1 1 Tim +2 1 Monty +3 2 David +4 2 Erik +5 3 Sasha +6 3 Jeremy +7 4 Matt +update ignore t1 set id = 8, name = 'Sinisa' where id < 3; +select id, code, name from t1 order by id; +id code name +2 1 Monty +3 2 David +4 2 Erik +5 3 Sasha +6 3 Jeremy +7 4 Matt +8 1 Sinisa +update ignore t1 set id = id + 10, name = 'Ralph' where id < 4; +select id, code, name from t1 order by id; +id code name +3 2 David +4 2 Erik +5 3 Sasha +6 3 Jeremy +7 4 Matt +8 1 Sinisa +12 1 Ralph +drop table t1; +CREATE TABLE t1 ( +id int(11) NOT NULL auto_increment, +parent_id int(11) DEFAULT '0' NOT NULL, +level tinyint(4) DEFAULT '0' NOT NULL, +PRIMARY KEY (id), +KEY parent_id (parent_id), +KEY level (level) +) engine=MyISAM; +INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1),(179,5,2); +update t1 set parent_id=parent_id+100; +select * from t1 where parent_id=102; +id parent_id level +8 102 2 +9 102 2 +15 102 2 +update t1 set id=id+1000; +update t1 set id=1024 where id=1009; +Got one of the listed errors +select * from t1; +id parent_id level +1001 100 0 +1003 101 1 +1004 101 1 +1008 102 2 +1009 102 2 +1017 103 2 +1022 104 2 +1024 104 2 +1028 105 2 +1029 105 2 +1030 105 2 +1031 106 2 +1032 106 2 +1033 106 2 +1203 107 2 +1202 107 2 +1020 103 2 +1157 100 0 +1193 105 2 +1040 107 2 +1002 101 1 +1015 102 2 +1006 101 1 +1034 106 2 +1035 106 2 +1016 103 2 +1007 101 1 +1036 107 2 +1018 103 2 +1026 105 2 +1027 105 2 +1183 104 2 +1038 107 2 +1025 105 2 +1037 107 2 +1021 104 2 +1019 103 2 +1005 101 1 +1179 105 2 +update ignore t1 set id=id+1; +select * from t1; +id parent_id level +1001 100 0 +1003 101 1 +1004 101 1 +1008 102 2 +1010 102 2 +1017 103 2 +1023 104 2 +1024 104 2 +1028 105 2 +1029 105 2 +1030 105 2 +1031 106 2 +1032 106 2 +1033 106 2 +1204 107 2 +1203 107 2 +1020 103 2 +1158 100 0 +1194 105 2 +1041 107 2 +1002 101 1 +1015 102 2 +1006 101 1 +1034 106 2 +1035 106 2 +1016 103 2 +1007 101 1 +1036 107 2 +1018 103 2 +1026 105 2 +1027 105 2 +1184 104 2 +1039 107 2 +1025 105 2 +1038 107 2 +1022 104 2 +1019 103 2 +1005 101 1 +1180 105 2 +update ignore t1 set id=1023 where id=1010; +select * from t1 where parent_id=102; +id parent_id level +1008 102 2 +1010 102 2 +1015 102 2 +explain select level from t1 where level=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref level level 1 const # Using index +explain select level,id from t1 where level=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref level level 1 const # 
+explain select level,id,parent_id from t1 where level=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref level level 1 const # +select level,id from t1 where level=1; +level id +1 1003 +1 1004 +1 1002 +1 1006 +1 1007 +1 1005 +select level,id,parent_id from t1 where level=1; +level id parent_id +1 1003 101 +1 1004 101 +1 1002 101 +1 1006 101 +1 1007 101 +1 1005 101 +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +show keys from t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment +t1 0 PRIMARY 1 id A # NULL NULL BTREE +t1 1 parent_id 1 parent_id A # NULL NULL BTREE +t1 1 level 1 level A # NULL NULL BTREE +drop table t1; +CREATE TABLE t1 ( +gesuchnr int(11) DEFAULT '0' NOT NULL, +benutzer_id int(11) DEFAULT '0' NOT NULL, +PRIMARY KEY (gesuchnr,benutzer_id) +) engine=MyISAM; +replace into t1 (gesuchnr,benutzer_id) values (2,1); +replace into t1 (gesuchnr,benutzer_id) values (1,1); +replace into t1 (gesuchnr,benutzer_id) values (1,1); +select * from t1; +gesuchnr benutzer_id +1 1 +2 1 +drop table t1; +create table t1 (a int) engine=MyISAM; +insert into t1 values (1), (2); +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +delete from t1 where a = 1; +select * from t1; +a +2 +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +create table t1 (a int,b varchar(20)) engine=MyISAM; +insert into t1 values (1,""), (2,"testing"); +delete from t1 where a = 1; +select * from t1; +a b +2 testing +create index skr on t1 (a); +insert into t1 values (3,""), (4,"testing"); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +show keys from t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment +t1 1 skr 1 a A # NULL NULL YES BTREE +drop table t1; +create table t1 (a int,b varchar(20),key(a)) engine=MyISAM; +insert into t1 values (1,""), (2,"testing"); +select * from t1 where a = 1; +a b +1 +drop table t1; +CREATE TABLE t1 ( +user_id int(10) DEFAULT '0' NOT NULL, +name varchar(100), +phone varchar(100), +ref_email varchar(100) DEFAULT '' NOT NULL, +detail varchar(200), +PRIMARY KEY (user_id,ref_email) +)engine=MyISAM; +INSERT INTO t1 VALUES (10292,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10292,'shirish','2333604','shirish@yahoo.com','ddsds'),(10292,'sonali','323232','sonali@bolly.com','filmstar'); +select * from t1 where user_id=10292; +user_id name phone ref_email detail +10292 sanjeev 29153373 sansh777@hotmail.com xxx +10292 shirish 2333604 shirish@yahoo.com ddsds +10292 sonali 323232 sonali@bolly.com filmstar +INSERT INTO t1 VALUES (10291,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10293,'shirish','2333604','shirish@yahoo.com','ddsds'); +select * from t1 where user_id=10292; +user_id name phone ref_email detail +10292 sanjeev 29153373 sansh777@hotmail.com xxx +10292 shirish 2333604 shirish@yahoo.com ddsds +10292 sonali 323232 sonali@bolly.com filmstar +select * from t1 where user_id>=10292; +user_id name phone ref_email detail +10292 sanjeev 29153373 sansh777@hotmail.com xxx +10292 shirish 2333604 shirish@yahoo.com ddsds +10292 sonali 323232 sonali@bolly.com filmstar +10293 shirish 2333604 shirish@yahoo.com ddsds +select * from t1 where user_id>10292; +user_id name phone ref_email detail +10293 shirish 2333604 shirish@yahoo.com ddsds +select * from t1 where user_id<10292; +user_id name phone ref_email detail +10291 
sanjeev 29153373 sansh777@hotmail.com xxx +drop table t1; +CREATE TABLE t1 (a int not null, b int not null,c int not null, +key(a),primary key(a,b), unique(c),key(a),unique(b)) ENGINE = MyISAM; +show index from t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment +t1 0 PRIMARY 1 a A # NULL NULL BTREE +t1 0 PRIMARY 2 b A # NULL NULL BTREE +t1 0 c 1 c A # NULL NULL BTREE +t1 0 b 1 b A # NULL NULL BTREE +t1 1 a 1 a A # NULL NULL BTREE +t1 1 a_2 1 a A # NULL NULL BTREE +drop table t1; +create table t1 (col1 int not null, col2 char(4) not null, primary key(col1)) ENGINE = MEMORY; +alter table t1 engine=MyISAM; +insert into t1 values ('1','1'),('5','2'),('2','3'),('3','4'),('4','4'); +select * from t1; +col1 col2 +1 1 +5 2 +2 3 +3 4 +4 4 +update t1 set col2='7' where col1='4'; +select * from t1; +col1 col2 +1 1 +5 2 +2 3 +3 4 +4 7 +alter table t1 add co3 int not null; +select * from t1; +col1 col2 co3 +1 1 0 +5 2 0 +2 3 0 +3 4 0 +4 7 0 +update t1 set col2='9' where col1='2'; +select * from t1; +col1 col2 co3 +1 1 0 +5 2 0 +2 9 0 +3 4 0 +4 7 0 +drop table t1; +create table t1 (a int not null , b int, primary key (a)) engine = MyISAM; +create table t2 (a int not null , b int, primary key (a)) engine = MEMORY; +insert into t1 VALUES (1,3) , (2,3), (3,3); +select * from t1; +a b +1 3 +2 3 +3 3 +insert into t2 select * from t1; +select * from t2; +a b +1 3 +2 3 +3 3 +delete from t1 where b = 3; +select * from t1; +a b +insert into t1 select * from t2; +select * from t1; +a b +3 3 +2 3 +1 3 +select * from t2; +a b +1 3 +2 3 +3 3 +drop table t1,t2; +CREATE TABLE t1 ( +id int(11) NOT NULL auto_increment, +ggid varchar(32) binary DEFAULT '' NOT NULL, +email varchar(64) DEFAULT '' NOT NULL, +passwd varchar(32) binary DEFAULT '' NOT NULL, +PRIMARY KEY (id), +UNIQUE ggid (ggid) +) ENGINE=MyISAM; +insert into t1 (ggid,passwd) values ('test1','xxx'); +insert into t1 (ggid,passwd) values ('test2','yyy'); +insert into t1 (ggid,passwd) values ('test2','this will fail'); +ERROR 23000: Duplicate entry 'test2' for key 'ggid' +insert into t1 (ggid,id) values ('this will fail',1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +select * from t1 where ggid='test1'; +id ggid email passwd +1 test1 xxx +select * from t1 where passwd='xxx'; +id ggid email passwd +1 test1 xxx +select * from t1 where id=2; +id ggid email passwd +2 test2 yyy +replace into t1 (ggid,id) values ('this will work',1); +replace into t1 (ggid,passwd) values ('test2','this will work'); +update t1 set id=100,ggid='test2' where id=1; +ERROR 23000: Duplicate entry 'test2' for key 'ggid' +select * from t1; +id ggid email passwd +1 this will work +3 test2 this will work +select * from t1 where id=1; +id ggid email passwd +1 this will work +select * from t1 where id=999; +id ggid email passwd +drop table t1; +CREATE TABLE t1 ( +user_name varchar(12), +password text, +subscribed char(1), +user_id int(11) DEFAULT '0' NOT NULL, +quota bigint(20), +weight double, +access_date date, +access_time time, +approved datetime, +dummy_primary_key int(11) NOT NULL auto_increment, +PRIMARY KEY (dummy_primary_key) +) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('user_0','somepassword','N',0,0,0,'2000-09-07','23:06:59','2000-09-07 23:06:59',1); +INSERT INTO t1 VALUES ('user_1','somepassword','Y',1,1,1,'2000-09-07','23:06:59','2000-09-07 23:06:59',2); +INSERT INTO t1 VALUES ('user_2','somepassword','N',2,2,1.4142135623731,'2000-09-07','23:06:59','2000-09-07 23:06:59',3); +INSERT INTO t1 VALUES 
('user_3','somepassword','Y',3,3,1.7320508075689,'2000-09-07','23:06:59','2000-09-07 23:06:59',4); +INSERT INTO t1 VALUES ('user_4','somepassword','N',4,4,2,'2000-09-07','23:06:59','2000-09-07 23:06:59',5); +select user_name, password , subscribed, user_id, quota, weight, access_date, access_time, approved, dummy_primary_key from t1 order by user_name; +user_name password subscribed user_id quota weight access_date access_time approved dummy_primary_key +user_0 somepassword N 0 0 0 2000-09-07 23:06:59 2000-09-07 23:06:59 1 +user_1 somepassword Y 1 1 1 2000-09-07 23:06:59 2000-09-07 23:06:59 2 +user_2 somepassword N 2 2 1.4142135623731 2000-09-07 23:06:59 2000-09-07 23:06:59 3 +user_3 somepassword Y 3 3 1.7320508075689 2000-09-07 23:06:59 2000-09-07 23:06:59 4 +user_4 somepassword N 4 4 2 2000-09-07 23:06:59 2000-09-07 23:06:59 5 +drop table t1; +CREATE TABLE t1 ( +id int(11) NOT NULL auto_increment, +parent_id int(11) DEFAULT '0' NOT NULL, +level tinyint(4) DEFAULT '0' NOT NULL, +KEY (id), +KEY parent_id (parent_id), +KEY level (level) +) engine=MyISAM; +INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1); +INSERT INTO t1 values (179,5,2); +update t1 set parent_id=parent_id+100; +select * from t1 where parent_id=102; +id parent_id level +8 102 2 +9 102 2 +15 102 2 +update t1 set id=id+1000; +update t1 set id=1024 where id=1009; +select * from t1; +id parent_id level +1001 100 0 +1003 101 1 +1004 101 1 +1008 102 2 +1024 102 2 +1017 103 2 +1022 104 2 +1024 104 2 +1028 105 2 +1029 105 2 +1030 105 2 +1031 106 2 +1032 106 2 +1033 106 2 +1203 107 2 +1202 107 2 +1020 103 2 +1157 100 0 +1193 105 2 +1040 107 2 +1002 101 1 +1015 102 2 +1006 101 1 +1034 106 2 +1035 106 2 +1016 103 2 +1007 101 1 +1036 107 2 +1018 103 2 +1026 105 2 +1027 105 2 +1183 104 2 +1038 107 2 +1025 105 2 +1037 107 2 +1021 104 2 +1019 103 2 +1005 101 1 +1179 105 2 +update ignore t1 set id=id+1; +select * from t1; +id parent_id level +1002 100 0 +1004 101 1 +1005 101 1 +1009 102 2 +1025 102 2 +1018 103 2 +1023 104 2 +1025 104 2 +1029 105 2 +1030 105 2 +1031 105 2 +1032 106 2 +1033 106 2 +1034 106 2 +1204 107 2 +1203 107 2 +1021 103 2 +1158 100 0 +1194 105 2 +1041 107 2 +1003 101 1 +1016 102 2 +1007 101 1 +1035 106 2 +1036 106 2 +1017 103 2 +1008 101 1 +1037 107 2 +1019 103 2 +1027 105 2 +1028 105 2 +1184 104 2 +1039 107 2 +1026 105 2 +1038 107 2 +1022 104 2 +1020 103 2 +1006 101 1 +1180 105 2 +update ignore t1 set id=1023 where id=1010; +select * from t1 where parent_id=102; +id parent_id level +1009 102 2 +1025 102 2 +1016 102 2 +explain select level from t1 where level=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref level level 1 const # Using index +select level,id from t1 where level=1; +level id +1 1004 +1 1005 +1 1003 +1 1007 +1 1008 +1 1006 +select level,id,parent_id from t1 where level=1; +level id parent_id +1 1004 101 +1 1005 101 +1 1003 101 +1 1007 101 +1 1008 101 +1 1006 101 +select level,id from t1 where level=1 order by id; +level id +1 1003 +1 1004 +1 1005 +1 1006 +1 1007 +1 1008 +delete from t1 where level=1; +select * from t1; +id parent_id level +1002 100 0 +1009 102 2 +1025 102 2 +1018 103 2 +1023 104 2 +1025 104 2 +1029 105 2 +1030 105 2 +1031 105 2 +1032 106 2 +1033 
106 2 +1034 106 2 +1204 107 2 +1203 107 2 +1021 103 2 +1158 100 0 +1194 105 2 +1041 107 2 +1016 102 2 +1035 106 2 +1036 106 2 +1017 103 2 +1037 107 2 +1019 103 2 +1027 105 2 +1028 105 2 +1184 104 2 +1039 107 2 +1026 105 2 +1038 107 2 +1022 104 2 +1020 103 2 +1180 105 2 +drop table t1; +CREATE TABLE t1 ( +sca_code char(6) NOT NULL, +cat_code char(6) NOT NULL, +sca_desc varchar(50), +lan_code char(2) NOT NULL, +sca_pic varchar(100), +sca_sdesc varchar(50), +sca_sch_desc varchar(16), +PRIMARY KEY (sca_code, cat_code, lan_code), +INDEX sca_pic (sca_pic) +) engine = MyISAM ; +INSERT INTO t1 ( sca_code, cat_code, sca_desc, lan_code, sca_pic, sca_sdesc, sca_sch_desc) VALUES ( 'PD', 'J', 'PENDANT', 'EN', NULL, NULL, 'PENDANT'),( 'RI', 'J', 'RING', 'EN', NULL, NULL, 'RING'),( 'QQ', 'N', 'RING', 'EN', 'not null', NULL, 'RING'); +select count(*) from t1 where sca_code = 'PD'; +count(*) +1 +select count(*) from t1 where sca_code <= 'PD'; +count(*) +1 +select count(*) from t1 where sca_pic is null; +count(*) +2 +alter table t1 drop index sca_pic, add index sca_pic (cat_code, sca_pic); +select count(*) from t1 where sca_code='PD' and sca_pic is null; +count(*) +1 +select count(*) from t1 where cat_code='E'; +count(*) +0 +alter table t1 drop index sca_pic, add index (sca_pic, cat_code); +select count(*) from t1 where sca_code='PD' and sca_pic is null; +count(*) +1 +select count(*) from t1 where sca_pic >= 'n'; +count(*) +1 +select sca_pic from t1 where sca_pic is null; +sca_pic +NULL +NULL +update t1 set sca_pic="test" where sca_pic is null; +delete from t1 where sca_code='pd'; +drop table t1; +set @a:=now(); +CREATE TABLE t1 (a int not null, b timestamp not null, primary key (a)) engine=MyISAM; +insert into t1 (a) values(1),(2),(3); +select t1.a from t1 natural join t1 as t2 where t1.b >= @a order by t1.a; +a +1 +2 +3 +select a from t1 natural join t1 as t2 where b >= @a order by a; +a +1 +2 +3 +update t1 set a=5 where a=1; +select a from t1; +a +2 +3 +5 +drop table t1; +create table t1 (a varchar(100) not null, primary key(a), b int not null) engine=MyISAM; +insert into t1 values("hello",1),("world",2); +select * from t1 order by b desc; +a b +world 2 +hello 1 +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +show keys from t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment +t1 0 PRIMARY 1 a A # NULL NULL BTREE +drop table t1; +create table t1 (i int, j int ) ENGINE=MyISAM; +insert into t1 values (1,2); +select * from t1 where i=1 and j=2; +i j +1 2 +create index ax1 on t1 (i,j); +select * from t1 where i=1 and j=2; +i j +1 2 +drop table t1; +CREATE TABLE t1 ( +a int3 unsigned NOT NULL, +b int1 unsigned NOT NULL, +UNIQUE (a, b) +) ENGINE = MyISAM; +INSERT INTO t1 VALUES (1, 1); +SELECT MIN(B),MAX(b) FROM t1 WHERE t1.a = 1; +MIN(B) MAX(b) +1 1 +drop table t1; +CREATE TABLE t1 (a int unsigned NOT NULL) engine=MyISAM; +INSERT INTO t1 VALUES (1); +SELECT * FROM t1; +a +1 +DROP TABLE t1; +create table t1 (a int primary key,b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int, r int, s int, t int, u int, v int, w int, x int, y int, z int, a1 int, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, b1 int, b2 int, b3 int, b4 int, b5 int, b6 int) engine = MyISAM; +insert into t1 values (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1); +explain select * from t1 where a > 0 and a < 50; +id select_type table type 
possible_keys key key_len ref rows Extra +1 SIMPLE t1 system PRIMARY NULL NULL NULL # +drop table t1; +create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=MyISAM; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +select id from t1; +id +0 +1 +2 +99 +select id from t1; +id +0 +1 +2 +99 +UNLOCK TABLES; +DROP TABLE t1; +create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) engine=MyISAM; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +begin; +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +select id from t1; +id +0 +1 +2 +99 +insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D'); +commit; +select id,id3 from t1; +id id3 +0 0 +2 2 +1 1 +99 2 +100 2 +UNLOCK TABLES; +DROP TABLE t1; +create table t1 (a char(20), unique (a(5))) engine=MyISAM; +drop table t1; +create table t1 (a char(20), index (a(5))) engine=MyISAM; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) DEFAULT NULL, + KEY `a` (`a`(5)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; +create temporary table t1 (a int not null auto_increment, primary key(a)) engine=MyISAM; +insert into t1 values (NULL),(NULL),(NULL); +delete from t1 where a=3; +insert into t1 values (NULL); +select * from t1; +a +1 +2 +4 +alter table t1 add b int; +select * from t1; +a b +1 NULL +2 NULL +4 NULL +drop table t1; +create table t1 +( +id int auto_increment primary key, +name varchar(32) not null, +value text not null, +uid int not null, +unique key(name,uid) +) engine=MyISAM; +insert into t1 values (1,'one','one value',101), +(2,'two','two value',102),(3,'three','three value',103); +set insert_id=5; +replace into t1 (value,name,uid) values ('other value','two',102); +delete from t1 where uid=102; +set insert_id=5; +replace into t1 (value,name,uid) values ('other value','two',102); +set insert_id=6; +replace into t1 (value,name,uid) values ('other value','two',102); +select * from t1; +id name value uid +1 one one value 101 +3 three three value 103 +6 two other value 102 +drop table t1; +create database mysqltest; +create table mysqltest.t1 (a int not null) engine= MyISAM; +insert into mysqltest.t1 values(1); +create table mysqltest.t2 (a int not null) engine= MEMORY; +insert into mysqltest.t2 values(1); +create table mysqltest.t3 (a int not null) engine= MEMORY; +insert into mysqltest.t3 values(1); +commit; +drop database mysqltest; +show tables from mysqltest; +ERROR 42000: Unknown database 'mysqltest' +set autocommit=0; +create table t1 (a int not null) engine= MyISAM; +insert into t1 values(1),(2); +truncate table t1; +commit; +truncate table t1; +truncate table t1; +select * from t1; +a +insert into t1 values(1),(2); +delete from t1; +select * from t1; +a +commit; +drop table t1; +set autocommit=1; +create table t1 (a int not null) engine= MyISAM; +insert into t1 values(1),(2); +truncate table t1; +insert into t1 values(1),(2); +select * from t1; +a +1 +2 +truncate table t1; +insert into t1 values(1),(2); +delete from t1; +select * from t1; +a +drop table t1; +create table t1 (a int not null, b int not null, c int not null, primary key (a),key(b)) engine=MyISAM; +insert into t1 values 
(3,3,3),(1,1,1),(2,2,2),(4,4,4); +explain select * from t1 order by a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +explain select * from t1 order by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +explain select * from t1 order by c; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +explain select a from t1 order by a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index +explain select b from t1 order by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL b 4 NULL # Using index +explain select a,b from t1 order by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +explain select a,b from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # +explain select a,b,c from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # +drop table t1; +create table t1 (t int not null default 1, key (t)) engine=MyISAM; +desc t1; +Field Type Null Key Default Extra +t int(11) NO MUL 1 +drop table t1; +CREATE TABLE t1 ( +number bigint(20) NOT NULL default '0', +cname char(15) NOT NULL default '', +carrier_id smallint(6) NOT NULL default '0', +privacy tinyint(4) NOT NULL default '0', +last_mod_date timestamp NOT NULL, +last_mod_id smallint(6) NOT NULL default '0', +last_app_date timestamp NOT NULL, +last_app_id smallint(6) default '-1', +version smallint(6) NOT NULL default '0', +assigned_scps int(11) default '0', +status tinyint(4) default '0' +) ENGINE=MyISAM; +INSERT INTO t1 VALUES (4077711111,'SeanWheeler',90,2,20020111112846,500,00000000000000,-1,2,3,1); +INSERT INTO t1 VALUES (9197722223,'berry',90,3,20020111112809,500,20020102114532,501,4,10,0); +INSERT INTO t1 VALUES (650,'San Francisco',0,0,20011227111336,342,00000000000000,-1,1,24,1); +INSERT INTO t1 VALUES (302467,'Sue\'s Subshop',90,3,20020109113241,500,20020102115111,501,7,24,0); +INSERT INTO t1 VALUES (6014911113,'SudzCarwash',520,1,20020102115234,500,20020102115259,501,33,32768,0); +INSERT INTO t1 VALUES (333,'tubs',99,2,20020109113440,501,20020109113440,500,3,10,0); +CREATE TABLE t2 ( +number bigint(20) NOT NULL default '0', +cname char(15) NOT NULL default '', +carrier_id smallint(6) NOT NULL default '0', +privacy tinyint(4) NOT NULL default '0', +last_mod_date timestamp NOT NULL, +last_mod_id smallint(6) NOT NULL default '0', +last_app_date timestamp NOT NULL, +last_app_id smallint(6) default '-1', +version smallint(6) NOT NULL default '0', +assigned_scps int(11) default '0', +status tinyint(4) default '0' +) ENGINE=MyISAM; +INSERT INTO t2 VALUES (4077711111,'SeanWheeler',0,2,20020111112853,500,00000000000000,-1,2,3,1); +INSERT INTO t2 VALUES (9197722223,'berry',90,3,20020111112818,500,20020102114532,501,4,10,0); +INSERT INTO t2 VALUES (650,'San Francisco',90,0,20020109113158,342,00000000000000,-1,1,24,1); +INSERT INTO t2 VALUES (333,'tubs',99,2,20020109113453,501,20020109113453,500,3,10,0); +select * from t1; +number cname carrier_id privacy last_mod_date last_mod_id last_app_date last_app_id version assigned_scps status +4077711111 SeanWheeler 90 2 2002-01-11 11:28:46 500 0000-00-00 00:00:00 -1 2 3 1 +9197722223 berry 
90 3 2002-01-11 11:28:09 500 2002-01-02 11:45:32 501 4 10 0 +650 San Francisco 0 0 2001-12-27 11:13:36 342 0000-00-00 00:00:00 -1 1 24 1 +302467 Sue's Subshop 90 3 2002-01-09 11:32:41 500 2002-01-02 11:51:11 501 7 24 0 +6014911113 SudzCarwash 520 1 2002-01-02 11:52:34 500 2002-01-02 11:52:59 501 33 32768 0 +333 tubs 99 2 2002-01-09 11:34:40 501 2002-01-09 11:34:40 500 3 10 0 +select * from t2; +number cname carrier_id privacy last_mod_date last_mod_id last_app_date last_app_id version assigned_scps status +4077711111 SeanWheeler 0 2 2002-01-11 11:28:53 500 0000-00-00 00:00:00 -1 2 3 1 +9197722223 berry 90 3 2002-01-11 11:28:18 500 2002-01-02 11:45:32 501 4 10 0 +650 San Francisco 90 0 2002-01-09 11:31:58 342 0000-00-00 00:00:00 -1 1 24 1 +333 tubs 99 2 2002-01-09 11:34:53 501 2002-01-09 11:34:53 500 3 10 0 +delete t1, t2 from t1 left join t2 on t1.number=t2.number where (t1.carrier_id=90 and t1.number=t2.number) or (t2.carrier_id=90 and t1.number=t2.number) or (t1.carrier_id=90 and t2.number is null); +select * from t1; +number cname carrier_id privacy last_mod_date last_mod_id last_app_date last_app_id version assigned_scps status +6014911113 SudzCarwash 520 1 2002-01-02 11:52:34 500 2002-01-02 11:52:59 501 33 32768 0 +333 tubs 99 2 2002-01-09 11:34:40 501 2002-01-09 11:34:40 500 3 10 0 +select * from t2; +number cname carrier_id privacy last_mod_date last_mod_id last_app_date last_app_id version assigned_scps status +333 tubs 99 2 2002-01-09 11:34:53 501 2002-01-09 11:34:53 500 3 10 0 +select * from t2; +number cname carrier_id privacy last_mod_date last_mod_id last_app_date last_app_id version assigned_scps status +333 tubs 99 2 2002-01-09 11:34:53 501 2002-01-09 11:34:53 500 3 10 0 +drop table t1,t2; +create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=MyISAM; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +SELECT @@tx_isolation,@@global.tx_isolation; +@@tx_isolation @@global.tx_isolation +SERIALIZABLE REPEATABLE-READ +insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'); +select id, code, name from t1 order by id; +id code name +1 1 Tim +2 1 Monty +3 2 David +COMMIT; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +insert into t1 (code, name) values (2, 'Erik'), (3, 'Sasha'); +select id, code, name from t1 order by id; +id code name +1 1 Tim +2 1 Monty +3 2 David +4 2 Erik +5 3 Sasha +COMMIT; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +insert into t1 (code, name) values (3, 'Jeremy'), (4, 'Matt'); +select id, code, name from t1 order by id; +id code name +1 1 Tim +2 1 Monty +3 2 David +4 2 Erik +5 3 Sasha +6 3 Jeremy +7 4 Matt +COMMIT; +DROP TABLE t1; +create table t1 (n int(10), d int(10)) engine=MyISAM; +create table t2 (n int(10), d int(10)) engine=MyISAM; +insert into t1 values(1,1),(1,2); +insert into t2 values(1,10),(2,20); +UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n; +select * from t1; +n d +1 10 +1 10 +select * from t2; +n d +1 30 +2 20 +drop table t1,t2; +create table t1 (a int, b int) engine=MyISAM; +insert into t1 values(20,null); +select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on +t2.b=t3.a; +b ifnull(t2.b,"this is null") +NULL this is null +select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on +t2.b=t3.a order by 1; +b ifnull(t2.b,"this is null") +NULL this is null +insert into t1 values(10,null); +select t2.b, 
ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on +t2.b=t3.a order by 1; +b ifnull(t2.b,"this is null") +NULL this is null +NULL this is null +drop table t1; +create table t1 (a varchar(10) not null) engine = MEMORY; +create table t2 (b varchar(10) not null unique) engine=MyISAM; +select t1.a from t1,t2 where t1.a=t2.b; +a +drop table t1,t2; +create table t1 (a int not null, b int, primary key (a)) engine = MyISAM; +create table t2 (a int not null, b int, primary key (a)) engine = MyISAM; +insert into t1 values (10, 20); +insert into t2 values (10, 20); +update t1, t2 set t1.b = 150, t2.b = t1.b where t2.a = t1.a and t1.a = 10; +drop table t1,t2; +CREATE TABLE t1 (a int not null primary key, b int not null, unique (b)) engine=MyISAM; +INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9); +UPDATE t1 set a=a+100 where b between 2 and 3 and a < 1000; +SELECT * from t1; +a b +1 1 +102 2 +103 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +drop table t1; +CREATE TABLE t1 (a int not null primary key, b int not null, key (b)) engine=MyISAM; +CREATE TABLE t2 (a int not null primary key, b int not null, key (b)) engine=MyISAM; +INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12); +INSERT INTO t2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9); +update t1,t2 set t1.a=t1.a+100; +select * from t1; +a b +101 1 +102 2 +103 3 +104 4 +105 5 +106 6 +107 7 +108 8 +109 9 +110 10 +111 11 +112 12 +update t1,t2 set t1.a=t1.a+100 where t1.a=101; +select * from t1; +a b +201 1 +102 2 +103 3 +104 4 +105 5 +106 6 +107 7 +108 8 +109 9 +110 10 +111 11 +112 12 +update t1,t2 set t1.b=t1.b+10 where t1.b=2; +select * from t1; +a b +201 1 +102 12 +103 3 +104 4 +105 5 +106 6 +107 7 +108 8 +109 9 +110 10 +111 11 +112 12 +update t1,t2 set t1.b=t1.b+2,t2.b=t1.b+10 where t1.b between 3 and 5 and t1.a=t2.a+100; +select * from t1; +a b +201 1 +102 12 +103 5 +104 6 +105 7 +106 6 +107 7 +108 8 +109 9 +110 10 +111 11 +112 12 +select * from t2; +a b +1 1 +2 2 +3 13 +4 14 +5 15 +6 6 +7 7 +8 8 +9 9 +drop table t1,t2; +CREATE TABLE t2 ( NEXT_T BIGINT NOT NULL PRIMARY KEY) ENGINE=MEMORY; +CREATE TABLE t1 ( B_ID INTEGER NOT NULL PRIMARY KEY) ENGINE=MyISAM; +SET AUTOCOMMIT=0; +INSERT INTO t1 ( B_ID ) VALUES ( 1 ); +INSERT INTO t2 ( NEXT_T ) VALUES ( 1 ); +ROLLBACK; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +SELECT * FROM t1; +B_ID +1 +drop table t1,t2; +create table t1 ( pk int primary key, parent int not null, child int not null, index (parent) ) engine = MyISAM; +insert into t1 values (1,0,4), (2,1,3), (3,2,1), (4,1,2); +select distinct parent,child from t1 order by parent; +parent child +0 4 +1 2 +1 3 +2 1 +drop table t1; +create table t1 (a int not null auto_increment primary key, b int, c int, key(c)) engine=MyISAM; +create table t2 (a int not null auto_increment primary key, b int) ENGINE = MEMORY; +insert into t1 (b) values (null),(null),(null),(null),(null),(null),(null); +insert into t2 (a) select b from t1; +insert into t1 (b) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert 
into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +insert into t2 (a) select b from t1; +insert into t1 (a) select b from t2; +select count(*) from t1; +count(*) +29267 +explain select * from t1 where c between 1 and 2500; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range c c 5 NULL # Using where +update t1 set c=a; +explain select * from t1 where c between 1 and 2500; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range c c 5 NULL # Using where +drop table t1,t2; +create table t1 (id int primary key auto_increment, fk int, index index_fk (fk)) engine=MyISAM; +insert into t1 (id) values (null),(null),(null),(null),(null); +update t1 set fk=69 where fk is null order by id limit 1; +SELECT * from t1; +id fk +1 69 +2 NULL +3 NULL +4 NULL +5 NULL +drop table t1; +create table t1 (a int not null, b int not null, key (a)) engine=MyISAM; +insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3); +SET @tmp=0; +update t1 set b=(@tmp:=@tmp+1) order by a; +update t1 set b=99 where a=1 order by b asc limit 1; +update t1 set b=100 where a=1 order by b desc limit 2; +update t1 set a=a+10+b where a=1 order by b; +select * from t1 order by a,b; +a b +2 4 +2 5 +2 6 +3 7 +3 8 +3 9 +3 10 +3 11 +3 12 +13 2 +111 100 +111 100 +drop table t1; +create table t1 ( c char(8) not null ) engine=MyISAM; +insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); +insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F'); +alter table t1 add b char(8) not null; +alter table t1 add a char(8) not null; +alter table t1 add primary key (a,b,c); +update t1 set a=c, b=c; +create table t2 (c char(8) not null, b char(8) not null, a char(8) not null, primary key(a,b,c)) engine=MyISAM; +insert into t2 select * from t1; +delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; +drop table t1,t2; +SET AUTOCOMMIT=1; +create table t1 (a integer auto_increment primary key) engine=MyISAM; +insert into t1 (a) values (NULL),(NULL); +truncate table t1; +insert into t1 (a) values (NULL),(NULL); +SELECT * from t1; +a +1 +2 +drop table t1; +CREATE TABLE t1 (col1 int(1))ENGINE=MyISAM; +CREATE TABLE t2 (col1 int(1),stamp TIMESTAMP,INDEX stamp_idx +(stamp))ENGINE=MyISAM; +insert into t1 values (1),(2),(3); +insert into t2 values (1, 20020204130000),(2, 20020204130000),(4,20020204310000 ),(5,20020204230000); +Warnings: +Warning 1265 Data truncated for column 'stamp' at row 3 +SELECT col1 FROM t1 UNION SELECT col1 FROM t2 WHERE stamp < +'20020204120000' GROUP BY col1; +col1 +1 +2 +3 +4 +drop table t1,t2; +CREATE TABLE t1 ( +`id` int(10) unsigned NOT NULL auto_increment, +`id_object` int(10) unsigned default '0', +`id_version` int(10) unsigned NOT NULL default '1', +`label` varchar(100) NOT NULL default '', +`description` text, +PRIMARY KEY (`id`), +KEY `id_object` (`id_object`), +KEY `id_version` (`id_version`) +) ENGINE=MyISAM; +INSERT INTO t1 VALUES("6", "3382", "9", "Test", NULL), ("7", "102", "5", "Le Pekin (Test)", NULL),("584", "1794", "4", "Test de resto", NULL),("837", "1822", "6", "Test 3", NULL),("1119", "3524", "1", "Societe Test", NULL),("1122", "3525", "1", "Fournisseur Test", NULL); +CREATE TABLE t2 ( +`id` int(10) unsigned NOT NULL auto_increment, +`id_version` int(10) unsigned NOT NULL default '1', +PRIMARY KEY (`id`), +KEY `id_version` (`id_version`) +) ENGINE=MyISAM; +INSERT INTO t2 VALUES("3524", "1"),("3525", "1"),("1794", "4"),("102", "5"),("1822", "6"),("3382", "9"); +SELECT 
t2.id, t1.`label` FROM t2 INNER JOIN +(SELECT t1.id_object as id_object FROM t1 WHERE t1.`label` LIKE '%test%') AS lbl +ON (t2.id = lbl.id_object) INNER JOIN t1 ON (t2.id = t1.id_object); +id label +3382 Test +102 Le Pekin (Test) +1794 Test de resto +1822 Test 3 +3524 Societe Test +3525 Fournisseur Test +drop table t1,t2; +create table t1 (a int, b varchar(200), c text not null) checksum=1 engine=MyISAM; +create table t2 (a int, b varchar(200), c text not null) checksum=0 engine=MyISAM; +create table t3 (a int, b varchar(200), c varchar(200) not null) checksum=1 engine=MEMORY; +create table t4 (a int, b varchar(200), c varchar(200) not null) checksum=0 engine=MEMORY; +create table t5 (a int, b varchar(200), c text not null) checksum=1 engine=MyISAM; +create table t6 (a int, b varchar(200), c text not null) checksum=0 engine=MyISAM; +insert t1 values (1, "aaa", "bbb"), (NULL, "", "ccccc"), (0, NULL, ""); +insert t2 select * from t1; +insert t3 select * from t1; +insert t4 select * from t1; +insert t5 select * from t1; +insert t6 select * from t1; +checksum table t1, t2, t3, t4, t5, t6, t7 quick; +Table Checksum +test.t1 2948697075 +test.t2 NULL +test.t3 NULL +test.t4 NULL +test.t5 2948697075 +test.t6 NULL +test.t7 NULL +Warnings: +Error 1146 Table 'test.t7' doesn't exist +checksum table t1, t2, t3, t4, t5, t6, t7; +Table Checksum +test.t1 2948697075 +test.t2 2948697075 +test.t3 2948697075 +test.t4 2948697075 +test.t5 2948697075 +test.t6 2948697075 +test.t7 NULL +Warnings: +Error 1146 Table 'test.t7' doesn't exist +checksum table t1, t2, t3, t4, t5, t6, t7 extended; +Table Checksum +test.t1 2948697075 +test.t2 2948697075 +test.t3 2948697075 +test.t4 2948697075 +test.t5 2948697075 +test.t6 2948697075 +test.t7 NULL +Warnings: +Error 1146 Table 'test.t7' doesn't exist +drop table t1,t2,t3, t4, t5, t6; +create table t1 (id int, name char(10) not null, name2 char(10) not null) engine=MyISAM; +insert into t1 values(1,'first','fff'),(2,'second','sss'),(3,'third','ttt'); +select trim(name2) from t1 union all select trim(name) from t1 union all select trim(id) from t1; +trim(name2) +fff +sss +ttt +first +second +third +1 +2 +3 +drop table t1; +create table t1 (a int) engine=MyISAM; +create table t2 like t1; +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1,t2; +flush status; +show status like "binlog_cache_use"; +Variable_name Value +Binlog_cache_use 0 +show status like "binlog_cache_disk_use"; +Variable_name Value +Binlog_cache_disk_use 0 +create table t1 (a int) engine=MyISAM; +show status like "binlog_cache_use"; +Variable_name Value +Binlog_cache_use 0 +show status like "binlog_cache_disk_use"; +Variable_name Value +Binlog_cache_disk_use 0 +begin; +delete from t1; +commit; +show status like "binlog_cache_use"; +Variable_name Value +Binlog_cache_use 0 +show status like "binlog_cache_disk_use"; +Variable_name Value +Binlog_cache_disk_use 0 +drop table t1; +create table t1 (c char(10), index (c,c)) engine=MyISAM; +ERROR 42S21: Duplicate column name 'c' +create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)) engine=MyISAM; +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)) engine=MyISAM; +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)) engine=MyISAM; +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10)) engine=MyISAM; +alter table t1 add key (c1,c1); +ERROR 
42S21: Duplicate column name 'c1' +alter table t1 add key (c2,c1,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c1,c2,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c1,c1,c2); +ERROR 42S21: Duplicate column name 'c1' +drop table t1; +create table t1(a int(1) , b int(1)) engine=MyISAM; +insert into t1 values ('1111', '3333'); +select distinct concat(a, b) from t1; +concat(a, b) +11113333 +drop table t1; +create temporary table t1 (a int) engine=MyISAM; +insert into t1 values (4711); +truncate t1; +insert into t1 values (42); +select * from t1; +a +42 +drop table t1; +create table t1 (a int) engine=MyISAM; +insert into t1 values (4711); +truncate t1; +insert into t1 values (42); +select * from t1; +a +42 +drop table t1; +create table t1 (a int not null, b int not null, c blob not null, d int not null, e int, primary key (a,b,c(255),d)) engine=MyISAM; +insert into t1 values (2,2,"b",2,2),(1,1,"a",1,1),(3,3,"ab",3,3); +select * from t1 order by a,b,c,d; +a b c d e +1 1 a 1 1 +2 2 b 2 2 +3 3 ab 3 3 +explain select * from t1 order by a,b,c,d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using filesort +drop table t1; +create table t1 (a char(1), b char(1), key(a, b)) engine=MyISAM; +insert into t1 values ('8', '6'), ('4', '7'); +select min(a) from t1; +min(a) +4 +select min(b) from t1 where a='8'; +min(b) +6 +drop table t1; +create table t1 (x bigint unsigned not null primary key) engine=MyISAM; +insert into t1(x) values (0xfffffffffffffff0),(0xfffffffffffffff1); +select * from t1; +x +18446744073709551600 +18446744073709551601 +select count(*) from t1 where x>0; +count(*) +2 +select count(*) from t1 where x=0; +count(*) +0 +select count(*) from t1 where x<0; +count(*) +0 +select count(*) from t1 where x < -16; +count(*) +0 +select count(*) from t1 where x = -16; +count(*) +0 +explain select count(*) from t1 where x > -16; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 2 Using where; Using index +select count(*) from t1 where x > -16; +count(*) +2 +select * from t1 where x > -16; +x +18446744073709551600 +18446744073709551601 +select count(*) from t1 where x = 18446744073709551601; +count(*) +1 +drop table t1; +set storage_engine=MyISAM; +drop table if exists t1,t2,t3; +--- Testing varchar --- +--- Testing varchar --- +create table t1 (v varchar(10), c char(10), t text); +insert into t1 values('+ ', '+ ', '+ '); +set @a=repeat(' ',20); +insert into t1 values (concat('+',@a),concat('+',@a),concat('+',@a)); +Warnings: +Note 1265 Data truncated for column 'v' at row 1 +select concat('*',v,'*',c,'*',t,'*') from t1; +concat('*',v,'*',c,'*',t,'*') +*+ *+*+ * +*+ *+*+ * +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(10) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +create table t2 like t1; +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `v` varchar(10) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +create table t3 select * from t1; +show create table t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `v` varchar(10) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +alter table t1 modify c varchar(10); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(10) DEFAULT NULL, + `c` varchar(10) DEFAULT NULL, + `t` text 
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 +alter table t1 modify v char(10); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` char(10) DEFAULT NULL, + `c` varchar(10) DEFAULT NULL, + `t` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +alter table t1 modify t varchar(10); +Warnings: +Note 1265 Data truncated for column 't' at row 2 +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` char(10) DEFAULT NULL, + `c` varchar(10) DEFAULT NULL, + `t` varchar(10) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select concat('*',v,'*',c,'*',t,'*') from t1; +concat('*',v,'*',c,'*',t,'*') +*+*+*+ * +*+*+*+ * +drop table t1,t2,t3; +create table t1 (v varchar(10), c char(10), t text, key(v), key(c), key(t(10))); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(10) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text, + KEY `v` (`v`), + KEY `c` (`c`), + KEY `t` (`t`(10)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select count(*) from t1; +count(*) +270 +insert into t1 values(concat('a',char(1)),concat('a',char(1)),concat('a',char(1))); +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where c='a'; +count(*) +10 +select count(*) from t1 where t='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where c='a '; +count(*) +10 +select count(*) from t1 where t='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where c like 'a%'; +count(*) +11 +select count(*) from t1 where t like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 13 const # Using where; Using index +explain select count(*) from t1 where c='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref c c 11 const # Using where; Using index +explain select count(*) from t1 where t='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range t t 13 NULL # Using where +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 13 NULL # Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 13 const # Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 13 const # Using where; Using index +alter table t1 add unique(v); +ERROR 23000: Duplicate entry '{ ' for key 'v_2' +alter table t1 add key(v); +select concat('*',v,'*',c,'*',t,'*') as qq from t1 where v='a'; +qq +*a*a*a* +*a *a*a * +*a *a*a * +*a *a*a * +*a *a*a * +*a *a*a * +*a *a*a * +*a *a*a * +*a *a*a * +*a *a*a * +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v,v_2 # 13 const # Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 
10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(c) from t1 group by v limit 10; +v count(c) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(c) from t1 group by v limit 10; +v count(c) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select c,count(*) from t1 group by c limit 10; +c count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select c,count(t) from t1 group by c limit 10; +c count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result c,count(t) from t1 group by c limit 10; +c count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select t,count(*) from t1 group by t limit 10; +t count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select t,count(t) from t1 group by t limit 10; +t count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result t,count(t) from t1 group by t limit 10; +t count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 modify v varchar(300), drop key v, drop key v_2, add key v (v); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const # Using where; Using index +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 303 NULL # Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const # Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const # Using where; Using index +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const # Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 drop key v, add key v (v(30)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(30)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select 
count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const # Using where +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL # Using where +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const # Using where +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const # Using where +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const # Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 modify v varchar(600), drop key v, add key v (v); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(600) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +drop table t1; +create table t1 (a char(10), unique (a)); +insert into t1 values ('a '); +insert into t1 values ('a '); +ERROR 23000: Duplicate entry 'a' for key 'a' +alter table t1 modify a varchar(10); +insert into t1 values ('a '),('a '),('a '),('a '); +ERROR 23000: Duplicate entry 'a ' for key 'a' +insert into t1 values ('a '); +ERROR 23000: Duplicate entry 'a ' for key 'a' +insert into t1 values ('a '); +ERROR 23000: Duplicate entry 'a ' for key 'a' +insert into t1 values ('a '); +ERROR 23000: Duplicate entry 'a ' for key 'a' +update t1 set a='a ' where a like 'a%'; +select concat(a,'.') from t1; +concat(a,'.') +a . +update t1 set a='abc ' where a like 'a '; +select concat(a,'.') from t1; +concat(a,'.') +a . +update t1 set a='a ' where a like 'a %'; +select concat(a,'.') from t1; +concat(a,'.') +a . +update t1 set a='a ' where a like 'a '; +select concat(a,'.') from t1; +concat(a,'.') +a . 
+drop table t1; +create table t1 (v varchar(10), c char(10), t text, key(v(5)), key(c(5)), key(t(5))); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(10) DEFAULT NULL, + `c` char(10) DEFAULT NULL, + `t` text, + KEY `v` (`v`(5)), + KEY `c` (`c`(5)), + KEY `t` (`t`(5)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; +create table t1 (v char(10) character set utf8); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` char(10) CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; +create table t1 (v varchar(10), c char(10)) row_format=fixed; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(10) DEFAULT NULL, + `c` char(10) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED +insert into t1 values('a','a'),('a ','a '); +select concat('*',v,'*',c,'*') from t1; +concat('*',v,'*',c,'*') +*a*a* +*a *a* +drop table t1; +create table t1 (v varchar(65530), key(v(10))); +insert into t1 values(repeat('a',65530)); +select length(v) from t1 where v=repeat('a',65530); +length(v) +65530 +drop table t1; +create table t1(a int, b varchar(12), key ba(b, a)); +insert into t1 values (1, 'A'), (20, NULL); +explain select * from t1 where a=20 and b is null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref ba ba 20 const,const 1 Using where; Using index +select * from t1 where a=20 and b is null; +a b +20 NULL +drop table t1; +create table t1 (v varchar(65530), key(v)); +Warnings: +Warning 1071 Specified key was too long; max key length is 1000 bytes +drop table t1; +create table t1 (v varchar(65536)); +Warnings: +Note 1246 Converting column 'v' from VARCHAR to TEXT +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` mediumtext +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; +create table t1 (v varchar(65530) character set utf8); +Warnings: +Note 1246 Converting column 'v' from VARCHAR to TEXT +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` mediumtext CHARACTER SET utf8 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; +set storage_engine=MEMORY; +create table t1 (v varchar(16384)) engine=MyISAM; +drop table t1; +create table t1 (a char(1), b char(1), key(a, b)) engine=MyISAM; +insert into t1 values ('8', '6'), ('4', '7'); +select min(a) from t1; +min(a) +4 +select min(b) from t1 where a='8'; +min(b) +6 +drop table t1; +CREATE TABLE t1 ( `a` int(11) NOT NULL auto_increment, `b` int(11) default NULL,PRIMARY KEY (`a`),UNIQUE KEY `b` (`b`)) ENGINE=MyISAM; +insert into t1 (b) values (1); +replace into t1 (b) values (2), (1), (3); +select * from t1; +a b +3 1 +2 2 +4 3 +truncate table t1; +insert into t1 (b) values (1); +replace into t1 (b) values (2); +replace into t1 (b) values (1); +replace into t1 (b) values (3); +select * from t1; +a b +3 1 +2 2 +4 3 +drop table t1; +create table t1 (rowid int not null auto_increment, val int not null,primary +key (rowid), unique(val)) engine=MyISAM; +replace into t1 (val) values ('1'),('2'); +replace into t1 (val) values ('1'),('2'); +insert into t1 (val) values ('1'),('2'); +ERROR 23000: Duplicate entry '1' for key 'val' +select * from t1; +rowid val +3 1 +4 2 +drop table t1; +CREATE TABLE t1 (GRADE DECIMAL(4) NOT NULL, PRIMARY KEY (GRADE)) ENGINE=MyISAM; +INSERT INTO t1 (GRADE) VALUES (151),(252),(343); +SELECT GRADE FROM t1 WHERE GRADE > 160 AND GRADE < 300; +GRADE +252 +SELECT GRADE FROM t1 WHERE GRADE= 151; +GRADE +151 +DROP TABLE t1; 
+create table t1 (f1 varchar(10), f2 varchar(10), primary key (f1,f2)) engine=MyISAM; +create table t2 (f3 varchar(10), f4 varchar(10), key (f4)) engine=MyISAM; +insert into t2 values ('aa','cc'); +insert into t1 values ('aa','bb'),('aa','cc'); +delete t1 from t1,t2 where f1=f3 and f4='cc'; +select * from t1; +f1 f2 +drop table t1,t2; +create table t1 ( +a int, b char(10), c char(10), filler char(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = MyISAM; +create table t2 ( +a int, b char(10), c char(10), filler char(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = MyISAM; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xe880bde880bd,_utf8 0xe880bde880bd,'six'); +insert into t1 values (4,_utf8 0xe880bdD0B1e880bd,_utf8 0xe880bdD0B1e880bd,'seven'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven'); +insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update t2 set filler ='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +a hex(b) hex(c) filler +1 61626364656667 61626364656667 boo +4 D0B1 D0B1 eight +4 5B 5B five +4 E880BD E880BD four +4 E880BDD0B1E880BD E880BDD0B1E880BD seven +4 E880BDE880BD E880BDE880BD six +3 71727374757677 71727374757677 three +2 696A6B696C6D6E 696A6B696C6D6E two +select a,hex(b),hex(c),filler from t2 order by filler; +a hex(b) hex(c) filler +4 05612020 05612020 email +4 0000563001FC0563 0000563001FC0563 email +4 05630563 05630563 email +4 0563 0563 email +4 0120 0120 email +4 01FC 01FC email +4 00E400E50068 00E400E50068 email +4 00640065 00640065 email +4 0000E400 0000E400 email +1 0061006200630064006500660067 0061006200630064006500660067 one +3 0071007200730074007500760077 0071007200730074007500760077 three +2 0069006A006B0069006C006D006E 0069006A006B0069006C006D006E two +drop table t1; +drop table t2; +create table t1 ( +a int, b varchar(10), c varchar(10), filler varchar(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = MyISAM; +create table t2 ( +a int, b varchar(10), c varchar(10), filler varchar(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = MyISAM; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xe880bde880bd,_utf8 
0xe880bde880bd,'six'); +insert into t1 values (4,_utf8 0xe880bdD0B1e880bd,_utf8 0xe880bdD0B1e880bd,'seven'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven'); +insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update t2 set filler ='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +a hex(b) hex(c) filler +1 61626364656667 61626364656667 boo +4 D0B1 D0B1 eight +4 5B 5B five +4 E880BD E880BD four +4 E880BDD0B1E880BD E880BDD0B1E880BD seven +4 E880BDE880BD E880BDE880BD six +3 71727374757677 71727374757677 three +2 696A6B696C6D6E 696A6B696C6D6E two +select a,hex(b),hex(c),filler from t2 order by filler; +a hex(b) hex(c) filler +4 05612020 05612020 email +4 0000563001FC0563 0000563001FC0563 email +4 05630563 05630563 email +4 0563 0563 email +4 0120 0120 email +4 01FC 01FC email +4 00E400E50068 00E400E50068 email +4 00640065 00640065 email +4 0000E400 0000E400 email +1 0061006200630064006500660067 0061006200630064006500660067 one +3 0071007200730074007500760077 0071007200730074007500760077 three +2 0069006A006B0069006C006D006E 0069006A006B0069006C006D006E two +drop table t1; +drop table t2; +create table t1 ( +a int, b text(10), c text(10), filler text(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = MyISAM; +create table t2 ( +a int, b text(10), c text(10), filler text(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = MyISAM; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xe880bde880bd,_utf8 0xe880bde880bd,'six'); +insert into t1 values (4,_utf8 0xe880bdD0B1e880bd,_utf8 0xe880bdD0B1e880bd,'seven'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven'); +insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update 
t2 set filler ='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +a hex(b) hex(c) filler +1 61626364656667 61626364656667 boo +4 D0B1 D0B1 eight +4 5B 5B five +4 E880BD E880BD four +4 E880BDD0B1E880BD E880BDD0B1E880BD seven +4 E880BDE880BD E880BDE880BD six +3 71727374757677 71727374757677 three +2 696A6B696C6D6E 696A6B696C6D6E two +select a,hex(b),hex(c),filler from t2 order by filler; +a hex(b) hex(c) filler +4 0000E400 0000E400 email +4 00640065 00640065 email +4 00E400E50068 00E400E50068 email +4 01FC 01FC email +4 0120 0120 email +4 0563 0563 email +4 05630563 05630563 email +4 0000563001FC0563 0000563001FC0563 email +4 05612020 05612020 email +1 0061006200630064006500660067 0061006200630064006500660067 one +3 0071007200730074007500760077 0071007200730074007500760077 three +2 0069006A006B0069006C006D006E 0069006A006B0069006C006D006E two +drop table t1; +drop table t2; +create table t1 ( +a int, b blob(10), c blob(10), filler blob(10), primary key(a, b(2)), unique key (a, c(2)) +) character set utf8 engine = MyISAM; +create table t2 ( +a int, b blob(10), c blob(10), filler blob(10), primary key(a, b(2)), unique key (a, c(2)) +) character set ucs2 engine = MyISAM; +insert into t1 values (1,'abcdefg','abcdefg','one'); +insert into t1 values (2,'ijkilmn','ijkilmn','two'); +insert into t1 values (3,'qrstuvw','qrstuvw','three'); +insert into t1 values (4,_utf8 0xe880bd,_utf8 0xe880bd,'four'); +insert into t1 values (4,_utf8 0x5b,_utf8 0x5b,'five'); +insert into t1 values (4,_utf8 0xD0B1,_utf8 0xD0B1,'eight'); +insert into t2 values (1,'abcdefg','abcdefg','one'); +insert into t2 values (2,'ijkilmn','ijkilmn','two'); +insert into t2 values (3,'qrstuvw','qrstuvw','three'); +insert into t2 values (4,_ucs2 0x00e400,_ucs2 0x00e400,'four'); +insert into t2 values (4,_ucs2 0x00640065,_ucs2 0x00640065,'five'); +insert into t2 values (4,_ucs2 0x00e400e50068,_ucs2 0x00e400e50068,'six'); +insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven'); +insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight'); +insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten'); +insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken'); +update t1 set filler = 'boo' where a = 1; +update t2 set filler ='email' where a = 4; +select a,hex(b),hex(c),filler from t1 order by filler; +a hex(b) hex(c) filler +1 61626364656667 61626364656667 boo +4 D0B1 D0B1 eight +4 5B 5B five +4 E880BD E880BD four +3 71727374757677 71727374757677 three +2 696A6B696C6D6E 696A6B696C6D6E two +select a,hex(b),hex(c),filler from t2 order by filler; +a hex(b) hex(c) filler +4 0000E400 0000E400 email +4 00640065 00640065 email +4 00E400E50068 00E400E50068 email +4 01FC 01FC email +4 0120 0120 email +4 0563 0563 email +4 05612020 05612020 email +1 61626364656667 61626364656667 one +3 71727374757677 71727374757677 three +2 696A6B696C6D6E 696A6B696C6D6E two +drop table t1; +drop table t2; +commit; +CREATE TABLE t1 ( +ind enum('0','1','2') NOT NULL default '0', +string1 varchar(250) NOT NULL, +PRIMARY KEY (ind) +) ENGINE=MyISAM DEFAULT CHARSET=utf8; +CREATE TABLE t2 ( +ind enum('0','1','2') NOT NULL default '0', +string1 varchar(250) NOT NULL, +PRIMARY KEY (ind) +) ENGINE=MyISAM DEFAULT CHARSET=ucs2; +INSERT INTO t1 VALUES ('1', ''),('2', ''); +INSERT INTO t2 VALUES ('1', ''),('2', ''); +SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1; +hex(ind) hex(string1) +31 +32 +SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1; +hex(ind) hex(string1) +0031 +0032 +drop table t1,t2; +CREATE TABLE t1 ( +ind 
set('0','1','2') NOT NULL default '0', +string1 varchar(250) NOT NULL, +PRIMARY KEY (ind) +) ENGINE=MyISAM DEFAULT CHARSET=utf8; +CREATE TABLE t2 ( +ind set('0','1','2') NOT NULL default '0', +string1 varchar(250) NOT NULL, +PRIMARY KEY (ind) +) ENGINE=MyISAM DEFAULT CHARSET=ucs2; +INSERT INTO t1 VALUES ('1', ''),('2', ''); +INSERT INTO t2 VALUES ('1', ''),('2', ''); +SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1; +hex(ind) hex(string1) +31 +32 +SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1; +hex(ind) hex(string1) +0031 +0032 +drop table t1,t2; +CREATE TABLE t1 ( +ind bit not null, +string1 varchar(250) NOT NULL, +PRIMARY KEY (ind) +) ENGINE=MyISAM DEFAULT CHARSET=utf8; +CREATE TABLE t2 ( +ind bit not null, +string1 varchar(250) NOT NULL, +PRIMARY KEY (ind) +) ENGINE=MyISAM DEFAULT CHARSET=ucs2; +insert into t1 values(0,''),(1,''); +insert into t2 values(0,''),(1,''); +select hex(ind),hex(string1) from t1 order by string1; +hex(ind) hex(string1) +0 +1 +select hex(ind),hex(string1) from t2 order by string1; +hex(ind) hex(string1) +0 +1 +drop table t1,t2; +create table t2 ( +a int, b char(10), filler char(10), primary key(a, b(2)) +) character set utf8 engine = MyISAM; +insert into t2 values (1,'abcdefg','one'); +insert into t2 values (2,'ijkilmn','two'); +insert into t2 values (3, 'qrstuvw','three'); +update t2 set a=5, filler='booo' where a=1; +drop table t2; +create table t2 ( +a int, b char(10), filler char(10), primary key(a, b(2)) +) character set ucs2 engine = MyISAM; +insert into t2 values (1,'abcdefg','one'); +insert into t2 values (2,'ijkilmn','two'); +insert into t2 values (3, 'qrstuvw','three'); +update t2 set a=5, filler='booo' where a=1; +drop table t2; +create table t1(a int not null, b char(110),primary key(a,b(100))) engine=MyISAM default charset=utf8; +insert into t1 values(1,'abcdefg'),(2,'defghijk'); +insert into t1 values(6,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1); +insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2); +select a,hex(b) from t1 order by b; +a hex(b) +1 61626364656667 +2 6465666768696A6B +6 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1 +7 
D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2 +update t1 set b = 'three' where a = 6; +drop table t1; +create table t1(a int not null, b text(110),primary key(a,b(100))) engine=MyISAM default charset=utf8; +insert into t1 values(1,'abcdefg'),(2,'defghijk'); +insert into t1 values(6,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1); +insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2); +select a,hex(b) from t1 order by b; +a hex(b) +1 61626364656667 +2 6465666768696A6B +6 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1 +7 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2 +update t1 set b = 'three' where a = 6; +drop table t1; +create table t1(a int not null, b int, c int, d int, primary key(a)) engine=MyISAM; +insert into t1(a) values (1),(2),(3); +commit; +set autocommit = 0; +update t1 set b = 5 where a = 2; +create trigger t1t before insert on t1 for each row begin set NEW.b = NEW.a * 10 + 5, NEW.c = NEW.a / 10; end | +set autocommit = 0; +insert into t1(a) values (10),(20),(30),(40),(50),(60),(70),(80),(90),(100), +(11),(21),(31),(41),(51),(61),(71),(81),(91),(101), +(12),(22),(32),(42),(52),(62),(72),(82),(92),(102), +(13),(23),(33),(43),(53),(63),(73),(83),(93),(103), +(14),(24),(34),(44),(54),(64),(74),(84),(94),(104); +commit; +commit; +drop trigger t1t; +drop table t1; +create table t1(a int not null, b int, c int, d int, primary key(a)) engine=MyISAM; +create table t2(a int not null, b int, c int, d int, primary key(a)) engine=MyISAM; +create table t3(a int not null, b int, c int, d int, primary key(a)) engine=MyISAM; +create table t4(a int not null, b int, c int, d int, primary key(a)) engine=MyISAM; +create table t5(a int not null, b int, c int, d int, primary key(a)) engine=MyISAM; +insert into t1(a) values 
(1),(2),(3); +insert into t2(a) values (1),(2),(3); +insert into t3(a) values (1),(2),(3); +insert into t4(a) values (1),(2),(3); +insert into t3(a) values (5),(7),(8); +insert into t4(a) values (5),(7),(8); +insert into t5(a) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12); +create trigger t1t before insert on t1 for each row begin +INSERT INTO t2 SET a = NEW.a; +end | +create trigger t2t before insert on t2 for each row begin +DELETE FROM t3 WHERE a = NEW.a; +end | +create trigger t3t before delete on t3 for each row begin +UPDATE t4 SET b = b + 1 WHERE a = OLD.a; +end | +create trigger t4t before update on t4 for each row begin +UPDATE t5 SET b = b + 1 where a = NEW.a; +end | +commit; +set autocommit = 0; +update t1 set b = b + 5 where a = 1; +update t2 set b = b + 5 where a = 1; +update t3 set b = b + 5 where a = 1; +update t4 set b = b + 5 where a = 1; +insert into t5(a) values(20); +set autocommit = 0; +insert into t1(a) values(7); +insert into t2(a) values(8); +delete from t2 where a = 3; +update t4 set b = b + 1 where a = 3; +commit; +drop trigger t1t; +drop trigger t2t; +drop trigger t3t; +drop trigger t4t; +drop table t1, t2, t3, t4, t5; +create table t1(a date) engine=MyISAM; +create table t2(a date, key(a)) engine=MyISAM; +insert into t1 values('2005-10-01'); +insert into t2 values('2005-10-01'); +select * from t1, t2 +where t2.a between t1.a - interval 2 day and t1.a + interval 2 day; +a a +2005-10-01 2005-10-01 +drop table t1, t2; +create table t1 (id int not null, f_id int not null, f int not null, +primary key(f_id, id)) engine=MyISAM; +create table t2 (id int not null,s_id int not null,s varchar(200), +primary key(id)) engine=MyISAM; +INSERT INTO t1 VALUES (8, 1, 3); +INSERT INTO t1 VALUES (1, 2, 1); +INSERT INTO t2 VALUES (1, 0, ''); +INSERT INTO t2 VALUES (8, 1, ''); +commit; +DELETE ml.* FROM t1 AS ml LEFT JOIN t2 AS mm ON (mm.id=ml.id) +WHERE mm.id IS NULL; +select ml.* from t1 as ml left join t2 as mm on (mm.id=ml.id) +where mm.id is null lock in share mode; +id f_id f +drop table t1,t2; +create table t1(a int not null, b int, primary key(a)) engine=MyISAM; +insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3); +commit; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +update t1 set b = 5 where b = 1; +set autocommit = 0; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +select * from t1 where a = 7 and b = 3 for update; +a b +7 3 +commit; +commit; +drop table t1; +CREATE TABLE t1 ( a int ) ENGINE=MyISAM; +BEGIN; +INSERT INTO t1 VALUES (1); +OPTIMIZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +DROP TABLE t1; diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index c4d65d8ceaa..43deb4ce846 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -515,6 +515,34 @@ select c1 from t1 order by c1 limit 1; c1 a drop table t1; +create table t1 (a int not null, primary key(a)); +create table t2 (a int not null, b int not null, primary key(a,b)); +insert into t1 values (1),(2),(3),(4),(5),(6); +insert into t2 values (1,1),(2,1); +lock tables t1 read local, t2 read local; +select straight_join * from t1,t2 force index (primary) where t1.a=t2.a; +a a b +1 1 1 +2 2 1 +insert into t2 values(2,0); +select straight_join * from t1,t2 force index (primary) where t1.a=t2.a; +a a b +1 1 1 +2 2 1 +drop table t1,t2; +CREATE TABLE t1 (c1 varchar(250) NOT NULL); +CREATE TABLE t2 (c1 varchar(250) NOT NULL, PRIMARY KEY (c1)); +INSERT INTO t1 VALUES ('test000001'), 
('test000002'), ('test000003'); +INSERT INTO t2 VALUES ('test000002'), ('test000003'), ('test000004'); +LOCK TABLES t1 READ LOCAL, t2 READ LOCAL; +SELECT t1.c1 AS t1c1, t2.c1 AS t2c1 FROM t1, t2 +WHERE t1.c1 = t2.c1 HAVING t1c1 != t2c1; +t1c1 t2c1 +INSERT INTO t2 VALUES ('test000001'), ('test000005'); +SELECT t1.c1 AS t1c1, t2.c1 AS t2c1 FROM t1, t2 +WHERE t1.c1 = t2.c1 HAVING t1c1 != t2c1; +t1c1 t2c1 +DROP TABLE t1,t2; CREATE TABLE t1 (`a` int(11) NOT NULL default '0', `b` int(11) NOT NULL default '0', UNIQUE KEY `a` USING RTREE (`a`,`b`)) ENGINE=MyISAM; Got one of the listed errors create table t1 (a int, b varchar(200), c text not null) checksum=1; diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index 2b564829541..b59f63d207e 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -23,6 +23,9 @@ INSERT INTO t1 VALUES (1), (2); DROP TABLE t1; +# +# Bug #2005 +# CREATE TABLE t1 (a decimal(64, 20)); INSERT INTO t1 VALUES ("1234567890123456789012345678901234567890"), ("0987654321098765432109876543210987654321"); @@ -31,6 +34,9 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1; INSERT INTO `t1` VALUES ('1234567890123456789012345678901234567890.00000000000000000000'),('987654321098765432109876543210987654321.00000000000000000000'); DROP TABLE t1; +# +# Bug #2055 +# CREATE TABLE t1 (a double); INSERT INTO t1 VALUES ('-9e999999'); Warnings: @@ -40,6 +46,9 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1; INSERT INTO `t1` VALUES (RES); DROP TABLE t1; +# +# Bug #3361 mysqldump quotes DECIMAL values inconsistently +# CREATE TABLE t1 (a DECIMAL(10,5), b FLOAT); INSERT INTO t1 VALUES (1.2345, 2.3456); INSERT INTO t1 VALUES ('1.2345', 2.3456); @@ -137,6 +146,9 @@ INSERT INTO t1 VALUES (1, "test", "tes"), (2, "TEST", "TES"); DROP TABLE t1; +# +# Bug #1707 +# CREATE TABLE t1 (`a"b"` char(2)); INSERT INTO t1 VALUES ("1\""), ("\"2"); @@ -156,6 +168,10 @@ INSERT INTO t1 VALUES ("1\""), ("\"2"); DROP TABLE t1; +# +# Bug #1994 +# Bug #4261 +# CREATE TABLE t1 (a VARCHAR(255)) DEFAULT CHARSET koi8r; INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5'), (NULL); @@ -191,6 +207,9 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# Bug #2634 +# CREATE TABLE t1 (a int) ENGINE=MYISAM; INSERT INTO t1 VALUES (1), (2); /*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; @@ -240,11 +259,17 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# Bug #2592 'mysqldump doesn't quote "tricky" names correctly' +# create table ```a` (i int); CREATE TABLE ```a` ( `i` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1; drop table ```a`; +# +# Bug #2591 "mysqldump quotes names inconsistently" +# create table t1(a int); /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; @@ -353,6 +378,9 @@ UNLOCK TABLES; set global sql_mode=''; drop table t1; +# +# Bug #2705 'mysqldump --tab extra output' +# create table t1(a int); insert into t1 values (1),(2),(3); @@ -381,6 +409,9 @@ CREATE TABLE `t1` ( 2 3 drop table t1; +# +# Bug #6101: create database problem +# /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; @@ -433,6 +464,12 @@ USE `mysqldump_test_db`; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; drop database mysqldump_test_db; +# +# Bug #7020 +# Check that we don't dump in UTF8 in compatible mode by default, +# but use the default compiled values, or the values given in +# --default-character-set=xxx. 
However, we should dump in UTF8 +# if it is explicitely set. CREATE TABLE t1 (a CHAR(10)); INSERT INTO t1 VALUES (_latin1 'ÄÖÜß'); @@ -466,6 +503,13 @@ UNLOCK TABLES; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; +# +# Bug#8063: make test mysqldump [ fail ] +# We cannot tes this command because its output depends +# on --default-character-set incompiled into "mysqldump" program. +# If the future we can move this command into a separate test with +# checking that "mysqldump" is compiled with "latin1" +# /*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; /*!40103 SET TIME_ZONE='+00:00' */; /*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; @@ -536,6 +580,9 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# WL #2319: Exclude Tables from dump +# CREATE TABLE t1 (a int); CREATE TABLE t2 (a int); INSERT INTO t1 VALUES (1),(2),(3); @@ -573,6 +620,9 @@ UNLOCK TABLES; DROP TABLE t1; DROP TABLE t2; +# +# Bug #8830 +# CREATE TABLE t1 (`b` blob); INSERT INTO `t1` VALUES (0x602010000280100005E71A); @@ -607,6 +657,9 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# Test for --insert-ignore +# CREATE TABLE t1 (a int); INSERT INTO t1 VALUES (1),(2),(3); INSERT INTO t1 VALUES (4),(5),(6); @@ -671,6 +724,10 @@ INSERT DELAYED IGNORE INTO `t1` VALUES (1),(2),(3),(4),(5),(6); /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# Bug #10286: mysqldump -c crashes on table that has many fields with long +# names +# create table t1 ( F_c4ca4238a0b923820dcc509a6f75849b int, F_c81e728d9d4c2f636f067f89cc14862c int, @@ -1364,6 +1421,9 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; drop table t1; +# +# Test for --add-drop-database +# CREATE TABLE t1 (a int); INSERT INTO t1 VALUES (1),(2),(3); @@ -1404,6 +1464,9 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# Bug #9558 mysqldump --no-data db t1 t2 format still dumps data +# CREATE DATABASE mysqldump_test_db; USE mysqldump_test_db; CREATE TABLE t1 ( a INT ); @@ -1492,6 +1555,11 @@ CREATE TABLE `t2` ( DROP TABLE t1, t2; DROP DATABASE mysqldump_test_db; +# +# Testing with tables and databases that don't exists +# or contains illegal characters +# (Bug #9358 mysqldump crashes if tablename starts with \) +# create database mysqldump_test_db; use mysqldump_test_db; create table t1(a varchar(30) primary key, b int not null); @@ -1530,6 +1598,9 @@ mysqldump: Got error: 1049: Unknown database 'mysqld\ump_test_db' when selecting drop table t1, t2, t3; drop database mysqldump_test_db; use test; +# +# Bug #9657 mysqldump xml ( -x ) does not format NULL fields correctly +# create table t1 (a int(10)); create table t2 (pk int primary key auto_increment, a int(10), b varchar(30), c datetime, d blob, e text); @@ -1586,6 +1657,9 @@ insert into t2 (a, b) values (NULL, NULL),(10, NULL),(NULL, "twenty"),(30, "thir drop table t1, t2; +# +# BUG #12123 +# create table t1 (a text character set utf8, b text character set latin1); insert t1 values (0x4F736E616272C3BC636B, 0x4BF66C6E); select * from t1; @@ -1596,7 +1670,13 @@ select * from t1; a b Osnabrück Köln drop table t1; +# +# BUG#15328 Segmentation fault occured if my.cnf is invalid for escape sequence +# --fields-optionally-enclosed-by=" +# +# BUG #19025 mysqldump doesn't correctly dump "auto_increment = [int]" +# create table `t1` ( t1_name varchar(255) default null, t1_id int(10) unsigned not null auto_increment, @@ -1634,6 +1714,9 @@ t1 CREATE TABLE 
`t1` ( KEY `t1_name` (`t1_name`) ) ENGINE=MyISAM AUTO_INCREMENT=1003 DEFAULT CHARSET=latin1 drop table `t1`; +# +# Bug #18536: wrong table order +# create table t1(a int); create table t2(a int); create table t3(a int); @@ -1671,6 +1754,9 @@ CREATE TABLE `t2` ( /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; drop table t1, t2, t3; +# +# Bug #21288: mysqldump segmentation fault when using --where +# create table t1 (a int); mysqldump: Couldn't execute 'SELECT /*!40001 SQL_NO_CACHE */ * FROM `t1` WHERE xx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx': You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' at line 1 (1064) mysqldump: Got error: 1064: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' at line 1 when retrieving data from server @@ -1702,6 +1788,9 @@ CREATE TABLE `t1` ( drop table t1; End of 4.1 tests +# +# Bug #10213 mysqldump crashes when dumping VIEWs(on MacOS X) +# create database db1; use db1; CREATE TABLE t2 ( @@ -1761,6 +1850,9 @@ drop table t2; drop view v2; drop database db1; use test; +# +# Bug 10713 mysqldump includes database in create view and referenced tables +# create database db2; use db2; create table t1 (a int); @@ -1790,6 +1882,9 @@ a b drop table t1, t2; drop database db1; use test; +# +# dump of view +# create table t1(a int); create view v1 as select * from t1; @@ -1834,6 +1929,9 @@ DROP TABLE IF EXISTS `v1`; drop view v1; drop table t1; +# +# Bug #10213 mysqldump crashes when dumping VIEWs(on MacOS X) +# create database mysqldump_test_db; use mysqldump_test_db; CREATE TABLE t2 ( @@ -1893,6 +1991,9 @@ drop table t2; drop view v2; drop database mysqldump_test_db; use test; +# +# Bug #9756 +# CREATE TABLE t1 (a char(10)); INSERT INTO t1 VALUES ('\''); @@ -1927,6 +2028,9 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# Bug #10927 mysqldump: Can't reload dump with view that consist of other view +# create table t1(a int, b int, c varchar(30)); insert into t1 values(1, 2, "one"), (2, 4, "two"), (3, 6, "three"); create view v3 as @@ -2004,6 +2108,9 @@ DROP TABLE IF EXISTS `v3`; drop view v1, v2, v3; drop table t1; +# +# Test for dumping triggers +# CREATE TABLE t1 (a int, b bigint default NULL); CREATE TABLE t2 (a int); create trigger trg1 before insert on t1 for each row @@ -2202,8 +2309,14 @@ set @fired:= "No"; end if; end BEFORE # STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER root@localhost DROP TABLE t1, t2; +# +# Bugs #9136, #12917: problems with --defaults-extra-file option +# --port=1234 --port=1234 +# +# Test of fix to BUG 12597 +# DROP TABLE IF EXISTS `test1`; Warnings: Note 1051 Unknown table 'test1' @@ -2235,6 +2348,9 @@ a2 DROP TRIGGER testref; DROP TABLE test1; DROP TABLE test2; +# +# BUG#9056 - mysqldump does not dump routines +# DROP TABLE IF EXISTS t1; DROP FUNCTION IF EXISTS bug9056_func1; DROP FUNCTION IF EXISTS bug9056_func2; @@ -2331,6 +2447,9 @@ DROP PROCEDURE bug9056_proc1; DROP PROCEDURE bug9056_proc2; DROP PROCEDURE `a'b`; drop table t1; +# +# BUG# 13052 - mysqldump timestamp reloads broken +# drop table if exists t1; create table t1 (`d` timestamp, unique (`d`)); set time_zone='+00:00'; @@ -2417,6 +2536,9 @@ UNLOCK TABLES; drop table t1; set global time_zone=default; set 
time_zone=default; +# +# Test of fix to BUG 13146 - ansi quotes break loading of triggers +# DROP TABLE IF EXISTS `t1 test`; DROP TABLE IF EXISTS `t2 test`; CREATE TABLE `t1 test` ( @@ -2480,6 +2602,9 @@ UNLOCK TABLES; DROP TRIGGER `test trig`; DROP TABLE `t1 test`; DROP TABLE `t2 test`; +# +# BUG# 12838 mysqldump -x with views exits with error +# drop table if exists t1; create table t1 (a int, b varchar(32), c varchar(32)); insert into t1 values (1, 'first value', 'xxxx'); @@ -2572,6 +2697,10 @@ drop view v2; drop view v0; drop view v1; drop table t1; +# +# BUG#14554 - mysqldump does not separate words "ROW" and "BEGIN" +# for tables with trigger created in the IGNORE_SPACE sql mode. +# SET @old_sql_mode = @@SQL_MODE; SET SQL_MODE = IGNORE_SPACE; CREATE TABLE t1 (a INT); @@ -2627,6 +2756,9 @@ DELIMITER ; DROP TRIGGER tr1; DROP TABLE t1; +# +# Bug #13318: Bad result with empty field and --hex-blob +# create table t1 (a binary(1), b blob); insert into t1 values ('',''); @@ -2694,6 +2826,9 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; drop table t1; +# +# Bug 14871 Invalid view dump output +# create table t1 (a int); insert into t1 values (289), (298), (234), (456), (789); create definer = CURRENT_USER view v1 as select * from t1; @@ -2720,6 +2855,9 @@ a 789 drop table t1; drop view v1, v2, v3, v4, v5; +# +# Bug #16878 dump of trigger +# create table t1 (a int, created datetime); create table t2 (b int, created datetime); create trigger tr1 before insert on t1 for each row set @@ -2742,6 +2880,9 @@ end AFTER # root@localhost drop trigger tr1; drop trigger tr2; drop table t1, t2; +# +# Bug#18462 mysqldump does not dump view structures correctly +# create table t (qty int, price int); insert into t values(3, 50); insert into t values(5, 51); @@ -2761,6 +2902,10 @@ mysqldump { drop view v1; drop view v2; drop table t; +# +# Bug#14857 Reading dump files with single statement stored routines fails. +# fixed by patch for bug#16878 +# /*!50003 CREATE FUNCTION `f`() RETURNS bigint(20) return 42 */| /*!50003 CREATE PROCEDURE `p`() @@ -2775,6 +2920,9 @@ p CREATE DEFINER=`root`@`localhost` PROCEDURE `p`() select 42 drop function f; drop procedure p; +# +# Bug #17371 Unable to dump a schema with invalid views +# create table t1 ( id serial ); create view v1 as select * from t1; drop table t1; @@ -2784,6 +2932,9 @@ mysqldump { } mysqldump drop view v1; +# BUG#17201 Spurious 'DROP DATABASE' in output, +# also confusion between tables and views. 
+# Example code from Markus Popp create database mysqldump_test_db; use mysqldump_test_db; create table t1 (id int); @@ -2844,6 +2995,9 @@ USE `mysqldump_test_db`; drop view v1; drop table t1; drop database mysqldump_test_db; +# +# Bug21014 Segmentation fault of mysqldump on view +# create database mysqldump_tables; use mysqldump_tables; create table basetable ( id serial, tag varchar(64) ); @@ -2877,6 +3031,9 @@ drop view nasishnasifu; drop database mysqldump_views; drop table mysqldump_tables.basetable; drop database mysqldump_tables; +# +# Bug20221 Dumping of multiple databases containing view(s) yields maleformed dumps +# create database mysqldump_dba; use mysqldump_dba; create table t1 (f1 int, f2 int); @@ -2909,6 +3066,9 @@ drop view v1; drop table t1; drop database mysqldump_dbb; use test; +# +# Bug#21215 mysqldump creating incomplete backups without warning +# create user mysqltest_1@localhost; create table t1(a int, b varchar(34)); reset master; @@ -2920,6 +3080,12 @@ mysqldump: Couldn't execute 'SHOW MASTER STATUS': Access denied; you need the SU grant REPLICATION CLIENT on *.* to mysqltest_1@localhost; drop table t1; drop user mysqltest_1@localhost; +# +# Bug #21527 mysqldump incorrectly tries to LOCK TABLES on the +# information_schema database. +# +# Bug #21424 mysqldump failing to export/import views +# create database mysqldump_myDB; use mysqldump_myDB; create user myDB_User; @@ -2938,6 +3104,9 @@ revoke all privileges on mysqldump_myDB.* from myDB_User@localhost; drop user myDB_User; drop database mysqldump_myDB; flush privileges; +# Bug #21424 continues from here. +# Restore. Flush Privileges test ends. +# use mysqldump_myDB; select * from mysqldump_myDB.v1; c1 @@ -2953,7 +3122,81 @@ revoke all privileges on mysqldump_myDB.* from myDB_User@localhost; drop user myDB_User; drop database mysqldump_myDB; use test; -End of 5.0 tests +# +# BUG#13926: --order-by-primary fails if PKEY contains quote character +# +DROP TABLE IF EXISTS `t1`; +CREATE TABLE `t1` ( +`a b` INT, +`c"d` INT, +`e``f` INT, +PRIMARY KEY (`a b`, `c"d`, `e``f`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +insert into t1 values (0815, 4711, 2006); +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO,ANSI' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +DROP TABLE IF EXISTS "t1"; +CREATE TABLE "t1" ( + "a b" int(11) NOT NULL DEFAULT '0', + "c""d" int(11) NOT NULL DEFAULT '0', + "e`f" int(11) NOT NULL DEFAULT '0', + PRIMARY KEY ("a b","c""d","e`f") +); + +LOCK TABLES "t1" WRITE; +/*!40000 ALTER TABLE "t1" DISABLE KEYS */; +INSERT INTO "t1" VALUES (815,4711,2006); +/*!40000 ALTER TABLE "t1" ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 
SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +DROP TABLE IF EXISTS `t1`; +CREATE TABLE `t1` ( + `a b` int(11) NOT NULL DEFAULT '0', + `c"d` int(11) NOT NULL DEFAULT '0', + `e``f` int(11) NOT NULL DEFAULT '0', + PRIMARY KEY (`a b`,`c"d`,`e``f`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; + +LOCK TABLES `t1` WRITE; +/*!40000 ALTER TABLE `t1` DISABLE KEYS */; +INSERT INTO `t1` VALUES (815,4711,2006); +/*!40000 ALTER TABLE `t1` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +DROP TABLE `t1`; +# +# End of 5.0 tests +# drop table if exists t1; CREATE TABLE t1(a int, b int); INSERT INTO t1 VALUES (1,1); @@ -3167,6 +3410,9 @@ mysql-import: Error: 1146, Table 'test.words' doesn't exist, when using table: w drop table t1; drop table t2; drop table words2; +# +# BUG# 16853: mysqldump doesn't show events +# create database first; use first; set time_zone = 'UTC'; @@ -3204,6 +3450,11 @@ third ee3 root@localhost ONE TIME 2030-12-31 22:01:23 NULL NULL NULL NULL ENABLE drop database third; set time_zone = 'SYSTEM'; use test; +# +# BUG#17201 Spurious 'DROP DATABASE' in output, +# also confusion between tables and views. +# Example code from Markus Popp +# create database mysqldump_test_db; use mysqldump_test_db; create table t1 (id int); @@ -3264,4 +3515,6 @@ USE `mysqldump_test_db`; drop view v1; drop table t1; drop database mysqldump_test_db; -End of 5.1 tests +# +# End of 5.1 tests +# diff --git a/mysql-test/r/mysqlshow.result b/mysql-test/r/mysqlshow.result index 60ad5998465..14d8e4f464b 100644 --- a/mysql-test/r/mysqlshow.result +++ b/mysql-test/r/mysqlshow.result @@ -87,6 +87,8 @@ Database: information_schema | ENGINES | | EVENTS | | FILES | +| GLOBAL_STATUS | +| GLOBAL_VARIABLES | | KEY_COLUMN_USAGE | | PARTITIONS | | PLUGINS | @@ -95,6 +97,8 @@ Database: information_schema | ROUTINES | | SCHEMATA | | SCHEMA_PRIVILEGES | +| SESSION_STATUS | +| SESSION_VARIABLES | | STATISTICS | | TABLES | | TABLE_CONSTRAINTS | @@ -115,6 +119,8 @@ Database: INFORMATION_SCHEMA | ENGINES | | EVENTS | | FILES | +| GLOBAL_STATUS | +| GLOBAL_VARIABLES | | KEY_COLUMN_USAGE | | PARTITIONS | | PLUGINS | @@ -123,6 +129,8 @@ Database: INFORMATION_SCHEMA | ROUTINES | | SCHEMATA | | SCHEMA_PRIVILEGES | +| SESSION_STATUS | +| SESSION_VARIABLES | | STATISTICS | | TABLES | | TABLE_CONSTRAINTS | diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result index 10c89effa8b..ce9fb6941f3 100644 --- a/mysql-test/r/mysqltest.result +++ b/mysql-test/r/mysqltest.result @@ -217,8 +217,11 @@ hej a long variable content a long variable content -a long $where variable content +a long a long variable content variable content +a long \$where variable content +banana = banana +Not a banana: ba\$cat\$cat mysqltest: At line 1: Missing arguments to let mysqltest: At line 1: Missing variable name in let mysqltest: At line 1: Missing assignment operator in let diff --git a/mysql-test/r/olap.result b/mysql-test/r/olap.result index 993426a02fc..f9e211c8186 100644 
--- a/mysql-test/r/olap.result +++ b/mysql-test/r/olap.result @@ -611,7 +611,7 @@ C NULL EXPLAIN SELECT type FROM v1 GROUP BY type WITH ROLLUP; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 10 Using filesort +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using filesort DROP VIEW v1; DROP TABLE t1; CREATE TABLE t1 (a int(11) NOT NULL); diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index 64653de5e9c..6461696b0d0 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -495,17 +495,17 @@ gid sid uid 103853 5 250 EXPLAIN select t1.gid, t2.sid, t3.uid from t3, t2, t1 where t2.gid = t1.gid and t2.uid = t3.uid order by t1.gid, t3.uid; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 6 Using index -1 SIMPLE t2 eq_ref PRIMARY,uid PRIMARY 4 test.t1.gid 1 +1 SIMPLE t2 ALL PRIMARY,uid NULL NULL NULL 6 Using temporary; Using filesort 1 SIMPLE t3 eq_ref PRIMARY PRIMARY 2 test.t2.uid 1 Using where; Using index +1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.gid 1 Using index EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.gid = t3.uid order by t1.gid,t3.skr; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t3 ALL PRIMARY NULL NULL NULL 6 Using temporary; Using filesort 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t3.uid 1 Using where; Using index EXPLAIN SELECT t1.gid, t2.sid, t3.uid from t2, t1, t3 where t2.gid = t1.gid and t2.uid = t3.uid order by t3.uid, t1.gid; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 6 Using index; Using temporary; Using filesort -1 SIMPLE t2 eq_ref PRIMARY,uid PRIMARY 4 test.t1.gid 1 +1 SIMPLE t2 ALL PRIMARY,uid NULL NULL NULL 6 Using temporary; Using filesort +1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.gid 1 Using index 1 SIMPLE t3 eq_ref PRIMARY PRIMARY 2 test.t2.uid 1 Using where; Using index EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.gid = t3.uid order by t3.skr,t1.gid; id select_type table type possible_keys key key_len ref rows Extra diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index d8d433ef216..65f3b7ebc64 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -7,7 +7,7 @@ drop table t1; create table t1 (a int) partition by key(a) partitions 0.2+e1; -ERROR 42000: Only normal integers allowed as number here near '0.2+e1' at line 3 +ERROR 42000: Only integers allowed as number here near '0.2+e1' at line 3 create table t1 (a int) partition by key(a) partitions -1; @@ -15,11 +15,11 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp create table t1 (a int) partition by key(a) partitions 1.5; -ERROR 42000: Only normal integers allowed as number here near '1.5' at line 3 +ERROR 42000: Only integers allowed as number here near '1.5' at line 3 create table t1 (a int) partition by key(a) partitions 1e+300; -ERROR 42000: Only normal integers allowed as number here near '1e+300' at line 3 +ERROR 42000: Only integers allowed as number here near '1e+300' at line 3 create table t1 (a int) engine = innodb partition by key (a); @@ -68,7 +68,7 @@ create table t1 (a int) engine = csv partition by list (a) (partition p0 values in (null)); -ERROR HY000: CSV handler cannot be used in partitioned tables +ERROR HY000: Engine cannot be used in partitioned tables create table t1 (a bigint) partition by range (a) (partition p0 values less than 
(0xFFFFFFFFFFFFFFFF), @@ -102,7 +102,7 @@ create table t1 (a int) engine = csv partition by list (a) (partition p0 values in (null)); -ERROR HY000: CSV handler cannot be used in partitioned tables +ERROR HY000: Engine cannot be used in partitioned tables create table t1 (a int) partition by key(a) (partition p0 engine = MEMORY); @@ -1054,7 +1054,7 @@ drop table t1; create table t1 (a int) partition by key (a) (partition p0 engine = MERGE); -ERROR HY000: MyISAM Merge handler cannot be used in partitioned tables +ERROR HY000: Engine cannot be used in partitioned tables create table t1 (a varchar(1)) partition by key (a) as select 'a'; diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index e6bbd3f3124..b7dae03bf47 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -499,6 +499,54 @@ create temporary table if not exists t1 (a1 int); execute stmt; drop temporary table t1; deallocate prepare stmt; +CREATE TABLE t1( +ID int(10) unsigned NOT NULL auto_increment, +Member_ID varchar(15) NOT NULL default '', +Action varchar(12) NOT NULL, +Action_Date datetime NOT NULL, +Track varchar(15) default NULL, +User varchar(12) default NULL, +Date_Updated timestamp NOT NULL default CURRENT_TIMESTAMP on update +CURRENT_TIMESTAMP, +PRIMARY KEY (ID), +KEY Action (Action), +KEY Action_Date (Action_Date) +); +INSERT INTO t1(Member_ID, Action, Action_Date, Track) VALUES +('111111', 'Disenrolled', '2006-03-01', 'CAD' ), +('111111', 'Enrolled', '2006-03-01', 'CAD' ), +('111111', 'Disenrolled', '2006-07-03', 'CAD' ), +('222222', 'Enrolled', '2006-03-07', 'CAD' ), +('222222', 'Enrolled', '2006-03-07', 'CHF' ), +('222222', 'Disenrolled', '2006-08-02', 'CHF' ), +('333333', 'Enrolled', '2006-03-01', 'CAD' ), +('333333', 'Disenrolled', '2006-03-01', 'CAD' ), +('444444', 'Enrolled', '2006-03-01', 'CAD' ), +('555555', 'Disenrolled', '2006-03-01', 'CAD' ), +('555555', 'Enrolled', '2006-07-21', 'CAD' ), +('555555', 'Disenrolled', '2006-03-01', 'CHF' ), +('666666', 'Enrolled', '2006-02-09', 'CAD' ), +('666666', 'Enrolled', '2006-05-12', 'CHF' ), +('666666', 'Disenrolled', '2006-06-01', 'CAD' ); +PREPARE STMT FROM +"SELECT GROUP_CONCAT(Track SEPARATOR ', ') FROM t1 + WHERE Member_ID=? AND Action='Enrolled' AND + (Track,Action_Date) IN (SELECT Track, MAX(Action_Date) FROM t1 + WHERE Member_ID=? + GROUP BY Track + HAVING Track>='CAD' AND + MAX(Action_Date)>'2006-03-01')"; +SET @id='111111'; +EXECUTE STMT USING @id,@id; +GROUP_CONCAT(Track SEPARATOR ', ') +NULL +SET @id='222222'; +EXECUTE STMT USING @id,@id; +GROUP_CONCAT(Track SEPARATOR ', ') +CAD +DEALLOCATE PREPARE STMT; +DROP TABLE t1; +End of 4.1 tests create table t1 (a varchar(20)); insert into t1 values ('foo'); prepare stmt FROM 'SELECT char_length (a) FROM t1'; @@ -1291,6 +1339,27 @@ EXECUTE stmt USING @a; i j i i j DEALLOCATE PREPARE stmt; DROP TABLE IF EXISTS t1, t2, t3; +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (i INT KEY); +CREATE TABLE t2 (i INT); +INSERT INTO t1 VALUES (1), (2); +INSERT INTO t2 VALUES (1); +PREPARE stmt FROM "SELECT t2.i FROM t1 LEFT JOIN t2 ON t2.i = t1.i + WHERE t1.i = ?"; +SET @arg= 1; +EXECUTE stmt USING @arg; +i +1 +SET @arg= 2; +EXECUTE stmt USING @arg; +i +NULL +SET @arg= 1; +EXECUTE stmt USING @arg; +i +1 +DEALLOCATE PREPARE stmt; +DROP TABLE t1, t2; End of 5.0 tests. 
create procedure proc_1() reset query cache; call proc_1(); diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result index d42693bdb95..2bfd6d31ac9 100644 --- a/mysql-test/r/ps_2myisam.result +++ b/mysql-test/r/ps_2myisam.result @@ -1929,26 +1929,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, @@ -1976,26 +1976,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 
128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select @@ -2026,26 +2026,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2066,26 +2066,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def 
@arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select ? := c1 from t9 where c1= 1" ; @@ -2114,26 +2114,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2158,26 +2158,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 
0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2204,26 +2204,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2242,26 +2242,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 
253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1 into ? from t9 where c1= 1" ; @@ -2689,21 +2689,21 @@ set @arg00= '1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 1 -c2 1 -c3 1 -c4 1 -c5 1 -c6 1 +c1 127 +c2 32767 +c3 8388607 +c4 2147483647 +c5 2147483647 +c6 9223372036854775807 c7 3.40282e+38 c8 1.11111111111111e+50 c9 1.11111111111111e+50 @@ -2739,21 +2739,21 @@ set @arg00= '-1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 
1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 -1 -c2 -1 -c3 -1 -c4 -1 -c5 -1 -c6 -1 +c1 -128 +c2 -32768 +c3 -8388608 +c4 -2147483648 +c5 -2147483648 +c6 -9223372036854775808 c7 -3.40282e+38 c8 -1.11111111111111e+50 c9 -1.11111111111111e+50 diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result index 9bc66a00a34..607a0426bd7 100644 --- a/mysql-test/r/ps_3innodb.result +++ b/mysql-test/r/ps_3innodb.result @@ -1912,26 +1912,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, @@ -1959,26 +1959,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def 
@arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select @@ -2009,26 +2009,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2049,26 +2049,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 
63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select ? := c1 from t9 where c1= 1" ; @@ -2097,26 +2097,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2141,26 +2141,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def 
@arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2187,26 +2187,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2225,26 +2225,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 
128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1 into ? from t9 where c1= 1" ; @@ -2672,21 +2672,21 @@ set @arg00= '1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 1 -c2 1 -c3 1 -c4 1 -c5 1 -c6 1 +c1 127 +c2 32767 +c3 8388607 +c4 2147483647 +c5 2147483647 +c6 9223372036854775807 c7 3.40282e+38 c8 1.11111111111111e+50 c9 1.11111111111111e+50 @@ -2722,21 +2722,21 @@ set @arg00= '-1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range 
value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 -1 -c2 -1 -c3 -1 -c4 -1 -c5 -1 -c6 -1 +c1 -128 +c2 -32768 +c3 -8388608 +c4 -2147483648 +c5 -2147483648 +c6 -9223372036854775808 c7 -3.40282e+38 c8 -1.11111111111111e+50 c9 -1.11111111111111e+50 diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result index 370065351d1..f4eec0c610c 100644 --- a/mysql-test/r/ps_4heap.result +++ b/mysql-test/r/ps_4heap.result @@ -1913,26 +1913,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 0 31 8 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 0 31 8 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 0 31 8 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 0 31 8 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 0 31 8 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 0 31 8 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 0 31 8 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 0 31 8 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, @@ -1960,26 +1960,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 0 31 8 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 0 31 8 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 0 31 8 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 0 31 8 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 
8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 0 31 8 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 0 31 8 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 0 31 8 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 0 31 8 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select @@ -2010,26 +2010,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 0 31 8 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 0 31 8 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 0 31 8 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 0 31 8 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 0 31 8 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 0 31 8 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 0 31 8 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 0 31 8 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2050,26 +2050,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 0 31 8 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 0 31 8 -def 
@arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 0 31 8 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 0 31 8 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 0 31 8 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 0 31 8 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 0 31 8 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 0 31 8 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select ? := c1 from t9 where c1= 1" ; @@ -2098,26 +2098,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 0 31 8 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 0 31 8 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 0 31 8 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 0 31 8 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 0 31 8 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 0 31 8 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 0 31 8 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 0 31 8 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2142,26 +2142,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 
20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 0 31 8 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 0 31 8 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 0 31 8 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 0 31 8 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 0 31 8 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 0 31 8 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 0 31 8 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 0 31 8 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2188,26 +2188,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 0 31 8 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 0 31 8 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 0 31 8 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 0 31 8 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 0 31 8 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 0 31 8 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 0 31 8 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 0 31 8 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2226,26 +2226,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 
128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 0 31 8 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 0 31 8 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 0 31 8 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 0 31 8 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 0 31 8 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 0 31 8 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 0 31 8 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 0 31 8 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1 into ? from t9 where c1= 1" ; @@ -2673,21 +2673,21 @@ set @arg00= '1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 1 -c2 1 -c3 1 -c4 1 -c5 1 -c6 1 +c1 127 +c2 32767 +c3 8388607 +c4 2147483647 +c5 2147483647 +c6 9223372036854775807 c7 3.40282e+38 c8 1.11111111111111e+50 c9 1.11111111111111e+50 @@ -2723,21 +2723,21 @@ set @arg00= '-1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 
Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 -1 -c2 -1 -c3 -1 -c4 -1 -c5 -1 -c6 -1 +c1 -128 +c2 -32768 +c3 -8388608 +c4 -2147483648 +c5 -2147483648 +c6 -9223372036854775808 c7 -3.40282e+38 c8 -1.11111111111111e+50 c9 -1.11111111111111e+50 diff --git a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result index 6a304787ffb..38e4626d59c 100644 --- a/mysql-test/r/ps_5merge.result +++ b/mysql-test/r/ps_5merge.result @@ -1849,26 +1849,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, @@ -1896,26 +1896,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 
8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select @@ -1946,26 +1946,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -1986,26 +1986,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 
8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select ? := c1 from t9 where c1= 1" ; @@ -2034,26 +2034,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2078,26 +2078,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 
+def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -2124,26 +2124,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -2162,26 +2162,26 @@ def 
@arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1 into ? 
from t9 where c1= 1" ; @@ -2609,21 +2609,21 @@ set @arg00= '1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 1 -c2 1 -c3 1 -c4 1 -c5 1 -c6 1 +c1 127 +c2 32767 +c3 8388607 +c4 2147483647 +c5 2147483647 +c6 9223372036854775807 c7 3.40282e+38 c8 1.11111111111111e+50 c9 1.11111111111111e+50 @@ -2659,21 +2659,21 @@ set @arg00= '-1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 -1 -c2 -1 -c3 -1 -c4 -1 -c5 -1 -c6 -1 +c1 -128 +c2 -32768 +c3 -8388608 +c4 -2147483648 +c5 -2147483648 +c6 -9223372036854775808 c7 -3.40282e+38 c8 -1.11111111111111e+50 c9 -1.11111111111111e+50 @@ -4863,26 +4863,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 
Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4, @@ -4910,26 +4910,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select @@ -4960,26 +4960,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 
8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -5000,26 +5000,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select ? 
:= c1 from t9 where c1= 1" ; @@ -5048,26 +5048,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -5092,26 +5092,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 
@arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, @@ -5138,26 +5138,26 @@ def @arg09 253 23 1 Y 128 31 63 def @arg10 253 23 1 Y 128 31 63 def @arg11 253 67 6 Y 128 30 63 def @arg12 253 67 6 Y 128 30 63 -def @arg13 253 8192 10 Y 128 31 63 -def @arg14 253 8192 19 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 8 Y 128 31 63 +def @arg13 253 16777216 10 Y 128 31 63 +def @arg14 253 16777216 19 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 8 Y 128 31 63 def @arg17 253 20 4 Y 128 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 -def @arg20 253 8192 1 Y 0 31 8 -def @arg21 253 8192 10 Y 0 31 8 -def @arg22 253 8192 30 Y 0 31 8 -def @arg23 253 8192 8 Y 128 31 63 -def @arg24 253 8192 8 Y 0 31 8 -def @arg25 253 8192 4 Y 128 31 63 -def @arg26 253 8192 4 Y 0 31 8 -def @arg27 253 8192 10 Y 128 31 63 -def @arg28 253 8192 10 Y 0 31 8 -def @arg29 253 8192 8 Y 128 31 63 -def @arg30 253 8192 8 Y 0 31 8 -def @arg31 253 8192 3 Y 0 31 8 -def @arg32 253 8192 6 Y 0 31 8 +def @arg20 253 16777216 1 Y 0 31 8 +def @arg21 253 16777216 10 Y 0 31 8 +def @arg22 253 16777216 30 Y 0 31 8 +def @arg23 253 16777216 8 Y 128 31 63 +def @arg24 253 16777216 8 Y 0 31 8 +def @arg25 253 16777216 4 Y 128 31 63 +def @arg26 253 16777216 4 Y 0 31 8 +def @arg27 253 16777216 10 Y 128 31 63 +def @arg28 253 16777216 10 Y 0 31 8 +def @arg29 253 16777216 8 Y 128 31 63 +def @arg30 253 16777216 8 Y 0 31 8 +def @arg31 253 16777216 3 Y 0 31 8 +def @arg32 253 16777216 6 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 1 1 1 1 1 1 1 1 1 1 1.0000 1.0000 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday set @my_key= 0 ; @@ -5176,26 +5176,26 @@ def @arg09 253 23 0 Y 128 31 63 def @arg10 253 23 0 Y 128 31 63 def @arg11 253 67 0 Y 128 30 63 def @arg12 253 67 0 Y 128 30 63 -def @arg13 253 8192 0 Y 128 31 63 -def @arg14 253 8192 0 Y 128 31 63 -def @arg15 253 8192 19 Y 128 31 63 -def @arg16 253 8192 0 Y 128 31 63 +def @arg13 253 16777216 0 Y 128 31 63 +def @arg14 253 16777216 0 Y 128 31 63 +def @arg15 253 16777216 19 Y 128 31 63 +def @arg16 253 16777216 0 Y 128 31 63 def @arg17 253 20 0 Y 128 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 -def @arg20 253 8192 0 Y 0 31 8 -def @arg21 253 8192 0 Y 0 31 8 -def @arg22 253 8192 0 Y 0 31 8 -def @arg23 253 8192 0 Y 128 31 63 -def @arg24 253 8192 0 Y 0 31 8 -def @arg25 253 8192 0 Y 128 31 63 -def @arg26 253 8192 0 Y 0 31 8 -def @arg27 253 8192 0 Y 128 31 63 -def @arg28 253 8192 0 Y 0 31 8 -def @arg29 253 8192 0 Y 128 31 63 -def @arg30 253 8192 0 Y 0 31 8 -def @arg31 253 8192 0 Y 0 31 8 -def @arg32 253 8192 0 Y 0 31 8 +def @arg20 253 16777216 0 Y 0 31 8 +def @arg21 253 16777216 0 Y 0 31 8 +def @arg22 253 16777216 0 Y 0 31 8 +def @arg23 253 16777216 0 Y 128 31 63 +def @arg24 253 16777216 0 Y 0 31 8 +def @arg25 253 16777216 0 Y 128 31 63 +def @arg26 253 16777216 0 Y 0 31 8 +def @arg27 253 
16777216 0 Y 128 31 63 +def @arg28 253 16777216 0 Y 0 31 8 +def @arg29 253 16777216 0 Y 128 31 63 +def @arg30 253 16777216 0 Y 0 31 8 +def @arg31 253 16777216 0 Y 0 31 8 +def @arg32 253 16777216 0 Y 0 31 8 @arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL prepare stmt1 from "select c1 into ? from t9 where c1= 1" ; @@ -5623,21 +5623,21 @@ set @arg00= '1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 1 -c2 1 -c3 1 -c4 1 -c5 1 -c6 1 +c1 127 +c2 32767 +c3 8388607 +c4 2147483647 +c5 2147483647 +c6 9223372036854775807 c7 3.40282e+38 c8 1.11111111111111e+50 c9 1.11111111111111e+50 @@ -5673,21 +5673,21 @@ set @arg00= '-1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 -1 -c2 -1 -c3 -1 -c4 -1 -c5 -1 -c6 -1 +c1 -128 +c2 -32768 +c3 -8388608 +c4 -2147483648 +c5 -2147483648 +c6 -9223372036854775808 c7 -3.40282e+38 c8 -1.11111111111111e+50 c9 -1.11111111111111e+50 diff --git a/mysql-test/r/ps_7ndb.result b/mysql-test/r/ps_7ndb.result index 772848dcf38..9e0577f8ae2 100644 --- a/mysql-test/r/ps_7ndb.result +++ b/mysql-test/r/ps_7ndb.result @@ -2672,21 +2672,21 @@ set @arg00= '1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 
'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 1 -c2 1 -c3 1 -c4 1 -c5 1 -c6 1 +c1 127 +c2 32767 +c3 8388607 +c4 2147483647 +c5 2147483647 +c6 9223372036854775807 c7 3.40282e+38 c8 1.11111111111111e+50 c9 1.11111111111111e+50 @@ -2722,21 +2722,21 @@ set @arg00= '-1.11111111111111111111e+50' ; execute my_insert using @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00, @arg00 ; Warnings: -Warning 1265 Data truncated for column 'c1' at row 1 -Warning 1265 Data truncated for column 'c2' at row 1 -Warning 1265 Data truncated for column 'c3' at row 1 -Warning 1265 Data truncated for column 'c4' at row 1 -Warning 1265 Data truncated for column 'c5' at row 1 -Warning 1265 Data truncated for column 'c6' at row 1 +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +Warning 1264 Out of range value for column 'c3' at row 1 +Warning 1264 Out of range value for column 'c4' at row 1 +Warning 1264 Out of range value for column 'c5' at row 1 +Warning 1264 Out of range value for column 'c6' at row 1 Warning 1264 Out of range value for column 'c7' at row 1 Warning 1264 Out of range value for column 'c12' at row 1 execute my_select ; -c1 -1 -c2 -1 -c3 -1 -c4 -1 -c5 -1 -c6 -1 +c1 -128 +c2 -32768 +c3 -8388608 +c4 -2147483648 +c5 -2147483648 +c6 -9223372036854775808 c7 -3.40282e+38 c8 -1.11111111111111e+50 c9 -1.11111111111111e+50 diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result index 684d216f856..96289305da1 100644 --- a/mysql-test/r/query_cache.result +++ b/mysql-test/r/query_cache.result @@ -949,24 +949,24 @@ COUNT(*) Warnings: Warning 1292 Incorrect datetime value: '20050327 invalid' for column 'date' at row 1 Warning 1292 Incorrect datetime value: '20050327 invalid' for column 'date' at row 1 -Warning 1292 Truncated incorrect DOUBLE value: '20050327 invalid' -Warning 1292 Truncated incorrect DOUBLE value: '20050327 invalid' +Warning 1292 Truncated incorrect INTEGER value: '20050327 invalid' +Warning 1292 Truncated incorrect INTEGER value: '20050327 invalid' SELECT COUNT(*) FROM t1 WHERE date BETWEEN '20050326' AND '20050328 invalid'; COUNT(*) 0 Warnings: Warning 1292 Incorrect datetime value: '20050328 invalid' for column 'date' at row 1 Warning 1292 Incorrect datetime value: '20050328 invalid' for column 'date' at row 1 -Warning 1292 Truncated incorrect DOUBLE value: '20050328 invalid' -Warning 1292 Truncated incorrect DOUBLE value: '20050328 invalid' +Warning 1292 Truncated incorrect INTEGER value: '20050328 invalid' +Warning 1292 Truncated incorrect INTEGER value: '20050328 invalid' SELECT COUNT(*) FROM t1 WHERE date BETWEEN '20050326' AND '20050327 invalid'; COUNT(*) 0 Warnings: Warning 1292 Incorrect datetime value: '20050327 invalid' for column 'date' at row 1 Warning 1292 Incorrect datetime value: '20050327 invalid' for column 'date' at row 1 -Warning 1292 Truncated incorrect 
DOUBLE value: '20050327 invalid' -Warning 1292 Truncated incorrect DOUBLE value: '20050327 invalid' +Warning 1292 Truncated incorrect INTEGER value: '20050327 invalid' +Warning 1292 Truncated incorrect INTEGER value: '20050327 invalid' show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 4141e6061d2..cfdee6e630e 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -750,13 +750,13 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index EXPLAIN SELECT a,b FROM v1 WHERE a < 2 and b=3; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index +1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index EXPLAIN SELECT a,b FROM t1 WHERE a < 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index EXPLAIN SELECT a,b FROM v1 WHERE a < 2; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index +1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index SELECT a,b FROM t1 WHERE a < 2 and b=3; a b 1 3 @@ -799,13 +799,13 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index explain select * from v1 where a in (3,4) and b in (1,2,3); id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index +1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index explain select * from t1 where a between 3 and 4 and b between 1 and 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index explain select * from v1 where a between 3 and 4 and b between 1 and 2; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index +1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index drop view v1; drop table t1; create table t3 (a int); @@ -941,3 +941,17 @@ item started price A1 2005-11-01 08:00:00 1000.000 A1 2005-11-15 00:00:00 2000.000 DROP TABLE t1; +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (a int, b int, filler char(100)); +insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A, +t1 B, t1 C where A.a < 5; +insert into t2 select 1000, b, 'filler' from t2; +alter table t2 add index (a,b); +select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z; +Z +In following EXPLAIN the access method should be ref, #rows~=500 (and not 2) +explain select * from t2 where a=1000 and b<11; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 5 const 502 Using where +drop table t1, t2; diff --git a/mysql-test/r/innodb-big.result b/mysql-test/r/read_many_rows_innodb.result similarity index 69% rename from mysql-test/r/innodb-big.result rename to mysql-test/r/read_many_rows_innodb.result index 19204b7cc65..ff11535f687 100644 --- a/mysql-test/r/innodb-big.result +++ b/mysql-test/r/read_many_rows_innodb.result @@ -1,8 +1,9 @@ +SET SESSION STORAGE_ENGINE = InnoDB; DROP TABLE IF EXISTS t1, t2, t3, t4; 
-CREATE TABLE t1 (id INTEGER) ENGINE=MYISAM; -CREATE TABLE t2 (id INTEGER primary key) ENGINE=INNODB; -CREATE TABLE t3 (a char(32) primary key,id INTEGER) ENGINE=INNODB; -CREATE TABLE t4 (a char(32) primary key,id INTEGER) ENGINE=MYISAM; +CREATE TABLE t1 (id INTEGER) ENGINE=MyISAM; +CREATE TABLE t2 (id INTEGER PRIMARY KEY); +CREATE TABLE t3 (a CHAR(32) PRIMARY KEY,id INTEGER); +CREATE TABLE t4 (a CHAR(32) PRIMARY KEY,id INTEGER) ENGINE=MyISAM; INSERT INTO t1 (id) VALUES (1); INSERT INTO t1 SELECT id+1 FROM t1; INSERT INTO t1 SELECT id+2 FROM t1; @@ -26,9 +27,9 @@ INSERT INTO t1 SELECT id+262144 FROM t1; INSERT INTO t1 SELECT id+524288 FROM t1; INSERT INTO t1 SELECT id+1048576 FROM t1; INSERT INTO t2 SELECT * FROM t1; -INSERT INTO t3 SELECT concat(id),id from t2 ORDER BY -id; -INSERT INTO t4 SELECT * from t3 ORDER BY concat(a); -select sum(id) from t3; -sum(id) +INSERT INTO t3 SELECT CONCAT(id),id FROM t2 ORDER BY -id; +INSERT INTO t4 SELECT * FROM t3 ORDER BY CONCAT(a); +SELECT SUM(id) FROM t3; +SUM(id) 2199024304128 -drop table t1,t2,t3,t4; +DROP TABLE t1,t2,t3,t4; diff --git a/mysql-test/r/round.result b/mysql-test/r/round.result new file mode 100644 index 00000000000..49a00885f34 --- /dev/null +++ b/mysql-test/r/round.result @@ -0,0 +1,272 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (sint8 tinyint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('127.4'); +INSERT INTO t1 VALUES ('127.5'); +Warnings: +Warning 1264 Out of range value for column 'sint8' at row 1 +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-127.4'); +INSERT INTO t1 VALUES ('-127.5'); +INSERT INTO t1 VALUES ('-128.4'); +INSERT INTO t1 VALUES ('-128.5'); +Warnings: +Warning 1264 Out of range value for column 'sint8' at row 1 +SELECT * FROM t1; +sint8 +0 +1 +127 +127 +0 +-1 +-127 +-128 +-128 +-128 +DROP TABLE t1; +CREATE TABLE t1 (uint8 tinyint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('127.4'); +INSERT INTO t1 VALUES ('127.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +Warnings: +Warning 1264 Out of range value for column 'uint8' at row 1 +INSERT INTO t1 VALUES ('255.4'); +INSERT INTO t1 VALUES ('255.5'); +Warnings: +Warning 1264 Out of range value for column 'uint8' at row 1 +SELECT * FROM t1; +uint8 +0 +1 +127 +128 +0 +0 +255 +255 +DROP TABLE t1; +CREATE TABLE t1 (sint16 smallint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('32767.4'); +INSERT INTO t1 VALUES ('32767.5'); +Warnings: +Warning 1264 Out of range value for column 'sint16' at row 1 +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-32767.4'); +INSERT INTO t1 VALUES ('-32767.5'); +INSERT INTO t1 VALUES ('-32768.4'); +INSERT INTO t1 VALUES ('-32768.5'); +Warnings: +Warning 1264 Out of range value for column 'sint16' at row 1 +SELECT * FROM t1; +sint16 +0 +1 +32767 +32767 +0 +-1 +-32767 +-32768 +-32768 +-32768 +DROP TABLE t1; +CREATE TABLE t1 (uint16 smallint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('32767.4'); +INSERT INTO t1 VALUES ('32767.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +Warnings: +Warning 1264 Out of range value for column 'uint16' at row 1 +INSERT INTO t1 VALUES ('65535.4'); +INSERT INTO t1 VALUES ('65535.5'); +Warnings: +Warning 1264 Out of range value for column 'uint16' at 
row 1 +SELECT * FROM t1; +uint16 +0 +1 +32767 +32768 +0 +0 +65535 +65535 +DROP TABLE t1; +CREATE TABLE t1 (sint24 mediumint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('8388607.4'); +INSERT INTO t1 VALUES ('8388607.5'); +Warnings: +Warning 1264 Out of range value for column 'sint24' at row 1 +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-8388607.4'); +INSERT INTO t1 VALUES ('-8388607.5'); +INSERT INTO t1 VALUES ('-8388608.4'); +INSERT INTO t1 VALUES ('-8388608.5'); +Warnings: +Warning 1264 Out of range value for column 'sint24' at row 1 +SELECT * FROM t1; +sint24 +0 +1 +8388607 +8388607 +0 +-1 +-8388607 +-8388608 +-8388608 +-8388608 +DROP TABLE t1; +CREATE TABLE t1 (uint24 mediumint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('8388607.4'); +INSERT INTO t1 VALUES ('8388607.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +Warnings: +Warning 1264 Out of range value for column 'uint24' at row 1 +INSERT INTO t1 VALUES ('16777215.4'); +INSERT INTO t1 VALUES ('16777215.5'); +Warnings: +Warning 1264 Out of range value for column 'uint24' at row 1 +SELECT * FROM t1; +uint24 +0 +1 +8388607 +8388608 +0 +0 +16777215 +16777215 +DROP TABLE t1; +CREATE TABLE t1 (sint64 bigint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('9223372036854775807.4'); +INSERT INTO t1 VALUES ('9223372036854775807.5'); +Warnings: +Warning 1264 Out of range value for column 'sint64' at row 1 +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-9223372036854775807.4'); +INSERT INTO t1 VALUES ('-9223372036854775807.5'); +INSERT INTO t1 VALUES ('-9223372036854775808.4'); +INSERT INTO t1 VALUES ('-9223372036854775808.5'); +Warnings: +Warning 1264 Out of range value for column 'sint64' at row 1 +SELECT * FROM t1; +sint64 +0 +1 +9223372036854775807 +9223372036854775807 +0 +-1 +-9223372036854775807 +-9223372036854775808 +-9223372036854775808 +-9223372036854775808 +DROP TABLE t1; +CREATE TABLE t1 (uint64 bigint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('9223372036854775807.4'); +INSERT INTO t1 VALUES ('9223372036854775807.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +Warnings: +Warning 1264 Out of range value for column 'uint64' at row 1 +INSERT INTO t1 VALUES ('18446744073709551615.4'); +INSERT INTO t1 VALUES ('18446744073709551615.5'); +Warnings: +Warning 1264 Out of range value for column 'uint64' at row 1 +INSERT INTO t1 VALUES ('1844674407370955161.0'); +INSERT INTO t1 VALUES ('1844674407370955161.1'); +INSERT INTO t1 VALUES ('1844674407370955161.2'); +INSERT INTO t1 VALUES ('1844674407370955161.3'); +INSERT INTO t1 VALUES ('1844674407370955161.4'); +INSERT INTO t1 VALUES ('1844674407370955161.5'); +INSERT INTO t1 VALUES ('1844674407370955161.0e1'); +INSERT INTO t1 VALUES ('1844674407370955161.1e1'); +INSERT INTO t1 VALUES ('1844674407370955161.2e1'); +INSERT INTO t1 VALUES ('1844674407370955161.3e1'); +INSERT INTO t1 VALUES ('1844674407370955161.4e1'); +INSERT INTO t1 VALUES ('1844674407370955161.5e1'); +INSERT INTO t1 VALUES ('18446744073709551610e-1'); +INSERT INTO t1 VALUES ('18446744073709551611e-1'); +INSERT INTO t1 VALUES ('18446744073709551612e-1'); +INSERT INTO t1 VALUES ('18446744073709551613e-1'); +INSERT INTO t1 VALUES ('18446744073709551614e-1'); +INSERT 
INTO t1 VALUES ('18446744073709551615e-1'); +SELECT * FROM t1; +uint64 +0 +1 +9223372036854775807 +9223372036854775808 +0 +0 +18446744073709551615 +18446744073709551615 +1844674407370955161 +1844674407370955161 +1844674407370955161 +1844674407370955161 +1844674407370955161 +1844674407370955162 +18446744073709551610 +18446744073709551611 +18446744073709551612 +18446744073709551613 +18446744073709551614 +18446744073709551615 +1844674407370955161 +1844674407370955161 +1844674407370955161 +1844674407370955161 +1844674407370955161 +1844674407370955162 +DROP TABLE t1; +CREATE TABLE t1 (str varchar(128), sint64 bigint not null default 0); +INSERT INTO t1 (str) VALUES ('1.5'); +INSERT INTO t1 (str) VALUES ('1.00005e4'); +INSERT INTO t1 (str) VALUES ('1.0005e3'); +INSERT INTO t1 (str) VALUES ('1.005e2'); +INSERT INTO t1 (str) VALUES ('1.05e1'); +INSERT INTO t1 (str) VALUES ('1.5e0'); +INSERT INTO t1 (str) VALUES ('100005e-1'); +INSERT INTO t1 (str) VALUES ('100050e-2'); +INSERT INTO t1 (str) VALUES ('100500e-3'); +INSERT INTO t1 (str) VALUES ('105000e-4'); +INSERT INTO t1 (str) VALUES ('150000e-5'); +UPDATE t1 SET sint64=str; +SELECT * FROM t1; +str sint64 +1.5 2 +1.00005e4 10001 +1.0005e3 1001 +1.005e2 101 +1.05e1 11 +1.5e0 2 +100005e-1 10001 +100050e-2 1001 +100500e-3 101 +105000e-4 11 +150000e-5 2 +DROP TABLE t1; diff --git a/mysql-test/r/row.result b/mysql-test/r/row.result index 3028f42de1a..08d457f7ad7 100644 --- a/mysql-test/r/row.result +++ b/mysql-test/r/row.result @@ -181,3 +181,128 @@ SELECT ROW(1,1,1) = ROW(1,1,1) as `1`, ROW(1,1,1) = ROW(1,2,1) as `0`, ROW(1,NUL select row(NULL,1)=(2,0); row(NULL,1)=(2,0) 0 +CREATE TABLE t1 (a int, b int, PRIMARY KEY (a,b)); +INSERT INTO t1 VALUES (1,1), (2,1), (3,1), (1,2), (3,2), (3,3); +EXPLAIN SELECT * FROM t1 WHERE a=3 AND b=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 const PRIMARY PRIMARY 8 const,const 1 Using index +EXPLAIN SELECT * FROM t1 WHERE (a,b)=(3,2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 const PRIMARY PRIMARY 8 const,const 1 Using index +SELECT * FROM t1 WHERE a=3 and b=2; +a b +3 2 +SELECT * FROM t1 WHERE (a,b)=(3,2); +a b +3 2 +CREATE TABLE t2 (a int, b int, c int, PRIMARY KEY (a,b,c)); +INSERT INTO t2 VALUES +(1,1,2), (3,1,3), (1,2,2), (4,4,2), +(1,1,1), (3,1,1), (1,2,1); +EXPLAIN SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b=t2.b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 Using index +1 SIMPLE t2 ref PRIMARY PRIMARY 8 test.t1.a,test.t1.b 1 Using index +EXPLAIN SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 Using index +1 SIMPLE t2 ref PRIMARY PRIMARY 8 test.t1.a,test.t1.b 1 Using index +SELECT * FROM t1,t2 WHERE t1.a=t2.a and t1.b=t2.b; +a b a b c +1 1 1 1 1 +1 1 1 1 2 +1 2 1 2 1 +1 2 1 2 2 +3 1 3 1 1 +3 1 3 1 3 +SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b); +a b a b c +1 1 1 1 1 +1 1 1 1 2 +1 2 1 2 1 +1 2 1 2 2 +3 1 3 1 1 +3 1 3 1 3 +EXPLAIN SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 Using where; Using index +1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a 1 Using index +EXPLAIN SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 Using where; 
Using index +1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a 1 Using index +SELECT * FROM t1,t2 WHERE t1.a=1 and t1.b=t2.b; +a b a b c +1 1 1 1 2 +1 1 3 1 3 +1 2 1 2 2 +1 1 1 1 1 +1 1 3 1 1 +1 2 1 2 1 +SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,2); +a b a b c +1 2 1 1 1 +1 2 1 1 2 +1 2 1 2 1 +1 2 1 2 2 +3 2 3 1 1 +3 2 3 1 3 +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b+1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 100.00 Using index +1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where ((`test`.`t2`.`a` = `test`.`t1`.`a`) and (`test`.`t1`.`b` = (`test`.`t2`.`b` + 1))) +SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b+1); +a b a b c +1 2 1 1 1 +1 2 1 1 2 +3 2 3 1 1 +3 2 3 1 3 +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE (t1.a-1,t1.b)=(t2.a-1,t2.b+1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 index NULL PRIMARY 8 NULL 6 100.00 Using index +1 SIMPLE t2 index NULL PRIMARY 12 NULL 7 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where (((`test`.`t1`.`a` - 1) = (`test`.`t2`.`a` - 1)) and (`test`.`t1`.`b` = (`test`.`t2`.`b` + 1))) +SELECT * FROM t1,t2 WHERE (t1.a-1,t1.b)=(t2.a-1,t2.b+1); +a b a b c +1 2 1 1 2 +3 2 3 1 3 +1 2 1 1 1 +3 2 3 1 1 +EXPLAIN SELECT * FROM t2 WHERE a=3 AND b=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref PRIMARY PRIMARY 8 const,const 1 Using index +EXPLAIN SELECT * FROM t2 WHERE (a,b)=(3,2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref PRIMARY PRIMARY 8 const,const 1 Using index +SELECT * FROM t2 WHERE a=3 and b=2; +a b c +SELECT * FROM t2 WHERE (a,b)=(3,2); +a b c +EXPLAIN SELECT * FROM t1,t2 WHERE t2.a=t1.a AND t2.b=2 AND t2.c=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 Using index +1 SIMPLE t2 eq_ref PRIMARY PRIMARY 12 test.t1.a,const,const 1 Using index +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE (t2.a,(t2.b,t2.c))=(t1.a,(2,1)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 100.00 Using index +1 SIMPLE t2 eq_ref PRIMARY PRIMARY 12 test.t1.a,const,const 1 100.00 Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where ((`test`.`t2`.`c` = 1) and (`test`.`t2`.`b` = 2) and (`test`.`t2`.`a` = `test`.`t1`.`a`)) +SELECT * FROM t1,t2 WHERE (t2.a,(t2.b,t2.c))=(t1.a,(2,1)); +a b a b c +1 1 1 2 1 +1 2 1 2 1 +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE t2.a=t1.a AND (t2.b,t2.c)=(2,1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 6 100.00 Using index +1 SIMPLE t2 eq_ref PRIMARY PRIMARY 12 test.t1.a,const,const 1 100.00 Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where ((`test`.`t2`.`c` = 1) and 
(`test`.`t2`.`b` = 2) and (`test`.`t2`.`a` = `test`.`t1`.`a`)) +SELECT * FROM t1,t2 WHERE t2.a=t1.a AND (t2.b,t2.c)=(2,1); +a b a b c +1 1 1 2 1 +1 2 1 2 1 +DROP TABLE t1,t2; diff --git a/mysql-test/r/rowid_order_innodb.result b/mysql-test/r/rowid_order_innodb.result index f76002e9cb2..e0796cd7ab5 100644 --- a/mysql-test/r/rowid_order_innodb.result +++ b/mysql-test/r/rowid_order_innodb.result @@ -1,12 +1,13 @@ +SET SESSION STORAGE_ENGINE = InnoDB; drop table if exists t1, t2, t3,t4; -create table t1 ( +create table t1 ( pk1 int not NULL, key1 int(11), key2 int(11), PRIMARY KEY (pk1), KEY key1 (key1), KEY key2 (key2) -) engine=innodb; +); insert into t1 values (-5, 1, 1), (-100, 1, 1), (3, 1, 1), @@ -23,14 +24,14 @@ pk1 key1 key2 3 1 1 10 1 1 drop table t1; -create table t1 ( +create table t1 ( pk1 int unsigned not NULL, key1 int(11), key2 int(11), PRIMARY KEY (pk1), KEY key1 (key1), KEY key2 (key2) -) engine=innodb; +); insert into t1 values (0, 1, 1), (0xFFFFFFFF, 1, 1), (0xFFFFFFFE, 1, 1), @@ -44,14 +45,14 @@ pk1 key1 key2 4294967294 1 1 4294967295 1 1 drop table t1; -create table t1 ( +create table t1 ( pk1 char(4) not NULL, key1 int(11), key2 int(11), PRIMARY KEY (pk1), KEY key1 (key1), KEY key2 (key2) -) engine=innodb collate latin2_general_ci; +) collate latin2_general_ci; insert into t1 values ('a1', 1, 1), ('b2', 1, 1), ('A3', 1, 1), @@ -72,8 +73,8 @@ key2 int(11), PRIMARY KEY (pk1,pk2,pk3), KEY key1 (key1), KEY key2 (key2) -) engine=innodb; -insert into t1 values +); +insert into t1 values (1, 'u', 'u', 1, 1), (1, 'u', char(0xEC), 1, 1), (1, 'u', 'x', 1, 1); @@ -170,7 +171,7 @@ key2 int(11), primary key(pk1, pk2), KEY key1 (key1), KEY key2 (key2) -) engine=innodb; +); insert into t1 values ('','empt',2,2), ('a','a--a',2,2), ('bb','b--b',2,2), diff --git a/mysql-test/r/rpl_insert.result b/mysql-test/r/rpl_insert.result index bcc9b176ed3..b6a97926f73 100644 --- a/mysql-test/r/rpl_insert.result +++ b/mysql-test/r/rpl_insert.result @@ -1,3 +1,6 @@ +# +# Bug#20821: INSERT DELAYED fails to write some rows to binlog +# stop slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; reset master; @@ -13,4 +16,8 @@ COUNT(*) SELECT COUNT(*) FROM mysqlslap.t1; COUNT(*) 5000 -DROP SCHEMA IF EXISTS mysqlslap; +# +# Cleanup +# +USE test; +DROP SCHEMA mysqlslap; diff --git a/mysql-test/r/rpl_insert_id.result b/mysql-test/r/rpl_insert_id.result index 3c33fe1be2b..63be35b8720 100644 --- a/mysql-test/r/rpl_insert_id.result +++ b/mysql-test/r/rpl_insert_id.result @@ -1,3 +1,14 @@ +# +# Setup +# +use test; +drop table if exists t1, t2, t3; +# +# See if queries that use both auto_increment and LAST_INSERT_ID() +# are replicated well +# +# We also check how the foreign_key_check variable is replicated +# stop slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; reset master; @@ -38,6 +49,9 @@ select * from t2; b c 5 0 6 11 +# +# check if INSERT SELECT in auto_increment is well replicated (bug #490) +# drop table t2; drop table t1; create table t1(a int auto_increment, key(a)); @@ -68,12 +82,19 @@ b c 9 13 drop table t1; drop table t2; +# +# Bug#8412: Error codes reported in binary log for CHARACTER SET, +# FOREIGN_KEY_CHECKS +# SET TIMESTAMP=1000000000; CREATE TABLE t1 ( a INT UNIQUE ); SET FOREIGN_KEY_CHECKS=0; INSERT INTO t1 VALUES (1),(1); Got one of the listed errors drop table t1; +# +# Bug#14553: NULL in WHERE resets LAST_INSERT_ID +# create table t1(a int auto_increment, key(a)); create table t2(a int); insert into t1 (a) values (null); @@ -87,6 +108,14 @@ a 1 drop table t1; drop table t2; +# +# 
End of 4.1 tests +# +# +# BUG#15728: LAST_INSERT_ID function inside a stored function returns 0 +# +# The solution is not to reset last_insert_id on enter to sub-statement. +# drop function if exists bug15728; drop function if exists bug15728_insert; drop table if exists t1, t2; @@ -234,6 +263,161 @@ n b 2 100 3 350 drop table t1; +DROP PROCEDURE IF EXISTS p1; +DROP TABLE IF EXISTS t1, t2; +SELECT LAST_INSERT_ID(0); +LAST_INSERT_ID(0) +0 +CREATE TABLE t1 ( +id INT NOT NULL DEFAULT 0, +last_id INT, +PRIMARY KEY (id) +); +CREATE TABLE t2 ( +id INT NOT NULL AUTO_INCREMENT, +last_id INT, +PRIMARY KEY (id) +); +CREATE PROCEDURE p1() +BEGIN +INSERT INTO t2 (last_id) VALUES (LAST_INSERT_ID()); +INSERT INTO t1 (last_id) VALUES (LAST_INSERT_ID()); +END| +CALL p1(); +SELECT * FROM t1; +id last_id +0 1 +SELECT * FROM t2; +id last_id +1 0 +SELECT * FROM t1; +id last_id +0 1 +SELECT * FROM t2; +id last_id +1 0 +DROP PROCEDURE p1; +DROP TABLE t1, t2; +DROP PROCEDURE IF EXISTS p1; +DROP FUNCTION IF EXISTS f1; +DROP FUNCTION IF EXISTS f2; +DROP FUNCTION IF EXISTS f3; +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +i INT NOT NULL AUTO_INCREMENT PRIMARY KEY, +j INT DEFAULT 0 +); +CREATE TABLE t2 (i INT); +CREATE PROCEDURE p1() +BEGIN +INSERT INTO t1 (i) VALUES (NULL); +INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); +INSERT INTO t1 (i) VALUES (NULL), (NULL); +INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); +END | +CREATE FUNCTION f1() RETURNS INT MODIFIES SQL DATA +BEGIN +INSERT INTO t1 (i) VALUES (NULL); +INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); +INSERT INTO t1 (i) VALUES (NULL), (NULL); +INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); +RETURN 0; +END | +CREATE FUNCTION f2() RETURNS INT NOT DETERMINISTIC +RETURN LAST_INSERT_ID() | +CREATE FUNCTION f3() RETURNS INT MODIFIES SQL DATA +BEGIN +INSERT INTO t2 (i) VALUES (LAST_INSERT_ID()); +RETURN 0; +END | +INSERT INTO t1 VALUES (NULL, -1); +CALL p1(); +SELECT f1(); +f1() +0 +INSERT INTO t1 VALUES (NULL, f2()), (NULL, LAST_INSERT_ID()), +(NULL, LAST_INSERT_ID()), (NULL, f2()), (NULL, f2()); +INSERT INTO t1 VALUES (NULL, f2()); +INSERT INTO t1 VALUES (NULL, LAST_INSERT_ID()), (NULL, LAST_INSERT_ID(5)), +(NULL, @@LAST_INSERT_ID); +INSERT INTO t1 VALUES (NULL, 0), (NULL, LAST_INSERT_ID()); +UPDATE t1 SET j= -1 WHERE i IS NULL; +INSERT INTO t1 (i) VALUES (NULL); +INSERT INTO t1 (i) VALUES (NULL); +SELECT f3(); +f3() +0 +SELECT * FROM t1; +i j +1 -1 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 3 +9 3 +10 3 +11 3 +12 3 +13 8 +14 13 +15 5 +16 13 +17 -1 +18 14 +19 0 +20 0 +SELECT * FROM t2; +i +2 +3 +5 +6 +19 +SELECT * FROM t1; +i j +1 -1 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 3 +9 3 +10 3 +11 3 +12 3 +13 8 +14 13 +15 5 +16 13 +17 -1 +18 14 +19 0 +20 0 +SELECT * FROM t2; +i +2 +3 +5 +6 +19 +DROP PROCEDURE p1; +DROP FUNCTION f1; +DROP FUNCTION f2; +DROP FUNCTION f3; +DROP TABLE t1, t2; +# +# End of 5.0 tests +# +create table t2 ( +id int not null auto_increment, +last_id int, +primary key (id) +); truncate table t2; create table t1 (id tinyint primary key); create function insid() returns int @@ -267,5 +451,30 @@ select * from t2; id last_id 4 0 8 0 -drop table t1, t2; +drop table t1; drop function insid; +truncate table t2; +create table t1 (n int primary key auto_increment not null, +b int, unique(b)); +create procedure foo() +begin +insert into t1 values(null,10); +insert ignore into t1 values(null,10); +insert ignore into t1 values(null,10); +insert into t2 values(null,3); +end| +call foo(); +select * from t1; +n b +1 10 +select * from t2; +id last_id +1 3 +select * from t1; +n b 
+1 10 +select * from t2; +id last_id +1 3 +drop table t1, t2; +drop procedure foo; diff --git a/mysql-test/r/rpl_loaddata.result b/mysql-test/r/rpl_loaddata.result index c22815186d1..cae11e98caa 100644 --- a/mysql-test/r/rpl_loaddata.result +++ b/mysql-test/r/rpl_loaddata.result @@ -28,7 +28,7 @@ day id category name 2003-03-22 2416 a bbbbb show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -slave-bin.000001 1248 +slave-bin.000001 1276 drop table t1; drop table t2; drop table t3; @@ -39,7 +39,7 @@ set global sql_slave_skip_counter=1; start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1765 # # master-bin.000001 Yes Yes # 0 0 1765 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1793 # # master-bin.000001 Yes Yes # 0 0 1793 # None 0 No # set sql_log_bin=0; delete from t1; set sql_log_bin=1; @@ -49,7 +49,7 @@ change master to master_user='test'; change master to master_user='root'; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1800 # # master-bin.000001 No No # 0 0 1800 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1828 # # master-bin.000001 No No # 0 0 1828 # None 0 No # set global sql_slave_skip_counter=1; start slave; set sql_log_bin=0; diff --git a/mysql-test/r/rpl_ndb_insert_ignore.result b/mysql-test/r/rpl_ndb_insert_ignore.result index 4d55328a5d7..030845e89e2 100644 --- a/mysql-test/r/rpl_ndb_insert_ignore.result +++ b/mysql-test/r/rpl_ndb_insert_ignore.result @@ -30,16 +30,16 @@ a b 2 2 3 3 4 4 -7 5 -10 6 +5 5 +6 6 SELECT * FROM t1 ORDER BY a; a b 1 1 2 2 3 3 4 4 -7 5 -10 6 +5 5 +6 6 drop table t1; CREATE TABLE t1 ( a int unsigned not null auto_increment primary key, diff --git a/mysql-test/r/rpl_rbr_to_sbr.result b/mysql-test/r/rpl_rbr_to_sbr.result index c5a672ee13b..4b2d129c732 100644 --- a/mysql-test/r/rpl_rbr_to_sbr.result +++ b/mysql-test/r/rpl_rbr_to_sbr.result @@ -5,9 +5,6 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; SET BINLOG_FORMAT=MIXED; -SELECT @@GLOBAL.BINLOG_FORMAT, @@SESSION.BINLOG_FORMAT; -@@GLOBAL.BINLOG_FORMAT @@SESSION.BINLOG_FORMAT -STATEMENT MIXED SET GLOBAL BINLOG_FORMAT=MIXED; SELECT @@GLOBAL.BINLOG_FORMAT, @@SESSION.BINLOG_FORMAT; @@GLOBAL.BINLOG_FORMAT @@SESSION.BINLOG_FORMAT diff --git a/mysql-test/r/rpl_rewrt_db.result b/mysql-test/r/rpl_rewrt_db.result index 0c33ffc918f..1b843bffdca 100644 --- 
a/mysql-test/r/rpl_rewrt_db.result +++ b/mysql-test/r/rpl_rewrt_db.result @@ -67,9 +67,9 @@ drop table t1; create table t1 (a int, b char(10)); load data infile '../std_data_ln/loaddata3.dat' into table t1 fields terminated by '' enclosed by '' ignore 1 lines; Warnings: -Warning 1264 Out of range value for column 'a' at row 3 +Warning 1366 Incorrect integer value: 'error ' for column 'a' at row 3 Warning 1262 Row 3 was truncated; it contained more data than there were input columns -Warning 1264 Out of range value for column 'a' at row 5 +Warning 1366 Incorrect integer value: 'wrong end ' for column 'a' at row 5 Warning 1262 Row 5 was truncated; it contained more data than there were input columns select * from rewrite.t1; a b @@ -81,7 +81,8 @@ a b truncate table t1; load data infile '../std_data_ln/loaddata4.dat' into table t1 fields terminated by '' enclosed by '' lines terminated by '' ignore 1 lines; Warnings: -Warning 1264 Out of range value for column 'a' at row 4 +Warning 1366 Incorrect integer value: ' +' for column 'a' at row 4 Warning 1261 Row 4 doesn't contain data for all columns select * from rewrite.t1; a b diff --git a/mysql-test/r/rpl_row_basic_11bugs.result b/mysql-test/r/rpl_row_basic_11bugs.result index e8be537816e..e49facd2d70 100644 --- a/mysql-test/r/rpl_row_basic_11bugs.result +++ b/mysql-test/r/rpl_row_basic_11bugs.result @@ -60,3 +60,63 @@ master-bin.000001 4 Format_desc 1 102 Server ver: SERVER_VERSION, Binlog ver: 4 master-bin.000001 102 Query 1 188 use `test`; CREATE TABLE t1 (a INT) master-bin.000001 188 Table_map 1 227 table_id: # (test.t1) master-bin.000001 227 Write_rows 1 266 table_id: # flags: STMT_END_F +DROP TABLE t1; +================ Test for BUG#17620 ================ +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +**** On Slave **** +SET GLOBAL QUERY_CACHE_SIZE=0; +**** On Master **** +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +**** On Slave **** +SET GLOBAL QUERY_CACHE_SIZE=16*1024*1024; +**** On Master **** +INSERT INTO t1 VALUES (4),(5),(6); +**** On Slave **** +SELECT * FROM t1; +a +1 +2 +3 +4 +5 +6 +**** On Master **** +INSERT INTO t1 VALUES (7),(8),(9); +**** On Slave **** +SELECT * FROM t1; +a +1 +2 +3 +4 +5 +6 +7 +8 +9 +================ Test for BUG#22550 ================ +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +CREATE TABLE t1 (a BIT(1), b INT) ENGINE=MYISAM; +INSERT INTO t1 VALUES(1,2); +SELECT HEX(a),b FROM t1; +HEX(a) b +1 2 +SELECT HEX(a),b FROM t1; +HEX(a) b +1 2 +UPDATE t1 SET a=0 WHERE b=2; +SELECT HEX(a),b FROM t1; +HEX(a) b +0 2 +SELECT HEX(a),b FROM t1; +HEX(a) b +0 2 diff --git a/mysql-test/r/rpl_row_basic_8partition.result b/mysql-test/r/rpl_row_basic_8partition.result index a6728303a4c..dedd5d044e0 100644 --- a/mysql-test/r/rpl_row_basic_8partition.result +++ b/mysql-test/r/rpl_row_basic_8partition.result @@ -5,7 +5,10 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; DROP TABLE IF EXISTS t1; -SET BINLOG_FORMAT=ROW; +SET @@BINLOG_FORMAT = ROW; +SELECT @@BINLOG_FORMAT; +@@BINLOG_FORMAT +ROW **** Partition RANGE testing **** CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, diff --git a/mysql-test/r/rpl_row_max_relay_size.result b/mysql-test/r/rpl_row_max_relay_size.result index 6146d623ec5..163e8231de5 100644 --- 
a/mysql-test/r/rpl_row_max_relay_size.result +++ b/mysql-test/r/rpl_row_max_relay_size.result @@ -1,3 +1,5 @@ +SET SESSION BINLOG_FORMAT=ROW; +SET GLOBAL BINLOG_FORMAT=ROW; stop slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; reset master; @@ -5,9 +7,15 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; stop slave; +# +# Generate a big enough master's binlog to cause relay log rotations +# create table t1 (a int); drop table t1; reset slave; +# +# Test 1 +# set global max_binlog_size=8192; set global max_relay_log_size=8192-1; select @@global.max_relay_log_size; @@ -15,47 +23,251 @@ select @@global.max_relay_log_size; 4096 start slave; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 58664 # # master-bin.000001 Yes Yes # 0 0 58664 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 58664 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 58664 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 2 +# stop slave; reset slave; set global max_relay_log_size=(5*4096); select @@global.max_relay_log_size; -@@global.max_relay_log_size -20480 +@@global.max_relay_log_size 20480 start slave; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 58664 # # master-bin.000001 Yes Yes # 0 0 58664 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 58664 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 58664 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 
+Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 3: max_relay_log_size = 0 +# stop slave; reset slave; set global max_relay_log_size=0; select @@global.max_relay_log_size; -@@global.max_relay_log_size -0 +@@global.max_relay_log_size 0 start slave; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 58664 # # master-bin.000001 Yes Yes # 0 0 58664 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 58664 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 58664 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 4: Tests below are mainly to ensure that we have not coded with wrong assumptions +# stop slave; reset slave; flush logs; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 4 # # No No # 0 0 0 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File +Read_Master_Log_Pos 4 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File +Slave_IO_Running No +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 0 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 5 +# reset slave; start slave; flush logs; create table t1 (a int); show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table 
Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 58750 # # master-bin.000001 Yes Yes # 0 0 58750 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 58750 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 58750 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 6: one more rotation, to be sure Relay_Log_Space is correctly updated +# flush logs; drop table t1; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 58826 # # master-bin.000001 Yes Yes # 0 0 58826 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 58826 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 58826 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # flush logs; show master status; -File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000002 102 +File master-bin.000002 +Position 102 +Binlog_Do_DB +Binlog_Ignore_DB +# +# End of 4.1 tests +# diff --git a/mysql-test/r/rpl_row_tabledefs_2myisam.result b/mysql-test/r/rpl_row_tabledefs_2myisam.result new file mode 100644 index 00000000000..ae792a5dc2a --- /dev/null +++ b/mysql-test/r/rpl_row_tabledefs_2myisam.result @@ -0,0 +1,381 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +STOP SLAVE; +SET GLOBAL SQL_MODE='STRICT_ALL_TABLES'; +START SLAVE; +CREATE TABLE t1_int (a INT PRIMARY KEY, b INT) ENGINE='MyISAM'; +CREATE TABLE t1_bit (a INT PRIMARY KEY, b INT) ENGINE='MyISAM'; +CREATE TABLE t1_char (a INT PRIMARY KEY, b INT) ENGINE='MyISAM'; +CREATE TABLE t1_nodef (a INT PRIMARY KEY, b INT) ENGINE='MyISAM'; 
+CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE='MyISAM'; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE='MyISAM'; +CREATE TABLE t4 (a INT) ENGINE='MyISAM'; +CREATE TABLE t5 (a INT, b INT, c INT) ENGINE='MyISAM'; +CREATE TABLE t6 (a INT, b INT, c INT) ENGINE='MyISAM'; +CREATE TABLE t7 (a INT NOT NULL) ENGINE='MyISAM'; +CREATE TABLE t8 (a INT NOT NULL) ENGINE='MyISAM'; +CREATE TABLE t9 (a INT) ENGINE='MyISAM'; +ALTER TABLE t1_int ADD x INT DEFAULT 42; +ALTER TABLE t1_bit +ADD x BIT(3) DEFAULT b'011', +ADD y BIT(5) DEFAULT b'10101', +ADD z BIT(2) DEFAULT b'10'; +ALTER TABLE t1_char ADD x CHAR(20) DEFAULT 'Just a test'; +ALTER TABLE t1_nodef ADD x INT NOT NULL; +ALTER TABLE t2 DROP b; +ALTER TABLE t4 MODIFY a FLOAT; +ALTER TABLE t5 MODIFY b FLOAT; +ALTER TABLE t6 MODIFY c FLOAT; +ALTER TABLE t7 ADD e1 INT, ADD e2 INT, ADD e3 INT, ADD e4 INT, +ADD e5 INT, ADD e6 INT, ADD e7 INT, ADD e8 INT; +ALTER TABLE t8 ADD e1 INT NOT NULL DEFAULT 0, ADD e2 INT NOT NULL DEFAULT 0, +ADD e3 INT NOT NULL DEFAULT 0, ADD e4 INT NOT NULL DEFAULT 0, +ADD e5 INT NOT NULL DEFAULT 0, ADD e6 INT NOT NULL DEFAULT 0, +ADD e7 INT NOT NULL DEFAULT 0, ADD e8 INT NOT NULL DEFAULT 0; +INSERT INTO t1_int VALUES (2, 4, 4711); +INSERT INTO t1_char VALUES (2, 4, 'Foo is a bar'); +INSERT INTO t1_bit VALUES (2, 4, b'101', b'11100', b'01'); +**** On Master **** +INSERT INTO t1_int VALUES (1,2); +INSERT INTO t1_int VALUES (2,5); +INSERT INTO t1_bit VALUES (1,2); +INSERT INTO t1_bit VALUES (2,5); +INSERT INTO t1_char VALUES (1,2); +INSERT INTO t1_char VALUES (2,5); +SELECT * FROM t1_int; +a b +1 2 +2 5 +SELECT * FROM t1_bit; +a b +1 2 +2 5 +SELECT * FROM t1_char; +a b +1 2 +2 5 +**** On Slave **** +SELECT a,b,x FROM t1_int; +a b x +2 5 4711 +1 2 42 +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +a b HEX(x) HEX(y) HEX(z) +2 5 5 1C 1 +1 2 3 15 2 +SELECT a,b,x FROM t1_char; +a b x +2 5 Foo is a bar +1 2 Just a test +**** On Master **** +UPDATE t1_int SET b=2*b WHERE a=2; +UPDATE t1_char SET b=2*b WHERE a=2; +UPDATE t1_bit SET b=2*b WHERE a=2; +SELECT * FROM t1_int; +a b +1 2 +2 10 +SELECT * FROM t1_bit; +a b +1 2 +2 10 +SELECT * FROM t1_char; +a b +1 2 +2 10 +**** On Slave **** +SELECT a,b,x FROM t1_int; +a b x +2 10 4711 +1 2 42 +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +a b HEX(x) HEX(y) HEX(z) +2 10 5 1C 1 +1 2 3 15 2 +SELECT a,b,x FROM t1_char; +a b x +2 10 Foo is a bar +1 2 Just a test +INSERT INTO t9 VALUES (2); +INSERT INTO t1_nodef VALUES (1,2); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1364 +Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (2); +INSERT INTO t2 VALUES (2,4); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # 
+Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Table width mismatch - received 2 columns, test.t2 has 1 columns +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (4); +INSERT INTO t4 VALUES (4); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Column 0 type mismatch - received type 3, test.t4 has type 4 +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (5); +INSERT INTO t5 VALUES (5,10,25); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Column 1 type mismatch - received type 3, test.t5 has type 4 +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (6); +INSERT INTO t6 VALUES (6,12,36); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Column 2 type mismatch - received type 3, test.t6 has type 4 +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (6); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # 
+Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +INSERT INTO t7 VALUES (1),(2),(3); +INSERT INTO t8 VALUES (1),(2),(3); +SELECT * FROM t7; +a +1 +2 +3 +SELECT * FROM t8; +a +1 +2 +3 +SELECT * FROM t7; +a e1 e2 e3 e4 e5 e6 e7 e8 +1 NULL NULL NULL NULL NULL NULL NULL NULL +2 NULL NULL NULL NULL NULL NULL NULL NULL +3 NULL NULL NULL NULL NULL NULL NULL NULL +SELECT * FROM t8; +a e1 e2 e3 e4 e5 e6 e7 e8 +1 0 0 0 0 0 0 0 0 +2 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 0 0 0 +**** On Master **** +TRUNCATE t1_nodef; +SET SQL_LOG_BIN=0; +INSERT INTO t1_nodef VALUES (1,2); +INSERT INTO t1_nodef VALUES (2,4); +SET SQL_LOG_BIN=1; +**** On Slave **** +INSERT INTO t1_nodef VALUES (1,2,3); +INSERT INTO t1_nodef VALUES (2,4,6); +**** On Master **** +UPDATE t1_nodef SET b=2*b WHERE a=1; +SELECT * FROM t1_nodef; +a b +1 4 +2 4 +**** On Slave **** +SELECT * FROM t1_nodef; +a b x +1 4 3 +2 4 6 +**** On Master **** +DELETE FROM t1_nodef WHERE a=2; +SELECT * FROM t1_nodef; +a b +1 4 +**** On Slave **** +SELECT * FROM t1_nodef; +a b x +1 4 3 +**** Cleanup **** +DROP TABLE IF EXISTS t1_int,t1_bit,t1_char,t1_nodef; +DROP TABLE IF EXISTS t2,t3,t4,t5,t6,t7,t8,t9; diff --git a/mysql-test/r/rpl_row_tabledefs_3innodb.result b/mysql-test/r/rpl_row_tabledefs_3innodb.result new file mode 100644 index 00000000000..b7f0b7b15e2 --- /dev/null +++ b/mysql-test/r/rpl_row_tabledefs_3innodb.result @@ -0,0 +1,381 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +STOP SLAVE; +SET GLOBAL SQL_MODE='STRICT_ALL_TABLES'; +START SLAVE; +CREATE TABLE t1_int (a INT PRIMARY KEY, b INT) ENGINE='InnoDB'; +CREATE TABLE t1_bit (a INT PRIMARY KEY, b INT) ENGINE='InnoDB'; +CREATE TABLE t1_char (a INT PRIMARY KEY, b INT) ENGINE='InnoDB'; +CREATE TABLE t1_nodef (a INT PRIMARY KEY, b INT) ENGINE='InnoDB'; +CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE='InnoDB'; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE='InnoDB'; +CREATE TABLE t4 (a INT) ENGINE='InnoDB'; +CREATE TABLE t5 (a INT, b INT, c INT) ENGINE='InnoDB'; +CREATE TABLE t6 (a INT, b INT, c INT) ENGINE='InnoDB'; +CREATE TABLE t7 (a INT NOT NULL) ENGINE='InnoDB'; +CREATE TABLE t8 (a INT NOT NULL) ENGINE='InnoDB'; +CREATE TABLE t9 (a INT) ENGINE='InnoDB'; +ALTER TABLE t1_int ADD x INT DEFAULT 42; +ALTER TABLE t1_bit +ADD x BIT(3) DEFAULT b'011', +ADD y BIT(5) DEFAULT b'10101', +ADD z BIT(2) DEFAULT b'10'; +ALTER TABLE t1_char ADD x CHAR(20) DEFAULT 'Just a test'; +ALTER TABLE t1_nodef ADD x INT NOT NULL; +ALTER TABLE t2 DROP b; +ALTER TABLE t4 MODIFY a FLOAT; +ALTER TABLE t5 MODIFY b FLOAT; +ALTER TABLE t6 MODIFY c FLOAT; +ALTER TABLE t7 ADD e1 INT, ADD e2 INT, ADD e3 INT, ADD e4 INT, +ADD e5 INT, ADD e6 INT, ADD e7 INT, ADD e8 INT; +ALTER TABLE t8 ADD e1 INT NOT NULL DEFAULT 0, ADD e2 INT NOT NULL DEFAULT 0, +ADD e3 INT NOT NULL DEFAULT 0, ADD e4 INT NOT NULL DEFAULT 0, +ADD e5 INT NOT NULL DEFAULT 0, ADD e6 INT NOT NULL DEFAULT 0, +ADD e7 INT NOT NULL DEFAULT 0, ADD e8 INT NOT NULL DEFAULT 0; +INSERT INTO t1_int VALUES (2, 4, 
4711); +INSERT INTO t1_char VALUES (2, 4, 'Foo is a bar'); +INSERT INTO t1_bit VALUES (2, 4, b'101', b'11100', b'01'); +**** On Master **** +INSERT INTO t1_int VALUES (1,2); +INSERT INTO t1_int VALUES (2,5); +INSERT INTO t1_bit VALUES (1,2); +INSERT INTO t1_bit VALUES (2,5); +INSERT INTO t1_char VALUES (1,2); +INSERT INTO t1_char VALUES (2,5); +SELECT * FROM t1_int; +a b +1 2 +2 5 +SELECT * FROM t1_bit; +a b +1 2 +2 5 +SELECT * FROM t1_char; +a b +1 2 +2 5 +**** On Slave **** +SELECT a,b,x FROM t1_int; +a b x +2 5 4711 +1 2 42 +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +a b HEX(x) HEX(y) HEX(z) +2 5 5 1C 1 +1 2 3 15 2 +SELECT a,b,x FROM t1_char; +a b x +2 5 Foo is a bar +1 2 Just a test +**** On Master **** +UPDATE t1_int SET b=2*b WHERE a=2; +UPDATE t1_char SET b=2*b WHERE a=2; +UPDATE t1_bit SET b=2*b WHERE a=2; +SELECT * FROM t1_int; +a b +1 2 +2 10 +SELECT * FROM t1_bit; +a b +1 2 +2 10 +SELECT * FROM t1_char; +a b +1 2 +2 10 +**** On Slave **** +SELECT a,b,x FROM t1_int; +a b x +2 10 4711 +1 2 42 +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +a b HEX(x) HEX(y) HEX(z) +2 10 5 1C 1 +1 2 3 15 2 +SELECT a,b,x FROM t1_char; +a b x +2 10 Foo is a bar +1 2 Just a test +INSERT INTO t9 VALUES (2); +INSERT INTO t1_nodef VALUES (1,2); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1364 +Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (2); +INSERT INTO t2 VALUES (2,4); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Table width mismatch - received 2 columns, test.t2 has 1 columns +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (4); +INSERT INTO t4 VALUES (4); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Column 0 type mismatch - received 
type 3, test.t4 has type 4 +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (5); +INSERT INTO t5 VALUES (5,10,25); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Column 1 type mismatch - received type 3, test.t5 has type 4 +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (6); +INSERT INTO t6 VALUES (6,12,36); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 1522 +Last_Error Column 2 type mismatch - received type 3, test.t6 has type 4 +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; +START SLAVE; +INSERT INTO t9 VALUES (6); +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +INSERT INTO t7 VALUES (1),(2),(3); +INSERT INTO t8 VALUES (1),(2),(3); +SELECT * FROM t7; +a +1 +2 +3 +SELECT * FROM t8; +a +1 +2 +3 +SELECT * FROM t7; +a e1 e2 e3 e4 e5 e6 e7 e8 +1 NULL NULL NULL NULL NULL NULL NULL NULL +2 NULL NULL NULL NULL NULL NULL NULL NULL +3 NULL NULL NULL NULL NULL NULL NULL NULL +SELECT * FROM t8; +a e1 e2 e3 e4 e5 e6 e7 e8 +1 0 0 0 0 0 0 0 0 +2 0 0 0 0 0 0 0 0 +3 0 0 0 0 0 0 0 0 +**** On Master **** +TRUNCATE t1_nodef; +SET SQL_LOG_BIN=0; +INSERT INTO t1_nodef VALUES (1,2); +INSERT INTO t1_nodef VALUES (2,4); +SET SQL_LOG_BIN=1; +**** On Slave **** +INSERT INTO t1_nodef VALUES (1,2,3); +INSERT INTO t1_nodef VALUES (2,4,6); +**** On Master **** +UPDATE 
t1_nodef SET b=2*b WHERE a=1; +SELECT * FROM t1_nodef; +a b +1 4 +2 4 +**** On Slave **** +SELECT * FROM t1_nodef; +a b x +1 4 3 +2 4 6 +**** On Master **** +DELETE FROM t1_nodef WHERE a=2; +SELECT * FROM t1_nodef; +a b +1 4 +**** On Slave **** +SELECT * FROM t1_nodef; +a b x +1 4 3 +**** Cleanup **** +DROP TABLE IF EXISTS t1_int,t1_bit,t1_char,t1_nodef; +DROP TABLE IF EXISTS t2,t3,t4,t5,t6,t7,t8,t9; diff --git a/mysql-test/r/rpl_row_tabledefs.result b/mysql-test/r/rpl_row_tabledefs_7ndb.result similarity index 61% rename from mysql-test/r/rpl_row_tabledefs.result rename to mysql-test/r/rpl_row_tabledefs_7ndb.result index 715ffcc7578..0d0da3b6185 100644 --- a/mysql-test/r/rpl_row_tabledefs.result +++ b/mysql-test/r/rpl_row_tabledefs_7ndb.result @@ -4,21 +4,96 @@ reset master; reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; -CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -CREATE TABLE t4 (a INT) ENGINE=myisam; -CREATE TABLE t5 (a INT, b INT, c INT) ENGINE=myisam; -CREATE TABLE t6 (a INT, b INT, c INT) ENGINE=myisam; -CREATE TABLE t9 (a INT PRIMARY KEY) ENGINE=myisam; -ALTER TABLE t1 ADD x INT DEFAULT 42; -ALTER TABLE t2 ADD x INT DEFAULT 42 AFTER a; -ALTER TABLE t3 ADD x INT DEFAULT 42 FIRST; +STOP SLAVE; +SET GLOBAL SQL_MODE='STRICT_ALL_TABLES'; +START SLAVE; +CREATE TABLE t1_int (a INT PRIMARY KEY, b INT) ENGINE='NDB'; +CREATE TABLE t1_bit (a INT PRIMARY KEY, b INT) ENGINE='NDB'; +CREATE TABLE t1_char (a INT PRIMARY KEY, b INT) ENGINE='NDB'; +CREATE TABLE t1_nodef (a INT PRIMARY KEY, b INT) ENGINE='NDB'; +CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE='NDB'; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE='NDB'; +CREATE TABLE t4 (a INT) ENGINE='NDB'; +CREATE TABLE t5 (a INT, b INT, c INT) ENGINE='NDB'; +CREATE TABLE t6 (a INT, b INT, c INT) ENGINE='NDB'; +CREATE TABLE t9 (a INT) ENGINE='NDB'; +ALTER TABLE t1_int ADD x INT DEFAULT 42; +ALTER TABLE t1_bit +ADD x BIT(3) DEFAULT b'011', +ADD y BIT(5) DEFAULT b'10101', +ADD z BIT(2) DEFAULT b'10'; +ALTER TABLE t1_char ADD x CHAR(20) DEFAULT 'Just a test'; +ALTER TABLE t1_nodef ADD x INT NOT NULL; +ALTER TABLE t2 DROP b; ALTER TABLE t4 MODIFY a FLOAT; ALTER TABLE t5 MODIFY b FLOAT; ALTER TABLE t6 MODIFY c FLOAT; -INSERT INTO t9 VALUES (1); -INSERT INTO t1 VALUES (1,2); +INSERT INTO t1_int VALUES (2, 4, 4711); +INSERT INTO t1_char VALUES (2, 4, 'Foo is a bar'); +INSERT INTO t1_bit VALUES (2, 4, b'101', b'11100', b'01'); +**** On Master **** +INSERT INTO t1_int VALUES (1,2); +INSERT INTO t1_int VALUES (2,5); +INSERT INTO t1_bit VALUES (1,2); +INSERT INTO t1_bit VALUES (2,5); +INSERT INTO t1_char VALUES (1,2); +INSERT INTO t1_char VALUES (2,5); +SELECT * FROM t1_int; +a b +1 2 +2 5 +SELECT * FROM t1_bit; +a b +1 2 +2 5 +SELECT * FROM t1_char; +a b +1 2 +2 5 +**** On Slave **** +SELECT a,b,x FROM t1_int; +a b x +1 2 42 +2 5 42 +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +a b HEX(x) HEX(y) HEX(z) +1 2 3 15 2 +2 5 3 15 2 +SELECT a,b,x FROM t1_char; +a b x +1 2 Just a test +2 5 Just a test +**** On Master **** +UPDATE t1_int SET b=2*b WHERE a=2; +UPDATE t1_char SET b=2*b WHERE a=2; +UPDATE t1_bit SET b=2*b WHERE a=2; +SELECT * FROM t1_int; +a b +1 2 +2 10 +SELECT * FROM t1_bit; +a b +1 2 +2 10 +SELECT * FROM t1_char; +a b +1 2 +2 10 +**** On Slave **** +SELECT a,b,x FROM t1_int; +a b x +1 2 42 +2 10 42 +SELECT a,b,HEX(x),HEX(y),HEX(z) FROM t1_bit; +a b HEX(x) HEX(y) HEX(z) +1 2 3 15 2 +2 10 
3 15 2 +SELECT a,b,x FROM t1_char; +a b x +1 2 Just a test +2 10 Just a test +INSERT INTO t9 VALUES (2); +INSERT INTO t1_nodef VALUES (1,2); SHOW SLAVE STATUS; Slave_IO_State # Master_Host 127.0.0.1 @@ -26,7 +101,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 1042 +Read_Master_Log_Pos # Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -38,10 +113,10 @@ Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 1454 -Last_Error Table width mismatch - received 2 columns, test.t1 has 3 columns +Last_Errno 1364 +Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef Skip_Counter 0 -Exec_Master_Log_Pos 968 +Exec_Master_Log_Pos # Relay_Log_Space # Until_Condition None Until_Log_File @@ -64,7 +139,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 1185 +Read_Master_Log_Pos # Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -76,48 +151,10 @@ Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 1454 -Last_Error Table width mismatch - received 2 columns, test.t2 has 3 columns +Last_Errno 1514 +Last_Error Table width mismatch - received 2 columns, test.t2 has 1 columns Skip_Counter 0 -Exec_Master_Log_Pos 1111 -Relay_Log_Space # -Until_Condition None -Until_Log_File -Until_Log_Pos 0 -Master_SSL_Allowed No -Master_SSL_CA_File -Master_SSL_CA_Path -Master_SSL_Cert -Master_SSL_Cipher -Master_SSL_Key -Seconds_Behind_Master # -SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; -START SLAVE; -INSERT INTO t9 VALUES (3); -INSERT INTO t3 VALUES (3,6); -SHOW SLAVE STATUS; -Slave_IO_State # -Master_Host 127.0.0.1 -Master_User root -Master_Port MASTER_PORT -Connect_Retry 1 -Master_Log_File master-bin.000001 -Read_Master_Log_Pos 1328 -Relay_Log_File # -Relay_Log_Pos # -Relay_Master_Log_File master-bin.000001 -Slave_IO_Running Yes -Slave_SQL_Running No -Replicate_Do_DB -Replicate_Ignore_DB -Replicate_Do_Table -Replicate_Ignore_Table -Replicate_Wild_Do_Table -Replicate_Wild_Ignore_Table -Last_Errno 1454 -Last_Error Table width mismatch - received 2 columns, test.t3 has 3 columns -Skip_Counter 0 -Exec_Master_Log_Pos 1254 +Exec_Master_Log_Pos # Relay_Log_Space # Until_Condition None Until_Log_File @@ -140,7 +177,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 1466 +Read_Master_Log_Pos # Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -152,10 +189,10 @@ Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 1454 +Last_Errno 1514 Last_Error Column 0 type mismatch - received type 3, test.t4 has type 4 Skip_Counter 0 -Exec_Master_Log_Pos 1397 +Exec_Master_Log_Pos # Relay_Log_Space # Until_Condition None Until_Log_File @@ -178,7 +215,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 1614 +Read_Master_Log_Pos # Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -190,10 +227,10 @@ Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 1454 +Last_Errno 1514 Last_Error Column 1 type mismatch - received type 3, test.t5 has type 4 Skip_Counter 0 -Exec_Master_Log_Pos 1535 +Exec_Master_Log_Pos # Relay_Log_Space # Until_Condition None 
Until_Log_File @@ -216,7 +253,7 @@ Master_User root Master_Port MASTER_PORT Connect_Retry 1 Master_Log_File master-bin.000001 -Read_Master_Log_Pos 1762 +Read_Master_Log_Pos # Relay_Log_File # Relay_Log_Pos # Relay_Master_Log_File master-bin.000001 @@ -228,10 +265,10 @@ Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table -Last_Errno 1454 +Last_Errno 1514 Last_Error Column 2 type mismatch - received type 3, test.t6 has type 4 Skip_Counter 0 -Exec_Master_Log_Pos 1683 +Exec_Master_Log_Pos # Relay_Log_Space # Until_Condition None Until_Log_File @@ -245,4 +282,5 @@ Master_SSL_Key Seconds_Behind_Master # SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; -DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t9; +DROP TABLE IF EXISTS t1_int,t1_bit,t1_char,t1_nodef; +DROP TABLE IF EXISTS t2,t3,t4,t5,t6,t9; diff --git a/mysql-test/r/rpl_stm_max_relay_size.result b/mysql-test/r/rpl_stm_max_relay_size.result index 1afbc406a8e..7882bd1aa46 100644 --- a/mysql-test/r/rpl_stm_max_relay_size.result +++ b/mysql-test/r/rpl_stm_max_relay_size.result @@ -5,9 +5,15 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; stop slave; +# +# Generate a big enough master's binlog to cause relay log rotations +# create table t1 (a int); drop table t1; reset slave; +# +# Test 1 +# set global max_binlog_size=8192; set global max_relay_log_size=8192-1; select @@global.max_relay_log_size; @@ -15,47 +21,251 @@ select @@global.max_relay_log_size; 4096 start slave; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 72956 # # master-bin.000001 Yes Yes # 0 0 72956 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 72956 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 72956 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 2 +# stop slave; reset slave; set global max_relay_log_size=(5*4096); select @@global.max_relay_log_size; -@@global.max_relay_log_size -20480 +@@global.max_relay_log_size 20480 start slave; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File 
Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 72956 # # master-bin.000001 Yes Yes # 0 0 72956 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 72956 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 72956 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 3: max_relay_log_size = 0 +# stop slave; reset slave; set global max_relay_log_size=0; select @@global.max_relay_log_size; -@@global.max_relay_log_size -0 +@@global.max_relay_log_size 0 start slave; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 72956 # # master-bin.000001 Yes Yes # 0 0 72956 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 72956 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 72956 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 4: Tests below are mainly to ensure that we have not coded with wrong assumptions +# stop slave; reset slave; flush logs; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 4 # # No No # 0 0 0 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File +Read_Master_Log_Pos 4 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File 
+Slave_IO_Running No +Slave_SQL_Running No +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 0 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 5 +# reset slave; start slave; flush logs; create table t1 (a int); show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 73042 # # master-bin.000001 Yes Yes # 0 0 73042 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 73042 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 73042 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # +# +# Test 6: one more rotation, to be sure Relay_Log_Space is correctly updated +# flush logs; drop table t1; show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 73118 # # master-bin.000001 Yes Yes # 0 0 73118 # None 0 No # +Slave_IO_State # +Master_Host 127.0.0.1 +Master_User root +Master_Port MASTER_PORT +Connect_Retry 1 +Master_Log_File master-bin.000001 +Read_Master_Log_Pos 73118 +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File master-bin.000001 +Slave_IO_Running Yes +Slave_SQL_Running Yes +Replicate_Do_DB +Replicate_Ignore_DB +Replicate_Do_Table +Replicate_Ignore_Table # +Replicate_Wild_Do_Table +Replicate_Wild_Ignore_Table +Last_Errno 0 +Last_Error +Skip_Counter 0 +Exec_Master_Log_Pos 73118 +Relay_Log_Space # +Until_Condition None +Until_Log_File +Until_Log_Pos 0 +Master_SSL_Allowed No +Master_SSL_CA_File +Master_SSL_CA_Path +Master_SSL_Cert +Master_SSL_Cipher +Master_SSL_Key +Seconds_Behind_Master # flush logs; show master status; -File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000002 102 +File 
master-bin.000002 +Position 102 +Binlog_Do_DB +Binlog_Ignore_DB +# +# End of 4.1 tests +# diff --git a/mysql-test/r/rpl_switch_stm_row_mixed.result b/mysql-test/r/rpl_switch_stm_row_mixed.result index 1ad199c6354..047bfe53704 100644 --- a/mysql-test/r/rpl_switch_stm_row_mixed.result +++ b/mysql-test/r/rpl_switch_stm_row_mixed.result @@ -7,6 +7,8 @@ start slave; drop database if exists mysqltest1; create database mysqltest1; use mysqltest1; +set session binlog_format=row; +set global binlog_format=row; show global variables like "binlog_format%"; Variable_name Value binlog_format ROW @@ -358,6 +360,28 @@ count(*) select count(*) from t16; count(*) 3 +DROP TABLE IF EXISTS t11; +SET SESSION BINLOG_FORMAT=STATEMENT; +CREATE TABLE t11 (song VARCHAR(255)); +LOCK TABLES t11 WRITE; +SET SESSION BINLOG_FORMAT=ROW; +INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict'); +SET SESSION BINLOG_FORMAT=STATEMENT; +INSERT INTO t11 VALUES('Careful With That Axe, Eugene'); +UNLOCK TABLES; +SELECT * FROM t11; +song Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict +song Careful With That Axe, Eugene +USE mysqltest1; +SELECT * FROM t11; +song Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict +song Careful With That Axe, Eugene +DROP TABLE IF EXISTS t12; +SET SESSION BINLOG_FORMAT=MIXED; +CREATE TABLE t12 (data LONG); +LOCK TABLES t12 WRITE; +INSERT INTO t12 VALUES(UUID()); +UNLOCK TABLES; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # drop database if exists mysqltest1 @@ -659,4 +683,323 @@ master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # use `mysqltest1`; insert into t16 values("try_66_") +master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t11 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t11 (song VARCHAR(255)) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; INSERT INTO t11 VALUES('Careful With That Axe, Eugene') +master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t12 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t12 (data LONG) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t12) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +show binlog events from 102; +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Query 1 # drop database if exists mysqltest1 +master-bin.000001 # Query 1 # create database mysqltest1 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t1 (a varchar(100)) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # 
Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_8_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_9_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_10_") +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_11_" +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_13_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_14_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_15_") +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_16_" +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_18_") +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_21_" +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_24_" +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t2` ( + `rpad(UUID(),100,' ')` varchar(100) CHARACTER SET utf8 NOT NULL DEFAULT '' +) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t3` ( + `1` varbinary(36) NOT NULL DEFAULT '' +) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use 
`mysqltest1`; CREATE TABLE `t4` ( + `a` varchar(100) DEFAULT NULL +) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t4) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t5) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo() +begin +insert into t1 values("work_25_"); +insert into t1 values(concat("for_26_",UUID())); +insert into t1 select "yesterday_27_"; +end +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo2() +begin +insert into t1 values(concat("emergency_28_",UUID())); +insert into t1 values("work_29_"); +insert into t1 values(concat("for_30_",UUID())); +set session binlog_format=row; # accepted for stored procs +insert into t1 values("more work_31_"); +set session binlog_format=mixed; +end +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned +begin +set session binlog_format=row; # rejected for stored funcs +insert into t1 values("alarm"); +return 100; +end +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo4(x varchar(100)) +begin +insert into t1 values(concat("work_250_",x)); +insert into t1 select "yesterday_270_"; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'hello'))) +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'world'))) +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" +master-bin.000001 # Query 1 # use `mysqltest1`; drop function foo3 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned +begin +insert into t1 values("foo3_32_"); +call foo(); +return 100; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # 
Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo4() returns bigint unsigned +begin +insert into t2 select foo3(); +return 100; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo5() returns bigint unsigned +begin +insert into t2 select UUID(); +return 100; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo6(x varchar(100)) returns bigint unsigned +begin +insert into t2 select x; +return 100; +end +master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `foo6`(_latin1'foo6_1_') +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select uuid() +master-bin.000001 # Query 1 # use `mysqltest1`; create table t11 (data varchar(255)) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; 
insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t11_bi before insert on t11 for each row +begin +set NEW.data = concat(NEW.data,UUID()); +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; create table t20 select * from t1 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t21 select * from t2 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t22 select * from t3 +master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1,t2,t3 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query 1 # use `mysqltest1`; create table t2 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query 1 # use `mysqltest1`; create table t3 (b varchar(100)) +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f (x varchar(100)) returns int deterministic +begin +insert into t1 values(null,x); +insert into t2 values(null,x); +return 1; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Intvar 1 # INSERT_ID=3 +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_44_") +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; create table t12 select * from t1 +master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int, b varchar(100), key(a)) +master-bin.000001 # Intvar 1 # INSERT_ID=4 +master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f`(_latin1'try_45_') +master-bin.000001 # Query 1 # use 
`mysqltest1`; create table t13 select * from t1 +master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query 1 # use `mysqltest1`; drop function f +master-bin.000001 # Query 1 # use `mysqltest1`; create table t14 (unique (a)) select * from t2 +master-bin.000001 # Query 1 # use `mysqltest1`; truncate table t2 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f1 (x varchar(100)) returns int deterministic +begin +insert into t1 values(null,x); +return 1; +end +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic +begin +insert into t2 values(null,x); +return 1; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic +begin +declare y int; +insert into t1 values(null,x); +set y = (select count(*) from t2); +return y; +end +master-bin.000001 # Intvar 1 # INSERT_ID=4 +master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f1`(_latin1'try_53_') +master-bin.000001 # Intvar 1 # INSERT_ID=5 +master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f2`(_latin1'try_54_') +master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row +begin +insert into t2 values(null,"try_55_"); +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; alter table t1 modify a int, drop primary key +master-bin.000001 # Intvar 1 # INSERT_ID=5 +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_57_") +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t16` ( + `UUID()` varchar(36) CHARACTER SET utf8 NOT NULL DEFAULT '' +) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t16 values("try_66_") +master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t11 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE 
TABLE t11 (song VARCHAR(255)) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; INSERT INTO t11 VALUES('Careful With That Axe, Eugene') +master-bin.000001 # Query 1 # use `mysqltest1`; DROP TABLE IF EXISTS t12 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t12 (data LONG) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t12) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F drop database mysqltest1; diff --git a/mysql-test/r/rpl_view.result b/mysql-test/r/rpl_view.result index 9d3b05ef8d5..f5ddb9e13ba 100644 --- a/mysql-test/r/rpl_view.result +++ b/mysql-test/r/rpl_view.result @@ -80,4 +80,19 @@ c ---> Cleaning up... DROP VIEW v1; DROP TABLE t1; +create table t1(a int, b int); +insert into t1 values (1, 1), (1, 2), (1, 3); +create view v1(a, b) as select a, sum(b) from t1 group by a; +explain v1; +Field Type Null Key Default Extra +a int(11) YES NULL +b decimal(32,0) YES NULL +show create table v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a`,sum(`t1`.`b`) AS `b` from `t1` group by `t1`.`a` +select * from v1; +a b +1 6 +drop table t1; +drop view v1; End of 5.0 tests diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result index 85c36c8d41c..8800ce248d5 100644 --- a/mysql-test/r/select.result +++ b/mysql-test/r/select.result @@ -2809,7 +2809,7 @@ CREATE TABLE t1 (i BIGINT UNSIGNED NOT NULL); INSERT INTO t1 VALUES (10); SELECT i='1e+01',i=1e+01, i in (1e+01,1e+01), i in ('1e+01','1e+01') FROM t1; i='1e+01' i=1e+01 i in (1e+01,1e+01) i in ('1e+01','1e+01') -0 1 1 1 +1 1 1 1 DROP TABLE t1; CREATE TABLE t1 ( K2C4 varchar(4) character set latin1 collate latin1_bin NOT NULL default '', diff --git a/mysql-test/r/sp-vars.result b/mysql-test/r/sp-vars.result index 6335870caa9..d41d98113a1 100644 --- a/mysql-test/r/sp-vars.result +++ b/mysql-test/r/sp-vars.result @@ -896,7 +896,7 @@ sp_var @user_var 0 Warnings: -Warning 1264 Out of range value for column 'sp_var' at row 1 +Warning 1366 Incorrect integer value: 'Hello, world!' for column 'sp_var' at row 1 DROP PROCEDURE p1; DROP TABLE t1; diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index fb40b52475b..79e32b921cc 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -5359,6 +5359,21 @@ a 2 DROP TABLE t11, t12| DROP FUNCTION bug19862| +drop table if exists t3| +drop database if exists mysqltest1| +create table t3 (a int)| +insert into t3 (a) values (1), (2)| +create database mysqltest1| +use mysqltest1| +drop database mysqltest1| +select database()| +database() +NULL +select * from (select 1 as a) as t1 natural join (select * from test.t3) as t2| +a +1 +use test| +drop table t3| DROP PROCEDURE IF EXISTS bug16899_p1| DROP FUNCTION IF EXISTS bug16899_f1| CREATE DEFINER=1234567890abcdefGHIKL@localhost PROCEDURE bug16899_p1() @@ -5379,5 +5394,81 @@ Procedure sql_mode Create Procedure bug21416 CREATE DEFINER=`root`@`localhost` PROCEDURE `bug21416`() show create procedure bug21416 drop procedure bug21416| +DROP PROCEDURE IF EXISTS bug21414| +CREATE PROCEDURE bug21414() SELECT 1| +FLUSH TABLES WITH READ LOCK| +DROP PROCEDURE bug21414| +ERROR HY000: Can't execute the query because you have a conflicting read lock +UNLOCK TABLES| +The following should succeed. 
+DROP PROCEDURE bug21414| +set names utf8| +drop database if exists това_е_дълго_име_за_база_данни_нали| +create database това_е_дълго_име_за_база_данни_нали| +INSERT INTO mysql.proc VALUES ('това_е_дълго_име_за_база_данни_нали','това_е_процедура_с_доста_дълго_име_нали_и_още_по_дълго','PROCEDURE','това_е_процедура_с_доста_дълго_име_нали_и_още_по_дълго','SQL','CONTAINS_SQL','NO','DEFINER','','','bad_body','root@localhost',now(), now(),'','')| +call това_е_дълго_име_за_база_данни_нали.това_е_процедура_с_доста_дълго_име_нали_и_още_по_дълго()| +ERROR HY000: Failed to load routine това_е_дълго_име_за_база_данни_нали.. The table mysql.proc is missing, corrupt, or contains bad data (internal code -6) +drop database това_е_дълго_име_за_база_данни_нали| +CREATE TABLE t3 ( +Member_ID varchar(15) NOT NULL, +PRIMARY KEY (Member_ID) +)| +CREATE TABLE t4 ( +ID int(10) unsigned NOT NULL auto_increment, +Member_ID varchar(15) NOT NULL default '', +Action varchar(12) NOT NULL, +Action_Date datetime NOT NULL, +Track varchar(15) default NULL, +User varchar(12) default NULL, +Date_Updated timestamp NOT NULL default CURRENT_TIMESTAMP on update +CURRENT_TIMESTAMP, +PRIMARY KEY (ID), +KEY Action (Action), +KEY Action_Date (Action_Date) +)| +INSERT INTO t3(Member_ID) VALUES +('111111'), ('222222'), ('333333'), ('444444'), ('555555'), ('666666')| +INSERT INTO t4(Member_ID, Action, Action_Date, Track) VALUES +('111111', 'Disenrolled', '2006-03-01', 'CAD' ), +('111111', 'Enrolled', '2006-03-01', 'CAD' ), +('111111', 'Disenrolled', '2006-07-03', 'CAD' ), +('222222', 'Enrolled', '2006-03-07', 'CAD' ), +('222222', 'Enrolled', '2006-03-07', 'CHF' ), +('222222', 'Disenrolled', '2006-08-02', 'CHF' ), +('333333', 'Enrolled', '2006-03-01', 'CAD' ), +('333333', 'Disenrolled', '2006-03-01', 'CAD' ), +('444444', 'Enrolled', '2006-03-01', 'CAD' ), +('555555', 'Disenrolled', '2006-03-01', 'CAD' ), +('555555', 'Enrolled', '2006-07-21', 'CAD' ), +('555555', 'Disenrolled', '2006-03-01', 'CHF' ), +('666666', 'Enrolled', '2006-02-09', 'CAD' ), +('666666', 'Enrolled', '2006-05-12', 'CHF' ), +('666666', 'Disenrolled', '2006-06-01', 'CAD' )| +DROP FUNCTION IF EXISTS bug21493| +CREATE FUNCTION bug21493(paramMember VARCHAR(15)) RETURNS varchar(45) +BEGIN +DECLARE tracks VARCHAR(45); +SELECT GROUP_CONCAT(Track SEPARATOR ', ') INTO tracks FROM t4 +WHERE Member_ID=paramMember AND Action='Enrolled' AND +(Track,Action_Date) IN (SELECT Track, MAX(Action_Date) FROM t4 +WHERE Member_ID=paramMember GROUP BY Track); +RETURN tracks; +END| +SELECT bug21493('111111')| +bug21493('111111') +NULL +SELECT bug21493('222222')| +bug21493('222222') +CAD +SELECT bug21493(Member_ID) FROM t3| +bug21493(Member_ID) +NULL +CAD +CAD +CAD +CAD +CHF +DROP FUNCTION bug21493| +DROP TABLE t3,t4| End of 5.0 tests drop table t1,t2; diff --git a/mysql-test/r/status.result b/mysql-test/r/status.result index 45b84219f2a..e9891e4d495 100644 --- a/mysql-test/r/status.result +++ b/mysql-test/r/status.result @@ -3,6 +3,10 @@ show status like 'Table_lock%'; Variable_name Value Table_locks_immediate 0 Table_locks_waited 0 +select * from information_schema.session_status where variable_name like 'Table_lock%'; +VARIABLE_NAME VARIABLE_VALUE +TABLE_LOCKS_IMMEDIATE 0.0000000 +TABLE_LOCKS_WAITED 0.0000000 SET SQL_LOG_BIN=0; drop table if exists t1; create table t1(n int) engine=myisam; @@ -16,6 +20,10 @@ show status like 'Table_lock%'; Variable_name Value Table_locks_immediate 3 Table_locks_waited 1 +select * from information_schema.session_status where variable_name like 'Table_lock%';
+VARIABLE_NAME VARIABLE_VALUE +TABLE_LOCKS_IMMEDIATE 3.0000000 +TABLE_LOCKS_WAITED 1.0000000 drop table t1; select 1; 1 @@ -53,21 +61,36 @@ FLUSH STATUS; SHOW STATUS LIKE 'max_used_connections'; Variable_name Value Max_used_connections 1 +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; +VARIABLE_NAME VARIABLE_VALUE +MAX_USED_CONNECTIONS 1.0000000 SET @save_thread_cache_size=@@thread_cache_size; SET GLOBAL thread_cache_size=3; SHOW STATUS LIKE 'max_used_connections'; Variable_name Value Max_used_connections 3 +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; +VARIABLE_NAME VARIABLE_VALUE +MAX_USED_CONNECTIONS 3.0000000 FLUSH STATUS; SHOW STATUS LIKE 'max_used_connections'; Variable_name Value Max_used_connections 2 +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; +VARIABLE_NAME VARIABLE_VALUE +MAX_USED_CONNECTIONS 2.0000000 SHOW STATUS LIKE 'max_used_connections'; Variable_name Value Max_used_connections 3 +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; +VARIABLE_NAME VARIABLE_VALUE +MAX_USED_CONNECTIONS 3.0000000 SHOW STATUS LIKE 'max_used_connections'; Variable_name Value Max_used_connections 4 +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; +VARIABLE_NAME VARIABLE_VALUE +MAX_USED_CONNECTIONS 4.0000000 SET GLOBAL thread_cache_size=@save_thread_cache_size; show status like 'com_show_status'; Variable_name Value diff --git a/mysql-test/r/strict.result b/mysql-test/r/strict.result index 25475b8cd45..0e21ff80e29 100644 --- a/mysql-test/r/strict.result +++ b/mysql-test/r/strict.result @@ -619,9 +619,9 @@ ERROR 22012: Division by 0 UPDATE t1 SET col1= MOD(col1,0) WHERE col1 > 0; ERROR 22012: Division by 0 INSERT INTO t1 (col1) VALUES (''); -ERROR 22003: Out of range value for column 'col1' at row 1 +ERROR HY000: Incorrect integer value: '' for column 'col1' at row 1 INSERT INTO t1 (col1) VALUES ('a59b'); -ERROR 22003: Out of range value for column 'col1' at row 1 +ERROR HY000: Incorrect integer value: 'a59b' for column 'col1' at row 1 INSERT INTO t1 (col1) VALUES ('1a'); ERROR 01000: Data truncated for column 'col1' at row 1 INSERT IGNORE INTO t1 (col1) VALUES ('2a'); @@ -701,9 +701,9 @@ ERROR 22012: Division by 0 UPDATE t1 SET col1= MOD(col1,0) WHERE col1 > 0; ERROR 22012: Division by 0 INSERT INTO t1 (col1) VALUES (''); -ERROR 22003: Out of range value for column 'col1' at row 1 +ERROR HY000: Incorrect integer value: '' for column 'col1' at row 1 INSERT INTO t1 (col1) VALUES ('a59b'); -ERROR 22003: Out of range value for column 'col1' at row 1 +ERROR HY000: Incorrect integer value: 'a59b' for column 'col1' at row 1 INSERT INTO t1 (col1) VALUES ('1a'); ERROR 01000: Data truncated for column 'col1' at row 1 INSERT IGNORE INTO t1 (col1) VALUES ('2a'); @@ -925,10 +925,10 @@ Warning 1264 Out of range value for column 'col2' at row 1 Warning 1264 Out of range value for column 'col2' at row 1 SELECT * FROM t1; col1 col2 -0 0 +-2.2e-307 0 1e-303 0 1.7e+308 1.7e+308 -0 0 +-2.2e-307 0 -2e-307 0 1.7e+308 1.7e+308 0 NULL diff --git a/mysql-test/r/strict_autoinc_1myisam.result b/mysql-test/r/strict_autoinc_1myisam.result new file mode 100644 index 00000000000..dcda74a19cd --- /dev/null +++ b/mysql-test/r/strict_autoinc_1myisam.result @@ -0,0 +1,27 @@ +set @org_mode=@@sql_mode; +create table t1 +( +`a` tinyint(4) NOT NULL auto_increment, +primary key (`a`) +) 
engine = 'MYISAM' ; +set @@sql_mode='strict_all_tables'; +insert into t1 values(1000); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set auto_increment_increment=1000; +set auto_increment_offset=700; +insert into t1 values(null); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set @@sql_mode=@org_mode; +insert into t1 values(null); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select * from t1; +a +127 +drop table t1; diff --git a/mysql-test/r/strict_autoinc_2innodb.result b/mysql-test/r/strict_autoinc_2innodb.result new file mode 100644 index 00000000000..9e90ab886f3 --- /dev/null +++ b/mysql-test/r/strict_autoinc_2innodb.result @@ -0,0 +1,27 @@ +set @org_mode=@@sql_mode; +create table t1 +( +`a` tinyint(4) NOT NULL auto_increment, +primary key (`a`) +) engine = 'InnoDB' ; +set @@sql_mode='strict_all_tables'; +insert into t1 values(1000); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set auto_increment_increment=1000; +set auto_increment_offset=700; +insert into t1 values(null); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set @@sql_mode=@org_mode; +insert into t1 values(null); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select * from t1; +a +127 +drop table t1; diff --git a/mysql-test/r/strict_autoinc_3heap.result b/mysql-test/r/strict_autoinc_3heap.result new file mode 100644 index 00000000000..d22054eb59d --- /dev/null +++ b/mysql-test/r/strict_autoinc_3heap.result @@ -0,0 +1,27 @@ +set @org_mode=@@sql_mode; +create table t1 +( +`a` tinyint(4) NOT NULL auto_increment, +primary key (`a`) +) engine = 'MEMORY' ; +set @@sql_mode='strict_all_tables'; +insert into t1 values(1000); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set auto_increment_increment=1000; +set auto_increment_offset=700; +insert into t1 values(null); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set @@sql_mode=@org_mode; +insert into t1 values(null); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select * from t1; +a +127 +drop table t1; diff --git a/mysql-test/r/strict_autoinc_4bdb.result b/mysql-test/r/strict_autoinc_4bdb.result new file mode 100644 index 00000000000..48ebe196baf --- /dev/null +++ b/mysql-test/r/strict_autoinc_4bdb.result @@ -0,0 +1,27 @@ +set @org_mode=@@sql_mode; +create table t1 +( +`a` tinyint(4) NOT NULL auto_increment, +primary key (`a`) +) engine = 'BDB' ; +set @@sql_mode='strict_all_tables'; +insert into t1 values(1000); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set auto_increment_increment=1000; +set auto_increment_offset=700; +insert into t1 values(null); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set @@sql_mode=@org_mode; +insert into t1 values(null); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select * from t1; +a +127 +drop table t1; diff --git a/mysql-test/r/strict_autoinc_5ndb.result b/mysql-test/r/strict_autoinc_5ndb.result new file mode 100644 index 00000000000..debc7242e15 --- /dev/null +++ b/mysql-test/r/strict_autoinc_5ndb.result @@ -0,0 +1,27 @@ +set @org_mode=@@sql_mode; +create table t1 +( +`a` tinyint(4) NOT NULL auto_increment, +primary key (`a`) +) engine = 'NDB' ; +set 
@@sql_mode='strict_all_tables'; +insert into t1 values(1000); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set auto_increment_increment=1000; +set auto_increment_offset=700; +insert into t1 values(null); +ERROR 22003: Out of range value for column 'a' at row 1 +select count(*) from t1; +count(*) +0 +set @@sql_mode=@org_mode; +insert into t1 values(null); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select * from t1; +a +127 +drop table t1; diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 141f9ed0f4a..a1463bc2b3c 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1011,7 +1011,7 @@ INSERT INTO t1 VALUES (1); UPDATE t1 SET i=i+1 WHERE i=(SELECT MAX(i)); select * from t1; i -1 +2 drop table t1; CREATE TABLE t1 (a int(1)); EXPLAIN EXTENDED SELECT (SELECT RAND() FROM t1) FROM t1; @@ -1160,7 +1160,7 @@ Code2 char(2) NOT NULL default '', PRIMARY KEY (Code) ) ENGINE=MyISAM; INSERT INTO t2 VALUES ('AUS','Australia','Oceania','Australia and New Zealand',7741220.00,1901,18886000,79.8,351182.00,392911.00,'Australia','Constitutional Monarchy, Federation','Elisabeth II',135,'AU'); -INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); +INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); select t2.Continent, t1.Name, t1.Population from t2 LEFT JOIN t1 ON t2.Code = t1.t2 where t1.Population IN (select max(t1.Population) AS Population from t1, t2 where t1.t2 = t2.Code group by Continent); Continent Name Population Oceania Sydney 3276207 @@ -1203,7 +1203,7 @@ UPDATE t1 SET t.i=i+(SELECT MAX(i) FROM (SELECT 1) t); ERROR 42S22: Unknown column 't.i' in 'field list' select * from t1; i -1 +3 drop table t1; CREATE TABLE t1 ( id int(11) default NULL @@ -2512,7 +2512,7 @@ Code2 char(2) NOT NULL default '' ) ENGINE=MyISAM; INSERT INTO t1 VALUES ('XXX','Xxxxx','Oceania','Xxxxxx',26.00,0,0,0,0,0,'Xxxxx','Xxxxx','Xxxxx',NULL,'XX'); INSERT INTO t1 VALUES ('ASM','American Samoa','Oceania','Polynesia',199.00,0,68000,75.1,334.00,NULL,'Amerika Samoa','US Territory','George W. Bush',54,'AS'); -INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); +INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); INSERT INTO t1 VALUES ('UMI','United States Minor Outlying Islands','Oceania','Micronesia/Caribbean',16.00,0,0,NULL,0.00,NULL,'United States Minor Outlying Islands','Dependent Territory of the US','George W. 
Bush',NULL,'UM'); /*!40000 ALTER TABLE t1 ENABLE KEYS */; SELECT DISTINCT Continent AS c FROM t1 WHERE Code <> SOME ( SELECT Code FROM t1 WHERE Continent = c AND Population < 200); @@ -2937,6 +2937,71 @@ retailerID statusID changed 0048 1 2006-01-06 12:37:50 0059 1 2006-01-06 12:37:50 drop table t1; +create table t1(a int, primary key (a)); +insert into t1 values (10); +create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b)); +insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989'); +explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r +ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' +ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 system PRIMARY NULL NULL NULL 1 +1 PRIMARY r const PRIMARY PRIMARY 4 const 1 +2 DEPENDENT SUBQUERY t2 range b b 40 NULL 2 Using where +SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r +ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' +ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10; +a a b +10 3 35989 +explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r +ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' +ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 system PRIMARY NULL NULL NULL 1 +1 PRIMARY r const PRIMARY PRIMARY 4 const 1 +2 DEPENDENT SUBQUERY t2 range b b 40 NULL 2 Using where +SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r +ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' +ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10; +a a b +10 1 359 +drop table t1,t2; +CREATE TABLE t1 ( +field1 int NOT NULL, +field2 int NOT NULL, +field3 int NOT NULL, +PRIMARY KEY (field1,field2,field3) +); +CREATE TABLE t2 ( +fieldA int NOT NULL, +fieldB int NOT NULL, +PRIMARY KEY (fieldA,fieldB) +); +INSERT INTO t1 VALUES +(1,1,1), (1,1,2), (1,2,1), (1,2,2), (1,2,3), (1,3,1); +INSERT INTO t2 VALUES (1,1), (1,2), (1,3); +SELECT field1, field2, COUNT(*) +FROM t1 GROUP BY field1, field2; +field1 field2 COUNT(*) +1 1 2 +1 2 3 +1 3 1 +SELECT field1, field2 +FROM t1 +GROUP BY field1, field2 +HAVING COUNT(*) >= ALL (SELECT fieldB +FROM t2 WHERE fieldA = field1); +field1 field2 +1 2 +SELECT field1, field2 +FROM t1 +GROUP BY field1, field2 +HAVING COUNT(*) < ANY (SELECT fieldB +FROM t2 WHERE fieldA = field1); +field1 field2 +1 1 +1 3 +DROP TABLE t1, t2; create table t1 (df decimal(5,1)); insert into t1 values(1.1); insert into t1 values(2.2); @@ -3368,3 +3433,89 @@ ORDER BY t1.t DESC LIMIT 1); i1 i2 t i1 i2 t 24 1 2005-05-27 12:40:30 24 1 2006-06-20 12:29:40 DROP TABLE t1, t2; +CREATE TABLE t1 (i INT); +(SELECT i FROM t1) UNION (SELECT i FROM t1); +i +SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS +( +(SELECT i FROM t1) UNION +(SELECT i FROM t1) +); +i +SELECT * FROM t1 +WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1))); +i +explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12)) +from t1; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'union (select t12.i from t1 t12)) +from t1' at line 1 +explain select * from t1 where not exists +((select t11.i from t1 t11) union (select t12.i from t1 t12)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not 
found +2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used +3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table +4 UNION t12 system NULL NULL NULL NULL 0 const row not found +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT); +CREATE TABLE t2 (a INT); +INSERT INTO t2 values (1); +INSERT INTO t1 VALUES (1,1),(1,2),(2,3),(3,4); +SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; +(SELECT COUNT(DISTINCT t1.b) from t2) +2 +1 +1 +SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +FROM t1 GROUP BY t1.a; +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +2 +1 +1 +SELECT COUNT(DISTINCT t1.b), (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; +COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) +2 2 +1 1 +1 1 +SELECT COUNT(DISTINCT t1.b), +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +FROM t1 GROUP BY t1.a; +COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +2 2 +1 1 +1 1 +SELECT ( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +FROM t1 GROUP BY t1.a; +( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +2 +1 +1 +SELECT ( +SELECT ( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 t2 +GROUP BY t2.a; +( +SELECT ( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +FROM t1 GROUP BY t1.a LIMIT 1) +2 +2 +2 +DROP TABLE t1,t2; diff --git a/mysql-test/r/temp_table.result b/mysql-test/r/temp_table.result index 637f74a1d50..6c9a389c1f4 100644 --- a/mysql-test/r/temp_table.result +++ b/mysql-test/r/temp_table.result @@ -135,6 +135,23 @@ d c bar 2 foo 1 drop table t1, t2; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT); +LOCK TABLE t1 WRITE; +CREATE TEMPORARY TABLE t1 (i INT); +The following command should not block +DROP TEMPORARY TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (i INT); +CREATE TEMPORARY TABLE t2 (i INT); +DROP TEMPORARY TABLE t2, t1; +ERROR 42S02: Unknown table 't1' +SELECT * FROM t2; +ERROR 42S02: Table 'test.t2' doesn't exist +SELECT * FROM t1; +i +DROP TABLE t1; +End of 4.1 tests. 
create temporary table t1 (a int); insert into t1 values (4711); select * from t1; diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index 7fe3194304c..5f027195e2e 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -1173,4 +1173,16 @@ TRIGGER t2_bi BEFORE INSERT ON t2 FOR EACH ROW SET @a = 2; ERROR HY000: String '1234567890abcdefghij1234567890abcdefghij1234567890abcdefghijQWERTY' is too long for host name (should be no longer than 60) DROP TABLE t1; DROP TABLE t2; +drop table if exists t1; +create table t1 (i int, j int key); +insert into t1 values (1,1), (2,2), (3,3); +create trigger t1_bu before update on t1 for each row +set new.j = new.j + 10; +update t1 set i= i+ 10 where j > 2; +select * from t1; +i j +1 1 +2 2 +13 13 +drop table t1; End of 5.0 tests diff --git a/mysql-test/r/true.require b/mysql-test/r/true.require new file mode 100644 index 00000000000..09aae1ed1d0 --- /dev/null +++ b/mysql-test/r/true.require @@ -0,0 +1,2 @@ +TRUE +1 diff --git a/mysql-test/r/type_bit.result b/mysql-test/r/type_bit.result index 08a1b229491..5c9b70fadc3 100644 --- a/mysql-test/r/type_bit.result +++ b/mysql-test/r/type_bit.result @@ -602,4 +602,12 @@ NULL NULL 0 0 11111111 11111111 drop table bug15583; +create table t1(a bit(1), b smallint unsigned); +insert into t1 (b, a) values ('2', '1'); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select hex(a), b from t1; +hex(a) b +1 2 +drop table t1; End of 5.0 tests diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result index 3f0b9f4c13c..12ce742eab4 100644 --- a/mysql-test/r/type_date.result +++ b/mysql-test/r/type_date.result @@ -27,12 +27,12 @@ INSERT INTO t1 VALUES ( "2000-1-2" ); INSERT INTO t1 VALUES ( "2000-1-3" ); INSERT INTO t1 VALUES ( "2000-1-4" ); INSERT INTO t1 VALUES ( "2000-1-5" ); -SELECT * FROM t1 WHERE datum BETWEEN "2000-1-2" AND "2000-1-4"; +SELECT * FROM t1 WHERE datum BETWEEN cast("2000-1-2" as date) AND cast("2000-1-4" as date); datum 2000-01-02 2000-01-03 2000-01-04 -SELECT * FROM t1 WHERE datum BETWEEN "2000-1-2" AND datum - INTERVAL 100 DAY; +SELECT * FROM t1 WHERE datum BETWEEN cast("2000-1-2" as date) AND datum - INTERVAL 100 DAY; datum DROP TABLE t1; CREATE TABLE t1 ( @@ -104,3 +104,9 @@ SELECT * FROM t1; y 0000 DROP TABLE t1; +create table t1(start_date date, end_date date); +insert into t1 values ('2000-01-01','2000-01-02'); +select 1 from t1 where cast('2000-01-01 12:01:01' as datetime) between start_date and end_date; +1 +1 +drop table t1; diff --git a/mysql-test/r/type_float.result b/mysql-test/r/type_float.result index cfae61d5e91..d28131cc456 100644 --- a/mysql-test/r/type_float.result +++ b/mysql-test/r/type_float.result @@ -262,6 +262,13 @@ desc t3; Field Type Null Key Default Extra a decimal(21,2) NO 0.00 drop table t1,t2,t3; +select 1e-308, 1.00000001e-300, 100000000e-300; +1e-308 1.00000001e-300 100000000e-300 +0 1.00000001e-300 1e-292 +select 10e307; +10e307 +1e+308 +End of 4.1 tests create table t1 (s1 float(0,2)); ERROR 42000: For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column 's1'). 
create table t1 (s1 float(1,2)); diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index 58a217f4b14..9805b788a73 100644 --- a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -1283,10 +1283,66 @@ b create table t3 SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; show create table t3; Table Create Table +t3 CREATE TABLE `t3` ( + `left(a,100000000)` mediumtext +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop tables t1,t2,t3; +CREATE TABLE t1 (a longtext); +CREATE TABLE t2 (b varchar(20)); +INSERT INTO t1 VALUES ('a'),('b'); +SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; +left(a,100000000) +a +b +create table t3 SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; +show create table t3; +Table Create Table t3 CREATE TABLE `t3` ( `left(a,100000000)` longtext ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop tables t1,t2,t3; +SELECT @tmp_max:= @@max_allowed_packet; +@tmp_max:= @@max_allowed_packet +1048576 +SET max_allowed_packet=25000000; +CREATE TABLE t1 (a mediumtext); +CREATE TABLE t2 (b varchar(20)); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,20000000) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` longtext +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLES t1,t3; +CREATE TABLE t1 (a tinytext); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,2) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` varchar(510) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLES t1,t3; +CREATE TABLE t1 (a mediumtext); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,2) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` longtext +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLES t1,t3; +CREATE TABLE t1 (a tinyblob); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,2) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` varbinary(510) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLES t1,t2,t3; +SET max_allowed_packet:= @tmp_max; create table t1 ( id int not null auto_increment, primary key (id), col1 int); insert into t1 (col1) values (2),(3),(4),(5),(6); select 99 union all select id from t1 order by 1; @@ -1365,7 +1421,7 @@ drop table t1, t2; (select avg(1)) union (select avg(1)) union (select avg(1)) union (select avg(1)) union (select avg(1)) union (select avg(1)); avg(1) -NULL +1.0000 select _utf8'12' union select _latin1'12345'; 12 12 diff --git a/mysql-test/r/innodb_unsafe_binlog.result b/mysql-test/r/unsafe_binlog_innodb.result similarity index 70% rename from mysql-test/r/innodb_unsafe_binlog.result rename to mysql-test/r/unsafe_binlog_innodb.result index 38f0c2a12fa..54a24a52d57 100644 --- a/mysql-test/r/innodb_unsafe_binlog.result +++ b/mysql-test/r/unsafe_binlog_innodb.result @@ -1,8 +1,8 @@ drop table if exists t1,t2; create table t1 (id int not null, f_id int not null, f int not null, -primary key(f_id, id)) engine=innodb; +primary key(f_id, id)) engine = InnoDB; create table t2 (id int not null,s_id int not null,s varchar(200), -primary key(id)) engine=innodb; +primary key(id)) engine = InnoDB; INSERT INTO t1 VALUES (8, 1, 3); INSERT INTO t1 VALUES (1, 2, 1); INSERT INTO t2 VALUES (1, 0, ''); @@ -14,7 +14,7 @@ select ml.* from t1 as ml left join t2 as mm on (mm.id=ml.id) where mm.id is null lock in share mode; id f_id f drop 
table t1,t2; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +create table t1(a int not null, b int, primary key(a)) engine = InnoDB; insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3); commit; set autocommit = 0; @@ -34,7 +34,7 @@ ERROR HY000: Lock wait timeout exceeded; try restarting transaction commit; commit; drop table t1; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +create table t1(a int not null, b int, primary key(a)) engine = InnoDB; insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2),(7,3); commit; set autocommit = 0; @@ -46,9 +46,9 @@ a b commit; commit; drop table t1; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +create table t1(a int not null, b int, primary key(a)) engine = InnoDB; insert into t1 values (1,2),(5,3),(4,2); -create table t2(d int not null, e int, primary key(d)) engine=innodb; +create table t2(d int not null, e int, primary key(d)) engine = InnoDB; insert into t2 values (8,6),(12,1),(3,1); commit; set autocommit = 0; @@ -60,24 +60,27 @@ d e set autocommit = 0; insert into t1 select * from t2; update t1 set b = (select e from t2 where a = d); -create table t3(d int not null, e int, primary key(d)) engine=innodb +create table t3(d int not null, e int, primary key(d)) engine = InnoDB select * from t2; commit; commit; drop table t1, t2, t3; -create table t1(a int not null, b int, primary key(a)) engine=innodb; +SET SESSION STORAGE_ENGINE = InnoDB; +SET SESSION STORAGE_ENGINE = InnoDB; +SET SESSION STORAGE_ENGINE = InnoDB; +create table t1(a int not null, b int, primary key(a)) engine = InnoDB; insert into t1 values (1,2),(5,3),(4,2); -create table t2(a int not null, b int, primary key(a)) engine=innodb; +create table t2(a int not null, b int, primary key(a)) engine = InnoDB; insert into t2 values (8,6),(12,1),(3,1); -create table t3(d int not null, b int, primary key(d)) engine=innodb; +create table t3(d int not null, b int, primary key(d)) engine = InnoDB; insert into t3 values (8,6),(12,1),(3,1); -create table t5(a int not null, b int, primary key(a)) engine=innodb; +create table t5(a int not null, b int, primary key(a)) engine = InnoDB; insert into t5 values (1,2),(5,3),(4,2); -create table t6(d int not null, e int, primary key(d)) engine=innodb; +create table t6(d int not null, e int, primary key(d)) engine = InnoDB; insert into t6 values (8,6),(12,1),(3,1); -create table t8(a int not null, b int, primary key(a)) engine=innodb; +create table t8(a int not null, b int, primary key(a)) engine = InnoDB; insert into t8 values (1,2),(5,3),(4,2); -create table t9(d int not null, e int, primary key(d)) engine=innodb; +create table t9(d int not null, e int, primary key(d)) engine = InnoDB; insert into t9 values (8,6),(12,1),(3,1); commit; set autocommit = 0; @@ -94,19 +97,19 @@ SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; update t3 set b = (select b from t2 where a = d); set autocommit = 0; SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; -create table t4(a int not null, b int, primary key(a)) engine=innodb select * from t2; +create table t4(a int not null, b int, primary key(a)) select * from t2; set autocommit = 0; insert into t5 (select * from t2 lock in share mode); set autocommit = 0; update t6 set e = (select b from t2 where a = d lock in share mode); set autocommit = 0; -create table t7(a int not null, b int, primary key(a)) engine=innodb select * from t2 lock in share mode; +create table t7(a int not null, b int, primary key(a)) select * from t2 lock in 
share mode; set autocommit = 0; insert into t8 (select * from t2 for update); set autocommit = 0; update t9 set e = (select b from t2 where a = d for update); set autocommit = 0; -create table t10(a int not null, b int, primary key(a)) engine=innodb select * from t2 for update; +create table t10(a int not null, b int, primary key(a)) select * from t2 for update; ERROR HY000: Lock wait timeout exceeded; try restarting transaction ERROR HY000: Lock wait timeout exceeded; try restarting transaction ERROR HY000: Lock wait timeout exceeded; try restarting transaction diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index b55d8d9ef5a..f759e78adbf 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -103,21 +103,36 @@ set max_join_size=100; show variables like 'max_join_size'; Variable_name Value max_join_size 100 +select * from information_schema.session_variables where variable_name like 'max_join_size'; +VARIABLE_NAME VARIABLE_VALUE +MAX_JOIN_SIZE 100 show global variables like 'max_join_size'; Variable_name Value max_join_size 10 +select * from information_schema.global_variables where variable_name like 'max_join_size'; +VARIABLE_NAME VARIABLE_VALUE +MAX_JOIN_SIZE 10 set GLOBAL max_join_size=2000; show global variables like 'max_join_size'; Variable_name Value max_join_size 2000 +select * from information_schema.global_variables where variable_name like 'max_join_size'; +VARIABLE_NAME VARIABLE_VALUE +MAX_JOIN_SIZE 2000 set max_join_size=DEFAULT; show variables like 'max_join_size'; Variable_name Value max_join_size 2000 +select * from information_schema.session_variables where variable_name like 'max_join_size'; +VARIABLE_NAME VARIABLE_VALUE +MAX_JOIN_SIZE 2000 set GLOBAL max_join_size=DEFAULT; show global variables like 'max_join_size'; Variable_name Value max_join_size HA_POS_ERROR +select * from information_schema.global_variables where variable_name like 'max_join_size'; +VARIABLE_NAME VARIABLE_VALUE +MAX_JOIN_SIZE HA_POS_ERROR set @@max_join_size=1000, @@global.max_join_size=2000; select @@local.max_join_size, @@global.max_join_size; @@local.max_join_size @@global.max_join_size @@ -149,14 +164,23 @@ set global concurrent_insert=2; show variables like 'concurrent_insert'; Variable_name Value concurrent_insert 2 +select * from information_schema.session_variables where variable_name like 'concurrent_insert'; +VARIABLE_NAME VARIABLE_VALUE +CONCURRENT_INSERT 2 set global concurrent_insert=1; show variables like 'concurrent_insert'; Variable_name Value concurrent_insert 1 +select * from information_schema.session_variables where variable_name like 'concurrent_insert'; +VARIABLE_NAME VARIABLE_VALUE +CONCURRENT_INSERT 1 set global concurrent_insert=0; show variables like 'concurrent_insert'; Variable_name Value concurrent_insert 0 +select * from information_schema.session_variables where variable_name like 'concurrent_insert'; +VARIABLE_NAME VARIABLE_VALUE +CONCURRENT_INSERT 0 set global concurrent_insert=DEFAULT; select @@concurrent_insert; @@concurrent_insert @@ -165,26 +189,44 @@ set global timed_mutexes=ON; show variables like 'timed_mutexes'; Variable_name Value timed_mutexes ON +select * from information_schema.session_variables where variable_name like 'timed_mutexes'; +VARIABLE_NAME VARIABLE_VALUE +TIMED_MUTEXES ON set global timed_mutexes=0; show variables like 'timed_mutexes'; Variable_name Value timed_mutexes OFF +select * from information_schema.session_variables where variable_name like 'timed_mutexes'; +VARIABLE_NAME 
VARIABLE_VALUE +TIMED_MUTEXES OFF set storage_engine=MYISAM, storage_engine="HEAP", global storage_engine="MERGE"; show local variables like 'storage_engine'; Variable_name Value storage_engine MEMORY +select * from information_schema.session_variables where variable_name like 'storage_engine'; +VARIABLE_NAME VARIABLE_VALUE +STORAGE_ENGINE MEMORY show global variables like 'storage_engine'; Variable_name Value storage_engine MRG_MYISAM +select * from information_schema.global_variables where variable_name like 'storage_engine'; +VARIABLE_NAME VARIABLE_VALUE +STORAGE_ENGINE MRG_MYISAM set GLOBAL query_cache_size=100000; set GLOBAL myisam_max_sort_file_size=2000000; show global variables like 'myisam_max_sort_file_size'; Variable_name Value myisam_max_sort_file_size 1048576 +select * from information_schema.global_variables where variable_name like 'myisam_max_sort_file_size'; +VARIABLE_NAME VARIABLE_VALUE +MYISAM_MAX_SORT_FILE_SIZE 1048576 set GLOBAL myisam_max_sort_file_size=default; show variables like 'myisam_max_sort_file_size'; Variable_name Value myisam_max_sort_file_size FILE_SIZE +select * from information_schema.session_variables where variable_name like 'myisam_max_sort_file_size'; +VARIABLE_NAME VARIABLE_VALUE +MYISAM_MAX_SORT_FILE_SIZE FILE_SIZE set global net_retry_count=10, session net_retry_count=10; set global net_buffer_length=1024, net_write_timeout=200, net_read_timeout=300; set session net_buffer_length=2048, net_write_timeout=500, net_read_timeout=600; @@ -194,12 +236,24 @@ net_buffer_length 1024 net_read_timeout 300 net_retry_count 10 net_write_timeout 200 +select * from information_schema.global_variables where variable_name like 'net_%'; +VARIABLE_NAME VARIABLE_VALUE +NET_BUFFER_LENGTH 1024 +NET_READ_TIMEOUT 300 +NET_RETRY_COUNT 10 +NET_WRITE_TIMEOUT 200 show session variables like 'net_%'; Variable_name Value net_buffer_length 2048 net_read_timeout 600 net_retry_count 10 net_write_timeout 500 +select * from information_schema.session_variables where variable_name like 'net_%'; +VARIABLE_NAME VARIABLE_VALUE +NET_BUFFER_LENGTH 2048 +NET_READ_TIMEOUT 600 +NET_RETRY_COUNT 10 +NET_WRITE_TIMEOUT 500 set session net_buffer_length=8000, global net_read_timeout=900, net_write_timeout=1000; show global variables like 'net_%'; Variable_name Value @@ -207,24 +261,45 @@ net_buffer_length 1024 net_read_timeout 900 net_retry_count 10 net_write_timeout 1000 +select * from information_schema.global_variables where variable_name like 'net_%'; +VARIABLE_NAME VARIABLE_VALUE +NET_BUFFER_LENGTH 1024 +NET_READ_TIMEOUT 900 +NET_RETRY_COUNT 10 +NET_WRITE_TIMEOUT 1000 show session variables like 'net_%'; Variable_name Value net_buffer_length 7168 net_read_timeout 600 net_retry_count 10 net_write_timeout 500 +select * from information_schema.session_variables where variable_name like 'net_%'; +VARIABLE_NAME VARIABLE_VALUE +NET_BUFFER_LENGTH 7168 +NET_READ_TIMEOUT 600 +NET_RETRY_COUNT 10 +NET_WRITE_TIMEOUT 500 set net_buffer_length=1; show variables like 'net_buffer_length'; Variable_name Value net_buffer_length 1024 +select * from information_schema.session_variables where variable_name like 'net_buffer_length'; +VARIABLE_NAME VARIABLE_VALUE +NET_BUFFER_LENGTH 1024 set net_buffer_length=2000000000; show variables like 'net_buffer_length'; Variable_name Value net_buffer_length 1048576 +select * from information_schema.session_variables where variable_name like 'net_buffer_length'; +VARIABLE_NAME VARIABLE_VALUE +NET_BUFFER_LENGTH 1048576 set character set cp1251_koi8; show variables like 
"character_set_client"; Variable_name Value character_set_client cp1251 +select * from information_schema.session_variables where variable_name like 'character_set_client'; +VARIABLE_NAME VARIABLE_VALUE +CHARACTER_SET_CLIENT cp1251 select @@timestamp>0; @@timestamp>0 1 @@ -239,6 +314,13 @@ query_prealloc_size 8192 range_alloc_block_size 2048 transaction_alloc_block_size 8192 transaction_prealloc_size 4096 +select * from information_schema.session_variables where variable_name like '%alloc%'; +VARIABLE_NAME VARIABLE_VALUE +QUERY_ALLOC_BLOCK_SIZE 8192 +QUERY_PREALLOC_SIZE 8192 +RANGE_ALLOC_BLOCK_SIZE 2048 +TRANSACTION_ALLOC_BLOCK_SIZE 8192 +TRANSACTION_PREALLOC_SIZE 4096 set @@range_alloc_block_size=1024*16; set @@query_alloc_block_size=1024*17+2; set @@query_prealloc_size=1024*18; @@ -254,6 +336,13 @@ query_prealloc_size 18432 range_alloc_block_size 16384 transaction_alloc_block_size 19456 transaction_prealloc_size 20480 +select * from information_schema.session_variables where variable_name like '%alloc%'; +VARIABLE_NAME VARIABLE_VALUE +QUERY_ALLOC_BLOCK_SIZE 17408 +QUERY_PREALLOC_SIZE 18432 +RANGE_ALLOC_BLOCK_SIZE 16384 +TRANSACTION_ALLOC_BLOCK_SIZE 19456 +TRANSACTION_PREALLOC_SIZE 20480 set @@range_alloc_block_size=default; set @@query_alloc_block_size=default, @@query_prealloc_size=default; set transaction_alloc_block_size=default, @@transaction_prealloc_size=default; @@ -264,6 +353,13 @@ query_prealloc_size 8192 range_alloc_block_size 2048 transaction_alloc_block_size 8192 transaction_prealloc_size 4096 +select * from information_schema.session_variables where variable_name like '%alloc%'; +VARIABLE_NAME VARIABLE_VALUE +QUERY_ALLOC_BLOCK_SIZE 8192 +QUERY_PREALLOC_SIZE 8192 +RANGE_ALLOC_BLOCK_SIZE 2048 +TRANSACTION_ALLOC_BLOCK_SIZE 8192 +TRANSACTION_PREALLOC_SIZE 4096 SELECT @@version LIKE 'non-existent'; @@version LIKE 'non-existent' 0 @@ -485,6 +581,9 @@ set global myisam_max_sort_file_size=4294967296; show global variables like 'myisam_max_sort_file_size'; Variable_name Value myisam_max_sort_file_size MAX_FILE_SIZE +select * from information_schema.global_variables where variable_name like 'myisam_max_sort_file_size'; +VARIABLE_NAME VARIABLE_VALUE +MYISAM_MAX_SORT_FILE_SIZE MAX_FILE_SIZE set global myisam_max_sort_file_size=default; select @@global.max_user_connections,@@local.max_join_size; @@global.max_user_connections @@local.max_join_size @@ -524,18 +623,30 @@ set @tstlw = @@log_warnings; show global variables like 'log_warnings'; Variable_name Value log_warnings 1 +select * from information_schema.global_variables where variable_name like 'log_warnings'; +VARIABLE_NAME VARIABLE_VALUE +LOG_WARNINGS 1 set global log_warnings = 0; show global variables like 'log_warnings'; Variable_name Value log_warnings 0 +select * from information_schema.global_variables where variable_name like 'log_warnings'; +VARIABLE_NAME VARIABLE_VALUE +LOG_WARNINGS 0 set global log_warnings = 42; show global variables like 'log_warnings'; Variable_name Value log_warnings 42 +select * from information_schema.global_variables where variable_name like 'log_warnings'; +VARIABLE_NAME VARIABLE_VALUE +LOG_WARNINGS 42 set global log_warnings = @tstlw; show global variables like 'log_warnings'; Variable_name Value log_warnings 1 +select * from information_schema.global_variables where variable_name like 'log_warnings'; +VARIABLE_NAME VARIABLE_VALUE +LOG_WARNINGS 1 create table t1 ( c1 tinyint, c2 smallint, @@ -567,10 +678,16 @@ SET GLOBAL MYISAM_DATA_POINTER_SIZE= 7; SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE'; 
Variable_name Value myisam_data_pointer_size 7 +SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'MYISAM_DATA_POINTER_SIZE'; +VARIABLE_NAME VARIABLE_VALUE +MYISAM_DATA_POINTER_SIZE 7 SET GLOBAL table_open_cache=-1; SHOW VARIABLES LIKE 'table_open_cache'; Variable_name Value table_open_cache 1 +SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'table_open_cache'; +VARIABLE_NAME VARIABLE_VALUE +TABLE_OPEN_CACHE 1 SET GLOBAL table_open_cache=DEFAULT; set character_set_results=NULL; select ifnull(@@character_set_results,"really null"); @@ -639,21 +756,36 @@ set @@sql_big_selects = 1; show variables like 'sql_big_selects'; Variable_name Value sql_big_selects ON +select * from information_schema.session_variables where variable_name like 'sql_big_selects'; +VARIABLE_NAME VARIABLE_VALUE +SQL_BIG_SELECTS ON set @@sql_big_selects = @old_sql_big_selects; set @@sql_notes = 0, @@sql_warnings = 0; show variables like 'sql_notes'; Variable_name Value sql_notes OFF +select * from information_schema.session_variables where variable_name like 'sql_notes'; +VARIABLE_NAME VARIABLE_VALUE +SQL_NOTES OFF show variables like 'sql_warnings'; Variable_name Value sql_warnings OFF +select * from information_schema.session_variables where variable_name like 'sql_warnings'; +VARIABLE_NAME VARIABLE_VALUE +SQL_WARNINGS OFF set @@sql_notes = 1, @@sql_warnings = 1; show variables like 'sql_notes'; Variable_name Value sql_notes ON +select * from information_schema.session_variables where variable_name like 'sql_notes'; +VARIABLE_NAME VARIABLE_VALUE +SQL_NOTES ON show variables like 'sql_warnings'; Variable_name Value sql_warnings ON +select * from information_schema.session_variables where variable_name like 'sql_warnings'; +VARIABLE_NAME VARIABLE_VALUE +SQL_WARNINGS ON select @@system_time_zone; @@system_time_zone # @@ -667,12 +799,21 @@ select @@basedir, @@datadir, @@tmpdir; show variables like 'basedir'; Variable_name Value basedir # +select * from information_schema.session_variables where variable_name like 'basedir'; +VARIABLE_NAME VARIABLE_VALUE +BASEDIR # show variables like 'datadir'; Variable_name Value datadir # +select * from information_schema.session_variables where variable_name like 'datadir'; +VARIABLE_NAME VARIABLE_VALUE +DATADIR # show variables like 'tmpdir'; Variable_name Value tmpdir # +select * from information_schema.session_variables where variable_name like 'tmpdir'; +VARIABLE_NAME VARIABLE_VALUE +TMPDIR # select @@ssl_ca, @@ssl_capath, @@ssl_cert, @@ssl_cipher, @@ssl_key; @@ssl_ca @@ssl_capath @@ssl_cert @@ssl_cipher @@ssl_key # # # # # @@ -683,12 +824,22 @@ ssl_capath # ssl_cert # ssl_cipher # ssl_key # +select * from information_schema.session_variables where variable_name like 'ssl%'; +VARIABLE_NAME VARIABLE_VALUE +SSL_CA # +SSL_CAPATH # +SSL_CERT # +SSL_CIPHER # +SSL_KEY # select @@log_queries_not_using_indexes; @@log_queries_not_using_indexes 0 show variables like 'log_queries_not_using_indexes'; Variable_name Value log_queries_not_using_indexes OFF +select * from information_schema.session_variables where variable_name like 'log_queries_not_using_indexes'; +VARIABLE_NAME VARIABLE_VALUE +LOG_QUERIES_NOT_USING_INDEXES OFF select @@""; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '""' at line 1 select @@&; diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 7e14874a3e6..2fa467727f1 100644 --- a/mysql-test/r/view.result +++ 
b/mysql-test/r/view.result @@ -49,7 +49,7 @@ select v1.b from v1; ERROR 42S22: Unknown column 'v1.b' in 'field list' explain extended select c from v1; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 +1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00 Warnings: Note 1003 select (`test`.`t1`.`b` + 1) AS `c` from `test`.`t1` create algorithm=temptable view v2 (c) as select b+1 from t1; @@ -83,7 +83,7 @@ c 12 explain extended select c from v3; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 +1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00 Warnings: Note 1003 select ((`test`.`t1`.`b` + 1) + 1) AS `c` from `test`.`t1` create algorithm=temptable view v4 (c) as select c+1 from v2; @@ -376,7 +376,7 @@ c 30 explain extended select * from v1; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using where +1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00 Using where Warnings: Note 1003 select `test`.`t1`.`b` AS `c` from `test`.`t1` where (`test`.`t1`.`a` < 3) update v1 set c=c+1; @@ -472,11 +472,11 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; insert into v3 values (-60,4,30); -ERROR HY000: The target table v3 of the INSERT is not updatable +ERROR HY000: The target table v3 of the INSERT is not insertable-into insert into v4 values (-60,4,30); -ERROR HY000: The target table v4 of the INSERT is not updatable +ERROR HY000: The target table v4 of the INSERT is not insertable-into insert into v5 values (-60,4,30); -ERROR HY000: The target table v5 of the INSERT is not updatable +ERROR HY000: The target table v5 of the INSERT is not insertable-into insert into v1 values (-60,4,30); insert into v1 (z,y,x) values (50,6,-100); insert into v2 values (5,40); @@ -499,11 +499,11 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; insert into v3 select c, b, a from t2; -ERROR HY000: The target table v3 of the INSERT is not updatable +ERROR HY000: The target table v3 of the INSERT is not insertable-into insert into v4 select c, b, a from t2; -ERROR HY000: The target table v4 of the INSERT is not updatable +ERROR HY000: The target table v4 of the INSERT is not insertable-into insert into v5 select c, b, a from t2; -ERROR HY000: The target table v5 of the INSERT is not updatable +ERROR HY000: The target table v5 of the INSERT is not insertable-into insert into v1 select c, b, a from t2; insert into v1 (z,y,x) select a+20,b+2,-100 from t2; insert into v2 select b+1, a+10 from t2; @@ -1306,9 +1306,9 @@ a b delete from t1; load data infile '../std_data_ln/loaddata3.dat' ignore into table v1 fields terminated by '' enclosed by '' ignore 1 lines; Warnings: -Warning 1264 Out of range value for column 'a' at row 3 +Warning 1366 Incorrect integer value: 'error ' for column 'a' at row 3 Error 1369 CHECK OPTION failed 'test.v1' -Warning 1264 Out of range value for column 'a' at row 4 +Warning 1366 Incorrect integer value: 'wrong end ' for column 'a' at row 4 Error 1369 CHECK OPTION failed 'test.v1' select * from t1 order by a,b; a b @@ -1352,14 +1352,14 @@ drop table t1; create table t1 (s1 smallint); create view v1 as select * from t1 where 20 < (select (s1) from t1); 
insert into v1 values (30); -ERROR HY000: The target table v1 of the INSERT is not updatable +ERROR HY000: The target table v1 of the INSERT is not insertable-into create view v2 as select * from t1; create view v3 as select * from t1 where 20 < (select (s1) from v2); insert into v3 values (30); -ERROR HY000: The target table v3 of the INSERT is not updatable +ERROR HY000: The target table v3 of the INSERT is not insertable-into create view v4 as select * from v2 where 20 < (select (s1) from t1); insert into v4 values (30); -ERROR HY000: The target table v4 of the INSERT is not updatable +ERROR HY000: The target table v4 of the INSERT is not insertable-into drop view v4, v3, v2, v1; drop table t1; create table t1 (a int); @@ -1391,9 +1391,9 @@ a a b 4 NULL NULL explain extended select * from t3 left join v3 on (t3.a = v3.a); id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t3 ALL NULL NULL NULL NULL 3 100.00 -1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 -1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 100.00 +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Warnings: Note 1003 select `test`.`t3`.`a` AS `a`,`test`.`t1`.`a` AS `a`,`test`.`t2`.`a` AS `b` from `test`.`t3` left join (`test`.`t1` left join `test`.`t2` on((`test`.`t1`.`a` = `test`.`t2`.`a`))) on((`test`.`t3`.`a` = `test`.`t1`.`a`)) where 1 create view v1 (a) as select a from t1; @@ -1406,9 +1406,9 @@ a a b 4 NULL NULL explain extended select * from t3 left join v4 on (t3.a = v4.a); id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t3 ALL NULL NULL NULL NULL 3 100.00 -1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 -1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 100.00 +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Warnings: Note 1003 select `test`.`t3`.`a` AS `a`,`test`.`t1`.`a` AS `a`,`test`.`t2`.`a` AS `b` from `test`.`t3` left join (`test`.`t1` left join (`test`.`t2`) on((`test`.`t1`.`a` = `test`.`t2`.`a`))) on((`test`.`t3`.`a` = `test`.`t1`.`a`)) where 1 prepare stmt1 from "select * from t3 left join v4 on (t3.a = v4.a);"; @@ -2321,12 +2321,12 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ref a a 10 const,test.t1.b 2 Using where; Using index EXPLAIN SELECT * FROM v1 WHERE a=1; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ref a a 5 const 1 Using where; Using index -1 PRIMARY t2 ref a a 10 const,test.t1.b 2 Using where; Using index +1 SIMPLE t1 ref a a 5 const 1 Using where; Using index +1 SIMPLE t2 ref a a 10 const,test.t1.b 2 Using where; Using index EXPLAIN SELECT * FROM v2 WHERE a=1; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ref a a 5 const 1 Using where; Using index -1 PRIMARY t3 ALL NULL NULL NULL NULL 3 Using where +1 SIMPLE t1 ref a a 5 const 1 Using where; Using index +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where DROP VIEW v1,v2; DROP TABLE t1,t2,t3; create table t1 (f1 int); @@ -2409,7 +2409,7 @@ insert into t1 values (1),(2); create view v1 as select * from t1; explain select id from v1 order by id; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 index NULL PRIMARY 4 NULL 2 Using index +1 SIMPLE t1 index NULL PRIMARY 4 NULL 2 Using index drop view v1; drop table t1; create table t1(f1 int, f2 int); @@ -2480,7 +2480,7 @@ id select_type 
table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away EXPLAIN SELECT MAX(a) FROM v1; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away SELECT MIN(a) FROM t1; MIN(a) 0 @@ -2492,7 +2492,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away EXPLAIN SELECT MIN(a) FROM v1; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away DROP VIEW v1; DROP TABLE t1; CREATE TABLE t1 (x varchar(10)); @@ -2586,13 +2586,13 @@ INSERT INTO t1 VALUES (4, '2005-01-03'), (5, '2005-01-04'), (6, '2005-01-05'), (7, '2005-01-05'), (8, '2005-01-05'), (9, '2005-01-06'); CREATE VIEW v1 AS SELECT * FROM t1; -SELECT * FROM t1 WHERE td BETWEEN '2005.01.02' AND '2005.01.04'; +SELECT * FROM t1 WHERE td BETWEEN CAST('2005.01.02' AS DATE) AND CAST('2005.01.04' AS DATE); id td 2 2005-01-02 3 2005-01-02 4 2005-01-03 5 2005-01-04 -SELECT * FROM v1 WHERE td BETWEEN '2005.01.02' AND '2005.01.04'; +SELECT * FROM v1 WHERE td BETWEEN CAST('2005.01.02' AS DATE) AND CAST('2005.01.04' AS DATE); id td 2 2005-01-02 3 2005-01-02 @@ -2911,9 +2911,49 @@ INSERT INTO v2 VALUES (0); RETURN 0; END | SELECT f2(); -ERROR HY000: The target table v2 of the INSERT is not updatable +ERROR HY000: The target table v2 of the INSERT is not insertable-into DROP FUNCTION f1; DROP FUNCTION f2; DROP VIEW v1, v2; DROP TABLE t1; +CREATE TABLE t1 (s1 int); +CREATE VIEW v1 AS SELECT * FROM t1; +EXPLAIN SELECT * FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 system NULL NULL NULL NULL 0 const row not found +EXPLAIN SELECT * FROM v1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 system NULL NULL NULL NULL 0 const row not found +INSERT INTO t1 VALUES (1), (3), (2); +EXPLAIN SELECT * FROM t1 t WHERE t.s1+1 < (SELECT MAX(t1.s1) FROM t1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t ALL NULL NULL NULL NULL 3 Using where +2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 +EXPLAIN SELECT * FROM v1 t WHERE t.s1+1 < (SELECT MAX(t1.s1) FROM t1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 +DROP VIEW v1; +DROP TABLE t1; +create table t1 (s1 int); +create view v1 as select s1 as a, s1 as b from t1; +insert into v1 values (1,1); +ERROR HY000: The target table v1 of the INSERT is not insertable-into +update v1 set a = 5; +drop view v1; +drop table t1; +CREATE TABLE t1(pk int PRIMARY KEY); +CREATE TABLE t2(pk int PRIMARY KEY, fk int, ver int, org int); +CREATE ALGORITHM=MERGE VIEW v1 AS +SELECT t1.* +FROM t1 JOIN t2 +ON t2.fk = t1.pk AND +t2.ver = (SELECT MAX(t.ver) FROM t2 t WHERE t.org = t2.org); +SHOW WARNINGS; +Level Code Message +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=MERGE DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`pk` AS `pk` from (`t1` join `t2` on(((`t2`.`fk` = `t1`.`pk`) and (`t2`.`ver` = (select max(`t`.`ver`) AS `MAX(t.ver)` from `t2` `t` where (`t`.`org` = `t2`.`org`)))))) +DROP VIEW v1; +DROP TABLE t1, t2; End of 5.0 
tests. diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result index 30e9d1010ba..f325d1ed7b6 100644 --- a/mysql-test/r/view_grant.result +++ b/mysql-test/r/view_grant.result @@ -105,7 +105,7 @@ ERROR 42000: SHOW VIEW command denied to user 'mysqltest_1'@'localhost' for tabl grant select on mysqltest.t1 to mysqltest_1@localhost; explain select c from mysqltest.v1; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found +1 SIMPLE t1 system NULL NULL NULL NULL 0 const row not found show create view mysqltest.v1; ERROR 42000: SHOW VIEW command denied to user 'mysqltest_1'@'localhost' for table 'v1' explain select c from mysqltest.v2; @@ -125,7 +125,7 @@ ERROR 42000: SHOW VIEW command denied to user 'mysqltest_1'@'localhost' for tabl grant show view on mysqltest.* to mysqltest_1@localhost; explain select c from mysqltest.v1; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found +1 SIMPLE t1 system NULL NULL NULL NULL 0 const row not found show create view mysqltest.v1; View Create View v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `mysqltest`.`v1` AS select (`mysqltest`.`t1`.`a` + 1) AS `c`,(`mysqltest`.`t1`.`b` + 1) AS `d` from `mysqltest`.`t1` @@ -138,7 +138,7 @@ View Create View v2 CREATE ALGORITHM=TEMPTABLE DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `mysqltest`.`v2` AS select (`mysqltest`.`t1`.`a` + 1) AS `c`,(`mysqltest`.`t1`.`b` + 1) AS `d` from `mysqltest`.`t1` explain select c from mysqltest.v3; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t2 system NULL NULL NULL NULL 0 const row not found +1 SIMPLE t2 system NULL NULL NULL NULL 0 const row not found show create view mysqltest.v3; View Create View v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `mysqltest`.`v3` AS select (`mysqltest`.`t2`.`a` + 1) AS `c`,(`mysqltest`.`t2`.`b` + 1) AS `d` from `mysqltest`.`t2` diff --git a/mysql-test/r/warnings.result b/mysql-test/r/warnings.result index 26b3c1625aa..d07f90d41b9 100644 --- a/mysql-test/r/warnings.result +++ b/mysql-test/r/warnings.result @@ -31,19 +31,19 @@ Error 1064 You have an error in your SQL syntax; check the manual that correspon insert into t1 values (1); insert into t1 values ("hej"); Warnings: -Warning 1264 Out of range value for column 'a' at row 1 +Warning 1366 Incorrect integer value: 'hej' for column 'a' at row 1 insert into t1 values ("hej"),("då"); Warnings: -Warning 1264 Out of range value for column 'a' at row 1 -Warning 1264 Out of range value for column 'a' at row 2 +Warning 1366 Incorrect integer value: 'hej' for column 'a' at row 1 +Warning 1366 Incorrect integer value: 'd?' for column 'a' at row 2 set SQL_WARNINGS=1; insert into t1 values ("hej"); Warnings: -Warning 1264 Out of range value for column 'a' at row 1 +Warning 1366 Incorrect integer value: 'hej' for column 'a' at row 1 insert into t1 values ("hej"),("då"); Warnings: -Warning 1264 Out of range value for column 'a' at row 1 -Warning 1264 Out of range value for column 'a' at row 2 +Warning 1366 Incorrect integer value: 'hej' for column 'a' at row 1 +Warning 1366 Incorrect integer value: 'd?' 
for column 'a' at row 2 drop table t1; set SQL_WARNINGS=0; drop temporary table if exists not_exists; @@ -180,44 +180,44 @@ create table t1 (a int); insert into t1 (a) values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); update t1 set a='abc'; Warnings: -Warning 1264 Out of range value for column 'a' at row 1 -Warning 1264 Out of range value for column 'a' at row 2 -Warning 1264 Out of range value for column 'a' at row 3 -Warning 1264 Out of range value for column 'a' at row 4 -Warning 1264 Out of range value for column 'a' at row 5 -Warning 1264 Out of range value for column 'a' at row 6 -Warning 1264 Out of range value for column 'a' at row 7 -Warning 1264 Out of range value for column 'a' at row 8 -Warning 1264 Out of range value for column 'a' at row 9 -Warning 1264 Out of range value for column 'a' at row 10 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 1 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 2 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 3 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 4 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 5 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 6 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 7 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 8 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 9 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 10 show warnings limit 2, 1; Level Code Message -Warning 1264 Out of range value for column 'a' at row 3 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 3 show warnings limit 0, 10; Level Code Message -Warning 1264 Out of range value for column 'a' at row 1 -Warning 1264 Out of range value for column 'a' at row 2 -Warning 1264 Out of range value for column 'a' at row 3 -Warning 1264 Out of range value for column 'a' at row 4 -Warning 1264 Out of range value for column 'a' at row 5 -Warning 1264 Out of range value for column 'a' at row 6 -Warning 1264 Out of range value for column 'a' at row 7 -Warning 1264 Out of range value for column 'a' at row 8 -Warning 1264 Out of range value for column 'a' at row 9 -Warning 1264 Out of range value for column 'a' at row 10 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 1 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 2 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 3 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 4 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 5 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 6 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 7 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 8 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 9 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 10 show warnings limit 9, 1; Level Code Message -Warning 1264 Out of range value for column 'a' at row 10 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 10 show warnings limit 10, 1; Level Code Message show warnings limit 9, 2; Level Code Message -Warning 1264 Out of range value for column 'a' at row 10 +Warning 1366 Incorrect integer value: 'abc' for column 'a' at row 10 show warnings limit 0, 0; Level Code Message show warnings limit 1; Level Code Message -Warning 1264 Out of range value for column 'a' at row 1 +Warning 1366 
Incorrect integer value: 'abc' for column 'a' at row 1 show warnings limit 0; Level Code Message show warnings limit 1, 0; @@ -229,3 +229,59 @@ a select * from t1 limit 0, 0; a drop table t1; +End of 4.1 tests +CREATE TABLE t1( f1 CHAR(20) ); +CREATE TABLE t2( f1 CHAR(20), f2 CHAR(25) ); +CREATE TABLE t3( f1 CHAR(20), f2 CHAR(25), f3 DATE ); +INSERT INTO t1 VALUES ( 'a`' ); +INSERT INTO t2 VALUES ( 'a`', 'a`' ); +INSERT INTO t3 VALUES ( 'a`', 'a`', '1000-01-1' ); +DROP PROCEDURE IF EXISTS sp1; +Warnings: +Note 1305 PROCEDURE sp1 does not exist +DROP PROCEDURE IF EXISTS sp2; +Warnings: +Note 1305 PROCEDURE sp2 does not exist +DROP PROCEDURE IF EXISTS sp3; +Warnings: +Note 1305 PROCEDURE sp3 does not exist +CREATE PROCEDURE sp1() +BEGIN +DECLARE x NUMERIC ZEROFILL; +SELECT f1 INTO x FROM t1 LIMIT 1; +END// +CREATE PROCEDURE sp2() +BEGIN +DECLARE x NUMERIC ZEROFILL; +SELECT f1 INTO x FROM t2 LIMIT 1; +END// +CREATE PROCEDURE sp3() +BEGIN +DECLARE x NUMERIC ZEROFILL; +SELECT f1 INTO x FROM t3 LIMIT 1; +END// +CALL sp1(); +Warnings: +Warning 1366 Incorrect decimal value: 'a`' for column 'x' at row 1 +CALL sp2(); +Warnings: +Warning 1366 Incorrect decimal value: 'a`' for column 'x' at row 1 +CALL sp3(); +Warnings: +Warning 1366 Incorrect decimal value: 'a`' for column 'x' at row 1 +DROP PROCEDURE IF EXISTS sp1; +CREATE PROCEDURE sp1() +BEGIN +declare x numeric unsigned zerofill; +SELECT f1 into x from t2 limit 1; +END// +CALL sp1(); +Warnings: +Warning 1366 Incorrect decimal value: 'a`' for column 'x' at row 1 +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP PROCEDURE sp1; +DROP PROCEDURE sp2; +DROP PROCEDURE sp3; +End of 5.0 tests diff --git a/mysql-test/r/windows.result b/mysql-test/r/windows.result index e3241daf719..039c5b1476e 100644 --- a/mysql-test/r/windows.result +++ b/mysql-test/r/windows.result @@ -6,31 +6,3 @@ use prn; ERROR 42000: Unknown database 'prn' create table nu (a int); drop table nu; -CREATE TABLE `t1` ( -`TIM` datetime NOT NULL, -`VAL` double default NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1; -CREATE TABLE `t2` ( -`TIM` datetime NOT NULL, -`VAL` double default NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1; -CREATE TABLE `mt` ( -`TIM` datetime NOT NULL, -`VAL` double default NULL -) ENGINE=MRG_MyISAM DEFAULT CHARSET=latin1 INSERT_METHOD=LAST -UNION=(`t1`,`t2`); -INSERT INTO mt VALUES ('2006-01-01',0); -ALTER TABLE `t2` RENAME TO `t`; -INSERT INTO mt VALUES ('2006-01-01',0); -ERROR HY000: Can't lock file (errno: 155) -select * from mt; -ERROR HY000: Can't lock file (errno: 155) -FLUSH TABLES; -select * from mt; -ERROR HY000: Can't find file: 'mt' (errno: 2) -ALTER TABLE `t` RENAME TO `t2`; -INSERT INTO mt VALUES ('2006-01-01',0); -select * from mt; -TIM VAL -2006-01-01 00:00:00 0 -2006-01-01 00:00:00 0 diff --git a/mysql-test/r/xml.result b/mysql-test/r/xml.result index 780be01d7ce..efe7d14095d 100644 --- a/mysql-test/r/xml.result +++ b/mysql-test/r/xml.result @@ -570,7 +570,7 @@ select extractvalue('aB','a|/b'); extractvalue('aB','a|/b') a select extractvalue('A','/'); -ERROR HY000: XPATH syntax error: '' +ERROR HY000: XPATH error: comparison of two nodesets is not supported: '' select extractvalue('bb!','//b!'); ERROR HY000: XPATH syntax error: '!' 
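For contrast with the malformed-document and malformed-locator cases recorded in this xml.result file, ExtractValue over a well-formed fragment with a valid path still returns the text content; a minimal sketch, not part of the recorded results:

select extractValue('<a><b>ok</b></a>', '/a/b');
# returns: ok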
select extractvalue('ABC','/a/descendant::*'); @@ -710,3 +710,29 @@ Data select extractValue('DataOtherdata','/foo/something'); extractValue('DataOtherdata','/foo/something') Otherdata +select extractValue('<01>10:39:15<02>140','/zot/tim0/02'); +ERROR HY000: XPATH syntax error: '02' +select extractValue('<01>10:39:15<02>140','//*'); +extractValue('<01>10:39:15<02>140','//*') +NULL +Warnings: +Warning 1512 Incorrect XML value: 'parse error at line 1 pos 13: unknown token unexpected (ident or '/' wanted)' +select extractValue('<.>test','//*'); +extractValue('<.>test','//*') +NULL +Warnings: +Warning 1512 Incorrect XML value: 'parse error at line 1 pos 2: unknown token unexpected (ident or '/' wanted)' +select extractValue('<->test','//*'); +extractValue('<->test','//*') +NULL +Warnings: +Warning 1512 Incorrect XML value: 'parse error at line 1 pos 2: unknown token unexpected (ident or '/' wanted)' +select extractValue('<:>test','//*'); +extractValue('<:>test','//*') +test +select extractValue('<_>test','//*'); +extractValue('<_>test','//*') +test +select extractValue('test','//*'); +extractValue('test','//*') +test diff --git a/mysql-test/t/analyse.test b/mysql-test/t/analyse.test index a248c671c18..efcf5f6421c 100644 --- a/mysql-test/t/analyse.test +++ b/mysql-test/t/analyse.test @@ -90,5 +90,16 @@ create table t2 (country_id int primary key, country char(20) not null); insert into t2 values (1, 'USA'),(2,'India'), (3,'Finland'); select product, sum(profit),avg(profit) from t1 group by product with rollup procedure analyse(); drop table t1,t2; -# End of 4.1 tests +# +# Bug #20305 PROCEDURE ANALYSE() returns wrong M for FLOAT(M, D) and DOUBLE(M, D) +# + +create table t1 (f1 double(10,5), f2 char(10), f3 double(10,5)); +insert into t1 values (5.999, "5.9999", 5.99999), (9.555, "9.5555", 9.55555); +select f1 from t1 procedure analyse(1, 1); +select f2 from t1 procedure analyse(1, 1); +select f3 from t1 procedure analyse(1, 1); +drop table t1; + +--echo End of 4.1 tests diff --git a/mysql-test/t/archive.test b/mysql-test/t/archive.test index 294e7730e07..0dccd8f111a 100644 --- a/mysql-test/t/archive.test +++ b/mysql-test/t/archive.test @@ -3,7 +3,7 @@ # Taken FROM the select test # -- source include/have_archive.inc --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc --disable_warnings drop table if exists t1,t2,t3; diff --git a/mysql-test/t/binlog_statement_insert_delayed.test b/mysql-test/t/binlog_statement_insert_delayed.test new file mode 100644 index 00000000000..9b78296236f --- /dev/null +++ b/mysql-test/t/binlog_statement_insert_delayed.test @@ -0,0 +1,9 @@ +# This test is to verify replication with INSERT DELAY through +# unrecommended STATEMENT binlog format + +-- source include/not_embedded.inc +-- source include/have_binlog_format_statement.inc +-- disable_query_log +reset master; # get rid of previous tests binlog +-- enable_query_log +-- source extra/binlog_tests/binlog_insert_delayed.test diff --git a/mysql-test/t/binlog_stm_binlog.test b/mysql-test/t/binlog_stm_binlog.test index f22e7e45aea..280b7a3aef9 100644 --- a/mysql-test/t/binlog_stm_binlog.test +++ b/mysql-test/t/binlog_stm_binlog.test @@ -13,6 +13,6 @@ drop table t1; # For both statement and row based bin logs 9/19/2005 [jbm] -- source include/not_embedded.inc --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed.inc -- source extra/binlog_tests/binlog.test diff --git a/mysql-test/t/binlog_stm_blackhole.test 
b/mysql-test/t/binlog_stm_blackhole.test index 6047d8ca2fc..02ba2be095b 100644 --- a/mysql-test/t/binlog_stm_blackhole.test +++ b/mysql-test/t/binlog_stm_blackhole.test @@ -2,5 +2,5 @@ # For both statement and row based bin logs 9/19/2005 [jbm] -- source include/not_embedded.inc --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/blackhole.test diff --git a/mysql-test/t/binlog_stm_ctype_cp932.test b/mysql-test/t/binlog_stm_ctype_cp932.test index 436f95a2453..c0791d81445 100644 --- a/mysql-test/t/binlog_stm_ctype_cp932.test +++ b/mysql-test/t/binlog_stm_ctype_cp932.test @@ -2,5 +2,5 @@ # For both statement and row based bin logs 9/19/2005 [jbm] -- source include/not_embedded.inc --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/ctype_cp932.test diff --git a/mysql-test/t/binlog_stm_ctype_ucs.test b/mysql-test/t/binlog_stm_ctype_ucs.test index a32ac3155c7..c8cd7e06398 100644 --- a/mysql-test/t/binlog_stm_ctype_ucs.test +++ b/mysql-test/t/binlog_stm_ctype_ucs.test @@ -1,6 +1,6 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/ctype_ucs_binlog.test diff --git a/mysql-test/t/binlog_stm_drop_tmp_tbl.test b/mysql-test/t/binlog_stm_drop_tmp_tbl.test index e8acd00c779..6017f272d01 100644 --- a/mysql-test/t/binlog_stm_drop_tmp_tbl.test +++ b/mysql-test/t/binlog_stm_drop_tmp_tbl.test @@ -1,5 +1,5 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/drop_temp_table.test diff --git a/mysql-test/t/binlog_stm_innodb_stat.test b/mysql-test/t/binlog_stm_innodb_stat.test index c6017246e6d..a08039c4a41 100644 --- a/mysql-test/t/binlog_stm_innodb_stat.test +++ b/mysql-test/t/binlog_stm_innodb_stat.test @@ -1,5 +1,5 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/innodb_stat.test diff --git a/mysql-test/t/binlog_stm_insert_select.test b/mysql-test/t/binlog_stm_insert_select.test index 06792990a55..3aefa1e6cf7 100644 --- a/mysql-test/t/binlog_stm_insert_select.test +++ b/mysql-test/t/binlog_stm_insert_select.test @@ -1,5 +1,5 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/insert_select-binlog.test diff --git a/mysql-test/t/binlog_stm_mix_innodb_myisam.test b/mysql-test/t/binlog_stm_mix_innodb_myisam.test index 829cf50fe1e..cb6516a3a2f 100644 --- a/mysql-test/t/binlog_stm_mix_innodb_myisam.test +++ b/mysql-test/t/binlog_stm_mix_innodb_myisam.test @@ -1,7 +1,7 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] --- source 
include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/mix_innodb_myisam_binlog.test # This piece below cannot be put into diff --git a/mysql-test/t/innodb_cache-master.opt b/mysql-test/t/cache_innodb-master.opt similarity index 100% rename from mysql-test/t/innodb_cache-master.opt rename to mysql-test/t/cache_innodb-master.opt diff --git a/mysql-test/t/cache_innodb.test b/mysql-test/t/cache_innodb.test new file mode 100644 index 00000000000..33a328d1d6c --- /dev/null +++ b/mysql-test/t/cache_innodb.test @@ -0,0 +1,16 @@ +# t/cache_innodb.test +# +# Last update: +# 2006-07-26 ML test refactored (MySQL 5.1) +# main code t/innodb_cache.test --> include/query_cache.inc +# new wrapper t/cache_innodb.test +# + +--source include/have_query_cache.inc + +--source include/have_innodb.inc +let $engine_type= InnoDB; +# InnoDB supports FOREIGN KEYs +let $test_foreign_keys= 1; + +--source include/query_cache.inc diff --git a/mysql-test/t/innodb_concurrent-master.opt b/mysql-test/t/concurrent_innodb-master.opt similarity index 100% rename from mysql-test/t/innodb_concurrent-master.opt rename to mysql-test/t/concurrent_innodb-master.opt diff --git a/mysql-test/t/concurrent_innodb.test b/mysql-test/t/concurrent_innodb.test new file mode 100644 index 00000000000..5e9258af8f1 --- /dev/null +++ b/mysql-test/t/concurrent_innodb.test @@ -0,0 +1,20 @@ +# t/concurrent_innodb.test +# +# Concurrent InnoDB tests, mainly in UPDATE's +# Bug#3300 +# Designed and tested by Sinisa Milivojevic, sinisa@mysql.com +# +# two non-interfering UPDATE's not changing result set +# +# Last update: +# 2006-07-26 ML test refactored (MySQL 5.1) +# main code t/innodb_concurrent.test -> include/concurrent.inc +# new wrapper t/concurrent_innodb.test + +# test takes circa 5 minutes to run, so it's big +--source include/big_test.inc + +--source include/have_innodb.inc +let $engine_type= InnoDB; + +--source include/concurrent.inc diff --git a/mysql-test/t/crash_commit_before-master.opt b/mysql-test/t/crash_commit_before-master.opt new file mode 100644 index 00000000000..a745693594e --- /dev/null +++ b/mysql-test/t/crash_commit_before-master.opt @@ -0,0 +1,2 @@ +--skip-stack-trace --skip-core-file + diff --git a/mysql-test/t/crash_commit_before.test b/mysql-test/t/crash_commit_before.test index c46340d44dc..a10cf254a83 100644 --- a/mysql-test/t/crash_commit_before.test +++ b/mysql-test/t/crash_commit_before.test @@ -1,4 +1,9 @@ +# Don't test this under valgrind, memory leaks will occur +--source include/not_valgrind.inc + +# Binary must be compiled with debug for crash to occur --source include/have_debug.inc + --source include/have_innodb.inc CREATE TABLE t1(a int) engine=innodb; diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index 140cdccc218..aebee0b0c6f 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -670,6 +670,11 @@ alter table t1 max_rows=100000000000; show create table t1; drop table t1; +# +# Bug#21772: can not name a column 'upgrade' when create a table +# +create table t1 (upgrade int); +drop table t1; # End of 5.0 tests diff --git a/mysql-test/t/create_select_tmp.test b/mysql-test/t/create_select_tmp.test index 144bccc5871..ba9898b7752 100644 --- a/mysql-test/t/create_select_tmp.test +++ b/mysql-test/t/create_select_tmp.test @@ -6,7 +6,7 @@ # inconsistency between binlog and the internal list of temp tables. # This does not work for RBR yet. 
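The new wrapper tests introduced above (cache_innodb.test, concurrent_innodb.test, and later deadlock_innodb.test) all follow the same refactoring pattern: the shared test body lives in an include file and the per-engine wrapper only sets parameters before sourcing it. A hypothetical wrapper for another engine would look like this (sketch only; cache_memory.test is not part of this changeset):

# t/cache_memory.test (hypothetical)
--source include/have_query_cache.inc
let $engine_type= MEMORY;
# MEMORY tables have no FOREIGN KEY support
let $test_foreign_keys= 0;
--source include/query_cache.inc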
---source include/have_binlog_format_statement.inc +--source include/have_binlog_format_mixed_or_statement.inc -- source include/have_innodb.inc --disable_warnings diff --git a/mysql-test/t/csv.test b/mysql-test/t/csv.test index 9815da4fb55..f70b5b40766 100644 --- a/mysql-test/t/csv.test +++ b/mysql-test/t/csv.test @@ -1582,3 +1582,26 @@ select * from bug15205_2; select * from bug15205; drop table bug15205; drop table bug15205_2; + +# +# Bug#22080 "CHECK fails to identify some corruption" +# + +create table bug22080_1 (id int,string varchar(64)) Engine=CSV; +create table bug22080_2 (id int,string varchar(64)) Engine=CSV; +create table bug22080_3 (id int,string varchar(64)) Engine=CSV; +insert into bug22080_1 values(1,'string'); +insert into bug22080_1 values(2,'string'); +insert into bug22080_1 values(3,'string'); + +# Currupt the file as described in the bug report +--exec sed -e 's/"2"/2"/' $MYSQLTEST_VARDIR/master-data/test/bug22080_1.CSV > $MYSQLTEST_VARDIR/master-data/test/bug22080_2.CSV +--exec sed -e 's/2","/2",/' $MYSQLTEST_VARDIR/master-data/test/bug22080_1.CSV > $MYSQLTEST_VARDIR/master-data/test/bug22080_3.CSV + +--exec cat $MYSQLTEST_VARDIR/master-data/test/bug22080_2.CSV +check table bug22080_2; + +--exec cat $MYSQLTEST_VARDIR/master-data/test/bug22080_3.CSV +check table bug22080_3; + +drop tables bug22080_1,bug22080_2,bug22080_3; diff --git a/mysql-test/t/ctype_cp932_binlog_stm.test b/mysql-test/t/ctype_cp932_binlog_stm.test index 7a0ac671417..6b591fbe5f5 100644 --- a/mysql-test/t/ctype_cp932_binlog_stm.test +++ b/mysql-test/t/ctype_cp932_binlog_stm.test @@ -1,7 +1,7 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 11/07/2005 [jbm] --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/binlog_tests/ctype_cp932_binlog.test # diff --git a/mysql-test/t/ctype_gbk.test b/mysql-test/t/ctype_gbk.test index 7aec48586d8..5ff138fa97b 100644 --- a/mysql-test/t/ctype_gbk.test +++ b/mysql-test/t/ctype_gbk.test @@ -42,3 +42,13 @@ DROP TABLE t1; select hex(convert(_gbk 0xA14041 using ucs2)); # End of 4.1 tests + +# +# Bug#21620 ALTER TABLE affects other columns +# +create table t1 (c1 text not null, c2 text not null) character set gbk; +alter table t1 change c1 c1 mediumtext character set gbk not null; +show create table t1; +drop table t1; + +--echo End of 5.0 tests diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test index 8116d39e3db..6c814368c88 100644 --- a/mysql-test/t/ctype_ucs.test +++ b/mysql-test/t/ctype_ucs.test @@ -484,6 +484,27 @@ select make_set(3, name, upper(name)) from bug20536; select export_set(5, name, upper(name)) from bug20536; select export_set(5, name, upper(name), ",", 5) from bug20536; +# +# Bug #20108: corrupted default enum value for a ucs2 field +# + +CREATE TABLE t1 ( + status enum('active','passive') collate latin1_general_ci + NOT NULL default 'passive' +); +SHOW CREATE TABLE t1; +ALTER TABLE t1 ADD a int NOT NULL AFTER status; + +CREATE TABLE t2 ( + status enum('active','passive') collate ucs2_turkish_ci + NOT NULL default 'passive' +); +SHOW CREATE TABLE t2; +ALTER TABLE t2 ADD a int NOT NULL AFTER status; + +DROP TABLE t1,t2; + + # Some broken functions: add these tests just to document current behavior. 
# PASSWORD and OLD_PASSWORD don't work with UCS2 strings, but to fix it would diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test index 54ded650a1e..ecf118dae59 100644 --- a/mysql-test/t/ctype_utf8.test +++ b/mysql-test/t/ctype_utf8.test @@ -1070,21 +1070,15 @@ select a from t1 group by a; drop table t1; # -# Bug#20393: User name truncation in mysql client -# Bug#21432: Database/Table name limited to 64 bytes, not chars, problems with multi-byte +# Bug #20204: "order by" changes the results returned # -set names utf8; -#create user юзер_юзер@localhost; -grant select on test.* to юзер_юзер@localhost; ---exec $MYSQL --default-character-set=utf8 --user=юзер_юзер -e "select user()" -revoke all on test.* from юзер_юзер@localhost; -drop user юзер_юзер@localhost; -create database имÑ_базы_в_кодировке_утф8_длиной_больше_чем_45; -use имÑ_базы_в_кодировке_утф8_длиной_больше_чем_45; -select database(); -drop database имÑ_базы_в_кодировке_утф8_длиной_больше_чем_45; -use test; +create table t1(a char(10)) default charset utf8; +insert into t1 values ('123'), ('456'); +explain + select substr(Z.a,-1), Z.a from t1 as Y join t1 as Z on Y.a=Z.a order by 1; +select substr(Z.a,-1), Z.a from t1 as Y join t1 as Z on Y.a=Z.a order by 1; +drop table t1; # End of 4.1 tests @@ -1164,3 +1158,23 @@ execute my_stmt using @a; set @a:=null; execute my_stmt using @a; drop table if exists t1; + +# +# Bug#19960: Inconsistent results when joining +# InnoDB tables using partial UTF8 indexes +# +CREATE TABLE t1 ( + colA int(11) NOT NULL, + colB varchar(255) character set utf8 NOT NULL, + PRIMARY KEY (colA) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO t1 (colA, colB) VALUES (1, 'foo'), (2, 'foo bar'); +CREATE TABLE t2 ( + colA int(11) NOT NULL, + colB varchar(255) character set utf8 NOT NULL, + KEY bad (colA,colB(3)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO t2 (colA, colB) VALUES (1, 'foo'),(2, 'foo bar'); +SELECT * FROM t1 JOIN t2 ON t1.colA=t2.colA AND t1.colB=t2.colB +WHERE t1.colA < 3; +DROP TABLE t1, t2; diff --git a/mysql-test/t/date_formats.test b/mysql-test/t/date_formats.test index 6898cd5802d..c19ebeb2e28 100644 --- a/mysql-test/t/date_formats.test +++ b/mysql-test/t/date_formats.test @@ -6,9 +6,9 @@ drop table if exists t1; --enable_warnings ---replace_result ROW STATEMENT +--replace_result ROW STATEMENT MIXED SHOW GLOBAL VARIABLES LIKE "%_format%"; ---replace_result ROW STATEMENT +--replace_result ROW STATEMENT MIXED SHOW SESSION VARIABLES LIKE "%_format%"; # @@ -36,7 +36,7 @@ set datetime_format= '%H:%i:%s.%f %m-%d-%Y'; set datetime_format= '%h:%i:%s %p %Y-%m-%d'; set datetime_format= '%h:%i:%s.%f %p %Y-%m-%d'; ---replace_result ROW STATEMENT +--replace_result ROW STATEMENT MIXED SHOW SESSION VARIABLES LIKE "%format"; --error 1231 diff --git a/mysql-test/t/deadlock_innodb.test b/mysql-test/t/deadlock_innodb.test new file mode 100644 index 00000000000..08e3e256790 --- /dev/null +++ b/mysql-test/t/deadlock_innodb.test @@ -0,0 +1,16 @@ +# t/deadlock_innodb.test +# +# +# Last update: +# 2006-07-26 ML test refactored (MySQL 5.1) +# main code --> include/deadlock.inc +# new wrapper t/deadlock_innodb.test +# + +# Can't test this with embedded server +-- source include/not_embedded.inc + +--source include/have_innodb.inc +let $engine_type= InnoDB; + +--source include/deadlock.inc diff --git a/mysql-test/t/delayed.test b/mysql-test/t/delayed.test index 55e8f81f763..03d8e20dd8f 100644 --- a/mysql-test/t/delayed.test +++ b/mysql-test/t/delayed.test @@ -17,7 +17,8 @@ insert delayed 
into t1 set a = 4; insert delayed into t1 set a = 5, tmsp = 19711006010203; insert delayed into t1 (a, tmsp) values (6, 19711006010203); insert delayed into t1 (a, tmsp) values (7, NULL); ---sleep 2 +# Wait until the rows are flushed to the table files. +FLUSH TABLE t1; insert into t1 set a = 8,tmsp=19711006010203; select * from t1 where tmsp=0; select * from t1 where tmsp=19711006010203; @@ -34,8 +35,8 @@ insert delayed into t1 values (null,"c"); insert delayed into t1 values (3,"d"),(null,"e"); --error 1136 insert delayed into t1 values (3,"this will give an","error"); -# 2 was not enough for --ps-protocol ---sleep 4 +# Wait until the rows are flushed to the table files. +FLUSH TABLE t1; show status like 'not_flushed_delayed_rows'; select * from t1; drop table t1; @@ -92,10 +93,145 @@ insert delayed into t1 values(null); # Works, since the delayed-counter is 8, which is unused insert delayed into t1 values(null); +# Wait until the rows are flushed to the table files. +FLUSH TABLE t1; # Check what we have now -# must wait so that the delayed thread finishes -# Note: this must be increased if the test fails ---sleep 1 select * from t1 order by a; DROP TABLE t1; + +# +# Bug#20627 - INSERT DELAYED does not honour auto_increment_* variables +# +SET @bug20627_old_auto_increment_offset= + @@auto_increment_offset= 2; +SET @bug20627_old_auto_increment_increment= + @@auto_increment_increment= 3; +SET @bug20627_old_session_auto_increment_offset= + @@session.auto_increment_offset= 4; +SET @bug20627_old_session_auto_increment_increment= + @@session.auto_increment_increment= 5; +SET @@auto_increment_offset= 2; +SET @@auto_increment_increment= 3; +SET @@session.auto_increment_offset= 4; +SET @@session.auto_increment_increment= 5; +# +# Normal insert as reference. +CREATE TABLE t1 ( + c1 INT NOT NULL AUTO_INCREMENT, + PRIMARY KEY (c1) + ); +INSERT INTO t1 VALUES (NULL),(NULL),(NULL); +# Check what we have now +SELECT * FROM t1; +DROP TABLE t1; +# +# Delayed insert. +CREATE TABLE t1 ( + c1 INT NOT NULL AUTO_INCREMENT, + PRIMARY KEY (c1) + ); +INSERT DELAYED INTO t1 VALUES (NULL),(NULL),(NULL); +# Wait until the rows are flushed to the table files. +FLUSH TABLE t1; +# Check what we have now +SELECT * FROM t1; +DROP TABLE t1; +# +# Cleanup +SET @@auto_increment_offset= + @bug20627_old_auto_increment_offset; +SET @@auto_increment_increment= + @bug20627_old_auto_increment_increment; +SET @@session.auto_increment_offset= + @bug20627_old_session_auto_increment_offset; +SET @@session.auto_increment_increment= + @bug20627_old_session_auto_increment_increment; + +# +# Bug#20830 - INSERT DELAYED does not honour SET INSERT_ID +# +SET @bug20830_old_auto_increment_offset= + @@auto_increment_offset= 2; +SET @bug20830_old_auto_increment_increment= + @@auto_increment_increment= 3; +SET @bug20830_old_session_auto_increment_offset= + @@session.auto_increment_offset= 4; +SET @bug20830_old_session_auto_increment_increment= + @@session.auto_increment_increment= 5; +SET @@auto_increment_offset= 2; +SET @@auto_increment_increment= 3; +SET @@session.auto_increment_offset= 4; +SET @@session.auto_increment_increment= 5; +# +# Normal insert as reference. +CREATE TABLE t1 ( + c1 INT(11) NOT NULL AUTO_INCREMENT, + c2 INT(11) DEFAULT NULL, + PRIMARY KEY (c1) + ); +SET insert_id= 14; +INSERT INTO t1 VALUES(NULL, 11), (NULL, 12), (NULL, 13); +INSERT INTO t1 VALUES(NULL, 21), (NULL, 22), (NULL, 23); +# Restart sequence at a different value. 
+INSERT INTO t1 VALUES( 69, 31), (NULL, 32), (NULL, 33); +INSERT INTO t1 VALUES(NULL, 41), (NULL, 42), (NULL, 43); +# Restart sequence at a different value. +SET insert_id= 114; +INSERT INTO t1 VALUES(NULL, 51), (NULL, 52), (NULL, 53); +INSERT INTO t1 VALUES(NULL, 61), (NULL, 62), (NULL, 63); +# Set one value below the maximum value. +INSERT INTO t1 VALUES( 49, 71), (NULL, 72), (NULL, 73); +INSERT INTO t1 VALUES(NULL, 81), (NULL, 82), (NULL, 83); +# Create a duplicate value. +SET insert_id= 114; +--error 1062 +INSERT INTO t1 VALUES(NULL, 91); +INSERT INTO t1 VALUES (NULL, 92), (NULL, 93); +# Check what we have now +SELECT * FROM t1; +SELECT COUNT(*) FROM t1; +SELECT SUM(c1) FROM t1; +DROP TABLE t1; +# +# Delayed insert. +CREATE TABLE t1 ( + c1 INT(11) NOT NULL AUTO_INCREMENT, + c2 INT(11) DEFAULT NULL, + PRIMARY KEY (c1) + ); +SET insert_id= 14; +INSERT DELAYED INTO t1 VALUES(NULL, 11), (NULL, 12), (NULL, 13); +INSERT DELAYED INTO t1 VALUES(NULL, 21), (NULL, 22), (NULL, 23); +# Restart sequence at a different value. +INSERT DELAYED INTO t1 VALUES( 69, 31), (NULL, 32), (NULL, 33); +INSERT DELAYED INTO t1 VALUES(NULL, 41), (NULL, 42), (NULL, 43); +# Restart sequence at a different value. +SET insert_id= 114; +INSERT DELAYED INTO t1 VALUES(NULL, 51), (NULL, 52), (NULL, 53); +INSERT DELAYED INTO t1 VALUES(NULL, 61), (NULL, 62), (NULL, 63); +# Set one value below the maximum value. +INSERT DELAYED INTO t1 VALUES( 49, 71), (NULL, 72), (NULL, 73); +INSERT DELAYED INTO t1 VALUES(NULL, 81), (NULL, 82), (NULL, 83); +# Create a duplicate value. +SET insert_id= 114; +INSERT DELAYED INTO t1 VALUES(NULL, 91); +INSERT DELAYED INTO t1 VALUES (NULL, 92), (NULL, 93); +# Wait until the rows are flushed to the table files. +FLUSH TABLE t1; +# Check what we have now +SELECT * FROM t1; +SELECT COUNT(*) FROM t1; +SELECT SUM(c1) FROM t1; +DROP TABLE t1; +# +# Cleanup +SET @@auto_increment_offset= + @bug20830_old_auto_increment_offset; +SET @@auto_increment_increment= + @bug20830_old_auto_increment_increment; +SET @@session.auto_increment_offset= + @bug20830_old_session_auto_increment_offset; +SET @@session.auto_increment_increment= + @bug20830_old_session_auto_increment_increment; + diff --git a/mysql-test/t/delete.test b/mysql-test/t/delete.test index 677ffaa2860..865e1746fd3 100644 --- a/mysql-test/t/delete.test +++ b/mysql-test/t/delete.test @@ -153,6 +153,16 @@ DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a LIMIT 1; SELECT * FROM t1; DROP TABLE t1; +# +# Bug #21392: multi-table delete with alias table name fails with +# 1003: Incorrect table name +# + +create table t1 (a int); +delete `4.t1` from t1 as `4.t1` where `4.t1`.a = 5; +delete FROM `4.t1` USING t1 as `4.t1` where `4.t1`.a = 5; +drop table t1; + # End of 4.1 tests # diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 0e338e5f1db..8c4a76c78a9 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -15,6 +15,9 @@ #events_scheduling : BUG#19170 2006-04-26 andrey Test case of 19170 fails on some platforms. Has to be checked. 
im_options : Bug#20294 2006-07-24 stewart Instance manager test im_options fails randomly #im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails +im_daemon_life_cycle : BUG#22379 2006-09-15 ingo im_daemon_life_cycle.test fails on merge of 5.1 -> 5.1-engines +im_instance_conf : BUG#20294 2006-09-16 ingo Instance manager test im_instance_conf fails randomly +concurrent_innodb : BUG#21579 2006-08-11 mleich innodb_concurrent random failures with varying differences ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog #ndb_binlog_ignore_db : BUG#21279 2006-07-25 ingo Randomly throws a warning @@ -32,15 +35,12 @@ rpl_ndb_innodb2ndb : Bug #19710 Cluster replication to partition table fa rpl_ndb_myisam2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly rpl_sp : BUG#16456 2006-02-16 jmiller -rpl_sp_effects : BUG#19862 2006-06-15 mkindahl +rpl_multi_engine : BUG#22583 2006-09-23 lars # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events #rpl_ndb_idempotent : BUG#21298 2006-07-27 msvensson #rpl_row_basic_7ndb : BUG#21298 2006-07-27 msvensson #rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson -crash_commit_before : 2006-08-02 msvensson -func_group : BUG#21924 2006-08-30 reggie -func_in : BUG#21925 2006-08-30 reggie ndb_binlog_discover : bug#21806 2006-08-24 ndb_autodiscover3 : bug#21806 diff --git a/mysql-test/t/events.test b/mysql-test/t/events.test index add5dbcaa24..32863308687 100644 --- a/mysql-test/t/events.test +++ b/mysql-test/t/events.test @@ -49,35 +49,26 @@ drop event event2; create event event2 on schedule every 2 second starts now() ends date_add(now(), interval 5 hour) comment "some" DO begin end; drop event event2; +# # BUG #16537 (Events: mysql.event.starts is null) +# CREATE EVENT event_starts_test ON SCHEDULE EVERY 10 SECOND COMMENT "" DO SELECT 1; ---replace_column 8 # 9 # -SHOW EVENTS; -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +SELECT interval_field, interval_value, body FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; ALTER EVENT event_starts_test ON SCHEDULE AT '2020-02-02 20:00:02'; ---replace_column 8 # 9 # -SHOW EVENTS; -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; ALTER EVENT event_starts_test COMMENT "non-empty comment"; ---replace_column 8 # 9 # -SHOW EVENTS; -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; ALTER EVENT event_starts_test COMMENT ""; ---replace_column 8 # 9 # -SHOW EVENTS; -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM 
mysql.event WHERE db='events_test' AND name='event_starts_test'; DROP EVENT event_starts_test; + CREATE EVENT event_starts_test ON SCHEDULE EVERY 20 SECOND STARTS '2020-02-02 20:00:02' ENDS '2022-02-02 20:00:02' DO SELECT 2; ---replace_column 8 # 9 # -SHOW EVENTS; -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; ALTER EVENT event_starts_test COMMENT "non-empty comment"; ---replace_column 8 # 9 # -SHOW EVENTS; -SELECT starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; ALTER EVENT event_starts_test COMMENT ""; ---replace_column 8 # 9 # -SHOW EVENTS; +SELECT execute_at IS NULL, starts IS NULL, ends IS NULL, comment FROM mysql.event WHERE db='events_test' AND name='event_starts_test'; DROP EVENT event_starts_test; # # diff --git a/mysql-test/t/events_bugs.test b/mysql-test/t/events_bugs.test index c9a8842f8f0..60e8c78d8bb 100644 --- a/mysql-test/t/events_bugs.test +++ b/mysql-test/t/events_bugs.test @@ -234,4 +234,23 @@ create event e_53 on schedule every 5 second ends (select s1 from ttx) do drop t # END - BUG#16394: Events: Crash if schedule contains SELECT # +# +# START - BUG#22397: Events: crash with procedure which alters events +# +--disable_warnings +drop event if exists e_16; +drop procedure if exists p_16; +--enable_warnings +create event e_16 on schedule every 1 second do set @a=5; +create procedure p_16 () alter event e_16 on schedule every @a second; +set @a = null; +--error ER_WRONG_VALUE +call p_16(); +--error ER_WRONG_VALUE +call p_16(); +set @a= 6; +call p_16(); + +drop procedure p_16; +drop event e_16; drop database events_test; diff --git a/mysql-test/t/events_restart_phase0.log b/mysql-test/t/events_restart_phase0.log new file mode 100644 index 00000000000..218b804a302 --- /dev/null +++ b/mysql-test/t/events_restart_phase0.log @@ -0,0 +1,22 @@ +SHOW VARIABLES LIKE 'event%'; +Variable_name Value +event_scheduler DISABLED +SELECT @@global.event_scheduler; +@@global.event_scheduler +DISABLED +SET GLOBAL event_scheduler=on; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=off; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=0; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=1; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=2; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of '2' +SET GLOBAL event_scheduler=SUSPEND; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'SUSPEND' +SET GLOBAL event_scheduler=SUSPENDED; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'SUSPENDED' +SET GLOBAL event_scheduler=disabled; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'disabled' diff --git a/mysql-test/t/events_restart_phase0.result b/mysql-test/t/events_restart_phase0.result new file mode 100644 index 
00000000000..218b804a302 --- /dev/null +++ b/mysql-test/t/events_restart_phase0.result @@ -0,0 +1,22 @@ +SHOW VARIABLES LIKE 'event%'; +Variable_name Value +event_scheduler DISABLED +SELECT @@global.event_scheduler; +@@global.event_scheduler +DISABLED +SET GLOBAL event_scheduler=on; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=off; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=0; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=1; +ERROR HY000: The MySQL server is running with the --event-scheduler=DISABLED option so it cannot execute this statement +SET GLOBAL event_scheduler=2; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of '2' +SET GLOBAL event_scheduler=SUSPEND; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'SUSPEND' +SET GLOBAL event_scheduler=SUSPENDED; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'SUSPENDED' +SET GLOBAL event_scheduler=disabled; +ERROR 42000: Variable 'event_scheduler' can't be set to the value of 'disabled' diff --git a/mysql-test/t/execution_constants.test b/mysql-test/t/execution_constants.test new file mode 100644 index 00000000000..00967b2eeba --- /dev/null +++ b/mysql-test/t/execution_constants.test @@ -0,0 +1,74 @@ +# +# Bug#21476: Lost Database Connection During Query +# +# When the amount of stack space we think we need to report an error is +# actually too small, then we can get SEGVs. But, we don't want to reserve +# space that we could use to get real work done. So, we want the reserved +# space small, and this test verifies that the reservation is not too small. + +CREATE TABLE `t_bug21476` ( + `ID_BOARD` smallint(5) unsigned NOT NULL default '0', + `ID_MEMBER` mediumint(8) unsigned NOT NULL default '0', + `logTime` int(10) unsigned NOT NULL default '0', + `ID_MSG` mediumint(8) unsigned NOT NULL default '0', + PRIMARY KEY (`ID_MEMBER`,`ID_BOARD`), + KEY `logTime` (`logTime`) +) ENGINE=MyISAM DEFAULT CHARSET=cp1251 COLLATE=cp1251_bulgarian_ci; + +INSERT INTO `t_bug21476` VALUES (2,2,1154870939,0),(1,2,1154870957,0),(2,183,1154941362,0),(2,84,1154904301,0),(1,84,1154905867,0),(2,13,1154947484,10271),(3,84,1154880549,0),(1,6,1154892183,0),(2,25,1154947581,10271),(3,25,1154904760,0),(1,25,1154947373,10271),(1,179,1154899992,0),(2,179,1154899410,0),(5,25,1154901666,0),(2,329,1154902026,0),(3,329,1154902040,0),(1,329,1154902058,0),(1,13,1154930841,0),(3,85,1154904987,0),(1,183,1154929665,0),(3,13,1154931268,0),(1,85,1154936888,0),(1,169,1154937959,0),(2,169,1154941717,0),(3,183,1154939810,0),(3,169,1154941734,0); + +delimiter //; +let $query_head=UPDATE t_bug21476 SET ID_MSG = IF(logTime BETWEEN 1 AND 1101770053, 2, // +let $query_tail =) WHERE logTime BETWEEN 1 AND 1104091539 AND ID_MSG = 0// + +# Scan over the possible stack heights, trying to recurse to exactly that +# depth. Eventually, we will reach our imposed limit on height and try to +# raise an error. If the remaining stack space is enough to raise that error, +# we will get an error-number of 1436 and quit the loop. If it's not enough +# space, we should get a SEGV + +# Well more than enough recursions to find the end of our stack. 
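# A sketch of the statement this loop builds (after one pass, i.e. three
# added IF() levels; constants as defined in $query_head/$query_tail above):
#   UPDATE t_bug21476 SET ID_MSG = IF(logTime BETWEEN 1 AND 1101770053, 2,
#     IF(logTime <= 1104091$i, $i,
#       IF(logTime <= 1105091$i, $i,
#         IF(logTime <= 1106091$i, $i, 0))))
#   WHERE logTime BETWEEN 1 AND 1104091539 AND ID_MSG = 0
# Each pass nests three more IF() calls, deepening the statement until the
# server rejects it with error 1436 instead of crashing.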
+let $i = 100000// +disable_query_log// +disable_result_log// +while ($i) +{ + # If we SEGV because the min stack size is exceeded, this would return error + # 2013 . + error 0,1436 // + eval $query_head 0 $query_tail// + + if ($mysql_errno != 1436) + { + # We reached the place where we reported an error about the stack limit, + # and we successfully returned the error. That means that at the stack + # limit, we still have enough space reserved to report an error. + let $i = 1// + } + + # Multiplying by three stack frames should be fine enough resolution. + # Trading exactness for speed. + + # go one more level deep + let $query_head = $query_head IF(logTime <= 1104091$i, $i, // + let $query_tail =) $query_tail// + + # go one more level deep + let $query_head = $query_head IF(logTime <= 1105091$i, $i, // + let $query_tail =) $query_tail// + + # go one more level deep + let $query_head = $query_head IF(logTime <= 1106091$i, $i, // + let $query_tail =) $query_tail// + + dec $i// +} +enable_result_log// +enable_query_log// + +echo Assertion: mysql_errno 1436 == $mysql_errno// + +delimiter ;// +DROP TABLE `t_bug21476`; + +--echo End of 5.0 tests. diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test index 7a0d90961e4..19753430dde 100644 --- a/mysql-test/t/func_gconcat.test +++ b/mysql-test/t/func_gconcat.test @@ -447,3 +447,18 @@ SELECT a, CHAR_LENGTH(b) FROM t1; SELECT CHAR_LENGTH( GROUP_CONCAT(b) ) FROM t1; SET GROUP_CONCAT_MAX_LEN = 1024; DROP TABLE t1; + +# +# Bug #22015: crash with GROUP_CONCAT over a derived table that +# returns the results of aggregation by GROUP_CONCAT +# + +CREATE TABLE t1 (a int, b int); + +INSERT INTO t1 VALUES (2,1), (1,2), (2,2), (1,3); + +SELECT GROUP_CONCAT(a), x + FROM (SELECT a, GROUP_CONCAT(b) x FROM t1 GROUP BY a) AS s + GROUP BY x; + +DROP TABLE t1; diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 3a62b9feb3a..78393ee1104 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -573,6 +573,19 @@ INSERT INTO t1 VALUES SELECT MAX(b) FROM t1; EXPLAIN SELECT MAX(b) FROM t1; DROP TABLE t1; +# +# Bug #16792 query with subselect, join, and group not returning proper values +# +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 VALUES (1,1),(1,2),(2,3); + +SELECT (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; +SELECT (SELECT COUNT(DISTINCT 12)) FROM t1 GROUP BY t1.a; +# an attempt to test all aggregate function with no table. 
+SELECT AVG(2), BIT_AND(2), BIT_OR(2), BIT_XOR(2), COUNT(*), COUNT(12), + COUNT(DISTINCT 12), MIN(2),MAX(2),STD(2), VARIANCE(2),SUM(2), + GROUP_CONCAT(2),GROUP_CONCAT(DISTINCT 2); +DROP TABLE t1; # End of 4.1 tests diff --git a/mysql-test/t/func_in.test b/mysql-test/t/func_in.test index 8ddf1fbe314..906747c2f78 100644 --- a/mysql-test/t/func_in.test +++ b/mysql-test/t/func_in.test @@ -232,3 +232,27 @@ select some_id from t1 where some_id not in(2,-1); select some_id from t1 where some_id not in(-4,-1,-4); select some_id from t1 where some_id not in(-4,-1,3423534,2342342); drop table t1; + +# +# Bug#18360: Type aggregation for IN and CASE may lead to a wrong result +# +create table t1(f1 char(1)); +insert into t1 values ('a'),('b'),('1'); +select f1 from t1 where f1 in ('a',1); +select f1, case f1 when 'a' then '+' when 1 then '-' end from t1; +create index t1f1_idx on t1(f1); +select f1 from t1 where f1 in ('a',1); +explain select f1 from t1 where f1 in ('a',1); +select f1 from t1 where f1 in ('a','b'); +explain select f1 from t1 where f1 in ('a','b'); +select f1 from t1 where f1 in (2,1); +explain select f1 from t1 where f1 in (2,1); +create table t2(f2 int, index t2f2(f2)); +insert into t2 values(0),(1),(2); +select f2 from t2 where f2 in ('a',2); +explain select f2 from t2 where f2 in ('a',2); +select f2 from t2 where f2 in ('a','b'); +explain select f2 from t2 where f2 in ('a','b'); +select f2 from t2 where f2 in (1,'b'); +explain select f2 from t2 where f2 in (1,'b'); +drop table t1, t2; diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test index 8753db0ebe1..45415882ac7 100644 --- a/mysql-test/t/func_str.test +++ b/mysql-test/t/func_str.test @@ -753,4 +753,31 @@ select cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2)); select conv("18383815659218730760",10,10) + 0; select "18383815659218730760" + 0; +# +# Bug #21698: substitution of a string field for a constant under a function +# + +CREATE TABLE t1 (code varchar(10)); +INSERT INTO t1 VALUES ('a12'), ('A12'), ('a13'); + +SELECT ASCII(code), code FROM t1 WHERE code='A12'; +SELECT ASCII(code), code FROM t1 WHERE code='A12' AND ASCII(code)=65; + +INSERT INTO t1 VALUES ('a12 '), ('A12 '); + +SELECT LENGTH(code), code FROM t1 WHERE code='A12'; +SELECT LENGTH(code), code FROM t1 WHERE code='A12' AND LENGTH(code)=5; + +ALTER TABLE t1 ADD INDEX (code); +CREATE TABLE t2 (id varchar(10) PRIMARY KEY); +INSERT INTO t2 VALUES ('a11'), ('a12'), ('a13'), ('a14'); + +SELECT * FROM t1 INNER JOIN t2 ON t1.code=t2.id + WHERE t2.id='a12' AND (LENGTH(code)=5 OR code < 'a00'); +EXPLAIN EXTENDED +SELECT * FROM t1 INNER JOIN t2 ON code=id + WHERE id='a12' AND (LENGTH(code)=5 OR code < 'a00'); + +DROP TABLE t1,t2; + --echo End of 5.0 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 7ca1b3e5433..d49a4fed9d2 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -419,20 +419,20 @@ drop table t1; # # Bug#16377 result of DATE/TIME functions were compared as strings which # can lead to a wrong result. 
-# +# Now wrong dates should be compared only with CAST() create table t1(f1 date, f2 time, f3 datetime); insert into t1 values ("2006-01-01", "12:01:01", "2006-01-01 12:01:01"); insert into t1 values ("2006-01-02", "12:01:02", "2006-01-02 12:01:02"); -select f1 from t1 where f1 between "2006-1-1" and 20060101; -select f1 from t1 where f1 between "2006-1-1" and "2006.1.1"; -select f1 from t1 where date(f1) between "2006-1-1" and "2006.1.1"; -select f2 from t1 where f2 between "12:1:2" and "12:2:2"; -select f2 from t1 where time(f2) between "12:1:2" and "12:2:2"; -select f3 from t1 where f3 between "2006-1-1 12:1:1" and "2006-1-1 12:1:2"; -select f3 from t1 where timestamp(f3) between "2006-1-1 12:1:1" and "2006-1-1 12:1:2"; -select f1 from t1 where "2006-1-1" between f1 and f3; -select f1 from t1 where "2006-1-1" between date(f1) and date(f3); -select f1 from t1 where "2006-1-1" between f1 and 'zzz'; +select f1 from t1 where f1 between CAST("2006-1-1" as date) and CAST(20060101 as date); +select f1 from t1 where f1 between cast("2006-1-1" as date) and cast("2006.1.1" as date); +select f1 from t1 where date(f1) between cast("2006-1-1" as date) and cast("2006.1.1" as date); +select f2 from t1 where f2 between cast("12:1:2" as time) and cast("12:2:2" as time); +select f2 from t1 where time(f2) between cast("12:1:2" as time) and cast("12:2:2" as time); +select f3 from t1 where f3 between cast("2006-1-1 12:1:1" as datetime) and cast("2006-1-1 12:1:2" as datetime); +select f3 from t1 where timestamp(f3) between cast("2006-1-1 12:1:1" as datetime) and cast("2006-1-1 12:1:2" as datetime); +select f1 from t1 where cast("2006-1-1" as date) between f1 and f3; +select f1 from t1 where cast("2006-1-1" as date) between date(f1) and date(f3); +select f1 from t1 where cast("2006-1-1" as date) between f1 and cast('zzz' as date); select f1 from t1 where makedate(2006,1) between date(f1) and date(f3); select f1 from t1 where makedate(2006,2) between date(f1) and date(f3); drop table t1; @@ -446,6 +446,25 @@ create table t1 select now() - now(), curtime() - curtime(), show create table t1; drop table t1; +# +# 21913: DATE_FORMAT() Crashes mysql server if I use it through +# mysql-connector-j driver. +# + +SET NAMES latin1; +SET character_set_results = NULL; +SHOW VARIABLES LIKE 'character_set_results'; + +CREATE TABLE testBug8868 (field1 DATE, field2 VARCHAR(32) CHARACTER SET BINARY); +INSERT INTO testBug8868 VALUES ('2006-09-04', 'abcd'); + +SELECT DATE_FORMAT(field1,'%b-%e %l:%i%p') as fmtddate, field2 FROM testBug8868; + +DROP TABLE testBug8868; + +SET NAMES DEFAULT; + + # # Bug #19844 time_format in Union truncates values # diff --git a/mysql-test/t/handler_innodb.test b/mysql-test/t/handler_innodb.test new file mode 100644 index 00000000000..02982716f78 --- /dev/null +++ b/mysql-test/t/handler_innodb.test @@ -0,0 +1,20 @@ +# t/handler_innodb.test +# +# test of HANDLER ... 
+# +# Last update: +# 2006-07-31 ML test refactored (MySQL 5.1) +# code of t/handler.test and t/innodb_handler.test united +# main testing code put into include/handler.inc +# rename t/innodb_handler.test to t/handler_innodb.test +# + +# should work in embedded server after mysqltest is fixed +--source include/not_embedded.inc + +--source include/have_innodb.inc +let $engine_type= InnoDB; +let $other_engine_type= MEMORY; +let $other_handler_engine_type= MyISAM; + +--source include/handler.inc diff --git a/mysql-test/t/handler_myisam.test b/mysql-test/t/handler_myisam.test new file mode 100644 index 00000000000..644c28de5b2 --- /dev/null +++ b/mysql-test/t/handler_myisam.test @@ -0,0 +1,21 @@ +# t/handler_myisam.test +# +# test of HANDLER ... +# +# Last update: +# 2006-07-31 ML test refactored (MySQL 5.1) +# code of t/handler.test and t/innodb_handler.test united +# main testing code put into include/handler.inc +# rename t/handler.test to t/handler_myisam.test +# + +# should work in embedded server after mysqltest is fixed +--source include/not_embedded.inc + +let $engine_type= MyISAM; +let $other_engine_type= MEMORY; +# There is unfortunately no other all time available storage engine +# which supports the handler interface +let $other_handler_engine_type= MyISAM; + +--source include/handler.inc diff --git a/mysql-test/t/im_daemon_life_cycle.imtest b/mysql-test/t/im_daemon_life_cycle.imtest index a07da161279..55e52f4e464 100644 --- a/mysql-test/t/im_daemon_life_cycle.imtest +++ b/mysql-test/t/im_daemon_life_cycle.imtest @@ -14,3 +14,47 @@ # process. --exec $MYSQL_TEST_DIR/t/kill_n_check.sh $IM_PATH_PID restarted 30 + +########################################################################### + +# +# BUG#12751: Instance Manager: client hangs +# + +--echo +--echo -------------------------------------------------------------------- +--echo -- Test for BUG#12751 +--echo -------------------------------------------------------------------- + +# Give some time to begin accepting connections after restart. +# FIXME: race condition here. + +--sleep 3 + +# 1. Start mysqld; + +START INSTANCE mysqld2; +# FIXME: START INSTANCE should be synchronous. +--exec $MYSQL_TEST_DIR/t/wait_for_process.sh $IM_MYSQLD2_PATH_PID 30 started + +# 2. Restart IM-main: kill it and IM-angel will restart it. + +--exec $MYSQL_TEST_DIR/t/kill_n_check.sh $IM_PATH_PID restarted 30 + +# 3. Issue some statement -- connection should be re-established. + +# Give some time to begin accepting connections after restart. +# FIXME: race condition here. + +--sleep 3 + +--replace_column 3 VERSION_NUMBER 4 VERSION +SHOW INSTANCE STATUS mysqld1; + +# 4. Stop mysqld2, because it will not be stopped by IM, as it is nonguarded. +# So, if it we do not stop it, it will be stopped by mysql-test-run.pl with +# warning. + +STOP INSTANCE mysqld2; +# FIXME: STOP INSTANCE should be synchronous. 
+--exec $MYSQL_TEST_DIR/t/wait_for_process.sh $IM_MYSQLD2_PATH_PID 30 stopped diff --git a/mysql-test/t/index_merge_innodb.test b/mysql-test/t/index_merge_innodb.test index 7e6c524e811..a3bda0ad00c 100644 --- a/mysql-test/t/index_merge_innodb.test +++ b/mysql-test/t/index_merge_innodb.test @@ -1,329 +1,28 @@ +# t/index_merge_innodb.test # # Index merge tests # --- source include/have_innodb.inc +# Last update: +# 2006-08-07 ML test refactored (MySQL 5.1) +# Main code of several index_merge tests +# -> include/index_merge*.inc +# wrapper t/index_merge_innodb.test sources now several +# include/index_merge*.inc files +# ---disable_warnings -drop table if exists t1,t2; ---enable_warnings +--source include/have_innodb.inc +let $engine_type= InnoDB; +# InnoDB does not support Merge tables (affects include/index_merge1.inc) +let $merge_table_support= 0; -create table t1 -( - key1 int not null, - key2 int not null, - - INDEX i1(key1), - INDEX i2(key2) -) engine=innodb; - ---disable_query_log -let $1=200; -while ($1) -{ - eval insert into t1 values (200-$1, $1); - dec $1; -} ---enable_query_log - -# No primary key -explain select * from t1 where key1 < 5 or key2 > 197; - -select * from t1 where key1 < 5 or key2 > 197; - -explain select * from t1 where key1 < 3 or key2 > 195; -select * from t1 where key1 < 3 or key2 > 195; - -# Primary key as case-sensitive string with \0s. -# also make primary key be longer then max. index length of MyISAM. -alter table t1 add str1 char (255) not null, - add zeroval int not null default 0, - add str2 char (255) not null, - add str3 char (255) not null; - -update t1 set str1='aaa', str2='bbb', str3=concat(key2, '-', key1 div 2, '_' ,if(key1 mod 2 = 0, 'a', 'A')); - -alter table t1 add primary key (str1, zeroval, str2, str3); - -explain select * from t1 where key1 < 5 or key2 > 197; - -select * from t1 where key1 < 5 or key2 > 197; - -explain select * from t1 where key1 < 3 or key2 > 195; -select * from t1 where key1 < 3 or key2 > 195; - -# Test for BUG#5401 -drop table t1; -create table t1 ( - pk integer not null auto_increment primary key, - key1 integer, - key2 integer not null, - filler char (200), - index (key1), - index (key2) -) engine=innodb; -show warnings; ---disable_query_log -let $1=30; -while ($1) -{ - eval insert into t1 (key1, key2, filler) values ($1/4, $1/8, 'filler-data'); - dec $1; -} ---enable_query_log -explain select pk from t1 where key1 = 1 and key2 = 1; -select pk from t1 where key2 = 1 and key1 = 1; -select pk from t1 ignore index(key1,key2) where key2 = 1 and key1 = 1; - -# More tests for BUG#5401. 
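# The body being removed here now lives in the shared include files. Reduced
# to its core, the case it keeps exercising is an OR over two separately
# indexed columns, which these tests check is answered with an index_merge
# plan (given enough rows) rather than a full scan. Names are illustrative:
create table t_im (key1 int not null, key2 int not null,
                   index i1(key1), index i2(key2)) engine = innodb;
explain select * from t_im where key1 < 5 or key2 > 197;
drop table t_im;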
-drop table t1; -create table t1 ( - pk int primary key auto_increment, - key1a int, - key2a int, - key1b int, - key2b int, - dummy1 int, - dummy2 int, - dummy3 int, - dummy4 int, - key3a int, - key3b int, - filler1 char (200), - index i1(key1a, key1b), - index i2(key2a, key2b), - index i3(key3a, key3b) -) engine=innodb; - -create table t2 (a int); -insert into t2 values (0),(1),(2),(3),(4),(NULL); - -insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) - select A.a, B.a, C.a, D.a, C.a, D.a from t2 A,t2 B,t2 C, t2 D; -insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) - select key1a, key1b, key2a, key2b, key3a, key3b from t1; -insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) - select key1a, key1b, key2a, key2b, key3a, key3b from t1; -analyze table t1; -select count(*) from t1; - -explain select count(*) from t1 where - key1a = 2 and key1b is null and key2a = 2 and key2b is null; - -select count(*) from t1 where - key1a = 2 and key1b is null and key2a = 2 and key2b is null; - -explain select count(*) from t1 where - key1a = 2 and key1b is null and key3a = 2 and key3b is null; - -select count(*) from t1 where - key1a = 2 and key1b is null and key3a = 2 and key3b is null; - -drop table t1,t2; - -# Test for BUG#8441 -create table t1 ( - id1 int, - id2 date , - index idx2 (id1,id2), - index idx1 (id2) -) engine = innodb; -insert into t1 values(1,'20040101'), (2,'20040102'); -select * from t1 where id1 = 1 and id2= '20040101'; -drop table t1; - -# Test for BUG#12720 ---disable_warnings -drop view if exists v1; ---enable_warnings -CREATE TABLE t1 ( - `oid` int(11) unsigned NOT NULL auto_increment, - `fk_bbk_niederlassung` int(11) unsigned NOT NULL, - `fk_wochentag` int(11) unsigned NOT NULL, - `uhrzeit_von` time NOT NULL COMMENT 'HH:MM', - `uhrzeit_bis` time NOT NULL COMMENT 'HH:MM', - `geloescht` tinyint(4) NOT NULL, - `version` int(5) NOT NULL, - PRIMARY KEY (`oid`), - KEY `fk_bbk_niederlassung` (`fk_bbk_niederlassung`), - KEY `fk_wochentag` (`fk_wochentag`), - KEY `ix_version` (`version`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -insert into t1 values -(1, 38, 1, '08:00:00', '13:00:00', 0, 1), -(2, 38, 2, '08:00:00', '13:00:00', 0, 1), -(3, 38, 3, '08:00:00', '13:00:00', 0, 1), -(4, 38, 4, '08:00:00', '13:00:00', 0, 1), -(5, 38, 5, '08:00:00', '13:00:00', 0, 1), -(6, 38, 5, '08:00:00', '13:00:00', 1, 2), -(7, 38, 3, '08:00:00', '13:00:00', 1, 2), -(8, 38, 1, '08:00:00', '13:00:00', 1, 2), -(9, 38, 2, '08:00:00', '13:00:00', 1, 2), -(10, 38, 4, '08:00:00', '13:00:00', 1, 2), -(11, 38, 1, '08:00:00', '13:00:00', 0, 3), -(12, 38, 2, '08:00:00', '13:00:00', 0, 3), -(13, 38, 3, '08:00:00', '13:00:00', 0, 3), -(14, 38, 4, '08:00:00', '13:00:00', 0, 3), -(15, 38, 5, '08:00:00', '13:00:00', 0, 3), -(16, 38, 4, '08:00:00', '13:00:00', 0, 4), -(17, 38, 5, '08:00:00', '13:00:00', 0, 4), -(18, 38, 1, '08:00:00', '13:00:00', 0, 4), -(19, 38, 2, '08:00:00', '13:00:00', 0, 4), -(20, 38, 3, '08:00:00', '13:00:00', 0, 4), -(21, 7, 1, '08:00:00', '13:00:00', 0, 1), -(22, 7, 2, '08:00:00', '13:00:00', 0, 1), -(23, 7, 3, '08:00:00', '13:00:00', 0, 1), -(24, 7, 4, '08:00:00', '13:00:00', 0, 1), -(25, 7, 5, '08:00:00', '13:00:00', 0, 1); - -create view v1 as -select - zeit1.oid AS oid, - zeit1.fk_bbk_niederlassung AS fk_bbk_niederlassung, - zeit1.fk_wochentag AS fk_wochentag, - zeit1.uhrzeit_von AS uhrzeit_von, - zeit1.uhrzeit_bis AS uhrzeit_bis, - zeit1.geloescht AS geloescht, - zeit1.version AS version -from - t1 zeit1 -where -(zeit1.version = - (select max(zeit2.version) AS `max(version)` 
- from t1 zeit2 - where - ((zeit1.fk_bbk_niederlassung = zeit2.fk_bbk_niederlassung) and - (zeit1.fk_wochentag = zeit2.fk_wochentag) and - (zeit1.uhrzeit_von = zeit2.uhrzeit_von) and - (zeit1.uhrzeit_bis = zeit2.uhrzeit_bis) - ) - ) -) -and (zeit1.geloescht = 0); - -select * from v1 where oid = 21; -drop view v1; -drop table t1; -## -CREATE TABLE t1( - t_cpac varchar(2) NOT NULL, - t_vers varchar(4) NOT NULL, - t_rele varchar(2) NOT NULL, - t_cust varchar(4) NOT NULL, - filler1 char(250) default NULL, - filler2 char(250) default NULL, - PRIMARY KEY (t_cpac,t_vers,t_rele,t_cust), - UNIQUE KEY IX_4 (t_cust,t_cpac,t_vers,t_rele), - KEY IX_5 (t_vers,t_rele,t_cust) -) ENGINE=InnoDB; - -insert into t1 values -('tm','2.5 ','a ',' ','',''), ('tm','2.5U','a ','stnd','',''), -('da','3.3 ','b ',' ','',''), ('da','3.3U','b ','stnd','',''), -('tl','7.6 ','a ',' ','',''), ('tt','7.6 ','a ',' ','',''), -('bc','B61 ','a ',' ','',''), ('bp','B61 ','a ',' ','',''), -('ca','B61 ','a ',' ','',''), ('ci','B61 ','a ',' ','',''), -('cp','B61 ','a ',' ','',''), ('dm','B61 ','a ',' ','',''), -('ec','B61 ','a ',' ','',''), ('ed','B61 ','a ',' ','',''), -('fm','B61 ','a ',' ','',''), ('nt','B61 ','a ',' ','',''), -('qm','B61 ','a ',' ','',''), ('tc','B61 ','a ',' ','',''), -('td','B61 ','a ',' ','',''), ('tf','B61 ','a ',' ','',''), -('tg','B61 ','a ',' ','',''), ('ti','B61 ','a ',' ','',''), -('tp','B61 ','a ',' ','',''), ('ts','B61 ','a ',' ','',''), -('wh','B61 ','a ',' ','',''), ('bc','B61U','a ','stnd','',''), -('bp','B61U','a ','stnd','',''), ('ca','B61U','a ','stnd','',''), -('ci','B61U','a ','stnd','',''), ('cp','B61U','a ','stnd','',''), -('dm','B61U','a ','stnd','',''), ('ec','B61U','a ','stnd','',''), -('fm','B61U','a ','stnd','',''), ('nt','B61U','a ','stnd','',''), -('qm','B61U','a ','stnd','',''), ('tc','B61U','a ','stnd','',''), -('td','B61U','a ','stnd','',''), ('tf','B61U','a ','stnd','',''), -('tg','B61U','a ','stnd','',''), ('ti','B61U','a ','stnd','',''), -('tp','B61U','a ','stnd','',''), ('ts','B61U','a ','stnd','',''), -('wh','B61U','a ','stnd','',''); -show create table t1; - -select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6'; -select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6' - and t_rele='a' and t_cust = ' '; - -drop table t1; - -# BUG#19021: Crash in index_merge/ROR-intersection optimizer under -# specific circumstances. 
-create table t1 ( - pk int(11) not null auto_increment, - a int(11) not null default '0', - b int(11) not null default '0', - c int(11) not null default '0', - - filler1 datetime, filler2 varchar(15), - filler3 longtext, - - kp1 varchar(4), kp2 varchar(7), - kp3 varchar(2), kp4 varchar(4), - kp5 varchar(7), - filler4 char(1), - - primary key (pk), - key idx1(a,b,c), - key idx2(c), - key idx3(kp1,kp2,kp3,kp4,kp5) -) engine=innodb default charset=latin1; ---disable_query_log -set @fill= uncompress(unhex(concat( -'F91D0000789CDD993D6FDB301086F7FE0A6D4E0105B8E3F1335D5BA028DA0EEDE28E1D320408', -'52A0713BF4D7571FB62C51A475924839080307B603E77DEE787C8FA41F9E9EEF7F1F8A87A7C3', -'AFE280C5DF9F8F7FEE9F8B1B2CB114D6902E918455245DB91300FA16E42D5201FA4EE29DA05D', -'B9FB3718A33718A3FA8C30AEFAFDE1F317D016AA67BA7A60FDE45BF5F8BA7B5BDE8812AA9F1A', -'069DB03C9804346644F3A3A6A1338DB572756A3C4D1BCC804CABF912C654AE9BB855A2B85962', -'3A479259CAE6A86C0411D01AE5483581EDCBD9A39C45252D532E533979EB9F82E971D979BDB4', -'8531105670740AFBFD1E34AAB0029E4AD0A1D46A6D0946A21A16038A5CD965CD2D524673F712', -'20C304477315CE18405EAF9BD0AFFEAC74FDA14F1FBF5BD34C769D73FBBEDF4750ADD4E5A99C', -'5C8DC04934AFA275D483D536D174C11B12AF27F8F888B41B6FC9DBA569E1FD7BD72D698130B7', -'91B23A98803512B3D31881E8DCDA2AC1754E3644C4BB3A8466750B911681274A39E35E8624B7', -'444A42AC1213F354758E3CF1A4CDD5A688C767CF1B11ABC5867CB15D8A18E0B91E9EC275BB94', -'58F33C2936F64690D55BC29E4A293D95A798D84217736CEAAA538CE1354269EE2162053FBC66', -'496D90CB53323CB279D3A6AF651B4B22B9E430743D83BE48E995A09D4FC9871C22D8D189B945', -'706911BCB8C3C774B9C08D2FC6ED853ADACA37A14A4CB2E027630E5B80ECACD939431B1CDF62', -'7D71487536EA2C678F59685E91F4B6C144BCCB94C1EBA9FA6F5552DDCA4E4539BE326A2720CB', -'45ED028EB3616AC93C46E775FEA9FA6DA7CFCEC6DEBA5FCD1F915EED4D983BDDB881528AD9AB', -'43C1576F29AAB35BDFBC21D422F52B307D350589D45225A887AC46C8EDD72D99EC3ED2E1BCEF', -'7AF26FC4C74097B6768A5EDAFA660CC64278F7E63F99AC954B'))); -prepare x from @fill; -execute x; -deallocate prepare x; ---enable_query_log -set @fill=NULL; -SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND - kp1='279' AND kp2='ELM0678' AND kp3='6' AND kp4='10' AND kp5 = 'R '; - -drop table t1; - -# BUG#21277: Index Merge/sort_union: wrong query results -create table t1 -( - key1 int not null, - key2 int not null default 0, - key3 int not null default 0 -); - -insert into t1(key1) values (1),(2),(3),(4),(5),(6),(7),(8); - -let $1=7; -set @d=8; -while ($1) -{ - eval insert into t1 (key1) select key1+@d from t1; - eval set @d=@d*2; - dec $1; -} - -alter table t1 add index i2(key2); -alter table t1 add index i3(key3); -update t1 set key2=key1,key3=key1; - -# to test the bug, the following must use "sort_union": -explain select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); -select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); -drop table t1; +# The first two tests are disabled because of non deterministic explain output. +# If include/index_merge1.inc can be enabled for InnoDB and all other +# storage engines, please remove the subtest for Bug#21277 from +# include/index_merge2.inc. +# This test exists already in include/index_merge1.inc. 
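# The $merge_table_support flag set above is the switch the shared includes
# can branch on; mysqltest allows conditional blocks on such variables.
# Presumably include/index_merge1.inc guards its MERGE-table part roughly
# like this (the guard inside the real include may be written differently):
if ($merge_table_support)
{
  --echo # MERGE-specific part of the index_merge coverage runs here
}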
+# --source include/index_merge1.inc +# --source include/index_merge_ror.inc +--source include/index_merge2.inc +--source include/index_merge_2sweeps.inc +--source include/index_merge_ror_cpk.inc diff --git a/mysql-test/t/index_merge_myisam.test b/mysql-test/t/index_merge_myisam.test new file mode 100644 index 00000000000..8fdda2b772b --- /dev/null +++ b/mysql-test/t/index_merge_myisam.test @@ -0,0 +1,21 @@ +# t/index_merge_myisam.test +# +# Index merge tests +# +# Last update: +# 2006-08-07 ML test refactored (MySQL 5.1) +# Main code of several index_merge tests +# -> include/index_merge*.inc +# wrapper t/index_merge_innodb.test sources now several +# include/index_merge*.inc files +# + +let $engine_type= MyISAM; +# MyISAM supports Merge tables +let $merge_table_support= 1; + +--source include/index_merge1.inc +--source include/index_merge_ror.inc +--source include/index_merge2.inc +--source include/index_merge_2sweeps.inc +--source include/index_merge_ror_cpk.inc diff --git a/mysql-test/t/innodb-big.test b/mysql-test/t/innodb-big.test deleted file mode 100644 index ade69ffdb45..00000000000 --- a/mysql-test/t/innodb-big.test +++ /dev/null @@ -1,46 +0,0 @@ -# -# Test some things that takes a long time - --- source include/big_test.inc --- source include/have_innodb.inc - ---disable_warnings -DROP TABLE IF EXISTS t1, t2, t3, t4; ---enable_warnings - -# -# Test test how filesort and buffered-record-reads works with innodb -# - -CREATE TABLE t1 (id INTEGER) ENGINE=MYISAM; -CREATE TABLE t2 (id INTEGER primary key) ENGINE=INNODB; -CREATE TABLE t3 (a char(32) primary key,id INTEGER) ENGINE=INNODB; -CREATE TABLE t4 (a char(32) primary key,id INTEGER) ENGINE=MYISAM; - -INSERT INTO t1 (id) VALUES (1); -INSERT INTO t1 SELECT id+1 FROM t1; -INSERT INTO t1 SELECT id+2 FROM t1; -INSERT INTO t1 SELECT id+4 FROM t1; -INSERT INTO t1 SELECT id+8 FROM t1; -INSERT INTO t1 SELECT id+16 FROM t1; -INSERT INTO t1 SELECT id+32 FROM t1; -INSERT INTO t1 SELECT id+64 FROM t1; -INSERT INTO t1 SELECT id+128 FROM t1; -INSERT INTO t1 SELECT id+256 FROM t1; -INSERT INTO t1 SELECT id+512 FROM t1; -INSERT INTO t1 SELECT id+1024 FROM t1; -INSERT INTO t1 SELECT id+2048 FROM t1; -INSERT INTO t1 SELECT id+4096 FROM t1; -INSERT INTO t1 SELECT id+8192 FROM t1; -INSERT INTO t1 SELECT id+16384 FROM t1; -INSERT INTO t1 SELECT id+32768 FROM t1; -INSERT INTO t1 SELECT id+65536 FROM t1; -INSERT INTO t1 SELECT id+131072 FROM t1; -INSERT INTO t1 SELECT id+262144 FROM t1; -INSERT INTO t1 SELECT id+524288 FROM t1; -INSERT INTO t1 SELECT id+1048576 FROM t1; -INSERT INTO t2 SELECT * FROM t1; -INSERT INTO t3 SELECT concat(id),id from t2 ORDER BY -id; -INSERT INTO t4 SELECT * from t3 ORDER BY concat(a); -select sum(id) from t3; -drop table t1,t2,t3,t4; diff --git a/mysql-test/t/innodb-deadlock.test b/mysql-test/t/innodb-deadlock.test deleted file mode 100644 index 41741942963..00000000000 --- a/mysql-test/t/innodb-deadlock.test +++ /dev/null @@ -1,117 +0,0 @@ --- source include/have_innodb.inc -# Can't test this with embedded server --- source include/not_embedded.inc - -connect (con1,localhost,root,,); -connect (con2,localhost,root,,); - ---disable_warnings -drop table if exists t1,t2; ---enable_warnings - -# -# Testing of FOR UPDATE -# - -connection con1; -create table t1 (id integer, x integer) engine=INNODB; -insert into t1 values(0, 0); -set autocommit=0; -SELECT * from t1 where id = 0 FOR UPDATE; - -connection con2; -set autocommit=0; - -# The following query should hang because con1 is locking the page ---send -update t1 set x=2 where 
id = 0; ---sleep 2 - -connection con1; -update t1 set x=1 where id = 0; -select * from t1; -commit; - -connection con2; -reap; -commit; - -connection con1; -select * from t1; -commit; - -drop table t1; -# -# Testing of FOR UPDATE -# - -connection con1; -create table t1 (id integer, x integer) engine=INNODB; -create table t2 (b integer, a integer) engine=INNODB; -insert into t1 values(0, 0), (300, 300); -insert into t2 values(0, 10), (1, 20), (2, 30); -commit; -set autocommit=0; -select * from t2; -update t2 set a=100 where b=(SELECT x from t1 where id = b FOR UPDATE); -select * from t2; -select * from t1; - -connection con2; -set autocommit=0; - -# The following query should hang because con1 is locking the page ---send -update t1 set x=2 where id = 0; ---sleep 2 - -connection con1; -update t1 set x=1 where id = 0; -select * from t1; -commit; - -connection con2; -reap; -commit; - -connection con1; -select * from t1; -commit; - -drop table t1, t2; -create table t1 (id integer, x integer) engine=INNODB; -create table t2 (b integer, a integer) engine=INNODB; -insert into t1 values(0, 0), (300, 300); -insert into t2 values(0, 0), (1, 20), (2, 30); -commit; - -connection con1; -select a,b from t2 UNION SELECT id, x from t1 FOR UPDATE; -select * from t2; -select * from t1; - -connection con2; - -# The following query should hang because con1 is locking the page -update t2 set a=2 where b = 0; -select * from t2; ---send -update t1 set x=2 where id = 0; ---sleep 2 - -connection con1; -update t1 set x=1 where id = 0; -select * from t1; -commit; - -connection con2; -reap; -commit; - -connection con1; -select * from t1; -commit; - -drop table t1, t2; - -# End of 4.1 tests diff --git a/mysql-test/t/innodb_handler.test b/mysql-test/t/innodb_handler.test deleted file mode 100644 index 18cec97af0d..00000000000 --- a/mysql-test/t/innodb_handler.test +++ /dev/null @@ -1,96 +0,0 @@ --- source include/have_innodb.inc - -# -# test of HANDLER ... 
-# - ---disable_warnings -drop table if exists t1,t2; ---enable_warnings - -create table t1 (a int, b char(10), key a(a), key b(a,b)) engine=innodb; -insert into t1 values -(17,"ddd"),(18,"eee"),(19,"fff"),(19,"yyy"), -(14,"aaa"),(15,"bbb"),(16,"ccc"),(16,"xxx"), -(20,"ggg"),(21,"hhh"),(22,"iii"); -handler t1 open as t2; -handler t2 read a first; -handler t2 read a next; -handler t2 read a next; -handler t2 read a prev; -handler t2 read a last; -handler t2 read a prev; -handler t2 read a prev; - -handler t2 read a first; -handler t2 read a prev; - -handler t2 read a last; -handler t2 read a prev; -handler t2 read a next; -handler t2 read a next; - -handler t2 read a=(15); -handler t2 read a=(16); - ---error 1070 -handler t2 read a=(19,"fff"); - -handler t2 read b=(19,"fff"); -handler t2 read b=(19,"yyy"); -handler t2 read b=(19); - ---error 1109 -handler t1 read a last; - -handler t2 read a=(11); -handler t2 read a>=(11); - -handler t2 read a=(18); -handler t2 read a>=(18); -handler t2 read a>(18); -handler t2 read a<=(18); -handler t2 read a<(18); - -handler t2 read a first limit 5; -handler t2 read a next limit 3; -handler t2 read a prev limit 10; - -handler t2 read a>=(16) limit 4; -handler t2 read a>=(16) limit 2,2; -handler t2 read a last limit 3; - -handler t2 read a=(19); -handler t2 read a=(19) where b="yyy"; - -handler t2 read first; -handler t2 read next; ---error 1064 -handler t2 read last; -handler t2 close; - -handler t1 open; -handler t1 read a next; # this used to crash as a bug#5373 -handler t1 read a next; -handler t1 close; - -handler t1 open; -handler t1 read a prev; # this used to crash as a bug#5373 -handler t1 read a prev; -handler t1 close; - -handler t1 open as t2; -handler t2 read first; -alter table t1 engine=innodb; ---error 1109 -handler t2 read first; - -drop table t1; -CREATE TABLE t1 ( no1 smallint(5) NOT NULL default '0', no2 int(10) NOT NULL default '0', PRIMARY KEY (no1,no2)) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1,274),(1,275),(2,6),(2,8),(4,1),(4,2); -HANDLER t1 OPEN; -HANDLER t1 READ `primary` = (1, 1000); -HANDLER t1 READ `primary` PREV; -DROP TABLE t1; - -# End of 4.1 tests diff --git a/mysql-test/t/innodb_mysql.test b/mysql-test/t/innodb_mysql.test index 73c642cd334..93495538141 100644 --- a/mysql-test/t/innodb_mysql.test +++ b/mysql-test/t/innodb_mysql.test @@ -1,421 +1,14 @@ +# t/innodb_mysql.test +# +# Last update: +# 2006-07-26 ML test refactored (MySQL 5.1) +# main testing code t/innodb_mysql.test -> include/mix1.inc +# + -- source include/have_innodb.inc +let $engine_type= InnoDB; +let $other_engine_type= MEMORY; +# InnoDB does support FOREIGN KEYFOREIGN KEYs +let $test_foreign_keys= 1; ---disable_warnings -drop table if exists t1,t2,t1m,t1i,t2m,t2i,t4; ---enable_warnings - -# -# Bug#17530: Incorrect key truncation on table creation caused server crash. -# -create table t1(f1 varchar(800) binary not null, key(f1)) engine = innodb - character set utf8 collate utf8_general_ci; -insert into t1 values('aaa'); -drop table t1; - -# BUG#16798: Uninitialized row buffer reads in ref-or-null optimizer -# (repeatable only w/innodb). 
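# Deleting t/innodb_handler.test above does not drop the coverage: its body
# moved into include/handler.inc, driven by the new wrapper tests. For
# reference, the core of what it exercises is the HANDLER interface; a
# stripped-down illustration (table and alias names are not from the suite):
create table t_h (a int, key(a)) engine = innodb;
insert into t_h values (1),(2),(3);
handler t_h open as h;
handler h read a first;
handler h read a next;
handler h close;
drop table t_h;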
-create table t1 ( - c_id int(11) not null default '0', - org_id int(11) default null, - unique key contacts$c_id (c_id), - key contacts$org_id (org_id) -) engine=innodb; -insert into t1 values - (2,null),(120,null),(141,null),(218,7), (128,1), - (151,2),(234,2),(236,2),(243,2),(255,2),(259,2),(232,3),(235,3),(238,3), - (246,3),(253,3),(269,3),(285,3),(291,3),(293,3),(131,4),(230,4),(231,4); - -create table t2 ( - slai_id int(11) not null default '0', - owner_tbl int(11) default null, - owner_id int(11) default null, - sla_id int(11) default null, - inc_web int(11) default null, - inc_email int(11) default null, - inc_chat int(11) default null, - inc_csr int(11) default null, - inc_total int(11) default null, - time_billed int(11) default null, - activedate timestamp null default null, - expiredate timestamp null default null, - state int(11) default null, - sla_set int(11) default null, - unique key t2$slai_id (slai_id), - key t2$owner_id (owner_id), - key t2$sla_id (sla_id) -) engine=innodb; -insert into t2(slai_id, owner_tbl, owner_id, sla_id) values - (1,3,1,1), (3,3,10,2), (4,3,3,6), (5,3,2,5), (6,3,8,3), (7,3,9,7), - (8,3,6,8), (9,3,4,9), (10,3,5,10), (11,3,11,11), (12,3,7,12); - -flush tables; -select si.slai_id -from t1 c join t2 si on - ((si.owner_tbl = 3 and si.owner_id = c.org_id) or - ( si.owner_tbl = 2 and si.owner_id = c.c_id)) -where - c.c_id = 218 and expiredate is null; - -select * from t1 where org_id is null; -select si.slai_id -from t1 c join t2 si on - ((si.owner_tbl = 3 and si.owner_id = c.org_id) or - ( si.owner_tbl = 2 and si.owner_id = c.c_id)) -where - c.c_id = 218 and expiredate is null; - -drop table t1, t2; - -# -# Bug#17212: results not sorted correctly by ORDER BY when using index -# (repeatable only w/innodb because of index props) -# -CREATE TABLE t1 (a int, b int, KEY b (b)) Engine=InnoDB; -CREATE TABLE t2 (a int, b int, PRIMARY KEY (a,b)) Engine=InnoDB; -CREATE TABLE t3 (a int, b int, c int, PRIMARY KEY (a), - UNIQUE KEY b (b,c), KEY a (a,b,c)) Engine=InnoDB; - -INSERT INTO t1 VALUES (1, 1); -INSERT INTO t1 SELECT a + 1, b + 1 FROM t1; -INSERT INTO t1 SELECT a + 2, b + 2 FROM t1; - -INSERT INTO t2 VALUES (1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8); -INSERT INTO t2 SELECT a + 1, b FROM t2; -DELETE FROM t2 WHERE a = 1 AND b < 2; - -INSERT INTO t3 VALUES (1,1,1),(2,1,2); -INSERT INTO t3 SELECT a + 2, a + 2, 3 FROM t3; -INSERT INTO t3 SELECT a + 4, a + 4, 3 FROM t3; - -# demonstrate a problem when a must-use-sort table flag -# (sort_by_table=1) is being neglected. -SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE - t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) - ORDER BY t1.b LIMIT 2; - -# demonstrate the problem described in the bug report -SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE - t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2) - ORDER BY t1.b LIMIT 5; -DROP TABLE t1, t2, t3; - - -# BUG#21077 (The testcase is not deterministic so correct execution doesn't -# prove anything) For proof one should track if sequence of ha_innodb::* func -# calls is correct. 
-CREATE TABLE `t1` (`id1` INT) ; -INSERT INTO `t1` (`id1`) VALUES (1),(5),(2); - -CREATE TABLE `t2` ( - `id1` INT, - `id2` INT NOT NULL, - `id3` INT, - `id4` INT NOT NULL, - UNIQUE (`id2`,`id4`), - KEY (`id1`) -) ENGINE=InnoDB; - -INSERT INTO `t2`(`id1`,`id2`,`id3`,`id4`) VALUES -(1,1,1,0), -(1,1,2,1), -(5,1,2,2), -(6,1,2,3), -(1,2,2,2), -(1,2,1,1); - -SELECT `id1` FROM `t1` WHERE `id1` NOT IN (SELECT `id1` FROM `t2` WHERE `id2` = 1 AND `id3` = 2); -DROP TABLE t1, t2; -# -# Bug #12882 min/max inconsistent on empty table -# - ---disable_warnings -create table t1m (a int) engine=myisam; -create table t1i (a int) engine=innodb; -create table t2m (a int) engine=myisam; -create table t2i (a int) engine=innodb; ---enable_warnings -insert into t2m values (5); -insert into t2i values (5); - -# test with MyISAM -select min(a) from t1m; -select min(7) from t1m; -select min(7) from DUAL; -explain select min(7) from t2m join t1m; -select min(7) from t2m join t1m; - -select max(a) from t1m; -select max(7) from t1m; -select max(7) from DUAL; -explain select max(7) from t2m join t1m; -select max(7) from t2m join t1m; - -select 1, min(a) from t1m where a=99; -select 1, min(a) from t1m where 1=99; -select 1, min(1) from t1m where a=99; -select 1, min(1) from t1m where 1=99; - -select 1, max(a) from t1m where a=99; -select 1, max(a) from t1m where 1=99; -select 1, max(1) from t1m where a=99; -select 1, max(1) from t1m where 1=99; - -# test with InnoDB -select min(a) from t1i; -select min(7) from t1i; -select min(7) from DUAL; -explain select min(7) from t2i join t1i; -select min(7) from t2i join t1i; - -select max(a) from t1i; -select max(7) from t1i; -select max(7) from DUAL; -explain select max(7) from t2i join t1i; -select max(7) from t2i join t1i; - -select 1, min(a) from t1i where a=99; -select 1, min(a) from t1i where 1=99; -select 1, min(1) from t1i where a=99; -select 1, min(1) from t1i where 1=99; - -select 1, max(a) from t1i where a=99; -select 1, max(a) from t1i where 1=99; -select 1, max(1) from t1i where a=99; -select 1, max(1) from t1i where 1=99; - -# mixed MyISAM/InnoDB test -explain select count(*), min(7), max(7) from t1m, t1i; -select count(*), min(7), max(7) from t1m, t1i; - -explain select count(*), min(7), max(7) from t1m, t2i; -select count(*), min(7), max(7) from t1m, t2i; - -explain select count(*), min(7), max(7) from t2m, t1i; -select count(*), min(7), max(7) from t2m, t1i; - -drop table t1m, t1i, t2m, t2i; - -# -# Bug #12672: primary key implcitly included in every innodb index -# (was part of group_min_max.test) -# - -create table t1 ( - a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' ' -); - -insert into t1 (a1, a2, b, c, d) values -('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'), -('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'), -('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'), -('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'), -('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'), -('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'), -('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'), 
-('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'), -('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'), -('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'), -('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'), -('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'), -('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'), -('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'), -('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'), -('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'), -('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'), -('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'), -('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'), -('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'), -('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'), -('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'), -('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'), -('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'), -('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'), -('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'), -('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'), -('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'), -('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'), -('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'), -('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'), -('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'); ---disable_warnings -create table t4 ( - pk_col int auto_increment primary key, a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' ' -) engine=innodb; ---enable_warnings -insert into t4 (a1, a2, b, c, d, dummy) select * from t1; - -create index idx12672_0 on t4 (a1); -create index idx12672_1 on t4 (a1,a2,b,c); -create index idx12672_2 on t4 (a1,a2,b); -analyze table t1; - -select distinct a1 from t4 where pk_col not in (1,2,3,4); - -drop table t1,t4; - -# -# Bug #6142: a problem with the empty innodb table -# (was part of group_min_max.test) -# - ---disable_warnings -create table t1 ( - a varchar(30), b varchar(30), primary key(a), key(b) -) engine=innodb; ---enable_warnings -select distinct a from t1; -drop table t1; - -# -# Bug #9798: group by 
with rollup -# (was part of group_min_max.test) -# - ---disable_warnings -create table t1(a int, key(a)) engine=innodb; ---enable_warnings -insert into t1 values(1); -select a, count(a) from t1 group by a with rollup; -drop table t1; - -# -# Bug #13293 Wrongly used index results in endless loop. -# (was part of group_min_max.test) -# -create table t1 (f1 int, f2 char(1), primary key(f1,f2)) engine=innodb; -insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d"); -alter table t1 drop primary key, add primary key (f2, f1); -explain select distinct f1 a, f1 b from t1; -explain select distinct f1, f2 from t1; -drop table t1; - -# -# Test for bug #17164: ORed FALSE blocked conversion of outer join into join -# - -CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20), - INDEX (name)) ENGINE=InnoDB; -CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11), - FOREIGN KEY (fkey) REFERENCES t2(id)) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1,'A1'),(2,'A2'),(3,'B'); -INSERT INTO t2 VALUES (1,1),(2,2),(3,2),(4,3),(5,3); - -EXPLAIN -SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id - WHERE t1.name LIKE 'A%'; - -EXPLAIN -SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id - WHERE t1.name LIKE 'A%' OR FALSE; - -DROP TABLE t1,t2; - -# -# Test of behaviour with CREATE ... SELECT -# - -set storage_engine=innodb; -CREATE TABLE t1 (a int, b int); -insert into t1 values (1,1),(1,2); ---error 1062 -CREATE TABLE t2 (primary key (a)) select * from t1; -# This should give warning -drop table if exists t2; ---error 1062 -CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1; -# This should give warning -drop table if exists t2; -CREATE TABLE t2 (a int, b int, primary key (a)); -BEGIN; -INSERT INTO t2 values(100,100); ---error 1062 -CREATE TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1; -SELECT * from t2; -ROLLBACK; -SELECT * from t2; -TRUNCATE table t2; ---error 1062 -INSERT INTO t2 select * from t1; -SELECT * from t2; -drop table t2; - -CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)); -BEGIN; -INSERT INTO t2 values(100,100); ---error 1062 -CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1; -SELECT * from t2; -COMMIT; -BEGIN; -INSERT INTO t2 values(101,101); ---error 1062 -CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) select * from t1; -SELECT * from t2; -ROLLBACK; -SELECT * from t2; -TRUNCATE table t2; ---error 1062 -INSERT INTO t2 select * from t1; -SELECT * from t2; -drop table t1,t2; - -# -# Bug#17530: Incorrect key truncation on table creation caused server crash. -# -create table t1(f1 varchar(800) binary not null, key(f1)) engine = innodb - character set utf8 collate utf8_general_ci; -insert into t1 values('aaa'); -drop table t1; - -# Fix for BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY -# UPDATE": if the row is updated, it's like a regular UPDATE: -# LAST_INSERT_ID() is not affected. 
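# Background for the LAST_INSERT_ID() remarks in this hunk: the one-argument
# form LAST_INSERT_ID(expr) returns expr and also becomes the value reported
# by the next LAST_INSERT_ID() call, which is what the
# "k=last_insert_id(k)" workaround tested below relies on. Illustrative only:
create table t_li (k int auto_increment primary key, a int unique, c int);
insert into t_li (a) values (6);
insert into t_li (a) values (6)
  on duplicate key update c = ifnull(c,0) + 1, k = last_insert_id(k);
# now reports the k of the row that was updated
select last_insert_id();
drop table t_li;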
-CREATE TABLE `t2` ( - `k` int(11) NOT NULL auto_increment, - `a` int(11) default NULL, - `c` int(11) default NULL, - PRIMARY KEY (`k`), - UNIQUE KEY `idx_1` (`a`) -) ENGINE=InnoDB; - insert into t2 ( a ) values ( 6 ) on duplicate key update c = -ifnull( c, -0 ) + 1; -insert into t2 ( a ) values ( 7 ) on duplicate key update c = -ifnull( c, -0 ) + 1; -select last_insert_id(); -select * from t2; -insert into t2 ( a ) values ( 6 ) on duplicate key update c = -ifnull( c, -0 ) + 1; -select last_insert_id(); -# test again when last_insert_id() is 0 initially -select last_insert_id(0); -insert into t2 ( a ) values ( 6 ) on duplicate key update c = -ifnull( c, -0 ) + 1; -select last_insert_id(); -select * from t2; - -# Test of LAST_INSERT_ID() when autogenerated will fail: -# last_insert_id() should not change -insert ignore into t2 values (null,6,1),(10,8,1); -select last_insert_id(); -# First and second autogenerated will fail, last_insert_id() should -# point to third -insert ignore into t2 values (null,6,1),(null,8,1),(null,15,1),(null,20,1); -select last_insert_id(); -select * from t2; - -# Test of the workaround which enables people to know the id of the -# updated row in INSERT ON DUPLICATE KEY UPDATE, by using -# LAST_INSERT_ID(autoinc_col) in the UPDATE clause. - -insert into t2 ( a ) values ( 6 ) on duplicate key update c = -ifnull( c, -0 ) + 1, k=last_insert_id(k); -select last_insert_id(); -select * from t2; - -drop table t2; +--source include/mix1.inc diff --git a/mysql-test/t/insert_update.test b/mysql-test/t/insert_update.test index eda768be1bc..b3813864464 100644 --- a/mysql-test/t/insert_update.test +++ b/mysql-test/t/insert_update.test @@ -115,4 +115,27 @@ INSERT INTO t1 ( a ) SELECT 0 ON DUPLICATE KEY UPDATE a = a + VALUES (a) ; DROP TABLE t1; +# +# Bug#21555: incorrect behavior with INSERT ... ON DUPL KEY UPDATE and VALUES +# + + # End of 4.1 tests +CREATE TABLE t1 +( + a BIGINT UNSIGNED, + b BIGINT UNSIGNED, + PRIMARY KEY (a) +); + +INSERT INTO t1 VALUES (45, 1) ON DUPLICATE KEY UPDATE b = + IF(VALUES(b) > t1.b, VALUES(b), t1.b); +SELECT * FROM t1; +INSERT INTO t1 VALUES (45, 2) ON DUPLICATE KEY UPDATE b = + IF(VALUES(b) > t1.b, VALUES(b), t1.b); +SELECT * FROM t1; +INSERT INTO t1 VALUES (45, 1) ON DUPLICATE KEY UPDATE b = + IF(VALUES(b) > t1.b, VALUES(b), t1.b); +SELECT * FROM t1; + +DROP TABLE t1; diff --git a/mysql-test/t/join.test b/mysql-test/t/join.test index 98bfb33b1e6..d0005f3b8f7 100644 --- a/mysql-test/t/join.test +++ b/mysql-test/t/join.test @@ -609,3 +609,24 @@ explain select * from t2,t3 where t2.a < 200 and t2.b=t3.b; drop table t1, t2, t3; +# BUG#14940 {Wrong query plan is chosen because of odd results of +# prev_record_reads() function } +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t2 (a int, b int, primary key(a)); +insert into t2 select @v:=A.a+10*B.a, @v from t1 A, t1 B; + +explain select * from t1; +show status like '%cost%'; +select 'The cost of accessing t1 (dont care if it changes' '^'; + +select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z; + +explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b; +show status like '%cost%'; +select '^^: The above should be ~= 20 + cost(select * from t1). 
Value less than 20 is an error' Z; + + + +drop table t1, t2; diff --git a/mysql-test/t/kill_n_check.sh b/mysql-test/t/kill_n_check.sh index 64cc869d1ec..a54fb6ef8bb 100755 --- a/mysql-test/t/kill_n_check.sh +++ b/mysql-test/t/kill_n_check.sh @@ -49,7 +49,7 @@ if [ -z "$pid_path" ]; then exit 0 fi -if [ $expected_result = 'killed' -a ! -r "$pid_path" ]; then +if [ ! -r "$pid_path" ]; then echo "Error: PID file ($pid_path) does not exist." exit 0 fi diff --git a/mysql-test/t/log_tables.test b/mysql-test/t/log_tables.test index 97c83310b4d..d9e17129799 100644 --- a/mysql-test/t/log_tables.test +++ b/mysql-test/t/log_tables.test @@ -178,21 +178,21 @@ select * from mysql.slow_log; # check that appropriate error messages are given when one attempts to alter # or drop a log tables, while corresponding logs are enabled ---error ER_CANT_ALTER_LOG_TABLE +--error ER_BAD_LOG_STATEMENT alter table mysql.general_log engine=myisam; ---error ER_CANT_ALTER_LOG_TABLE +--error ER_BAD_LOG_STATEMENT alter table mysql.slow_log engine=myisam; ---error ER_CANT_DROP_LOG_TABLE +--error ER_BAD_LOG_STATEMENT drop table mysql.general_log; ---error ER_CANT_DROP_LOG_TABLE +--error ER_BAD_LOG_STATEMENT drop table mysql.slow_log; # check that one can alter log tables to MyISAM set global general_log='OFF'; # cannot convert another log table ---error ER_CANT_ALTER_LOG_TABLE +--error ER_BAD_LOG_STATEMENT alter table mysql.slow_log engine=myisam; # alter both tables @@ -252,13 +252,13 @@ set global general_log='OFF'; set global slow_query_log='OFF'; # check that alter table doesn't work for other engines ---error ER_BAD_LOG_ENGINE +--error ER_UNSUPORTED_LOG_ENGINE alter table mysql.slow_log engine=ndb; ---error ER_BAD_LOG_ENGINE +--error ER_UNSUPORTED_LOG_ENGINE alter table mysql.slow_log engine=innodb; ---error ER_BAD_LOG_ENGINE +--error ER_UNSUPORTED_LOG_ENGINE alter table mysql.slow_log engine=archive; ---error ER_BAD_LOG_ENGINE +--error ER_UNSUPORTED_LOG_ENGINE alter table mysql.slow_log engine=blackhole; drop table mysql.slow_log; diff --git a/mysql-test/t/lowercase_fs_off.test b/mysql-test/t/lowercase_fs_off.test index 7f7b573e7ee..883315994fe 100644 --- a/mysql-test/t/lowercase_fs_off.test +++ b/mysql-test/t/lowercase_fs_off.test @@ -3,6 +3,7 @@ # i.e. 
lower_case_filesystem=OFF # -- source include/have_case_sensitive_file_system.inc +-- source include/not_embedded.inc connect (master,localhost,root,,); connection master; diff --git a/mysql-test/t/merge.test b/mysql-test/t/merge.test index f8bed9c92cd..5498468d902 100644 --- a/mysql-test/t/merge.test +++ b/mysql-test/t/merge.test @@ -124,6 +124,7 @@ drop table t3,t2,t1; # Test table without unions # create table t1 (a int not null, key(a)) engine=merge; +--error 1030 select * from t1; drop table t1; @@ -381,6 +382,31 @@ select * from t3; check table t1, t2; drop table t1, t2, t3; +# +# BUG#21617 - crash when selecting from merge table with inconsistent +# indexes +# +CREATE TABLE t1(a INT); +INSERT INTO t1 VALUES(2),(1); +CREATE TABLE t2(a INT, KEY(a)) ENGINE=MERGE UNION=(t1); +--error 1030 +SELECT * FROM t2 WHERE a=2; +DROP TABLE t1, t2; + +# +# BUG#10974 - No error message if merge table based on union of innodb, +# memory +# +CREATE TABLE t1(a INT) ENGINE=MEMORY; +CREATE TABLE t2(a INT) ENGINE=MERGE UNION=(t1); +--error 1168 +SELECT * FROM t2; +DROP TABLE t1, t2; +CREATE TABLE t2(a INT) ENGINE=MERGE UNION=(t3); +--error 1168 +SELECT * FROM t2; +DROP TABLE t2; + # End of 4.1 tests # diff --git a/mysql-test/t/mix2_myisam.test b/mysql-test/t/mix2_myisam.test new file mode 100644 index 00000000000..afb3caca0ea --- /dev/null +++ b/mysql-test/t/mix2_myisam.test @@ -0,0 +1,26 @@ +# t/mix2_myisam.test +# +# Last update: 2006-07-26 ML create this test as derivate from innodb.test +# + +let $engine_type= MyISAM; +let $other_engine_type= MEMORY; +# There are unfortunately only MyISAM and MEMORY all time available +# Therefore use here MEMORY again. +let $other_engine_type1= MEMORY; +let $other_non_trans_engine_type= MEMORY; +let $other_non_live_chks_engine_type= MEMORY; +# Therefore use here MyISAM again. +let $other_live_chks_engine_type= MyISAM; +# MyISAM does not support transactions +let $test_transactions= 0; +# MyISAM does not support FOREIGN KEYFOREIGN KEYs +let $test_foreign_keys= 0; +# MyISAM supports fulltext queries +let $fulltext_query_unsupported= 0; +# MyISAM internal autoincrement values are updated during updates +let $no_autoinc_update= 0; +# MyISAM supports keys on spatial data types +let $no_spatial_key= 0; + +-- source include/mix2.inc diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test index 69d7ee51d81..9f698f40536 100644 --- a/mysql-test/t/myisam.test +++ b/mysql-test/t/myisam.test @@ -488,6 +488,42 @@ insert into t1 values ('a'), ('b'); select c1 from t1 order by c1 limit 1; drop table t1; +# +# Bug #14400 Join could miss concurrently inserted row +# +# Partial key. +create table t1 (a int not null, primary key(a)); +create table t2 (a int not null, b int not null, primary key(a,b)); +insert into t1 values (1),(2),(3),(4),(5),(6); +insert into t2 values (1,1),(2,1); +lock tables t1 read local, t2 read local; +select straight_join * from t1,t2 force index (primary) where t1.a=t2.a; +connect (root,localhost,root,,test,$MASTER_MYPORT,master.sock); +insert into t2 values(2,0); +disconnect root; +connection default; +select straight_join * from t1,t2 force index (primary) where t1.a=t2.a; +drop table t1,t2; +# +# Full key. 
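# Between the partial-key case above and the full-key case that follows, note
# the connection handling both rely on: the default connection holds the
# READ LOCAL locks, a second connection performs the concurrent insert, and
# the default connection then repeats the join. The bare pattern, with an
# illustrative connection name:
connect (writer,localhost,root,,);
connection writer;
# concurrent INSERTs would be issued here
disconnect writer;
connection default;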
+CREATE TABLE t1 (c1 varchar(250) NOT NULL); +CREATE TABLE t2 (c1 varchar(250) NOT NULL, PRIMARY KEY (c1)); +INSERT INTO t1 VALUES ('test000001'), ('test000002'), ('test000003'); +INSERT INTO t2 VALUES ('test000002'), ('test000003'), ('test000004'); +LOCK TABLES t1 READ LOCAL, t2 READ LOCAL; +SELECT t1.c1 AS t1c1, t2.c1 AS t2c1 FROM t1, t2 + WHERE t1.c1 = t2.c1 HAVING t1c1 != t2c1; +connect (con1,localhost,root,,); +connection con1; +INSERT INTO t2 VALUES ('test000001'), ('test000005'); +disconnect con1; +connection default; +SELECT t1.c1 AS t1c1, t2.c1 AS t2c1 FROM t1, t2 + WHERE t1.c1 = t2.c1 HAVING t1c1 != t2c1; +DROP TABLE t1,t2; + +# End of 4.0 tests + # # Test RTREE index # diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test index 57c4854bf06..9e3eabf474b 100644 --- a/mysql-test/t/mysql.test +++ b/mysql-test/t/mysql.test @@ -85,7 +85,6 @@ drop table t1; --exec $MYSQL test -e "select unhex('zz');" --exec $MYSQL -t test -e "select unhex('zz');" -# # Bug#19265 describe command does not work from mysql prompt # diff --git a/mysql-test/t/mysqlbinlog.test b/mysql-test/t/mysqlbinlog.test index 178816cbf43..8e2035f45b0 100644 --- a/mysql-test/t/mysqlbinlog.test +++ b/mysql-test/t/mysqlbinlog.test @@ -1,6 +1,6 @@ # We are using .opt file since we need small binlog size # TODO: Need to look at making a row based version once the new row based client is completed. [jbm] --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc # Embedded server doesn't support binlogging -- source include/not_embedded.inc diff --git a/mysql-test/t/mysqlbinlog2.test b/mysql-test/t/mysqlbinlog2.test index 91da502da02..69cd5d90453 100644 --- a/mysql-test/t/mysqlbinlog2.test +++ b/mysql-test/t/mysqlbinlog2.test @@ -2,7 +2,7 @@ # and a few others. # TODO: Need to look at making row based version once new binlog client is complete. 
--- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc # Embedded server doesn't support binlogging -- source include/not_embedded.inc diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 1334d5e57c8..327b071afeb 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -16,9 +16,9 @@ INSERT INTO t1 VALUES (1), (2); --exec $MYSQL_DUMP --skip-create --skip-comments -X test t1 DROP TABLE t1; -# -# Bug #2005 -# +--echo # +--echo # Bug #2005 +--echo # CREATE TABLE t1 (a decimal(64, 20)); INSERT INTO t1 VALUES ("1234567890123456789012345678901234567890"), @@ -26,9 +26,9 @@ INSERT INTO t1 VALUES ("1234567890123456789012345678901234567890"), --exec $MYSQL_DUMP --compact test t1 DROP TABLE t1; -# -# Bug #2055 -# +--echo # +--echo # Bug #2055 +--echo # CREATE TABLE t1 (a double); INSERT INTO t1 VALUES ('-9e999999'); @@ -38,9 +38,9 @@ INSERT INTO t1 VALUES ('-9e999999'); --exec $MYSQL_DUMP --compact test t1 DROP TABLE t1; -# -# Bug #3361 mysqldump quotes DECIMAL values inconsistently -# +--echo # +--echo # Bug #3361 mysqldump quotes DECIMAL values inconsistently +--echo # CREATE TABLE t1 (a DECIMAL(10,5), b FLOAT); @@ -69,28 +69,28 @@ INSERT INTO t1 VALUES (1, "test", "tes"), (2, "TEST", "TES"); --exec $MYSQL_DUMP --skip-create --compact -X test t1 DROP TABLE t1; -# -# Bug #1707 -# +--echo # +--echo # Bug #1707 +--echo # CREATE TABLE t1 (`a"b"` char(2)); INSERT INTO t1 VALUES ("1\""), ("\"2"); --exec $MYSQL_DUMP --compact --skip-create -X test t1 DROP TABLE t1; -# -# Bug #1994 -# Bug #4261 -# +--echo # +--echo # Bug #1994 +--echo # Bug #4261 +--echo # CREATE TABLE t1 (a VARCHAR(255)) DEFAULT CHARSET koi8r; INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5'), (NULL); --exec $MYSQL_DUMP --skip-comments --skip-extended-insert test t1 DROP TABLE t1; -# -# Bug #2634 -# +--echo # +--echo # Bug #2634 +--echo # CREATE TABLE t1 (a int) ENGINE=MYISAM; INSERT INTO t1 VALUES (1), (2); @@ -98,17 +98,17 @@ INSERT INTO t1 VALUES (1), (2); --exec $MYSQL_DUMP --skip-comments --compatible=mysql323 test t1 DROP TABLE t1; -# -# Bug #2592 'mysqldump doesn't quote "tricky" names correctly' -# +--echo # +--echo # Bug #2592 'mysqldump doesn't quote "tricky" names correctly' +--echo # create table ```a` (i int); --exec $MYSQL_DUMP --compact test drop table ```a`; -# -# Bug #2591 "mysqldump quotes names inconsistently" -# +--echo # +--echo # Bug #2591 "mysqldump quotes names inconsistently" +--echo # create table t1(a int); --exec $MYSQL_DUMP --comments=0 test @@ -119,9 +119,9 @@ set global sql_mode='ANSI_QUOTES'; set global sql_mode=''; drop table t1; -# -# Bug #2705 'mysqldump --tab extra output' -# +--echo # +--echo # Bug #2705 'mysqldump --tab extra output' +--echo # create table t1(a int); insert into t1 values (1),(2),(3); @@ -135,9 +135,9 @@ insert into t1 values (1),(2),(3); --exec rm $MYSQLTEST_VARDIR/tmp/t1.txt drop table t1; -# -# Bug #6101: create database problem -# +--echo # +--echo # Bug #6101: create database problem +--echo # --exec $MYSQL_DUMP --skip-comments --databases test @@ -145,32 +145,34 @@ create database mysqldump_test_db character set latin2 collate latin2_bin; --exec $MYSQL_DUMP --skip-comments --databases mysqldump_test_db drop database mysqldump_test_db; -# -# Bug #7020 -# Check that we don't dump in UTF8 in compatible mode by default, -# but use the default compiled values, or the values given in -# --default-character-set=xxx. However, we should dump in UTF8 -# if it is explicitely set. 
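# The rewrites in this hunk replace plain '#' comments with --echo lines.
# A leading '#' is only a comment in the .test source, while --echo copies
# the text into the recorded .result file, so the bug references become part
# of the checked output. The difference in one pair of lines:
# this line never reaches the result file
--echo # this line is written verbatim into the result file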
+--echo # +--echo # Bug #7020 +--echo # Check that we don't dump in UTF8 in compatible mode by default, +--echo # but use the default compiled values, or the values given in +--echo # --default-character-set=xxx. However, we should dump in UTF8 +--echo # if it is explicitely set. CREATE TABLE t1 (a CHAR(10)); INSERT INTO t1 VALUES (_latin1 'ÄÖÜß'); --exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments test t1 -# -# Bug#8063: make test mysqldump [ fail ] -# We cannot tes this command because its output depends -# on --default-character-set incompiled into "mysqldump" program. -# If the future we can move this command into a separate test with -# checking that "mysqldump" is compiled with "latin1" -# + +--echo # +--echo # Bug#8063: make test mysqldump [ fail ] +--echo # We cannot tes this command because its output depends +--echo # on --default-character-set incompiled into "mysqldump" program. +--echo # If the future we can move this command into a separate test with +--echo # checking that "mysqldump" is compiled with "latin1" +--echo # + #--exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --compatible=mysql323 test t1 --exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --compatible=mysql323 --default-character-set=cp850 test t1 --exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --default-character-set=cp850 --compatible=mysql323 test t1 --exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --default-character-set=utf8 --compatible=mysql323 test t1 DROP TABLE t1; -# -# WL #2319: Exclude Tables from dump -# +--echo # +--echo # WL #2319: Exclude Tables from dump +--echo # CREATE TABLE t1 (a int); CREATE TABLE t2 (a int); @@ -180,18 +182,18 @@ INSERT INTO t2 VALUES (4),(5),(6); DROP TABLE t1; DROP TABLE t2; -# -# Bug #8830 -# +--echo # +--echo # Bug #8830 +--echo # CREATE TABLE t1 (`b` blob); INSERT INTO `t1` VALUES (0x602010000280100005E71A); --exec $MYSQL_DUMP --skip-extended-insert --hex-blob test --skip-comments t1 DROP TABLE t1; -# -# Test for --insert-ignore -# +--echo # +--echo # Test for --insert-ignore +--echo # CREATE TABLE t1 (a int); INSERT INTO t1 VALUES (1),(2),(3); @@ -200,10 +202,10 @@ INSERT INTO t1 VALUES (4),(5),(6); --exec $MYSQL_DUMP --skip-comments --insert-ignore --delayed-insert test t1 DROP TABLE t1; -# -# Bug #10286: mysqldump -c crashes on table that has many fields with long -# names -# +--echo # +--echo # Bug #10286: mysqldump -c crashes on table that has many fields with long +--echo # names +--echo # create table t1 ( F_c4ca4238a0b923820dcc509a6f75849b int, F_c81e728d9d4c2f636f067f89cc14862c int, @@ -539,18 +541,18 @@ insert into t1 (F_8d3bba7425e7c98c50f52ca1b52d3735) values (1); --exec $MYSQL_DUMP --skip-comments -c test drop table t1; -# -# Test for --add-drop-database -# +--echo # +--echo # Test for --add-drop-database +--echo # CREATE TABLE t1 (a int); INSERT INTO t1 VALUES (1),(2),(3); --exec $MYSQL_DUMP --add-drop-database --skip-comments --databases test DROP TABLE t1; -# -# Bug #9558 mysqldump --no-data db t1 t2 format still dumps data -# +--echo # +--echo # Bug #9558 mysqldump --no-data db t1 t2 format still dumps data +--echo # CREATE DATABASE mysqldump_test_db; USE mysqldump_test_db; @@ -565,11 +567,11 @@ INSERT INTO t2 VALUES (1), (2); DROP TABLE t1, t2; DROP DATABASE mysqldump_test_db; -# -# Testing with tables and databases that don't exists -# or contains illegal characters -# (Bug #9358 mysqldump crashes if tablename starts with \) -# +--echo # +--echo # Testing 
with tables and databases that don't exists +--echo # or contains illegal characters +--echo # (Bug #9358 mysqldump crashes if tablename starts with \) +--echo # create database mysqldump_test_db; use mysqldump_test_db; create table t1(a varchar(30) primary key, b int not null); @@ -629,9 +631,9 @@ drop database mysqldump_test_db; use test; -# -# Bug #9657 mysqldump xml ( -x ) does not format NULL fields correctly -# +--echo # +--echo # Bug #9657 mysqldump xml ( -x ) does not format NULL fields correctly +--echo # create table t1 (a int(10)); create table t2 (pk int primary key auto_increment, @@ -641,9 +643,10 @@ insert into t2 (a, b) values (NULL, NULL),(10, NULL),(NULL, "twenty"),(30, "thir --exec $MYSQL_DUMP --skip-comments --xml --no-create-info test drop table t1, t2; -# -# BUG #12123 -# +--echo # +--echo # BUG #12123 +--echo # + create table t1 (a text character set utf8, b text character set latin1); insert t1 values (0x4F736E616272C3BC636B, 0x4BF66C6E); select * from t1; @@ -654,15 +657,16 @@ select * from t1; drop table t1; -# -# BUG#15328 Segmentation fault occured if my.cnf is invalid for escape sequence -# +--echo # +--echo # BUG#15328 Segmentation fault occured if my.cnf is invalid for escape sequence +--echo # --exec $MYSQL_MY_PRINT_DEFAULTS --config-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump -# -# BUG #19025 mysqldump doesn't correctly dump "auto_increment = [int]" -# +--echo # +--echo # BUG #19025 mysqldump doesn't correctly dump "auto_increment = [int]" +--echo # + create table `t1` ( t1_name varchar(255) default null, t1_id int(10) unsigned not null auto_increment, @@ -689,9 +693,9 @@ show create table `t1`; drop table `t1`; -# -# Bug #18536: wrong table order -# +--echo # +--echo # Bug #18536: wrong table order +--echo # create table t1(a int); create table t2(a int); @@ -700,9 +704,10 @@ create table t3(a int); --exec $MYSQL_DUMP --skip-comments --force --no-data test t3 t1 non_existing t2 drop table t1, t2, t3; -# -# Bug #21288: mysqldump segmentation fault when using --where -# +--echo # +--echo # Bug #21288: mysqldump segmentation fault when using --where +--echo # + create table t1 (a int); --error 2 --exec $MYSQL_DUMP --skip-comments --force test t1 --where='xx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' 2>&1 @@ -710,9 +715,9 @@ drop table t1; --echo End of 4.1 tests -# -# Bug #10213 mysqldump crashes when dumping VIEWs(on MacOS X) -# +--echo # +--echo # Bug #10213 mysqldump crashes when dumping VIEWs(on MacOS X) +--echo # create database db1; use db1; @@ -734,9 +739,9 @@ drop view v2; drop database db1; use test; -# -# Bug 10713 mysqldump includes database in create view and referenced tables -# +--echo # +--echo # Bug 10713 mysqldump includes database in create view and referenced tables +--echo # # create table and views in db2 create database db2; @@ -770,18 +775,19 @@ drop table t1, t2; drop database db1; use test; -# -# dump of view -# +--echo # +--echo # dump of view +--echo # + create table t1(a int); create view v1 as select * from t1; --exec $MYSQL_DUMP --skip-comments test drop view v1; drop table t1; -# -# Bug #10213 mysqldump crashes when dumping VIEWs(on MacOS X) -# +--echo # +--echo # Bug #10213 mysqldump crashes when dumping VIEWs(on MacOS X) +--echo # create database mysqldump_test_db; use mysqldump_test_db; @@ -803,18 +809,18 @@ drop view v2; drop database mysqldump_test_db; use test; -# -# Bug #9756 -# +--echo # +--echo # Bug #9756 +--echo # CREATE TABLE t1 (a char(10)); INSERT INTO t1 VALUES ('\''); --exec $MYSQL_DUMP 
--skip-comments test t1 DROP TABLE t1; -# -# Bug #10927 mysqldump: Can't reload dump with view that consist of other view -# +--echo # +--echo # Bug #10927 mysqldump: Can't reload dump with view that consist of other view +--echo # create table t1(a int, b int, c varchar(30)); @@ -834,9 +840,9 @@ select v3.a from v3, v1 where v1.a=v3.a and v3.b=3 limit 1; drop view v1, v2, v3; drop table t1; -# -# Test for dumping triggers -# +--echo # +--echo # Test for dumping triggers +--echo # CREATE TABLE t1 (a int, b bigint default NULL); CREATE TABLE t2 (a int); @@ -884,9 +890,9 @@ show tables; show triggers; DROP TABLE t1, t2; -# -# Bugs #9136, #12917: problems with --defaults-extra-file option -# +--echo # +--echo # Bugs #9136, #12917: problems with --defaults-extra-file option +--echo # --system echo '[mysqltest1]' > $MYSQLTEST_VARDIR/tmp/tmp.cnf --system echo 'port=1234' >> $MYSQLTEST_VARDIR/tmp/tmp.cnf @@ -894,9 +900,9 @@ DROP TABLE t1, t2; --exec $MYSQL_MY_PRINT_DEFAULTS -e $MYSQLTEST_VARDIR/tmp/tmp.cnf mysqltest1 mysqltest1 --system rm $MYSQLTEST_VARDIR/tmp/tmp.cnf -# -# Test of fix to BUG 12597 -# +--echo # +--echo # Test of fix to BUG 12597 +--echo # DROP TABLE IF EXISTS `test1`; CREATE TABLE `test1` ( @@ -932,9 +938,9 @@ DROP TRIGGER testref; DROP TABLE test1; DROP TABLE test2; -# -# BUG#9056 - mysqldump does not dump routines -# +--echo # +--echo # BUG#9056 - mysqldump does not dump routines +--echo # --disable_warnings DROP TABLE IF EXISTS t1; @@ -981,9 +987,10 @@ DROP PROCEDURE bug9056_proc2; DROP PROCEDURE `a'b`; drop table t1; -# -# BUG# 13052 - mysqldump timestamp reloads broken -# +--echo # +--echo # BUG# 13052 - mysqldump timestamp reloads broken +--echo # + --disable_warnings drop table if exists t1; --enable_warnings @@ -1003,9 +1010,10 @@ drop table t1; set global time_zone=default; set time_zone=default; -# -# Test of fix to BUG 13146 - ansi quotes break loading of triggers -# +--echo # +--echo # Test of fix to BUG 13146 - ansi quotes break loading of triggers +--echo # + --disable_warnings DROP TABLE IF EXISTS `t1 test`; DROP TABLE IF EXISTS `t2 test`; @@ -1036,9 +1044,9 @@ DROP TRIGGER `test trig`; DROP TABLE `t1 test`; DROP TABLE `t2 test`; -# -# BUG# 12838 mysqldump -x with views exits with error -# +--echo # +--echo # BUG# 12838 mysqldump -x with views exits with error +--echo # --disable_warnings drop table if exists t1; @@ -1059,10 +1067,10 @@ drop view v0; drop view v1; drop table t1; -# -# BUG#14554 - mysqldump does not separate words "ROW" and "BEGIN" -# for tables with trigger created in the IGNORE_SPACE sql mode. -# +--echo # +--echo # BUG#14554 - mysqldump does not separate words "ROW" and "BEGIN" +--echo # for tables with trigger created in the IGNORE_SPACE sql mode. 
+--echo # SET @old_sql_mode = @@SQL_MODE; SET SQL_MODE = IGNORE_SPACE; @@ -1083,18 +1091,19 @@ SET SQL_MODE = @old_sql_mode; DROP TRIGGER tr1; DROP TABLE t1; -# -# Bug #13318: Bad result with empty field and --hex-blob -# +--echo # +--echo # Bug #13318: Bad result with empty field and --hex-blob +--echo # + create table t1 (a binary(1), b blob); insert into t1 values ('',''); --exec $MYSQL_DUMP --skip-comments --skip-extended-insert --hex-blob test t1 --exec $MYSQL_DUMP --skip-comments --hex-blob test t1 drop table t1; -# -# Bug 14871 Invalid view dump output -# +--echo # +--echo # Bug 14871 Invalid view dump output +--echo # create table t1 (a int); insert into t1 values (289), (298), (234), (456), (789); @@ -1121,9 +1130,9 @@ select * from v3 order by a; drop table t1; drop view v1, v2, v3, v4, v5; -# -# Bug #16878 dump of trigger -# +--echo # +--echo # Bug #16878 dump of trigger +--echo # create table t1 (a int, created datetime); create table t2 (b int, created datetime); @@ -1151,11 +1160,10 @@ drop trigger tr1; drop trigger tr2; drop table t1, t2; +--echo # +--echo # Bug#18462 mysqldump does not dump view structures correctly +--echo # -# -# Bug#18462 mysqldump does not dump view structures correctly -# -# create table t (qty int, price int); insert into t values(3, 50); insert into t values(5, 51); @@ -1172,11 +1180,11 @@ drop view v2; drop table t; -# -# Bug#14857 Reading dump files with single statement stored routines fails. -# fixed by patch for bug#16878 -# -# +--echo # +--echo # Bug#14857 Reading dump files with single statement stored routines fails. +--echo # fixed by patch for bug#16878 +--echo # + DELIMITER |; /*!50003 CREATE FUNCTION `f`() RETURNS bigint(20) return 42 */| @@ -1188,10 +1196,10 @@ show create procedure p; drop function f; drop procedure p; -# -# Bug #17371 Unable to dump a schema with invalid views -# -# +--echo # +--echo # Bug #17371 Unable to dump a schema with invalid views +--echo # + create table t1 ( id serial ); create view v1 as select * from t1; drop table t1; @@ -1202,9 +1210,9 @@ drop table t1; --echo } mysqldump drop view v1; -# BUG#17201 Spurious 'DROP DATABASE' in output, -# also confusion between tables and views. -# Example code from Markus Popp +--echo # BUG#17201 Spurious 'DROP DATABASE' in output, +--echo # also confusion between tables and views. 
+--echo # Example code from Markus Popp create database mysqldump_test_db; use mysqldump_test_db; @@ -1219,9 +1227,10 @@ drop view v1; drop table t1; drop database mysqldump_test_db; -# -# Bug21014 Segmentation fault of mysqldump on view -# +--echo # +--echo # Bug21014 Segmentation fault of mysqldump on view +--echo # + create database mysqldump_tables; use mysqldump_tables; create table basetable ( id serial, tag varchar(64) ); @@ -1237,9 +1246,10 @@ drop database mysqldump_views; drop table mysqldump_tables.basetable; drop database mysqldump_tables; -# -# Bug20221 Dumping of multiple databases containing view(s) yields maleformed dumps -# +--echo # +--echo # Bug20221 Dumping of multiple databases containing view(s) yields maleformed dumps +--echo # + create database mysqldump_dba; use mysqldump_dba; create table t1 (f1 int, f2 int); @@ -1277,9 +1287,9 @@ drop table t1; drop database mysqldump_dbb; use test; -# -# Bug#21215 mysqldump creating incomplete backups without warning -# +--echo # +--echo # Bug#21215 mysqldump creating incomplete backups without warning +--echo # # Create user without sufficient privs to perform the requested operation create user mysqltest_1@localhost; @@ -1321,12 +1331,12 @@ grant REPLICATION CLIENT on *.* to mysqltest_1@localhost; drop table t1; drop user mysqltest_1@localhost; -# -# Bug #21527 mysqldump incorrectly tries to LOCK TABLES on the -# information_schema database. -# -# Bug #21424 mysqldump failing to export/import views -# +--echo # +--echo # Bug #21527 mysqldump incorrectly tries to LOCK TABLES on the +--echo # information_schema database. +--echo # +--echo # Bug #21424 mysqldump failing to export/import views +--echo # # Do as root connect (root,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK); @@ -1360,8 +1370,10 @@ drop user myDB_User; drop database mysqldump_myDB; flush privileges; -# Bug #21424 continues from here. -# Restore. Flush Privileges test ends. +--echo # Bug #21424 continues from here. +--echo # Restore. Flush Privileges test ends. +--echo # + --exec $MYSQL < $MYSQLTEST_VARDIR/tmp/bug21527.sql; # Do as a user @@ -1383,7 +1395,28 @@ drop user myDB_User; drop database mysqldump_myDB; use test; ---echo End of 5.0 tests +--echo # +--echo # BUG#13926: --order-by-primary fails if PKEY contains quote character +--echo # + +--disable_warnings +DROP TABLE IF EXISTS `t1`; +CREATE TABLE `t1` ( + `a b` INT, + `c"d` INT, + `e``f` INT, + PRIMARY KEY (`a b`, `c"d`, `e``f`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +insert into t1 values (0815, 4711, 2006); + +--exec $MYSQL_DUMP --skip-comments --compatible=ansi --order-by-primary test t1 +--exec $MYSQL_DUMP --skip-comments --order-by-primary test t1 +DROP TABLE `t1`; +--enable_warnings + +--echo # +--echo # End of 5.0 tests +--echo # # Check new --replace option @@ -1438,9 +1471,10 @@ drop table t2; drop table words2; -# -# BUG# 16853: mysqldump doesn't show events -# +--echo # +--echo # BUG# 16853: mysqldump doesn't show events +--echo # + create database first; use first; set time_zone = 'UTC'; @@ -1476,11 +1510,11 @@ drop database third; set time_zone = 'SYSTEM'; use test; -##### -# -# BUG#17201 Spurious 'DROP DATABASE' in output, -# also confusion between tables and views. -# Example code from Markus Popp +--echo # +--echo # BUG#17201 Spurious 'DROP DATABASE' in output, +--echo # also confusion between tables and views. 
+--echo # Example code from Markus Popp +--echo # create database mysqldump_test_db; use mysqldump_test_db; @@ -1495,4 +1529,6 @@ drop view v1; drop table t1; drop database mysqldump_test_db; ---echo End of 5.1 tests +--echo # +--echo # End of 5.1 tests +--echo # diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test index 04e0532851a..25f8d63d0b2 100644 --- a/mysql-test/t/mysqltest.test +++ b/mysql-test/t/mysqltest.test @@ -507,9 +507,21 @@ echo $where2; let $where3=a long $where variable content; echo $where3; +let $where3=a long \\\$where variable content; +echo $where3; + let $novar1= $novar2; echo $novar1; +let $cat=na; +let $cat=ba$cat$cat; +echo banana = $cat; + +# ba\$cat\$cat should have been sufficient. +# ba\\\$cat\\\$cat -> ba\$cat\$cat -> ba$cat$cat -> banana +# Magnus' upcoming patch will fix the missing second interpretation. +let $cat=ba\\\$cat\\\$cat; +echo Not a banana: $cat; # Test illegal uses of let diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index e86420f4364..ac79dbc3434 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -988,6 +988,58 @@ execute stmt; drop temporary table t1; deallocate prepare stmt; +# +# BUG#22085: Crash on the execution of a prepared statement that +# uses an IN subquery with aggregate functions in HAVING +# + +CREATE TABLE t1( + ID int(10) unsigned NOT NULL auto_increment, + Member_ID varchar(15) NOT NULL default '', + Action varchar(12) NOT NULL, + Action_Date datetime NOT NULL, + Track varchar(15) default NULL, + User varchar(12) default NULL, + Date_Updated timestamp NOT NULL default CURRENT_TIMESTAMP on update + CURRENT_TIMESTAMP, + PRIMARY KEY (ID), + KEY Action (Action), + KEY Action_Date (Action_Date) +); + +INSERT INTO t1(Member_ID, Action, Action_Date, Track) VALUES + ('111111', 'Disenrolled', '2006-03-01', 'CAD' ), + ('111111', 'Enrolled', '2006-03-01', 'CAD' ), + ('111111', 'Disenrolled', '2006-07-03', 'CAD' ), + ('222222', 'Enrolled', '2006-03-07', 'CAD' ), + ('222222', 'Enrolled', '2006-03-07', 'CHF' ), + ('222222', 'Disenrolled', '2006-08-02', 'CHF' ), + ('333333', 'Enrolled', '2006-03-01', 'CAD' ), + ('333333', 'Disenrolled', '2006-03-01', 'CAD' ), + ('444444', 'Enrolled', '2006-03-01', 'CAD' ), + ('555555', 'Disenrolled', '2006-03-01', 'CAD' ), + ('555555', 'Enrolled', '2006-07-21', 'CAD' ), + ('555555', 'Disenrolled', '2006-03-01', 'CHF' ), + ('666666', 'Enrolled', '2006-02-09', 'CAD' ), + ('666666', 'Enrolled', '2006-05-12', 'CHF' ), + ('666666', 'Disenrolled', '2006-06-01', 'CAD' ); + +PREPARE STMT FROM +"SELECT GROUP_CONCAT(Track SEPARATOR ', ') FROM t1 + WHERE Member_ID=? AND Action='Enrolled' AND + (Track,Action_Date) IN (SELECT Track, MAX(Action_Date) FROM t1 + WHERE Member_ID=? 
+ GROUP BY Track + HAVING Track>='CAD' AND + MAX(Action_Date)>'2006-03-01')"; +SET @id='111111'; +EXECUTE STMT USING @id,@id; +SET @id='222222'; +EXECUTE STMT USING @id,@id; + +DEALLOCATE PREPARE STMT; +DROP TABLE t1; + --echo End of 4.1 tests ############################# 5.0 tests start ################################ # @@ -1358,6 +1410,33 @@ DEALLOCATE PREPARE stmt; DROP TABLE IF EXISTS t1, t2, t3; +# +# BUG#21081: SELECT inside stored procedure returns wrong results +# +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (i INT KEY); +CREATE TABLE t2 (i INT); + +INSERT INTO t1 VALUES (1), (2); +INSERT INTO t2 VALUES (1); + +PREPARE stmt FROM "SELECT t2.i FROM t1 LEFT JOIN t2 ON t2.i = t1.i + WHERE t1.i = ?"; + +SET @arg= 1; +EXECUTE stmt USING @arg; +SET @arg= 2; +EXECUTE stmt USING @arg; +SET @arg= 1; +EXECUTE stmt USING @arg; + +DEALLOCATE PREPARE stmt; +DROP TABLE t1, t2; + + --echo End of 5.0 tests. # diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index 89508f468a7..1a80234e485 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -740,3 +740,30 @@ SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-02 00:00:00'; DROP TABLE t1; # End of 5.0 tests + +# BUG#22393 fix: Adjust 'ref' estimate if we have 'range' estimate for +# a smaller scan interval +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t2 (a int, b int, filler char(100)); +insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A, +t1 B, t1 C where A.a < 5; + +insert into t2 select 1000, b, 'filler' from t2; +alter table t2 add index (a,b); +# t2 values +# ( 1 , 10, 'filler') +# ( 2 , 10, 'filler') +# ( 3 , 10, 'filler') +# (... , 10, 'filler') +# ... +# (1000, 10, 'filler') - 500 times + +# 500 rows, 1 row + +select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z; +explain select * from t2 where a=1000 and b<11; + +drop table t1, t2; + diff --git a/mysql-test/t/read_many_rows_innodb.test b/mysql-test/t/read_many_rows_innodb.test new file mode 100644 index 00000000000..ed86275447f --- /dev/null +++ b/mysql-test/t/read_many_rows_innodb.test @@ -0,0 +1,17 @@ +# t/read_many_rows_innodb.test +# +# Check how filesort and buffered-record-reads works with InnoDB. +# This test takes a long time. 
+# +# Last update: +# 2006-08-03 ML test refactored (MySQL 5.1) +# main code --> include/read_many_rows_innodb.inc +# + +--source include/big_test.inc + +--source include/have_innodb.inc +let $engine_type= InnoDB; +let $other_engine_type= MyISAM; + +--source include/read_many_rows.inc diff --git a/mysql-test/t/round.test b/mysql-test/t/round.test new file mode 100644 index 00000000000..d018fa7e34e --- /dev/null +++ b/mysql-test/t/round.test @@ -0,0 +1,145 @@ +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (sint8 tinyint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('127.4'); +INSERT INTO t1 VALUES ('127.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-127.4'); +INSERT INTO t1 VALUES ('-127.5'); +INSERT INTO t1 VALUES ('-128.4'); +INSERT INTO t1 VALUES ('-128.5'); +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (uint8 tinyint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('127.4'); +INSERT INTO t1 VALUES ('127.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('255.4'); +INSERT INTO t1 VALUES ('255.5'); +SELECT * FROM t1; +DROP TABLE t1; + + +CREATE TABLE t1 (sint16 smallint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('32767.4'); +INSERT INTO t1 VALUES ('32767.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-32767.4'); +INSERT INTO t1 VALUES ('-32767.5'); +INSERT INTO t1 VALUES ('-32768.4'); +INSERT INTO t1 VALUES ('-32768.5'); +SELECT * FROM t1; +DROP TABLE t1; + + +CREATE TABLE t1 (uint16 smallint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('32767.4'); +INSERT INTO t1 VALUES ('32767.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('65535.4'); +INSERT INTO t1 VALUES ('65535.5'); +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (sint24 mediumint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('8388607.4'); +INSERT INTO t1 VALUES ('8388607.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-8388607.4'); +INSERT INTO t1 VALUES ('-8388607.5'); +INSERT INTO t1 VALUES ('-8388608.4'); +INSERT INTO t1 VALUES ('-8388608.5'); +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (uint24 mediumint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('8388607.4'); +INSERT INTO t1 VALUES ('8388607.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('16777215.4'); +INSERT INTO t1 VALUES ('16777215.5'); +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (sint64 bigint not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); +INSERT INTO t1 VALUES ('9223372036854775807.4'); +INSERT INTO t1 VALUES ('9223372036854775807.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('-9223372036854775807.4'); +INSERT INTO t1 VALUES ('-9223372036854775807.5'); +INSERT INTO t1 VALUES ('-9223372036854775808.4'); +INSERT INTO t1 VALUES ('-9223372036854775808.5'); +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (uint64 bigint unsigned not null); +INSERT INTO t1 VALUES ('0.1'); +INSERT INTO t1 VALUES ('0.5'); 
+INSERT INTO t1 VALUES ('9223372036854775807.4'); +INSERT INTO t1 VALUES ('9223372036854775807.5'); +INSERT INTO t1 VALUES ('-0.1'); +INSERT INTO t1 VALUES ('-0.5'); +INSERT INTO t1 VALUES ('18446744073709551615.4'); +INSERT INTO t1 VALUES ('18446744073709551615.5'); +INSERT INTO t1 VALUES ('1844674407370955161.0'); +INSERT INTO t1 VALUES ('1844674407370955161.1'); +INSERT INTO t1 VALUES ('1844674407370955161.2'); +INSERT INTO t1 VALUES ('1844674407370955161.3'); +INSERT INTO t1 VALUES ('1844674407370955161.4'); +INSERT INTO t1 VALUES ('1844674407370955161.5'); +INSERT INTO t1 VALUES ('1844674407370955161.0e1'); +INSERT INTO t1 VALUES ('1844674407370955161.1e1'); +INSERT INTO t1 VALUES ('1844674407370955161.2e1'); +INSERT INTO t1 VALUES ('1844674407370955161.3e1'); +INSERT INTO t1 VALUES ('1844674407370955161.4e1'); +INSERT INTO t1 VALUES ('1844674407370955161.5e1'); +INSERT INTO t1 VALUES ('18446744073709551610e-1'); +INSERT INTO t1 VALUES ('18446744073709551611e-1'); +INSERT INTO t1 VALUES ('18446744073709551612e-1'); +INSERT INTO t1 VALUES ('18446744073709551613e-1'); +INSERT INTO t1 VALUES ('18446744073709551614e-1'); +INSERT INTO t1 VALUES ('18446744073709551615e-1'); +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (str varchar(128), sint64 bigint not null default 0); +INSERT INTO t1 (str) VALUES ('1.5'); +INSERT INTO t1 (str) VALUES ('1.00005e4'); +INSERT INTO t1 (str) VALUES ('1.0005e3'); +INSERT INTO t1 (str) VALUES ('1.005e2'); +INSERT INTO t1 (str) VALUES ('1.05e1'); +INSERT INTO t1 (str) VALUES ('1.5e0'); +INSERT INTO t1 (str) VALUES ('100005e-1'); +INSERT INTO t1 (str) VALUES ('100050e-2'); +INSERT INTO t1 (str) VALUES ('100500e-3'); +INSERT INTO t1 (str) VALUES ('105000e-4'); +INSERT INTO t1 (str) VALUES ('150000e-5'); +UPDATE t1 SET sint64=str; +SELECT * FROM t1; +DROP TABLE t1; + + diff --git a/mysql-test/t/row.test b/mysql-test/t/row.test index 6301cc0f584..63c611e6be6 100644 --- a/mysql-test/t/row.test +++ b/mysql-test/t/row.test @@ -92,3 +92,50 @@ SELECT ROW(NULL,10) <=> ROW(3,NULL); # SELECT ROW(1,1,1) = ROW(1,1,1) as `1`, ROW(1,1,1) = ROW(1,2,1) as `0`, ROW(1,NULL,1) = ROW(2,2,1) as `0`, ROW(1,NULL,1) = ROW(1,2,2) as `0`, ROW(1,NULL,1) = ROW(1,2,1) as `null` ; select row(NULL,1)=(2,0); + +# +# Bug #16081: row equalities are to be used for query optimizations +# + +CREATE TABLE t1 (a int, b int, PRIMARY KEY (a,b)); +INSERT INTO t1 VALUES (1,1), (2,1), (3,1), (1,2), (3,2), (3,3); + +EXPLAIN SELECT * FROM t1 WHERE a=3 AND b=2; +EXPLAIN SELECT * FROM t1 WHERE (a,b)=(3,2); +SELECT * FROM t1 WHERE a=3 and b=2; +SELECT * FROM t1 WHERE (a,b)=(3,2); + +CREATE TABLE t2 (a int, b int, c int, PRIMARY KEY (a,b,c)); +INSERT INTO t2 VALUES + (1,1,2), (3,1,3), (1,2,2), (4,4,2), + (1,1,1), (3,1,1), (1,2,1); + +EXPLAIN SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b=t2.b; +EXPLAIN SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b); +SELECT * FROM t1,t2 WHERE t1.a=t2.a and t1.b=t2.b; +SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b); + +EXPLAIN SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b=2; +EXPLAIN SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,2); +SELECT * FROM t1,t2 WHERE t1.a=1 and t1.b=t2.b; +SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,2); + +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b+1); +SELECT * FROM t1,t2 WHERE (t1.a,t1.b)=(t2.a,t2.b+1); + +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE (t1.a-1,t1.b)=(t2.a-1,t2.b+1); +SELECT * FROM t1,t2 WHERE (t1.a-1,t1.b)=(t2.a-1,t2.b+1); + +EXPLAIN SELECT * FROM t2 WHERE a=3 AND b=2; +EXPLAIN SELECT * FROM t2 WHERE 
(a,b)=(3,2); +SELECT * FROM t2 WHERE a=3 and b=2; +SELECT * FROM t2 WHERE (a,b)=(3,2); + +EXPLAIN SELECT * FROM t1,t2 WHERE t2.a=t1.a AND t2.b=2 AND t2.c=1; +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE (t2.a,(t2.b,t2.c))=(t1.a,(2,1)); +SELECT * FROM t1,t2 WHERE (t2.a,(t2.b,t2.c))=(t1.a,(2,1)); + +EXPLAIN EXTENDED SELECT * FROM t1,t2 WHERE t2.a=t1.a AND (t2.b,t2.c)=(2,1); +SELECT * FROM t1,t2 WHERE t2.a=t1.a AND (t2.b,t2.c)=(2,1); + +DROP TABLE t1,t2; diff --git a/mysql-test/t/rowid_order_innodb.test b/mysql-test/t/rowid_order_innodb.test index fb4959d78e6..152eb28d388 100644 --- a/mysql-test/t/rowid_order_innodb.test +++ b/mysql-test/t/rowid_order_innodb.test @@ -1,108 +1,14 @@ +# t/rowid_order_innodb.test # # Test for rowid ordering (and comparison) functions. # do index_merge select for tables with PK of various types. # ---disable_warnings -drop table if exists t1, t2, t3,t4; ---enable_warnings +# Last update: +# 2006-07-28 ML test refactored (MySQL 5.1) +# main code t/rowid_order_innodb.test -> include/rowid_order.inc +# --- source include/have_innodb.inc - -# Signed number as rowid -create table t1 ( - pk1 int not NULL, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1), - KEY key1 (key1), - KEY key2 (key2) -) engine=innodb; -insert into t1 values (-5, 1, 1), - (-100, 1, 1), - (3, 1, 1), - (0, 1, 1), - (10, 1, 1); -explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Unsigned numbers as rowids -create table t1 ( - pk1 int unsigned not NULL, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1), - KEY key1 (key1), - KEY key2 (key2) -) engine=innodb; -insert into t1 values (0, 1, 1), - (0xFFFFFFFF, 1, 1), - (0xFFFFFFFE, 1, 1), - (1, 1, 1), - (2, 1, 1); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Case-insensitive char(N) -create table t1 ( - pk1 char(4) not NULL, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1), - KEY key1 (key1), - KEY key2 (key2) -) engine=innodb collate latin2_general_ci; -insert into t1 values ('a1', 1, 1), - ('b2', 1, 1), - ('A3', 1, 1), - ('B4', 1, 1); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Multi-part PK -create table t1 ( - pk1 int not NULL, - pk2 char(4) not NULL collate latin1_german1_ci, - pk3 char(4) not NULL collate latin1_bin, - key1 int(11), - key2 int(11), - PRIMARY KEY (pk1,pk2,pk3), - KEY key1 (key1), - KEY key2 (key2) -) engine=innodb; -insert into t1 values - (1, 'u', 'u', 1, 1), - (1, 'u', char(0xEC), 1, 1), - (1, 'u', 'x', 1, 1); -insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1; -insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u'; -insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1; -select * from t1; -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; - -# Hidden PK -alter table t1 drop primary key; -select * from t1; -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; -drop table t1; - -# Variable-length PK -# this is also test for Bug#2688 -create table t1 ( - pk1 varchar(8) NOT NULL default '', - pk2 varchar(4) NOT NULL default '', - key1 int(11), - key2 int(11), - primary key(pk1, pk2), - KEY key1 (key1), - KEY key2 (key2) -) engine=innodb; -insert into t1 values ('','empt',2,2), - ('a','a--a',2,2), - ('bb','b--b',2,2), - ('ccc','c--c',2,2), - ('dddd','d--d',2,2); -select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3; - -drop 
table t1; +--source include/have_innodb.inc +let $engine_type= InnoDB; +--source include/rowid_order.inc diff --git a/mysql-test/t/rpl000013.test b/mysql-test/t/rpl000013.test index c681e76cf51..69a102e84ce 100644 --- a/mysql-test/t/rpl000013.test +++ b/mysql-test/t/rpl000013.test @@ -7,7 +7,7 @@ # in row-based, it hangs waiting for an offset which is never # reached (the "sync_with_master 1"), logically. ---source include/have_binlog_format_statement.inc +--source include/have_binlog_format_mixed_or_statement.inc source include/master-slave.inc; save_master_pos; connection slave; diff --git a/mysql-test/t/rpl_insert.test b/mysql-test/t/rpl_insert.test index 9beaff6bab6..0d471a0e0a9 100644 --- a/mysql-test/t/rpl_insert.test +++ b/mysql-test/t/rpl_insert.test @@ -1,7 +1,6 @@ - -# -# Bug#20821: INSERT DELAYED fails to write some rows to binlog -# +--echo # +--echo # Bug#20821: INSERT DELAYED fails to write some rows to binlog +--echo # --source include/master-slave.inc --source include/not_embedded.inc @@ -35,7 +34,11 @@ SELECT COUNT(*) FROM mysqlslap.t1; sync_slave_with_master; SELECT COUNT(*) FROM mysqlslap.t1; -connection master; -DROP SCHEMA IF EXISTS mysqlslap; -sync_slave_with_master; +--echo # +--echo # Cleanup +--echo # +connection master; +USE test; +DROP SCHEMA mysqlslap; +sync_slave_with_master; diff --git a/mysql-test/t/rpl_loaddata_s.test b/mysql-test/t/rpl_loaddata_s.test index 72a5d1a8ec1..2c94c8ef953 100644 --- a/mysql-test/t/rpl_loaddata_s.test +++ b/mysql-test/t/rpl_loaddata_s.test @@ -2,7 +2,7 @@ # replicated LOAD DATA INFILE correctly when it has binlog_*_db rules. # This is for BUG#1100 (LOAD DATA INFILE was half-logged). --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source include/master-slave.inc connection slave; diff --git a/mysql-test/t/rpl_mixed_ddl_dml.test b/mysql-test/t/rpl_mixed_ddl_dml.test index 5b9ed6898b8..6a1f81abed3 100644 --- a/mysql-test/t/rpl_mixed_ddl_dml.test +++ b/mysql-test/t/rpl_mixed_ddl_dml.test @@ -1,7 +1,7 @@ # Mixed DDL-DML (CREATE ... SELECT ...) statements can only be # replicated properly in statement-based replication. 
# Currently statement based due to bug 12345 ---source include/have_binlog_format_statement.inc +--source include/have_binlog_format_mixed_or_statement.inc source include/master-slave.inc; diff --git a/mysql-test/t/rpl_rbr_to_sbr.test b/mysql-test/t/rpl_rbr_to_sbr.test index c10199f8ff5..0c5368197b3 100644 --- a/mysql-test/t/rpl_rbr_to_sbr.test +++ b/mysql-test/t/rpl_rbr_to_sbr.test @@ -1,12 +1,12 @@ -- source include/have_row_based.inc --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc +-- source include/not_ndb_default.inc -- source include/master-slave.inc # Test that the slave temporarily switches to ROW when seeing binrow # events when it is in STATEMENT or MIXED mode SET BINLOG_FORMAT=MIXED; -SELECT @@GLOBAL.BINLOG_FORMAT, @@SESSION.BINLOG_FORMAT; SET GLOBAL BINLOG_FORMAT=MIXED; SELECT @@GLOBAL.BINLOG_FORMAT, @@SESSION.BINLOG_FORMAT; diff --git a/mysql-test/t/rpl_rewrt_db.test b/mysql-test/t/rpl_rewrt_db.test index 8acc05f6ff5..52f04e073dd 100644 --- a/mysql-test/t/rpl_rewrt_db.test +++ b/mysql-test/t/rpl_rewrt_db.test @@ -1,5 +1,5 @@ # TBF - difference in row level logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source include/master-slave.inc --disable_warnings diff --git a/mysql-test/t/rpl_rotate_logs.test b/mysql-test/t/rpl_rotate_logs.test index a5c8a87c74d..2249dff1449 100644 --- a/mysql-test/t/rpl_rotate_logs.test +++ b/mysql-test/t/rpl_rotate_logs.test @@ -13,7 +13,7 @@ # - Test creating a duplicate key error and recover from it # Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc connect (master,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK); --disable_warnings diff --git a/mysql-test/t/rpl_row_basic_11bugs.test b/mysql-test/t/rpl_row_basic_11bugs.test index af7e9af4005..37bfd01e260 100644 --- a/mysql-test/t/rpl_row_basic_11bugs.test +++ b/mysql-test/t/rpl_row_basic_11bugs.test @@ -54,3 +54,63 @@ UPDATE t1 SET a=99 WHERE a = 0; --replace_result $SERVER_VERSION SERVER_VERSION --replace_regex /table_id: [0-9]+/table_id: #/ SHOW BINLOG EVENTS; + +DROP TABLE t1; + +# BUG#17620: Replicate (Row Based) Fails when Query Cache enabled on +# slave +--echo ================ Test for BUG#17620 ================ +--disable_query_log +--source include/master-slave-reset.inc +--enable_query_log + +--echo **** On Slave **** +connection slave; +SET GLOBAL QUERY_CACHE_SIZE=0; + +--echo **** On Master **** +connection master; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); + +--echo **** On Slave **** +sync_slave_with_master; +SET GLOBAL QUERY_CACHE_SIZE=16*1024*1024; + +--echo **** On Master **** +connection master; +INSERT INTO t1 VALUES (4),(5),(6); + +--echo **** On Slave **** +sync_slave_with_master; +SELECT * FROM t1; + +--echo **** On Master **** +connection master; +INSERT INTO t1 VALUES (7),(8),(9); + +--echo **** On Slave **** +sync_slave_with_master; +SELECT * FROM t1; + +# Bug#22550: Replication of BIT columns failing +--echo ================ Test for BUG#22550 ================ +--disable_query_log +--source include/master-slave-reset.inc +--enable_query_log + +connection master; +CREATE TABLE t1 (a BIT(1), b INT) ENGINE=MYISAM; +sync_slave_with_master; + +connection master; +INSERT INTO t1 VALUES(1,2); +SELECT HEX(a),b FROM t1; +sync_slave_with_master; +SELECT HEX(a),b FROM t1; + +connection master; +UPDATE t1 SET a=0 WHERE b=2; +SELECT 
HEX(a),b FROM t1; +sync_slave_with_master; +SELECT HEX(a),b FROM t1; diff --git a/mysql-test/t/rpl_row_basic_2myisam.test b/mysql-test/t/rpl_row_basic_2myisam.test index 471f4d6dbc5..c2cef800ec8 100644 --- a/mysql-test/t/rpl_row_basic_2myisam.test +++ b/mysql-test/t/rpl_row_basic_2myisam.test @@ -1,3 +1,3 @@ let $type= 'MYISAM' ; let $extra_index= ; --- source include/rpl_row_basic.inc +-- source extra/rpl_tests/rpl_row_basic.test diff --git a/mysql-test/t/rpl_row_basic_3innodb.test b/mysql-test/t/rpl_row_basic_3innodb.test index b97f1543cc3..89effc4b3bb 100644 --- a/mysql-test/t/rpl_row_basic_3innodb.test +++ b/mysql-test/t/rpl_row_basic_3innodb.test @@ -2,5 +2,5 @@ let $type= 'INNODB' ; let $extra_index= ; --- source include/rpl_row_basic.inc +-- source extra/rpl_tests/rpl_row_basic.test diff --git a/mysql-test/t/rpl_row_basic_7ndb.test b/mysql-test/t/rpl_row_basic_7ndb.test index 464600c590a..1ec2fb333ae 100644 --- a/mysql-test/t/rpl_row_basic_7ndb.test +++ b/mysql-test/t/rpl_row_basic_7ndb.test @@ -1,5 +1,5 @@ -- source include/have_ndb.inc let $type= 'NDB' ; let $extra_index= ; --- source include/rpl_row_basic.inc +-- source extra/rpl_tests/rpl_row_basic.test -- source include/master-slave-end.inc diff --git a/mysql-test/t/rpl_row_basic_8partition.test b/mysql-test/t/rpl_row_basic_8partition.test index 640a420c10e..f262ef05c58 100644 --- a/mysql-test/t/rpl_row_basic_8partition.test +++ b/mysql-test/t/rpl_row_basic_8partition.test @@ -8,12 +8,17 @@ ############################################################ --source include/have_row_based.inc +--source include/have_binlog_format_row.inc --source include/have_partition.inc +--source include/not_ndb_default.inc --source include/master-slave.inc connection master; --disable_warnings DROP TABLE IF EXISTS t1; -SET BINLOG_FORMAT=ROW; + +let $maybe_ro_var = @@BINLOG_FORMAT; +let $val4var = ROW; +--source include/safe_set_to_maybe_ro_var.inc --echo **** Partition RANGE testing **** diff --git a/mysql-test/t/rpl_row_max_relay_size.test b/mysql-test/t/rpl_row_max_relay_size.test index 7e5f1145801..eb3f63db8aa 100644 --- a/mysql-test/t/rpl_row_max_relay_size.test +++ b/mysql-test/t/rpl_row_max_relay_size.test @@ -5,6 +5,8 @@ # Requires statement logging -- source include/not_ndb_default.inc --- source include/have_binlog_format_row.inc +-- source include/have_row_based.inc +SET SESSION BINLOG_FORMAT=ROW; +SET GLOBAL BINLOG_FORMAT=ROW; -- source extra/rpl_tests/rpl_max_relay_size.test diff --git a/mysql-test/t/rpl_row_tabledefs_2myisam.test b/mysql-test/t/rpl_row_tabledefs_2myisam.test new file mode 100644 index 00000000000..ab4914e15fa --- /dev/null +++ b/mysql-test/t/rpl_row_tabledefs_2myisam.test @@ -0,0 +1,8 @@ + +-- source include/have_binlog_format_row.inc +-- source include/master-slave.inc + +let $engine_type = 'MyISAM'; +-- source extra/rpl_tests/rpl_row_tabledefs.test + + diff --git a/mysql-test/t/rpl_row_tabledefs_3innodb.test b/mysql-test/t/rpl_row_tabledefs_3innodb.test new file mode 100644 index 00000000000..7824fbfb663 --- /dev/null +++ b/mysql-test/t/rpl_row_tabledefs_3innodb.test @@ -0,0 +1,9 @@ + +-- source include/have_binlog_format_row.inc +-- source include/have_innodb.inc +-- source include/master-slave.inc + +let $engine_type = 'InnoDB'; +-- source extra/rpl_tests/rpl_row_tabledefs.test + + diff --git a/mysql-test/t/rpl_stm_EE_err2.test b/mysql-test/t/rpl_stm_EE_err2.test index dbcc66686ec..face651b9a1 100644 --- a/mysql-test/t/rpl_stm_EE_err2.test +++ b/mysql-test/t/rpl_stm_EE_err2.test @@ -3,6 +3,6 @@ # Date: 2006-01-11 # 
Purpose: Engine Wrapper for rpl_stm_EE_err2.test ############################## --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc let $engine_type=myisam; -- source extra/rpl_tests/rpl_stm_EE_err2.test diff --git a/mysql-test/t/rpl_stm_flsh_tbls.test b/mysql-test/t/rpl_stm_flsh_tbls.test index 3a6102de279..43a5234ccc7 100644 --- a/mysql-test/t/rpl_stm_flsh_tbls.test +++ b/mysql-test/t/rpl_stm_flsh_tbls.test @@ -1,5 +1,5 @@ # depends on the binlog output ---source include/have_binlog_format_statement.inc +--source include/have_binlog_format_mixed_or_statement.inc let $rename_event_pos= 652; -- source extra/rpl_tests/rpl_flsh_tbls.test diff --git a/mysql-test/t/rpl_stm_log.test b/mysql-test/t/rpl_stm_log.test index d11e1fd8ac1..5a1e0facc83 100644 --- a/mysql-test/t/rpl_stm_log.test +++ b/mysql-test/t/rpl_stm_log.test @@ -1,5 +1,5 @@ # Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc let $engine_type=MyISAM; -- source extra/rpl_tests/rpl_log.test diff --git a/mysql-test/t/rpl_stm_max_relay_size.test b/mysql-test/t/rpl_stm_max_relay_size.test index 50008533388..950aa8b322a 100644 --- a/mysql-test/t/rpl_stm_max_relay_size.test +++ b/mysql-test/t/rpl_stm_max_relay_size.test @@ -4,7 +4,7 @@ # Test of manual relay log rotation with FLUSH LOGS. # Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/rpl_tests/rpl_max_relay_size.test # End of 4.1 tests diff --git a/mysql-test/t/rpl_stm_multi_query.test b/mysql-test/t/rpl_stm_multi_query.test index 97c322ac780..c39d1fad015 100644 --- a/mysql-test/t/rpl_stm_multi_query.test +++ b/mysql-test/t/rpl_stm_multi_query.test @@ -4,7 +4,7 @@ # one binlog event containing all queries) # Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/rpl_tests/rpl_multi_query.test diff --git a/mysql-test/t/rpl_stm_mystery22.test b/mysql-test/t/rpl_stm_mystery22.test index a43f2610350..017593fdfba 100644 --- a/mysql-test/t/rpl_stm_mystery22.test +++ b/mysql-test/t/rpl_stm_mystery22.test @@ -15,7 +15,7 @@ #should proceed in a correct way. ################################# --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source include/master-slave.inc # first, cause a duplicate key problem on the slave diff --git a/mysql-test/t/rpl_stm_no_op.test b/mysql-test/t/rpl_stm_no_op.test index f82bbd8cd55..66dc89bd712 100644 --- a/mysql-test/t/rpl_stm_no_op.test +++ b/mysql-test/t/rpl_stm_no_op.test @@ -4,7 +4,7 @@ # case. So this test is meaningul only in statement-based (and if it was # enabled in row-based, it would fail as expected). 
--- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc source include/master-slave.inc; diff --git a/mysql-test/t/rpl_stm_reset_slave.test b/mysql-test/t/rpl_stm_reset_slave.test index 282033bf6d1..6a99d4e1613 100644 --- a/mysql-test/t/rpl_stm_reset_slave.test +++ b/mysql-test/t/rpl_stm_reset_slave.test @@ -1,5 +1,5 @@ # TBF - difference in row level logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source extra/rpl_tests/rpl_reset_slave.test # End of 4.1 tests diff --git a/mysql-test/t/rpl_stm_until.test b/mysql-test/t/rpl_stm_until.test index 9a4e4471fe1..f42965c0eb0 100644 --- a/mysql-test/t/rpl_stm_until.test +++ b/mysql-test/t/rpl_stm_until.test @@ -1,4 +1,4 @@ --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc -- source include/master-slave.inc # Test is dependent on binlog positions diff --git a/mysql-test/t/rpl_switch_stm_row_mixed.test b/mysql-test/t/rpl_switch_stm_row_mixed.test index 6d282069ba1..d345b62b8eb 100644 --- a/mysql-test/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/t/rpl_switch_stm_row_mixed.test @@ -1,4 +1,5 @@ --- source include/have_binlog_format_row.inc +-- source include/have_row_based.inc +-- source include/not_ndb_default.inc -- source include/master-slave.inc connection master; @@ -8,6 +9,9 @@ create database mysqltest1; --enable_warnings use mysqltest1; +set session binlog_format=row; +set global binlog_format=row; + show global variables like "binlog_format%"; show session variables like "binlog_format%"; select @@global.binlog_format, @@session.binlog_format; @@ -462,6 +466,41 @@ if ($you_want_to_test_UDF) } sync_slave_with_master; + +# +# Bug#20863 If binlog format is changed between update and unlock of +# tables, wrong binlog +# + +connection master; +DROP TABLE IF EXISTS t11; +SET SESSION BINLOG_FORMAT=STATEMENT; +CREATE TABLE t11 (song VARCHAR(255)); +LOCK TABLES t11 WRITE; +SET SESSION BINLOG_FORMAT=ROW; +INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict'); +SET SESSION BINLOG_FORMAT=STATEMENT; +INSERT INTO t11 VALUES('Careful With That Axe, Eugene'); +UNLOCK TABLES; + +--query_vertical SELECT * FROM t11 +sync_slave_with_master; +USE mysqltest1; +--query_vertical SELECT * FROM t11 + +connection master; +DROP TABLE IF EXISTS t12; +SET SESSION BINLOG_FORMAT=MIXED; +CREATE TABLE t12 (data LONG); +LOCK TABLES t12 WRITE; +INSERT INTO t12 VALUES(UUID()); +UNLOCK TABLES; + +--replace_column 2 # 5 # +--replace_regex /table_id: [0-9]+/table_id: #/ +show binlog events from 102; +sync_slave_with_master; + # as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID --exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql --exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql diff --git a/mysql-test/t/rpl_temp_table.test b/mysql-test/t/rpl_temp_table.test index c29fa8e676d..9b73961aeea 100644 --- a/mysql-test/t/rpl_temp_table.test +++ b/mysql-test/t/rpl_temp_table.test @@ -1,7 +1,7 @@ # drop table t1 t2 t3 are included int master-slave.inc # meaningful only in statement-based: --- source include/have_binlog_format_statement.inc +-- source 
include/have_binlog_format_mixed_or_statement.inc -- source include/master-slave.inc diff --git a/mysql-test/t/rpl_trigger.test b/mysql-test/t/rpl_trigger.test index 3671763ee18..279574a8aaa 100644 --- a/mysql-test/t/rpl_trigger.test +++ b/mysql-test/t/rpl_trigger.test @@ -2,7 +2,7 @@ # Test of triggers with replication # Adding statement include due to Bug 12574 # TODO: Remove statement include once 12574 is patched ---source include/have_binlog_format_statement.inc +--source include/have_binlog_format_mixed_or_statement.inc --source include/master-slave.inc --disable_warnings diff --git a/mysql-test/t/rpl_trunc_temp.test b/mysql-test/t/rpl_trunc_temp.test index 56f858dc9a2..28bcb0c06c3 100644 --- a/mysql-test/t/rpl_trunc_temp.test +++ b/mysql-test/t/rpl_trunc_temp.test @@ -1,5 +1,5 @@ # Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc source include/master-slave.inc; diff --git a/mysql-test/t/rpl_truncate_2myisam.test b/mysql-test/t/rpl_truncate_2myisam.test index 1a2cb1d0fb3..a0f0ea04f44 100644 --- a/mysql-test/t/rpl_truncate_2myisam.test +++ b/mysql-test/t/rpl_truncate_2myisam.test @@ -1,4 +1,4 @@ - +--source include/not_ndb_default.inc let $engine=MyISAM; --source extra/rpl_tests/rpl_truncate.test diff --git a/mysql-test/t/rpl_truncate_3innodb.test b/mysql-test/t/rpl_truncate_3innodb.test index 7f3145feb1b..a31fd62a29a 100644 --- a/mysql-test/t/rpl_truncate_3innodb.test +++ b/mysql-test/t/rpl_truncate_3innodb.test @@ -1,5 +1,6 @@ --source include/have_innodb.inc +--source include/not_ndb_default.inc let $engine=InnoDB; --source extra/rpl_tests/rpl_truncate.test diff --git a/mysql-test/t/rpl_view.test b/mysql-test/t/rpl_view.test index 687c702c79d..3eff8f7550a 100644 --- a/mysql-test/t/rpl_view.test +++ b/mysql-test/t/rpl_view.test @@ -132,4 +132,24 @@ DROP TABLE t1; --sync_with_master --connection master +# +# BUG#19419: "VIEW: View that the column name is different +# by master and slave is made". +# +connection master; +create table t1(a int, b int); +insert into t1 values (1, 1), (1, 2), (1, 3); +create view v1(a, b) as select a, sum(b) from t1 group by a; + +sync_slave_with_master; +explain v1; +show create table v1; +select * from v1; + +connection master; +drop table t1; +drop view v1; + +sync_slave_with_master; + --echo End of 5.0 tests diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index bd480436d8c..b3563e21bdd 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -1,13 +1,9 @@ # # Basic stored PROCEDURE tests # -# Please keep this file free of --error cases and other -# things that will not run in a single debugged mysqld -# process (e.g. master-slave things). -# # Test cases for bugs are added at the end. See template there. # -# Tests that require --error go into sp-error.test +# Some tests that require --error go into sp-error.test # Tests that require inndb go into sp_trans.test # Tests that check privilege and security issues go to sp-security.test. # Tests that require multiple connections, except security/privilege tests, @@ -6262,6 +6258,32 @@ SELECT * FROM t11| DROP TABLE t11, t12| DROP FUNCTION bug19862| + +# Bug#21002 "Derived table not selecting from a "real" table fails in JOINs" +# +# A regression caused by the fix for Bug#18444: for derived tables we should +# set an empty string as the current database. They do not belong to any +# database and must be usable even if there is no database +# selected. 
+--disable_warnings +drop table if exists t3| +drop database if exists mysqltest1| +--enable_warnings +create table t3 (a int)| +insert into t3 (a) values (1), (2)| + +create database mysqltest1| +use mysqltest1| +drop database mysqltest1| + +# No current database +select database()| + +select * from (select 1 as a) as t1 natural join (select * from test.t3) as t2| +use test| +drop table t3| + + # Test for BUG#16899: Possible buffer overflow in handling of DEFINER-clause. # # Prepare. @@ -6295,7 +6317,111 @@ create procedure bug21416() show create procedure bug21416| call bug21416()| drop procedure bug21416| + +# +# BUG#21414: SP: Procedure undroppable, to some extent +# +--disable_warnings +DROP PROCEDURE IF EXISTS bug21414| +--enable_warnings + +CREATE PROCEDURE bug21414() SELECT 1| + +FLUSH TABLES WITH READ LOCK| + +--error ER_CANT_UPDATE_WITH_READLOCK +DROP PROCEDURE bug21414| + +UNLOCK TABLES| + +--echo The following should succeed. +DROP PROCEDURE bug21414| + + +# +# BUG#21311: Possible stack overrun if SP has non-latin1 name +# +set names utf8| +--disable_warnings +drop database if exists това_е_дълго_име_за_база_данни_нали| +--enable_warnings +create database това_е_дълго_име_за_база_данни_нали| +INSERT INTO mysql.proc VALUES ('това_е_дълго_име_за_база_данни_нали','това_е_процедура_Ñ_доÑта_дълго_име_нали_и_още_по_дълго','PROCEDURE','това_е_процедура_Ñ_доÑта_дълго_име_нали_и_още_по_дълго','SQL','CONTAINS_SQL','NO','DEFINER','','','bad_body','root@localhost',now(), now(),'','')| +--error ER_SP_PROC_TABLE_CORRUPT +call това_е_дълго_име_за_база_данни_нали.това_е_процедура_Ñ_доÑта_дълго_име_нали_и_още_по_дълго()| +drop database това_е_дълго_име_за_база_данни_нали| + + +# +# BUG#21493: Crash on the second call of a procedure containing +# a select statement that uses an IN aggregating subquery +# + +CREATE TABLE t3 ( + Member_ID varchar(15) NOT NULL, + PRIMARY KEY (Member_ID) +)| + +CREATE TABLE t4 ( + ID int(10) unsigned NOT NULL auto_increment, + Member_ID varchar(15) NOT NULL default '', + Action varchar(12) NOT NULL, + Action_Date datetime NOT NULL, + Track varchar(15) default NULL, + User varchar(12) default NULL, + Date_Updated timestamp NOT NULL default CURRENT_TIMESTAMP on update + CURRENT_TIMESTAMP, + PRIMARY KEY (ID), + KEY Action (Action), + KEY Action_Date (Action_Date) +)| + + +INSERT INTO t3(Member_ID) VALUES + ('111111'), ('222222'), ('333333'), ('444444'), ('555555'), ('666666')| + +INSERT INTO t4(Member_ID, Action, Action_Date, Track) VALUES + ('111111', 'Disenrolled', '2006-03-01', 'CAD' ), + ('111111', 'Enrolled', '2006-03-01', 'CAD' ), + ('111111', 'Disenrolled', '2006-07-03', 'CAD' ), + ('222222', 'Enrolled', '2006-03-07', 'CAD' ), + ('222222', 'Enrolled', '2006-03-07', 'CHF' ), + ('222222', 'Disenrolled', '2006-08-02', 'CHF' ), + ('333333', 'Enrolled', '2006-03-01', 'CAD' ), + ('333333', 'Disenrolled', '2006-03-01', 'CAD' ), + ('444444', 'Enrolled', '2006-03-01', 'CAD' ), + ('555555', 'Disenrolled', '2006-03-01', 'CAD' ), + ('555555', 'Enrolled', '2006-07-21', 'CAD' ), + ('555555', 'Disenrolled', '2006-03-01', 'CHF' ), + ('666666', 'Enrolled', '2006-02-09', 'CAD' ), + ('666666', 'Enrolled', '2006-05-12', 'CHF' ), + ('666666', 'Disenrolled', '2006-06-01', 'CAD' )| + +--disable_warnings +DROP FUNCTION IF EXISTS bug21493| +--enable_warnings + +CREATE FUNCTION bug21493(paramMember VARCHAR(15)) RETURNS varchar(45) +BEGIN +DECLARE tracks VARCHAR(45); +SELECT GROUP_CONCAT(Track SEPARATOR ', ') INTO tracks FROM t4 + WHERE Member_ID=paramMember AND Action='Enrolled' AND + 
(Track,Action_Date) IN (SELECT Track, MAX(Action_Date) FROM t4 + WHERE Member_ID=paramMember GROUP BY Track); +RETURN tracks; +END| + +SELECT bug21493('111111')| +SELECT bug21493('222222')| + +SELECT bug21493(Member_ID) FROM t3| + +DROP FUNCTION bug21493| +DROP TABLE t3,t4| + --echo End of 5.0 tests + + # # BUG#NNNN: New bug synopsis # diff --git a/mysql-test/t/status.test b/mysql-test/t/status.test index 55f9d95adc5..2afcd49962c 100644 --- a/mysql-test/t/status.test +++ b/mysql-test/t/status.test @@ -13,6 +13,7 @@ connect (con2,localhost,root,,); flush status; show status like 'Table_lock%'; +select * from information_schema.session_status where variable_name like 'Table_lock%'; connection con1; SET SQL_LOG_BIN=0; --disable_warnings @@ -34,6 +35,7 @@ unlock tables; connection con1; reap; show status like 'Table_lock%'; +select * from information_schema.session_status where variable_name like 'Table_lock%'; drop table t1; disconnect con2; @@ -102,6 +104,7 @@ while ($wait_more) # Prerequisite. SHOW STATUS LIKE 'max_used_connections'; +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; # Save original setting. SET @save_thread_cache_size=@@thread_cache_size; @@ -115,6 +118,7 @@ disconnect con2; # Check that max_used_connections still reflects maximum value. SHOW STATUS LIKE 'max_used_connections'; +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; # Check that after flush max_used_connections equals to current number # of connections. First wait for previous disconnect to finish. @@ -138,15 +142,18 @@ while ($wait_more) --enable_result_log # Check that we don't count disconnected thread any longer. SHOW STATUS LIKE 'max_used_connections'; +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; # Check that max_used_connections is updated when cached thread is # reused... connect (con2,localhost,root,,); SHOW STATUS LIKE 'max_used_connections'; +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; # ...and when new thread is created. connect (con3,localhost,root,,); SHOW STATUS LIKE 'max_used_connections'; +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'max_used_connections'; # Restore original setting. 
connection default; diff --git a/mysql-test/t/strict.test b/mysql-test/t/strict.test index 4037e1231cd..ae5e4365af3 100644 --- a/mysql-test/t/strict.test +++ b/mysql-test/t/strict.test @@ -649,9 +649,9 @@ UPDATE t1 SET col2 =col2 + 50 WHERE col2 > 0; UPDATE t1 SET col1 =col1 / 0 WHERE col1 > 0; --error 1365 UPDATE t1 SET col1= MOD(col1,0) WHERE col1 > 0; ---error 1264 +--error 1366 INSERT INTO t1 (col1) VALUES (''); ---error 1264 +--error 1366 INSERT INTO t1 (col1) VALUES ('a59b'); --error 1265 INSERT INTO t1 (col1) VALUES ('1a'); @@ -710,9 +710,9 @@ INSERT INTO t1 (col2) VALUES(18446744073709551616.0); UPDATE t1 SET col1 =col1 / 0 WHERE col1 > 0; --error 1365 UPDATE t1 SET col1= MOD(col1,0) WHERE col1 > 0; ---error 1264 +--error 1366 INSERT INTO t1 (col1) VALUES (''); ---error 1264 +--error 1366 INSERT INTO t1 (col1) VALUES ('a59b'); --error 1265 INSERT INTO t1 (col1) VALUES ('1a'); diff --git a/mysql-test/t/strict_autoinc_1myisam.test b/mysql-test/t/strict_autoinc_1myisam.test new file mode 100644 index 00000000000..d9ecce30974 --- /dev/null +++ b/mysql-test/t/strict_autoinc_1myisam.test @@ -0,0 +1,8 @@ +# +# Bug#20573 Strict mode auto-increment +# + +let $type= 'MYISAM' ; +--source include/strict_autoinc.inc + +# end of test diff --git a/mysql-test/t/strict_autoinc_2innodb.test b/mysql-test/t/strict_autoinc_2innodb.test new file mode 100644 index 00000000000..83dfe950938 --- /dev/null +++ b/mysql-test/t/strict_autoinc_2innodb.test @@ -0,0 +1,10 @@ +-- source include/have_innodb.inc + +# +# Bug#20573 Strict mode auto-increment +# + +let $type= 'InnoDB' ; +--source include/strict_autoinc.inc + +# end of test diff --git a/mysql-test/t/strict_autoinc_3heap.test b/mysql-test/t/strict_autoinc_3heap.test new file mode 100644 index 00000000000..f266ecdfda2 --- /dev/null +++ b/mysql-test/t/strict_autoinc_3heap.test @@ -0,0 +1,8 @@ +# +# Bug#20573 Strict mode auto-increment +# + +let $type= 'MEMORY' ; +--source include/strict_autoinc.inc + +# end of test diff --git a/mysql-test/t/strict_autoinc_5ndb.test b/mysql-test/t/strict_autoinc_5ndb.test new file mode 100644 index 00000000000..9e2090fddef --- /dev/null +++ b/mysql-test/t/strict_autoinc_5ndb.test @@ -0,0 +1,10 @@ +-- source include/have_ndb.inc + +# +# Bug#20573 Strict mode auto-increment +# + +let $type= 'NDB' ; +--source include/strict_autoinc.inc + +# end of test diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index ed122e9ff5a..7811301a9bc 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -665,7 +665,7 @@ CREATE TABLE t2 ( ) ENGINE=MyISAM; INSERT INTO t2 VALUES ('AUS','Australia','Oceania','Australia and New Zealand',7741220.00,1901,18886000,79.8,351182.00,392911.00,'Australia','Constitutional Monarchy, Federation','Elisabeth II',135,'AU'); -INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); +INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); select t2.Continent, t1.Name, t1.Population from t2 LEFT JOIN t1 ON t2.Code = t1.t2 where t1.Population IN (select max(t1.Population) AS Population from t1, t2 where t1.t2 = t2.Code group by Continent); @@ -1526,7 +1526,7 @@ CREATE TABLE t1 ( ) ENGINE=MyISAM; INSERT INTO t1 VALUES ('XXX','Xxxxx','Oceania','Xxxxxx',26.00,0,0,0,0,0,'Xxxxx','Xxxxx','Xxxxx',NULL,'XX'); INSERT INTO t1 VALUES ('ASM','American 
Samoa','Oceania','Polynesia',199.00,0,68000,75.1,334.00,NULL,'Amerika Samoa','US Territory','George W. Bush',54,'AS'); -INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); +INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); INSERT INTO t1 VALUES ('UMI','United States Minor Outlying Islands','Oceania','Micronesia/Caribbean',16.00,0,0,NULL,0.00,NULL,'United States Minor Outlying Islands','Dependent Territory of the US','George W. Bush',NULL,'UM'); /*!40000 ALTER TABLE t1 ENABLE KEYS */; SELECT DISTINCT Continent AS c FROM t1 WHERE Code <> SOME ( SELECT Code FROM t1 WHERE Continent = c AND Population < 200); @@ -1892,6 +1892,69 @@ select * from t1 r1 group by r2.retailerId); drop table t1; +# +# Bug #21180: Subselect with index for both WHERE and ORDER BY +# produces empty result +# +create table t1(a int, primary key (a)); +insert into t1 values (10); + +create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b)); +insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989'); + +explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r + ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' + ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10; +SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r + ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' + ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10; + +explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r + ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' + ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10; +SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r + ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' + ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10; + +drop table t1,t2; + +# +# Bug #21853: assert failure for a grouping query with +# an ALL/ANY quantified subquery in HAVING +# + +CREATE TABLE t1 ( + field1 int NOT NULL, + field2 int NOT NULL, + field3 int NOT NULL, + PRIMARY KEY (field1,field2,field3) +); +CREATE TABLE t2 ( + fieldA int NOT NULL, + fieldB int NOT NULL, + PRIMARY KEY (fieldA,fieldB) +); + +INSERT INTO t1 VALUES + (1,1,1), (1,1,2), (1,2,1), (1,2,2), (1,2,3), (1,3,1); +INSERT INTO t2 VALUES (1,1), (1,2), (1,3); + +SELECT field1, field2, COUNT(*) + FROM t1 GROUP BY field1, field2; + +SELECT field1, field2 + FROM t1 + GROUP BY field1, field2 + HAVING COUNT(*) >= ALL (SELECT fieldB + FROM t2 WHERE fieldA = field1); +SELECT field1, field2 + FROM t1 + GROUP BY field1, field2 + HAVING COUNT(*) < ANY (SELECT fieldB + FROM t2 WHERE fieldA = field1); + +DROP TABLE t1, t2; + # End of 4.1 tests # @@ -2280,3 +2343,60 @@ SELECT * FROM t1,t2 ORDER BY t1.t DESC LIMIT 1); DROP TABLE t1, t2; + +# +# Bug#14654 : Cannot select from the same table twice within a UNION +# statement +# +CREATE TABLE t1 (i INT); + +(SELECT i FROM t1) UNION (SELECT i FROM t1); +SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS + ( + (SELECT i FROM t1) UNION + (SELECT i FROM t1) + ); + +SELECT * FROM t1 +WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1))); + +#TODO:not supported +--error 1064 +explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12)) + from t1; +#supported +explain 
select * from t1 where not exists + ((select t11.i from t1 t11) union (select t12.i from t1 t12)); + +DROP TABLE t1; + +# +# Bug #21540: Subqueries with no from and aggregate functions return +# wrong results +CREATE TABLE t1 (a INT, b INT); +CREATE TABLE t2 (a INT); +INSERT INTO t2 values (1); +INSERT INTO t1 VALUES (1,1),(1,2),(2,3),(3,4); +SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; +SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) + FROM t1 GROUP BY t1.a; +SELECT COUNT(DISTINCT t1.b), (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; +SELECT COUNT(DISTINCT t1.b), + (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) + FROM t1 GROUP BY t1.a; +SELECT ( + SELECT ( + SELECT COUNT(DISTINCT t1.b) + ) +) +FROM t1 GROUP BY t1.a; +SELECT ( + SELECT ( + SELECT ( + SELECT COUNT(DISTINCT t1.b) + ) + ) + FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 t2 +GROUP BY t2.a; +DROP TABLE t1,t2; diff --git a/mysql-test/t/temp_table.test b/mysql-test/t/temp_table.test index 9121283fb32..123007b10c7 100644 --- a/mysql-test/t/temp_table.test +++ b/mysql-test/t/temp_table.test @@ -116,7 +116,54 @@ insert into t2 values (NULL, 'foo'), (NULL, 'bar'); select d, c from t1 left join t2 on b = c where a = 3 order by d; drop table t1, t2; -# End of 4.1 tests + +# +# BUG#21096: locking issue ; temporary table conflicts. +# +# The problem was that on DROP TEMPORARY table name lock was acquired, +# which should not be done. +# +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (i INT); + +LOCK TABLE t1 WRITE; + +connect (conn1, localhost, root,,); + +CREATE TEMPORARY TABLE t1 (i INT); + +--echo The following command should not block +DROP TEMPORARY TABLE t1; + +disconnect conn1; +connection default; + +DROP TABLE t1; + +# +# Check that it's not possible to drop a base table with +# DROP TEMPORARY statement. +# +CREATE TABLE t1 (i INT); +CREATE TEMPORARY TABLE t2 (i INT); + +--error 1051 +DROP TEMPORARY TABLE t2, t1; + +# Table t2 should have been dropped. +--error 1146 +SELECT * FROM t2; +# But table t1 should still be there. +SELECT * FROM t1; + +DROP TABLE t1; + + +--echo End of 4.1 tests. + # # Test truncate with temporary tables diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index cd6de45a7ce..ccc17e55dc8 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -1421,4 +1421,23 @@ DROP TABLE t1; DROP TABLE t2; +# +# Bug#20670 "UPDATE using key and invoking trigger that modifies +# this key does not stop" +# + +--disable_warnings +drop table if exists t1; +--enable_warnings +create table t1 (i int, j int key); +insert into t1 values (1,1), (2,2), (3,3); +create trigger t1_bu before update on t1 for each row + set new.j = new.j + 10; +# This should not work indefinitely and should cause +# expected result +update t1 set i= i+ 10 where j > 2; +select * from t1; +drop table t1; + + --echo End of 5.0 tests diff --git a/mysql-test/t/type_bit.test b/mysql-test/t/type_bit.test index 998f8f18fbe..d46ba667665 100644 --- a/mysql-test/t/type_bit.test +++ b/mysql-test/t/type_bit.test @@ -252,5 +252,13 @@ select hex(b + 0), bin(b + 0), oct(b + 0), hex(n), bin(n), oct(n) from bug15583; select conv(b, 10, 2), conv(b + 0, 10, 2) from bug15583; drop table bug15583; +# +# Bug #22271: data casting may affect data stored in the next column(s?) 
+# + +create table t1(a bit(1), b smallint unsigned); +insert into t1 (b, a) values ('2', '1'); +select hex(a), b from t1; +drop table t1; --echo End of 5.0 tests diff --git a/mysql-test/t/type_date.test b/mysql-test/t/type_date.test index 78bdd9b8a80..c6050753943 100644 --- a/mysql-test/t/type_date.test +++ b/mysql-test/t/type_date.test @@ -36,8 +36,8 @@ INSERT INTO t1 VALUES ( "2000-1-2" ); INSERT INTO t1 VALUES ( "2000-1-3" ); INSERT INTO t1 VALUES ( "2000-1-4" ); INSERT INTO t1 VALUES ( "2000-1-5" ); -SELECT * FROM t1 WHERE datum BETWEEN "2000-1-2" AND "2000-1-4"; -SELECT * FROM t1 WHERE datum BETWEEN "2000-1-2" AND datum - INTERVAL 100 DAY; +SELECT * FROM t1 WHERE datum BETWEEN cast("2000-1-2" as date) AND cast("2000-1-4" as date); +SELECT * FROM t1 WHERE datum BETWEEN cast("2000-1-2" as date) AND datum - INTERVAL 100 DAY; DROP TABLE t1; # @@ -115,4 +115,11 @@ INSERT INTO t1 VALUES ('abc'); SELECT * FROM t1; DROP TABLE t1; +# +# Bug#21677: Wrong result when comparing a DATE and a DATETIME in BETWEEN +# +create table t1(start_date date, end_date date); +insert into t1 values ('2000-01-01','2000-01-02'); +select 1 from t1 where cast('2000-01-01 12:01:01' as datetime) between start_date and end_date; +drop table t1; # End of 4.1 tests diff --git a/mysql-test/t/type_float.test b/mysql-test/t/type_float.test index 5bfd744e4a8..965826629bd 100644 --- a/mysql-test/t/type_float.test +++ b/mysql-test/t/type_float.test @@ -178,7 +178,16 @@ show warnings; desc t3; drop table t1,t2,t3; -# End of 4.1 tests +# +# Bug #22129: A small double precision number becomes zero +# +# check if underflows are detected correctly +select 1e-308, 1.00000001e-300, 100000000e-300; + +# check if overflows are detected correctly +select 10e307; + +--echo End of 4.1 tests # # bug #12694 (float(m,d) specifications) diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index bf5c5e066f0..2bdf8420d6d 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -774,6 +774,7 @@ drop table t1,t2; # # correct conversion long string to TEXT (BUG#10025) # + CREATE TABLE t1 (a mediumtext); CREATE TABLE t2 (b varchar(20)); INSERT INTO t1 VALUES ('a'),('b'); @@ -782,6 +783,50 @@ create table t3 SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; show create table t3; drop tables t1,t2,t3; +# +# Extended fix for Bug#10025 - the test above should result in mediumtext +# and the one below in longtext. Earlier, the test above also resulted in +# the longtext type.
+# + +CREATE TABLE t1 (a longtext); +CREATE TABLE t2 (b varchar(20)); +INSERT INTO t1 VALUES ('a'),('b'); +SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; +create table t3 SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; +show create table t3; +drop tables t1,t2,t3; + +# +# Testing here that mediumtext converts into longtext if the result +# exceeds mediumtext maximum length +# + +SELECT @tmp_max:= @@max_allowed_packet; +SET max_allowed_packet=25000000; +CREATE TABLE t1 (a mediumtext); +CREATE TABLE t2 (b varchar(20)); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,20000000) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +DROP TABLES t1,t3; +CREATE TABLE t1 (a tinytext); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,2) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +DROP TABLES t1,t3; +CREATE TABLE t1 (a mediumtext); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,2) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +DROP TABLES t1,t3; +CREATE TABLE t1 (a tinyblob); +INSERT INTO t1 VALUES ('a'); +CREATE TABLE t3 SELECT REPEAT(a,2) AS a FROM t1 UNION SELECT b FROM t2; +SHOW CREATE TABLE t3; +DROP TABLES t1,t2,t3; +SET max_allowed_packet:= @tmp_max; + # # Bug #10032 Bug in parsing UNION with ORDER BY when one node does not use FROM # diff --git a/mysql-test/t/innodb_unsafe_binlog-master.opt b/mysql-test/t/unsafe_binlog_innodb-master.opt similarity index 100% rename from mysql-test/t/innodb_unsafe_binlog-master.opt rename to mysql-test/t/unsafe_binlog_innodb-master.opt diff --git a/mysql-test/t/unsafe_binlog_innodb.test b/mysql-test/t/unsafe_binlog_innodb.test new file mode 100644 index 00000000000..a0516749451 --- /dev/null +++ b/mysql-test/t/unsafe_binlog_innodb.test @@ -0,0 +1,16 @@ +# t/unsafe_binlog_innodb.test +# +# Note that this test uses at least in case of InnoDB options +# innodb_locks_unsafe_for_binlog = true +# innodb_lock_timeout = 5 +# +# Last update: +# 2006-08-02 ML test refactored +# old name was innodb_unsafe_binlog.test +# main code went into include/unsafe_binlog.inc +# + +--source include/have_innodb.inc +let $engine_type= InnoDB; + +--source include/unsafe_binlog.inc diff --git a/mysql-test/t/user_var-binlog.test b/mysql-test/t/user_var-binlog.test index 7f1561b925b..6615e48ca42 100644 --- a/mysql-test/t/user_var-binlog.test +++ b/mysql-test/t/user_var-binlog.test @@ -1,5 +1,5 @@ # Requires statement logging --- source include/have_binlog_format_statement.inc +-- source include/have_binlog_format_mixed_or_statement.inc # TODO: Create row based version once $MYSQL_BINLOG has new RB version # Embedded server does not support binlogging --source include/not_embedded.inc diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index 9985ed4f7f5..7cbc7ee153b 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -83,16 +83,24 @@ drop table t1; set GLOBAL max_join_size=10; set max_join_size=100; show variables like 'max_join_size'; +select * from information_schema.session_variables where variable_name like 'max_join_size'; --replace_result 18446744073709551615 HA_POS_ERROR 4294967295 HA_POS_ERROR show global variables like 'max_join_size'; +--replace_result 18446744073709551615 HA_POS_ERROR 4294967295 HA_POS_ERROR +select * from information_schema.global_variables where variable_name like 'max_join_size'; set GLOBAL max_join_size=2000; show global variables like 'max_join_size'; +select * from information_schema.global_variables where 
variable_name like 'max_join_size'; set max_join_size=DEFAULT; --replace_result 18446744073709551615 HA_POS_ERROR 4294967295 HA_POS_ERROR show variables like 'max_join_size'; +--replace_result 18446744073709551615 HA_POS_ERROR 4294967295 HA_POS_ERROR +select * from information_schema.session_variables where variable_name like 'max_join_size'; set GLOBAL max_join_size=DEFAULT; --replace_result 18446744073709551615 HA_POS_ERROR 4294967295 HA_POS_ERROR show global variables like 'max_join_size'; +--replace_result 18446744073709551615 HA_POS_ERROR 4294967295 HA_POS_ERROR +select * from information_schema.global_variables where variable_name like 'max_join_size'; set @@max_join_size=1000, @@global.max_join_size=2000; select @@local.max_join_size, @@global.max_join_size; select @@identity, length(@@version)>0; @@ -106,50 +114,68 @@ set big_tables=OFF, big_tables=ON, big_tables=0, big_tables=1, big_tables="OFF", set global concurrent_insert=2; show variables like 'concurrent_insert'; +select * from information_schema.session_variables where variable_name like 'concurrent_insert'; set global concurrent_insert=1; show variables like 'concurrent_insert'; +select * from information_schema.session_variables where variable_name like 'concurrent_insert'; set global concurrent_insert=0; show variables like 'concurrent_insert'; +select * from information_schema.session_variables where variable_name like 'concurrent_insert'; set global concurrent_insert=DEFAULT; select @@concurrent_insert; set global timed_mutexes=ON; show variables like 'timed_mutexes'; +select * from information_schema.session_variables where variable_name like 'timed_mutexes'; set global timed_mutexes=0; show variables like 'timed_mutexes'; +select * from information_schema.session_variables where variable_name like 'timed_mutexes'; set storage_engine=MYISAM, storage_engine="HEAP", global storage_engine="MERGE"; show local variables like 'storage_engine'; +select * from information_schema.session_variables where variable_name like 'storage_engine'; show global variables like 'storage_engine'; +select * from information_schema.global_variables where variable_name like 'storage_engine'; set GLOBAL query_cache_size=100000; set GLOBAL myisam_max_sort_file_size=2000000; show global variables like 'myisam_max_sort_file_size'; +select * from information_schema.global_variables where variable_name like 'myisam_max_sort_file_size'; set GLOBAL myisam_max_sort_file_size=default; --replace_result 2147483647 FILE_SIZE 9223372036854775807 FILE_SIZE show variables like 'myisam_max_sort_file_size'; +--replace_result 2147483647 FILE_SIZE 9223372036854775807 FILE_SIZE +select * from information_schema.session_variables where variable_name like 'myisam_max_sort_file_size'; set global net_retry_count=10, session net_retry_count=10; set global net_buffer_length=1024, net_write_timeout=200, net_read_timeout=300; set session net_buffer_length=2048, net_write_timeout=500, net_read_timeout=600; show global variables like 'net_%'; +select * from information_schema.global_variables where variable_name like 'net_%'; show session variables like 'net_%'; +select * from information_schema.session_variables where variable_name like 'net_%'; set session net_buffer_length=8000, global net_read_timeout=900, net_write_timeout=1000; show global variables like 'net_%'; +select * from information_schema.global_variables where variable_name like 'net_%'; show session variables like 'net_%'; +select * from information_schema.session_variables where variable_name like 'net_%'; 
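# A sketch (not part of the patch; the variable name net_retry_count is only an
# illustration) of the check each SHOW/SELECT pair added in this hunk performs:
# the INFORMATION_SCHEMA tables should mirror SHOW VARIABLES, each matching row
# carrying the two columns VARIABLE_NAME and VARIABLE_VALUE.
show session variables like 'net_retry_count';
select * from information_schema.session_variables where variable_name like 'net_retry_count';
show global variables like 'net_retry_count';
select * from information_schema.global_variables where variable_name like 'net_retry_count';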
set net_buffer_length=1; show variables like 'net_buffer_length'; +select * from information_schema.session_variables where variable_name like 'net_buffer_length'; set net_buffer_length=2000000000; show variables like 'net_buffer_length'; +select * from information_schema.session_variables where variable_name like 'net_buffer_length'; set character set cp1251_koi8; show variables like "character_set_client"; +select * from information_schema.session_variables where variable_name like 'character_set_client'; select @@timestamp>0; set @@rand_seed1=10000000,@@rand_seed2=1000000; select ROUND(RAND(),5); show variables like '%alloc%'; +select * from information_schema.session_variables where variable_name like '%alloc%'; set @@range_alloc_block_size=1024*16; set @@query_alloc_block_size=1024*17+2; set @@query_prealloc_size=1024*18; @@ -157,10 +183,12 @@ set @@transaction_alloc_block_size=1024*20-1; set @@transaction_prealloc_size=1024*21-1; select @@query_alloc_block_size; show variables like '%alloc%'; +select * from information_schema.session_variables where variable_name like '%alloc%'; set @@range_alloc_block_size=default; set @@query_alloc_block_size=default, @@query_prealloc_size=default; set transaction_alloc_block_size=default, @@transaction_prealloc_size=default; show variables like '%alloc%'; +select * from information_schema.session_variables where variable_name like '%alloc%'; # # Bug #10904 Illegal mix of collations between @@ -363,6 +391,8 @@ set global ft_boolean_syntax = @@init_connect; set global myisam_max_sort_file_size=4294967296; --replace_result 4294967296 MAX_FILE_SIZE 2146435072 MAX_FILE_SIZE show global variables like 'myisam_max_sort_file_size'; +--replace_result 4294967296 MAX_FILE_SIZE 2146435072 MAX_FILE_SIZE +select * from information_schema.global_variables where variable_name like 'myisam_max_sort_file_size'; set global myisam_max_sort_file_size=default; # @@ -398,12 +428,16 @@ SELECT @@global.local.key_buffer_size; # BUG#5135: cannot turn on log_warnings with SET in 4.1 (and 4.0) set @tstlw = @@log_warnings; show global variables like 'log_warnings'; +select * from information_schema.global_variables where variable_name like 'log_warnings'; set global log_warnings = 0; show global variables like 'log_warnings'; +select * from information_schema.global_variables where variable_name like 'log_warnings'; set global log_warnings = 42; show global variables like 'log_warnings'; +select * from information_schema.global_variables where variable_name like 'log_warnings'; set global log_warnings = @tstlw; show global variables like 'log_warnings'; +select * from information_schema.global_variables where variable_name like 'log_warnings'; # # BUG#4788 show create table provides incorrect statement @@ -435,6 +469,7 @@ drop table t1; SET GLOBAL MYISAM_DATA_POINTER_SIZE= 7; SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE'; +SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'MYISAM_DATA_POINTER_SIZE'; # # Bug #6958: negative arguments to integer options wrap around @@ -442,6 +477,7 @@ SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE'; SET GLOBAL table_open_cache=-1; SHOW VARIABLES LIKE 'table_open_cache'; +SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'table_open_cache'; SET GLOBAL table_open_cache=DEFAULT; # @@ -527,6 +563,7 @@ select @@global.character_set_filesystem; set @old_sql_big_selects = @@sql_big_selects; set @@sql_big_selects = 1; show variables like 'sql_big_selects'; +select * from 
information_schema.session_variables where variable_name like 'sql_big_selects'; set @@sql_big_selects = @old_sql_big_selects; # @@ -535,10 +572,14 @@ set @@sql_big_selects = @old_sql_big_selects; # set @@sql_notes = 0, @@sql_warnings = 0; show variables like 'sql_notes'; +select * from information_schema.session_variables where variable_name like 'sql_notes'; show variables like 'sql_warnings'; +select * from information_schema.session_variables where variable_name like 'sql_warnings'; set @@sql_notes = 1, @@sql_warnings = 1; show variables like 'sql_notes'; +select * from information_schema.session_variables where variable_name like 'sql_notes'; show variables like 'sql_warnings'; +select * from information_schema.session_variables where variable_name like 'sql_warnings'; # # Bug #12792: @@system_time_zone is not SELECTable. @@ -565,9 +606,15 @@ select @@basedir, @@datadir, @@tmpdir; --replace_column 2 # show variables like 'basedir'; --replace_column 2 # +select * from information_schema.session_variables where variable_name like 'basedir'; +--replace_column 2 # show variables like 'datadir'; --replace_column 2 # +select * from information_schema.session_variables where variable_name like 'datadir'; +--replace_column 2 # show variables like 'tmpdir'; +--replace_column 2 # +select * from information_schema.session_variables where variable_name like 'tmpdir'; # # Bug #19606: make ssl settings available via SHOW VARIABLES and @@variables @@ -577,6 +624,8 @@ show variables like 'tmpdir'; select @@ssl_ca, @@ssl_capath, @@ssl_cert, @@ssl_cipher, @@ssl_key; --replace_column 2 # show variables like 'ssl%'; +--replace_column 2 # +select * from information_schema.session_variables where variable_name like 'ssl%'; # # Bug #19616: make log_queries_not_using_indexes available in SHOW VARIABLES @@ -584,6 +633,7 @@ show variables like 'ssl%'; # select @@log_queries_not_using_indexes; show variables like 'log_queries_not_using_indexes'; +select * from information_schema.session_variables where variable_name like 'log_queries_not_using_indexes'; # # Bug#20908: Crash if select @@"" diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index ee9c3b2ed45..934e0624fd6 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -347,13 +347,13 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; # try insert to VIEW with fields duplicate --- error 1288 +-- error 1573 insert into v3 values (-60,4,30); # try insert to VIEW with expression in SELECT list --- error 1288 +-- error 1573 insert into v4 values (-60,4,30); # try insert to VIEW using temporary table algorithm --- error 1288 +-- error 1573 insert into v5 values (-60,4,30); insert into v1 values (-60,4,30); insert into v1 (z,y,x) values (50,6,-100); @@ -375,13 +375,13 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; # try insert to VIEW with fields duplicate --- error 1288 +-- error 1573 insert into v3 select c, b, a from t2; # try insert to VIEW with expression in SELECT list --- error 1288 +-- error 1573 insert into v4 select c, b, a from t2; # try insert to VIEW using temporary table algorithm --- error 1288 +-- error 1573 insert into v5 select c, b, a from t2; insert into v1 select c, b, a from t2; insert into v1 (z,y,x) select a+20,b+2,-100 from t2; @@ -1249,14 +1249,14 @@ drop table t1; # 
create table t1 (s1 smallint); create view v1 as select * from t1 where 20 < (select (s1) from t1); --- error 1288 +-- error 1573 insert into v1 values (30); create view v2 as select * from t1; create view v3 as select * from t1 where 20 < (select (s1) from v2); --- error 1288 +-- error 1573 insert into v3 values (30); create view v4 as select * from v2 where 20 < (select (s1) from t1); --- error 1288 +-- error 1573 insert into v4 values (30); drop view v4, v3, v2, v1; drop table t1; @@ -2443,7 +2443,7 @@ DROP TABLE t1, t2; # # Bug #16069: VIEW does return the same results as underlying SELECT # with WHERE condition containing BETWEEN over dates - +# Dates as strings should be cast to date type CREATE TABLE t1 (id int NOT NULL PRIMARY KEY, td date DEFAULT NULL, KEY idx(td)); @@ -2454,8 +2454,8 @@ INSERT INTO t1 VALUES CREATE VIEW v1 AS SELECT * FROM t1; -SELECT * FROM t1 WHERE td BETWEEN '2005.01.02' AND '2005.01.04'; -SELECT * FROM v1 WHERE td BETWEEN '2005.01.02' AND '2005.01.04'; +SELECT * FROM t1 WHERE td BETWEEN CAST('2005.01.02' AS DATE) AND CAST('2005.01.04' AS DATE); +SELECT * FROM v1 WHERE td BETWEEN CAST('2005.01.02' AS DATE) AND CAST('2005.01.04' AS DATE); DROP VIEW v1; DROP TABLE t1; @@ -2830,7 +2830,7 @@ BEGIN END | delimiter ;| ---error ER_NON_UPDATABLE_TABLE +--error ER_NON_INSERTABLE_TABLE SELECT f2(); DROP FUNCTION f1; @@ -2838,5 +2838,50 @@ DROP FUNCTION f2; DROP VIEW v1, v2; DROP TABLE t1; +# +# Bug #5500: wrong select_type in EXPLAIN output for queries over views +# +CREATE TABLE t1 (s1 int); +CREATE VIEW v1 AS SELECT * FROM t1; + +EXPLAIN SELECT * FROM t1; +EXPLAIN SELECT * FROM v1; + +INSERT INTO t1 VALUES (1), (3), (2); + +EXPLAIN SELECT * FROM t1 t WHERE t.s1+1 < (SELECT MAX(t1.s1) FROM t1); +EXPLAIN SELECT * FROM v1 t WHERE t.s1+1 < (SELECT MAX(t1.s1) FROM t1); + +DROP VIEW v1; +DROP TABLE t1; + +# +# Bug #5505: Wrong error message on INSERT into a view +# +create table t1 (s1 int); +create view v1 as select s1 as a, s1 as b from t1; +--error 1573 +insert into v1 values (1,1); +update v1 set a = 5; +drop view v1; +drop table t1; + +# +# Bug #21646: view with a subquery in ON expression +# + +CREATE TABLE t1(pk int PRIMARY KEY); +CREATE TABLE t2(pk int PRIMARY KEY, fk int, ver int, org int); + +CREATE ALGORITHM=MERGE VIEW v1 AS +SELECT t1.* + FROM t1 JOIN t2 + ON t2.fk = t1.pk AND + t2.ver = (SELECT MAX(t.ver) FROM t2 t WHERE t.org = t2.org); +SHOW WARNINGS; +SHOW CREATE VIEW v1; + +DROP VIEW v1; +DROP TABLE t1, t2; --echo End of 5.0 tests.
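The error changes in the view tests above (the expected code 1288 becoming 1573, and ER_NON_UPDATABLE_TABLE becoming ER_NON_INSERTABLE_TABLE for the f2() call) rest on the distinction the Bug #5505 test exercises: updatability and insertability are separate properties of a view. A view that exposes the same base column twice can still be updated, but an INSERT cannot tell which of the duplicated columns should receive the value. A minimal sketch of that behaviour, using the illustrative names demo_t and demo_v and assuming a server built from this tree:

create table demo_t (s1 int);
create view demo_v as select s1 as a, s1 as b from demo_t;
insert into demo_t values (1);
# the view is updatable, so UPDATE is accepted
update demo_v set a = 5;
# but it is not insertable; the expected error matches the Bug #5505 test above
--error 1573
insert into demo_v values (1,1);
drop view demo_v;
drop table demo_t;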
diff --git a/mysql-test/t/warnings.test b/mysql-test/t/warnings.test index c20763f203f..a8b8e5f5103 100644 --- a/mysql-test/t/warnings.test +++ b/mysql-test/t/warnings.test @@ -147,4 +147,60 @@ select * from t1 limit 1, 0; select * from t1 limit 0, 0; drop table t1; -# End of 4.1 tests +--echo End of 4.1 tests + +# +# Bug#20778: strange characters in warning message 1366 when called in SP +# + +let $engine_type= innodb; + +CREATE TABLE t1( f1 CHAR(20) ); +CREATE TABLE t2( f1 CHAR(20), f2 CHAR(25) ); +CREATE TABLE t3( f1 CHAR(20), f2 CHAR(25), f3 DATE ); + +INSERT INTO t1 VALUES ( 'a`' ); +INSERT INTO t2 VALUES ( 'a`', 'a`' ); +INSERT INTO t3 VALUES ( 'a`', 'a`', '1000-01-1' ); + +DROP PROCEDURE IF EXISTS sp1; +DROP PROCEDURE IF EXISTS sp2; +DROP PROCEDURE IF EXISTS sp3; +delimiter //; +CREATE PROCEDURE sp1() +BEGIN + DECLARE x NUMERIC ZEROFILL; + SELECT f1 INTO x FROM t1 LIMIT 1; +END// +CREATE PROCEDURE sp2() +BEGIN + DECLARE x NUMERIC ZEROFILL; + SELECT f1 INTO x FROM t2 LIMIT 1; +END// +CREATE PROCEDURE sp3() +BEGIN + DECLARE x NUMERIC ZEROFILL; + SELECT f1 INTO x FROM t3 LIMIT 1; +END// +delimiter ;// +CALL sp1(); +CALL sp2(); +CALL sp3(); + +DROP PROCEDURE IF EXISTS sp1; +delimiter //; +CREATE PROCEDURE sp1() +BEGIN +declare x numeric unsigned zerofill; +SELECT f1 into x from t2 limit 1; +END// +delimiter ;// +CALL sp1(); +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP PROCEDURE sp1; +DROP PROCEDURE sp2; +DROP PROCEDURE sp3; + +--echo End of 5.0 tests diff --git a/mysql-test/t/windows.test b/mysql-test/t/windows.test index 79517df6517..d6bcfeb8cb3 100644 --- a/mysql-test/t/windows.test +++ b/mysql-test/t/windows.test @@ -18,42 +18,3 @@ create table nu (a int); drop table nu; # End of 4.1 tests - -# -# Bug #20789: Merge Subtable Rename Causes Crash -# -CREATE TABLE `t1` ( - `TIM` datetime NOT NULL, - `VAL` double default NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1; -CREATE TABLE `t2` ( - `TIM` datetime NOT NULL, - `VAL` double default NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1; -CREATE TABLE `mt` ( - `TIM` datetime NOT NULL, - `VAL` double default NULL -) ENGINE=MRG_MyISAM DEFAULT CHARSET=latin1 INSERT_METHOD=LAST -UNION=(`t1`,`t2`); - -# insert into the merge table and thus open it. 
-INSERT INTO mt VALUES ('2006-01-01',0); - -# Alter one of the tables that are part of the merge table -ALTER TABLE `t2` RENAME TO `t`; - -# Insert into the merge table that has just been altered ---error 1015 -INSERT INTO mt VALUES ('2006-01-01',0); ---error 1015 -select * from mt; - -FLUSH TABLES; ---error 1017 -select * from mt; - -# Alter one of the tables that are part of the merge table -ALTER TABLE `t` RENAME TO `t2`; -INSERT INTO mt VALUES ('2006-01-01',0); -select * from mt; - diff --git a/mysql-test/t/xml.test b/mysql-test/t/xml.test index d510a61f04d..3347573b4b7 100644 --- a/mysql-test/t/xml.test +++ b/mysql-test/t/xml.test @@ -360,3 +360,19 @@ select extractValue('a','/ns:element/@x # select extractValue('DataOtherdata','/foo/foo.bar'); select extractValue('DataOtherdata','/foo/something'); + +# +# Bug#20854 XML functions: wrong result in ExtractValue +# +--error 1105 +select extractValue('<01>10:39:15<02>140','/zot/tim0/02'); +select extractValue('<01>10:39:15<02>140','//*'); +# dot and dash are bad identtifier start character +select extractValue('<.>test','//*'); +select extractValue('<->test','//*'); +# semicolon is good identifier start character +select extractValue('<:>test','//*'); +# underscore is good identifier start character +select extractValue('<_>test','//*'); +# dot, dash, underscore and semicolon are good identifier middle characters +select extractValue('test','//*'); diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index 6dd11df5de6..e88df49a707 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -15,11 +15,12 @@ } { - pthread allocate_tls memory loss 2 + pthread allocate_tls memory loss Memcheck:Leak fun:calloc fun:_dl_allocate_tls - fun:pthread_create@@GLIBC_2.1 + fun:pthread_create* + } { @@ -182,6 +183,24 @@ fun:compress2 } +{ + libz longest_match 3 + Memcheck:Cond + fun:longest_match + fun:deflate_slow + fun:deflate + fun:gzclose +} + +{ + libz longest_match 4 + Memcheck:Cond + fun:longest_match + fun:deflate_slow + fun:deflate + fun:gzflush +} + { libz longest_match3 Memcheck:Cond diff --git a/mysys/my_chsize.c b/mysys/my_chsize.c index 9760de29a08..fe0d0ffa607 100644 --- a/mysys/my_chsize.c +++ b/mysys/my_chsize.c @@ -46,7 +46,9 @@ int my_chsize(File fd, my_off_t newlength, int filler, myf MyFlags) DBUG_PRINT("my",("fd: %d length: %lu MyFlags: %d",fd,(ulong) newlength, MyFlags)); - oldsize = my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME+MY_FAE)); + if ((oldsize = my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME+MY_FAE))) == newlength) + DBUG_RETURN(0); + DBUG_PRINT("info",("old_size: %ld", (ulong) oldsize)); if (oldsize > newlength) diff --git a/mysys/my_libwrap.c b/mysys/my_libwrap.c index be8adbab0a1..80fca127716 100644 --- a/mysys/my_libwrap.c +++ b/mysys/my_libwrap.c @@ -31,12 +31,12 @@ void my_fromhost(struct request_info *req) int my_hosts_access(struct request_info *req) { - hosts_access(req); + return hosts_access(req); } char *my_eval_client(struct request_info *req) { - eval_client(req); + return eval_client(req); } #endif /* HAVE_LIBWRAP */ diff --git a/mysys/queues.c b/mysys/queues.c index 8e572a0f195..3900ecad3f2 100644 --- a/mysys/queues.c +++ b/mysys/queues.c @@ -208,28 +208,22 @@ void delete_queue(QUEUE *queue) void queue_insert(register QUEUE *queue, byte *element) { - reg2 uint idx,next; + reg2 uint idx, next; int cmp; - -#ifndef DBUG_OFF - if (queue->elements < queue->max_elements) -#endif + DBUG_ASSERT(queue->elements < queue->max_elements); + queue->root[0]= element; + idx= ++queue->elements; + /* max_at_top 
swaps the comparison if we want to order by desc */ + while ((cmp= queue->compare(queue->first_cmp_arg, + element + queue->offset_to_key, + queue->root[(next= idx >> 1)] + + queue->offset_to_key)) && + (cmp ^ queue->max_at_top) < 0) { - queue->root[0]=element; - idx= ++queue->elements; - - /* max_at_top swaps the comparison if we want to order by desc */ - while ((cmp=queue->compare(queue->first_cmp_arg, - element+queue->offset_to_key, - queue->root[(next=idx >> 1)] + - queue->offset_to_key)) && - (cmp ^ queue->max_at_top) < 0) - { - queue->root[idx]=queue->root[next]; - idx=next; - } - queue->root[idx]=element; + queue->root[idx]= queue->root[next]; + idx= next; } + queue->root[idx]= element; } /* @@ -262,16 +256,12 @@ int queue_insert_safe(register QUEUE *queue, byte *element) byte *queue_remove(register QUEUE *queue, uint idx) { -#ifndef DBUG_OFF - if (idx >= queue->max_elements) - return 0; -#endif - { - byte *element=queue->root[++idx]; /* Intern index starts from 1 */ - queue->root[idx]=queue->root[queue->elements--]; - _downheap(queue,idx); - return element; - } + byte *element; + DBUG_ASSERT(idx < queue->max_elements); + element= queue->root[++idx]; /* Intern index starts from 1 */ + queue->root[idx]= queue->root[queue->elements--]; + _downheap(queue, idx); + return element; } /* Fix when element on top has been replaced */ diff --git a/netware/BUILD/mwccnlm b/netware/BUILD/mwccnlm index e6840e781f8..030d87288f2 100755 --- a/netware/BUILD/mwccnlm +++ b/netware/BUILD/mwccnlm @@ -3,9 +3,12 @@ # stop on errors set -e -# mwccnlm is having a hard time understanding "-I./../include" -# convert it to "-I../include" -args=" "`echo $* | sed -e 's/-I.\/../-I../g'` +# mwccnlm is having a hard time understanding: +# * "-I./../include", convert it to "-I../include" +# * "-I.../..", convert it to "-I../../" +args=" "`echo $* | sed \ +-e 's/-I.\/../-I../g' \ +-e 's/\(-I[.\/]*.\) /\1\/ /g'` # NOTE: Option 'pipefail' is not standard sh set -o pipefail diff --git a/netware/BUILD/mwenv b/netware/BUILD/mwenv index b88b6347668..d8d53293b2c 100755 --- a/netware/BUILD/mwenv +++ b/netware/BUILD/mwenv @@ -1,19 +1,39 @@ #! /bin/sh -# F:/mydev, /home/kp/mydev, and 4.0.21 must be correct before compiling -# This values are normally changed by the nwbootstrap script +if test ! 
-r ./sql/mysqld.cc +then + echo "you must start from the top source directory" + exit 1 +fi -# the default is "F:/mydev" -export MYDEV=WINE_BUILD_DIR +# The base path(in wineformat) where compilers, includes and +# libraries are installed +if test -z $MYDEV +then + # the default is "F:/mydev" + export MYDEV="F:/mydev" +fi +echo "MYDEV: $MYDEV" -export MWCNWx86Includes="$MYDEV/libc/include;$MYDEV/fs64/headers;$MYDEV/zlib-1.2.3;$MYDEV/mysql-VERSION/include;$MYDEV" -export MWNWx86Libraries="$MYDEV/libc/imports;$MYDEV/mw/lib;$MYDEV/fs64/imports;$MYDEV/zlib-1.2.3;$MYDEV/openssl;$MYDEV/mysql-VERSION/netware/BUILD" +# Get current dir +BUILD_DIR=`pwd` +echo "BUILD_DIR: $BUILD_DIR" + +# Get current dir in wine format +base=`echo $MYDEV |sed 's/\/.*//'` +base_unix_part=`winepath -- -u $base/` +WINE_BUILD_DIR=`echo "$BUILD_DIR" | sed 's_'$base_unix_part'/__'` +WINE_BUILD_DIR="$base/$WINE_BUILD_DIR" +echo "WINE_BUILD_DIR: $WINE_BUILD_DIR" + +export MWCNWx86Includes="$MYDEV/libc/include;$MYDEV/fs64/headers;$MYDEV/zlib-1.2.3;$WINE_BUILD_DIR/include;$MYDEV" +export MWNWx86Libraries="$MYDEV/libc/imports;$MYDEV/mw/lib;$MYDEV/fs64/imports;$MYDEV/zlib-1.2.3;$MYDEV/openssl;$WINE_BUILD_DIR/netware/BUILD" export MWNWx86LibraryFiles="libcpre.o;libc.imp;netware.imp;mwcrtl.lib;mwcpp.lib;libz.a;neb.imp;zPublics.imp;knetware.imp" export WINEPATH="$MYDEV/mw/bin" -# the default added path is "$HOME/mydev/mysql-x.x-x/netware/BUILD" -export PATH="$PATH:BUILD_DIR/mysql-VERSION/netware/BUILD" +# the default added path is "$BUILD_DIR/netware/BUILD" +export PATH="$PATH:$BUILD_DIR/netware/BUILD" export AR='mwldnlm' export AR_FLAGS='-type library -o' diff --git a/netware/Makefile.am b/netware/Makefile.am index 5b40933994b..beb3fda35ee 100644 --- a/netware/Makefile.am +++ b/netware/Makefile.am @@ -23,7 +23,8 @@ mysqld_safe_SOURCES= mysqld_safe.c my_manage.c mysql_install_db_SOURCES= mysql_install_db.c my_manage.c mysql_test_run_SOURCES= mysql_test_run.c my_manage.c libmysql_SOURCES= libmysqlmain.c -libmysql_LDADD = ../libmysql/.libs/libmysqlclient.a @openssl_libs@ +libmysql_LDADD = ../libmysql/.libs/libmysqlclient.a \ + @openssl_libs@ @yassl_libs@ netware_build_files = client/mysql.def client/mysqladmin.def \ client/mysqlbinlog.def client/mysqlcheck.def \ @@ -49,8 +50,20 @@ link_sources: @LN_CP_F@ $(srcdir)/$$org ../$$f; \ done else -EXTRA_DIST= comp_err.def init_db.sql install_test_db.ncf \ - libmysql.def libmysql.imp \ + +BUILT_SOURCES = libmysql.imp +DISTCLEANFILES = $(BUILT_SOURCES) + +# Create the libmysql.imp from libmysql/libmysql.def +libmysql.imp: $(top_srcdir)/libmysql/libmysql.def + awk 'BEGIN{x=0;} \ + END{printf("\n");} \ + x==1 {printf(" %s",$$1); x++; next} \ + x>1 {printf(",\n %s", $$1); next} \ + /EXPORTS/{x=1}' $(top_srcdir)/libmysql/libmysql.def > libmysql.imp + +EXTRA_DIST= $(BUILT_SOURCES) comp_err.def init_db.sql install_test_db.ncf \ + libmysql.def \ libmysqlmain.c my_manage.c my_manage.h \ my_print_defaults.def myisam_ftdump.def myisamchk.def \ myisamlog.def myisampack.def mysql.def mysql.xdc \ diff --git a/netware/libmysql.imp b/netware/libmysql.imp deleted file mode 100644 index 977fb1b0b1f..00000000000 --- a/netware/libmysql.imp +++ /dev/null @@ -1,85 +0,0 @@ -myodbc_remove_escape, -mysql_add_slave, -mysql_affected_rows, -mysql_change_user, -mysql_character_set_name, -mysql_close, -mysql_data_seek, -mysql_debug, -mysql_disable_reads_from_master, -mysql_disable_rpl_parse, -mysql_dump_debug_info, -mysql_enable_reads_from_master, -mysql_enable_rpl_parse, -mysql_eof, -mysql_errno, -mysql_error, 
-mysql_escape_string, -mysql_fetch_field, -mysql_fetch_field_direct, -mysql_fetch_fields, -mysql_fetch_lengths, -mysql_fetch_row, -mysql_field_count, -mysql_field_seek, -mysql_field_tell, -mysql_free_result, -mysql_get_client_info, -mysql_get_host_info, -mysql_get_proto_info, -mysql_get_server_info, -mysql_info, -mysql_init, -mysql_insert_id, -mysql_kill, -mysql_list_dbs, -mysql_list_fields, -mysql_list_processes, -mysql_list_tables, -mysql_manager_close, -mysql_manager_command, -mysql_manager_connect, -mysql_manager_fetch_line, -mysql_manager_init, -mysql_master_query, -mysql_master_send_query, -mysql_num_fields, -mysql_num_rows, -mysql_odbc_escape_string, -mysql_options, -mysql_ping, -mysql_query, -mysql_read_query_result, -mysql_reads_from_master_enabled, -mysql_real_connect, -mysql_real_escape_string, -mysql_real_query, -mysql_refresh, -mysql_row_seek, -mysql_row_tell, -mysql_rpl_parse_enabled, -mysql_rpl_probe, -mysql_rpl_query_type, -mysql_select_db, -mysql_send_query, -mysql_server_end, -mysql_server_init, -mysql_set_master, -mysql_shutdown, -mysql_slave_query, -mysql_slave_send_query, -mysql_ssl_set, -mysql_stat, -mysql_store_result, -mysql_thread_end, -mysql_thread_id, -mysql_thread_init, -mysql_thread_safe, -mysql_use_result, -net_safe_read, -#simple_command, -mysql_connect, -mysql_create_db, -mysql_drop_db, - - diff --git a/plugin/fulltext/plugin_example.c b/plugin/fulltext/plugin_example.c index 7da6672190c..f09462f2d1a 100644 --- a/plugin/fulltext/plugin_example.c +++ b/plugin/fulltext/plugin_example.c @@ -226,6 +226,7 @@ mysql_declare_plugin(ftexample) "simple_parser", /* name */ "MySQL AB", /* author */ "Simple Full-Text Parser", /* description */ + PLUGIN_LICENSE_GPL, simple_parser_plugin_init, /* init function (when loaded) */ simple_parser_plugin_deinit,/* deinit function (when unloaded) */ 0x0001, /* version */ diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 7bf0bcac82f..c254a77e805 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -331,7 +331,8 @@ if [ x$NDBCLUSTER = x1 ]; then $CP $BASE/ndb-stage@bindir@/* $BASE/bin/. $CP $BASE/ndb-stage@libexecdir@/* $BASE/bin/. $CP $BASE/ndb-stage@pkglibdir@/* $BASE/lib/. - $CP -r $BASE/ndb-stage@pkgincludedir@/ndb $BASE/include + test -d $BASE/include/storage || mkdir $BASE/include/storage + $CP -r $BASE/ndb-stage@pkgincludedir@/storage/ndb $BASE/include/storage/ $CP -r $BASE/ndb-stage@prefix@/mysql-test/ndb $BASE/mysql-test/. 
|| exit 1 rm -rf $BASE/ndb-stage fi diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh index 84ff518c381..92201a2c8af 100644 --- a/scripts/mysql_config.sh +++ b/scripts/mysql_config.sh @@ -97,11 +97,11 @@ port='@MYSQL_TCP_PORT@' ldflags='@LDFLAGS@' # Create options -# We intentionally add a space to the beginning of lib strings, simplifies replace later +# We intentionally add a space to the beginning and end of lib strings, simplifies replace later libs=" $ldflags -L$pkglibdir -lmysqlclient @ZLIB_DEPS@ @NON_THREADED_LIBS@" -libs="$libs @openssl_libs@ @STATIC_NSS_FLAGS@" -libs_r=" $ldflags -L$pkglibdir -lmysqlclient_r @ZLIB_DEPS@ @LIBS@ @openssl_libs@" -embedded_libs=" $ldflags -L$pkglibdir -lmysqld @ZLIB_DEPS@ @LIBS@ @WRAPLIBS@ @innodb_system_libs@" +libs="$libs @openssl_libs@ @STATIC_NSS_FLAGS@ " +libs_r=" $ldflags -L$pkglibdir -lmysqlclient_r @ZLIB_DEPS@ @LIBS@ @openssl_libs@ " +embedded_libs=" $ldflags -L$pkglibdir -lmysqld @ZLIB_DEPS@ @LIBS@ @WRAPLIBS@ @innodb_system_libs@ @openssl_libs@ " cflags="-I$pkgincludedir @CFLAGS@ " #note: end space! include="-I$pkgincludedir" @@ -111,8 +111,9 @@ include="-I$pkgincludedir" # and -xstrconst to make --cflags usable for Sun Forte C++ for remove in DDBUG_OFF DSAFEMALLOC USAFEMALLOC DSAFE_MUTEX \ DPEDANTIC_SAFEMALLOC DUNIV_MUST_NOT_INLINE DFORCE_INIT_OF_VARS \ - DEXTRA_DEBUG DHAVE_purify 'O[0-9]' 'W[-A-Za-z]*' \ - Xa xstrconst "xc99=none" + DEXTRA_DEBUG DHAVE_purify O 'O[0-9]' 'xO[0-9]' 'W[-A-Za-z]*' \ + Xa xstrconst "xc99=none" \ + unroll2 ip mp restrict do # The first option we might strip will always have a space before it because # we set -I$pkgincludedir as the first option @@ -121,7 +122,7 @@ done cflags=`echo "$cflags"|sed -e 's/ *\$//'` # Same for --libs(_r) -for remove in lmtmalloc +for remove in lmtmalloc static-libcxa i-static do # We know the strings starts with a space libs=`echo "$libs"|sed -e "s/ -$remove */ /g"` diff --git a/server-tools/instance-manager/guardian.cc b/server-tools/instance-manager/guardian.cc index b58241272f9..68d89a4b375 100644 --- a/server-tools/instance-manager/guardian.cc +++ b/server-tools/instance-manager/guardian.cc @@ -146,20 +146,35 @@ void Guardian_thread::process_instance(Instance *instance, if (instance->is_running()) { - /* clear status fields */ - current_node->restart_counter= 0; - current_node->crash_moment= 0; - current_node->state= STARTED; + /* The instance can be contacted on it's port */ + + /* If STARTING also check that pidfile has been created */ + if (current_node->state == STARTING && + current_node->instance->options.get_pid() == 0) + { + /* Pid file not created yet, don't go to STARTED state yet */ + } + else + { + /* clear status fields */ + log_info("guardian: instance %s is running, set state to STARTED", + instance->options.instance_name); + current_node->restart_counter= 0; + current_node->crash_moment= 0; + current_node->state= STARTED; + } } else { switch (current_node->state) { case NOT_STARTED: - instance->start(); - current_node->last_checked= current_time; log_info("guardian: starting instance %s", instance->options.instance_name); + + /* NOTE, set state to STARTING _before_ start() is called */ current_node->state= STARTING; + instance->start(); + current_node->last_checked= current_time; break; case STARTED: /* fallthrough */ case STARTING: /* let the instance start or crash */ diff --git a/server-tools/instance-manager/instance.cc b/server-tools/instance-manager/instance.cc index 53f859ef7ae..dfe26397414 100644 --- a/server-tools/instance-manager/instance.cc 
+++ b/server-tools/instance-manager/instance.cc @@ -611,18 +611,19 @@ void Instance::kill_instance(int signum) /* if there are no pid, everything seems to be fine */ if ((pid= options.get_pid()) != 0) /* get pid from pidfile */ { - /* - If we cannot kill mysqld, then it has propably crashed. - Let us try to remove staled pidfile and return successfully - as mysqld is probably stopped. - */ - if (!kill(pid, signum)) - options.unlink_pidfile(); - else if (signum == SIGKILL) /* really killed instance with SIGKILL */ - log_error("The instance %s is being stopped forsibly. Normally \ - it should not happed. Probably the instance has been \ - hanging. You should also check your IM setup", - options.instance_name); + if (kill(pid, signum) == 0) + { + /* Kill suceeded */ + if (signum == SIGKILL) /* really killed instance with SIGKILL */ + { + log_error("The instance %s is being stopped forcibly. Normally" \ + "it should not happen. Probably the instance has been" \ + "hanging. You should also check your IM setup", + options.instance_name); + /* After sucessful hard kill the pidfile need to be removed */ + options.unlink_pidfile(); + } + } } return; } diff --git a/server-tools/instance-manager/instance_options.cc b/server-tools/instance-manager/instance_options.cc index 576096adfd2..f6ec751678f 100644 --- a/server-tools/instance-manager/instance_options.cc +++ b/server-tools/instance-manager/instance_options.cc @@ -406,7 +406,8 @@ pid_t Instance_options::get_pid() { pid_t pid; - fscanf(pid_file_stream, "%i", &pid); + if (fscanf(pid_file_stream, "%i", &pid) != 1) + pid= -1; my_fclose(pid_file_stream, MYF(0)); return pid; } diff --git a/server-tools/instance-manager/listener.cc b/server-tools/instance-manager/listener.cc index da9338e28c5..197c315dee8 100644 --- a/server-tools/instance-manager/listener.cc +++ b/server-tools/instance-manager/listener.cc @@ -38,6 +38,27 @@ #include "thread_registry.h" +static void set_non_blocking(int socket) +{ +#ifndef __WIN__ + int flags= fcntl(socket, F_GETFL, 0); + fcntl(socket, F_SETFL, flags | O_NONBLOCK); +#else + u_long arg= 1; + ioctlsocket(socket, FIONBIO, &arg); +#endif +} + + +static void set_no_inherit(int socket) +{ +#ifndef __WIN__ + int flags= fcntl(socket, F_GETFD, 0); + fcntl(socket, F_SETFD, flags | FD_CLOEXEC); +#endif +} + + /* Listener_thread - incapsulates listening functionality */ @@ -158,6 +179,8 @@ void Listener_thread::run() /* accept may return -1 (failure or spurious wakeup) */ if (client_fd >= 0) // connection established { + set_no_inherit(client_fd); + Vio *vio= vio_new(client_fd, socket_index == 0 ? VIO_TYPE_SOCKET : VIO_TYPE_TCPIP, socket_index == 0 ? 
1 : 0); @@ -199,25 +222,6 @@ err: return; } -void set_non_blocking(int socket) -{ -#ifndef __WIN__ - int flags= fcntl(socket, F_GETFL, 0); - fcntl(socket, F_SETFL, flags | O_NONBLOCK); -#else - u_long arg= 1; - ioctlsocket(socket, FIONBIO, &arg); -#endif -} - -void set_no_inherit(int socket) -{ -#ifndef __WIN__ - int flags= fcntl(socket, F_GETFD, 0); - fcntl(socket, F_SETFD, flags | FD_CLOEXEC); -#endif -} - int Listener_thread::create_tcp_socket() { /* value to be set by setsockopt */ diff --git a/sql-common/client.c b/sql-common/client.c index 6722597531d..79a5be938b2 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -1753,7 +1753,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, const char *passwd, const char *db, uint port, const char *unix_socket,ulong client_flag) { - char buff[NAME_BYTE_LEN+USERNAME_BYTE_LENGTH+100]; + char buff[NAME_LEN+USERNAME_LENGTH+100]; char *end,*host_info; my_socket sock; in_addr_t ip_addr; @@ -2212,7 +2212,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, mysql->server_status, client_flag)); /* This needs to be changed as it's not useful with big packets */ if (user && user[0]) - strmake(end,user,USERNAME_BYTE_LENGTH); /* Max user name */ + strmake(end,user,USERNAME_LENGTH); /* Max user name */ else read_user_name((char*) end); @@ -2242,7 +2242,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, /* Add database if needed */ if (db && (mysql->server_capabilities & CLIENT_CONNECT_WITH_DB)) { - end= strmake(end, db, NAME_BYTE_LEN) + 1; + end= strmake(end, db, NAME_LEN) + 1; mysql->db= my_strdup(db,MYF(MY_WME)); db= 0; } diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 8d97edd4652..1c2944de530 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -53,7 +53,7 @@ ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_data_objects.cc event_queue.cc event_db_repository.cc sql_tablespace.cc events.cc ../sql-common/my_user.c - partition_info.cc rpl_injector.cc sql_locale.cc + partition_info.cc rpl_utility.cc rpl_injector.cc sql_locale.cc ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h ${PROJECT_SOURCE_DIR}/include/mysqld_error.h @@ -117,3 +117,7 @@ ADD_CUSTOM_COMMAND( ) ADD_DEPENDENCIES(mysqld gen_lex_hash) + +ADD_LIBRARY(udf_example MODULE udf_example.c udf_example.def) +ADD_DEPENDENCIES(udf_example strings) +TARGET_LINK_LIBRARIES(udf_example wsock32) diff --git a/sql/Makefile.am b/sql/Makefile.am index e472e6ae8f0..38a99aaef88 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -27,7 +27,7 @@ INCLUDES = @ZLIB_INCLUDES@ \ WRAPLIBS= @WRAPLIBS@ SUBDIRS = share libexec_PROGRAMS = mysqld -noinst_PROGRAMS = gen_lex_hash +EXTRA_PROGRAMS = gen_lex_hash bin_PROGRAMS = mysql_tzinfo_to_sql gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ LDADD = $(top_builddir)/vio/libvio.a \ @@ -52,7 +52,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ ha_partition.h \ ha_ndbcluster.h ha_ndbcluster_binlog.h \ ha_ndbcluster_tables.h \ - opt_range.h protocol.h rpl_tblmap.h \ + opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \ log.h sql_show.h rpl_rli.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h \ lex.h lex_symbol.h sql_acl.h sql_crypt.h \ @@ -93,7 +93,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ slave.cc sql_repl.cc rpl_filter.cc 
rpl_tblmap.cc \ - rpl_injector.cc \ + rpl_utility.cc rpl_injector.cc \ sql_union.cc sql_derived.cc \ client.c sql_client.cc mini_client_errors.c pack.c\ stacktrace.c repl_failsafe.h repl_failsafe.cc \ @@ -121,8 +121,9 @@ DEFS = -DMYSQL_SERVER \ @DEFS@ BUILT_SOURCES = sql_yacc.cc sql_yacc.h lex_hash.h -EXTRA_DIST = udf_example.c $(BUILT_SOURCES) \ - nt_servc.cc nt_servc.h message.mc CMakeLists.txt +EXTRA_DIST = udf_example.c udf_example.def $(BUILT_SOURCES) \ + nt_servc.cc nt_servc.h message.mc CMakeLists.txt \ + udf_example.c udf_example.def CLEANFILES = lex_hash.h sql_yacc.cc sql_yacc.h sql_yacc.output AM_YFLAGS = -d --debug --verbose @@ -156,7 +157,11 @@ sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS) @echo "If it fails, re-run configure with --with-low-memory" $(CXXCOMPILE) $(LM_CFLAGS) -c $< -lex_hash.h: gen_lex_hash$(EXEEXT) +# This generates lex_hash.h +# NOTE Built sources should depend on their sources not the tool +# this avoid the rebuild of the built files in a source dist +lex_hash.h: gen_lex_hash.cc lex.h + $(MAKE) $(AM_MAKEFLAGS) gen_lex_hash$(EXEEXT) ./gen_lex_hash$(EXEEXT) > $@ # the following three should eventually be moved out of this directory diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 6f865a2bcac..4b9aa43b14b 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -353,7 +353,7 @@ Event_parse_data::init_interval(THD *thd) DBUG_RETURN(0); wrong_value: - report_bad_value("INTERVAL", item_execute_at); + report_bad_value("INTERVAL", item_expression); DBUG_RETURN(ER_WRONG_VALUE); } diff --git a/sql/events.h b/sql/events.h index b5d2866aa38..79c4a419388 100644 --- a/sql/events.h +++ b/sql/events.h @@ -55,7 +55,7 @@ public: { EVENTS_OFF= 0, EVENTS_ON= 1, - EVENTS_DISABLED= 5 + EVENTS_DISABLED= 4 }; static enum_opt_event_scheduler opt_event_scheduler; diff --git a/sql/field.cc b/sql/field.cc index 8d852b644bc..09e919d872a 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1257,6 +1257,16 @@ void Field::hash(ulong *nr, ulong *nr2) } } +my_size_t +Field::do_last_null_byte() const +{ + DBUG_ASSERT(null_ptr == NULL || (byte*) null_ptr >= table->record[0]); + if (null_ptr) + return (byte*) null_ptr - table->record[0] + 1; + else + return LAST_NULL_BYTE_UNDEF; +} + void Field::copy_from_tmp(int row_offset) { @@ -2355,11 +2365,16 @@ int Field_new_decimal::store(const char *from, uint length, from, length, charset, &decimal_value)) && table->in_use->abort_on_warning) { + /* Because "from" is not NUL-terminated and we use %s in the ER() */ + String from_as_str; + from_as_str.copy(from, length, &my_charset_bin); + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), - "decimal", from, field_name, + "decimal", from_as_str.c_ptr(), field_name, (ulong) table->in_use->row_count); + DBUG_RETURN(err); } @@ -2372,13 +2387,20 @@ int Field_new_decimal::store(const char *from, uint length, set_value_on_overflow(&decimal_value, decimal_value.sign()); break; case E_DEC_BAD_NUM: + { + /* Because "from" is not NUL-terminated and we use %s in the ER() */ + String from_as_str; + from_as_str.copy(from, length, &my_charset_bin); + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), - "decimal", from, field_name, + "decimal", from_as_str.c_ptr(), field_name, (ulong) table->in_use->row_count); my_decimal_set_zero(&decimal_value); + break; + } } #ifndef DBUG_OFF @@ -2543,28 +2565,26 @@ int 
Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) ASSERT_COLUMN_MARKED_FOR_WRITE; int not_used; // We can ignore result from str2int char *end; - long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); - int error= 0; + int error; if (unsigned_flag) { - if (tmp < 0) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > 255) { - tmp=0; /* purecov: inspected */ - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp > 255) - { - tmp= 255; + set_if_smaller(tmp, 255); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; + else + error= 0; + ptr[0]= (char) tmp; } else { + longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); if (tmp < -128) { tmp= -128; @@ -2579,8 +2599,10 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) } else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; + else + error= 0; + ptr[0]= (char) tmp; } - ptr[0]= (char) tmp; return error; } @@ -2753,28 +2775,33 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) ASSERT_COLUMN_MARKED_FOR_WRITE; int not_used; // We can ignore result from str2int char *end; - long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); - int error= 0; + int error; if (unsigned_flag) { - if (tmp < 0) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX16) { - tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp > UINT_MAX16) - { - tmp=UINT_MAX16; + set_if_smaller(tmp, UINT_MAX16); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; + else + error= 0; +#ifdef WORDS_BIGENDIAN + if (table->s->db_low_byte_first) + { + int2store(ptr,tmp); + } + else +#endif + shortstore(ptr,(short) tmp); } else { + longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); if (tmp < INT_MIN16) { tmp= INT_MIN16; @@ -2789,15 +2816,17 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) } else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } + else + error= 0; #ifdef WORDS_BIGENDIAN - if (table->s->db_low_byte_first) - { - int2store(ptr,tmp); - } - else + if (table->s->db_low_byte_first) + { + int2store(ptr,tmp); + } + else #endif - shortstore(ptr,(short) tmp); + shortstore(ptr,(short) tmp); + } return error; } @@ -3033,28 +3062,26 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) ASSERT_COLUMN_MARKED_FOR_WRITE; int not_used; // We can ignore result from str2int char *end; - long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); - int error= 0; + int error; if (unsigned_flag) { - if (tmp < 0) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX24) { - tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp >= (long) (1L << 24)) - { - tmp=(long) (1L << 24)-1L; + set_if_smaller(tmp, UINT_MAX24); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; + else + error= 0; + int3store(ptr,tmp); } else { + 
longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); if (tmp < INT_MIN24) { tmp= INT_MIN24; @@ -3069,9 +3096,10 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) } else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; + else + error= 0; + int3store(ptr,tmp); } - - int3store(ptr,tmp); return error; } @@ -3280,58 +3308,43 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs) int error; char *end; - tmp_scan= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); - len-= tmp_scan; - from+= tmp_scan; - - end= (char*) from+len; - tmp= cs->cset->strtoll10(cs, from, &end, &error); - - if (error != MY_ERRNO_EDOM) + if (unsigned_flag) { - if (unsigned_flag) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > (ulonglong) UINT_MAX32) { - if (error < 0) - { - error= 1; - tmp= 0; - } - else if ((ulonglong) tmp > (ulonglong) UINT_MAX32) - { - tmp= UINT_MAX32; - error= 1; - } - else - error= 0; + set_if_smaller(tmp, (ulonglong) UINT_MAX32); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; else - { - if (error < 0) - { - error= 0; - if (tmp < INT_MIN32) - { - tmp= INT_MIN32; - error= 1; - } - } - else if (tmp > INT_MAX32) - { - tmp= INT_MAX32; - error= 1; - } - } + error= 0; + store_tmp= (long) tmp; } - if (error) + else { - error= error != MY_ERRNO_EDOM ? 1 : 2; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); + if (tmp < INT_MIN32) + { + tmp= INT_MIN32; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (tmp > INT_MAX32) + { + tmp=INT_MAX32; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; + else + error= 0; + store_tmp= (long) tmp; } - else if (from+len != end && table->in_use->count_cuted_fields && - check_int(from,len,end,cs)) - error= 2; - store_tmp= (long) tmp; #ifdef WORDS_BIGENDIAN if (table->s->db_low_byte_first) { @@ -3573,33 +3586,20 @@ void Field_long::sql_type(String &res) const int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - longlong tmp; int error= 0; char *end; + ulonglong tmp; - tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); - len-= (uint)tmp; - from+= tmp; - if (unsigned_flag) - { - if (!len || test_if_minus(cs, from, from + len)) - { - tmp=0; // Set negative to 0 - error= 1; - } - else - tmp=(longlong) my_strntoull(cs,from,len,10,&end,&error); - } - else - tmp=my_strntoll(cs,from,len,10,&end,&error); - if (error) + tmp= cs->cset->strntoull10rnd(cs,from,len,unsigned_flag,&end,&error); + if (error == MY_ERRNO_ERANGE) { set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (from+len != end && table->in_use->count_cuted_fields && - check_int(from,len,end,cs)) - error= 2; + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; + else + error= 0; #ifdef WORDS_BIGENDIAN if (table->s->db_low_byte_first) { @@ -8094,6 +8094,33 @@ Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, } +my_size_t +Field_bit::do_last_null_byte() const +{ + /* + Code elsewhere is assuming that bytes are 8 bits, so I'm 
using + that value instead of the correct one: CHAR_BIT. + + REFACTOR SUGGESTION (Matz): Change to use the correct number of + bits. On systems with CHAR_BIT > 8 (not very common), the storage + will lose the extra bits. + */ + DBUG_PRINT("debug", ("bit_ofs=%d, bit_len=%d, bit_ptr=%p", + bit_ofs, bit_len, bit_ptr)); + uchar *result; + if (bit_len == 0) + result= null_ptr; + else if (bit_ofs + bit_len > 8) + result= bit_ptr + 1; + else + result= bit_ptr; + + if (result) + return (byte*) result - table->record[0] + 1; + else + return LAST_NULL_BYTE_UNDEF; +} + Field *Field_bit::new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, @@ -8126,7 +8153,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs) (delta == -1 && (uchar) *from > ((1 << bit_len) - 1)) || (!bit_len && delta < 0)) { - set_rec_bits(0xff, bit_ptr, bit_ofs, bit_len); + set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len); memset(ptr, 0xff, bytes_in_rec); if (table->in_use->really_abort_on_warning()) set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1); @@ -8345,6 +8372,14 @@ const char *Field_bit::unpack(char *to, const char *from) } +void Field_bit::set_default() +{ + my_ptrdiff_t const offset= table->s->default_values - table->record[0]; + uchar bits= get_rec_bits(bit_ptr + offset, bit_ofs, bit_len); + set_rec_bits(bits, bit_ptr, bit_ofs, bit_len); + Field::set_default(); +} + /* Bit field support for non-MyISAM tables. */ @@ -9115,7 +9150,7 @@ create_field::create_field(Field *old_field,Field *orig_field) case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break; default: sql_type= FIELD_TYPE_LONG_BLOB; break; } - length=(length+charset->mbmaxlen-1) / charset->mbmaxlen; + length/= charset->mbmaxlen; key_length/= charset->mbmaxlen; break; case MYSQL_TYPE_STRING: diff --git a/sql/field.h b/sql/field.h index fce3b51c04b..9b81931d416 100644 --- a/sql/field.h +++ b/sql/field.h @@ -217,6 +217,33 @@ public: { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } + + enum { + LAST_NULL_BYTE_UNDEF= 0 + }; + + /* + Find the position of the last null byte for the field. + + SYNOPSIS + last_null_byte() + + DESCRIPTION + Return a pointer to the last byte of the null bytes where the + field conceptually is placed. + + RETURN VALUE + The position of the last null byte relative to the beginning of + the record. If the field does not use any bits of the null + bytes, the value 0 (LAST_NULL_BYTE_UNDEF) is returned. + */ + my_size_t last_null_byte() const { + my_size_t bytes= do_last_null_byte(); + DBUG_PRINT("debug", ("last_null_byte() ==> %d", bytes)); + DBUG_ASSERT(bytes <= table->s->null_bytes); + return bytes; + } + virtual void make_field(Send_field *); virtual void sort_string(char *buff,uint length)=0; virtual bool optimize_range(uint idx, uint part); @@ -377,6 +404,20 @@ public: friend class Item_sum_min; friend class Item_sum_max; friend class Item_func_group_concat; + +private: + /* + Primitive for implementing last_null_byte(). + + SYNOPSIS + do_last_null_byte() + + DESCRIPTION + Primitive for the implementation of the last_null_byte() + function. This represents the inheritance interface and can be + overridden by subclasses. 
+ */ + virtual my_size_t do_last_null_byte() const; }; @@ -1412,6 +1453,8 @@ public: void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); const char *unpack(char* to, const char *from); + virtual void set_default(); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, uint new_null_bit); @@ -1432,6 +1475,9 @@ public: Field::move_field_offset(ptr_diff); bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*); } + +private: + virtual my_size_t do_last_null_byte() const; }; @@ -1476,6 +1522,8 @@ public: uint decimals, flags, pack_length, key_length; Field::utype unireg_check; TYPELIB *interval; // Which interval to use + TYPELIB *save_interval; // Temporary copy for the above + // Used only for UCS2 intervals List interval_list; CHARSET_INFO *charset; Field::geometry_type geom_type; diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 20d1e372a2c..7bc6c432d1c 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -427,6 +427,21 @@ static void do_varstring2(Copy_field *copy) length); } + +static void do_varstring2_mb(Copy_field *copy) +{ + int well_formed_error; + CHARSET_INFO *cs= copy->from_field->charset(); + uint char_length= (copy->to_length - HA_KEY_BLOB_LENGTH) / cs->mbmaxlen; + uint from_length= uint2korr(copy->from_ptr); + const char *from_beg= copy->from_ptr + HA_KEY_BLOB_LENGTH; + uint length= cs->cset->well_formed_len(cs, from_beg, from_beg + from_length, + char_length, &well_formed_error); + int2store(copy->to_ptr, length); + memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, from_beg, length); +} + + /*************************************************************************** ** The different functions that fills in a Copy_field class ***************************************************************************/ @@ -586,7 +601,8 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) return do_field_string; if (to_length != from_length) return (((Field_varstring*) to)->length_bytes == 1 ? - do_varstring1 : do_varstring2); + do_varstring1 : (from->charset()->mbmaxlen == 1 ? + do_varstring2 : do_varstring2_mb)); } else if (to_length < from_length) return (from->charset()->mbmaxlen == 1 ? 
diff --git a/sql/filesort.cc b/sql/filesort.cc index 1851d5a5cff..01f3bb97557 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -626,6 +626,7 @@ static inline void store_length(uchar *to, uint length, uint pack_length) break; case 3: mi_int3store(to, length); + break; default: mi_int4store(to, length); break; @@ -1344,6 +1345,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, switch ((sortorder->result_type=sortorder->item->result_type())) { case STRING_RESULT: sortorder->length=sortorder->item->max_length; + set_if_smaller(sortorder->length, thd->variables.max_sort_length); if (use_strnxfrm((cs=sortorder->item->collation.collation))) { sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 01a7fe5250f..d86d87a0bd0 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -74,18 +74,26 @@ static const int max_transactions= 3; // should really be 2 but there is a trans static uint ndbcluster_partition_flags(); static uint ndbcluster_alter_table_flags(uint flags); -static int ndbcluster_init(void); -static int ndbcluster_end(ha_panic_function flag); -static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type); -static int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info); -static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond); +static int ndbcluster_init(void *); +static int ndbcluster_end(handlerton *hton, ha_panic_function flag); +static bool ndbcluster_show_status(handlerton *hton, THD*, + stat_print_fn *, + enum ha_stat_type); +static int ndbcluster_alter_tablespace(handlerton *hton, + THD* thd, + st_alter_tablespace *info); +static int ndbcluster_fill_files_table(handlerton *hton, + THD *thd, + TABLE_LIST *tables, + COND *cond); -handlerton ndbcluster_hton; +handlerton *ndbcluster_hton; -static handler *ndbcluster_create_handler(TABLE_SHARE *table, +static handler *ndbcluster_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return new (mem_root) ha_ndbcluster(table); + return new (mem_root) ha_ndbcluster(hton, table); } static uint ndbcluster_partition_flags() @@ -2508,9 +2516,11 @@ int ha_ndbcluster::write_row(byte *record) if (has_auto_increment) { THD *thd= table->in_use; + int error; m_skip_auto_increment= FALSE; - update_auto_increment(); + if ((error= update_auto_increment())) + DBUG_RETURN(error); m_skip_auto_increment= (insert_id_for_cur_row == 0); } } @@ -4034,7 +4044,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) thd_ndb->init_open_tables(); thd_ndb->stmt= trans; thd_ndb->query_state&= NDB_QUERY_NORMAL; - trans_register_ha(thd, FALSE, &ndbcluster_hton); + trans_register_ha(thd, FALSE, ndbcluster_hton); } else { @@ -4050,7 +4060,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) thd_ndb->init_open_tables(); thd_ndb->all= trans; thd_ndb->query_state&= NDB_QUERY_NORMAL; - trans_register_ha(thd, TRUE, &ndbcluster_hton); + trans_register_ha(thd, TRUE, ndbcluster_hton); /* If this is the start of a LOCK TABLE, a table look @@ -4204,7 +4214,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) ERR_RETURN(ndb->getNdbError()); no_uncommitted_rows_reset(thd); thd_ndb->stmt= trans; - trans_register_ha(thd, FALSE, &ndbcluster_hton); + trans_register_ha(thd, FALSE, ndbcluster_hton); } thd_ndb->query_state&= NDB_QUERY_NORMAL; m_active_trans= trans; @@ -4221,7 +4231,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) Commit a transaction started in 
NDB */ -static int ndbcluster_commit(THD *thd, bool all) +static int ndbcluster_commit(handlerton *hton, THD *thd, bool all) { int res= 0; Thd_ndb *thd_ndb= get_thd_ndb(thd); @@ -4272,7 +4282,7 @@ static int ndbcluster_commit(THD *thd, bool all) Rollback a transaction started in NDB */ -static int ndbcluster_rollback(THD *thd, bool all) +static int ndbcluster_rollback(handlerton *hton, THD *thd, bool all) { int res= 0; Thd_ndb *thd_ndb= get_thd_ndb(thd); @@ -5572,8 +5582,8 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment, HA_HAS_OWN_BINLOGGING | \ HA_HAS_RECORDS -ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg): - handler(&ndbcluster_hton, table_arg), +ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg): + handler(hton, table_arg), m_active_trans(NULL), m_active_cursor(NULL), m_table(NULL), @@ -5836,7 +5846,7 @@ int ha_ndbcluster::check_ndb_connection(THD* thd) } -static int ndbcluster_close_connection(THD *thd) +static int ndbcluster_close_connection(handlerton *hton, THD *thd) { Thd_ndb *thd_ndb= get_thd_ndb(thd); DBUG_ENTER("ndbcluster_close_connection"); @@ -5853,8 +5863,10 @@ static int ndbcluster_close_connection(THD *thd) Try to discover one table from NDB */ -int ndbcluster_discover(THD* thd, const char *db, const char *name, - const void** frmblob, uint* frmlen) +int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, + const char *name, + const void** frmblob, + uint* frmlen) { int error= 0; NdbError ndb_error; @@ -5934,7 +5946,8 @@ err: */ -int ndbcluster_table_exists_in_engine(THD* thd, const char *db, +int ndbcluster_table_exists_in_engine(handlerton *hton, THD* thd, + const char *db, const char *name) { Ndb* ndb; @@ -6034,7 +6047,7 @@ int ndbcluster_drop_database_impl(const char *path) DBUG_RETURN(ret); } -static void ndbcluster_drop_database(char *path) +static void ndbcluster_drop_database(handlerton *hton, char *path) { THD *thd= current_thd; DBUG_ENTER("ndbcluster_drop_database"); @@ -6195,7 +6208,9 @@ int ndbcluster_find_all_files(THD *thd) DBUG_RETURN(-(skipped + unhandled)); } -int ndbcluster_find_files(THD *thd,const char *db,const char *path, +int ndbcluster_find_files(handlerton *hton, THD *thd, + const char *db, + const char *path, const char *wild, bool dir, List *files) { DBUG_ENTER("ndbcluster_find_files"); @@ -6305,7 +6320,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, DBUG_PRINT("info", ("%s existed on disk", name)); // The .ndb file exists on disk, but it's not in list of tables in ndb // Verify that handler agrees table is gone. 
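Throughout these hunks the engine callbacks gain a leading handlerton* argument (commit, rollback, discover, drop_database, and so on), so a callback no longer reaches for a file-static handlerton such as the old global ndbcluster_hton. A rough sketch of that calling convention follows, using a made-up miniature struct rather than the real handlerton:

    #include <cstdio>

    struct DemoHton;   // purely illustrative stand-in for handlerton

    typedef int (*demo_commit_fn)(DemoHton *hton, void *thd, bool all);

    struct DemoHton {
      const char    *name;
      demo_commit_fn commit;
    };

    static int demo_commit(DemoHton *hton, void * /*thd*/, bool all)
    {
      // The owning instance arrives as a parameter, so one function can
      // serve several registered engine instances.
      std::printf("%s: commit (all=%d)\n", hton->name, (int) all);
      return 0;
    }

    int main()
    {
      DemoHton ndb_like = { "demo-engine", demo_commit };
      return ndb_like.commit(&ndb_like, 0, true);   // caller passes the hton back in
    }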
- if (ndbcluster_table_exists_in_engine(thd, db, file_name) == 0) + if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name) == 0) { DBUG_PRINT("info", ("NDB says %s does not exists", file_name)); it.remove(); @@ -6416,35 +6431,36 @@ static int connect_callback() extern int ndb_dictionary_is_mysqld; -static int ndbcluster_init() +static int ndbcluster_init(void *p) { int res; DBUG_ENTER("ndbcluster_init"); ndb_dictionary_is_mysqld= 1; + ndbcluster_hton= (handlerton *)p; { - handlerton &h= ndbcluster_hton; - h.state= have_ndbcluster; - h.db_type= DB_TYPE_NDBCLUSTER; - h.close_connection= ndbcluster_close_connection; - h.commit= ndbcluster_commit; - h.rollback= ndbcluster_rollback; - h.create= ndbcluster_create_handler; /* Create a new handler */ - h.drop_database= ndbcluster_drop_database; /* Drop a database */ - h.panic= ndbcluster_end; /* Panic call */ - h.show_status= ndbcluster_show_status; /* Show status */ - h.alter_tablespace= ndbcluster_alter_tablespace; /* Show status */ - h.partition_flags= ndbcluster_partition_flags; /* Partition flags */ - h.alter_table_flags=ndbcluster_alter_table_flags; /* Alter table flags */ - h.fill_files_table= ndbcluster_fill_files_table; + handlerton *h= ndbcluster_hton; + h->state= have_ndbcluster; + h->db_type= DB_TYPE_NDBCLUSTER; + h->close_connection= ndbcluster_close_connection; + h->commit= ndbcluster_commit; + h->rollback= ndbcluster_rollback; + h->create= ndbcluster_create_handler; /* Create a new handler */ + h->drop_database= ndbcluster_drop_database; /* Drop a database */ + h->panic= ndbcluster_end; /* Panic call */ + h->show_status= ndbcluster_show_status; /* Show status */ + h->alter_tablespace= ndbcluster_alter_tablespace; /* Show status */ + h->partition_flags= ndbcluster_partition_flags; /* Partition flags */ + h->alter_table_flags=ndbcluster_alter_table_flags; /* Alter table flags */ + h->fill_files_table= ndbcluster_fill_files_table; #ifdef HAVE_NDB_BINLOG ndbcluster_binlog_init_handlerton(); #endif - h.flags= HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED; - h.discover= ndbcluster_discover; - h.find_files= ndbcluster_find_files; - h.table_exists_in_engine= ndbcluster_table_exists_in_engine; + h->flags= HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED; + h->discover= ndbcluster_discover; + h->find_files= ndbcluster_find_files; + h->table_exists_in_engine= ndbcluster_table_exists_in_engine; } if (have_ndbcluster != SHOW_OPTION_YES) @@ -6553,10 +6569,12 @@ ndbcluster_init_error: delete g_ndb_cluster_connection; g_ndb_cluster_connection= NULL; have_ndbcluster= SHOW_OPTION_DISABLED; // If we couldn't use handler + ndbcluster_hton->state= SHOW_OPTION_DISABLED; // If we couldn't use handler + DBUG_RETURN(TRUE); } -static int ndbcluster_end(ha_panic_function type) +static int ndbcluster_end(handlerton *hton, ha_panic_function type) { DBUG_ENTER("ndbcluster_end"); @@ -6640,7 +6658,7 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op) share.db.length= 0; share.table_name.str= (char *) tab_name; share.table_name.length= strlen(tab_name); - ha_ndbcluster error_handler(&share); + ha_ndbcluster error_handler(ndbcluster_hton, &share); error_handler.print_error(error, MYF(0)); DBUG_VOID_RETURN; } @@ -8157,7 +8175,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) Wait for cluster to start */ pthread_mutex_lock(&LOCK_ndb_util_thread); - while (!ndb_cluster_node_id && (ndbcluster_hton.slot != ~(uint)0)) + while (!ndb_cluster_node_id && (ndbcluster_hton->slot != ~(uint)0)) { /* ndb not connected 
yet */ set_timespec(abstime, 1); @@ -9720,7 +9738,7 @@ err: Implements the SHOW NDB STATUS command. */ bool -ndbcluster_show_status(THD* thd, stat_print_fn *stat_print, +ndbcluster_show_status(handlerton *hton, THD* thd, stat_print_fn *stat_print, enum ha_stat_type stat_type) { char buf[IO_SIZE]; @@ -10184,7 +10202,7 @@ bool set_up_undofile(st_alter_tablespace *info, return false; } -int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) +int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info) { DBUG_ENTER("ha_ndbcluster::alter_tablespace"); @@ -10445,7 +10463,9 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts) DBUG_RETURN(TRUE); } -static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, +static int ndbcluster_fill_files_table(handlerton *hton, + THD *thd, + TABLE_LIST *tables, COND *cond) { TABLE* table= tables->table; @@ -10766,7 +10786,7 @@ SHOW_VAR ndb_status_variables_export[]= { }; struct st_mysql_storage_engine ndbcluster_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &ndbcluster_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(ndbcluster) { @@ -10775,6 +10795,7 @@ mysql_declare_plugin(ndbcluster) ndbcluster_hton_name, "MySQL AB", "Clustered, fault-tolerant tables", + PLUGIN_LICENSE_GPL, ndbcluster_init, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100 /* 1.0 */, diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 5cd5bb2c716..19ff513f9b1 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -627,7 +627,7 @@ class Thd_ndb class ha_ndbcluster: public handler { public: - ha_ndbcluster(TABLE_SHARE *table); + ha_ndbcluster(handlerton *hton, TABLE_SHARE *table); ~ha_ndbcluster(); int ha_initialise(); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 2d70e953f91..5f5c8bcb221 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -506,7 +506,7 @@ ndbcluster_binlog_index_purge_file(THD *thd, const char *file) } static void -ndbcluster_binlog_log_query(THD *thd, enum_binlog_command binlog_command, +ndbcluster_binlog_log_query(handlerton *hton, THD *thd, enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name) { @@ -637,7 +637,9 @@ static void ndbcluster_reset_slave(THD *thd) /* Initialize the binlog part of the ndb handlerton */ -static int ndbcluster_binlog_func(THD *thd, enum_binlog_func fn, void *arg) +static int ndbcluster_binlog_func(handlerton *hton, THD *thd, + enum_binlog_func fn, + void *arg) { switch(fn) { @@ -662,9 +664,9 @@ static int ndbcluster_binlog_func(THD *thd, enum_binlog_func fn, void *arg) void ndbcluster_binlog_init_handlerton() { - handlerton &h= ndbcluster_hton; - h.binlog_func= ndbcluster_binlog_func; - h.binlog_log_query= ndbcluster_binlog_log_query; + handlerton *h= ndbcluster_hton; + h->binlog_func= ndbcluster_binlog_func; + h->binlog_log_query= ndbcluster_binlog_log_query; } @@ -3473,7 +3475,7 @@ restart: if (thd_ndb == NULL) { - DBUG_ASSERT(ndbcluster_hton.slot != ~(uint)0); + DBUG_ASSERT(ndbcluster_hton->slot != ~(uint)0); if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) { sql_print_error("Could not allocate Thd_ndb object"); diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h index 774efb5984c..233d1a58aaa 100644 --- a/sql/ha_ndbcluster_binlog.h +++ b/sql/ha_ndbcluster_binlog.h @@ -31,6 +31,8 @@ extern ulong ndb_extra_logging; #define NDB_INVALID_SCHEMA_OBJECT 241 +extern handlerton *ndbcluster_hton; + /* The 
numbers below must not change as they are passed between mysql servers, and if changed @@ -103,7 +105,6 @@ extern pthread_mutex_t injector_mutex; extern pthread_cond_t injector_cond; extern unsigned char g_node_id_map[max_ndb_nodes]; -extern handlerton ndbcluster_hton; extern pthread_t ndb_util_thread; extern pthread_mutex_t LOCK_ndb_util_thread; extern pthread_cond_t COND_ndb_util_thread; @@ -214,10 +215,10 @@ inline void real_free_share(NDB_SHARE **share) inline Thd_ndb * -get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton.slot]; } +get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton->slot]; } inline void -set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; } +set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton->slot]= thd_ndb; } Ndb* check_ndb_in_thd(THD* thd); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index bd8d604ef0d..deb3bedb203 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -69,21 +69,26 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table); MODULE create/delete handler object ****************************************************************************/ -static handler *partition_create_handler(TABLE_SHARE *share, +static handler *partition_create_handler(handlerton *hton, + TABLE_SHARE *share, MEM_ROOT *mem_root); static uint partition_flags(); static uint alter_table_flags(uint flags); -handlerton partition_hton; -static int partition_initialize() +static int partition_initialize(void *p) { - partition_hton.state= SHOW_OPTION_YES; - partition_hton.db_type= DB_TYPE_PARTITION_DB; - partition_hton.create= partition_create_handler; - partition_hton.partition_flags= partition_flags; - partition_hton.alter_table_flags= alter_table_flags; - partition_hton.flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN; + + handlerton *partition_hton; + partition_hton= (handlerton *)p; + + partition_hton->state= SHOW_OPTION_YES; + partition_hton->db_type= DB_TYPE_PARTITION_DB; + partition_hton->create= partition_create_handler; + partition_hton->partition_flags= partition_flags; + partition_hton->alter_table_flags= alter_table_flags; + partition_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN; + return 0; } @@ -98,10 +103,11 @@ static int partition_initialize() New partition object */ -static handler *partition_create_handler(TABLE_SHARE *share, +static handler *partition_create_handler(handlerton *hton, + TABLE_SHARE *share, MEM_ROOT *mem_root) { - ha_partition *file= new (mem_root) ha_partition(share); + ha_partition *file= new (mem_root) ha_partition(hton, share); if (file && file->initialise_partition(mem_root)) { delete file; @@ -151,8 +157,8 @@ static uint alter_table_flags(uint flags __attribute__((unused))) NONE */ -ha_partition::ha_partition(TABLE_SHARE *share) - :handler(&partition_hton, share), m_part_info(NULL), m_create_handler(FALSE), +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) + :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), m_is_sub_partitioned(0) { DBUG_ENTER("ha_partition::ha_partition(table)"); @@ -172,8 +178,8 @@ ha_partition::ha_partition(TABLE_SHARE *share) NONE */ -ha_partition::ha_partition(partition_info *part_info) - :handler(&partition_hton, NULL), m_part_info(part_info), +ha_partition::ha_partition(handlerton *hton, partition_info *part_info) + :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), m_is_sub_partitioned(m_part_info->is_sub_partitioned()) @@ -2016,7 +2022,7 @@ 
bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i])); } /* For the moment we only support partition over the same table engine */ - if (m_engine_array[0] == &myisam_hton) + if (m_engine_array[0] == myisam_hton) { DBUG_PRINT("info", ("MyISAM")); m_myisam= TRUE; @@ -2089,7 +2095,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) (uint) ha_legacy_type(part_elem->engine_type))); } } while (++i < m_part_info->no_parts); - if (part_elem->engine_type == &myisam_hton) + if (part_elem->engine_type == myisam_hton) { DBUG_PRINT("info", ("MyISAM")); m_myisam= TRUE; @@ -5628,7 +5634,7 @@ static int free_share(PARTITION_SHARE *share) #endif /* NOT_USED */ struct st_mysql_storage_engine partition_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &partition_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(partition) { @@ -5637,6 +5643,7 @@ mysql_declare_plugin(partition) "partition", "Mikael Ronstrom, MySQL AB", "Partition Storage Engine Helper", + PLUGIN_LICENSE_GPL, partition_initialize, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100, /* 1.0 */ diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 97086d7b632..b66db205549 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -149,8 +149,8 @@ public: partition handler. ------------------------------------------------------------------------- */ - ha_partition(TABLE_SHARE * table); - ha_partition(partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE * table); + ha_partition(handlerton *hton, partition_info * part_info); ~ha_partition(); /* A partition handler has no characteristics in itself. It only inherits diff --git a/sql/handler.cc b/sql/handler.cc index cf080a843c7..ccf1a1ef8d9 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -97,7 +97,7 @@ handlerton *ha_default_handlerton(THD *thd) return (thd->variables.table_type != NULL) ? thd->variables.table_type : (global_system_variables.table_type != NULL ? - global_system_variables.table_type : &myisam_hton); + global_system_variables.table_type : myisam_hton); } @@ -170,7 +170,7 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type) static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root) { handlerton *hton= ha_default_handlerton(current_thd); - return (hton && hton->create) ? hton->create(table, mem_root) : NULL; + return (hton && hton->create) ? hton->create(hton, table, mem_root) : NULL; } @@ -232,7 +232,7 @@ handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc, if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create) { - if ((file= db_type->create(share, alloc))) + if ((file= db_type->create(db_type, share, alloc))) file->init(); DBUG_RETURN(file); } @@ -251,7 +251,7 @@ handler *get_ha_partition(partition_info *part_info) { ha_partition *partition; DBUG_ENTER("get_ha_partition"); - if ((partition= new ha_partition(part_info))) + if ((partition= new ha_partition(partition_hton, part_info))) { if (partition->initialise_partition(current_thd->mem_root)) { @@ -332,6 +332,8 @@ static int ha_init_errors(void) SETMSG(HA_ERR_FOREIGN_DUPLICATE_KEY, "FK constraint would lead to duplicate key"); SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER(ER_TABLE_NEEDS_UPGRADE)); SETMSG(HA_ERR_TABLE_READONLY, ER(ER_OPEN_AS_READONLY)); + SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER(ER_AUTOINC_READ_FAILED)); + SETMSG(HA_ERR_AUTOINC_ERANGE, ER(ER_WARN_DATA_OUT_OF_RANGE)); /* Register the error messages for use with my_error(). 
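partition_initialize() and ndbcluster_init() now receive a void* that is really the handlerton the server allocated, cast it, and fill in only the fields they implement. A self-contained sketch of that registration shape is below; DemoHton and its members are invented for illustration, only the overall init-hook pattern mirrors the patch.

    #include <cstdlib>

    struct DemoHton {
      int   state;
      int   db_type;
      void *(*create)(DemoHton *hton, void *share, void *mem_root);
      unsigned int flags;
    };

    // Plugin init hook: the framework hands over an opaque pointer that the
    // engine casts to its descriptor and populates.  A non-zero return would
    // signal failure, as in ha_initialize_handlerton().
    static int demo_initialize(void *p)
    {
      DemoHton *hton = (DemoHton *) p;
      hton->state   = 1;     /* SHOW_OPTION_YES in the real code        */
      hton->db_type = 42;    /* engine-specific legacy type id           */
      hton->create  = 0;     /* would point at the handler factory       */
      hton->flags   = 0;
      return 0;
    }

    int main()
    {
      // Server side: allocate zero-filled, pass to init, free on shutdown.
      DemoHton *hton = (DemoHton *) std::calloc(1, sizeof(DemoHton));
      int failed = demo_initialize(hton);
      std::free(hton);
      return failed;
    }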
*/ return my_error_register(errmsgs, HA_ERR_FIRST, HA_ERR_LAST); @@ -374,20 +376,49 @@ int ha_finalize_handlerton(st_plugin_int *plugin) case SHOW_OPTION_YES: if (installed_htons[hton->db_type] == hton) installed_htons[hton->db_type]= NULL; - if (hton->panic && hton->panic(HA_PANIC_CLOSE)) + if (hton->panic && hton->panic(hton, HA_PANIC_CLOSE)) DBUG_RETURN(1); break; }; + + if (plugin->plugin->deinit) + { + /* + Today we have no defined/special behavior for uninstalling + engine plugins. + */ + DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str)); + if (plugin->plugin->deinit(NULL)) + { + DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.", + plugin->name.str)); + } + } + + my_free((gptr)hton, MYF(0)); + DBUG_RETURN(0); } int ha_initialize_handlerton(st_plugin_int *plugin) { - handlerton *hton= ((st_mysql_storage_engine *)plugin->plugin->info)->handlerton; + handlerton *hton; DBUG_ENTER("ha_initialize_handlerton"); + hton= (handlerton *)my_malloc(sizeof(handlerton), + MYF(MY_WME | MY_ZEROFILL)); + /* Historical Requirement */ plugin->data= hton; // shortcut for the future + if (plugin->plugin->init) + { + if (plugin->plugin->init(hton)) + { + sql_print_error("Plugin '%s' init function returned error.", + plugin->name.str); + goto err; + } + } /* the switch below and hton->state should be removed when @@ -434,7 +465,29 @@ int ha_initialize_handlerton(st_plugin_int *plugin) hton->state= SHOW_OPTION_DISABLED; break; } + + /* + This is entirely for legacy. We will create a new "disk based" hton and a + "memory" hton which will be configurable longterm. We should be able to + remove partition and myisammrg. + */ + switch (hton->db_type) { + case DB_TYPE_HEAP: + heap_hton= hton; + break; + case DB_TYPE_MYISAM: + myisam_hton= hton; + break; + case DB_TYPE_PARTITION_DB: + partition_hton= hton; + break; + default: + break; + }; + DBUG_RETURN(0); +err: + DBUG_RETURN(1); } int ha_init() @@ -465,7 +518,7 @@ static my_bool panic_handlerton(THD *unused1, st_plugin_int *plugin, void *arg) { handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->panic) - ((int*)arg)[0]|= hton->panic((enum ha_panic_function)((int*)arg)[1]); + ((int*)arg)[0]|= hton->panic(hton, (enum ha_panic_function)((int*)arg)[1]); return FALSE; } @@ -487,7 +540,7 @@ static my_bool dropdb_handlerton(THD *unused1, st_plugin_int *plugin, { handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->drop_database) - hton->drop_database((char *)path); + hton->drop_database(hton, (char *)path); return FALSE; } @@ -508,7 +561,7 @@ static my_bool closecon_handlerton(THD *thd, st_plugin_int *plugin, */ if (hton->state == SHOW_OPTION_YES && hton->close_connection && thd->ha_data[hton->slot]) - hton->close_connection(thd); + hton->close_connection(hton, thd); return FALSE; } @@ -584,7 +637,7 @@ int ha_prepare(THD *thd) statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status); if ((*ht)->prepare) { - if ((err= (*(*ht)->prepare)(thd, all))) + if ((err= (*(*ht)->prepare)(*ht, thd, all))) { my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); ha_rollback_trans(thd, all); @@ -658,7 +711,7 @@ int ha_commit_trans(THD *thd, bool all) for (; *ht && !error; ht++) { int err; - if ((err= (*(*ht)->prepare)(thd, all))) + if ((err= (*(*ht)->prepare)(*ht, thd, all))) { my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); error= 1; @@ -705,7 +758,7 @@ int ha_commit_one_phase(THD *thd, bool all) for (ht=trans->ht; *ht; ht++) { int err; - if ((err= (*(*ht)->commit)(thd, 
all))) + if ((err= (*(*ht)->commit)(*ht, thd, all))) { my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); error=1; @@ -761,7 +814,7 @@ int ha_rollback_trans(THD *thd, bool all) for (handlerton **ht=trans->ht; *ht; ht++) { int err; - if ((err= (*(*ht)->rollback)(thd, all))) + if ((err= (*(*ht)->rollback)(*ht, thd, all))) { // cannot happen my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); error=1; @@ -838,7 +891,7 @@ static my_bool xacommit_handlerton(THD *unused1, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->recover) { - hton->commit_by_xid(((struct xahton_st *)arg)->xid); + hton->commit_by_xid(hton, ((struct xahton_st *)arg)->xid); ((struct xahton_st *)arg)->result= 0; } return FALSE; @@ -850,7 +903,7 @@ static my_bool xarollback_handlerton(THD *unused1, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->recover) { - hton->rollback_by_xid(((struct xahton_st *)arg)->xid); + hton->rollback_by_xid(hton, ((struct xahton_st *)arg)->xid); ((struct xahton_st *)arg)->result= 0; } return FALSE; @@ -960,7 +1013,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin, if (hton->state == SHOW_OPTION_YES && hton->recover) { - while ((got= hton->recover(info->list, info->len)) > 0 ) + while ((got= hton->recover(hton, info->list, info->len)) > 0 ) { sql_print_information("Found %d prepared transaction(s) in %s", got, hton2plugin[hton->slot]->name.str); @@ -991,7 +1044,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin, char buf[XIDDATASIZE*4+6]; // see xid_to_str sql_print_information("commit xid %s", xid_to_str(buf, info->list+i)); #endif - hton->commit_by_xid(info->list+i); + hton->commit_by_xid(hton, info->list+i); } else { @@ -1000,7 +1053,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin, sql_print_information("rollback xid %s", xid_to_str(buf, info->list+i)); #endif - hton->rollback_by_xid(info->list+i); + hton->rollback_by_xid(hton, info->list+i); } } if (got < info->len) @@ -1146,7 +1199,7 @@ static my_bool release_temporary_latches(THD *thd, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->release_temporary_latches) - hton->release_temporary_latches(thd); + hton->release_temporary_latches(hton, thd); return FALSE; } @@ -1179,7 +1232,7 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv) { int err; DBUG_ASSERT((*ht)->savepoint_set != 0); - if ((err= (*(*ht)->savepoint_rollback)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + if ((err= (*(*ht)->savepoint_rollback)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) { // cannot happen my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); error=1; @@ -1195,7 +1248,7 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv) for (; *ht ; ht++) { int err; - if ((err= (*(*ht)->rollback)(thd, !thd->in_sub_stmt))) + if ((err= (*(*ht)->rollback)(*ht, thd, !thd->in_sub_stmt))) { // cannot happen my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); error=1; @@ -1229,7 +1282,7 @@ int ha_savepoint(THD *thd, SAVEPOINT *sv) error=1; break; } - if ((err= (*(*ht)->savepoint_set)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + if ((err= (*(*ht)->savepoint_set)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) { // cannot happen my_error(ER_GET_ERRNO, MYF(0), err); error=1; @@ -1255,7 +1308,9 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv) int err; if (!(*ht)->savepoint_release) continue; - if ((err= 
(*(*ht)->savepoint_release)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + if ((err= (*(*ht)->savepoint_release)(*ht, thd, + (byte *)(sv+1)+ + (*ht)->savepoint_offset))) { // cannot happen my_error(ER_GET_ERRNO, MYF(0), err); error=1; @@ -1272,7 +1327,7 @@ static my_bool snapshot_handlerton(THD *thd, st_plugin_int *plugin, if (hton->state == SHOW_OPTION_YES && hton->start_consistent_snapshot) { - hton->start_consistent_snapshot(thd); + hton->start_consistent_snapshot(hton, thd); *((bool *)arg)= false; } return FALSE; @@ -1300,7 +1355,8 @@ static my_bool flush_handlerton(THD *thd, st_plugin_int *plugin, void *arg) { handlerton *hton= (handlerton *)plugin->data; - if (hton->state == SHOW_OPTION_YES && hton->flush_logs && hton->flush_logs()) + if (hton->state == SHOW_OPTION_YES && hton->flush_logs && + hton->flush_logs(hton)) return TRUE; return FALSE; } @@ -1317,7 +1373,7 @@ bool ha_flush_logs(handlerton *db_type) else { if (db_type->state != SHOW_OPTION_YES || - (db_type->flush_logs && db_type->flush_logs())) + (db_type->flush_logs && db_type->flush_logs(db_type))) return TRUE; } return FALSE; @@ -1403,6 +1459,17 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, /**************************************************************************** ** General handler functions ****************************************************************************/ +handler *handler::clone(MEM_ROOT *mem_root) +{ + handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type); + if (new_handler && !new_handler->ha_open(table, + table->s->normalized_path.str, + table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) + return new_handler; + return NULL; +} + void handler::ha_statistic_increment(ulong SSV::*offset) const @@ -1614,7 +1681,10 @@ prev_insert_id(ulonglong nr, struct system_variables *variables) RETURN 0 ok - 1 get_auto_increment() was called and returned ~(ulonglong) 0 + HA_ERR_AUTOINC_READ_FAILED + get_auto_increment() was called and returned ~(ulonglong) 0 + HA_ERR_AUTOINC_ERANGE + storing value in field caused strict mode failure. 
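update_auto_increment() now returns 0 or a specific handler error instead of a bool, and callers such as ha_ndbcluster::write_row() propagate it. A hedged sketch of the caller-side pattern: the two failure names mirror the codes documented above, everything else is illustrative.

    // Illustrative constants mirroring the two documented failure modes.
    enum {
      DEMO_OK                  = 0,
      DEMO_AUTOINC_READ_FAILED = 1,   // engine could not reserve a value
      DEMO_AUTOINC_ERANGE      = 2    // reserved value rejected in strict mode
    };

    // Stand-in for handler::update_auto_increment().
    static int demo_update_auto_increment(bool reserve_ok, bool fits_in_column)
    {
      if (!reserve_ok)
        return DEMO_AUTOINC_READ_FAILED;
      if (!fits_in_column)
        return DEMO_AUTOINC_ERANGE;
      return DEMO_OK;
    }

    // Caller pattern after this patch: check and propagate the error
    // instead of ignoring the old boolean result.
    static int demo_write_row(bool has_auto_increment)
    {
      if (has_auto_increment)
      {
        int error = demo_update_auto_increment(true, true);
        if (error)
          return error;
      }
      return DEMO_OK;
    }

    int main() { return demo_write_row(true); }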
IMPLEMENTATION @@ -1683,14 +1753,13 @@ prev_insert_id(ulonglong nr, struct system_variables *variables) #define AUTO_INC_DEFAULT_NB_MAX_BITS 16 #define AUTO_INC_DEFAULT_NB_MAX ((1 << AUTO_INC_DEFAULT_NB_MAX_BITS) - 1) -bool handler::update_auto_increment() +int handler::update_auto_increment() { ulonglong nr, nb_reserved_values; bool append= FALSE; THD *thd= table->in_use; struct system_variables *variables= &thd->variables; bool auto_increment_field_not_null; - bool result= 0; DBUG_ENTER("handler::update_auto_increment"); /* @@ -1767,7 +1836,7 @@ bool handler::update_auto_increment() nb_desired_values, &nr, &nb_reserved_values); if (nr == ~(ulonglong) 0) - result= 1; // Mark failure + DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure /* That rounding below should not be needed when all engines actually @@ -1801,6 +1870,12 @@ bool handler::update_auto_increment() if (unlikely(table->next_number_field->store((longlong) nr, TRUE))) { + /* + first test if the query was aborted due to strict mode constraints + */ + if (thd->killed == THD::KILL_BAD_DATA) + DBUG_RETURN(HA_ERR_AUTOINC_ERANGE); + /* field refused this value (overflow) and truncated it, use the result of the truncation (which is going to be inserted); however we try to @@ -1819,7 +1894,6 @@ bool handler::update_auto_increment() variables->auto_increment_increment); /* Row-based replication does not need to store intervals in binlog */ if (!thd->current_stmt_binlog_row_based) - result= result || thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(), auto_inc_interval_for_cur_row.values(), variables->auto_increment_increment); @@ -1839,7 +1913,7 @@ bool handler::update_auto_increment() */ set_next_insert_id(compute_next_insert_id(nr, variables)); - DBUG_RETURN(result); + DBUG_RETURN(0); } @@ -2134,6 +2208,12 @@ void handler::print_error(int error, myf errflag) case HA_ERR_TABLE_READONLY: textno= ER_OPEN_AS_READONLY; break; + case HA_ERR_AUTOINC_READ_FAILED: + textno= ER_AUTOINC_READ_FAILED; + break; + case HA_ERR_AUTOINC_ERANGE: + textno= ER_WARN_DATA_OUT_OF_RANGE; + break; default: { /* The error was "unknown" to this function. 
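print_error() gains cases translating HA_ERR_AUTOINC_READ_FAILED and HA_ERR_AUTOINC_ERANGE into user-visible message codes, matching the SETMSG() registrations earlier in the patch. A minimal sketch of that switch-based translation; the identifiers and numeric values are placeholders, not MySQL's:

    #include <cstdio>

    enum DemoHandlerError { DEMO_HA_AUTOINC_READ_FAILED = 1, DEMO_HA_AUTOINC_ERANGE = 2 };
    enum DemoMessage      { DEMO_ER_UNKNOWN = 0, DEMO_ER_AUTOINC_READ_FAILED = 100,
                            DEMO_ER_DATA_OUT_OF_RANGE = 101 };

    static DemoMessage demo_map_error(int handler_error)
    {
      switch (handler_error) {
      case DEMO_HA_AUTOINC_READ_FAILED:
        return DEMO_ER_AUTOINC_READ_FAILED;
      case DEMO_HA_AUTOINC_ERANGE:
        return DEMO_ER_DATA_OUT_OF_RANGE;   // same text as a range warning
      default:
        return DEMO_ER_UNKNOWN;             // fall back to generic handling
      }
    }

    int main()
    {
      std::printf("%d -> %d\n", DEMO_HA_AUTOINC_ERANGE,
                  (int) demo_map_error(DEMO_HA_AUTOINC_ERANGE));
      return 0;
    }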
@@ -2708,7 +2788,9 @@ static my_bool discover_handlerton(THD *thd, st_plugin_int *plugin, st_discover_args *vargs= (st_discover_args *)arg; handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->discover && - (!(hton->discover(thd, vargs->db, vargs->name, vargs->frmblob, vargs->frmlen)))) + (!(hton->discover(hton, thd, vargs->db, vargs->name, + vargs->frmblob, + vargs->frmlen)))) return TRUE; return FALSE; @@ -2757,7 +2839,7 @@ static my_bool find_files_handlerton(THD *thd, st_plugin_int *plugin, if (hton->state == SHOW_OPTION_YES && hton->find_files) - if (hton->find_files(thd, vargs->db, vargs->path, vargs->wild, + if (hton->find_files(hton, thd, vargs->db, vargs->path, vargs->wild, vargs->dir, vargs->files)) return TRUE; @@ -2804,7 +2886,7 @@ static my_bool table_exists_in_engine_handlerton(THD *thd, st_plugin_int *plugin handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->table_exists_in_engine) - if ((hton->table_exists_in_engine(thd, vargs->db, vargs->name)) == 1) + if ((hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name)) == 1) return TRUE; return FALSE; @@ -2873,7 +2955,7 @@ static my_bool binlog_func_foreach(THD *thd, binlog_func_st *bfn) uint i= 0, sz= hton_list.sz; while(i < sz) - hton_list.hton[i++]->binlog_func(thd, bfn->fn, bfn->arg); + hton_list.hton[i++]->binlog_func(hton, thd, bfn->fn, bfn->arg); return FALSE; } @@ -2920,12 +3002,12 @@ struct binlog_log_query_st }; static my_bool binlog_log_query_handlerton2(THD *thd, - const handlerton *hton, + handlerton *hton, void *args) { struct binlog_log_query_st *b= (struct binlog_log_query_st*)args; if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query) - hton->binlog_log_query(thd, + hton->binlog_log_query(hton, thd, b->binlog_command, b->query, b->query_length, @@ -2938,10 +3020,10 @@ static my_bool binlog_log_query_handlerton(THD *thd, st_plugin_int *plugin, void *args) { - return binlog_log_query_handlerton2(thd, (const handlerton *)plugin->data, args); + return binlog_log_query_handlerton2(thd, (handlerton *)plugin->data, args); } -void ha_binlog_log_query(THD *thd, const handlerton *hton, +void ha_binlog_log_query(THD *thd, handlerton *hton, enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name) @@ -3239,7 +3321,7 @@ static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; handler *file; if (hton->state == SHOW_OPTION_YES && hton->create && - (file= hton->create((TABLE_SHARE*) 0, current_thd->mem_root))) + (file= hton->create(hton, (TABLE_SHARE*) 0, current_thd->mem_root))) { List_iterator_fast it(*found_exts); const char **ext, *old_ext; @@ -3314,7 +3396,7 @@ static my_bool showstat_handlerton(THD *thd, st_plugin_int *plugin, enum ha_stat_type stat= *(enum ha_stat_type *) arg; handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->show_status && - hton->show_status(thd, stat_print, stat)) + hton->show_status(hton, thd, stat_print, stat)) return TRUE; return FALSE; } @@ -3348,7 +3430,7 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat) } else result= db_type->show_status && - db_type->show_status(thd, stat_print, stat) ? 1 : 0; + db_type->show_status(db_type, thd, stat_print, stat) ? 
1 : 0; } if (!result) @@ -3669,7 +3751,7 @@ int example_of_iterator_using_for_logs_cleanup(handlerton *hton) if (!hton->create_iterator) return 1; /* iterator creator is not supported */ - if ((*hton->create_iterator)(HA_TRANSACTLOG_ITERATOR, &iterator) != + if ((*hton->create_iterator)(hton, HA_TRANSACTLOG_ITERATOR, &iterator) != HA_ITERATOR_OK) { /* error during creation of log iterator or iterator is not supported */ diff --git a/sql/handler.h b/sql/handler.h index abc3e18d7a0..5e26d9c7b63 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -449,7 +449,7 @@ class st_alter_tablespace : public Sql_alloc ulonglong autoextend_size; ulonglong max_size; uint nodegroup_id; - const handlerton *storage_engine; + handlerton *storage_engine; bool wait_until_completed; const char *ts_comment; enum tablespace_access_mode ts_access_mode; @@ -605,18 +605,18 @@ struct handlerton this storage area - set it to something, so that MySQL would know this storage engine was accessed in this connection */ - int (*close_connection)(THD *thd); + int (*close_connection)(handlerton *hton, THD *thd); /* sv points to an uninitialized storage area of requested size (see savepoint_offset description) */ - int (*savepoint_set)(THD *thd, void *sv); + int (*savepoint_set)(handlerton *hton, THD *thd, void *sv); /* sv points to a storage area, that was earlier passed to the savepoint_set call */ - int (*savepoint_rollback)(THD *thd, void *sv); - int (*savepoint_release)(THD *thd, void *sv); + int (*savepoint_rollback)(handlerton *hton, THD *thd, void *sv); + int (*savepoint_release)(handlerton *hton, THD *thd, void *sv); /* 'all' is true if it's a real commit, that makes persistent changes 'all' is false if it's not in fact a commit but an end of the @@ -624,25 +624,25 @@ struct handlerton NOTE 'all' is also false in auto-commit mode where 'end of statement' and 'real commit' mean the same event. 
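ha_prepare() and ha_commit_trans() now call the per-engine prepare and commit hooks with the owning handlerton as first argument, and the comment above spells out what the 'all' flag means. A compact sketch of such a two-phase loop over registered engines; the structs and error handling are simplified assumptions, not the server's actual logic.

    #include <cstddef>
    #include <vector>

    struct DemoHton;
    typedef int (*demo_txn_fn)(DemoHton *hton, void *thd, bool all);

    struct DemoHton {
      demo_txn_fn prepare;
      demo_txn_fn commit;
      demo_txn_fn rollback;
    };

    // Phase one: every participating engine prepares.  Phase two: commit, or
    // roll everything back if any prepare failed.  'all' carries the
    // statement-versus-real-commit distinction described above.
    static int demo_commit_trans(std::vector<DemoHton*> &engines, void *thd, bool all)
    {
      for (size_t i= 0; i < engines.size(); i++)
      {
        if (engines[i]->prepare && engines[i]->prepare(engines[i], thd, all))
        {
          for (size_t j= 0; j < engines.size(); j++)
            if (engines[j]->rollback)
              engines[j]->rollback(engines[j], thd, all);
          return 1;
        }
      }
      for (size_t i= 0; i < engines.size(); i++)
        if (engines[i]->commit && engines[i]->commit(engines[i], thd, all))
          return 1;
      return 0;
    }

    static int demo_ok(DemoHton *, void *, bool) { return 0; }

    int main()
    {
      DemoHton a= { demo_ok, demo_ok, demo_ok };
      DemoHton b= { 0, demo_ok, demo_ok };       // engine without a prepare hook
      std::vector<DemoHton*> engines;
      engines.push_back(&a);
      engines.push_back(&b);
      return demo_commit_trans(engines, 0, true);
    }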
*/ - int (*commit)(THD *thd, bool all); - int (*rollback)(THD *thd, bool all); - int (*prepare)(THD *thd, bool all); - int (*recover)(XID *xid_list, uint len); - int (*commit_by_xid)(XID *xid); - int (*rollback_by_xid)(XID *xid); - void *(*create_cursor_read_view)(); - void (*set_cursor_read_view)(void *); - void (*close_cursor_read_view)(void *); - handler *(*create)(TABLE_SHARE *table, MEM_ROOT *mem_root); - void (*drop_database)(char* path); - int (*panic)(enum ha_panic_function flag); - int (*start_consistent_snapshot)(THD *thd); - bool (*flush_logs)(); - bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat); + int (*commit)(handlerton *hton, THD *thd, bool all); + int (*rollback)(handlerton *hton, THD *thd, bool all); + int (*prepare)(handlerton *hton, THD *thd, bool all); + int (*recover)(handlerton *hton, XID *xid_list, uint len); + int (*commit_by_xid)(handlerton *hton, XID *xid); + int (*rollback_by_xid)(handlerton *hton, XID *xid); + void *(*create_cursor_read_view)(handlerton *hton, THD *thd); + void (*set_cursor_read_view)(handlerton *hton, THD *thd, void *read_view); + void (*close_cursor_read_view)(handlerton *hton, THD *thd, void *read_view); + handler *(*create)(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root); + void (*drop_database)(handlerton *hton, char* path); + int (*panic)(handlerton *hton, enum ha_panic_function flag); + int (*start_consistent_snapshot)(handlerton *hton, THD *thd); + bool (*flush_logs)(handlerton *hton); + bool (*show_status)(handlerton *hton, THD *thd, stat_print_fn *print, enum ha_stat_type stat); uint (*partition_flags)(); uint (*alter_table_flags)(uint flags); - int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info); - int (*fill_files_table)(THD *thd, + int (*alter_tablespace)(handlerton *hton, THD *thd, st_alter_tablespace *ts_info); + int (*fill_files_table)(handlerton *hton, THD *thd, struct st_table_list *tables, class Item *cond); uint32 flags; /* global handler flags */ @@ -650,11 +650,12 @@ struct handlerton Those handlerton functions below are properly initialized at handler init. */ - int (*binlog_func)(THD *thd, enum_binlog_func fn, void *arg); - void (*binlog_log_query)(THD *thd, enum_binlog_command binlog_command, + int (*binlog_func)(handlerton *hton, THD *thd, enum_binlog_func fn, void *arg); + void (*binlog_log_query)(handlerton *hton, THD *thd, + enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name); - int (*release_temporary_latches)(THD *thd); + int (*release_temporary_latches)(handlerton *hton, THD *thd); /* Get log status. @@ -663,21 +664,26 @@ struct handlerton (see example of implementation in handler.cc, TRANS_LOG_MGM_EXAMPLE_CODE) */ - enum log_status (*get_log_status)(char *log); + enum log_status (*get_log_status)(handlerton *hton, char *log); /* Iterators creator. 
Presence of the pointer should be checked before using */ enum handler_create_iterator_result - (*create_iterator)(enum handler_iterator_type type, + (*create_iterator)(handlerton *hton, enum handler_iterator_type type, struct handler_iterator *fill_this_in); - int (*discover)(THD* thd, const char *db, const char *name, - const void** frmblob, uint* frmlen); - int (*find_files)(THD *thd,const char *db,const char *path, + int (*discover)(handlerton *hton, THD* thd, const char *db, + const char *name, + const void** frmblob, + uint* frmlen); + int (*find_files)(handlerton *hton, THD *thd, + const char *db, + const char *path, const char *wild, bool dir, List *files); - int (*table_exists_in_engine)(THD* thd, const char *db, + int (*table_exists_in_engine)(handlerton *hton, THD* thd, const char *db, const char *name); + uint32 license; /* Flag for Engine License */ }; @@ -690,6 +696,8 @@ struct handlerton #define HTON_FLUSH_AFTER_RENAME (1 << 4) #define HTON_NOT_USER_SELECTABLE (1 << 5) #define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported +#define HTON_SUPPORT_LOG_TABLES (1 << 7) //Engine supports log tables +#define HTON_NO_PARTITION (1 << 8) //You can not partition these tables typedef struct st_thd_trans { @@ -892,7 +900,7 @@ class handler :public Sql_alloc virtual void start_bulk_insert(ha_rows rows) {} virtual int end_bulk_insert() {return 0; } public: - const handlerton *ht; /* storage engine of this handler */ + handlerton *ht; /* storage engine of this handler */ byte *ref; /* Pointer to current row */ byte *dup_ref; /* Pointer to duplicate row */ @@ -942,7 +950,7 @@ public: */ Discrete_interval auto_inc_interval_for_cur_row; - handler(const handlerton *ht_arg, TABLE_SHARE *share_arg) + handler(handlerton *ht_arg, TABLE_SHARE *share_arg) :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg), ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY), ref_length(sizeof(my_off_t)), @@ -953,6 +961,7 @@ public: { /* TODO: DBUG_ASSERT(inited == NONE); */ } + virtual handler *clone(MEM_ROOT *mem_root); /* This is called after create to allow us to set up cached variables */ void init() { @@ -989,7 +998,7 @@ public: ulong type, TABLE *table); int ha_open(TABLE *table, const char *name, int mode, int test_if_locked); void adjust_next_insert_id_after_explicit_value(ulonglong nr); - bool update_auto_increment(); + int update_auto_increment(); void print_keydup_error(uint key_nr, const char *msg); virtual void print_error(int error, myf errflag); virtual bool get_error_message(int error, String *buf); @@ -1714,7 +1723,7 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht); int ha_reset_logs(THD *thd); int ha_binlog_index_purge_file(THD *thd, const char *file); void ha_reset_slave(THD *thd); -void ha_binlog_log_query(THD *thd, const handlerton *db_type, +void ha_binlog_log_query(THD *thd, handlerton *db_type, enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name); diff --git a/sql/item.cc b/sql/item.cc index 577eff3e5f6..f8a8b4a6272 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1210,6 +1210,7 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, split_sum_func(thd, ref_pointer_array, fields); } else if ((type() == SUM_FUNC_ITEM || (used_tables() & ~PARAM_TABLE_BIT)) && + type() != SUBSELECT_ITEM && (type() != REF_ITEM || ((Item_ref*)this)->ref_type() == Item_ref::VIEW_REF)) { @@ -3795,6 +3796,7 @@ void Item_field::cleanup() I.e. we can drop 'field'. 
*/ field= result_field= 0; + null_value= FALSE; DBUG_VOID_RETURN; } @@ -3839,14 +3841,49 @@ Item_equal *Item_field::find_item_equal(COND_EQUAL *cond_equal) } +/* + Check whether a field can be substituted by an equal item + + SYNOPSIS + equal_fields_propagator() + arg - *arg != NULL <-> the field is in the context where + substitution for an equal item is valid + + DESCRIPTION + The function checks whether a substitution of the field + occurrence for an equal item is valid. + + NOTES + The following statement is not always true: + x=y => F(x)=F(x/y). + This means substitution of an item for an equal item not always + yields an equavalent condition. + Here's an example: + 'a'='a ' + (LENGTH('a')=1) != (LENGTH('a ')=2) + Such a substitution is surely valid if either the substituted + field is not of a STRING type or if it is an argument of + a comparison predicate. + + RETURN + TRUE substitution is valid + FALSE otherwise +*/ + +bool Item_field::subst_argument_checker(byte **arg) +{ + return (result_type() != STRING_RESULT) || (*arg); +} + + /* Set a pointer to the multiple equality the field reference belongs to (if any) SYNOPSIS equal_fields_propagator() - arg - reference to list of multiple equalities where - the field (this object) is to be looked for + arg - reference to list of multiple equalities where + the field (this object) is to be looked for DESCRIPTION The function looks for a multiple equality containing the field item @@ -3858,7 +3895,7 @@ Item_equal *Item_field::find_item_equal(COND_EQUAL *cond_equal) NOTES This function is supposed to be called as a callback parameter in calls - of the transform method. + of the compile method. RETURN VALUES pointer to the replacing constant item, if the field item was substituted @@ -3966,21 +4003,27 @@ void Item::make_field(Send_field *tmp_field) } -void Item_empty_string::make_field(Send_field *tmp_field) +enum_field_types Item::string_field_type() const { enum_field_types type= FIELD_TYPE_VAR_STRING; if (max_length >= 16777216) type= FIELD_TYPE_LONG_BLOB; else if (max_length >= 65536) type= FIELD_TYPE_MEDIUM_BLOB; - init_make_field(tmp_field, type); + return type; +} + + +void Item_empty_string::make_field(Send_field *tmp_field) +{ + init_make_field(tmp_field, string_field_type()); } enum_field_types Item::field_type() const { switch (result_type()) { - case STRING_RESULT: return MYSQL_TYPE_VARCHAR; + case STRING_RESULT: return string_field_type(); case INT_RESULT: return FIELD_TYPE_LONGLONG; case DECIMAL_RESULT: return FIELD_TYPE_NEWDECIMAL; case REAL_RESULT: return FIELD_TYPE_DOUBLE; @@ -5669,11 +5712,6 @@ void Item_trigger_field::cleanup() } -/* - If item is a const function, calculate it and return a const item - The original item is freed if not returned -*/ - Item_result item_cmp_type(Item_result a,Item_result b) { if (a == STRING_RESULT && b == STRING_RESULT) diff --git a/sql/item.h b/sql/item.h index f5a5e6e4fd9..3dd7d3d220e 100644 --- a/sql/item.h +++ b/sql/item.h @@ -439,7 +439,19 @@ public: }; -typedef bool (Item::*Item_processor)(byte *arg); +typedef bool (Item::*Item_processor) (byte *arg); +/* + Analyzer function + SYNOPSIS + argp in/out IN: Analysis parameter + OUT: Parameter to be passed to the transformer + + RETURN + TRUE Invoke the transformer + FALSE Don't do it + +*/ +typedef bool (Item::*Item_analyzer) (byte **argp); typedef Item* (Item::*Item_transformer) (byte *arg); typedef void (*Cond_traverser) (const Item *item, void *arg); @@ -536,6 +548,7 @@ public: virtual bool eq(const Item *, bool binary_cmp) const; 
virtual Item_result result_type() const { return REAL_RESULT; } virtual Item_result cast_to_int_type() const { return result_type(); } + virtual enum_field_types string_field_type() const; virtual enum_field_types field_type() const; virtual enum Type type() const =0; @@ -776,6 +789,30 @@ public: virtual Item* transform(Item_transformer transformer, byte *arg); + /* + This function performs a generic "compilation" of the Item tree. + The process of compilation is assumed to go as follows: + + compile() + { + if (this->*some_analyzer(...)) + { + compile children if any; + this->*some_transformer(...); + } + } + + i.e. analysis is performed top-down while transformation is done + bottom-up. + */ + virtual Item* compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t) + { + if ((this->*analyzer) (arg_p)) + return ((this->*transformer) (arg_t)); + return 0; + } + virtual void traverse_cond(Cond_traverser traverser, void *arg, traverse_order order) { @@ -813,6 +850,12 @@ public: */ virtual bool check_partition_func_processor(byte *bool_arg) { *(bool *)bool_arg= FALSE; return 0; } + virtual bool subst_argument_checker(byte **arg) + { + if (*arg) + *arg= NULL; + return TRUE; + } virtual Item *equal_fields_propagator(byte * arg) { return this; } virtual bool set_no_const_sub(byte *arg) { return FALSE; } @@ -1315,6 +1358,7 @@ public: return field->can_be_compared_as_longlong(); } Item_equal *find_item_equal(COND_EQUAL *cond_equal); + bool subst_argument_checker(byte **arg); Item *equal_fields_propagator(byte *arg); bool set_no_const_sub(byte *arg); Item *replace_equal_field(byte *arg); @@ -2222,7 +2266,11 @@ public: { return Item_field::save_in_field(field_arg, no_conversions); } - table_map used_tables() const { return (table_map)0L; } + /* + We use RAND_TABLE_BIT to prevent Item_insert_value from + being treated as a constant and precalculated before execution + */ + table_map used_tables() const { return RAND_TABLE_BIT; } bool walk(Item_processor processor, bool walk_subquery, byte *args) { diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index fd0b348ac85..135e4596996 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -66,142 +66,54 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems) /* Aggregates result types from the array of items. - SYNOPSIS: + SYNOPSIS agg_cmp_type() - thd thread handle - type [out] the aggregated type - items array of items to aggregate the type from - nitems number of items in the array + items array of items to aggregate the type from + nitems number of items in the array DESCRIPTION This function aggregates result types from the array of items. Found type supposed to be used later for comparison of values of these items. Aggregation itself is performed by the item_cmp_type() function. - - NOTES - Aggregation rules: - If there are DATE/TIME fields/functions in the list and no string - fields/functions in the list then: - The INT_RESULT type will be used for aggregation instead of original - result type of any DATE/TIME field/function in the list - All constant items in the list will be converted to a DATE/TIME using - found field or result field of found function. - - Implementation notes: - The code is equivalent to: - 1. Check the list for presence of a STRING field/function. - Collect the is_const flag. - 2. Get a Field* object to use for type coercion - 3. Perform type conversion. - 1 and 2 are implemented in 2 loops. 
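Item::compile() as added above runs an analyzer top-down and, only where the analyzer consents, applies a transformer bottom-up after visiting the children. Below is a toy sketch of the same two-callback traversal over a miniature expression tree; the class and member names are illustrative, not the Item hierarchy.

    #include <cstdio>

    class Node {
    public:
      typedef bool  (Node::*Analyzer)(void **arg);
      typedef Node *(Node::*Transformer)(void *arg);

      Node(int v, Node *l= 0, Node *r= 0) : value(v), left(l), right(r) {}
      virtual ~Node() {}

      // Top-down: ask the analyzer first; only if it agrees, descend into the
      // children and finally transform this node on the way back up.
      virtual Node *compile(Analyzer analyzer, void **arg_p,
                            Transformer transformer, void *arg_t)
      {
        if (!(this->*analyzer)(arg_p))
          return this;
        if (left)  left=  left->compile(analyzer, arg_p, transformer, arg_t);
        if (right) right= right->compile(analyzer, arg_p, transformer, arg_t);
        return (this->*transformer)(arg_t);
      }

      // Example analyzer: walk into subtrees only while *arg_p is non-NULL.
      bool analyze_all(void **arg_p) { return *arg_p != 0; }

      // Example transformer: add a constant to every visited node.
      Node *add_const(void *arg_t)
      {
        value+= *(int *) arg_t;
        return this;
      }

      int   value;
      Node *left, *right;
    };

    int main()
    {
      Node leaf1(1), leaf2(2), root(3, &leaf1, &leaf2);
      int  delta= 10;
      void *enabled= &delta;                    // non-NULL: analyzer says yes
      root.compile(&Node::analyze_all, &enabled, &Node::add_const, &delta);
      std::printf("%d %d %d\n", leaf1.value, leaf2.value, root.value);  // 11 12 13
      return 0;
    }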
The first searches for a DATE/TIME - field/function and checks presence of a STRING field/function. - The second loop works only if a DATE/TIME field/function is found. - It checks presence of a STRING field/function in the rest of the list. - - TODO - 1) The current implementation can produce false comparison results for - expressions like: - date_time_field BETWEEN string_field_with_dates AND string_constant - if the string_constant will omit some of leading zeroes. - In order to fully implement correct comparison of DATE/TIME the new - DATETIME_RESULT result type should be introduced and agg_cmp_type() - should return the DATE/TIME field used for the conversion. Later - this field can be used by comparison functions like Item_func_between to - convert string values to ints on the fly and thus return correct results. - This modification will affect functions BETWEEN, IN and CASE. - - 2) If in the list a DATE field/function and a DATETIME field/function - are present in the list then the first found field/function will be - used for conversion. This may lead to wrong results and probably should - be fixed. */ -static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems) +static Item_result agg_cmp_type(Item **items, uint nitems) { uint i; - Item::Type res= (Item::Type)0; - /* Used only for date/time fields, max_length = 19 */ - char buff[20]; - uchar null_byte; - Field *field= NULL; + Item_result type= items[0]->result_type(); + for (i= 1 ; i < nitems ; i++) + type= item_cmp_type(type, items[i]->result_type()); + return type; +} - /* - Do not convert items while creating a or showing a view in order - to store/display the original query in these cases. - */ - if (thd->lex->sql_command != SQLCOM_CREATE_VIEW && - thd->lex->sql_command != SQLCOM_SHOW_CREATE) - { - /* Search for date/time fields/functions */ - for (i= 0; i < nitems; i++) - { - if (!items[i]->result_as_longlong()) - { - /* Do not convert anything if a string field/function is present */ - if (!items[i]->const_item() && items[i]->result_type() == STRING_RESULT) - { - i= nitems; - break; - } - continue; - } - if ((res= items[i]->real_item()->type()) == Item::FIELD_ITEM && - items[i]->result_type() != INT_RESULT) - { - field= ((Item_field *)items[i]->real_item())->field; - break; - } - else if (res == Item::FUNC_ITEM) - { - field= items[i]->tmp_table_field_from_field_type(0,0); - if (field) - field->move_field(buff, &null_byte, 0); - break; - } - } - } - if (field) - { - /* Check the rest of the list for presence of a string field/function. 
*/ - for (i++ ; i < nitems; i++) - { - if (!items[i]->const_item() && items[i]->result_type() == STRING_RESULT && - !items[i]->result_as_longlong()) - { - if (res == Item::FUNC_ITEM) - delete field; - field= 0; - break; - } - } - } - /* - If the first item is a date/time function then its result should be - compared as int - */ - if (field) - /* Suppose we are comparing dates */ - type[0]= INT_RESULT; - else - type[0]= items[0]->result_type(); - for (i= 0; i < nitems ; i++) - { - Item_result result= items[i]->result_type(); - /* - Use INT_RESULT as result type for DATE/TIME fields/functions and - for constants successfully converted to DATE/TIME - */ - if (field && - ((!items[i]->const_item() && items[i]->result_as_longlong()) || - (items[i]->const_item() && convert_constant_item(thd, field, - &items[i])))) - result= INT_RESULT; - type[0]= item_cmp_type(type[0], result); - } +/* + Collects different types for comparison of first item with each other items - if (res == Item::FUNC_ITEM && field) - delete field; + SYNOPSIS + collect_cmp_types() + items Array of items to collect types from + nitems Number of items in the array + + DESCRIPTION + This function collects different result types for comparison of the first + item in the list with each of the remaining items in the 'items' array. + + RETURN + Bitmap of collected types +*/ + +static uint collect_cmp_types(Item **items, uint nitems) +{ + uint i; + uint found_types; + Item_result left_result= items[0]->result_type(); + DBUG_ASSERT(nitems > 1); + found_types= 0; + for (i= 1; i < nitems ; i++) + found_types|= 1<< (uint)item_cmp_type(left_result, + items[i]->result_type()); + return found_types; } @@ -1234,11 +1146,33 @@ void Item_func_between::fix_length_and_dec() */ if (!args[0] || !args[1] || !args[2]) return; - agg_cmp_type(thd, &cmp_type, args, 3); - args[0]->cmp_context= args[1]->cmp_context= args[2]->cmp_context= cmp_type; + cmp_type= agg_cmp_type(args, 3); + if (cmp_type == STRING_RESULT && + agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1)) + return; - if (cmp_type == STRING_RESULT) - agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1); + /* + Make a special case of compare with date/time and longlong fields. + They are compared as integers, so for const item this time-consuming + conversion can be done only once, not for every single comparison + */ + if (args[0]->type() == FIELD_ITEM && + thd->lex->sql_command != SQLCOM_CREATE_VIEW && + thd->lex->sql_command != SQLCOM_SHOW_CREATE) + { + Field *field=((Item_field*) args[0])->field; + if (field->can_be_compared_as_longlong()) + { + /* + The following can't be recoded with || as convert_constant_item + changes the argument + */ + if (convert_constant_item(thd, field,&args[1])) + cmp_type=INT_RESULT; // Works for all types. + if (convert_constant_item(thd, field,&args[2])) + cmp_type=INT_RESULT; // Works for all types. + } + } } @@ -1692,94 +1626,65 @@ Item_func_nullif::is_null() return (null_value= (!cmp.compare() ? 1 : args[0]->null_value)); } + /* - CASE expression Return the matching ITEM or NULL if all compares (including else) failed + + SYNOPSIS + find_item() + str Buffer string + + DESCRIPTION + Find and return matching items for CASE or ELSE item if all compares + are failed or NULL if ELSE item isn't defined. + + IMPLEMENTATION + In order to do correct comparisons of the CASE expression (the expression + between CASE and the first WHEN) with each WHEN expression several + comparators are used. One for each result type. 
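The find_item() comment above describes evaluating the CASE expression at most once per result type and recording which types have already been seen in a value_added_map bitmap. A reduced sketch of that lazy, bitmap-guarded caching; the type names and the compare step are simplified placeholders.

    #include <cstdio>

    enum DemoType { DEMO_STRING= 0, DEMO_REAL= 1, DEMO_INT= 2, DEMO_TYPE_COUNT= 3 };

    struct DemoCase {
      int  evaluations;             // how often the CASE expression was computed
      long cached[DEMO_TYPE_COUNT];

      // Pretend evaluation of the CASE expression for a given comparison type.
      long evaluate_as(DemoType t) { evaluations++; return 40 + (int) t; }
    };

    int main()
    {
      DemoCase c = { 0, {0, 0, 0} };
      DemoType when_types[]= { DEMO_INT, DEMO_STRING, DEMO_INT, DEMO_STRING };
      unsigned value_added_map= 0;

      for (unsigned i= 0; i < sizeof(when_types)/sizeof(when_types[0]); i++)
      {
        DemoType t= when_types[i];
        if (!(value_added_map & (1u << t)))     // first WHEN of this type?
        {
          c.cached[t]= c.evaluate_as(t);        // evaluate once per result type
          value_added_map|= 1u << t;
        }
        // ... compare c.cached[t] against the WHEN value here ...
      }
      std::printf("evaluated %d times for %u WHEN clauses\n",
                  c.evaluations,
                  (unsigned) (sizeof(when_types)/sizeof(when_types[0])));
      return 0;   // two evaluations serve four WHEN clauses
    }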
The CASE expression can be + evaluated at most once per distinct result type used. To check whether + the CASE expression was already evaluated for a particular result type + a bit mapped variable value_added_map is used. Result types are mapped + to it according to their int values, i.e. STRING_RESULT is mapped to bit + 0, REAL_RESULT to bit 1, and so on. + + RETURN + NULL - Nothing found and there is no ELSE expression defined + item - Found item, or the ELSE item if it is defined and all comparisons + failed */ Item *Item_func_case::find_item(String *str) { - String *first_expr_str, *tmp; - my_decimal *first_expr_dec, first_expr_dec_val; - longlong first_expr_int; - double first_expr_real; - char buff[MAX_FIELD_WIDTH]; - String buff_str(buff,sizeof(buff),default_charset()); + uint value_added_map= 0; - /* These will be initialized later */ - LINT_INIT(first_expr_str); - LINT_INIT(first_expr_int); - LINT_INIT(first_expr_real); - LINT_INIT(first_expr_dec); - - if (first_expr_num != -1) + if (first_expr_num == -1) { - switch (cmp_type) - { - case STRING_RESULT: - // We can't use 'str' here as this may be overwritten - if (!(first_expr_str= args[first_expr_num]->val_str(&buff_str))) - return else_expr_num != -1 ? args[else_expr_num] : 0; // Impossible - break; - case INT_RESULT: - first_expr_int= args[first_expr_num]->val_int(); - if (args[first_expr_num]->null_value) - return else_expr_num != -1 ? args[else_expr_num] : 0; - break; - case REAL_RESULT: - first_expr_real= args[first_expr_num]->val_real(); - if (args[first_expr_num]->null_value) - return else_expr_num != -1 ? args[else_expr_num] : 0; - break; - case DECIMAL_RESULT: - first_expr_dec= args[first_expr_num]->val_decimal(&first_expr_dec_val); - if (args[first_expr_num]->null_value) - return else_expr_num != -1 ? args[else_expr_num] : 0; - break; - case ROW_RESULT: - default: - // This case should never be chosen - DBUG_ASSERT(0); - break; - } - } - - // Compare every WHEN argument with it and return the first match - for (uint i=0 ; i < ncases ; i+=2) - { - if (first_expr_num == -1) + for (uint i=0 ; i < ncases ; i+=2) { // No expression between CASE and the first WHEN if (args[i]->val_bool()) return args[i+1]; continue; } - switch (cmp_type) { - case STRING_RESULT: - if ((tmp=args[i]->val_str(str))) // If not null - if (sortcmp(tmp,first_expr_str,cmp_collation.collation)==0) - return args[i+1]; - break; - case INT_RESULT: - if (args[i]->val_int()==first_expr_int && !args[i]->null_value) - return args[i+1]; - break; - case REAL_RESULT: - if (args[i]->val_real() == first_expr_real && !args[i]->null_value) - return args[i+1]; - break; - case DECIMAL_RESULT: + } + else + { + /* Compare every WHEN argument with it and return the first match */ + for (uint i=0 ; i < ncases ; i+=2) { - my_decimal value; - if (my_decimal_cmp(args[i]->val_decimal(&value), first_expr_dec) == 0) - return args[i+1]; - break; - } - case ROW_RESULT: - default: - // This case should never be chosen - DBUG_ASSERT(0); - break; + cmp_type= item_cmp_type(left_result_type, args[i]->result_type()); + DBUG_ASSERT(cmp_type != ROW_RESULT); + DBUG_ASSERT(cmp_items[(uint)cmp_type]); + if (!(value_added_map & (1<<(uint)cmp_type))) + { + cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]); + if ((null_value=args[first_expr_num]->null_value)) + return else_expr_num != -1 ?
args[else_expr_num] : 0; + value_added_map|= 1<<(uint)cmp_type; + } + if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value) + return args[i + 1]; } } // No, WHEN clauses all missed, return ELSE expression @@ -1886,7 +1791,7 @@ void Item_func_case::fix_length_and_dec() Item **agg; uint nagg; THD *thd= current_thd; - + uint found_types= 0; if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) return; @@ -1913,16 +1818,31 @@ void Item_func_case::fix_length_and_dec() */ if (first_expr_num != -1) { + uint i; agg[0]= args[first_expr_num]; + left_result_type= agg[0]->result_type(); + for (nagg= 0; nagg < ncases/2 ; nagg++) agg[nagg+1]= args[nagg*2]; nagg++; - agg_cmp_type(thd, &cmp_type, agg, nagg); - if ((cmp_type == STRING_RESULT) && - agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1)) - return; + found_types= collect_cmp_types(agg, nagg); + + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + if (found_types & (1 << i) && !cmp_items[i]) + { + DBUG_ASSERT((Item_result)i != ROW_RESULT); + if ((Item_result)i == STRING_RESULT && + agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1)) + return; + if (!(cmp_items[i]= + cmp_item::get_comparator((Item_result)i, + cmp_collation.collation))) + return; + } + } } - + if (else_expr_num == -1 || args[else_expr_num]->maybe_null) maybe_null=1; @@ -2507,16 +2427,14 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y) void Item_func_in::fix_length_and_dec() { Item **arg, **arg_end; - uint const_itm= 1; + bool const_itm= 1; THD *thd= current_thd; + uint found_types= 0; + uint type_cnt= 0, i; + left_result_type= args[0]->result_type(); + found_types= collect_cmp_types(args, arg_count); - agg_cmp_type(thd, &cmp_type, args, arg_count); - - if (cmp_type == STRING_RESULT && - agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) - return; - - for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) + for (arg= args + 1, arg_end= args + arg_count; arg != arg_end ; arg++) { if (!arg[0]->const_item()) { @@ -2524,26 +2442,39 @@ void Item_func_in::fix_length_and_dec() break; } } - + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + if (found_types & 1 << i) + (type_cnt)++; + } /* - Row item with NULLs inside can return NULL or FALSE => + Row item with NULLs inside can return NULL or FALSE => they can't be processed as static */ - if (const_itm && !nulls_in_row()) + if (type_cnt == 1 && const_itm && !nulls_in_row()) { + uint tmp_type; + Item_result cmp_type; + /* Only one cmp type was found. 
Extract it here */ + for (tmp_type= 0; found_types - 1; found_types>>= 1) + tmp_type++; + cmp_type= (Item_result)tmp_type; + switch (cmp_type) { case STRING_RESULT: - array=new in_string(arg_count-1,(qsort2_cmp) srtcmp_in, + if (agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) + return; + array=new in_string(arg_count - 1,(qsort2_cmp) srtcmp_in, cmp_collation.collation); break; case INT_RESULT: - array= new in_longlong(arg_count-1); + array= new in_longlong(arg_count - 1); break; case REAL_RESULT: - array= new in_double(arg_count-1); + array= new in_double(arg_count - 1); break; case ROW_RESULT: - array= new in_row(arg_count-1, args[0]); + array= new in_row(arg_count - 1, args[0]); break; case DECIMAL_RESULT: array= new in_decimal(arg_count - 1); @@ -2563,15 +2494,25 @@ void Item_func_in::fix_length_and_dec() else have_null= 1; } - if ((array->used_count=j)) + if ((array->used_count= j)) array->sort(); } } else { - in_item= cmp_item::get_comparator(cmp_type, cmp_collation.collation); - if (cmp_type == STRING_RESULT) - in_item->cmp_charset= cmp_collation.collation; + for (i= 0; i <= (uint) DECIMAL_RESULT; i++) + { + if (found_types & (1 << i) && !cmp_items[i]) + { + if ((Item_result)i == STRING_RESULT && + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) + return; + if (!(cmp_items[i]= + cmp_item::get_comparator((Item_result)i, + cmp_collation.collation))) + return; + } + } } maybe_null= args[0]->maybe_null; max_length= 1; @@ -2590,25 +2531,61 @@ void Item_func_in::print(String *str) } +/* + Evaluate the function and return its value. + + SYNOPSIS + val_int() + + DESCRIPTION + Evaluate the function and return its value. + + IMPLEMENTATION + If the array object is defined then the value of the function is + calculated by means of this array. + Otherwise several cmp_item objects are used in order to do correct + comparison of the left expression and an expression from the values list. + One cmp_item object corresponds to one comparison type used. The left + expression can be evaluated at most once per distinct comparison + type used. A bit mapped variable value_added_map is used to check whether + the left expression was already evaluated for a particular result type. + Result types are mapped to it according to their integer values, i.e. + STRING_RESULT is mapped to bit 0, REAL_RESULT to bit 1, and so on.
+ + RETURN + Value of the function +*/ + longlong Item_func_in::val_int() { + cmp_item *in_item; DBUG_ASSERT(fixed == 1); + uint value_added_map= 0; if (array) { int tmp=array->find(args[0]); null_value=args[0]->null_value || (!tmp && have_null); return (longlong) (!null_value && tmp != negated); } - in_item->store_value(args[0]); - if ((null_value=args[0]->null_value)) - return 0; - have_null= 0; - for (uint i=1 ; i < arg_count ; i++) + + for (uint i= 1 ; i < arg_count ; i++) { + Item_result cmp_type= item_cmp_type(left_result_type, args[i]->result_type()); + in_item= cmp_items[(uint)cmp_type]; + DBUG_ASSERT(in_item); + if (!(value_added_map & (1 << (uint)cmp_type))) + { + in_item->store_value(args[0]); + if ((null_value=args[0]->null_value)) + return 0; + have_null= 0; + value_added_map|= 1 << (uint)cmp_type; + } if (!in_item->cmp(args[i]) && !args[i]->null_value) return (longlong) (!negated); have_null|= args[i]->null_value; } + null_value= have_null; return (longlong) (!null_value && negated); } @@ -2762,16 +2739,16 @@ bool Item_cond::walk(Item_processor processor, bool walk_subquery, byte *arg) SYNOPSIS transform() - transformer the transformer callback function to be applied to the nodes - of the tree of the object - arg parameter to be passed to the transformer + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg parameter to be passed to the transformer DESCRIPTION - The function recursively applies the transform method with the - same transformer to each member item of the condition list. + The function recursively applies the transform method to each + member item of the condition list. If the call of the method for a member item returns a new item the old item is substituted for a new one. - After this the transform method is applied to the root node + After this the transformer is applied to the root node of the Item_cond object. RETURN VALUES @@ -2802,6 +2779,55 @@ Item *Item_cond::transform(Item_transformer transformer, byte *arg) return Item_func::transform(transformer, arg); } + +/* + Compile an Item_cond object with an analyzer and a transformer callback function + + SYNOPSIS + compile() + analyzer the analyzer callback function to be applied to the nodes + of the tree of the object + arg_p in/out parameter to be passed to the analyzer + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg_t parameter to be passed to the transformer + + DESCRIPTION + First the function applies the analyzer to the root node of + the Item_cond object. Then if the analyzer succeeds (returns TRUE) + the function recursively applies the compile method to each member + item of the condition list. + If the call of the method for a member item returns a new item + the old item is substituted for a new one. + After this the transformer is applied to the root node + of the Item_cond object. + + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_cond::compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t) +{ + if (!(this->*analyzer)(arg_p)) + return 0; + + List_iterator li(list); + Item *item; + while ((item= li++)) + { + /* + The same parameter value of arg_p must be passed + to analyze any argument of the condition formula.
+ */ + byte *arg_v= *arg_p; + Item *new_item= item->compile(analyzer, &arg_v, transformer, arg_t); + if (new_item && new_item != item) + li.replace(new_item); + } + return Item_func::transform(transformer, arg_t); +} + void Item_cond::traverse_cond(Cond_traverser traverser, void *arg, traverse_order order) { diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 04462e05e9f..f2a8fd7db63 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -241,6 +241,7 @@ public: Item *neg_transformer(THD *thd); virtual Item *negated_item(); bool check_partition_func_processor(byte *bool_arg) { return 0;} + bool subst_argument_checker(byte **arg) { return TRUE; } }; class Item_func_not :public Item_bool_func @@ -588,49 +589,6 @@ public: }; -class Item_func_case :public Item_func -{ - int first_expr_num, else_expr_num; - enum Item_result cached_result_type; - String tmp_value; - uint ncases; - Item_result cmp_type; - DTCollation cmp_collation; -public: - Item_func_case(List &list, Item *first_expr_arg, Item *else_expr_arg) - :Item_func(), first_expr_num(-1), else_expr_num(-1), - cached_result_type(INT_RESULT) - { - ncases= list.elements; - if (first_expr_arg) - { - first_expr_num= list.elements; - list.push_back(first_expr_arg); - } - if (else_expr_arg) - { - else_expr_num= list.elements; - list.push_back(else_expr_arg); - } - set_arguments(list); - } - double val_real(); - longlong val_int(); - String *val_str(String *); - my_decimal *val_decimal(my_decimal *); - bool fix_fields(THD *thd, Item **ref); - void fix_length_and_dec(); - uint decimal_precision() const; - table_map not_null_tables() const { return 0; } - enum Item_result result_type () const { return cached_result_type; } - const char *func_name() const { return "case"; } - void print(String *str); - Item *find_item(String *str); - CHARSET_INFO *compare_collation() { return cmp_collation.collation; } - bool check_partition_func_processor(byte *bool_arg) { return 0;} -}; - - /* Functions to handle the optimized IN */ @@ -685,6 +643,7 @@ public: { return test(compare(collation, base + pos1*size, base + pos2*size)); } + virtual Item_result result_type()= 0; }; class in_string :public in_vector @@ -706,6 +665,7 @@ public: Item_string *to= (Item_string*)item; to->str_value= *str; } + Item_result result_type() { return STRING_RESULT; } }; class in_longlong :public in_vector @@ -728,6 +688,7 @@ public: { ((Item_int*)item)->value= ((longlong*)base)[pos]; } + Item_result result_type() { return INT_RESULT; } }; class in_double :public in_vector @@ -745,6 +706,7 @@ public: { ((Item_float*)item)->value= ((double*) base)[pos]; } + Item_result result_type() { return REAL_RESULT; } }; @@ -765,6 +727,8 @@ public: Item_decimal *item_dec= (Item_decimal*)item; item_dec->set_decimal_value(dec); } + Item_result result_type() { return DECIMAL_RESULT; } + }; @@ -795,7 +759,9 @@ class cmp_item_string :public cmp_item protected: String *value_res; public: + cmp_item_string () {} cmp_item_string (CHARSET_INFO *cs) { cmp_charset= cs; } + void set_charset(CHARSET_INFO *cs) { cmp_charset= cs; } friend class cmp_item_sort_string; friend class cmp_item_sort_string_in_static; }; @@ -806,6 +772,8 @@ protected: char value_buff[STRING_BUFFER_USUAL_SIZE]; String value; public: + cmp_item_sort_string(): + cmp_item_string() {} cmp_item_sort_string(CHARSET_INFO *cs): cmp_item_string(cs), value(value_buff, sizeof(value_buff), cs) {} @@ -827,6 +795,11 @@ public: return sortcmp(value_res, cmp->value_res, cmp_charset); } cmp_item *make_same(); + void set_charset(CHARSET_INFO 
*cs) + { + cmp_charset= cs; + value.set_quick(value_buff, sizeof(value_buff), cs); + } +}; class cmp_item_int :public cmp_item @@ -907,6 +880,7 @@ public: ~in_row(); void set(uint pos,Item *item); byte *get_value(Item *item); + Item_result result_type() { return ROW_RESULT; } }; /* @@ -942,18 +916,109 @@ public: } }; + +/* + The class Item_func_case is the CASE ... WHEN ... THEN ... END function + implementation. + + When there is no expression between CASE and the first WHEN + (the CASE expression) then this function simply checks all WHEN expressions + one after another. When some WHEN expression evaluates to TRUE, the + value of the corresponding THEN expression is returned. + + When the CASE expression is specified then it is compared to each WHEN + expression individually. When an equal WHEN expression is found, the + corresponding THEN expression is returned. + In order to do correct comparisons several comparators are used. One for + each result type. The different result types that are used in a particular + CASE ... END expression are collected in the fix_length_and_dec() member + function, and only comparators for these result types are used. +*/ + +class Item_func_case :public Item_func +{ + int first_expr_num, else_expr_num; + enum Item_result cached_result_type, left_result_type; + String tmp_value; + uint ncases; + Item_result cmp_type; + DTCollation cmp_collation; + cmp_item *cmp_items[5]; /* For all result types */ + cmp_item *case_item; +public: + Item_func_case(List &list, Item *first_expr_arg, Item *else_expr_arg) + :Item_func(), first_expr_num(-1), else_expr_num(-1), + cached_result_type(INT_RESULT), left_result_type(INT_RESULT), case_item(0) + { + ncases= list.elements; + if (first_expr_arg) + { + first_expr_num= list.elements; + list.push_back(first_expr_arg); + } + if (else_expr_arg) + { + else_expr_num= list.elements; + list.push_back(else_expr_arg); + } + set_arguments(list); + bzero(&cmp_items, sizeof(cmp_items)); + } + double val_real(); + longlong val_int(); + String *val_str(String *); + my_decimal *val_decimal(my_decimal *); + bool fix_fields(THD *thd, Item **ref); + void fix_length_and_dec(); + uint decimal_precision() const; + table_map not_null_tables() const { return 0; } + enum Item_result result_type () const { return cached_result_type; } + const char *func_name() const { return "case"; } + void print(String *str); + Item *find_item(String *str); + CHARSET_INFO *compare_collation() { return cmp_collation.collation; } + bool check_partition_func_processor(byte *bool_arg) { return 0;} + void cleanup() + { + uint i; + DBUG_ENTER("Item_func_case::cleanup"); + Item_func::cleanup(); + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + delete cmp_items[i]; + cmp_items[i]= 0; + } + DBUG_VOID_RETURN; + } +}; + +/* + The Item_func_in class implements the in_expr IN(values_list) function. + + The current implementation distinguishes 2 cases: + 1) all items in the value_list are constants and have the same + result type. This case is handled by in_vector class. + 2) items in the value_list have different result types or there are some + non-constant items. + In this case Item_func_in employs several cmp_item objects to perform + comparisons of in_expr and an item from the values_list. One cmp_item + object for each result type. Different result types are collected in the + fix_length_and_dec() member function by means of collect_cmp_types() + function.
+*/ class Item_func_in :public Item_func_opt_neg { public: - Item_result cmp_type; in_vector *array; - cmp_item *in_item; bool have_null; + Item_result left_result_type; + cmp_item *cmp_items[5]; /* One cmp_item for each result type */ DTCollation cmp_collation; Item_func_in(List &list) - :Item_func_opt_neg(list), array(0), in_item(0), have_null(0) + :Item_func_opt_neg(list), array(0), have_null(0) { + bzero(&cmp_items, sizeof(cmp_items)); allowed_arg_cols= 0; // Fetch this value from first argument } longlong val_int(); @@ -962,12 +1027,16 @@ public: uint decimal_precision() const { return 1; } void cleanup() { + uint i; DBUG_ENTER("Item_func_in::cleanup"); Item_int_func::cleanup(); delete array; - delete in_item; array= 0; - in_item= 0; + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + delete cmp_items[i]; + cmp_items[i]= 0; + } DBUG_VOID_RETURN; } optimize_type select_optimize() const @@ -1186,6 +1255,9 @@ public: void traverse_cond(Cond_traverser, void *arg, traverse_order order); void neg_arguments(THD *thd); bool check_partition_func_processor(byte *bool_arg) { return 0;} + bool subst_argument_checker(byte **arg) { return TRUE; } + Item *compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t); }; diff --git a/sql/item_func.cc b/sql/item_func.cc index e6b673c9014..4a069e662f9 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -236,22 +236,21 @@ void Item_func::traverse_cond(Cond_traverser traverser, } - /* Transform an Item_func object with a transformer callback function SYNOPSIS transform() - transformer the transformer callback function to be applied to the nodes - of the tree of the object - argument parameter to be passed to the transformer + transformer the transformer callback function to be applied to the nodes + of the tree of the object + argument parameter to be passed to the transformer DESCRIPTION - The function recursively applies the transform method with the - same transformer to each argument the function. - If the call of the method for a member item returns a new item + The function recursively applies the transform method to each + argument of the Item_func node. + If the call of the method for an argument item returns a new item the old item is substituted for a new one. - After this the transform method is applied to the root node + After this the transformer is applied to the root node of the Item_func object. RETURN VALUES @@ -285,6 +284,55 @@ Item *Item_func::transform(Item_transformer transformer, byte *argument) } + +/* + Compile an Item_func object with an analyzer and a transformer callback function + + SYNOPSIS + compile() + analyzer the analyzer callback function to be applied to the nodes + of the tree of the object + arg_p in/out parameter to be passed to the analyzer + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg_t parameter to be passed to the transformer + + DESCRIPTION + First the function applies the analyzer to the root node of + the Item_func object. Then if the analyzer succeeds (returns TRUE) + the function recursively applies the compile method to each argument + of the Item_func node. + If the call of the method for an argument item returns a new item + the old item is substituted for a new one. + After this the transformer is applied to the root node + of the Item_func object.
+ + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_func::compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t) +{ + if (!(this->*analyzer)(arg_p)) + return 0; + if (arg_count) + { + Item **arg,**arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + /* + The same parameter value of arg_p must be passed + to analyze any argument of the condition formula. + */ + byte *arg_v= *arg_p; + Item *new_item= (*arg)->compile(analyzer, &arg_v, transformer, arg_t); + if (new_item && *arg != new_item) + current_thd->change_item_tree(arg, new_item); + } + } + return (this->*transformer)(arg_t); +} + /* See comments in Item_cmp_func::split_sum_func() */ void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, diff --git a/sql/item_func.h b/sql/item_func.h index 6a4d3376e70..df9f533a2c3 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -187,6 +187,8 @@ public: } bool walk(Item_processor processor, bool walk_subquery, byte *arg); Item *transform(Item_transformer transformer, byte *arg); + Item* compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t); void traverse_cond(Cond_traverser traverser, void * arg, traverse_order order); bool is_expensive_processor(byte *arg); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 851551a86d9..7a82dd753b3 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1149,12 +1149,13 @@ void Item_func_substr::fix_length_and_dec() } if (arg_count == 3 && args[2]->const_item()) { - int32 length= (int32) args[2]->val_int() * collation.collation->mbmaxlen; + int32 length= (int32) args[2]->val_int(); if (length <= 0) max_length=0; /* purecov: inspected */ else set_if_smaller(max_length,(uint) length); } + max_length*= collation.collation->mbmaxlen; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 7b6be33d715..73e2c5e6935 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -246,7 +246,27 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) aggr_sl->inner_sum_func_list->next= this; } aggr_sl->inner_sum_func_list= this; - + aggr_sl->with_sum_func= 1; + + /* + Mark Item_subselect(s) as containing aggregate function all the way up + to aggregate function's calculation context. + Note that we must not mark the Item of calculation context itself + because with_sum_func on the calculation context st_select_lex is + already set above. + + with_sum_func being set for an Item means that this Item refers + (somewhere in it, e.g. one of its arguments if it's a function) directly + or through intermediate items to an aggregate function that is calculated + in a context "outside" of the Item (e.g. in the current or outer select). + + with_sum_func being set for an st_select_lex means that this st_select_lex + has aggregate functions directly referenced (i.e. not through a sub-select). 
+ */ + for (sl= thd->lex->current_select; + sl && sl != aggr_sl && sl->master_unit()->item; + sl= sl->master_unit()->outer_select() ) + sl->master_unit()->item->with_sum_func= 1; } return FALSE; } @@ -2549,7 +2569,7 @@ bool Item_sum_count_distinct::setup(THD *thd) table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows table->no_rows=1; - if (table->s->db_type == &heap_hton) + if (table->s->db_type == heap_hton) { /* No blobs, otherwise it would have been MyISAM: set up a compare diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index a259fd90485..d32adde5e64 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -65,7 +65,7 @@ static bool make_datetime(date_time_format_types format, TIME *ltime, ltime->hour, ltime->minute, ltime->second); break; case TIME_MICROSECOND: - length= cs->cset->snprintf(cs, buff, length, "%s%02d:%02d:%02d.%06d", + length= cs->cset->snprintf(cs, buff, length, "%s%02d:%02d:%02d.%06ld", ltime->neg ? "-" : "", ltime->hour, ltime->minute, ltime->second, ltime->second_part); @@ -82,7 +82,7 @@ static bool make_datetime(date_time_format_types format, TIME *ltime, break; case DATE_TIME_MICROSECOND: length= cs->cset->snprintf(cs, buff, length, - "%04d-%02d-%02d %02d:%02d:%02d.%06d", + "%04d-%02d-%02d %02d:%02d:%02d.%06ld", ltime->year, ltime->month, ltime->day, ltime->hour, ltime->minute, ltime->second, ltime->second_part); diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index dfa2d2a7325..44a2b690bac 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -105,6 +105,7 @@ typedef struct my_xpath_st String *context_cache; /* last context provider */ String *pxml; /* Parsed XML, an array of MY_XML_NODE */ CHARSET_INFO *cs; /* character set/collation string comparison */ + int error; } MY_XPATH; @@ -913,7 +914,9 @@ static Item *eq_func_reverse(int oper, Item *a, Item *b) RETURN The newly created item. 
*/ -static Item *create_comparator(MY_XPATH *xpath, int oper, Item *a, Item *b) +static Item *create_comparator(MY_XPATH *xpath, + int oper, MY_XPATH_LEX *context, + Item *a, Item *b) { if (a->type() != Item::XPATH_NODESET && b->type() != Item::XPATH_NODESET) @@ -923,6 +926,13 @@ static Item *create_comparator(MY_XPATH *xpath, int oper, Item *a, Item *b) else if (a->type() == Item::XPATH_NODESET && b->type() == Item::XPATH_NODESET) { + uint len= context->end - context->beg; + set_if_bigger(len, 32); + my_printf_error(ER_UNKNOWN_ERROR, + "XPATH error: " + "comparison of two nodesets is not supported: '%.*s'", + MYF(0), len, context->beg); + return 0; // TODO: Comparison of two nodesets } else @@ -1430,7 +1440,7 @@ my_xpath_lex_scan(MY_XPATH *xpath, static int my_xpath_parse_term(MY_XPATH *xpath, int term) { - if (xpath->lasttok.term == term) + if (xpath->lasttok.term == term && !xpath->error) { xpath->prevtok= xpath->lasttok; my_xpath_lex_scan(xpath, &xpath->lasttok, @@ -1558,8 +1568,9 @@ static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath) return my_xpath_parse_RelativeLocationPath(xpath); } - return my_xpath_parse_term(xpath, MY_XPATH_LEX_EOF) || - my_xpath_parse_RelativeLocationPath(xpath); + my_xpath_parse_RelativeLocationPath(xpath); + + return (xpath->error == 0); } @@ -1596,7 +1607,10 @@ static int my_xpath_parse_RelativeLocationPath(MY_XPATH *xpath) "*", 1, xpath->pxml, 1); if (!my_xpath_parse_Step(xpath)) + { + xpath->error= 1; return 0; + } } return 1; } @@ -1633,10 +1647,16 @@ my_xpath_parse_AxisSpecifier_NodeTest_opt_Predicate_list(MY_XPATH *xpath) xpath->context_cache= context_cache; if(!my_xpath_parse_PredicateExpr(xpath)) + { + xpath->error= 1; return 0; + } if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_RB)) + { + xpath->error= 1; return 0; + } xpath->item= nodeset2bool(xpath, xpath->item); @@ -1893,7 +1913,10 @@ static int my_xpath_parse_UnionExpr(MY_XPATH *xpath) if (!my_xpath_parse_PathExpr(xpath) || xpath->item->type() != Item::XPATH_NODESET) + { + xpath->error= 1; return 0; + } xpath->item= new Item_nodeset_func_union(prev, xpath->item, xpath->pxml); } return 1; @@ -1929,6 +1952,7 @@ static int my_xpath_parse_PathExpr(MY_XPATH *xpath) { return my_xpath_parse_LocationPath(xpath) || my_xpath_parse_FilterExpr_opt_slashes_RelativeLocationPath(xpath); + } @@ -1975,7 +1999,10 @@ static int my_xpath_parse_OrExpr(MY_XPATH *xpath) { Item *prev= xpath->item; if (!my_xpath_parse_AndExpr(xpath)) + { return 0; + xpath->error= 1; + } xpath->item= new Item_cond_or(nodeset2bool(xpath, prev), nodeset2bool(xpath, xpath->item)); } @@ -2003,7 +2030,10 @@ static int my_xpath_parse_AndExpr(MY_XPATH *xpath) { Item *prev= xpath->item; if (!my_xpath_parse_EqualityExpr(xpath)) + { + xpath->error= 1; return 0; + } xpath->item= new Item_cond_and(nodeset2bool(xpath,prev), nodeset2bool(xpath,xpath->item)); @@ -2057,17 +2087,26 @@ static int my_xpath_parse_EqualityOperator(MY_XPATH *xpath) } static int my_xpath_parse_EqualityExpr(MY_XPATH *xpath) { + MY_XPATH_LEX operator_context; if (!my_xpath_parse_RelationalExpr(xpath)) return 0; + + operator_context= xpath->lasttok; while (my_xpath_parse_EqualityOperator(xpath)) { Item *prev= xpath->item; int oper= xpath->extra; if (!my_xpath_parse_RelationalExpr(xpath)) + { + xpath->error= 1; + return 0; + } + + if (!(xpath->item= create_comparator(xpath, oper, &operator_context, + prev, xpath->item))) return 0; - if (!(xpath->item= create_comparator(xpath, oper, prev, xpath->item))) - return 0; + operator_context= xpath->lasttok; } return 1; } @@ 
-2109,18 +2148,25 @@ static int my_xpath_parse_RelationalOperator(MY_XPATH *xpath) } static int my_xpath_parse_RelationalExpr(MY_XPATH *xpath) { + MY_XPATH_LEX operator_context; if (!my_xpath_parse_AdditiveExpr(xpath)) return 0; + operator_context= xpath->lasttok; while (my_xpath_parse_RelationalOperator(xpath)) { Item *prev= xpath->item; int oper= xpath->extra; if (!my_xpath_parse_AdditiveExpr(xpath)) + { + xpath->error= 1; return 0; + } - if (!(xpath->item= create_comparator(xpath, oper, prev, xpath->item))) + if (!(xpath->item= create_comparator(xpath, oper, &operator_context, + prev, xpath->item))) return 0; + operator_context= xpath->lasttok; } return 1; } @@ -2153,7 +2199,10 @@ static int my_xpath_parse_AdditiveExpr(MY_XPATH *xpath) int oper= xpath->prevtok.term; Item *prev= xpath->item; if (!my_xpath_parse_MultiplicativeExpr(xpath)) + { + xpath->error= 1; return 0; + } if (oper == MY_XPATH_LEX_PLUS) xpath->item= new Item_func_plus(prev, xpath->item); @@ -2198,7 +2247,10 @@ static int my_xpath_parse_MultiplicativeExpr(MY_XPATH *xpath) int oper= xpath->prevtok.term; Item *prev= xpath->item; if (!my_xpath_parse_UnaryExpr(xpath)) + { + xpath->error= 1; return 0; + } switch (oper) { case MY_XPATH_LEX_ASTERISK: diff --git a/sql/key.cc b/sql/key.cc index 69557d971e8..be21bf11c3c 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -359,31 +359,29 @@ void key_unpack(String *to,TABLE *table,uint idx) /* - Return 1 if any field in a list is part of key or the key uses a field - that is automaticly updated (like a timestamp) + Check if key uses field that is marked in passed field bitmap. + + SYNOPSIS + is_key_used() + table TABLE object with which keys and fields are associated. + idx Key to be checked. + fields Bitmap of fields to be checked. + + NOTE + This function uses TABLE::tmp_set bitmap so the caller should care + about saving/restoring its state if it also uses this bitmap. 
+ + RETURN VALUE + TRUE Key uses field from bitmap + FALSE Otherwise */ -bool check_if_key_used(TABLE *table, uint idx, List &fields) +bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields) { - List_iterator_fast f(fields); - KEY_PART_INFO *key_part,*key_part_end; - for (key_part=table->key_info[idx].key_part,key_part_end=key_part+ - table->key_info[idx].key_parts ; - key_part < key_part_end; - key_part++) - { - Item_field *field; - - if (key_part->field == table->timestamp_field) - return 1; // Can't be used for update - - f.rewind(); - while ((field=(Item_field*) f++)) - { - if (key_part->field->eq(field->field)) - return 1; - } - } + bitmap_clear_all(&table->tmp_set); + table->mark_columns_used_by_index_no_reset(idx, &table->tmp_set); + if (bitmap_is_overlapping(&table->tmp_set, fields)) + return 1; /* If table handler has primary key as part of the index, check that primary @@ -391,7 +389,7 @@ bool check_if_key_used(TABLE *table, uint idx, List &fields) */ if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY && (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX)) - return check_if_key_used(table, table->s->primary_key, fields); + return is_key_used(table, table->s->primary_key, fields); return 0; } diff --git a/sql/log.cc b/sql/log.cc index 8a8262c174d..b63ec563baf 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -46,13 +46,13 @@ static Muted_query_log_event invisible_commit; static bool test_if_number(const char *str, long *res, bool allow_wildcards); -static int binlog_init(); -static int binlog_close_connection(THD *thd); -static int binlog_savepoint_set(THD *thd, void *sv); -static int binlog_savepoint_rollback(THD *thd, void *sv); -static int binlog_commit(THD *thd, bool all); -static int binlog_rollback(THD *thd, bool all); -static int binlog_prepare(THD *thd, bool all); +static int binlog_init(void *p); +static int binlog_close_connection(handlerton *hton, THD *thd); +static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv); +static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv); +static int binlog_commit(handlerton *hton, THD *thd, bool all); +static int binlog_rollback(handlerton *hton, THD *thd, bool all); +static int binlog_prepare(handlerton *hton, THD *thd, bool all); sql_print_message_func sql_print_message_handlers[3] = { @@ -90,7 +90,7 @@ struct binlog_trx_data { #endif }; -handlerton binlog_hton; +handlerton *binlog_hton; /* Open log table of a given type (general or slow log) @@ -1155,36 +1155,37 @@ void Log_to_csv_event_handler:: should be moved here. */ -int binlog_init() +int binlog_init(void *p) { - - binlog_hton.state=opt_bin_log ? SHOW_OPTION_YES : SHOW_OPTION_NO; - binlog_hton.db_type=DB_TYPE_BINLOG; - binlog_hton.savepoint_offset= sizeof(my_off_t); - binlog_hton.close_connection= binlog_close_connection; - binlog_hton.savepoint_set= binlog_savepoint_set; - binlog_hton.savepoint_rollback= binlog_savepoint_rollback; - binlog_hton.commit= binlog_commit; - binlog_hton.rollback= binlog_rollback; - binlog_hton.prepare= binlog_prepare; - binlog_hton.flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN; + binlog_hton= (handlerton *)p; + binlog_hton->state=opt_bin_log ? 
SHOW_OPTION_YES : SHOW_OPTION_NO; + binlog_hton->db_type=DB_TYPE_BINLOG; + binlog_hton->savepoint_offset= sizeof(my_off_t); + binlog_hton->close_connection= binlog_close_connection; + binlog_hton->savepoint_set= binlog_savepoint_set; + binlog_hton->savepoint_rollback= binlog_savepoint_rollback; + binlog_hton->commit= binlog_commit; + binlog_hton->rollback= binlog_rollback; + binlog_hton->prepare= binlog_prepare; + binlog_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN; return 0; } -static int binlog_close_connection(THD *thd) +static int binlog_close_connection(handlerton *hton, THD *thd) { binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open() && trx_data->empty()); close_cached_file(trans_log); - thd->ha_data[binlog_hton.slot]= 0; + thd->ha_data[binlog_hton->slot]= 0; my_free((gptr)trx_data, MYF(0)); return 0; } static int -binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev) +binlog_end_trans(THD *thd, binlog_trx_data *trx_data, + Log_event *end_ev) { DBUG_ENTER("binlog_end_trans"); int error=0; @@ -1238,7 +1239,7 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev) DBUG_RETURN(error); } -static int binlog_prepare(THD *thd, bool all) +static int binlog_prepare(handlerton *hton, THD *thd, bool all) { /* do nothing. @@ -1249,11 +1250,11 @@ static int binlog_prepare(THD *thd, bool all) return 0; } -static int binlog_commit(THD *thd, bool all) +static int binlog_commit(handlerton *hton, THD *thd, bool all) { DBUG_ENTER("binlog_commit"); binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open() && (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))); @@ -1273,12 +1274,12 @@ static int binlog_commit(THD *thd, bool all) DBUG_RETURN(binlog_end_trans(thd, trx_data, &invisible_commit)); } -static int binlog_rollback(THD *thd, bool all) +static int binlog_rollback(handlerton *hton, THD *thd, bool all) { DBUG_ENTER("binlog_rollback"); int error=0; binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; IO_CACHE *trans_log= &trx_data->trans_log; /* First assert is guaranteed - see trans_register_ha() call below. @@ -1326,11 +1327,11 @@ static int binlog_rollback(THD *thd, bool all) that case there is no need to have it in the binlog). 
*/ -static int binlog_savepoint_set(THD *thd, void *sv) +static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv) { DBUG_ENTER("binlog_savepoint_set"); binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(&trx_data->trans_log)); *(my_off_t *)sv= my_b_tell(&trx_data->trans_log); @@ -1342,11 +1343,11 @@ static int binlog_savepoint_set(THD *thd, void *sv) DBUG_RETURN(error); } -static int binlog_savepoint_rollback(THD *thd, void *sv) +static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) { DBUG_ENTER("binlog_savepoint_rollback"); binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log)); @@ -1579,17 +1580,18 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, if (log_type == LOG_NORMAL) { char *end; - int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s. " + int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s (%s). " #ifdef EMBEDDED_LIBRARY - "embedded library\n", my_progname, server_version + "embedded library\n", + my_progname, server_version, MYSQL_COMPILATION_COMMENT #elif __NT__ "started with:\nTCP Port: %d, Named Pipe: %s\n", - my_progname, server_version, mysqld_port, - mysqld_unix_port + my_progname, server_version, MYSQL_COMPILATION_COMMENT, + mysqld_port, mysqld_unix_port #else "started with:\nTcp port: %d Unix socket: %s\n", - my_progname, server_version, mysqld_port, - mysqld_unix_port + my_progname, server_version, MYSQL_COMPILATION_COMMENT, + mysqld_port, mysqld_unix_port #endif ); end= strnmov(buff + len, "Time Id Command Argument\n", @@ -3076,19 +3078,19 @@ int THD::binlog_setup_trx_data() { DBUG_ENTER("THD::binlog_setup_trx_data"); binlog_trx_data *trx_data= - (binlog_trx_data*) ha_data[binlog_hton.slot]; + (binlog_trx_data*) ha_data[binlog_hton->slot]; if (trx_data) DBUG_RETURN(0); // Already set up - ha_data[binlog_hton.slot]= trx_data= + ha_data[binlog_hton->slot]= trx_data= (binlog_trx_data*) my_malloc(sizeof(binlog_trx_data), MYF(MY_ZEROFILL)); if (!trx_data || open_cached_file(&trx_data->trans_log, mysql_tmpdir, LOG_PREFIX, binlog_cache_size, MYF(MY_WME))) { my_free((gptr)trx_data, MYF(MY_ALLOW_ZERO_PTR)); - ha_data[binlog_hton.slot]= 0; + ha_data[binlog_hton->slot]= 0; DBUG_RETURN(1); // Didn't manage to set it up } trx_data->trans_log.end_of_file= max_binlog_cache_size; @@ -3124,7 +3126,7 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans) if (is_trans) trans_register_ha(this, (options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0, - &binlog_hton); + binlog_hton); if ((error= mysql_bin_log.write(&the_event))) DBUG_RETURN(error); @@ -3138,7 +3140,7 @@ Rows_log_event* THD::binlog_get_pending_rows_event() const { binlog_trx_data *const trx_data= - (binlog_trx_data*) ha_data[binlog_hton.slot]; + (binlog_trx_data*) ha_data[binlog_hton->slot]; /* This is less than ideal, but here's the story: If there is no trx_data, prepare_pending_rows_event() has never been called @@ -3151,11 +3153,11 @@ THD::binlog_get_pending_rows_event() const void THD::binlog_set_pending_rows_event(Rows_log_event* ev) { - if (ha_data[binlog_hton.slot] == NULL) + if (ha_data[binlog_hton->slot] == NULL) binlog_setup_trx_data(); binlog_trx_data *const trx_data= - (binlog_trx_data*) ha_data[binlog_hton.slot]; + 
(binlog_trx_data*) ha_data[binlog_hton->slot]; DBUG_ASSERT(trx_data); trx_data->pending= ev; @@ -3171,13 +3173,13 @@ int MYSQL_BIN_LOG:: flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event) { DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)"); - DBUG_ASSERT(thd->current_stmt_binlog_row_based && mysql_bin_log.is_open()); + DBUG_ASSERT(mysql_bin_log.is_open()); DBUG_PRINT("enter", ("event=%p", event)); int error= 0; binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; DBUG_ASSERT(trx_data); @@ -3331,14 +3333,14 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) goto err; binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; IO_CACHE *trans_log= &trx_data->trans_log; bool trans_log_in_use= my_b_tell(trans_log) != 0; if (event_info->get_cache_stmt() && !trans_log_in_use) trans_register_ha(thd, (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0, - &binlog_hton); + binlog_hton); if (event_info->get_cache_stmt() || trans_log_in_use) { DBUG_PRINT("info", ("Using trans_log")); @@ -3416,9 +3418,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) } } } - /* Forget those values, for next binlogger: */ - thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; - thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); } /* @@ -4620,7 +4619,7 @@ int TC_LOG_BINLOG::log(THD *thd, my_xid xid) DBUG_ENTER("TC_LOG_BINLOG::log"); Xid_log_event xle(thd, xid); binlog_trx_data *trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton.slot]; + (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; DBUG_RETURN(!binlog_end_trans(thd, trx_data, &xle)); // invert return value } @@ -4681,7 +4680,7 @@ err1: } struct st_mysql_storage_engine binlog_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &binlog_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(binlog) { @@ -4690,6 +4689,7 @@ mysql_declare_plugin(binlog) "binlog", "MySQL AB", "This is a pseudo storage engine to represent the binlog in a transaction", + PLUGIN_LICENSE_GPL, binlog_init, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100 /* 1.0 */, diff --git a/sql/log.h b/sql/log.h index 2b33f70392a..8f75601f02b 100644 --- a/sql/log.h +++ b/sql/log.h @@ -177,7 +177,7 @@ public: pthread_mutex_t LOCK_log; char *name; char log_file_name[FN_REFLEN]; - char time_buff[20], db[NAME_BYTE_LEN + 1]; + char time_buff[20], db[NAME_LEN + 1]; bool write_error, inited; IO_CACHE log_file; enum_log_type log_type; diff --git a/sql/log_event.cc b/sql/log_event.cc index 0768b370ea6..faff2cae48e 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -24,6 +24,7 @@ #include "mysql_priv.h" #include "slave.h" #include "rpl_filter.h" +#include "rpl_utility.h" #include #endif /* MYSQL_CLIENT */ #include @@ -5290,38 +5291,143 @@ int Rows_log_event::do_add_row_data(byte *const row_data, #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) /* - Unpack a row into a record. The row is assumed to only consist of the fields - for which the bitset represented by 'arr' and 'bits'; the other parts of the - record are left alone. + Unpack a row into a record. 
+ + SYNOPSIS + unpack_row() + rli Relay log info + table Table to unpack into + colcnt Number of columns to read from record + record Record where the data should be unpacked + row Packed row data + cols Pointer to columns data to fill in + row_end Pointer to variable that will hold the value of the + one-after-end position for the row + master_reclength + Pointer to variable that will be set to the length of the + record on the master side + rw_set Pointer to bitmap that holds either the read_set or the + write_set of the table + + DESCRIPTION + + The row is assumed to only consist of the fields for which the + bitset represented by 'arr' and 'bits'; the other parts of the + record are left alone. + + At most 'colcnt' columns are read: if the table is larger than + that, the remaining fields are not filled in. + + RETURN VALUE + + Error code, or zero if no error. The following error codes can + be returned: + + ER_NO_DEFAULT_FOR_FIELD + Returned if one of the fields existing on the slave but not on + the master does not have a default value (and isn't nullable) */ -static char const *unpack_row(TABLE *table, - byte *record, char const *row, - MY_BITMAP const *cols) +static int +unpack_row(RELAY_LOG_INFO *rli, + TABLE *table, uint const colcnt, byte *record, + char const *row, MY_BITMAP const *cols, + char const **row_end, ulong *master_reclength, + MY_BITMAP* const rw_set, Log_event_type const event_type) { DBUG_ASSERT(record && row); - - MY_BITMAP *write_set= table->write_set; - my_size_t const n_null_bytes= table->s->null_bytes; my_ptrdiff_t const offset= record - (byte*) table->record[0]; + my_size_t master_null_bytes= table->s->null_bytes; - memcpy(record, row, n_null_bytes); - char const *ptr= row + n_null_bytes; + if (colcnt != table->s->fields) + { + Field **fptr= &table->field[colcnt-1]; + do + master_null_bytes= (*fptr)->last_null_byte(); + while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF && + fptr-- > table->field); + + /* + If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time, + there were no nullable fields nor BIT fields at all in the + columns that are common to the master and the slave. In that + case, there is only one null byte holding the X bit. + + OBSERVE! There might still be nullable columns following the + common columns, so table->s->null_bytes might be greater than 1. 
+ */ + if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF) + master_null_bytes= 1; + } + + DBUG_ASSERT(master_null_bytes <= table->s->null_bytes); + memcpy(record, row, master_null_bytes); // [1] + int error= 0; + + bitmap_set_all(rw_set); - bitmap_set_all(write_set); Field **const begin_ptr = table->field; - for (Field **field_ptr= begin_ptr ; *field_ptr ; ++field_ptr) + Field **field_ptr; + char const *ptr= row + master_null_bytes; + Field **const end_ptr= begin_ptr + colcnt; + for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr) { Field *const f= *field_ptr; - if (bitmap_is_set(cols, (uint) (field_ptr - begin_ptr))) + if (bitmap_is_set(cols, field_ptr - begin_ptr)) { + f->move_field_offset(offset); + ptr= f->unpack(f->ptr, ptr); + f->move_field_offset(-offset); /* Field...::unpack() cannot return 0 */ - ptr= f->unpack(f->ptr + offset, ptr); + DBUG_ASSERT(ptr != NULL); } else - bitmap_clear_bit(write_set, (uint) (field_ptr - begin_ptr)); + bitmap_clear_bit(rw_set, field_ptr - begin_ptr); } - return ptr; + + *row_end = ptr; + if (master_reclength) + { + if (*field_ptr) + *master_reclength = (*field_ptr)->ptr - (char*) table->record[0]; + else + *master_reclength = table->s->reclength; + } + + /* + Set properties for remaining columns, if there are any. We let the + corresponding bit in the write_set be set, to write the value if + it was not there already. We iterate over all remaining columns, + even if there were an error, to get as many error messages as + possible. We are still able to return a pointer to the next row, + so redo that. + + This generation of error messages is only relevant when inserting + new rows. + */ + for ( ; *field_ptr ; ++field_ptr) + { + uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; + + DBUG_PRINT("debug", ("flags = 0x%x, mask = 0x%x, flags & mask = 0x%x", + (*field_ptr)->flags, mask, + (*field_ptr)->flags & mask)); + + if (event_type == WRITE_ROWS_EVENT && + ((*field_ptr)->flags & mask) == mask) + { + slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, + "Field `%s` of table `%s`.`%s` " + "has no default value and cannot be NULL", + (*field_ptr)->field_name, table->s->db.str, + table->s->table_name.str); + error = ER_NO_DEFAULT_FOR_FIELD; + } + else + (*field_ptr)->set_default(); + } + + return error; } int Rows_log_event::exec_event(st_relay_log_info *rli) @@ -5425,6 +5531,9 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) /* When the open and locking succeeded, we add all the tables to the table map and remove them from tables to lock. + + We also invalidate the query cache for all the tables, since + they will now be changed. 
*/ TABLE_LIST *ptr; @@ -5433,6 +5542,9 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) rli->m_table_map.set_table(ptr->table_id, ptr->table); rli->touching_table(ptr->db, ptr->table_name, ptr->table_id); } +#ifdef HAVE_QUERY_CACHE + query_cache.invalidate_locked_for_write(rli->tables_to_lock); +#endif rli->clear_tables_to_lock(); } @@ -5477,7 +5589,11 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) error= do_before_row_operations(table); while (error == 0 && row_start < (const char*) m_rows_end) { - char const *row_end= do_prepare_row(thd, table, row_start); + char const *row_end= NULL; + if ((error= do_prepare_row(thd, rli, table, row_start, &row_end))) + break; // We should perform the after-row operation even in + // the case of error + DBUG_ASSERT(row_end != NULL); // cannot happen DBUG_ASSERT(row_end <= (const char*)m_rows_end); @@ -5682,7 +5798,7 @@ void Rows_log_event::pack_info(Protocol *protocol) #endif /************************************************************************** - Table_map_log_event member functions + Table_map_log_event member functions and support functions **************************************************************************/ /* @@ -5924,72 +6040,9 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) */ DBUG_ASSERT(m_table->in_use); - /* - Check that the number of columns and the field types in the - event match the number of columns and field types in the opened - table. - */ - uint col= m_table->s->fields; - - if (col == m_colcnt) + table_def const def(m_coltype, m_colcnt); + if (def.compatible_with(rli, m_table)) { - while (col-- > 0) - if (m_table->field[col]->type() != m_coltype[col]) - break; - } - - TABLE_SHARE const *const tsh= m_table->s; - - /* - Check the following termination conditions: - - (col == m_table->s->fields) - ==> (m_table->s->fields != m_colcnt) - (0 <= col < m_table->s->fields) - ==> (m_table->field[col]->type() != m_coltype[col]) - - Logically, A ==> B is equivalent to !A || B - - Since col is unsigned, is suffices to check that col <= - tsh->fields. If col wrapped (by decreasing col when it is 0), - the number will be UINT_MAX, which is greater than tsh->fields. - */ - DBUG_ASSERT(!(col == tsh->fields) || tsh->fields != m_colcnt); - DBUG_ASSERT(!(col < tsh->fields) || - (m_table->field[col]->type() != m_coltype[col])); - - if (col <= tsh->fields) - { - /* purecov: begin inspected */ - /* - If we get here, the number of columns in the event didn't - match the number of columns in the table on the slave, *or* - there were a column in the table on the slave that did not - have the same type as given in the event. - - If 'col' has the value that was assigned to it, it was a - mismatch between the number of columns on the master and the - slave. 
- */ - if (col == tsh->fields) - { - DBUG_ASSERT(tsh->db.str && tsh->table_name.str); - slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, - "Table width mismatch - " - "received %u columns, %s.%s has %u columns", - m_colcnt, tsh->db.str, tsh->table_name.str, tsh->fields); - } - else - { - DBUG_ASSERT(col < m_colcnt && col < tsh->fields); - DBUG_ASSERT(tsh->db.str && tsh->table_name.str); - slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, - "Column %d type mismatch - " - "received type %d, %s.%s has type %d", - col, m_coltype[col], tsh->db.str, tsh->table_name.str, - m_table->field[col]->type()); - } - thd->query_error= 1; error= ERR_BAD_TABLE_DEF; goto err; @@ -6188,19 +6241,21 @@ int Write_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -char const *Write_rows_log_event::do_prepare_row(THD *thd, TABLE *table, - char const *row_start) +int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, + TABLE *table, + char const *row_start, + char const **row_end) { - char const *ptr= row_start; DBUG_ASSERT(table != NULL); - /* - This assertion actually checks that there is at least as many - columns on the slave as on the master. - */ - DBUG_ASSERT(table->s->fields >= m_width); - DBUG_ASSERT(ptr); - ptr= unpack_row(table, (byte*)table->record[0], ptr, &m_cols); - return ptr; + DBUG_ASSERT(row_start && row_end); + + int error; + error= unpack_row(rli, + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->write_set, WRITE_ROWS_EVENT); + bitmap_copy(table->read_set, table->write_set); + return error; } /* @@ -6247,21 +6302,111 @@ namespace { /* - Replace the provided record in the database. + Copy "extra" columns from record[1] to record[0]. - Similar to how it is done in mysql_insert(), we first - try to do a ha_write_row() and of that fails due to - duplicated keys (or indices), we do an ha_update_row() - or a ha_delete_row() instead. - - @param thd Thread context for writing the record. - @param table Table to which record should be written. - - @return Error code on failure, 0 on success. + Copy the extra fields that are not present on the master but are + present on the slave from record[1] to record[0]. This is used + after fetching a record that are to be updated, either inside + replace_record() or as part of executing an update_row(). */ static int -replace_record(THD *thd, TABLE *table) +copy_extra_record_fields(TABLE *table, + my_size_t master_reclength, + my_ptrdiff_t master_fields) { + DBUG_PRINT("info", ("Copying to %p " + "from field %d at offset %u " + "to field %d at offset %u", + table->record[0], + master_fields, master_reclength, + table->s->fields, table->s->reclength)); + /* + Copying the extra fields of the slave that does not exist on + master into record[0] (which are basically the default values). + */ + DBUG_ASSERT(master_reclength <= table->s->reclength); + if (master_reclength < table->s->reclength) + bmove_align(table->record[0] + master_reclength, + table->record[1] + master_reclength, + table->s->reclength - master_reclength); + + /* + Bit columns are special. We iterate over all the remaining + columns and copy the "extra" bits to the new record. This is + not a very good solution: it should be refactored on + opportunity. + + REFACTORING SUGGESTION (Matz). Introduce a member function + similar to move_field_offset() called copy_field_offset() to + copy field values and implement it for all Field subclasses. 
Use + this function to copy data from the found record to the record + that are going to be inserted. + + The copy_field_offset() function need to be a virtual function, + which in this case will prevent copying an entire range of + fields efficiently. + */ + { + Field **field_ptr= table->field + master_fields; + for ( ; *field_ptr ; ++field_ptr) + { + /* + Set the null bit according to the values in record[1] + */ + if ((*field_ptr)->maybe_null() && + (*field_ptr)->is_null_in_record(reinterpret_cast(table->record[1]))) + (*field_ptr)->set_null(); + else + (*field_ptr)->set_notnull(); + + /* + Do the extra work for special columns. + */ + switch ((*field_ptr)->real_type()) + { + default: + /* Nothing to do */ + break; + + case FIELD_TYPE_BIT: + Field_bit *f= static_cast(*field_ptr); + my_ptrdiff_t const offset= table->record[1] - table->record[0]; + uchar const bits= + get_rec_bits(f->bit_ptr + offset, f->bit_ofs, f->bit_len); + set_rec_bits(bits, f->bit_ptr, f->bit_ofs, f->bit_len); + break; + } + } + } + return 0; // All OK +} + +/* + Replace the provided record in the database. + + SYNOPSIS + replace_record() + thd Thread context for writing the record. + table Table to which record should be written. + master_reclength + Offset to first column that is not present on the master, + alternatively the length of the record on the master + side. + + RETURN VALUE + Error code on failure, 0 on success. + + DESCRIPTION + Similar to how it is done in mysql_insert(), we first try to do + a ha_write_row() and of that fails due to duplicated keys (or + indices), we do an ha_update_row() or a ha_delete_row() instead. + */ +static int +replace_record(THD *thd, TABLE *table, + ulong const master_reclength, + uint const master_fields) +{ + DBUG_ENTER("replace_record"); DBUG_ASSERT(table != NULL && thd != NULL); int error; @@ -6273,7 +6418,7 @@ replace_record(THD *thd, TABLE *table) if ((keynum= table->file->get_dup_key(error)) < 0) { /* We failed to retrieve the duplicate key */ - return HA_ERR_FOUND_DUPP_KEY; + DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); } /* @@ -6290,20 +6435,20 @@ replace_record(THD *thd, TABLE *table) { error= table->file->rnd_pos(table->record[1], table->file->dup_ref); if (error) - return error; + DBUG_RETURN(error); } else { if (table->file->extra(HA_EXTRA_FLUSH_CACHE)) { - return my_errno; + DBUG_RETURN(my_errno); } if (key.get() == NULL) { key.assign(static_cast(my_alloca(table->s->max_unique_length))); if (key.get() == NULL) - return ENOMEM; + DBUG_RETURN(ENOMEM); } key_copy((byte*)key.get(), table->record[0], table->key_info + keynum, 0); @@ -6312,7 +6457,7 @@ replace_record(THD *thd, TABLE *table) table->key_info[keynum].key_length, HA_READ_KEY_EXACT); if (error) - return error; + DBUG_RETURN(error); } /* @@ -6320,6 +6465,12 @@ replace_record(THD *thd, TABLE *table) will enable us to update it or, alternatively, delete it (so that we can insert the new row afterwards). + First we copy the columns into table->record[0] that are not + present on the master from table->record[1], if there are any. + */ + copy_extra_record_fields(table, master_reclength, master_fields); + + /* REPLACE is defined as either INSERT or DELETE + INSERT. If possible, we can replace it with an UPDATE, but that will not work on InnoDB if FOREIGN KEY checks are necessary. 
@@ -6339,22 +6490,22 @@ replace_record(THD *thd, TABLE *table) { error=table->file->ha_update_row(table->record[1], table->record[0]); - return error; + DBUG_RETURN(error); } else { if ((error= table->file->ha_delete_row(table->record[1]))) - return error; + DBUG_RETURN(error); /* Will retry ha_write_row() with the offending row removed. */ } } - return error; + DBUG_RETURN(error); } int Write_rows_log_event::do_exec_row(TABLE *table) { DBUG_ASSERT(table != NULL); - int error= replace_record(thd, table); + int error= replace_record(thd, table, m_master_reclength, m_width); return error; } #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ @@ -6440,7 +6591,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) /* We have a key: search the table using the index */ if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE))) return error; - + /* We need to set the null bytes to ensure that the filler bit are all set when returning. There are storage engines that just set @@ -6526,6 +6677,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; table->record[1][pos]= 0xFF; error= table->file->rnd_next(table->record[1]); + switch (error) { case 0: @@ -6640,20 +6792,23 @@ int Delete_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -char const *Delete_rows_log_event::do_prepare_row(THD *thd, TABLE *table, - char const *row_start) +int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, + TABLE *table, + char const *row_start, + char const **row_end) { - char const *ptr= row_start; - DBUG_ASSERT(ptr); + int error; + DBUG_ASSERT(row_start && row_end); /* This assertion actually checks that there is at least as many columns on the slave as on the master. */ DBUG_ASSERT(table->s->fields >= m_width); - DBUG_ASSERT(ptr != NULL); - ptr= unpack_row(table, table->record[0], ptr, &m_cols); - + error= unpack_row(rli, + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->read_set, DELETE_ROWS_EVENT); /* If we will access rows using the random access method, m_key will be set to NULL, so we do not need to make a key copy in that case. @@ -6665,7 +6820,7 @@ char const *Delete_rows_log_event::do_prepare_row(THD *thd, TABLE *table, key_copy(m_key, table->record[0], key_info, 0); } - return ptr; + return error; } int Delete_rows_log_event::do_exec_row(TABLE *table) @@ -6779,11 +6934,13 @@ int Update_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -char const *Update_rows_log_event::do_prepare_row(THD *thd, TABLE *table, - char const *row_start) +int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, + TABLE *table, + char const *row_start, + char const **row_end) { - char const *ptr= row_start; - DBUG_ASSERT(ptr); + int error; + DBUG_ASSERT(row_start && row_end); /* This assertion actually checks that there is at least as many columns on the slave as on the master. 
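Editor's sketch, not part of the patch: the do_prepare_row() signature change above (returning an error code and advancing a row_end out-parameter instead of returning a pointer) is clearer from the caller's side. The loop below is a hypothetical helper; the real driver lives in Rows_log_event::do_apply_event and the member functions involved are private virtuals, so this is purely illustrative.

/*
  Hypothetical driver for the new do_prepare_row()/do_exec_row() contract.
  row_start/rows_end bound the packed row images carried by the event;
  each prepare call unpacks one image and reports where the next begins.
*/
int Rows_log_event::apply_rows_sketch(THD *thd, RELAY_LOG_INFO *rli,
                                      TABLE *table,
                                      char const *row_start,
                                      char const *rows_end)
{
  while (row_start < rows_end)
  {
    char const *row_end= NULL;
    int error;
    if ((error= do_prepare_row(thd, rli, table, row_start, &row_end)))
      return error;           /* unpack_row() failures are now reported */
    if ((error= do_exec_row(table)))
      return error;
    row_start= row_end;       /* *row_end was set by do_prepare_row() */
  }
  return 0;
}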
@@ -6791,10 +6948,20 @@ char const *Update_rows_log_event::do_prepare_row(THD *thd, TABLE *table, DBUG_ASSERT(table->s->fields >= m_width); /* record[0] is the before image for the update */ - ptr= unpack_row(table, table->record[0], ptr, &m_cols); - DBUG_ASSERT(ptr != NULL); + error= unpack_row(rli, + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->read_set, UPDATE_ROWS_EVENT); + row_start = *row_end; /* m_after_image is the after image for the update */ - ptr= unpack_row(table, m_after_image, ptr, &m_cols); + error= unpack_row(rli, + table, m_width, m_after_image, + row_start, &m_cols, row_end, &m_master_reclength, + table->write_set, UPDATE_ROWS_EVENT); + + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("m_after_image", m_after_image, table->s->reclength); + /* If we will access rows using the random access method, m_key will @@ -6807,7 +6974,7 @@ char const *Update_rows_log_event::do_prepare_row(THD *thd, TABLE *table, key_copy(m_key, table->record[0], key_info, 0); } - return ptr; + return error; } int Update_rows_log_event::do_exec_row(TABLE *table) @@ -6825,17 +6992,20 @@ int Update_rows_log_event::do_exec_row(TABLE *table) example, the partition engine). Since find_and_fetch_row() puts the fetched record (i.e., the old - record) in record[0], we have to move it out of the way and into - record[1]. After that, we can put the new record (i.e., the after - image) into record[0]. + record) in record[1], we can keep it there. We put the new record + (i.e., the after image) into record[0], and copy the fields that + are on the slave (i.e., in record[1]) into record[0], effectively + overwriting the default values that where put there by the + unpack_row() function. */ - bmove_align(table->record[1], table->record[0], table->s->reclength); bmove_align(table->record[0], m_after_image, table->s->reclength); + copy_extra_record_fields(table, m_master_reclength, m_width); /* - Now we should have the right row to update. The old row (the one - we're looking for) has to be in record[1] and the new row has to - be in record[0] for all storage engines to work correctly. + Now we have the right row to update. The old row (the one we're + looking for) is in record[1] and the new row has is in record[0]. + We also have copied the original values already in the slave's + database into the after image delivered from the master. */ error= table->file->ha_update_row(table->record[1], table->record[0]); diff --git a/sql/log_event.h b/sql/log_event.h index 313b5174da9..801a83dcc97 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1874,6 +1874,7 @@ protected: ulong m_table_id; /* Table ID */ MY_BITMAP m_cols; /* Bitmap denoting columns available */ ulong m_width; /* The width of the columns bitmap */ + ulong m_master_reclength; /* Length of record on master side */ /* Bit buffer in the same memory as the class */ uint32 m_bitbuf[128/(sizeof(uint32)*8)]; @@ -1927,12 +1928,15 @@ private: since SQL thread specific data is not available: that data is made available for the do_exec function. - RETURN VALUE A pointer to the start of the next row, or NULL if the preparation failed. Currently, preparation cannot fail, but don't rely on this behavior. + + RETURN VALUE + Error code, if something went wrong, 0 otherwise. 
*/ - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start) = 0; + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end) = 0; /* Primitive to do the actual execution necessary for a row. @@ -2000,10 +2004,11 @@ private: gptr m_memory; byte *m_after_image; - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end); + virtual int do_exec_row(TABLE *table); #endif }; @@ -2064,10 +2069,11 @@ private: byte *m_key; byte *m_after_image; - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end); + virtual int do_exec_row(TABLE *table); #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ }; @@ -2134,10 +2140,11 @@ private: byte *m_key; byte *m_after_image; - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end); + virtual int do_exec_row(TABLE *table); #endif }; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index cc16beedd9e..339ca9d965a 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -21,6 +21,9 @@ except the part which must be in the server and in the client. */ +#ifndef MYSQL_PRIV_H +#define MYSQL_PRIV_H + #ifndef MYSQL_CLIENT #include @@ -137,7 +140,18 @@ MY_LOCALE *my_locale_by_name(const char *name); #define MAX_ACCEPT_RETRY 10 // Test accept this many times #define MAX_FIELDS_BEFORE_HASH 32 #define USER_VARS_HASH_SIZE 16 -#define STACK_MIN_SIZE 8192 // Abort if less stack during eval. + +/* + Value of 9236 discovered through binary search 2006-09-26 on Ubuntu Dapper + Drake, libc6 2.3.6-0ubuntu2, Linux kernel 2.6.15-27-686, on x86. (Added + 100 bytes as reasonable buffer against growth and other environments' + requirements.) + + Feel free to raise this by the smallest amount you can to get the + "execution_constants" test to pass. + */ +#define STACK_MIN_SIZE 9336 // Abort if less stack during eval. 
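Editor's sketch, not part of the patch: STACK_MIN_SIZE is the margin the server's recursion guards keep in reserve while evaluating expressions. The schematic below uses hypothetical names (stack_bottom, thread_stack_size) and is not the server's own check_stack_overrun(); it only shows how such a margin is typically enforced.

/*
  Schematic stack-margin check.  The address of a local variable
  approximates the current stack pointer; if fewer than STACK_MIN_SIZE
  bytes remain of the configured thread stack, the caller should abort
  evaluation instead of recursing further.
*/
static bool stack_margin_ok(const char *stack_bottom, ulong thread_stack_size)
{
  char marker;                                  /* approximates current SP */
  long used= (long) (stack_bottom - &marker);
  if (used < 0)
    used= -used;                                /* growth direction varies */
  return (ulong) used + STACK_MIN_SIZE <= thread_stack_size;
}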
+ #define STACK_MIN_SIZE_FOR_OPEN 1024*80 #define STACK_BUFF_ALLOC 256 // For stack overrun checks #ifndef MYSQLD_NET_RETRY_COUNT @@ -427,6 +441,7 @@ void view_store_options(THD *thd, st_table_list *table, String *buff); #define TL_OPTION_UPDATING 1 #define TL_OPTION_FORCE_INDEX 2 #define TL_OPTION_IGNORE_LEAVES 4 +#define TL_OPTION_ALIAS 8 /* Some portable defines */ @@ -454,7 +469,8 @@ enum enum_parsing_place NO_MATTER, IN_HAVING, SELECT_LIST, - IN_WHERE + IN_WHERE, + IN_ON }; struct st_table; @@ -584,7 +600,7 @@ void get_default_definer(THD *thd, LEX_USER *definer); LEX_USER *create_default_definer(THD *thd); LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name); LEX_USER *get_current_user(THD *thd, LEX_USER *user); -bool check_string_length(CHARSET_INFO *cs, LEX_STRING *str, +bool check_string_length(LEX_STRING *str, const char *err_msg, uint max_length); enum enum_mysql_completiontype { @@ -1413,7 +1429,7 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info, uint key_length); bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length); void key_unpack(String *to,TABLE *form,uint index); -bool check_if_key_used(TABLE *table, uint idx, List &fields); +bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields); int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length); int key_rec_cmp(void *key_info, byte *a, byte *b); @@ -1423,10 +1439,12 @@ void sql_perror(const char *message); int vprint_msg_to_log(enum loglevel level, const char *format, va_list args); -void sql_print_error(const char *format, ...); -void sql_print_warning(const char *format, ...); -void sql_print_information(const char *format, ...); -typedef void (*sql_print_message_func)(const char *format, ...); +void sql_print_error(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2); +void sql_print_warning(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2); +void sql_print_information(const char *format, ...) + ATTRIBUTE_FORMAT(printf, 1, 2); +typedef void (*sql_print_message_func)(const char *format, ...) 
+ ATTRIBUTE_FORMAT(printf, 1, 2); extern sql_print_message_func sql_print_message_handlers[]; /* type of the log table */ @@ -1554,7 +1572,6 @@ extern my_bool opt_log_queries_not_using_indexes; extern bool opt_disable_networking, opt_skip_show_db; extern my_bool opt_character_set_client_handshake; extern bool volatile abort_loop, shutdown_in_progress, grant_option; -extern bool mysql_proc_table_exists; extern uint volatile thread_count, thread_running, global_read_lock; extern my_bool opt_sql_bin_update, opt_safe_user_create, opt_no_mix_types; extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap; @@ -1624,65 +1641,22 @@ extern TYPELIB log_output_typelib; /* optional things, have_* variables */ -#ifdef WITH_INNOBASE_STORAGE_ENGINE -extern handlerton innobase_hton; -#define have_innodb innobase_hton.state -#else extern SHOW_COMP_OPTION have_innodb; -#endif -#ifdef WITH_EXAMPLE_STORAGE_ENGINE -extern handlerton example_hton; -#define have_example_db example_hton.state -#else extern SHOW_COMP_OPTION have_example_db; -#endif -#ifdef WITH_ARCHIVE_STORAGE_ENGINE -extern handlerton archive_hton; -#define have_archive_db archive_hton.state -#else extern SHOW_COMP_OPTION have_archive_db; -#endif -#ifdef WITH_CSV_STORAGE_ENGINE -extern handlerton tina_hton; -#define have_csv_db tina_hton.state -#else extern SHOW_COMP_OPTION have_csv_db; -#endif -#ifdef WITH_FEDERATED_STORAGE_ENGINE -extern handlerton federated_hton; -#define have_federated_db federated_hton.state -#else extern SHOW_COMP_OPTION have_federated_db; -#endif -#ifdef WITH_BLACKHOLE_STORAGE_ENGINE -extern handlerton blackhole_hton; -#define have_blackhole_db blackhole_hton.state -#else extern SHOW_COMP_OPTION have_blackhole_db; -#endif -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -extern handlerton ndbcluster_hton; -#define have_ndbcluster ndbcluster_hton.state -#else extern SHOW_COMP_OPTION have_ndbcluster; -#endif -#ifdef WITH_PARTITION_STORAGE_ENGINE -extern handlerton partition_hton; -#define have_partition_db partition_hton.state -#else extern SHOW_COMP_OPTION have_partition_db; -#endif +extern SHOW_COMP_OPTION have_merge_db; -extern handlerton myisammrg_hton; -/* MRG_MYISAM handler is always built, but may be skipped */ -#define have_merge_db myisammrg_hton.state - -extern handlerton myisam_hton; -extern handlerton myisammrg_hton; -extern handlerton heap_hton; +extern handlerton *partition_hton; +extern handlerton *myisam_hton; +extern handlerton *heap_hton; extern SHOW_COMP_OPTION have_row_based_replication; -extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_dlopen; +extern SHOW_COMP_OPTION have_openssl, have_symlink, have_dlopen; extern SHOW_COMP_OPTION have_query_cache; extern SHOW_COMP_OPTION have_geometry, have_rtree_keys; extern SHOW_COMP_OPTION have_crypt; @@ -2037,3 +2011,5 @@ bool schema_table_store_record(THD *thd, TABLE *table); #endif /* MYSQL_SERVER */ #endif /* MYSQL_CLIENT */ + +#endif /* MYSQL_PRIV_H */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6ccd4707248..6674dd87757 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -354,6 +354,14 @@ my_bool opt_safe_user_create = 0, opt_no_mix_types = 0; my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0; my_bool opt_log_slave_updates= 0; my_bool opt_innodb; + +/* + Legacy global handlerton. These will be removed (please do not add more). 
+*/ +handlerton *heap_hton; +handlerton *myisam_hton; +handlerton *partition_hton; + #ifdef WITH_INNOBASE_STORAGE_ENGINE extern ulong innobase_fast_shutdown; extern ulong innobase_large_page_size; @@ -782,7 +790,7 @@ static void close_connections(void) { if (ip_sock != INVALID_SOCKET) { - (void) shutdown(ip_sock,2); + (void) shutdown(ip_sock, SHUT_RDWR); (void) closesocket(ip_sock); ip_sock= INVALID_SOCKET; } @@ -814,7 +822,7 @@ static void close_connections(void) #ifdef HAVE_SYS_UN_H if (unix_sock != INVALID_SOCKET) { - (void) shutdown(unix_sock,2); + (void) shutdown(unix_sock, SHUT_RDWR); (void) closesocket(unix_sock); (void) unlink(mysqld_unix_port); unix_sock= INVALID_SOCKET; @@ -918,7 +926,7 @@ static void close_server_sock() { ip_sock=INVALID_SOCKET; DBUG_PRINT("info",("calling shutdown on TCP/IP socket")); - VOID(shutdown(tmp_sock,2)); + VOID(shutdown(tmp_sock, SHUT_RDWR)); #if defined(__NETWARE__) /* The following code is disabled for normal systems as it causes MySQL @@ -933,7 +941,7 @@ static void close_server_sock() { unix_sock=INVALID_SOCKET; DBUG_PRINT("info",("calling shutdown on unix socket")); - VOID(shutdown(tmp_sock,2)); + VOID(shutdown(tmp_sock, SHUT_RDWR)); #if defined(__NETWARE__) /* The following code is disabled for normal systems as it may cause MySQL @@ -1091,7 +1099,7 @@ pthread_handler_t kill_server_thread(void *arg __attribute__((unused))) extern "C" sig_handler print_signal_warning(int sig) { if (global_system_variables.log_warnings) - sql_print_warning("Got signal %d from thread %d", sig,my_thread_id()); + sql_print_warning("Got signal %d from thread %ld", sig,my_thread_id()); #ifdef DONT_REMEMBER_SIGNAL my_sigset(sig,print_signal_warning); /* int. thread system calls */ #endif @@ -1601,8 +1609,8 @@ static void network_init(void) if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1)) { - sql_print_error("The socket file path is too long (> %d): %s", - sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); + sql_print_error("The socket file path is too long (> %lu): %s", + sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); unireg_abort(1); } if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0) @@ -2953,7 +2961,7 @@ static void openssl_lock(int mode, openssl_lock_t *lock, const char *file, } if (err) { - sql_print_error("Fatal: can't %s OpenSSL %s lock", what); + sql_print_error("Fatal: can't %s OpenSSL lock", what); abort(); } } @@ -3120,7 +3128,11 @@ with --log-bin instead."); global_system_variables.binlog_format= BINLOG_FORMAT_ROW; else #endif +#if defined(HAVE_ROW_BASED_REPLICATION) + global_system_variables.binlog_format= BINLOG_FORMAT_MIXED; +#else global_system_variables.binlog_format= BINLOG_FORMAT_STMT; +#endif } /* Check that we have not let the format to unspecified at this point */ @@ -3247,7 +3259,7 @@ server."); default_storage_engine_str); unireg_abort(1); } - hton= &myisam_hton; + hton= myisam_hton; } global_system_variables.table_type= hton; } @@ -4238,7 +4250,7 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) if (req.sink) ((void (*)(int))req.sink)(req.fd); - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); (void) closesocket(new_sock); continue; } @@ -4253,7 +4265,7 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) if (getsockname(new_sock,&dummy, &dummyLen) < 0) { sql_perror("Error on new connection socket"); - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); (void) closesocket(new_sock); continue; } @@ -4265,7 +4277,7 @@ 
pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) if (!(thd= new THD)) { - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); VOID(closesocket(new_sock)); continue; } @@ -4279,7 +4291,7 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) vio_delete(vio_tmp); else { - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); (void) closesocket(new_sock); } delete thd; @@ -4886,7 +4898,13 @@ struct my_option my_long_options[] = "supports only statement-based binary logging, so only 'statement' is " "a legal value." #endif - , 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + , 0, 0, 0, GET_STR, REQUIRED_ARG, +#ifdef HAVE_ROW_BASED_REPLICATION + BINLOG_FORMAT_MIXED +#else + BINLOG_FORMAT_STMT +#endif + , 0, 0, 0, 0, 0 }, {"binlog-do-db", OPT_BINLOG_DO_DB, "Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -6964,7 +6982,7 @@ static void mysql_init_variables(void) /* Set default values for some option variables */ default_storage_engine_str= (char*) "MyISAM"; - global_system_variables.table_type= &myisam_hton; + global_system_variables.table_type= myisam_hton; global_system_variables.tx_isolation= ISO_REPEATABLE_READ; global_system_variables.select_limit= (ulonglong) HA_POS_ERROR; max_system_variables.select_limit= (ulonglong) HA_POS_ERROR; @@ -6985,6 +7003,51 @@ static void mysql_init_variables(void) "d:t:i:o,/tmp/mysqld.trace"); #endif opt_error_log= IF_WIN(1,0); +#ifdef WITH_MYISAMMRG_STORAGE_ENGINE + have_merge_db= SHOW_OPTION_YES; +#else + have_merge_db= SHOW_OPTION_NO; +#endif +#ifdef WITH_INNOBASE_STORAGE_ENGINE + have_innodb= SHOW_OPTION_YES; +#else + have_innodb= SHOW_OPTION_NO; +#endif +#ifdef WITH_EXAMPLE_STORAGE_ENGINE + have_example_db= SHOW_OPTION_YES; +#else + have_example_db= SHOW_OPTION_NO; +#endif +#ifdef WITH_ARCHIVE_STORAGE_ENGINE + have_archive_db= SHOW_OPTION_YES; +#else + have_archive_db= SHOW_OPTION_NO; +#endif +#ifdef WITH_BLACKHOLE_STORAGE_ENGINE + have_blackhole_db= SHOW_OPTION_YES; +#else + have_blackhole_db= SHOW_OPTION_NO; +#endif +#ifdef WITH_FEDERATED_STORAGE_ENGINE + have_federated_db= SHOW_OPTION_YES; +#else + have_federated_db= SHOW_OPTION_NO; +#endif +#ifdef WITH_CSV_STORAGE_ENGINE + have_csv_db= SHOW_OPTION_YES; +#else + have_csv_db= SHOW_OPTION_NO; +#endif +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE + have_ndbcluster= SHOW_OPTION_DISABLED; +#else + have_ndbcluster= SHOW_OPTION_NO; +#endif +#ifdef WITH_PARTITION_STORAGE_ENGINE + have_partition_db= SHOW_OPTION_YES; +#else + have_partition_db= SHOW_OPTION_NO; +#endif #ifdef HAVE_ROW_BASED_REPLICATION have_row_based_replication= SHOW_OPTION_YES; #else @@ -7601,14 +7664,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), exit(1); } switch (method-1) { - case 0: - method_conv= MI_STATS_METHOD_NULLS_NOT_EQUAL; + case 2: + method_conv= MI_STATS_METHOD_IGNORE_NULLS; break; case 1: method_conv= MI_STATS_METHOD_NULLS_EQUAL; break; - case 2: - method_conv= MI_STATS_METHOD_IGNORE_NULLS; + case 0: + default: + method_conv= MI_STATS_METHOD_NULLS_NOT_EQUAL; break; } global_system_variables.myisam_stats_method= method_conv; @@ -8061,6 +8125,7 @@ void refresh_status(THD *thd) #undef have_federated_db #undef have_partition_db #undef have_blackhole_db +#undef have_merge_db SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO; SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO; @@ -8070,6 +8135,7 
@@ SHOW_COMP_OPTION have_csv_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_merge_db= SHOW_OPTION_NO; #ifndef WITH_INNOBASE_STORAGE_ENGINE uint innobase_flush_log_at_trx_commit; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 2f8fda3122d..0abb37b5345 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1165,11 +1165,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) } thd= head->in_use; - if (!(file= get_new_handler(head->s, thd->mem_root, head->s->db_type))) - goto failure; - DBUG_PRINT("info", ("Allocated new handler 0x%lx", (long) file)); - if (file->ha_open(head, head->s->normalized_path.str, head->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) + if (!(file= head->file->clone(thd->mem_root))) { /* Caller will free the memory */ goto failure; @@ -4917,9 +4913,17 @@ static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func, { Item_func_in *func=(Item_func_in*) cond_func; + /* + Array for IN() is constructed when all values have the same result + type. Tree won't be built for values with different result types, + so we check it here to avoid unnecessary work. + */ + if (!func->array) + break; + if (inv) { - if (func->array && func->cmp_type != ROW_RESULT) + if (func->array->result_type() != ROW_RESULT) { /* We get here for conditions in form "t.key NOT IN (c1, c2, ...)", @@ -6950,8 +6954,9 @@ void SEL_ARG::test_use_count(SEL_ARG *root) ulong count=count_key_part_usage(root,pos->next_key_part); if (count > pos->next_key_part->use_count) { - sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu should be %lu", - pos,pos->next_key_part->use_count,count); + sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu " + "should be %lu", (long unsigned int)pos, + pos->next_key_part->use_count, count); return; } pos->next_key_part->test_use_count(root); @@ -6959,7 +6964,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) } if (e_count != elements) sql_print_warning("Wrong use count: %u (should be %u) for tree at 0x%lx", - e_count, elements, (gptr) this); + e_count, elements, (long unsigned int) this); } #endif @@ -7530,42 +7535,42 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) } -bool QUICK_SELECT_I::check_if_keys_used(List *fields) +bool QUICK_SELECT_I::is_keys_used(const MY_BITMAP *fields) { - return check_if_key_used(head, index, *fields); + return is_key_used(head, index, fields); } -bool QUICK_INDEX_MERGE_SELECT::check_if_keys_used(List *fields) +bool QUICK_INDEX_MERGE_SELECT::is_keys_used(const MY_BITMAP *fields) { QUICK_RANGE_SELECT *quick; List_iterator_fast it(quick_selects); while ((quick= it++)) { - if (check_if_key_used(head, quick->index, *fields)) + if (is_key_used(head, quick->index, fields)) return 1; } return 0; } -bool QUICK_ROR_INTERSECT_SELECT::check_if_keys_used(List *fields) +bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(const MY_BITMAP *fields) { QUICK_RANGE_SELECT *quick; List_iterator_fast it(quick_selects); while ((quick= it++)) { - if (check_if_key_used(head, quick->index, *fields)) + if (is_key_used(head, quick->index, fields)) return 1; } return 0; } -bool QUICK_ROR_UNION_SELECT::check_if_keys_used(List *fields) +bool QUICK_ROR_UNION_SELECT::is_keys_used(const MY_BITMAP *fields) { QUICK_SELECT_I *quick; List_iterator_fast it(quick_selects); while ((quick= it++)) { - if (quick->check_if_keys_used(fields)) + if 
(quick->is_keys_used(fields)) return 1; } return 0; @@ -8139,7 +8144,6 @@ end: DBUG_RETURN(result); } - /* Get the next record with a different prefix. @@ -10791,7 +10795,6 @@ static void print_ror_scans_arr(TABLE *table, const char *msg, DBUG_VOID_RETURN; } - /***************************************************************************** ** Print a quick range for debugging ** TODO: diff --git a/sql/opt_range.h b/sql/opt_range.h index 6f26f9f782c..6099608f7cd 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -224,10 +224,9 @@ public: virtual void add_info_string(String *str) {}; /* Return 1 if any index used by this quick select - a) uses field that is listed in passed field list or - b) is automatically updated (like a timestamp) + uses field which is marked in passed bitmap. */ - virtual bool check_if_keys_used(List *fields); + virtual bool is_keys_used(const MY_BITMAP *fields); /* rowid of last row retrieved by this quick select. This is used only when @@ -425,7 +424,7 @@ public: int get_type() { return QS_TYPE_INDEX_MERGE; } void add_keys_and_lengths(String *key_names, String *used_lengths); void add_info_string(String *str); - bool check_if_keys_used(List *fields); + bool is_keys_used(const MY_BITMAP *fields); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif @@ -484,7 +483,7 @@ public: int get_type() { return QS_TYPE_ROR_INTERSECT; } void add_keys_and_lengths(String *key_names, String *used_lengths); void add_info_string(String *str); - bool check_if_keys_used(List *fields); + bool is_keys_used(const MY_BITMAP *fields); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif @@ -538,7 +537,7 @@ public: int get_type() { return QS_TYPE_ROR_UNION; } void add_keys_and_lengths(String *key_names, String *used_lengths); void add_info_string(String *str); - bool check_if_keys_used(List *fields); + bool is_keys_used(const MY_BITMAP *fields); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 93ec48d69f2..7d6305594fd 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -234,7 +234,7 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) Type of range for the key part for this field will be returned in range_fl. */ - if ((outer_tables & table->map) || + if (table->file->inited || (outer_tables & table->map) || !find_key_for_maxmin(0, &ref, item_field->field, conds, &range_fl, &prefix_len)) { @@ -321,7 +321,7 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) Type of range for the key part for this field will be returned in range_fl. */ - if ((outer_tables & table->map) || + if (table->file->inited || (outer_tables & table->map) || !find_key_for_maxmin(1, &ref, item_field->field, conds, &range_fl, &prefix_len)) { diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 286637bd9aa..b3c7c1b80e7 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -443,11 +443,9 @@ bool partition_info::check_engine_mix(handlerton **engine_array, uint no_parts) DBUG_RETURN(TRUE); } } while (++i < no_parts); - if (engine_array[0] == &myisammrg_hton || - engine_array[0] == &tina_hton) + if (engine_array[0]->flags & HTON_NO_PARTITION) { - my_error(ER_PARTITION_MERGE_ERROR, MYF(0), - engine_array[0] == &myisammrg_hton ? 
"MyISAM Merge" : "CSV"); + my_error(ER_PARTITION_MERGE_ERROR, MYF(0)); DBUG_RETURN(TRUE); } DBUG_RETURN(FALSE); diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc new file mode 100644 index 00000000000..c80b6dc3f69 --- /dev/null +++ b/sql/rpl_utility.cc @@ -0,0 +1,153 @@ +/* Copyright 2006 MySQL AB. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "rpl_utility.h" + +uint32 +field_length_from_packed(enum_field_types const field_type, + byte const *const data) +{ + uint32 length; + + switch (field_type) { + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: + length= ~0UL; + break; + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_TINY: + length= 1; + break; + case MYSQL_TYPE_SHORT: + length= 2; + break; + case MYSQL_TYPE_INT24: + length= 3; + break; + case MYSQL_TYPE_LONG: + length= 4; + break; +#ifdef HAVE_LONG_LONG + case MYSQL_TYPE_LONGLONG: + length= 8; + break; +#endif + case MYSQL_TYPE_FLOAT: + length= sizeof(float); + break; + case MYSQL_TYPE_DOUBLE: + length= sizeof(double); + break; + case MYSQL_TYPE_NULL: + length= 0; + break; + case MYSQL_TYPE_NEWDATE: + length= 3; + break; + case MYSQL_TYPE_DATE: + length= 4; + break; + case MYSQL_TYPE_TIME: + length= 3; + break; + case MYSQL_TYPE_TIMESTAMP: + length= 4; + break; + case MYSQL_TYPE_DATETIME: + length= 8; + break; + break; + case MYSQL_TYPE_BIT: + length= ~0UL; + break; + default: + /* This case should never be chosen */ + DBUG_ASSERT(0); + /* If something goes awfully wrong, it's better to get a string than die */ + case MYSQL_TYPE_STRING: + length= uint2korr(data); + break; + + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: + length= ~0UL; // NYI + break; + + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_GEOMETRY: + length= ~0UL; // NYI + break; + } + + return length; +} + +/********************************************************************* + * table_def member definitions * + *********************************************************************/ + +/* + Is the definition compatible with a table? + +*/ +int +table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) + const +{ + /* + We only check the initial columns for the tables. + */ + uint const cols_to_check= min(table->s->fields, size()); + int error= 0; + + TABLE_SHARE const *const tsh= table->s; + + /* + To get proper error reporting for all columns of the table, we + both check the width and iterate over all columns. 
+ */ + if (tsh->fields < size()) + { + DBUG_ASSERT(tsh->db.str && tsh->table_name.str); + error= 1; + slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, + "Table width mismatch - " + "received %u columns, %s.%s has %u columns", + size(), tsh->db.str, tsh->table_name.str, tsh->fields); + } + + for (uint col= 0 ; col < cols_to_check ; ++col) + { + if (table->field[col]->type() != type(col)) + { + DBUG_ASSERT(col < size() && col < tsh->fields); + DBUG_ASSERT(tsh->db.str && tsh->table_name.str); + error= 1; + slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, + "Column %d type mismatch - " + "received type %d, %s.%s has type %d", + col, type(col), tsh->db.str, tsh->table_name.str, + table->field[col]->type()); + } + } + + return error; +} diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h new file mode 100644 index 00000000000..df0b0cd2ee1 --- /dev/null +++ b/sql/rpl_utility.h @@ -0,0 +1,126 @@ +/* Copyright 2006 MySQL AB. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef RPL_UTILITY_H +#define RPL_UTILITY_H + +#ifndef __cplusplus +#error "Don't include this C++ header file from a non-C++ file!" +#endif + +#include "mysql_priv.h" + +uint32 +field_length_from_packed(enum_field_types const field_type, + byte const *const data); + +/* + A table definition from the master. + + RESPONSIBILITIES + + - Extract and decode table definition data from the table map event + - Check if table definition in table map is compatible with table + definition on slave + + DESCRIPTION + + Currently, the only field type data available is an array of the + type operators that are present in the table map event. + + TODO + + Add type operands to this structure to allow detection of + difference between, e.g., BIT(5) and BIT(10). + */ + +class table_def +{ +public: + /* + Convenience declaration of the type of the field type data in a + table map event. + */ + typedef unsigned char field_type; + + /* + Constructor. + + SYNOPSIS + table_def() + types Array of types + size Number of elements in array 'types' + */ + table_def(field_type *types, my_size_t size) + : m_type(types), m_size(size) + { + } + + /* + Return the number of fields there is type data for. + + SYNOPSIS + size() + + RETURN VALUE + The number of fields that there is type data for. + */ + my_size_t size() const { return m_size; } + + /* + Return a representation of the type data for one field. + + SYNOPSIS + type() + i Field index to return data for + + RETURN VALUE + + Will return a representation of the type data for field + 'i'. Currently, only the type identifier is returned. + */ + field_type type(my_ptrdiff_t i) const { return m_type[i]; } + + /* + Decide if the table definition is compatible with a table. + + SYNOPSIS + compatible_with() + rli Pointer to relay log info + table Pointer to table to compare with. 
+ + DESCRIPTION + + Compare the definition with a table to see if it is compatible + with it. A table definition is compatible with a table if: + + - the columns types of the table definition is a (not + necessarily proper) prefix of the column type of the table, or + + - the other way around + + RETURN VALUE + 1 if the table definition is not compatible with 'table' + 0 if the table definition is compatible with 'table' + */ + int compatible_with(RELAY_LOG_INFO *rli, TABLE *table) const; + +private: + my_size_t m_size; // Number of elements in the types array + field_type *m_type; // Array of type descriptors +}; + +#endif /* RPL_UTILITY_H */ diff --git a/sql/set_var.cc b/sql/set_var.cc index e09c75573eb..4369c288cd5 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1218,8 +1218,8 @@ static void fix_tx_isolation(THD *thd, enum_var_type type) thd->variables.tx_isolation); } -static void fix_completion_type(THD *thd __attribute__(unused), - enum_var_type type __attribute__(unused)) {} +static void fix_completion_type(THD *thd __attribute__((unused)), + enum_var_type type __attribute__((unused))) {} static int check_completion_type(THD *thd, set_var *var) { @@ -1258,14 +1258,14 @@ static void fix_net_retry_count(THD *thd, enum_var_type type) thd->net.retry_count=thd->variables.net_retry_count; } #else /* HAVE_REPLICATION */ -static void fix_net_read_timeout(THD *thd __attribute__(unused), - enum_var_type type __attribute__(unused)) +static void fix_net_read_timeout(THD *thd __attribute__((unused)), + enum_var_type type __attribute__((unused))) {} -static void fix_net_write_timeout(THD *thd __attribute__(unused), - enum_var_type type __attribute__(unused)) +static void fix_net_write_timeout(THD *thd __attribute__((unused)), + enum_var_type type __attribute__((unused))) {} -static void fix_net_retry_count(THD *thd __attribute__(unused), - enum_var_type type __attribute__(unused)) +static void fix_net_retry_count(THD *thd __attribute__((unused)), + enum_var_type type __attribute__((unused))) {} #endif /* HAVE_REPLICATION */ @@ -3591,7 +3591,7 @@ byte *sys_var_thd_storage_engine::value_ptr(THD *thd, enum_var_type type, void sys_var_thd_storage_engine::set_default(THD *thd, enum_var_type type) { if (type == OPT_GLOBAL) - global_system_variables.*offset= &myisam_hton; + global_system_variables.*offset= myisam_hton; else thd->variables.*offset= global_system_variables.*offset; } diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 37f54899615..a34e8c152cf 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -3791,7 +3791,7 @@ ER_WRONG_MRG_TABLE cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì" dan "Tabellerne i MERGE er ikke defineret ens" nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities" - eng "All tables in the MERGE table are not identically defined" + eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist" est "Kõik tabelid MERGE tabeli määratluses ei ole identsed" fre "Toutes les tables de la table de type MERGE n'ont pas la même définition" ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert" @@ -5960,9 +5960,9 @@ ER_EVENT_SET_VAR_ERROR eng "Error during starting/stopping of the scheduler. Error code %u" ger "Fehler während des Startens oder Anhalten des Schedulers. 
Fehlercode %u" ER_PARTITION_MERGE_ERROR - eng "%s handler cannot be used in partitioned tables" - ger "%s-Handler kann in partitionierten Tabellen nicht verwendet werden" - swe "%s kan inte användas i en partitionerad tabell" + eng "Engine cannot be used in partitioned tables" + ger "Engine kann in partitionierten Tabellen nicht verwendet werden" + swe "Engine inte användas i en partitionerad tabell" ER_CANT_ACTIVATE_LOG eng "Cannot activate '%-.64s' log" ger "Kann Logdatei '%-.64s' nicht aktivieren" @@ -5972,21 +5972,33 @@ ER_RBR_NOT_AVAILABLE ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA eng "Triggers can not be created on system tables" ger "Trigger können nicht auf Systemtabellen erzeugt werden" -ER_CANT_ALTER_LOG_TABLE - eng "You can't alter a log table if logging is enabled" -ER_BAD_LOG_ENGINE - eng "One can use only CSV and MyISAM engines for the log tables" -ER_CANT_DROP_LOG_TABLE - eng "Cannot drop log table if log is enabled" ER_EVENT_RECURSIVITY_FORBIDDEN eng "Recursivity of EVENT DDL statements is forbidden when body is present" + ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert" ER_EVENTS_DB_ERROR eng "Cannot proceed because the tables used by events were found damaged at server start" + ger "Kann nicht weitermachen, weil die Tabellen, die von Events verwendet werden, beim Serverstart als beschädigt markiert wurden" ER_ONLY_INTEGERS_ALLOWED - eng "Only normal integers allowed as number here" + eng "Only integers allowed as number here" + ger "An dieser Stelle sind nur Ganzzahlen zulässig" +ER_AUTOINC_READ_FAILED + eng "Failed to read auto-increment value from storage engine" + ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen" ER_USERNAME - eng "user name" + eng "user name" + ger "Benutzername" ER_HOSTNAME - eng "host name" + eng "host name" + ger "Hostname" ER_WRONG_STRING_LENGTH - eng "String '%-.70s' is too long for %s (should be no longer than %d)" + eng "String '%-.70s' is too long for %s (should be no longer than %d)" + ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)" +ER_UNSUPORTED_LOG_ENGINE + eng "This storage engine cannot be used for log tables"" + ger "Diese Speicher-Engine kann für Logtabellen nicht verwendet werden" +ER_BAD_LOG_STATEMENT + eng "You cannot '%s' a log table if logging is enabled" + ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist" +ER_NON_INSERTABLE_TABLE + eng "The target table %-.100s of the %s is not insertable-into" + diff --git a/sql/slave.cc b/sql/slave.cc index f9645fc83e3..5d871a64347 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -4447,7 +4447,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, suppress_warnings= 0; sql_print_error("Slave I/O thread: error %s to master \ '%s@%s:%d': \ -Error: '%s' errno: %d retry-time: %d retries: %d", +Error: '%s' errno: %d retry-time: %d retries: %lu", (reconnect ? 
"reconnecting" : "connecting"), mi->user,mi->host,mi->port, mysql_error(mysql), last_errno, diff --git a/sql/slave.h b/sql/slave.h index 9d76277da0d..e70b2e4b326 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -152,7 +152,7 @@ typedef struct st_master_info /* the variables below are needed because we can change masters on the fly */ char master_log_name[FN_REFLEN]; char host[HOSTNAME_LENGTH+1]; - char user[USERNAME_BYTE_LENGTH+1]; + char user[USERNAME_LENGTH+1]; char password[MAX_PASSWORD_LENGTH+1]; my_bool ssl; // enables use of SSL connection if true char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN]; @@ -272,7 +272,8 @@ const char *print_slave_db_safe(const char *db); int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); void skip_load_data_infile(NET* net); void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, - int err_code, const char* msg, ...); + int err_code, const char* msg, ...) + ATTRIBUTE_FORMAT(printf, 4, 5); void end_slave(); /* clean up */ void init_master_info_with_options(MASTER_INFO* mi); diff --git a/sql/sp.cc b/sql/sp.cc index 7ae335a1a3d..3411a18c17a 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -65,8 +65,6 @@ enum MYSQL_PROC_FIELD_COUNT }; -bool mysql_proc_table_exists= 1; - /* Tells what SP_DEFAULT_ACCESS should be mapped to */ #define SP_DEFAULT_ACCESS_MAPPING SP_CONTAINS_SQL @@ -118,13 +116,6 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup) bool not_used; DBUG_ENTER("open_proc_table"); - /* - Speed up things if mysql.proc doesn't exists. mysql_proc_table_exists - is set when we create or read stored procedure or on flush privileges. - */ - if (!mysql_proc_table_exists) - DBUG_RETURN(0); - thd->reset_n_backup_open_tables_state(backup); bzero((char*) &tables, sizeof(tables)); @@ -134,7 +125,6 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup) MYSQL_LOCK_IGNORE_FLUSH))) { thd->restore_backup_open_tables_state(backup); - mysql_proc_table_exists= 0; DBUG_RETURN(0); } table->use_all_columns(); @@ -186,15 +176,6 @@ static TABLE *open_proc_table_for_update(THD *thd) if (table) table->use_all_columns(); - /* - Under explicit LOCK TABLES or in prelocked mode we should not - say that mysql.proc table does not exist if we are unable to - open and lock it for writing since this condition may be - transient. 
- */ - if (!(thd->locked_tables || thd->prelocked_mode) || table) - mysql_proc_table_exists= test(table); - DBUG_RETURN(table); } @@ -406,16 +387,16 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, { LEX *old_lex= thd->lex, newlex; String defstr; - char old_db_buf[NAME_BYTE_LEN+1]; + char old_db_buf[NAME_LEN+1]; LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; ulong old_sql_mode= thd->variables.sql_mode; ha_rows old_select_limit= thd->variables.select_limit; sp_rcontext *old_spcont= thd->spcont; - - char definer_user_name_holder[USERNAME_BYTE_LENGTH + 1]; + + char definer_user_name_holder[USERNAME_LENGTH + 1]; LEX_STRING definer_user_name= { definer_user_name_holder, - USERNAME_BYTE_LENGTH }; + USERNAME_LENGTH }; char definer_host_name_holder[HOSTNAME_LENGTH + 1]; LEX_STRING definer_host_name= { definer_host_name_holder, HOSTNAME_LENGTH }; @@ -514,7 +495,7 @@ db_create_routine(THD *thd, int type, sp_head *sp) int ret; TABLE *table; char definer[USER_HOST_BUFF_SIZE]; - char old_db_buf[NAME_BYTE_LEN+1]; + char old_db_buf[NAME_LEN+1]; LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; DBUG_ENTER("db_create_routine"); @@ -1610,14 +1591,6 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex, case SP_KEY_NOT_FOUND: ret= SP_OK; break; - case SP_OPEN_TABLE_FAILED: - /* - Force it to attempt opening it again on subsequent calls; - otherwise we will get one error message the first time, and - then ER_SP_PROC_TABLE_CORRUPT (below) on subsequent tries. - */ - mysql_proc_table_exists= 1; - /* Fall through */ default: /* Any error when loading an existing routine is either some problem @@ -1633,7 +1606,17 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex, */ if (!thd->net.report_error) { - char n[NAME_LEN*2+2]; + /* + SP allows full NAME_LEN chars thus he have to allocate enough + size in bytes. Otherwise there is stack overrun could happen + if multibyte sequence is `name`. `db` is still safe because the + rest of the server checks agains NAME_LEN bytes and not chars. + Hence, the overrun happens only if the name is in length > 32 and + uses multibyte (cyrillic, greek, etc.) + + !! Change 3 with SYSTEM_CHARSET_MBMAXLEN when it's defined. + */ + char n[NAME_LEN*3*2+2]; /* m_qname.str is not always \0 terminated */ memcpy(n, name.m_qname.str, name.m_qname.length); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index b36686f7179..a061ae12dd1 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -795,7 +795,7 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b) This set is produced by tracking user variable reads during statement execution. - Fo SPs, this has the following implications: + For SPs, this has the following implications: 1) thd->user_var_events may contain events from several SP statements and needs to be valid after exection of these statements was finished. In order to achieve that, we @@ -808,6 +808,14 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b) reset_dynamic(&thd->user_var_events); calls in several different places. (TODO cosider moving this into mysql_bin_log.write() function) + + 4.2 Auto_increment storage in binlog + + As we may write two statements to binlog from one single logical statement + (case of "SELECT func1(),func2()": it is binlogged as "SELECT func1()" and + then "SELECT func2()"), we need to reset auto_increment binlog variables + after each binlogged SELECT. 
Otherwise, the auto_increment value of the + first SELECT would be used for the second too. */ @@ -954,7 +962,7 @@ bool sp_head::execute(THD *thd) { DBUG_ENTER("sp_head::execute"); - char old_db_buf[NAME_BYTE_LEN+1]; + char old_db_buf[NAME_LEN+1]; LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; sp_rcontext *ctx; @@ -1527,6 +1535,9 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, "failed to reflect this change in the binary log"); } reset_dynamic(&thd->user_var_events); + /* Forget those values, in case more function calls are binlogged: */ + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; + thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); } } @@ -1999,8 +2010,8 @@ sp_head::set_info(longlong created, longlong modified, void sp_head::set_definer(const char *definer, uint definerlen) { - char user_name_holder[USERNAME_BYTE_LENGTH + 1]; - LEX_STRING user_name= { user_name_holder, USERNAME_BYTE_LENGTH }; + char user_name_holder[USERNAME_LENGTH + 1]; + LEX_STRING user_name= { user_name_holder, USERNAME_LENGTH }; char host_name_holder[HOSTNAME_LENGTH + 1]; LEX_STRING host_name= { host_name_holder, HOSTNAME_LENGTH }; diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 318f0798c5d..c55fc744cd0 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -169,7 +169,7 @@ static byte* acl_entry_get_key(acl_entry *entry,uint *length, } #define IP_ADDR_STRLEN (3+1+3+1+3+1+3) -#define ACL_KEY_LENGTH (IP_ADDR_STRLEN+1+NAME_BYTE_LEN+1+USERNAME_BYTE_LENGTH+1) +#define ACL_KEY_LENGTH (IP_ADDR_STRLEN+1+NAME_LEN+1+USERNAME_LENGTH+1) static DYNAMIC_ARRAY acl_hosts,acl_users,acl_dbs; static MEM_ROOT mem, memex; @@ -312,12 +312,11 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) READ_RECORD read_record_info; my_bool return_val= 1; bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE; - char tmp_name[NAME_BYTE_LEN+1]; + char tmp_name[NAME_LEN+1]; int password_length; DBUG_ENTER("acl_load"); grant_version++; /* Privileges updated */ - mysql_proc_table_exists= 1; // Assume mysql.proc exists acl_cache->clear(1); // Clear locked hostname cache @@ -2401,7 +2400,7 @@ static GRANT_NAME *name_hash_search(HASH *name_hash, const char *user, const char *tname, bool exact) { - char helping [NAME_BYTE_LEN*2+USERNAME_BYTE_LENGTH+3]; + char helping [NAME_LEN*2+USERNAME_LENGTH+3]; uint len; GRANT_NAME *grant_name,*found=0; HASH_SEARCH_STATE state; @@ -3308,7 +3307,7 @@ bool mysql_grant(THD *thd, const char *db, List &list, { List_iterator str_list (list); LEX_USER *Str, *tmp_Str; - char tmp_db[NAME_BYTE_LEN+1]; + char tmp_db[NAME_LEN+1]; bool create_new_users=0; TABLE_LIST tables[2]; DBUG_ENTER("mysql_grant"); @@ -3372,7 +3371,7 @@ bool mysql_grant(THD *thd, const char *db, List &list, { result= TRUE; continue; - } + } if (replace_user_table(thd, tables[0].table, *Str, (!db ? 
rights : 0), revoke_grant, create_new_users, test(thd->variables.sql_mode & @@ -4012,7 +4011,7 @@ err2: bool check_grant_db(THD *thd,const char *db) { Security_context *sctx= thd->security_ctx; - char helping [NAME_BYTE_LEN+USERNAME_BYTE_LENGTH+2]; + char helping [NAME_LEN+USERNAME_LENGTH+2]; uint len; bool error= 1; diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 9f1a0561138..264e3e2b988 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -806,9 +806,9 @@ void field_str::get_opt_type(String *answer, ha_rows total_rows) else if (num_info.decimals) // DOUBLE(%d,%d) sometime { if (num_info.dval > -FLT_MAX && num_info.dval < FLT_MAX) - sprintf(buff, "FLOAT(%d,%d)", num_info.integers, num_info.decimals); + sprintf(buff, "FLOAT(%d,%d)", (num_info.integers + num_info.decimals), num_info.decimals); else - sprintf(buff, "DOUBLE(%d,%d)", num_info.integers, num_info.decimals); + sprintf(buff, "DOUBLE(%d,%d)", (num_info.integers + num_info.decimals), num_info.decimals); } else if (ev_num_info.llval >= -128 && ev_num_info.ullval <= @@ -915,10 +915,10 @@ void field_real::get_opt_type(String *answer, else { if (min_arg >= -FLT_MAX && max_arg <= FLT_MAX) - sprintf(buff, "FLOAT(%d,%d)", (int) max_length - (item->decimals + 1), + sprintf(buff, "FLOAT(%d,%d)", (int) max_length - (item->decimals + 1) + max_notzero_dec_len, max_notzero_dec_len); else - sprintf(buff, "DOUBLE(%d,%d)", (int) max_length - (item->decimals + 1), + sprintf(buff, "DOUBLE(%d,%d)", (int) max_length - (item->decimals + 1) + max_notzero_dec_len, max_notzero_dec_len); answer->append(buff, (uint) strlen(buff)); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 9e3049d433b..a530c7f7fdc 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1461,8 +1461,11 @@ void update_non_unique_table_error(TABLE_LIST *update, */ if (update->view) { + /* Issue the ER_NON_INSERTABLE_TABLE error for an INSERT */ if (update->view == duplicate->view) - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), update->alias, operation); + my_error(!strncmp(operation, "INSERT", 6) ? + ER_NON_INSERTABLE_TABLE : ER_NON_UPDATABLE_TABLE, MYF(0), + update->alias, operation); else my_error(ER_VIEW_PREVENT_UPDATE, MYF(0), (duplicate->view ? duplicate->alias : update->alias), diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 75c2422edf9..ee3b8aa79fe 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2389,7 +2389,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used, tables_used->engine_data)) DBUG_RETURN(0); - if (tables_used->table->s->db_type == &myisammrg_hton) + if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM) { ha_myisammrg *handler = (ha_myisammrg *) tables_used->table->file; MYRG_INFO *file = handler->myrg_info(); @@ -3013,7 +3013,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used, "other non-cacheable table(s)")); DBUG_RETURN(0); } - if (tables_used->table->s->db_type == &myisammrg_hton) + if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM) { ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file; MYRG_INFO *file = handler->myrg_info(); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index e3181fb67cf..3fd0e621422 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -630,10 +630,30 @@ bool THD::store_globals() } -/* Cleanup after a query */ +/* + Cleanup after query. + + SYNOPSIS + THD::cleanup_after_query() + + DESCRIPTION + This function is used to reset thread data to its default state. 
+ + NOTE + This function is not suitable for setting thread data to some + non-default values, as there is only one replication thread, so + different master threads may overwrite data of each other on + slave. +*/ void THD::cleanup_after_query() { + if (!in_sub_stmt) /* stored functions and triggers are a special case */ + { + /* Forget those values, for next binlogger: */ + stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; + auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + } if (first_successful_insert_id_in_cur_stmt > 0) { /* set what LAST_INSERT_ID() will return */ @@ -649,6 +669,7 @@ void THD::cleanup_after_query() where= THD::DEFAULT_WHERE; } + /* Convert a string to another character set @@ -2524,7 +2545,9 @@ my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data, { my_ptrdiff_t const offset= field->is_null(rec_offset) ? def_offset : rec_offset; - ptr= (byte*)field->pack((char *) ptr, field->ptr + offset); + field->move_field_offset(offset); + ptr= (byte*)field->pack((char *) ptr, field->ptr); + field->move_field_offset(-offset); } } return (static_cast(ptr - row_data)); @@ -2609,12 +2632,19 @@ int THD::binlog_update_row(TABLE* table, bool is_trans, my_size_t const after_size= pack_row(table, cols, after_row, after_record); + DBUG_DUMP("before_record", before_record, table->s->reclength); + DBUG_DUMP("after_record", after_record, table->s->reclength); + DBUG_DUMP("before_row", before_row, before_size); + DBUG_DUMP("after_row", after_row, after_size); + Rows_log_event* const ev= binlog_prepare_pending_rows_event(table, server_id, cols, colcnt, before_size + after_size, is_trans, static_cast(0)); - error= (unlikely(!ev)) || ev->add_row_data(before_row, before_size) || + error= + unlikely(!ev) || + ev->add_row_data(before_row, before_size) || ev->add_row_data(after_row, after_size); if (!table->write_row_record) @@ -2661,7 +2691,12 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans, int THD::binlog_flush_pending_rows_event(bool stmt_end) { DBUG_ENTER("THD::binlog_flush_pending_rows_event"); - if (!current_stmt_binlog_row_based || !mysql_bin_log.is_open()) + /* + We shall flush the pending event even if we are not in row-based + mode: it might be the case that we left row-based mode before + flushing anything (e.g., if we have explicitly locked tables). + */ + if (!mysql_bin_log.is_open()) DBUG_RETURN(0); /* @@ -2727,6 +2762,21 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, DBUG_PRINT("enter", ("qtype=%d, query='%s'", qtype, query)); DBUG_ASSERT(query && mysql_bin_log.is_open()); + /* + If we are not in prelocked mode, mysql_unlock_tables() will be + called after this binlog_query(), so we have to flush the pending + rows event with the STMT_END_F set to unlock all tables at the + slave side as well. + + If we are in prelocked mode, the flushing will be done inside the + top-most close_thread_tables(). + */ +#ifdef HAVE_ROW_BASED_REPLICATION + if (this->prelocked_mode == NON_PRELOCKED) + if (int error= binlog_flush_pending_rows_event(TRUE)) + DBUG_RETURN(error); +#endif /*HAVE_ROW_BASED_REPLICATION*/ + switch (qtype) { case THD::MYSQL_QUERY_TYPE: /* @@ -2740,25 +2790,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, case THD::ROW_QUERY_TYPE: #ifdef HAVE_ROW_BASED_REPLICATION if (current_stmt_binlog_row_based) - { - /* - If thd->lock is set, then we are not inside a stored function. 
- In that case, mysql_unlock_tables() will be called after this - binlog_query(), so we have to flush the pending rows event - with the STMT_END_F set to unlock all tables at the slave side - as well. - - We will not flush the pending event, if thd->lock is NULL. - This means that we are inside a stored function or trigger, so - the flushing will be done inside the top-most - close_thread_tables(). - */ -#ifdef HAVE_ROW_BASED_REPLICATION - if (this->lock) - DBUG_RETURN(binlog_flush_pending_rows_event(TRUE)); -#endif /*HAVE_ROW_BASED_REPLICATION*/ DBUG_RETURN(0); - } #endif /* Otherwise, we fall through */ case THD::STMT_QUERY_TYPE: diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc index 2784e71ccae..46fae3ffb99 100644 --- a/sql/sql_cursor.cc +++ b/sql/sql_cursor.cc @@ -45,7 +45,7 @@ class Sensitive_cursor: public Server_side_cursor query_id_t query_id; struct Engine_info { - const handlerton *ht; + handlerton *ht; void *read_view; }; Engine_info ht_info[MAX_HA]; @@ -318,12 +318,12 @@ Sensitive_cursor::post_open(THD *thd) info= &ht_info[0]; for (handlerton **pht= thd->transaction.stmt.ht; *pht; pht++) { - const handlerton *ht= *pht; + handlerton *ht= *pht; close_at_commit|= test(ht->flags & HTON_CLOSE_CURSORS_AT_COMMIT); if (ht->create_cursor_read_view) { info->ht= ht; - info->read_view= (ht->create_cursor_read_view)(); + info->read_view= (ht->create_cursor_read_view)(ht, thd); ++info; } } @@ -433,7 +433,7 @@ Sensitive_cursor::fetch(ulong num_rows) thd->set_n_backup_active_arena(this, &backup_arena); for (info= ht_info; info->read_view ; info++) - (info->ht->set_cursor_read_view)(info->read_view); + (info->ht->set_cursor_read_view)(info->ht, thd, info->read_view); join->fetch_limit+= num_rows; @@ -454,7 +454,7 @@ Sensitive_cursor::fetch(ulong num_rows) reset_thd(thd); for (info= ht_info; info->read_view; info++) - (info->ht->set_cursor_read_view)(0); + (info->ht->set_cursor_read_view)(info->ht, thd, 0); if (error == NESTED_LOOP_CURSOR_LIMIT) { @@ -487,7 +487,7 @@ Sensitive_cursor::close() for (Engine_info *info= ht_info; info->read_view; info++) { - (info->ht->close_cursor_read_view)(info->read_view); + (info->ht->close_cursor_read_view)(info->ht, thd, info->read_view); info->read_view= 0; info->ht= 0; } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 4c91ba6d8ff..ca006faf6e7 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -375,6 +375,7 @@ cleanup: */ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) { + Item *fake_conds= 0; SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_delete"); @@ -400,7 +401,7 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) DBUG_RETURN(TRUE); } } - select_lex->fix_prepare_information(thd, conds); + select_lex->fix_prepare_information(thd, conds, &fake_conds); DBUG_RETURN(FALSE); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index d561fb19953..c765501f669 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -112,7 +112,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, if (!table_list->updatable) { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT"); return -1; } @@ -228,7 +228,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, (table_list->view && check_view_insertability(thd, table_list))) { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, 
"INSERT"); return -1; } @@ -572,7 +572,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, free_underlaid_joins(thd, &thd->lex->select_lex); joins_freed= TRUE; - table->file->ha_release_auto_increment(); /* Now all rows are inserted. Time to update logs and sends response to @@ -591,6 +590,11 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, else #endif { + /* + Do not do this release if this is a delayed insert, it would steal + auto_inc values from the delayed_insert thread as they share TABLE. + */ + table->file->ha_release_auto_increment(); if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error) { table->file->print_error(my_errno,MYF(0)); @@ -975,7 +979,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, update_non_unique_table_error(table_list, "INSERT", duplicate); DBUG_RETURN(TRUE); } - select_lex->fix_prepare_information(thd, &fake_conds); + select_lex->fix_prepare_information(thd, &fake_conds, &fake_conds); select_lex->first_execution= 0; } if (duplic == DUP_UPDATE || duplic == DUP_REPLACE) @@ -1330,13 +1334,16 @@ public: bool query_start_used, ignore, log_query; bool stmt_depends_on_first_successful_insert_id_in_prev_stmt; ulonglong first_successful_insert_id_in_prev_stmt; + ulonglong forced_insert_id; + ulong auto_increment_increment; + ulong auto_increment_offset; timestamp_auto_set_type timestamp_field_type; LEX_STRING query; delayed_row(LEX_STRING const query_arg, enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg) : record(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg), - query(query_arg) + forced_insert_id(0), query(query_arg) {} ~delayed_row() { @@ -1694,6 +1701,7 @@ write_delayed(THD *thd,TABLE *table, enum_duplicates duplic, { delayed_row *row; delayed_insert *di=thd->di; + const Discrete_interval *forced_auto_inc; DBUG_ENTER("write_delayed"); DBUG_PRINT("enter", ("query = '%s' length %u", query.str, query.length)); @@ -1743,6 +1751,17 @@ write_delayed(THD *thd,TABLE *table, enum_duplicates duplic, thd->first_successful_insert_id_in_prev_stmt; row->timestamp_field_type= table->timestamp_field_type; + /* Copy session variables. */ + row->auto_increment_increment= thd->variables.auto_increment_increment; + row->auto_increment_offset= thd->variables.auto_increment_offset; + /* Copy the next forced auto increment value, if any. */ + if ((forced_auto_inc= thd->auto_inc_intervals_forced.get_next())) + { + row->forced_insert_id= forced_auto_inc->minimum(); + DBUG_PRINT("delayed", ("transmitting auto_inc: %lu", + (ulong) row->forced_insert_id)); + } + di->rows.push_back(row); di->stacked_inserts++; di->status=1; @@ -1994,6 +2013,10 @@ pthread_handler_t handle_delayed_insert(void *arg) MYSQL_LOCK *lock=thd->lock; thd->lock=0; pthread_mutex_unlock(&di->mutex); + /* + We need to release next_insert_id before unlocking. This is + enforced by handler::ha_external_lock(). + */ di->table->file->ha_release_auto_increment(); mysql_unlock_tables(thd, lock); di->group_count=0; @@ -2114,14 +2137,43 @@ bool delayed_insert::handle_inserts(void) thd.start_time=row->start_time; thd.query_start_used=row->query_start_used; - /* for the binlog, forget auto_increment ids generated by previous rows */ -// thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + /* + To get the exact auto_inc interval to store in the binlog we must not + use values from the previous interval (of the previous rows). + */ + bool log_query= (row->log_query && row->query.str != NULL); + DBUG_PRINT("delayed", ("query: '%s' length: %u", row->query.str ? 
+ row->query.str : "[NULL]", row->query.length)); + if (log_query) + { + /* + This is the first value of an INSERT statement. + It is the right place to clear a forced insert_id. + This is usually done after the last value of an INSERT statement, + but we won't know this in the insert delayed thread. But before + the first value is sufficiently equivalent to after the last + value of the previous statement. + */ + table->file->ha_release_auto_increment(); + thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + } thd.first_successful_insert_id_in_prev_stmt= row->first_successful_insert_id_in_prev_stmt; thd.stmt_depends_on_first_successful_insert_id_in_prev_stmt= row->stmt_depends_on_first_successful_insert_id_in_prev_stmt; table->timestamp_field_type= row->timestamp_field_type; + /* Copy the session variables. */ + thd.variables.auto_increment_increment= row->auto_increment_increment; + thd.variables.auto_increment_offset= row->auto_increment_offset; + /* Copy a forced insert_id, if any. */ + if (row->forced_insert_id) + { + DBUG_PRINT("delayed", ("received auto_inc: %lu", + (ulong) row->forced_insert_id)); + thd.force_one_auto_inc_interval(row->forced_insert_id); + } + info.ignore= row->ignore; info.handle_duplicates= row->dup; if (info.ignore || @@ -2156,7 +2208,7 @@ bool delayed_insert::handle_inserts(void) table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); } - if (row->log_query && row->query.str != NULL && mysql_bin_log.is_open()) + if (log_query && mysql_bin_log.is_open()) { /* If the query has several rows to insert, only the first row will come @@ -2199,6 +2251,7 @@ bool delayed_insert::handle_inserts(void) /* This should never happen */ table->file->print_error(error,MYF(0)); sql_print_error("%s",thd.net.last_error); + DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop")); goto err; } query_cache_invalidate3(&thd, table, 1); @@ -2241,6 +2294,7 @@ bool delayed_insert::handle_inserts(void) { // This shouldn't happen table->file->print_error(error,MYF(0)); sql_print_error("%s",thd.net.last_error); + DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop")); goto err; } query_cache_invalidate3(&thd, table, 1); @@ -2248,13 +2302,16 @@ bool delayed_insert::handle_inserts(void) DBUG_RETURN(0); err: + DBUG_EXECUTE("error", max_rows= 0;); /* Remove all not used rows */ while ((row=rows.get())) { delete row; thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status); stacked_inserts--; + DBUG_EXECUTE("error", max_rows++;); } + DBUG_PRINT("error", ("dropped %lu rows after an error", max_rows)); thread_safe_increment(delayed_insert_errors, &LOCK_delayed_status); pthread_mutex_lock(&mutex); DBUG_RETURN(1); @@ -2552,7 +2609,6 @@ bool select_insert::send_data(List &values) table->next_number_field->reset(); } } - table->file->ha_release_auto_increment(); DBUG_RETURN(error); } @@ -2626,6 +2682,7 @@ void select_insert::send_error(uint errcode,const char *err) } } ha_rollback_stmt(thd); + table->file->ha_release_auto_increment(); DBUG_VOID_RETURN; } @@ -2676,6 +2733,7 @@ bool select_insert::send_eof() } if ((error2=ha_autocommit_or_rollback(thd,error)) && ! 
error) error=error2; + table->file->ha_release_auto_increment(); if (error) { table->file->print_error(error,MYF(0)); @@ -2767,8 +2825,8 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, tmp_table.s->db_create_options=0; tmp_table.s->blob_ptr_size= portable_sizeof_char_ptr; tmp_table.s->db_low_byte_first= - test(create_info->db_type == &myisam_hton || - create_info->db_type == &heap_hton); + test(create_info->db_type == myisam_hton || + create_info->db_type == heap_hton); tmp_table.null_row=tmp_table.maybe_null=0; while ((item=it++)) diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 0028c41c4a2..d94c45c4bdd 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1722,7 +1722,8 @@ bool st_lex::can_be_merged() unit= unit->next_unit()) { if (unit->first_select()->parent_lex == this && - (unit->item == 0 || unit->item->place() != IN_WHERE)) + (unit->item == 0 || + (unit->item->place() != IN_WHERE && unit->item->place() != IN_ON))) { selects_allow_merge= 0; break; @@ -2170,15 +2171,25 @@ static void fix_prepare_info_in_table_list(THD *thd, TABLE_LIST *tbl) /* - fix some structures at the end of preparation + Save WHERE/HAVING/ON clauses and replace them with disposable copies SYNOPSIS st_select_lex::fix_prepare_information - thd thread handler - conds pointer on conditions which will be used for execution statement + thd thread handler + conds in/out pointer to WHERE condition to be met at execution + having_conds in/out pointer to HAVING condition to be met at execution + + DESCRIPTION + The passed WHERE and HAVING are to be saved for the future executions. + This function saves it, and returns a copy which can be thrashed during + this execution of the statement. By saving/thrashing here we mean only + AND/OR trees. + The function also calls fix_prepare_info_in_table_list that saves all + ON expressions. */ -void st_select_lex::fix_prepare_information(THD *thd, Item **conds) +void st_select_lex::fix_prepare_information(THD *thd, Item **conds, + Item **having_conds) { if (!thd->stmt_arena->is_conventional() && first_execution) { @@ -2188,6 +2199,11 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds) prep_where= *conds; *conds= where= prep_where->copy_andor_structure(thd); } + if (*having_conds) + { + prep_having= *having_conds; + *having_conds= having= prep_having->copy_andor_structure(thd); + } fix_prepare_info_in_table_list(thd, (TABLE_LIST *)table_list.first); } } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index f92d10758b9..a2806a03369 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -682,7 +682,7 @@ public: void print(THD *thd, String *str); static void print_order(String *str, ORDER *order); void print_limit(THD *thd, String *str); - void fix_prepare_information(THD *thd, Item **conds); + void fix_prepare_information(THD *thd, Item **conds, Item **having_conds); /* Destroy the used execution plan (JOIN) of this subtree (this SELECT_LEX and all nested SELECT_LEXes and SELECT_LEX_UNITs). diff --git a/sql/sql_list.h b/sql/sql_list.h index 05f589a2c23..070fd1e3d9a 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -94,9 +94,9 @@ public: inline base_list() { empty(); } inline base_list(const base_list &tmp) :Sql_alloc() { - elements=tmp.elements; - first=tmp.first; - last=tmp.last; + elements= tmp.elements; + first= tmp.first; + last= elements ? 
tmp.last : &first; } inline base_list(bool error) { } inline bool push_back(void *info) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 96f2092bc25..fc7ed7ff673 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1047,8 +1047,8 @@ static int check_connection(THD *thd) char *passwd= strend(user)+1; uint user_len= passwd - user - 1; char *db= passwd; - char db_buff[NAME_BYTE_LEN + 1]; // buffer to store db in utf8 - char user_buff[USERNAME_BYTE_LENGTH + 1]; // buffer to store user in utf8 + char db_buff[NAME_LEN + 1]; // buffer to store db in utf8 + char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8 uint dummy_errors; /* @@ -1724,7 +1724,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, password. New clients send the size (1 byte) + string (not null terminated, so also '\0' for empty string). */ - char db_buff[NAME_BYTE_LEN+1]; // buffer to store db in utf8 + char db_buff[NAME_LEN+1]; // buffer to store db in utf8 char *db= passwd; uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? *passwd++ : strlen(passwd); @@ -4912,6 +4912,19 @@ end_with_restore_list: } append_identifier(thd, &buff, first_table->table_name, first_table->table_name_length); + if (lex->view_list.elements) + { + List_iterator_fast names(lex->view_list); + LEX_STRING *name; + int i; + + for (i= 0; name= names++; i++) + { + buff.append(i ? ", " : "("); + append_identifier(thd, &buff, name->str, name->length); + } + buff.append(')'); + } buff.append(STRING_WITH_LEN(" AS ")); buff.append(first_table->source.str, first_table->source.length); @@ -5830,9 +5843,14 @@ void mysql_reset_thd_for_next_command(THD *thd) DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */ thd->free_list= 0; thd->select_number= 1; + /* + Those two lines below are theoretically unneeded as + THD::cleanup_after_query() should take care of this already. + */ thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); - thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= - thd->query_start_used= 0; + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; + + thd->query_start_used= 0; thd->is_fatal_error= thd->time_zone_used= 0; thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS | SERVER_QUERY_NO_INDEX_USED | @@ -6274,6 +6292,7 @@ bool add_to_list(THD *thd, SQL_LIST &list,Item *item,bool asc) table_options A set of the following bits: TL_OPTION_UPDATING Table will be updated TL_OPTION_FORCE_INDEX Force usage of index + TL_OPTION_ALIAS an alias in multi table DELETE lock_type How table should be locked use_index List of indexed used in USE INDEX ignore_index List of indexed used in IGNORE INDEX @@ -6302,7 +6321,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, if (!table) DBUG_RETURN(0); // End of memory alias_str= alias ? 
alias->str : table->table.str; - if (check_table_name(table->table.str, table->table.length)) + if (!test(table_options & TL_OPTION_ALIAS) && + check_table_name(table->table.str, table->table.length)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str); DBUG_RETURN(0); @@ -7763,7 +7783,6 @@ LEX_USER *get_current_user(THD *thd, LEX_USER *user) SYNOPSIS check_string_length() - cs string charset str string to be checked err_msg error message to be displayed if the string is too long max_length max length @@ -7773,13 +7792,13 @@ LEX_USER *get_current_user(THD *thd, LEX_USER *user) TRUE the passed string is longer than max_length */ -bool check_string_length(CHARSET_INFO *cs, LEX_STRING *str, - const char *err_msg, uint max_length) +bool check_string_length(LEX_STRING *str, const char *err_msg, + uint max_length) { - if (cs->cset->charpos(cs, str->str, str->str + str->length, - max_length) >= str->length) - return FALSE; + if (str->length <= max_length) + return FALSE; my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_length); + return TRUE; } diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index af69c16c7e5..369dcb426f1 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -2037,18 +2037,17 @@ close_file: SYNOPSIS partition_key_modified table TABLE object for which partition fields are set-up - fields A list of the to be modifed + fields Bitmap representing fields to be modified RETURN VALUES TRUE Need special handling of UPDATE FALSE Normal UPDATE handling is ok */ -bool partition_key_modified(TABLE *table, List &fields) +bool partition_key_modified(TABLE *table, const MY_BITMAP *fields) { - List_iterator_fast f(fields); + Field **fld; partition_info *part_info= table->part_info; - Item_field *item_field; DBUG_ENTER("partition_key_modified"); if (!part_info) @@ -2056,9 +2055,8 @@ bool partition_key_modified(TABLE *table, List &fields) if (table->s->db_type->partition_flags && (table->s->db_type->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY)) DBUG_RETURN(FALSE); - f.rewind(); - while ((item_field=(Item_field*) f++)) - if (item_field->field->flags & FIELD_IN_PART_FUNC_FLAG) + for (fld= part_info->full_part_field_array; *fld; fld++) + if (bitmap_is_set(fields, (*fld)->field_index)) DBUG_RETURN(TRUE); DBUG_RETURN(FALSE); } @@ -4677,7 +4675,7 @@ the generated partition syntax in a correct manner. DBUG_PRINT("info", ("partition changed")); *partition_changed= TRUE; } - if (create_info->db_type == &partition_hton) + if (create_info->db_type == partition_hton) part_info->default_engine_type= table->part_info->default_engine_type; else part_info->default_engine_type= create_info->db_type; @@ -4689,7 +4687,7 @@ the generated partition syntax in a correct manner. 
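The bitmap-based partition_key_modified() above only has to test whether any partitioning field's index is set in the set of columns an UPDATE writes to. A self-contained sketch of the same check, using std::bitset and plain field indexes as stand-ins for MY_BITMAP, TABLE and partition_info:

#include <bitset>
#include <vector>
#include <cstdio>

/* Simplified stand-in: each partitioning field is identified by its index
   in the table, mirroring Field::field_index. */
struct PartitionInfo {
  std::vector<int> part_field_indexes;   /* like full_part_field_array */
};

/* True when the UPDATE touches a column used by the partitioning function,
   i.e. the row may have to move to another partition. */
static bool partition_key_modified(const PartitionInfo &part,
                                   const std::bitset<64> &modified_columns)
{
  for (size_t i= 0; i < part.part_field_indexes.size(); i++)
    if (modified_columns.test(part.part_field_indexes[i]))
      return true;
  return false;
}

int main()
{
  PartitionInfo part;
  part.part_field_indexes.push_back(2);   /* table partitioned on column #2 */

  std::bitset<64> update_set;
  update_set.set(1);                      /* UPDATE writes column #1 only   */
  std::printf("%d\n", partition_key_modified(part, update_set));   /* 0 */
  update_set.set(2);                      /* now it also writes column #2   */
  std::printf("%d\n", partition_key_modified(part, update_set));   /* 1 */
  return 0;
}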
if (!is_native_partitioned) { DBUG_ASSERT(create_info->db_type); - create_info->db_type= &partition_hton; + create_info->db_type= partition_hton; } } } diff --git a/sql/sql_partition.h b/sql/sql_partition.h index e34d71dfdc5..68348672d08 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -70,7 +70,7 @@ bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind); char *generate_partition_syntax(partition_info *part_info, uint *buf_length, bool use_sql_alloc, bool show_partition_options); -bool partition_key_modified(TABLE *table, List &fields); +bool partition_key_modified(TABLE *table, const MY_BITMAP *fields); void get_partition_set(const TABLE *table, byte *buf, const uint index, const key_range *key_spec, part_id_range *part_spec); diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 84810fb3324..34fb447792e 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -477,13 +477,6 @@ err: void plugin_deinitialize(struct st_plugin_int *plugin) { - if (plugin_type_deinitialize[plugin->plugin->type] && - (*plugin_type_deinitialize[plugin->plugin->type])(plugin)) - { - sql_print_error("Plugin '%s' of type %s failed deinitialization", - plugin->name.str, plugin_type_names[plugin->plugin->type]); - } - if (plugin->plugin->status_vars) { #ifdef FIX_LATER @@ -504,10 +497,18 @@ void plugin_deinitialize(struct st_plugin_int *plugin) #endif /* FIX_LATER */ } - if (plugin->plugin->deinit) + if (plugin_type_deinitialize[plugin->plugin->type]) + { + if ((*plugin_type_deinitialize[plugin->plugin->type])(plugin)) + { + sql_print_error("Plugin '%s' of type %s failed deinitialization", + plugin->name.str, plugin_type_names[plugin->plugin->type]); + } + } + else if (plugin->plugin->deinit) { DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str)); - if (plugin->plugin->deinit()) + if (plugin->plugin->deinit(NULL)) { DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.", plugin->name.str)); @@ -556,6 +557,27 @@ static int plugin_initialize(struct st_plugin_int *plugin) { DBUG_ENTER("plugin_initialize"); + if (plugin_type_initialize[plugin->plugin->type]) + { + if ((*plugin_type_initialize[plugin->plugin->type])(plugin)) + { + sql_print_error("Plugin '%s' registration as a %s failed.", + plugin->name.str, plugin_type_names[plugin->plugin->type]); + goto err; + } + } + else if (plugin->plugin->init) + { + if (plugin->plugin->init(NULL)) + { + sql_print_error("Plugin '%s' init function returned error.", + plugin->name.str); + goto err; + } + } + + plugin->state= PLUGIN_IS_READY; + if (plugin->plugin->status_vars) { #ifdef FIX_LATER @@ -576,24 +598,6 @@ static int plugin_initialize(struct st_plugin_int *plugin) add_status_vars(plugin->plugin->status_vars); // add_status_vars makes a copy #endif /* FIX_LATER */ } - if (plugin->plugin->init) - { - if (plugin->plugin->init()) - { - sql_print_error("Plugin '%s' init function returned error.", - plugin->name.str); - goto err; - } - } - if (plugin_type_initialize[plugin->plugin->type] && - (*plugin_type_initialize[plugin->plugin->type])(plugin)) - { - sql_print_error("Plugin '%s' registration as a %s failed.", - plugin->name.str, plugin_type_names[plugin->plugin->type]); - goto err; - } - - plugin->state= PLUGIN_IS_READY; DBUG_RETURN(0); err: @@ -755,14 +759,16 @@ void plugin_load(void) while (!(error= read_record_info.read_record(&read_record_info))) { DBUG_PRINT("info", ("init plugin record")); - LEX_STRING name, dl; - name.str= get_field(&mem, table->field[0]); - name.length= strlen(name.str); - dl.str= 
get_field(&mem, table->field[1]); - dl.length= strlen(dl.str); + String str_name, str_dl; + get_field(&mem, table->field[0], &str_name); + get_field(&mem, table->field[1], &str_dl); + + LEX_STRING name= {(char *)str_name.ptr(), str_name.length()}; + LEX_STRING dl= {(char *)str_dl.ptr(), str_dl.length()}; + if (plugin_add(&name, &dl, REPORT_TO_LOG)) - DBUG_PRINT("warning", ("Couldn't load plugin named '%s' with soname '%s'.", - name.str, dl.str)); + sql_print_warning("Couldn't load plugin named '%s' with soname '%s'.", + str_name.c_ptr(), str_dl.c_ptr()); } if (error > 0) sql_print_error(ER(ER_GET_ERRNO), my_errno); @@ -858,8 +864,8 @@ my_bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING DBUG_RETURN(FALSE); deinit: plugin_deinitialize(tmp); -err: plugin_del(tmp); +err: rw_unlock(&THR_LOCK_plugin); DBUG_RETURN(TRUE); } diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 45d3254dad0..789de64da85 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -24,7 +24,7 @@ typedef struct st_slave_info uint32 server_id; uint32 rpl_recovery_rank, master_id; char host[HOSTNAME_LENGTH+1]; - char user[USERNAME_BYTE_LENGTH+1]; + char user[USERNAME_LENGTH+1]; char password[MAX_PASSWORD_LENGTH+1]; uint16 port; THD* thd; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index b3e4bf39a8f..ed91719fd46 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -70,7 +70,7 @@ static int join_tab_cmp_straight(const void* ptr1, const void* ptr2); static void find_best(JOIN *join,table_map rest_tables,uint index, double record_count,double read_time); static uint cache_record_length(JOIN *join,uint index); -static double prev_record_reads(JOIN *join,table_map found_ref); +static double prev_record_reads(JOIN *join, uint idx, table_map found_ref); static bool get_best_combination(JOIN *join); static store_key *get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, @@ -382,12 +382,14 @@ JOIN::prepare(Item ***rref_pointer_array, if ((res= subselect->select_transformer(this)) != Item_subselect::RES_OK) { - select_lex->fix_prepare_information(thd, &conds); + select_lex->fix_prepare_information(thd, &conds, &having); DBUG_RETURN((res == Item_subselect::RES_ERROR)); } } } + select_lex->fix_prepare_information(thd, &conds, &having); + if (having && having->with_sum_func) having->split_sum_func2(thd, ref_pointer_array, all_fields, &having, TRUE); @@ -492,7 +494,6 @@ JOIN::prepare(Item ***rref_pointer_array, if (alloc_func_list()) goto err; - select_lex->fix_prepare_information(thd, &conds); DBUG_RETURN(0); // All OK err: @@ -608,7 +609,6 @@ JOIN::optimize() build_bitmap_for_nested_joins(join_list, 0); sel->prep_where= conds ? conds->copy_andor_structure(thd) : 0; - sel->prep_having= having ? having->copy_andor_structure(thd) : 0; if (arena) thd->restore_active_arena(arena, &backup); @@ -1137,17 +1137,28 @@ JOIN::optimize() tmp_table_param.hidden_field_count= (all_fields.elements - fields_list.elements); + ORDER *tmp_group= ((!simple_group && !procedure && + !(test_flags & TEST_NO_KEY_GROUP)) ? group_list : + (ORDER*) 0); + /* + Pushing LIMIT to the temporary table creation is not applicable + when there is ORDER BY or GROUP BY or there is no GROUP BY, but + there are aggregate functions, because in all these cases we need + all result rows. + */ + ha_rows tmp_rows_limit= ((order == 0 || skip_sort_order || + test(select_options & OPTION_BUFFER_RESULT)) && + !tmp_group && + !thd->lex->current_select->with_sum_func) ? 
+ select_limit : HA_POS_ERROR; + if (!(exec_tmp_table1= create_tmp_table(thd, &tmp_table_param, all_fields, - ((!simple_group && !procedure && - !(test_flags & TEST_NO_KEY_GROUP)) ? - group_list : (ORDER*) 0), + tmp_group, group_list ? 0 : select_distinct, group_list && simple_group, select_options, - (order == 0 || skip_sort_order || - test(select_options & OPTION_BUFFER_RESULT)) ? - select_limit : HA_POS_ERROR, + tmp_rows_limit, (char *) ""))) DBUG_RETURN(1); @@ -1327,7 +1338,7 @@ JOIN::exec() } (void) result->prepare2(); // Currently, this cannot fail. - if (!tables_list) + if (!tables_list && (tables || !select_lex->with_sum_func)) { // Only test of functions if (select_options & SELECT_DESCRIBE) select_describe(this, FALSE, FALSE, FALSE, @@ -1367,7 +1378,12 @@ JOIN::exec() thd->examined_row_count= 0; DBUG_VOID_RETURN; } - thd->limit_found_rows= thd->examined_row_count= 0; + /* + don't reset the found rows count if there're no tables + as FOUND_ROWS() may be called. + */ + if (tables) + thd->limit_found_rows= thd->examined_row_count= 0; if (zero_result_cause) { @@ -1406,7 +1422,8 @@ JOIN::exec() having= tmp_having; select_describe(this, need_tmp, order != 0 && !skip_sort_order, - select_distinct); + select_distinct, + !tables ? "No tables used" : NullS); DBUG_VOID_RETURN; } @@ -3420,6 +3437,7 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key) join->positions[idx].table= table; join->positions[idx].key=key; join->positions[idx].records_read=1.0; /* This is a const table */ + join->positions[idx].ref_depend_map= 0; /* Move the const table as down as possible in best_ref */ JOIN_TAB **pos=join->best_ref+idx+1; @@ -3477,6 +3495,7 @@ best_access_path(JOIN *join, double best= DBL_MAX; double best_time= DBL_MAX; double records= DBL_MAX; + table_map best_ref_depends_map; double tmp; ha_rows rec; @@ -3505,13 +3524,20 @@ best_access_path(JOIN *join, /* Calculate how many key segments of the current key we can use */ start_key= keyuse; - do - { /* for each keypart */ + + do /* For each keypart */ + { uint keypart= keyuse->keypart; table_map best_part_found_ref= 0; double best_prev_record_reads= DBL_MAX; - do + + do /* For each way to access the keypart */ { + + /* + if 1. expression doesn't refer to forward tables + 2. we won't get two ref-or-null's + */ if (!(remaining_tables & keyuse->used_tables) && !(ref_or_null_part && (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL))) @@ -3519,8 +3545,9 @@ best_access_path(JOIN *join, found_part|= keyuse->keypart_map; if (!(keyuse->used_tables & ~join->const_table_map)) const_part|= keyuse->keypart_map; - double tmp= prev_record_reads(join, (found_ref | - keyuse->used_tables)); + + double tmp= prev_record_reads(join, idx, (found_ref | + keyuse->used_tables)); if (tmp < best_prev_record_reads) { best_part_found_ref= keyuse->used_tables & ~join->const_table_map; @@ -3559,7 +3586,7 @@ best_access_path(JOIN *join, Really, there should be records=0.0 (yes!) 
but 1.0 would be probably safer */ - tmp= prev_record_reads(join, found_ref); + tmp= prev_record_reads(join, idx, found_ref); records= 1.0; } else @@ -3574,7 +3601,7 @@ best_access_path(JOIN *join, max_key_part= (uint) ~0; if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) { - tmp = prev_record_reads(join, found_ref); + tmp = prev_record_reads(join, idx, found_ref); records=1.0; } else @@ -3711,7 +3738,30 @@ best_access_path(JOIN *join, { /* Check if we have statistic about the distribution */ if ((records= keyinfo->rec_per_key[max_key_part-1])) + { + /* + Fix for the case where the index statistics is too + optimistic: If + (1) We're considering ref(const) and there is quick select + on the same index, + (2) and that quick select uses more keyparts (i.e. it will + scan equal/smaller interval then this ref(const)) + (3) and E(#rows) for quick select is higher then our + estimate, + Then + We'll use E(#rows) from quick select. + + Q: Why do we choose to use 'ref'? Won't quick select be + cheaper in some cases ? + TODO: figure this out and adjust the plan choice if needed. + */ + if (!found_ref && table->quick_keys.is_set(key) && // (1) + table->quick_key_parts[key] > max_key_part && // (2) + records < (double)table->quick_rows[key]) // (3) + records= (double)table->quick_rows[key]; + tmp= records; + } else { /* @@ -3804,6 +3854,7 @@ best_access_path(JOIN *join, best_records= records; best_key= start_key; best_max_key_part= max_key_part; + best_ref_depends_map= found_ref; } } records= best_records; @@ -3932,6 +3983,8 @@ best_access_path(JOIN *join, best= tmp; records= rows2double(rnd_records); best_key= 0; + /* range/index_merge/ALL/index access method are "independent", so: */ + best_ref_depends_map= 0; } } @@ -3940,6 +3993,7 @@ best_access_path(JOIN *join, join->positions[idx].read_time= best; join->positions[idx].key= best_key; join->positions[idx].table= s; + join->positions[idx].ref_depend_map= best_ref_depends_map; if (!best_key && idx == join->const_tables && @@ -4707,17 +4761,85 @@ cache_record_length(JOIN *join,uint idx) } +/* + Get the number of different row combinations for subset of partial join + + SYNOPSIS + prev_record_reads() + join The join structure + idx Number of tables in the partial join order (i.e. the + partial join order is in join->positions[0..idx-1]) + found_ref Bitmap of tables for which we need to find # of distinct + row combinations. + + DESCRIPTION + Given a partial join order (in join->positions[0..idx-1]) and a subset of + tables within that join order (specified in found_ref), find out how many + distinct row combinations of subset tables will be in the result of the + partial join order. + + This is used as follows: Suppose we have a table accessed with a ref-based + method. The ref access depends on current rows of tables in found_ref. + We want to count # of different ref accesses. We assume two ref accesses + will be different if at least one of access parameters is different. 
+ Example: consider a query + + SELECT * FROM t1, t2, t3 WHERE t1.key=c1 AND t2.key=c2 AND t3.key=t1.field + + and a join order: + t1, ref access on t1.key=c1 + t2, ref access on t2.key=c2 + t3, ref access on t3.key=t1.field + + For t1: n_ref_scans = 1, n_distinct_ref_scans = 1 + For t2: n_ref_scans = records_read(t1), n_distinct_ref_scans=1 + For t3: n_ref_scans = records_read(t1)*records_read(t2) + n_distinct_ref_scans = #records_read(t1) + + The reason for having this function (at least the latest version of it) + is that we need to account for buffering in join execution. + + An edge-case example: if we have a non-first table in join accessed via + ref(const) or ref(param) where there is a small number of different + values of param, then the access will likely hit the disk cache and will + not require any disk seeks. + + The proper solution would be to assume an LRU disk cache of some size, + calculate probability of cache hits, etc. For now we just count + identical ref accesses as one. + + RETURN + Expected number of row combinations +*/ + static double -prev_record_reads(JOIN *join,table_map found_ref) +prev_record_reads(JOIN *join, uint idx, table_map found_ref) { double found=1.0; - found_ref&= ~OUTER_REF_TABLE_BIT; - for (POSITION *pos=join->positions ; found_ref ; pos++) + POSITION *pos_end= join->positions - 1; + for (POSITION *pos= join->positions + idx - 1; pos != pos_end; pos--) { if (pos->table->table->map & found_ref) { - found_ref&= ~pos->table->table->map; - found*=pos->records_read; + found_ref|= pos->ref_depend_map; + /* + For the case of "t1 LEFT JOIN t2 ON ..." where t2 is a const table + with no matching row we will get position[t2].records_read==0. + Actually the size of output is one null-complemented row, therefore + we will use value of 1 whenever we get records_read==0. + + Note + - the above case can't occur if inner part of outer join has more + than one table: table with no matches will not be marked as const. + + - Ideally we should add 1 to records_read for every possible null- + complemented row. We're not doing it because: 1. it will require + non-trivial code and add overhead. 2. The value of records_read + is an inprecise estimate and adding 1 (or, in the worst case, + #max_nested_outer_joins=64-1) will not make it any more precise. + */ + if (pos->records_read) + found*= pos->records_read; } } return found; @@ -6409,29 +6531,30 @@ finish: /* - Check whether an item is a simple equality predicate and if so - create/find a multiple equality for this predicate + Check whether an equality can be used to build multiple equalities SYNOPSIS - check_equality() - item item to check - cond_equal multiple equalities that must hold together with the predicate + check_simple_equality() + left_item left term of the quality to be checked + right_item right term of the equality to be checked + item equality item if the equality originates from a condition + predicate, 0 if the equality is the result of row elimination + cond_equal multiple equalities that must hold together with the equality DESCRIPTION - This function first checks whether an item is a simple equality i.e. - the one that equates a field with another field or a constant - (item=constant_item or item=field_item). - If this is the case the function looks a for a multiple equality + This function first checks whether the equality (left_item=right_item) + is a simple equality i.e. the one that equates a field with another field + or a constant (field=field_item or field=const_item). 
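A standalone model of the counting rule documented for prev_record_reads() above: walk the partial join order backwards, widen found_ref with each matching position's ref_depend_map, and multiply in records_read, treating a zero estimate as one null-complemented row. Position here is a simplified stand-in for the optimizer's POSITION, and the numbers in main() replay the t1/t2/t3 example from the comment:

#include <cstdio>

typedef unsigned long long table_map_t;

/* Simplified POSITION: which table this is, expected rows read per outer
   row combination, and which earlier tables its ref access depends on. */
struct Position {
  table_map_t table_map;       /* single bit identifying the table        */
  double      records_read;
  table_map_t ref_depend_map;
};

/* Expected number of distinct row combinations of the tables in found_ref,
   given the partial join order positions[0..idx-1]. */
static double prev_record_reads(const Position *positions, unsigned idx,
                                table_map_t found_ref)
{
  double found= 1.0;
  for (int i= (int) idx - 1; i >= 0; i--)
  {
    if (positions[i].table_map & found_ref)
    {
      found_ref|= positions[i].ref_depend_map;  /* follow indirect deps     */
      if (positions[i].records_read)            /* 0 = const with no match: */
        found*= positions[i].records_read;      /* count it as one row      */
    }
  }
  return found;
}

int main()
{
  /* t1 ref(const) ~10 rows, t2 ref(const) ~20 rows, t3 ref on t1.field. */
  Position pos[3]= { {1, 10.0, 0}, {2, 20.0, 0}, {4, 1.0, 1} };
  /* t3's ref access depends only on t1, so there are 10 distinct ref
     lookups, not 10*20=200. */
  std::printf("%g\n", prev_record_reads(pos, 2, 1 /* map of t1 */));
  return 0;
}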
+ If this is the case the function looks for a multiple equality in the lists referenced directly or indirectly by cond_equal inferring the given simple equality. If it doesn't find any, it builds a multiple equality that covers the predicate, i.e. the predicate can be inferred - from it. + from this multiple equality. The built multiple equality could be obtained in such a way: create a binary multiple equality equivalent to the predicate, then merge it, if possible, with one of old multiple equalities. This guarantees that the set of multiple equalities covering equality - predicates will - be minimal. + predicates will be minimal. EXAMPLE For the where condition @@ -6449,7 +6572,7 @@ finish: and will transform *cond_equal into (ptr(CE),[Item_equal(f,e)]). NOTES - Now only fields that have the same type defintions (verified by + Now only fields that have the same type definitions (verified by the Field::eq_def method) are placed to the same multiple equalities. Because of this some equality predicates are not eliminated and can be used in the constant propagation procedure. @@ -6482,11 +6605,263 @@ finish: copying would be much more complicated. RETURN - TRUE - if the predicate is a simple equality predicate - FALSE - otherwise + TRUE if the predicate is a simple equality predicate to be used + for building multiple equalities + FALSE otherwise */ -static bool check_equality(Item *item, COND_EQUAL *cond_equal) +static bool check_simple_equality(Item *left_item, Item *right_item, + Item *item, COND_EQUAL *cond_equal) +{ + if (left_item->type() == Item::REF_ITEM && + ((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF) + { + if (((Item_ref*)left_item)->depended_from) + return FALSE; + left_item= left_item->real_item(); + } + if (right_item->type() == Item::REF_ITEM && + ((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF) + { + if (((Item_ref*)right_item)->depended_from) + return FALSE; + right_item= right_item->real_item(); + } + if (left_item->type() == Item::FIELD_ITEM && + right_item->type() == Item::FIELD_ITEM && + !((Item_field*)left_item)->depended_from && + !((Item_field*)right_item)->depended_from) + { + /* The predicate the form field1=field2 is processed */ + + Field *left_field= ((Item_field*) left_item)->field; + Field *right_field= ((Item_field*) right_item)->field; + + if (!left_field->eq_def(right_field)) + return FALSE; + + /* Search for multiple equalities containing field1 and/or field2 */ + bool left_copyfl, right_copyfl; + Item_equal *left_item_equal= + find_item_equal(cond_equal, left_field, &left_copyfl); + Item_equal *right_item_equal= + find_item_equal(cond_equal, right_field, &right_copyfl); + + /* As (NULL=NULL) != TRUE we can't just remove the predicate f=f */ + if (left_field->eq(right_field)) /* f = f */ + return (!(left_field->maybe_null() && !left_item_equal)); + + if (left_item_equal && left_item_equal == right_item_equal) + { + /* + The equality predicate is inference of one of the existing + multiple equalities, i.e the condition is already covered + by upper level equalities + */ + return TRUE; + } + + /* Copy the found multiple equalities at the current level if needed */ + if (left_copyfl) + { + /* left_item_equal of an upper level contains left_item */ + left_item_equal= new Item_equal(left_item_equal); + cond_equal->current_level.push_back(left_item_equal); + } + if (right_copyfl) + { + /* right_item_equal of an upper level contains right_item */ + right_item_equal= new Item_equal(right_item_equal); + 
cond_equal->current_level.push_back(right_item_equal); + } + + if (left_item_equal) + { + /* left item was found in the current or one of the upper levels */ + if (! right_item_equal) + left_item_equal->add((Item_field *) right_item); + else + { + /* Merge two multiple equalities forming a new one */ + left_item_equal->merge(right_item_equal); + /* Remove the merged multiple equality from the list */ + List_iterator li(cond_equal->current_level); + while ((li++) != right_item_equal); + li.remove(); + } + } + else + { + /* left item was not found neither the current nor in upper levels */ + if (right_item_equal) + right_item_equal->add((Item_field *) left_item); + else + { + /* None of the fields was found in multiple equalities */ + Item_equal *item= new Item_equal((Item_field *) left_item, + (Item_field *) right_item); + cond_equal->current_level.push_back(item); + } + } + return TRUE; + } + + { + /* The predicate of the form field=const/const=field is processed */ + Item *const_item= 0; + Item_field *field_item= 0; + if (left_item->type() == Item::FIELD_ITEM && + !((Item_field*)left_item)->depended_from && + right_item->const_item()) + { + field_item= (Item_field*) left_item; + const_item= right_item; + } + else if (right_item->type() == Item::FIELD_ITEM && + !((Item_field*)right_item)->depended_from && + left_item->const_item()) + { + field_item= (Item_field*) right_item; + const_item= left_item; + } + + if (const_item && + field_item->result_type() == const_item->result_type()) + { + bool copyfl; + + if (field_item->result_type() == STRING_RESULT) + { + CHARSET_INFO *cs= ((Field_str*) field_item->field)->charset(); + if (!item) + { + Item_func_eq *eq_item; + if ((eq_item= new Item_func_eq(left_item, right_item))) + return FALSE; + eq_item->set_cmp_func(); + eq_item->quick_fix_field(); + item= eq_item; + } + if ((cs != ((Item_func *) item)->compare_collation()) || + !cs->coll->propagate(cs, 0, 0)) + return FALSE; + } + + Item_equal *item_equal = find_item_equal(cond_equal, + field_item->field, ©fl); + if (copyfl) + { + item_equal= new Item_equal(item_equal); + cond_equal->current_level.push_back(item_equal); + } + if (item_equal) + { + /* + The flag cond_false will be set to 1 after this, if item_equal + already contains a constant and its value is not equal to + the value of const_item. + */ + item_equal->add(const_item); + } + else + { + item_equal= new Item_equal(const_item, field_item); + cond_equal->current_level.push_back(item_equal); + } + return TRUE; + } + } + return FALSE; +} + + +/* + Convert row equalities into a conjunction of regular equalities + + SYNOPSIS + check_row_equality() + left_row left term of the row equality to be processed + right_row right term of the row equality to be processed + cond_equal multiple equalities that must hold together with the predicate + eq_list results of conversions of row equalities that are not simple + enough to form multiple equalities + + DESCRIPTION + The function converts a row equality of the form (E1,...,En)=(E'1,...,E'n) + into a list of equalities E1=E'1,...,En=E'n. For each of these equalities + Ei=E'i the function checks whether it is a simple equality or a row equality. + If it is a simple equality it is used to expand multiple equalities of + cond_equal. If it is a row equality it converted to a sequence of equalities + between row elements. If Ei=E'i is neither a simple equality nor a row + equality the item for this predicate is added to eq_list. 
+ + RETURN + TRUE if conversion has succeeded (no fatal error) + FALSE otherwise +*/ + +static bool check_row_equality(Item *left_row, Item_row *right_row, + COND_EQUAL *cond_equal, List* eq_list) +{ + uint n= left_row->cols(); + for (uint i= 0 ; i < n; i++) + { + bool is_converted; + Item *left_item= left_row->el(i); + Item *right_item= right_row->el(i); + if (left_item->type() == Item::ROW_ITEM && + right_item->type() == Item::ROW_ITEM) + is_converted= check_row_equality((Item_row *) left_item, + (Item_row *) right_item, + cond_equal, eq_list); + else + is_converted= check_simple_equality(left_item, right_item, 0, cond_equal); + + if (!is_converted) + { + Item_func_eq *eq_item; + if (!(eq_item= new Item_func_eq(left_item, right_item))) + return FALSE; + eq_item->set_cmp_func(); + eq_item->quick_fix_field(); + eq_list->push_back(eq_item); + } + } + return TRUE; +} + + +/* + Eliminate row equalities and form multiple equalities predicates + + SYNOPSIS + check_equality() + item predicate to process + cond_equal multiple equalities that must hold together with the predicate + eq_list results of conversions of row equalities that are not simple + enough to form multiple equalities + + DESCRIPTION + This function checks whether the item is a simple equality + i.e. the one that equates a field with another field or a constant + (field=field_item or field=constant_item), or, a row equality. + For a simple equality the function looks for a multiple equality + in the lists referenced directly or indirectly by cond_equal inferring + the given simple equality. If it doesn't find any, it builds/expands + multiple equality that covers the predicate. + Row equalities are eliminated substituted for conjunctive regular equalities + which are treated in the same way as original equality predicates. + + RETURN + TRUE if re-writing rules have been applied + FALSE otherwise, i.e. + if the predicate is not an equality, + or, if the equality is neither a simple one nor a row equality, + or, if the procedure fails by a fatal error. 
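The recursion in check_row_equality() can be modelled outside the server: a row equality (E1,...,En)=(E'1,...,E'n) unfolds element by element, recursing when both elements are rows and emitting one scalar equality otherwise. Expr, scalar() and collect_equalities() below are illustrative names, not the Item classes used above:

#include <cstdio>
#include <string>
#include <vector>

/* Simplified expression node: a scalar column/constant (name set) or a
   row (tuple) of child expressions. */
struct Expr {
  std::string name;
  std::vector<Expr> children;
  bool is_row() const { return !children.empty(); }
};

static Expr scalar(const char *n) { Expr e; e.name= n; return e; }
static Expr row(const Expr &a, const Expr &b)
{ Expr e; e.children.push_back(a); e.children.push_back(b); return e; }
static Expr row(const Expr &a, const Expr &b, const Expr &c)
{ Expr e= row(a, b); e.children.push_back(c); return e; }

/* Unfold (E1,...,En)=(E'1,...,E'n) into scalar equalities, recursing when
   both elements are rows themselves -- the shape of check_row_equality(). */
static bool collect_equalities(const Expr &left, const Expr &right,
                               std::vector<std::string> *out)
{
  if (left.children.size() != right.children.size())
    return false;
  for (size_t i= 0; i < left.children.size(); i++)
  {
    const Expr &l= left.children[i];
    const Expr &r= right.children[i];
    if (l.is_row() && r.is_row())
    {
      if (!collect_equalities(l, r, out))
        return false;
    }
    else
      out->push_back(l.name + " = " + r.name);
  }
  return true;
}

int main()
{
  /* (a, b, (c, d)) = (x, y, (z, w)) */
  Expr left=  row(scalar("a"), scalar("b"), row(scalar("c"), scalar("d")));
  Expr right= row(scalar("x"), scalar("y"), row(scalar("z"), scalar("w")));
  std::vector<std::string> eqs;
  collect_equalities(left, right, &eqs);
  for (size_t i= 0; i < eqs.size(); i++)
    std::printf("%s\n", eqs[i].c_str());   /* a = x, b = y, c = z, d = w */
  return 0;
}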
+*/ + +static bool check_equality(Item *item, COND_EQUAL *cond_equal, + List *eq_list) { if (item->type() == Item::FUNC_ITEM && ((Item_func*) item)->functype() == Item_func::EQ_FUNC) @@ -6494,165 +6869,25 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal) Item *left_item= ((Item_func*) item)->arguments()[0]; Item *right_item= ((Item_func*) item)->arguments()[1]; - if (left_item->type() == Item::REF_ITEM && - ((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF) - { - if (((Item_ref*)left_item)->depended_from) - return FALSE; - left_item= left_item->real_item(); - } - if (right_item->type() == Item::REF_ITEM && - ((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF) - { - if (((Item_ref*)right_item)->depended_from) - return FALSE; - right_item= right_item->real_item(); - } - if (left_item->type() == Item::FIELD_ITEM && - right_item->type() == Item::FIELD_ITEM && - !((Item_field*)left_item)->depended_from && - !((Item_field*)right_item)->depended_from) - { - /* The predicate the form field1=field2 is processed */ - - Field *left_field= ((Item_field*) left_item)->field; - Field *right_field= ((Item_field*) right_item)->field; - - if (!left_field->eq_def(right_field)) - return FALSE; - - if (left_field->eq(right_field)) /* f = f */ - return TRUE; - - /* Search for multiple equalities containing field1 and/or field2 */ - bool left_copyfl, right_copyfl; - Item_equal *left_item_equal= - find_item_equal(cond_equal, left_field, &left_copyfl); - Item_equal *right_item_equal= - find_item_equal(cond_equal, right_field, &right_copyfl); - - if (left_item_equal && left_item_equal == right_item_equal) - { - /* - The equality predicate is inference of one of the existing - multiple equalities, i.e the condition is already covered - by upper level equalities - */ - return TRUE; - } - - /* Copy the found multiple equalities at the current level if needed */ - if (left_copyfl) - { - /* left_item_equal of an upper level contains left_item */ - left_item_equal= new Item_equal(left_item_equal); - cond_equal->current_level.push_back(left_item_equal); - } - if (right_copyfl) - { - /* right_item_equal of an upper level contains right_item */ - right_item_equal= new Item_equal(right_item_equal); - cond_equal->current_level.push_back(right_item_equal); - } - - if (left_item_equal) - { - /* left item was found in the current or one of the upper levels */ - if (! 
right_item_equal) - left_item_equal->add((Item_field *) right_item); - else - { - /* Merge two multiple equalities forming a new one */ - left_item_equal->merge(right_item_equal); - /* Remove the merged multiple equality from the list */ - List_iterator li(cond_equal->current_level); - while ((li++) != right_item_equal); - li.remove(); - } - } - else - { - /* left item was not found neither the current nor in upper levels */ - if (right_item_equal) - right_item_equal->add((Item_field *) left_item); - else - { - /* None of the fields was found in multiple equalities */ - Item_equal *item= new Item_equal((Item_field *) left_item, - (Item_field *) right_item); - cond_equal->current_level.push_back(item); - } - } - return TRUE; - } - - { - /* The predicate of the form field=const/const=field is processed */ - Item *const_item= 0; - Item_field *field_item= 0; - if (left_item->type() == Item::FIELD_ITEM && - !((Item_field*)left_item)->depended_from && - right_item->const_item()) - { - field_item= (Item_field*) left_item; - const_item= right_item; - } - else if (right_item->type() == Item::FIELD_ITEM && - !((Item_field*)right_item)->depended_from && - left_item->const_item()) - { - field_item= (Item_field*) right_item; - const_item= left_item; - } - - if (const_item && - field_item->result_type() == const_item->result_type()) - { - bool copyfl; - - if (field_item->result_type() == STRING_RESULT) - { - CHARSET_INFO *cs= ((Field_str*) field_item->field)->charset(); - if ((cs != ((Item_cond *) item)->compare_collation()) || - !cs->coll->propagate(cs, 0, 0)) - return FALSE; - } - - Item_equal *item_equal = find_item_equal(cond_equal, - field_item->field, ©fl); - if (copyfl) - { - item_equal= new Item_equal(item_equal); - cond_equal->current_level.push_back(item_equal); - } - if (item_equal) - { - /* - The flag cond_false will be set to 1 after this, if item_equal - already contains a constant and its value is not equal to - the value of const_item. - */ - item_equal->add(const_item); - } - else - { - item_equal= new Item_equal(const_item, field_item); - cond_equal->current_level.push_back(item_equal); - } - return TRUE; - } - } - } + if (left_item->type() == Item::ROW_ITEM && + right_item->type() == Item::ROW_ITEM) + return check_row_equality((Item_row *) left_item, + (Item_row *) right_item, + cond_equal, eq_list); + else + return check_simple_equality(left_item, right_item, item, cond_equal); + } return FALSE; } + /* Replace all equality predicates in a condition by multiple equality items SYNOPSIS build_equal_items_for_cond() - cond condition(expression) where to make replacement - inherited path to all inherited multiple equality items + cond condition(expression) where to make replacement + inherited path to all inherited multiple equality items DESCRIPTION At each 'and' level the function detects items for equality predicates @@ -6666,7 +6901,9 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal) The function also traverses the cond tree and and for each field reference sets a pointer to the multiple equality item containing the field, if there is any. If this multiple equality equates fields to a constant the - function replace the field reference by the constant. + function replaces the field reference by the constant in the cases + when the field is not of a string type or when the field reference is + just an argument of a comparison predicate. 
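The multiple-equality bookkeeping described above can be approximated, very roughly, by a union-find over column names where each set may carry one constant: adding a=b merges two classes, adding a=5 attaches the constant, and WHERE a=b AND b=c AND a=5 collapses into the single class the optimizer would write as =(5,a,b,c). Item_equal and COND_EQUAL track far more (collations, nesting levels, contradictions), so this is only a sketch:

#include <cstdio>
#include <map>
#include <string>

struct MultiEqual {
  std::map<std::string, std::string> parent;      /* union-find over columns */
  std::map<std::string, long>        constants;   /* representative -> const */

  std::string find(std::string f)
  {
    if (parent.find(f) == parent.end())
      parent[f]= f;
    while (parent[f] != f)
      f= parent[f];
    return f;
  }

  /* a = b : merge the two equality classes; the constant follows the new
     root (a real implementation would also detect contradicting constants). */
  void add_field_eq(const std::string &a, const std::string &b)
  {
    std::string ra= find(a), rb= find(b);
    if (ra == rb)
      return;
    parent[rb]= ra;
    if (constants.count(rb) && !constants.count(ra))
      constants[ra]= constants[rb];
  }

  void add_const_eq(const std::string &a, long v) { constants[find(a)]= v; }
};

int main()
{
  MultiEqual eq;
  eq.add_field_eq("t1.a", "t2.b");   /* a = b */
  eq.add_field_eq("t2.b", "t3.c");   /* b = c */
  eq.add_const_eq("t1.a", 5);        /* a = 5 */
  /* All three columns now share one representative carrying the constant 5,
     which is what lets b = c be replaced by the multiple equality
     =(5, t1.a, t2.b, t3.c) during optimization. */
  std::printf("%s -> %ld\n", eq.find("t3.c").c_str(),
              eq.constants[eq.find("t3.c")]);
  return 0;
}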
The function also determines the maximum number of members in equality lists of each Item_cond_and object assigning it to cond_equal->max_members of this object and updating accordingly @@ -6717,10 +6954,12 @@ static COND *build_equal_items_for_cond(COND *cond, Item_equal *item_equal; uint members; COND_EQUAL cond_equal; + COND *new_cond; cond_equal.upper_levels= inherited; if (cond->type() == Item::COND_ITEM) { + List eq_list; bool and_level= ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC; List *args= ((Item_cond*) cond)->argument_list(); @@ -6743,7 +6982,7 @@ static COND *build_equal_items_for_cond(COND *cond, structure here because it's restored before each re-execution of any prepared statement/stored procedure. */ - if (check_equality(item, &cond_equal)) + if (check_equality(item, &cond_equal, &eq_list)) li.remove(); } @@ -6790,10 +7029,14 @@ static COND *build_equal_items_for_cond(COND *cond, } } if (and_level) + { + args->concat(&eq_list); args->concat((List *)&cond_equal.current_level); + } } else if (cond->type() == Item::FUNC_ITEM) { + List eq_list; /* If an equality predicate forms the whole and level, we call it standalone equality and it's processed here. @@ -6804,19 +7047,57 @@ static COND *build_equal_items_for_cond(COND *cond, for WHERE a=b AND c=d AND (b=c OR d=5) b=c is replaced by =(a,b,c,d). */ - if (check_equality(cond, &cond_equal) && - (item_equal= cond_equal.current_level.pop())) + if (check_equality(cond, &cond_equal, &eq_list)) { - item_equal->fix_length_and_dec(); - item_equal->update_used_tables(); - return item_equal; + int n= cond_equal.current_level.elements + eq_list.elements; + if (n == 0) + return new Item_int((longlong) 1,1); + else if (n == 1) + { + if ((item_equal= cond_equal.current_level.pop())) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + return item_equal; + } + else + return eq_list.pop(); + } + else + { + /* + Here a new AND level must be created. It can happen only + when a row equality is processed as a standalone predicate. + */ + Item_cond_and *and_cond= new Item_cond_and(eq_list); + and_cond->quick_fix_field(); + List *args= and_cond->argument_list(); + List_iterator_fast it(cond_equal.current_level); + while ((item_equal= it++)) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + members= item_equal->members(); + if (cond_equal.max_members < members) + cond_equal.max_members= members; + } + and_cond->cond_equal= cond_equal; + args->concat((List *)&cond_equal.current_level); + + return and_cond; + } } /* For each field reference in cond, not from equal item predicates, set a pointer to the multiple equality it belongs to (if there is any) + as soon the field is not of a string type or the field reference is + an argument of a comparison predicate. 
*/ - cond= cond->transform(&Item::equal_fields_propagator, - (byte *) inherited); + byte *is_subst_valid= (byte *) 1; + cond= cond->compile(&Item::subst_argument_checker, + &is_subst_valid, + &Item::equal_fields_propagator, + (byte *) inherited); cond->update_used_tables(); } return cond; @@ -7096,7 +7377,7 @@ static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, /* Substitute every field reference in a condition by the best equal field - and eliminate all multiplle equality predicates + and eliminate all multiple equality predicates SYNOPSIS substitute_for_best_equal_field() @@ -8857,7 +9138,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, OPTION_BIG_TABLES || (select_options & TMP_TABLE_FORCE_MYISAM)) { table->file= get_new_handler(share, &table->mem_root, - share->db_type= &myisam_hton); + share->db_type= myisam_hton); if (group && (param->group_parts > table->file->max_key_parts() || param->group_length > table->file->max_key_length())) @@ -8866,7 +9147,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, else { table->file= get_new_handler(share, &table->mem_root, - share->db_type= &heap_hton); + share->db_type= heap_hton); } if (!table->file) goto err; @@ -9027,11 +9308,18 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, if (thd->variables.tmp_table_size == ~(ulong) 0) // No limit share->max_rows= ~(ha_rows) 0; else - share->max_rows= (((share->db_type == &heap_hton) ? + share->max_rows= (((share->db_type == heap_hton) ? min(thd->variables.tmp_table_size, thd->variables.max_heap_table_size) : thd->variables.tmp_table_size)/ share->reclength); set_if_bigger(share->max_rows,1); // For dummy start options + /* + Push the LIMIT clause to the temporary table creation, so that we + materialize only up to 'rows_limit' records instead of all result records. 
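The LIMIT-pushdown rule spelled out in the comments above reduces, roughly, to a small predicate: the temporary table only needs select_limit rows when there is no grouping, no aggregate function and no sort that requires the complete result first; otherwise it must be able to hold everything. HA_POS_ERROR plays the same "no limit" role as in the server; the other names are simplified:

#include <cstdio>

typedef unsigned long long ha_rows_t;
static const ha_rows_t HA_POS_ERROR_ROWS= ~(ha_rows_t) 0;   /* "no limit" */

/* How many rows the temporary table really needs to hold: the LIMIT can
   only be pushed down when every result row is produced independently. */
static ha_rows_t tmp_table_rows_limit(bool has_group_by,
                                      bool has_aggregates,
                                      bool needs_sorting,
                                      ha_rows_t select_limit)
{
  if (!has_group_by && !has_aggregates && !needs_sorting)
    return select_limit;
  return HA_POS_ERROR_ROWS;
}

int main()
{
  /* SELECT * FROM t LIMIT 10        -> materialize at most 10 rows */
  std::printf("%llu\n", tmp_table_rows_limit(false, false, false, 10));
  /* SELECT COUNT(*) FROM t LIMIT 10 -> aggregates need every row, so the
     "no limit" sentinel is returned. */
  std::printf("%llu\n", tmp_table_rows_limit(false, true, false, 10));
  return 0;
}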
+ */ + set_if_smaller(share->max_rows, rows_limit); + param->end_write_records= rows_limit; + keyinfo= param->keyinfo; if (group) @@ -9049,10 +9337,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, keyinfo->rec_per_key=0; keyinfo->algorithm= HA_KEY_ALG_UNDEF; keyinfo->name= (char*) "group_key"; - for (; group ; group=group->next,key_part_info++) + ORDER *cur_group= group; + for (; cur_group ; cur_group= cur_group->next, key_part_info++) { - Field *field=(*group->item)->get_tmp_table_field(); - bool maybe_null=(*group->item)->maybe_null; + Field *field=(*cur_group->item)->get_tmp_table_field(); + bool maybe_null=(*cur_group->item)->maybe_null; key_part_info->null_bit=0; key_part_info->field= field; key_part_info->offset= field->offset(); @@ -9065,8 +9354,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, 0 : FIELDFLAG_BINARY; if (!using_unique_constraint) { - group->buff=(char*) group_buff; - if (!(group->field= field->new_key_field(thd->mem_root,table, + cur_group->buff=(char*) group_buff; + if (!(cur_group->field= field->new_key_field(thd->mem_root,table, (char*) group_buff + test(maybe_null), field->null_ptr, @@ -9084,12 +9373,12 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, key_part_info->null_bit=field->null_bit; key_part_info->null_offset= (uint) (field->null_ptr - (uchar*) table->record[0]); - group->buff++; // Pointer to field data + cur_group->buff++; // Pointer to field data group_buff++; // Skipp null flag } /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */ key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; - group_buff+= group->field->pack_length(); + group_buff+= cur_group->field->pack_length(); } keyinfo->key_length+= key_part_info->length; } @@ -9163,23 +9452,10 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, } } - /* - Push the LIMIT clause to the temporary table creation, so that we - materialize only up to 'rows_limit' records instead of all result records. - This optimization is not applicable when there is GROUP BY or there is - no GROUP BY, but there are aggregate functions, because both must be - computed for all result rows. 
- */ - if (!group && !thd->lex->current_select->with_sum_func) - { - set_if_smaller(table->s->max_rows, rows_limit); - param->end_write_records= rows_limit; - } - if (thd->is_fatal_error) // If end of memory goto err; /* purecov: inspected */ share->db_record_offset= 1; - if (share->db_type == &myisam_hton) + if (share->db_type == myisam_hton) { if (create_myisam_tmp_table(table,param,select_options)) goto err; @@ -9505,7 +9781,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, int write_err; DBUG_ENTER("create_myisam_from_heap"); - if (table->s->db_type != &heap_hton || + if (table->s->db_type != heap_hton || error != HA_ERR_RECORD_FILE_FULL) { table->file->print_error(error,MYF(0)); @@ -9514,9 +9790,9 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, new_table= *table; share= *table->s; new_table.s= &share; - new_table.s->db_type= &myisam_hton; + new_table.s->db_type= myisam_hton; if (!(new_table.file= get_new_handler(&share, &new_table.mem_root, - &myisam_hton))) + myisam_hton))) DBUG_RETURN(1); // End of memory save_proc_info=thd->proc_info; @@ -9700,9 +9976,13 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) table->file->ha_index_init(0, 0); } /* Set up select_end */ - join->join_tab[join->tables-1].next_select= setup_end_select_func(join); + Next_select_func end_select= setup_end_select_func(join); + if (join->tables) + { + join->join_tab[join->tables-1].next_select= end_select; - join_tab=join->join_tab+join->const_tables; + join_tab=join->join_tab+join->const_tables; + } join->send_records=0; if (join->tables == join->const_tables) { @@ -9712,7 +9992,6 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) */ if (!join->conds || join->conds->val_int()) { - Next_select_func end_select= join->join_tab[join->tables-1].next_select; error= (*end_select)(join,join_tab,0); if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT) error= (*end_select)(join,join_tab,1); @@ -9726,6 +10005,8 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) } else { + DBUG_ASSERT(join->tables); + DBUG_ASSERT(join_tab); error= sub_select(join,join_tab,0); if (error == NESTED_LOOP_OK || error == NESTED_LOOP_NO_MORE_ROWS) error= sub_select(join,join_tab,1); @@ -10302,6 +10583,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) tab->info="const row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; + pos->ref_depend_map= 0; if (!table->maybe_null || error > 0) DBUG_RETURN(error); } @@ -10327,6 +10609,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) tab->info="unique row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; + pos->ref_depend_map= 0; if (!table->maybe_null || error > 0) DBUG_RETURN(error); } @@ -12127,7 +12410,7 @@ remove_duplicates(JOIN *join, TABLE *entry,List &fields, Item *having) free_io_cache(entry); // Safety entry->file->info(HA_STATUS_VARIABLE); - if (entry->s->db_type == &heap_hton || + if (entry->s->db_type == heap_hton || (!entry->s->blob_fields && ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->stats.records < thd->variables.sortbuff_size))) @@ -13313,6 +13596,8 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, param->copy_funcs.empty(); for (i= 0; (pos= li++); i++) { + Field *field; + char *tmp; Item *real_pos= pos->real_item(); if (real_pos->type() == Item::FIELD_ITEM) { @@ -13343,17 +13628,25 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, set up save buffer 
and change result_field to point at saved value */ - Field *field= item->field; + field= item->field; item->result_field=field->new_field(thd->mem_root,field->table, 1); - char *tmp=(char*) sql_alloc(field->pack_length()+1); + /* + We need to allocate one extra byte for null handling and + another extra byte to not get warnings from purify in + Field_string::val_int + */ + tmp= (char*) sql_alloc(field->pack_length()+2); if (!tmp) goto err; - if (copy) - { - copy->set(tmp, item->result_field); - item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1); - copy++; - } + if (copy) + { + copy->set(tmp, item->result_field); + item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1); +#ifdef HAVE_purify + copy->to_ptr[copy->from_length]= 0; +#endif + copy++; + } } } else if ((real_pos->type() == Item::FUNC_ITEM || @@ -14371,9 +14664,12 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(new Item_string(table_name_buffer, len, cs)); } else - item_list.push_back(new Item_string(table->alias, - strlen(table->alias), + { + TABLE_LIST *tab=table->pos_in_table_list; + item_list.push_back(new Item_string(tab->alias, + strlen(tab->alias), cs)); + } /* "partitions" column */ if (join->thd->lex->describe & DESCRIBE_PARTITIONS) { @@ -14602,8 +14898,8 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) // drop UNCACHEABLE_EXPLAIN, because it is for internal usage only uint8 uncacheable= (sl->uncacheable & ~UNCACHEABLE_EXPLAIN); sl->type= (((&thd->lex->select_lex)==sl)? - ((thd->lex->all_selects_list != sl) ? - primary_key_name : "SIMPLE"): + (sl->first_inner_unit() || sl->next_select() ? + "PRIMARY" : "SIMPLE"): ((sl == first)? ((sl->linkage == DERIVED_TABLE_TYPE) ? "DERIVED": diff --git a/sql/sql_select.h b/sql/sql_select.h index 3b0c312757d..eb6d2d5d34f 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -176,7 +176,13 @@ enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab, bool */ typedef struct st_position { + /* + The "fanout": number of output rows that will be produced (after + pushed down selection condition is applied) per each row combination of + previous tables. + */ double records_read; + /* Cost accessing the table in course of the entire complete join execution, i.e. cost of one access method use (e.g. 'range' or 'ref' scan ) times @@ -184,7 +190,15 @@ typedef struct st_position */ double read_time; JOIN_TAB *table; + + /* + NULL - 'index' or 'range' or 'index_merge' or 'ALL' access is used. + Other - [eq_]ref[_or_null] access is used. 
Pointer to {t.keypart1 = expr} + */ KEYUSE *key; + + /* If ref-based access is used: bitmap of tables this table depends on */ + table_map ref_depend_map; } POSITION; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 3d2bdd60df7..599bca6cbe1 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -211,6 +211,22 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin, else table->field[8]->set_null(); + switch (plug->license) { + case PLUGIN_LICENSE_GPL: + table->field[9]->store(PLUGIN_LICENSE_GPL_STRING, + strlen(PLUGIN_LICENSE_GPL_STRING), cs); + break; + case PLUGIN_LICENSE_BSD: + table->field[9]->store(PLUGIN_LICENSE_BSD_STRING, + strlen(PLUGIN_LICENSE_BSD_STRING), cs); + break; + default: + table->field[9]->store(PLUGIN_LICENSE_PROPRIETARY_STRING, + strlen(PLUGIN_LICENSE_PROPRIETARY_STRING), cs); + break; + } + table->field[9]->set_notnull(); + return schema_table_store_record(thd, table); } @@ -1995,15 +2011,22 @@ void remove_status_vars(SHOW_VAR *list) } } +inline void make_upper(char *buf) +{ + for (; *buf; buf++) + *buf= my_toupper(system_charset_info, *buf); +} + static bool show_status_array(THD *thd, const char *wild, SHOW_VAR *variables, enum enum_var_type value_type, struct system_status_var *status_var, - const char *prefix, TABLE *table) + const char *prefix, TABLE *table, + bool ucase_names) { char buff[SHOW_VAR_FUNC_BUFF_SIZE], *prefix_end; - /* the variable name should not be longer then 80 characters */ - char name_buffer[80]; + /* the variable name should not be longer than 64 characters */ + char name_buffer[64]; int len; LEX_STRING null_lex_str; SHOW_VAR tmp, *var; @@ -2021,6 +2044,8 @@ static bool show_status_array(THD *thd, const char *wild, { strnmov(prefix_end, variables->name, len); name_buffer[sizeof(name_buffer)-1]=0; /* Safety */ + if (ucase_names) + make_upper(name_buffer); /* if var->type is SHOW_FUNC, call the function. 
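For orientation, a minimal sketch of the pattern that the new ucase_names argument and make_upper() helper add to show_status_array(): recurse through a nested name/value array, compose "prefix_name", and upper-case it before storing, which is what the INFORMATION_SCHEMA GLOBAL/SESSION variable tables request. The DemoVar type and walk_vars() below are hypothetical stand-ins, not MySQL structures; the real function additionally resolves SHOW_FUNC/SHOW_SYS entries and writes into the schema table.

/*
  Editorial sketch only, not part of the patch. Simplified model of the
  SHOW_ARRAY-style recursion with optional upper-casing of composed names.
*/
#include <cctype>
#include <cstdio>
#include <string>

struct DemoVar {
  const char *name;
  const char *value;        /* NULL when this entry nests a sub-array */
  const DemoVar *children;  /* sub-array, terminated by an entry with name == NULL */
};

static void walk_vars(const DemoVar *vars, const std::string &prefix,
                      bool ucase_names) {
  for (; vars->name; vars++) {
    /* compose "prefix_name", like prefix_end/strnmov in show_status_array() */
    std::string name = prefix.empty() ? vars->name : prefix + "_" + vars->name;
    if (ucase_names)
      for (char &c : name)
        c = (char)std::toupper((unsigned char)c);
    if (vars->children)
      walk_vars(vars->children, name, ucase_names);  /* recurse, as for SHOW_ARRAY */
    else
      std::printf("%s = %s\n", name.c_str(), vars->value);
  }
}

int main() {
  static const DemoVar com[] = {{"select", "42", 0}, {"insert", "7", 0}, {0, 0, 0}};
  static const DemoVar top[] = {{"Uptime", "1000", 0}, {"Com", 0, com}, {0, 0, 0}};
  walk_vars(top, "", true);   /* upper-cased output, GLOBAL_VARIABLES style */
  return 0;
}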
@@ -2032,8 +2057,8 @@ static bool show_status_array(THD *thd, const char *wild, SHOW_TYPE show_type=var->type; if (show_type == SHOW_ARRAY) { - show_status_array(thd, wild, (SHOW_VAR *) var->value, - value_type, status_var, name_buffer, table); + show_status_array(thd, wild, (SHOW_VAR *) var->value, value_type, + status_var, name_buffer, table, ucase_names); } else { @@ -2042,7 +2067,7 @@ static bool show_status_array(THD *thd, const char *wild, { char *value=var->value; const char *pos, *end; // We assign a lot of const's - long nr; + if (show_type == SHOW_SYS) { show_type= ((sys_var*) value)->type(); @@ -2124,6 +2149,7 @@ static bool show_status_array(THD *thd, const char *wild, table->field[0]->store(name_buffer, strlen(name_buffer), system_charset_info); table->field[1]->store(pos, (uint32) (end - pos), system_charset_info); + table->field[1]->set_notnull(); if (schema_table_store_record(thd, table)) DBUG_RETURN(TRUE); } @@ -2925,7 +2951,7 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, ha_row_type[(uint) share->row_type], NullS); #ifdef WITH_PARTITION_STORAGE_ENGINE - if (show_table->s->db_type == &partition_hton && + if (show_table->s->db_type == partition_hton && show_table->part_info != NULL && show_table->part_info->no_parts > 0) ptr= strmov(ptr, " partitioned"); @@ -4201,8 +4227,6 @@ static interval_type get_real_interval_type(interval_type i_type) return INTERVAL_SECOND; } -extern LEX_STRING interval_type_to_name[]; - /* Loads an event from mysql.event and copies it's data to a row of @@ -4387,7 +4411,7 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond) const char *wild= lex->wild ? lex->wild->ptr() : NullS; pthread_mutex_lock(&LOCK_global_system_variables); res= show_status_array(thd, wild, init_vars, - lex->option_type, 0, "", tables->table); + lex->option_type, 0, "", tables->table, 0); pthread_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(res); } @@ -4407,7 +4431,8 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond) (SHOW_VAR *)all_status_vars.buffer, OPT_GLOBAL, (lex->option_type == OPT_GLOBAL ? 
- &tmp: thd->initial_status_var), "",tables->table); + &tmp: thd->initial_status_var), + "", tables->table, 0); pthread_mutex_unlock(&LOCK_status); DBUG_RETURN(res); } @@ -4556,6 +4581,21 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list) DBUG_RETURN(0); } break; + case MYSQL_TYPE_DECIMAL: + if (!(item= new Item_decimal((longlong) fields_info->value, false))) + { + DBUG_RETURN(0); + } + item->unsigned_flag= (fields_info->field_length/10000)%10; + item->decimals= fields_info->field_length%10; + item->max_length= (fields_info->field_length/100)%100; + if (item->unsigned_flag == 0) + item->max_length+= 1; + if (item->decimals > 0) + item->max_length+= 1; + item->set_name(fields_info->field_name, + strlen(fields_info->field_name), cs); + break; default: /* this should be changed when Item_empty_string is fixed(in 4.1) */ if (!(item= new Item_empty_string("", 0, cs))) @@ -4975,7 +5015,7 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin, (run_hton_fill_schema_files_args *) arg; handlerton *hton= (handlerton *)plugin->data; if(hton->fill_files_table && hton->state == SHOW_OPTION_YES) - hton->fill_files_table(thd, args->tables, args->cond); + hton->fill_files_table(hton, thd, args->tables, args->cond); return false; } @@ -4995,6 +5035,173 @@ int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond) DBUG_RETURN(0); } +int fill_schema_status(THD *thd, SHOW_VAR *variables, + struct system_status_var *status_var, + const char *prefix, TABLE *table) +{ + SHOW_VAR tmp, *var; + SHOW_TYPE show_type; + LEX_STRING null_lex_str; + char buff[SHOW_VAR_FUNC_BUFF_SIZE]; + char name_buf[64], *name_pos; + int name_len; + DBUG_ENTER("fill_schema_status"); + + null_lex_str.str= 0; + null_lex_str.length= 0; + + name_pos= strnmov(name_buf, prefix, sizeof(name_buf) - 1); + if (*prefix) + *name_pos++= '_'; + name_len= name_buf + sizeof(name_buf) - name_pos; + + for (; variables->name; variables++) + { + strnmov(name_pos, variables->name, name_len); + name_buf[sizeof(name_buf) - 1]= 0; + make_upper(name_buf); + + for (var= variables; var->type == SHOW_FUNC; var= &tmp) + ((mysql_show_var_func)(var->value))(thd, &tmp, buff); + + show_type= var->type; + + if (show_type == SHOW_ARRAY) + { + fill_schema_status(thd, (SHOW_VAR*) var->value, + status_var, name_buf, table); + } + else + { + char *value= var->value; + + restore_record(table, s->default_values); + table->field[0]->store(name_buf, strlen(name_buf), system_charset_info); + + if (show_type == SHOW_SYS) + { + show_type= ((sys_var*) value)->type(); + value= (char*) ((sys_var*) value)->value_ptr(thd, OPT_GLOBAL, + &null_lex_str); + } + + switch (show_type) + { + case SHOW_DOUBLE_STATUS: + value= (char*) status_var + (ulong) value; + table->field[1]->store(*(double*) value); + break; + case SHOW_LONG_STATUS: + value= (char*) status_var + (ulong) value; + /* fall through */ + case SHOW_LONG: + case SHOW_LONG_NOFLUSH: /* the difference lies in refresh_status() */ + table->field[1]->store((longlong) *(long*) value, false); + break; + case SHOW_LONGLONG: + table->field[1]->store(*(longlong*) value, false); + break; + case SHOW_HA_ROWS: + table->field[1]->store((longlong) *(ha_rows*) value, false); + break; + case SHOW_BOOL: + table->field[1]->store((longlong) *(bool*) value, false); + break; + case SHOW_MY_BOOL: + table->field[1]->store((longlong) *(my_bool*) value, false); + break; + case SHOW_INT: + table->field[1]->store((longlong) *(uint32*) value, false); + break; + case SHOW_HAVE: /* always displayed as 0 */ + 
table->field[1]->store((longlong) 0, false); + break; + case SHOW_CHAR_PTR: + value= *(char**) value; + /* fall through */ + case SHOW_CHAR: /* always displayed as 0 */ + table->field[1]->store((longlong) 0, false); + break; + case SHOW_KEY_CACHE_LONG: + value= (char*) dflt_key_cache + (ulong) value; + table->field[1]->store((longlong) *(long*) value, false); + break; + case SHOW_KEY_CACHE_LONGLONG: + value= (char*) dflt_key_cache + (ulong) value; + table->field[1]->store(*(longlong*) value, false); + break; + case SHOW_UNDEF: /* always displayed as 0 */ + table->field[1]->store((longlong) 0, false); + break; + case SHOW_SYS: /* cannot happen */ + default: + DBUG_ASSERT(0); + break; + } + + table->field[1]->set_notnull(); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); + } + } + + DBUG_RETURN(0); +} + +int fill_schema_global_status(THD *thd, TABLE_LIST *tables, COND *cond) +{ + STATUS_VAR tmp; + int res= 0; + DBUG_ENTER("fill_schema_global_status"); + + pthread_mutex_lock(&LOCK_status); + calc_sum_of_all_status(&tmp); + res= fill_schema_status(thd, (SHOW_VAR*) all_status_vars.buffer, + &tmp, "", tables->table); + pthread_mutex_unlock(&LOCK_status); + + DBUG_RETURN(res); +} + +int fill_schema_session_status(THD *thd, TABLE_LIST *tables, COND *cond) +{ + int res= 0; + DBUG_ENTER("fill_schema_session_status"); + + pthread_mutex_lock(&LOCK_status); + res= fill_schema_status(thd, (SHOW_VAR*) all_status_vars.buffer, + &thd->status_var, "", tables->table); + pthread_mutex_unlock(&LOCK_status); + + DBUG_RETURN(res); +} + +int fill_schema_global_variables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + int res= 0; + DBUG_ENTER("fill_schema_global_variables"); + + pthread_mutex_lock(&LOCK_global_system_variables); + res= show_status_array(thd, "", init_vars, OPT_GLOBAL, + NULL, "", tables->table, 1); + pthread_mutex_unlock(&LOCK_global_system_variables); + + DBUG_RETURN(res); +} + +int fill_schema_session_variables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + int res= 0; + DBUG_ENTER("fill_schema_session_variables"); + + pthread_mutex_lock(&LOCK_global_system_variables); + res= show_status_array(thd, "", init_vars, OPT_SESSION, + NULL, "", tables->table, 1); + pthread_mutex_unlock(&LOCK_global_system_variables); + + DBUG_RETURN(res); +} + ST_FIELD_INFO schema_fields_info[]= { {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, @@ -5347,6 +5554,22 @@ ST_FIELD_INFO variables_fields_info[]= }; +ST_FIELD_INFO status_fields_info[]= +{ + {"VARIABLE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Variable_name"}, + {"VARIABLE_VALUE", 2207, MYSQL_TYPE_DECIMAL, 0, 0, "Value"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO system_variables_fields_info[]= +{ + {"VARIABLE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Variable_name"}, + {"VARIABLE_VALUE", 65535, MYSQL_TYPE_STRING, 0, 1, "Value"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + ST_FIELD_INFO processlist_fields_info[]= { {"ID", 4, MYSQL_TYPE_LONG, 0, 0, "Id"}, @@ -5372,6 +5595,7 @@ ST_FIELD_INFO plugin_fields_info[]= {"PLUGIN_LIBRARY_VERSION", 20, MYSQL_TYPE_STRING, 0, 1, 0}, {"PLUGIN_AUTHOR", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"PLUGIN_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0}, + {"PLUGIN_LICENSE", 80, MYSQL_TYPE_STRING, 0, 1, "License"}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5459,6 +5683,10 @@ ST_SCHEMA_TABLE schema_tables[]= Events::fill_schema_events, make_old_format, 0, -1, -1, 0}, {"FILES", files_fields_info, create_schema_table, fill_schema_files, 0, 0, -1, -1, 0}, + {"GLOBAL_STATUS", status_fields_info, 
create_schema_table, + fill_schema_global_status, make_old_format, 0, -1, -1, 0}, + {"GLOBAL_VARIABLES", system_variables_fields_info, create_schema_table, + fill_schema_global_variables, make_old_format, 0, -1, -1, 0}, {"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table, get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0}, {"OPEN_TABLES", open_tables_fields_info, create_schema_table, @@ -5478,6 +5706,10 @@ ST_SCHEMA_TABLE schema_tables[]= fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0}, {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table, fill_schema_schema_privileges, 0, 0, -1, -1, 0}, + {"SESSION_STATUS", status_fields_info, create_schema_table, + fill_schema_session_status, make_old_format, 0, -1, -1, 0}, + {"SESSION_VARIABLES", system_variables_fields_info, create_schema_table, + fill_schema_session_variables, make_old_format, 0, -1, -1, 0}, {"STATISTICS", stat_fields_info, create_schema_table, get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0}, {"STATUS", variables_fields_info, create_schema_table, fill_status, diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 02de7abb674..6f8deab220e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1628,12 +1628,12 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, (!my_strcasecmp(system_charset_info, table->table_name, "slow_log") && opt_slow_log && logger.is_slow_log_table_enabled()))) { - my_error(ER_CANT_DROP_LOG_TABLE, MYF(0)); + my_error(ER_BAD_LOG_STATEMENT, MYF(0), "DROP"); DBUG_RETURN(1); } } - if (lock_table_names(thd, tables)) + if (!drop_temporary && lock_table_names(thd, tables)) DBUG_RETURN(1); /* Don't give warnings for not found errors, as we already generate notes */ @@ -1818,7 +1818,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, } } - unlock_table_names(thd, tables, (TABLE_LIST*) 0); + if (!drop_temporary) + unlock_table_names(thd, tables, (TABLE_LIST*) 0); thd->no_warnings_for_error= 0; DBUG_RETURN(error); } @@ -3276,7 +3277,7 @@ bool mysql_create_table_internal(THD *thd, goto err; } } - if ((part_engine_type == &partition_hton) && + if ((part_engine_type == partition_hton) && part_info->default_engine_type) { /* @@ -3319,7 +3320,7 @@ bool mysql_create_table_internal(THD *thd, part_info->part_info_len= syntax_len; if ((!(engine_type->partition_flags && engine_type->partition_flags() & HA_CAN_PARTITION)) || - create_info->db_type == &partition_hton) + create_info->db_type == partition_hton) { /* The handler assigned to the table cannot handle partitioning. 
@@ -3328,7 +3329,7 @@ bool mysql_create_table_internal(THD *thd, DBUG_PRINT("info", ("db_type: %d", ha_legacy_type(create_info->db_type))); delete file; - create_info->db_type= &partition_hton; + create_info->db_type= partition_hton; if (!(file= get_ha_partition(part_info))) { DBUG_RETURN(TRUE); @@ -5179,7 +5180,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, (table_kind == SLOW_LOG && opt_slow_log && logger.is_slow_log_table_enabled())) { - my_error(ER_CANT_ALTER_LOG_TABLE, MYF(0)); + my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER"); DBUG_RETURN(TRUE); } @@ -5187,10 +5188,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if ((table_kind == GENERAL_LOG || table_kind == SLOW_LOG) && (lex_create_info->used_fields & HA_CREATE_USED_ENGINE) && (!lex_create_info->db_type || /* unknown engine */ - !(lex_create_info->db_type->db_type == DB_TYPE_MYISAM || - lex_create_info->db_type->db_type == DB_TYPE_CSV_DB))) + !(lex_create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES))) { - my_error(ER_BAD_LOG_ENGINE, MYF(0)); + my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0)); DBUG_RETURN(TRUE); } } @@ -6779,7 +6779,7 @@ static bool check_engine(THD *thd, const char *table_name, *new_engine= 0; return TRUE; } - *new_engine= &myisam_hton; + *new_engine= myisam_hton; } return FALSE; } diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc index 13dfb491af4..470fa5bc862 100644 --- a/sql/sql_tablespace.cc +++ b/sql/sql_tablespace.cc @@ -21,7 +21,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) { int error= HA_ADMIN_NOT_IMPLEMENTED; - const handlerton *hton= ts_info->storage_engine; + handlerton *hton= ts_info->storage_engine; DBUG_ENTER("mysql_alter_tablespace"); /* @@ -42,7 +42,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) if (hton->alter_tablespace) { - if ((error= hton->alter_tablespace(thd, ts_info))) + if ((error= hton->alter_tablespace(hton, thd, ts_info))) { if (error == HA_ADMIN_NOT_IMPLEMENTED) { diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index b2464745f7c..09576f5e523 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -116,11 +116,6 @@ public: bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER]); } - bool has_before_update_triggers() - { - return test(bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE]); - } - void set_table(TABLE *new_table); void mark_fields_used(trg_event_type event); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index b62e80edfab..51ec287a6cb 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -25,7 +25,7 @@ #include "sp_head.h" #include "sql_trigger.h" -static bool safe_update_on_fly(JOIN_TAB *join_tab, List *fields); +static bool safe_update_on_fly(JOIN_TAB *join_tab); /* Return 0 if row hasn't changed */ @@ -271,13 +271,16 @@ int mysql_update(THD *thd, } } init_ftfuncs(thd, select_lex, 1); + + table->mark_columns_needed_for_update(); + /* Check if we are modifying a key that we are used to search with */ if (select && select->quick) { used_index= select->quick->index; used_key_is_modified= (!select->quick->unique_key_range() && - select->quick->check_if_keys_used(&fields)); + select->quick->is_keys_used(table->write_set)); } else { @@ -285,12 +288,13 @@ int mysql_update(THD *thd, if (used_index == MAX_KEY) // no index for sort order used_index= table->file->key_used_on_scan; if (used_index != MAX_KEY) - used_key_is_modified= check_if_key_used(table, used_index, fields); + used_key_is_modified= is_key_used(table, used_index, table->write_set); } + #ifdef WITH_PARTITION_STORAGE_ENGINE if 
(used_key_is_modified || order || - partition_key_modified(table, fields)) + partition_key_modified(table, table->write_set)) #else if (used_key_is_modified || order) #endif @@ -449,8 +453,6 @@ int mysql_update(THD *thd, MODE_STRICT_ALL_TABLES))); will_batch= !table->file->start_bulk_update(); - table->mark_columns_needed_for_update(); - /* We can use compare_record() to optimize away updates if the table handler is returning all columns OR if @@ -725,6 +727,7 @@ err: bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, Item **conds, uint order_num, ORDER *order) { + Item *fake_conds= 0; TABLE *table= table_list->table; TABLE_LIST tables; List all_fields; @@ -764,7 +767,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(TRUE); } } - select_lex->fix_prepare_information(thd, conds); + select_lex->fix_prepare_information(thd, conds, &fake_conds); DBUG_RETURN(FALSE); } @@ -1214,9 +1217,11 @@ multi_update::initialize_tables(JOIN *join) TMP_TABLE_PARAM *tmp_param; table->mark_columns_needed_for_update(); + if (ignore) + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); if (table == main_table) // First table in join { - if (safe_update_on_fly(join->join_tab, &temp_fields)) + if (safe_update_on_fly(join->join_tab)) { table_to_update= main_table; // Update table on the fly continue; @@ -1274,7 +1279,6 @@ multi_update::initialize_tables(JOIN *join) SYNOPSIS safe_update_on_fly join_tab How table is used in join - fields Fields that are updated NOTES We can update the first table in join on the fly if we know that @@ -1287,9 +1291,8 @@ multi_update::initialize_tables(JOIN *join) - We are doing a range scan and we don't update the scan key or the primary key for a clustered table handler. - When checking for above cases we also should take into account that - BEFORE UPDATE trigger potentially may change value of any field in row - being updated. + This function gets information about fields to be updated from + the TABLE::write_set bitmap. WARNING This code is a bit dependent of how make_join_readinfo() works. 
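As a rough illustration of the check that replaces the old field-list logic here (is_key_used() against TABLE::write_set): updating the first table "on the fly" is only safe while scanning a key none of whose columns are being written. The KeyDef type and field numbering below are simplified assumptions for the sketch, not MySQL's actual structures.

/*
  Editorial sketch only, not part of the patch. The write set is a bitmap of
  columns the UPDATE assigns to; a key is "used" if any of its parts is set.
*/
#include <bitset>
#include <cstdio>
#include <vector>

const std::size_t MAX_FIELDS = 64;

struct KeyDef {
  std::vector<unsigned> key_part_fields;  /* field numbers the key covers */
};

static bool key_is_updated(const KeyDef &key,
                           const std::bitset<MAX_FIELDS> &write_set) {
  for (unsigned f : key.key_part_fields)
    if (write_set.test(f))
      return true;   /* the scan key would be modified -> not safe */
  return false;
}

int main() {
  KeyDef primary;
  primary.key_part_fields = {0, 1};     /* key over fields 0 and 1 */
  std::bitset<MAX_FIELDS> write_set;
  write_set.set(2);                     /* the UPDATE writes field 2 only */
  std::printf("safe to update on the fly: %s\n",
              key_is_updated(primary, write_set) ? "no" : "yes");
  return 0;
}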
@@ -1299,7 +1302,7 @@ multi_update::initialize_tables(JOIN *join) 1 Safe to update */ -static bool safe_update_on_fly(JOIN_TAB *join_tab, List *fields) +static bool safe_update_on_fly(JOIN_TAB *join_tab) { TABLE *table= join_tab->table; switch (join_tab->type) { @@ -1309,21 +1312,15 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List *fields) return TRUE; // At most one matching row case JT_REF: case JT_REF_OR_NULL: - return !check_if_key_used(table, join_tab->ref.key, *fields) && - !(table->triggers && - table->triggers->has_before_update_triggers()); + return !is_key_used(table, join_tab->ref.key, table->write_set); case JT_ALL: /* If range search on index */ if (join_tab->quick) - return !join_tab->quick->check_if_keys_used(fields) && - !(table->triggers && - table->triggers->has_before_update_triggers()); + return !join_tab->quick->is_keys_used(table->write_set); /* If scanning in clustered key */ if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) && table->s->primary_key < MAX_KEY) - return !check_if_key_used(table, table->s->primary_key, *fields) && - !(table->triggers && - table->triggers->has_before_update_triggers()); + return !is_key_used(table, table->s->primary_key, table->write_set); return TRUE; default: break; // Avoid compler warning @@ -1336,7 +1333,11 @@ multi_update::~multi_update() { TABLE_LIST *table; for (table= update_tables ; table; table= table->next_local) + { table->table->no_keyread= table->table->no_cache= 0; + if (ignore) + table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + } if (tmp_tables) { diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 187d289cb16..7f6d935ff5e 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1603,7 +1603,7 @@ bool insert_view_fields(THD *thd, List *list, TABLE_LIST *view) list->push_back(fld); else { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), view->alias, "INSERT"); + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), view->alias, "INSERT"); DBUG_RETURN(TRUE); } } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index d4e0c5ed907..ab43acde2a5 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -797,8 +797,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); predicate bit_expr bit_term bit_factor value_expr term factor table_wild simple_expr udf_expr expr_or_default set_expr_or_default interval_expr - param_marker singlerow_subselect singlerow_subselect_init - exists_subselect exists_subselect_init geometry_function + param_marker geometry_function signed_literal now_or_signed_literal opt_escape sp_opt_default simple_ident_nospvar simple_ident_q @@ -864,7 +863,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type internal_variable_name -%type in_subselect in_subselect_init +%type subselect subselect_init get_select_lex %type comp_op @@ -1354,7 +1353,7 @@ event_tail: Lex->sql_command= SQLCOM_CREATE_EVENT; Lex->expr_allows_subselect= TRUE; } - + ; ev_schedule_time: EVERY_SYM expr interval { @@ -1367,7 +1366,7 @@ ev_schedule_time: EVERY_SYM expr interval { Lex->event_parse_data->item_execute_at= $2; } - ; + ; opt_ev_status: /* empty */ { $$= 0; } | ENABLE_SYM @@ -5564,12 +5563,14 @@ select_paren: yyerror(ER(ER_SYNTAX_ERROR)); YYABORT; } - if (sel->linkage == UNION_TYPE && - !sel->master_unit()->first_select()->braces) - { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; - } + if (sel->linkage == UNION_TYPE && + !sel->master_unit()->first_select()->braces && + sel->master_unit()->first_select()->linkage == + UNION_TYPE) + { + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } /* select in 
braces, can't contain global parameters */ if (sel->master_unit()->fake_select_lex) sel->master_unit()->global_parameters= @@ -5827,37 +5828,37 @@ bool_pri: | bool_pri EQUAL_SYM predicate { $$= new Item_func_equal($1,$3); } | bool_pri comp_op predicate %prec EQ { $$= (*$2)(0)->create($1,$3); } - | bool_pri comp_op all_or_any in_subselect %prec EQ - { $$= all_any_subquery_creator($1, $2, $3, $4); } + | bool_pri comp_op all_or_any '(' subselect ')' %prec EQ + { $$= all_any_subquery_creator($1, $2, $3, $5); } | predicate ; predicate: - bit_expr IN_SYM '(' expr_list ')' - { - if ($4->elements == 1) - $$= new Item_func_eq($1, $4->head()); - else - { - $4->push_front($1); - $$= new Item_func_in(*$4); - } - } - | bit_expr not IN_SYM '(' expr_list ')' + bit_expr IN_SYM '(' subselect ')' + { $$= new Item_in_subselect($1, $4); } + | bit_expr not IN_SYM '(' subselect ')' + { $$= negate_expression(YYTHD, new Item_in_subselect($1, $5)); } + | bit_expr IN_SYM '(' expr ')' { - if ($5->elements == 1) - $$= new Item_func_ne($1, $5->head()); - else - { - $5->push_front($1); - Item_func_in *item = new Item_func_in(*$5); + $$= new Item_func_eq($1, $4); + } + | bit_expr IN_SYM '(' expr ',' expr_list ')' + { + $6->push_front($4); + $6->push_front($1); + $$= new Item_func_in(*$6); + } + | bit_expr not IN_SYM '(' expr ')' + { + $$= new Item_func_ne($1, $5); + } + | bit_expr not IN_SYM '(' expr ',' expr_list ')' + { + $7->push_front($5); + $7->push_front($1); + Item_func_in *item = new Item_func_in(*$7); item->negate(); $$= item; - } } - | bit_expr IN_SYM in_subselect - { $$= new Item_in_subselect($1, $3); } - | bit_expr not IN_SYM in_subselect - { $$= negate_expression(YYTHD, new Item_in_subselect($1, $4)); } | bit_expr BETWEEN_SYM bit_expr AND_SYM predicate { $$= new Item_func_between($1,$3,$5); } | bit_expr not BETWEEN_SYM bit_expr AND_SYM predicate @@ -5979,6 +5980,10 @@ simple_expr: | '-' simple_expr %prec NEG { $$= new Item_func_neg($2); } | '~' simple_expr %prec NEG { $$= new Item_func_bit_neg($2); } | not2 simple_expr %prec NEG { $$= negate_expression(YYTHD, $2); } + | '(' subselect ')' + { + $$= new Item_singlerow_subselect($2); + } | '(' expr ')' { $$= $2; } | '(' expr ',' expr_list ')' { @@ -5990,8 +5995,10 @@ simple_expr: $5->push_front($3); $$= new Item_row(*$5); } - | EXISTS exists_subselect { $$= $2; } - | singlerow_subselect { $$= $1; } + | EXISTS '(' subselect ')' + { + $$= new Item_exists_subselect($3); + } | '{' ident expr '}' { $$= $3; } | MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')' { $2->push_front($5); @@ -6840,11 +6847,13 @@ join_table: /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $3)) YYABORT; + Select->parsing_place= IN_ON; } expr { add_join_on($3,$6); Lex->pop_context(); + Select->parsing_place= NO_MATTER; } | table_ref STRAIGHT_JOIN table_factor ON @@ -6853,12 +6862,14 @@ join_table: /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $3)) YYABORT; + Select->parsing_place= IN_ON; } expr { $3->straight=1; add_join_on($3,$6); Lex->pop_context(); + Select->parsing_place= NO_MATTER; } | table_ref normal_join table_ref USING @@ -6882,6 +6893,7 @@ join_table: /* Change the current name resolution context to a local context. 
*/ if (push_new_name_resolution_context(YYTHD, $1, $5)) YYABORT; + Select->parsing_place= IN_ON; } expr { @@ -6889,6 +6901,7 @@ join_table: Lex->pop_context(); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; + Select->parsing_place= NO_MATTER; } | table_ref LEFT opt_outer JOIN_SYM table_factor { @@ -6913,6 +6926,7 @@ join_table: /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $5)) YYABORT; + Select->parsing_place= IN_ON; } expr { @@ -6921,6 +6935,7 @@ join_table: YYABORT; add_join_on($$, $8); Lex->pop_context(); + Select->parsing_place= NO_MATTER; } | table_ref RIGHT opt_outer JOIN_SYM table_factor { @@ -8022,14 +8037,17 @@ table_wild_one: ident opt_wild opt_table_alias { if (!Select->add_table_to_list(YYTHD, new Table_ident($1), $3, - TL_OPTION_UPDATING, Lex->lock_option)) + TL_OPTION_UPDATING | + TL_OPTION_ALIAS, Lex->lock_option)) YYABORT; } | ident '.' ident opt_wild opt_table_alias { if (!Select->add_table_to_list(YYTHD, new Table_ident(YYTHD, $1, $3, 0), - $5, TL_OPTION_UPDATING, + $5, + TL_OPTION_UPDATING | + TL_OPTION_ALIAS, Lex->lock_option)) YYABORT; } @@ -8660,6 +8678,9 @@ load_data: FROM MASTER_SYM { Lex->sql_command = SQLCOM_LOAD_MASTER_DATA; + WARN_DEPRECATED(yythd, "5.2", "LOAD DATA FROM MASTER", + "mysqldump or future " + "BACKUP/RESTORE DATABASE facility"); }; opt_local: @@ -9224,7 +9245,7 @@ user: $$->host.str= (char *) "%"; $$->host.length= 1; - if (check_string_length(system_charset_info, &$$->user, + if (check_string_length(&$$->user, ER(ER_USERNAME), USERNAME_LENGTH)) YYABORT; } @@ -9235,9 +9256,9 @@ user: YYABORT; $$->user = $1; $$->host=$3; - if (check_string_length(system_charset_info, &$$->user, + if (check_string_length(&$$->user, ER(ER_USERNAME), USERNAME_LENGTH) || - check_string_length(&my_charset_latin1, &$$->host, + check_string_length(&$$->host, ER(ER_HOSTNAME), HOSTNAME_LENGTH)) YYABORT; } @@ -9297,6 +9318,7 @@ keyword: | UNICODE_SYM {} | UNINSTALL_SYM {} | XA_SYM {} + | UPGRADE_SYM {} ; /* @@ -10657,49 +10679,38 @@ union_option: | ALL { $$=0; } ; -singlerow_subselect: - subselect_start singlerow_subselect_init - subselect_end - { - $$= $2; - }; +subselect: + SELECT_SYM subselect_start subselect_init subselect_end + { + $$= $3; + } + | '(' subselect_start subselect ')' + { + LEX *lex= Lex; + THD *thd= YYTHD; + /* + note that a local variable can't be used for + $3 as it's used in local variable construction + and some compilers can't guarantee the order + in which the local variables are initialized. + */ + List_iterator it($3->item_list); + Item *item; + /* + we must fill the items list for the "derived table".
+ */ + while ((item= it++)) + add_item_to_list(thd, item); + } + union_clause subselect_end { $$= $3; }; -singlerow_subselect_init: - select_init2 - { - $$= new Item_singlerow_subselect(Lex->current_select-> - master_unit()->first_select()); - }; - -exists_subselect: - subselect_start exists_subselect_init - subselect_end - { - $$= $2; - }; - -exists_subselect_init: - select_init2 - { - $$= new Item_exists_subselect(Lex->current_select->master_unit()-> - first_select()); - }; - -in_subselect: - subselect_start in_subselect_init - subselect_end - { - $$= $2; - }; - -in_subselect_init: +subselect_init: select_init2 { $$= Lex->current_select->master_unit()->first_select(); }; subselect_start: - '(' SELECT_SYM { LEX *lex=Lex; if (!lex->expr_allows_subselect) @@ -10707,12 +10718,18 @@ subselect_start: yyerror(ER(ER_SYNTAX_ERROR)); YYABORT; } + /* + we are making a "derived table" for the parenthesis + as we need to have a lex level to fit the union + after the parenthesis, e.g. + (SELECT .. ) UNION ... becomes + SELECT * FROM ((SELECT ...) UNION ...) + */ if (mysql_new_select(Lex, 1)) YYABORT; }; subselect_end: - ')' { LEX *lex=Lex; lex->pop_context(); diff --git a/sql/stacktrace.c b/sql/stacktrace.c index 43f35c452f7..a2fe2ab88f1 100644 --- a/sql/stacktrace.c +++ b/sql/stacktrace.c @@ -222,7 +222,7 @@ terribly wrong...\n"); fprintf(stderr, "Stack trace seems successful - bottom reached\n"); end: - fprintf(stderr, "Please read http://dev.mysql.com/doc/mysql/en/Using_stack_trace.html and follow instructions on how to resolve the stack trace. Resolved\n\ + fprintf(stderr, "Please read http://dev.mysql.com/doc/mysql/en/using-stack-trace.html and follow instructions on how to resolve the stack trace. Resolved\n\ stack trace is much more helpful in diagnosing the problem, so please do \n\ resolve it\n"); } diff --git a/sql/table.cc b/sql/table.cc index f1845fdedb3..5b41ad48696 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -632,7 +632,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, if (!strncmp(next_chunk + 2, "partition", str_db_type_length)) { /* Use partition handler */ - share->db_type= &partition_hton; + share->db_type= partition_hton; DBUG_PRINT("info", ("setting dbtype to '%.*s' (%d)", str_db_type_length, next_chunk + 2, ha_legacy_type(share->db_type))); @@ -2257,7 +2257,7 @@ char *get_field(MEM_ROOT *mem, Field *field) bool check_db_name(char *name) { - uint name_length= 0; // name length in symbols + char *start= name; /* Used to catch empty names and names with end space */ bool last_char_is_space= TRUE; @@ -2277,15 +2277,13 @@ bool check_db_name(char *name) name += len; continue; } - name_length++; } #else last_char_is_space= *name==' '; #endif - name_length++; name++; } - return last_char_is_space || name_length > NAME_LEN; + return last_char_is_space || (uint) (name - start) > NAME_LEN; } diff --git a/sql/table.h b/sql/table.h index b90ee280f91..434fb5a4d11 100644 --- a/sql/table.h +++ b/sql/table.h @@ -474,6 +474,8 @@ enum enum_schema_tables SCH_ENGINES, SCH_EVENTS, SCH_FILES, + SCH_GLOBAL_STATUS, + SCH_GLOBAL_VARIABLES, SCH_KEY_COLUMN_USAGE, SCH_OPEN_TABLES, SCH_PARTITIONS, @@ -483,6 +485,8 @@ enum enum_schema_tables SCH_PROCEDURES, SCH_SCHEMATA, SCH_SCHEMA_PRIVILEGES, + SCH_SESSION_STATUS, + SCH_SESSION_VARIABLES, SCH_STATISTICS, SCH_STATUS, SCH_TABLES, diff --git a/sql/udf_example.c b/sql/udf_example.c index a80fce81278..2fa7474eb16 100644 --- a/sql/udf_example.c +++ b/sql/udf_example.c @@ -127,7 +127,14 @@ typedef long long longlong; #else #include 
#include +#if defined(MYSQL_SERVER) #include /* To get strmov() */ +#else +/* when compiled as standalone */ +#define strmov(a,b) strcpy(a,b) +#define bzero(a,b) memset(a,0,b) +#define memcpy_fixed(a,b,c) memcpy(a,b,c) +#endif #endif #include #include @@ -674,10 +681,14 @@ longlong sequence(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, ** ****************************************************************************/ +#ifdef __WIN__ +#include +#else #include #include #include #include +#endif my_bool lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message); void lookup_deinit(UDF_INIT *initid); diff --git a/sql/udf_example.def b/sql/udf_example.def new file mode 100644 index 00000000000..d3081ca7768 --- /dev/null +++ b/sql/udf_example.def @@ -0,0 +1,24 @@ +LIBRARY udf_example +DESCRIPTION 'MySQL Sample for UDF' +VERSION 1.0 +EXPORTS + lookup + lookup_init + reverse_lookup + reverse_lookup_init + metaphon_init + metaphon_deinit + metaphon + myfunc_double_init + myfunc_double + myfunc_int_init + myfunc_int + sequence_init + sequence_deinit + sequence + avgcost_init + avgcost_deinit + avgcost_reset + avgcost_add + avgcost_clear + avgcost diff --git a/sql/unireg.cc b/sql/unireg.cc index 396ff4fba27..2ea572c782c 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -292,13 +292,19 @@ bool mysql_create_frm(THD *thd, const char *file_name, goto err3; { - /* Unescape all UCS2 intervals: were escaped in pack_headers */ + /* + Restore all UCS2 intervals. + HEX representation of them is not needed anymore. + */ List_iterator it(create_fields); create_field *field; while ((field=it++)) { - if (field->interval && field->charset->mbminlen > 1) - unhex_type2(field->interval); + if (field->save_interval) + { + field->interval= field->save_interval; + field->save_interval= 0; + } } } DBUG_RETURN(0); @@ -589,18 +595,36 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type, reclength=(uint) (field->offset+ data_offset + length); n_length+= (ulong) strlen(field->field_name)+1; field->interval_id=0; + field->save_interval= 0; if (field->interval) { uint old_int_count=int_count; if (field->charset->mbminlen > 1) { - /* Escape UCS2 intervals using HEX notation */ + /* + Escape UCS2 intervals using HEX notation to avoid + problems with delimiters between enum elements. + As the original representation is still needed in + the function make_empty_rec to create a record + filled with default values, it is saved in save_interval. + The HEX representation is created from this copy.
+ */ + field->save_interval= field->interval; + field->interval= (TYPELIB*) sql_alloc(sizeof(TYPELIB)); + *field->interval= *field->save_interval; + field->interval->type_names= + (const char **) sql_alloc(sizeof(char*) * + (field->interval->count+1)); + field->interval->type_names[field->interval->count]= 0; + field->interval->type_lengths= + (uint *) sql_alloc(sizeof(uint) * field->interval->count); + for (uint pos= 0; pos < field->interval->count; pos++) { char *dst; - uint length= field->interval->type_lengths[pos], hex_length; - const char *src= field->interval->type_names[pos]; + uint length= field->save_interval->type_lengths[pos], hex_length; + const char *src= field->save_interval->type_names[pos]; hex_length= length * 2; field->interval->type_lengths[pos]= hex_length; field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1); @@ -852,7 +876,8 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type, field->charset, field->geom_type, field->unireg_check, - field->interval, + field->save_interval ? field->save_interval : + field->interval, field->field_name); if (!regfield) goto err; // End of memory diff --git a/storage/archive/Makefile.am b/storage/archive/Makefile.am index 3d933408b0b..5e91ffa83e8 100644 --- a/storage/archive/Makefile.am +++ b/storage/archive/Makefile.am @@ -57,6 +57,6 @@ archive_test_LDADD = $(top_builddir)/mysys/libmysys.a \ archive_test_LDFLAGS = @NOINST_LDFLAGS@ -EXTRA_DIST = CMakeLists.txt +EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 3029b1db53e..b7673fc96ea 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -139,17 +139,21 @@ static HASH archive_open_tables; #define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption /* Static declarations for handerton */ -static handler *archive_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); +static handler *archive_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); +int archive_db_end(handlerton *hton, ha_panic_function type); + /* Number of rows that will force a bulk insert. 
*/ #define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2 -handlerton archive_hton; - -static handler *archive_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *archive_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_archive(table); + return new (mem_root) ha_archive(hton, table); } /* @@ -168,24 +172,25 @@ static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length, SYNOPSIS archive_db_init() - void + void * RETURN FALSE OK TRUE Error */ -int archive_db_init() +int archive_db_init(void *p) { DBUG_ENTER("archive_db_init"); + handlerton *archive_hton; if (archive_inited) DBUG_RETURN(FALSE); - - archive_hton.state=SHOW_OPTION_YES; - archive_hton.db_type=DB_TYPE_ARCHIVE_DB; - archive_hton.create=archive_create_handler; - archive_hton.panic=archive_db_end; - archive_hton.flags=HTON_NO_FLAGS; + archive_hton= (handlerton *)p; + archive_hton->state=SHOW_OPTION_YES; + archive_hton->db_type=DB_TYPE_ARCHIVE_DB; + archive_hton->create=archive_create_handler; + archive_hton->panic=archive_db_end; + archive_hton->flags=HTON_NO_FLAGS; if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST)) goto error; @@ -214,7 +219,7 @@ error: FALSE OK */ -int archive_db_done() +int archive_db_done(void *p) { if (archive_inited) { @@ -226,13 +231,13 @@ int archive_db_done() } -int archive_db_end(ha_panic_function type) +int archive_db_end(handlerton *hton, ha_panic_function type) { - return archive_db_done(); + return archive_db_done(NULL); } -ha_archive::ha_archive(TABLE_SHARE *table_arg) - :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0) +ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), delayed_insert(0), bulk_insert(0) { /* Set our original buffer from pre-allocated memory */ buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info); @@ -1571,7 +1576,7 @@ bool ha_archive::check_and_repair(THD *thd) } struct st_mysql_storage_engine archive_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &archive_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(archive) { @@ -1580,6 +1585,7 @@ mysql_declare_plugin(archive) "ARCHIVE", "Brian Aker, MySQL AB", "Archive storage engine", + PLUGIN_LICENSE_GPL, archive_db_init, /* Plugin Init */ archive_db_done, /* Plugin Deinit */ 0x0100 /* 1.0 */, diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 0485b7e9f9c..1a601c8451a 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -67,7 +67,7 @@ class ha_archive: public handler uint current_k_offset; public: - ha_archive(TABLE_SHARE *table_arg); + ha_archive(handlerton *hton, TABLE_SHARE *table_arg); ~ha_archive() { } @@ -139,6 +139,3 @@ public: bool check_and_repair(THD *thd); }; -int archive_db_init(void); -int archive_db_end(ha_panic_function type); - diff --git a/storage/blackhole/Makefile.am b/storage/blackhole/Makefile.am index e3e28cd75b5..36a885c4a26 100644 --- a/storage/blackhole/Makefile.am +++ b/storage/blackhole/Makefile.am @@ -47,6 +47,6 @@ libblackhole_a_CFLAGS = $(AM_CFLAGS) libblackhole_a_SOURCES= ha_blackhole.cc -EXTRA_DIST = CMakeLists.txt +EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc index c1141cce5ae..2515f059e02 100644 --- a/storage/blackhole/ha_blackhole.cc +++ b/storage/blackhole/ha_blackhole.cc @@ -24,11 +24,11 @@ /* Static declarations for handlerton */ -handlerton 
blackhole_hton; -static handler *blackhole_create_handler(TABLE_SHARE *table, +static handler *blackhole_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return new (mem_root) ha_blackhole(table); + return new (mem_root) ha_blackhole(hton, table); } @@ -36,8 +36,9 @@ static handler *blackhole_create_handler(TABLE_SHARE *table, ** BLACKHOLE tables *****************************************************************************/ -ha_blackhole::ha_blackhole(TABLE_SHARE *table_arg) - :handler(&blackhole_hton, table_arg) +ha_blackhole::ha_blackhole(handlerton *hton, + TABLE_SHARE *table_arg) + :handler(hton, table_arg) {} @@ -201,17 +202,19 @@ int ha_blackhole::index_last(byte * buf) DBUG_RETURN(HA_ERR_END_OF_FILE); } -static int blackhole_init() +static int blackhole_init(void *p) { - blackhole_hton.state= SHOW_OPTION_YES; - blackhole_hton.db_type= DB_TYPE_BLACKHOLE_DB; - blackhole_hton.create= blackhole_create_handler; - blackhole_hton.flags= HTON_CAN_RECREATE; + handlerton *blackhole_hton; + blackhole_hton= (handlerton *)p; + blackhole_hton->state= SHOW_OPTION_YES; + blackhole_hton->db_type= DB_TYPE_BLACKHOLE_DB; + blackhole_hton->create= blackhole_create_handler; + blackhole_hton->flags= HTON_CAN_RECREATE; return 0; } struct st_mysql_storage_engine blackhole_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &blackhole_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(blackhole) { @@ -220,6 +223,7 @@ mysql_declare_plugin(blackhole) "BLACKHOLE", "MySQL AB", "/dev/null storage engine (anything you write to it disappears)", + PLUGIN_LICENSE_GPL, blackhole_init, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100 /* 1.0 */, diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h index 55c26f6f02e..54cec41fd26 100644 --- a/storage/blackhole/ha_blackhole.h +++ b/storage/blackhole/ha_blackhole.h @@ -28,7 +28,7 @@ class ha_blackhole: public handler THR_LOCK thr_lock; public: - ha_blackhole(TABLE_SHARE *table_arg); + ha_blackhole(handlerton *hton, TABLE_SHARE *table_arg); ~ha_blackhole() { } diff --git a/storage/csv/Makefile.am b/storage/csv/Makefile.am index adebebd7cd8..e6bda138120 100644 --- a/storage/csv/Makefile.am +++ b/storage/csv/Makefile.am @@ -41,6 +41,6 @@ noinst_LIBRARIES = @plugin_csv_static_target@ libcsv_a_CXXFLAGS = $(AM_CFLAGS) libcsv_a_SOURCES = ha_tina.cc -EXTRA_DIST = CMakeLists.txt +EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index ce19202d745..dad28a74af6 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -74,8 +74,11 @@ static int write_meta_file(File meta_file, ha_rows rows, bool dirty); pthread_mutex_t tina_mutex; static HASH tina_open_tables; static int tina_init= 0; -static handler *tina_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); -static int tina_init_func(); +static handler *tina_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); +int tina_end(handlerton *hton, ha_panic_function type); + off_t Transparent_file::read_next() { @@ -124,7 +127,6 @@ char Transparent_file::get_value(off_t offset) return buff[0]; } } -handlerton tina_hton; /***************************************************************************** ** TINA tables @@ -149,25 +151,28 @@ static byte* tina_get_key(TINA_SHARE *share,uint *length, return (byte*) share->table_name; } -static int tina_init_func() +static int tina_init_func(void *p) { + handlerton *tina_hton; + if 
(!tina_init) { + tina_hton= (handlerton *)p; tina_init++; VOID(pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST)); (void) hash_init(&tina_open_tables,system_charset_info,32,0,0, (hash_get_key) tina_get_key,0,0); - bzero(&tina_hton, sizeof(handlerton)); - tina_hton.state= SHOW_OPTION_YES; - tina_hton.db_type= DB_TYPE_CSV_DB; - tina_hton.create= tina_create_handler; - tina_hton.panic= tina_end; - tina_hton.flags= HTON_CAN_RECREATE; + tina_hton->state= SHOW_OPTION_YES; + tina_hton->db_type= DB_TYPE_CSV_DB; + tina_hton->create= tina_create_handler; + tina_hton->panic= tina_end; + tina_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES | + HTON_NO_PARTITION); } return 0; } -static int tina_done_func() +static int tina_done_func(void *p) { if (tina_init) { @@ -195,7 +200,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) uint length; if (!tina_init) - tina_init_func(); + tina_init_func(NULL); pthread_mutex_lock(&tina_mutex); length=(uint) strlen(table_name); @@ -450,9 +455,9 @@ static int free_share(TINA_SHARE *share) DBUG_RETURN(result_code); } -int tina_end(ha_panic_function type) +int tina_end(handlerton *hton, ha_panic_function type) { - return tina_done_func(); + return tina_done_func(NULL); } @@ -494,14 +499,16 @@ off_t find_eoln_buff(Transparent_file *data_buff, off_t begin, } -static handler *tina_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *tina_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_tina(table); + return new (mem_root) ha_tina(hton, table); } -ha_tina::ha_tina(TABLE_SHARE *table_arg) - :handler(&tina_hton, table_arg), +ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), /* These definitions are found in handler.h They are not probably completely right. @@ -541,7 +548,10 @@ int ha_tina::encode_quote(byte *buf) in the code. */ if ((*field)->is_null()) - ptr= end_ptr= 0; + { + buffer.append(STRING_WITH_LEN("\"\",")); + continue; + } else { (*field)->val_str(&attribute,&attribute); @@ -642,6 +652,7 @@ int ha_tina::find_current_row(byte *buf) off_t end_offset, curr_offset= current_position; int eoln_len; my_bitmap_map *org_bitmap; + int error; DBUG_ENTER("ha_tina::find_current_row"); /* @@ -655,23 +666,23 @@ int ha_tina::find_current_row(byte *buf) /* Avoid asserts in ::store() for columns that are not going to be updated */ org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + error= HA_ERR_CRASHED_ON_USAGE; + + memset(buf, 0, table->s->null_bytes); for (Field **field=table->field ; *field ; field++) { buffer.length(0); - if (file_buff->get_value(curr_offset) == '"') + if (curr_offset < end_offset && + file_buff->get_value(curr_offset) == '"') curr_offset++; // Incrementpast the first quote else - { - dbug_tmp_restore_column_map(table->write_set, org_bitmap); - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - } - for(;curr_offset != end_offset; curr_offset++) + goto err; + for(;curr_offset < end_offset; curr_offset++) { // Need to convert line feeds! if (file_buff->get_value(curr_offset) == '"' && - (((file_buff->get_value(curr_offset + 1) == ',') && - (file_buff->get_value(curr_offset + 2) == '"')) || + ((file_buff->get_value(curr_offset + 1) == ',') || (curr_offset == end_offset -1 ))) { curr_offset+= 2; // Move past the , and the " @@ -701,10 +712,7 @@ int ha_tina::find_current_row(byte *buf) we are working with a damaged file. 
*/ if (curr_offset == end_offset - 1) - { - dbug_tmp_restore_column_map(table->write_set, org_bitmap); - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - } + goto err; buffer.append(file_buff->get_value(curr_offset)); } } @@ -712,11 +720,12 @@ int ha_tina::find_current_row(byte *buf) (*field)->store(buffer.ptr(), buffer.length(), system_charset_info); } next_position= end_offset + eoln_len; - /* Maybe use \N for null? */ - memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */ + error= 0; + +err: dbug_tmp_restore_column_map(table->write_set, org_bitmap); - DBUG_RETURN(0); + DBUG_RETURN(error); } /* @@ -1517,7 +1526,7 @@ bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info, } struct st_mysql_storage_engine csv_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &tina_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(csv) { @@ -1526,6 +1535,7 @@ mysql_declare_plugin(csv) "CSV", "Brian Aker, MySQL AB", "CSV storage engine", + PLUGIN_LICENSE_GPL, tina_init_func, /* Plugin Init */ tina_done_func, /* Plugin Deinit */ 0x0100 /* 1.0 */, diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h index 14ca848c855..f408e8f4a7d 100644 --- a/storage/csv/ha_tina.h +++ b/storage/csv/ha_tina.h @@ -129,7 +129,7 @@ private: int init_tina_writer(); public: - ha_tina(TABLE_SHARE *table_arg); + ha_tina(handlerton *hton, TABLE_SHARE *table_arg); ~ha_tina() { if (chain_alloced) @@ -212,5 +212,3 @@ public: int chain_append(); }; -int tina_end(ha_panic_function type); - diff --git a/storage/example/Makefile.am b/storage/example/Makefile.am index db3e35e4315..f9ed8c00629 100644 --- a/storage/example/Makefile.am +++ b/storage/example/Makefile.am @@ -47,6 +47,6 @@ libexample_a_CFLAGS = $(AM_CFLAGS) libexample_a_SOURCES= ha_example.cc -EXTRA_DIST = CMakeLists.txt +EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc index 6986796cb2e..212def8c18e 100644 --- a/storage/example/ha_example.cc +++ b/storage/example/ha_example.cc @@ -73,12 +73,14 @@ #include -static handler *example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); +static handler *example_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); static int example_init_func(); static bool example_init_func_for_handlerton(); static int example_panic(enum ha_panic_function flag); -handlerton example_hton; +handlerton *example_hton; /* Variables for example share methods */ static HASH example_open_tables; // Hash used to track open tables @@ -96,25 +98,26 @@ static byte* example_get_key(EXAMPLE_SHARE *share,uint *length, return (byte*) share->table_name; } -static int example_init_func() +static int example_init_func(void *p) { DBUG_ENTER("example_init_func"); if (!example_init) { + example_hton= (handlerton *)p; example_init= 1; VOID(pthread_mutex_init(&example_mutex,MY_MUTEX_INIT_FAST)); (void) hash_init(&example_open_tables,system_charset_info,32,0,0, (hash_get_key) example_get_key,0,0); - example_hton.state= SHOW_OPTION_YES; - example_hton.db_type= DB_TYPE_EXAMPLE_DB; - example_hton.create= example_create_handler; - example_hton.flags= HTON_CAN_RECREATE; + example_hton->state= SHOW_OPTION_YES; + example_hton->db_type= DB_TYPE_EXAMPLE_DB; + example_hton->create= example_create_handler; + example_hton->flags= HTON_CAN_RECREATE; } DBUG_RETURN(0); } -static int example_done_func() +static int example_done_func(void *p) { int error= 0; DBUG_ENTER("example_done_func"); @@ -200,14 +203,16 
@@ static int free_share(EXAMPLE_SHARE *share) } -static handler* example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler* example_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_example(table); + return new (mem_root) ha_example(hton, table); } -ha_example::ha_example(TABLE_SHARE *table_arg) - :handler(&example_hton, table_arg) +ha_example::ha_example(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg) {} /* @@ -702,7 +707,7 @@ int ha_example::create(const char *name, TABLE *table_arg, } struct st_mysql_storage_engine example_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &example_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(example) @@ -712,6 +717,7 @@ mysql_declare_plugin(example) "EXAMPLE", "Brian Aker, MySQL AB", "Example storage engine", + PLUGIN_LICENSE_GPL, example_init_func, /* Plugin Init */ example_done_func, /* Plugin Deinit */ 0x0001 /* 0.1 */, diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h index 956dc62311c..2f5dbc29c56 100644 --- a/storage/example/ha_example.h +++ b/storage/example/ha_example.h @@ -45,7 +45,7 @@ class ha_example: public handler EXAMPLE_SHARE *share; /* Shared lock info */ public: - ha_example(TABLE_SHARE *table_arg); + ha_example(handlerton *hton, TABLE_SHARE *table_arg); ~ha_example() { } diff --git a/storage/federated/Makefile.am b/storage/federated/Makefile.am index 813455ed5c7..a9f0bfcbb4a 100644 --- a/storage/federated/Makefile.am +++ b/storage/federated/Makefile.am @@ -47,6 +47,6 @@ libfederated_a_CFLAGS = $(AM_CFLAGS) libfederated_a_SOURCES= ha_federated.cc -EXTRA_DIST = CMakeLists.txt +EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 3e9759136bf..00b2ac87cd4 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -359,19 +359,22 @@ static const uint sizeof_trailing_and= sizeof(" AND ") - 1; static const uint sizeof_trailing_where= sizeof(" WHERE ") - 1; /* Static declaration for handerton */ -static handler *federated_create_handler(TABLE_SHARE *table, +static handler *federated_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root); -static int federated_commit(THD *thd, bool all); -static int federated_rollback(THD *thd, bool all); +static int federated_commit(handlerton *hton, THD *thd, bool all); +static int federated_rollback(handlerton *hton, THD *thd, bool all); +static int federated_db_init(void); +static int federated_db_end(handlerton *hton, ha_panic_function type); + /* Federated storage engine handlerton */ -handlerton federated_hton; - -static handler *federated_create_handler(TABLE_SHARE *table, +static handler *federated_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return new (mem_root) ha_federated(table); + return new (mem_root) ha_federated(hton, table); } @@ -396,17 +399,17 @@ static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, TRUE Error */ -int federated_db_init() +int federated_db_init(void *p) { DBUG_ENTER("federated_db_init"); - - federated_hton.state= SHOW_OPTION_YES; - federated_hton.db_type= DB_TYPE_FEDERATED_DB; - federated_hton.commit= federated_commit; - federated_hton.rollback= federated_rollback; - federated_hton.create= federated_create_handler; - federated_hton.panic= federated_db_end; - federated_hton.flags= 
HTON_ALTER_NOT_SUPPORTED; + handlerton *federated_hton= (handlerton *)p; + federated_hton->state= SHOW_OPTION_YES; + federated_hton->db_type= DB_TYPE_FEDERATED_DB; + federated_hton->commit= federated_commit; + federated_hton->rollback= federated_rollback; + federated_hton->create= federated_create_handler; + federated_hton->panic= federated_db_end; + federated_hton->flags= HTON_ALTER_NOT_SUPPORTED; if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST)) goto error; @@ -434,7 +437,7 @@ error: FALSE OK */ -int federated_db_end(ha_panic_function type) +int federated_db_end(handlerton *hton, ha_panic_function type) { if (federated_init) { @@ -723,8 +726,9 @@ error: ** FEDERATED tables *****************************************************************************/ -ha_federated::ha_federated(TABLE_SHARE *table_arg) - :handler(&federated_hton, table_arg), +ha_federated::ha_federated(handlerton *hton, + TABLE_SHARE *table_arg) + :handler(hton, table_arg), mysql(0), stored_result(0) { trx_next= 0; @@ -2736,7 +2740,7 @@ bool ha_federated::get_error_message(int error, String* buf) int ha_federated::external_lock(THD *thd, int lock_type) { int error= 0; - ha_federated *trx= (ha_federated *)thd->ha_data[federated_hton.slot]; + ha_federated *trx= (ha_federated *)thd->ha_data[ht->slot]; DBUG_ENTER("ha_federated::external_lock"); if (lock_type != F_UNLCK) @@ -2754,7 +2758,7 @@ int ha_federated::external_lock(THD *thd, int lock_type) DBUG_PRINT("info", ("error setting autocommit TRUE: %d", error)); DBUG_RETURN(error); } - trans_register_ha(thd, FALSE, &federated_hton); + trans_register_ha(thd, FALSE, ht); } else { @@ -2770,8 +2774,8 @@ int ha_federated::external_lock(THD *thd, int lock_type) DBUG_PRINT("info", ("error setting autocommit FALSE: %d", error)); DBUG_RETURN(error); } - thd->ha_data[federated_hton.slot]= this; - trans_register_ha(thd, TRUE, &federated_hton); + thd->ha_data[ht->slot]= this; + trans_register_ha(thd, TRUE, ht); /* Send a lock table to the remote end. 
We do not support this at the moment @@ -2796,10 +2800,10 @@ int ha_federated::external_lock(THD *thd, int lock_type) } -static int federated_commit(THD *thd, bool all) +static int federated_commit(handlerton *hton, THD *thd, bool all) { int return_val= 0; - ha_federated *trx= (ha_federated *)thd->ha_data[federated_hton.slot]; + ha_federated *trx= (ha_federated *)thd->ha_data[hton->slot]; DBUG_ENTER("federated_commit"); if (all) @@ -2814,7 +2818,7 @@ static int federated_commit(THD *thd, bool all) if (error && !return_val); return_val= error; } - thd->ha_data[federated_hton.slot]= NULL; + thd->ha_data[hton->slot]= NULL; } DBUG_PRINT("info", ("error val: %d", return_val)); @@ -2822,10 +2826,10 @@ static int federated_commit(THD *thd, bool all) } -static int federated_rollback(THD *thd, bool all) +static int federated_rollback(handlerton *hton, THD *thd, bool all) { int return_val= 0; - ha_federated *trx= (ha_federated *)thd->ha_data[federated_hton.slot]; + ha_federated *trx= (ha_federated *)thd->ha_data[hton->slot]; DBUG_ENTER("federated_rollback"); if (all) @@ -2840,7 +2844,7 @@ static int federated_rollback(THD *thd, bool all) if (error && !return_val) return_val= error; } - thd->ha_data[federated_hton.slot]= NULL; + thd->ha_data[hton->slot]= NULL; } DBUG_PRINT("info", ("error val: %d", return_val)); @@ -2882,7 +2886,7 @@ int ha_federated::execute_simple_query(const char *query, int len) } struct st_mysql_storage_engine federated_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &federated_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(federated) { @@ -2891,6 +2895,7 @@ mysql_declare_plugin(federated) "FEDERATED", "Patrick Galbraith and Brian Aker, MySQL AB", "Federated MySQL storage engine", + PLUGIN_LICENSE_GPL, federated_db_init, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100 /* 1.0 */, diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h index ebdc775d3bf..ade1e5b181e 100644 --- a/storage/federated/ha_federated.h +++ b/storage/federated/ha_federated.h @@ -94,7 +94,7 @@ private: int stash_remote_error(); public: - ha_federated(TABLE_SHARE *table_arg); + ha_federated(handlerton *hton, TABLE_SHARE *table_arg); ~ha_federated() {} /* The name that will be used for display purposes */ const char *table_type() const { return "FEDERATED"; } @@ -236,6 +236,3 @@ public: MYSQL_RES **result); }; -int federated_db_init(void); -int federated_db_end(ha_panic_function type); - diff --git a/storage/heap/Makefile.am b/storage/heap/Makefile.am index 46565126b65..8bcba43dc0d 100644 --- a/storage/heap/Makefile.am +++ b/storage/heap/Makefile.am @@ -50,7 +50,7 @@ libheap_a_SOURCES = hp_open.c hp_extra.c hp_close.c hp_panic.c hp_info.c \ hp_hash.c _check.c _rectest.c hp_static.c -EXTRA_DIST = CMakeLists.txt +EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index 57777410bea..8e8744c2d34 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -25,23 +25,35 @@ #include "ha_heap.h" -static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); +static handler *heap_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); -handlerton heap_hton; - -int heap_init() +int heap_panic(handlerton *hton, ha_panic_function flag) { - heap_hton.state= SHOW_OPTION_YES; - heap_hton.db_type= DB_TYPE_HEAP; - heap_hton.create= heap_create_handler; - heap_hton.panic= heap_panic; - heap_hton.flags= HTON_CAN_RECREATE; + 
return hp_panic(flag); +} + + +int heap_init(void *p) +{ + handlerton *heap_hton; + + heap_hton= (handlerton *)p; + heap_hton->state= SHOW_OPTION_YES; + heap_hton->db_type= DB_TYPE_HEAP; + heap_hton->create= heap_create_handler; + heap_hton->panic= heap_panic; + heap_hton->flags= HTON_CAN_RECREATE; + return 0; } -static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *heap_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_heap(table); + return new (mem_root) ha_heap(hton, table); } @@ -49,8 +61,8 @@ static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) ** HEAP tables *****************************************************************************/ -ha_heap::ha_heap(TABLE_SHARE *table_arg) - :handler(&heap_hton, table_arg), file(0), records_changed(0), +ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), file(0), records_changed(0), key_stat_version(0) {} @@ -174,7 +186,10 @@ int ha_heap::write_row(byte * buf) if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); + { + if ((res= update_auto_increment())) + return res; + } res= heap_write(file,buf); if (!res && (++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)) @@ -696,7 +711,7 @@ bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info, } struct st_mysql_storage_engine heap_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &heap_hton}; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(heap) { @@ -705,6 +720,7 @@ mysql_declare_plugin(heap) "MEMORY", "MySQL AB", "Hash based, stored in memory, useful for temporary tables", + PLUGIN_LICENSE_GPL, heap_init, NULL, 0x0100, /* 1.0 */ diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h index 00e59856f26..0f6386655cb 100644 --- a/storage/heap/ha_heap.h +++ b/storage/heap/ha_heap.h @@ -31,7 +31,7 @@ class ha_heap: public handler uint records_changed; uint key_stat_version; public: - ha_heap(TABLE_SHARE *table); + ha_heap(handlerton *hton, TABLE_SHARE *table); ~ha_heap() {} const char *table_type() const { diff --git a/storage/heap/hp_panic.c b/storage/heap/hp_panic.c index 2b659cbfbb3..08c310bec3d 100644 --- a/storage/heap/hp_panic.c +++ b/storage/heap/hp_panic.c @@ -19,10 +19,10 @@ /* if flag == HA_PANIC_CLOSE then all files are removed for more memory */ -int heap_panic(enum ha_panic_function flag) +int hp_panic(enum ha_panic_function flag) { LIST *element,*next_open; - DBUG_ENTER("heap_panic"); + DBUG_ENTER("hp_panic"); pthread_mutex_lock(&THR_LOCK_heap); for (element=heap_open_list ; element ; element=next_open) @@ -54,4 +54,4 @@ int heap_panic(enum ha_panic_function flag) } pthread_mutex_unlock(&THR_LOCK_heap); DBUG_RETURN(0); -} /* heap_panic */ +} /* hp_panic */ diff --git a/storage/heap/hp_test1.c b/storage/heap/hp_test1.c index 703b39b1e2d..a7423effac8 100644 --- a/storage/heap/hp_test1.c +++ b/storage/heap/hp_test1.c @@ -155,7 +155,7 @@ int main(int argc, char **argv) } #endif - if (heap_close(file) || heap_panic(HA_PANIC_CLOSE)) + if (heap_close(file) || hp_panic(HA_PANIC_CLOSE)) goto err; my_end(MY_GIVE_INFO); return(0); diff --git a/storage/heap/hp_test2.c b/storage/heap/hp_test2.c index c1d987a3b5f..b4e8cf98f0b 100644 --- a/storage/heap/hp_test2.c +++ b/storage/heap/hp_test2.c @@ -603,7 +603,7 @@ end: if (heap_close(file) || (file2 && heap_close(file2))) goto err; 
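For reference, the storage-engine hunks above (CSV, EXAMPLE, FEDERATED, MEMORY) all apply one pattern: the server now allocates the handlerton and passes it to the plugin init hook as a void pointer, handler constructors take a handlerton* instead of referencing a per-engine global, st_mysql_storage_engine carries only the interface version, transaction hooks such as federated_commit/federated_rollback gain a leading handlerton* argument, and each mysql_declare_plugin block gains PLUGIN_LICENSE_GPL. A minimal sketch of that init pattern for a hypothetical "skeleton" engine follows (names are illustrative; the ha_skeleton handler class and the full plugin declaration are omitted; the usual server headers are assumed):

#include "mysql_priv.h"                    /* handler, handlerton, TABLE_SHARE (assumed) */
#include <mysql/plugin.h>                  /* plugin declarations (assumed) */

static handlerton *skeleton_hton;          /* filled in from the init hook */

static handler *skeleton_create_handler(handlerton *hton,
                                        TABLE_SHARE *table,
                                        MEM_ROOT *mem_root)
{
  /* a real engine returns its handler subclass here, e.g.
     new (mem_root) ha_skeleton(hton, table); omitted in this sketch */
  return NULL;
}

static int skeleton_init_func(void *p)
{
  /* the server hands the plugin the handlerton it allocated for it */
  skeleton_hton= (handlerton *)p;
  skeleton_hton->state=  SHOW_OPTION_YES;
  skeleton_hton->create= skeleton_create_handler;
  skeleton_hton->flags=  HTON_CAN_RECREATE;
  /* real engines also set db_type, panic, commit/rollback etc.,
     exactly as the hunks above do */
  return 0;
}

/* the descriptor no longer embeds a pointer to the handlerton */
struct st_mysql_storage_engine skeleton_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };

The mysql_declare_plugin block itself changes only by the added PLUGIN_LICENSE_GPL field, as shown for each engine above.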
heap_delete_table(filename2); - heap_panic(HA_PANIC_CLOSE); + hp_panic(HA_PANIC_CLOSE); my_end(MY_GIVE_INFO); return(0); err: @@ -669,7 +669,7 @@ static sig_handler endprog(int sig_number __attribute__((unused))) else #endif { - heap_panic(HA_PANIC_CLOSE); + hp_panic(HA_PANIC_CLOSE); my_end(1); exit(1); } diff --git a/storage/innobase/Makefile.am b/storage/innobase/Makefile.am index bdab35f1dc0..f2b5e5f4bfc 100644 --- a/storage/innobase/Makefile.am +++ b/storage/innobase/Makefile.am @@ -91,7 +91,7 @@ EXTRA_DIST = include/btr0btr.h include/btr0btr.ic include/btr0cur.h include/btr include/ut0sort.h include/ut0ut.h include/ut0ut.ic include/ut0vec.h include/ut0vec.ic include/ha_prototypes.h \ include/ut0list.h include/ut0list.ic \ include/ut0wqueue.h \ - CMakeLists.txt + CMakeLists.txt plug.in noinst_LIBRARIES = libinnobase.a libinnobase_a_LIBADD = usr/libusr.a srv/libsrv.a dict/libdict.a \ diff --git a/storage/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c index 095ef6f02df..ffaf2065045 100644 --- a/storage/innobase/btr/btr0btr.c +++ b/storage/innobase/btr/btr0btr.c @@ -77,16 +77,6 @@ make them consecutive on disk if possible. From the other file segment we allocate pages for the non-leaf levels of the tree. */ -/****************************************************************** -Creates a new index page to the tree (not the root, and also not -used in page reorganization). */ -static -void -btr_page_create( -/*============*/ - page_t* page, /* in: page to be created */ - dict_tree_t* tree, /* in: index tree */ - mtr_t* mtr); /* in: mtr */ /**************************************************************** Returns the upper level node pointer to a page. It is assumed that mtr holds an x-latch on the tree. */ @@ -95,7 +85,7 @@ rec_t* btr_page_get_father_node_ptr( /*=========================*/ /* out: pointer to node pointer record */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page: must contain at least one user record */ mtr_t* mtr); /* in: mtr */ @@ -132,19 +122,18 @@ page_t* btr_root_get( /*=========*/ /* out: root page, x-latched */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ mtr_t* mtr) /* in: mtr */ { ulint space; ulint root_page_no; page_t* root; - space = dict_tree_get_space(tree); - root_page_no = dict_tree_get_page(tree); + space = dict_index_get_space(index); + root_page_no = dict_index_get_page(index); root = btr_page_get(space, root_page_no, RW_X_LATCH, mtr); - ut_a((ibool)!!page_is_comp(root) - == dict_table_is_comp(tree->tree_index->table)); + ut_a((ibool)!!page_is_comp(root) == dict_table_is_comp(index->table)); return(root); } @@ -254,23 +243,22 @@ btr_get_next_user_rec( } /****************************************************************** -Creates a new index page to the tree (not the root, and also not used in -page reorganization). */ +Creates a new index page (not the root, and also not +used in page reorganization). 
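The btr0btr.c changes that start here, and the btr0cur.c changes further down, all follow from dropping the separate dict_tree_t object: routines such as btr_root_get(), btr_page_alloc(), btr_page_free() and btr_page_get_father_node_ptr() now take the dict_index_t directly, and the dict_tree_get_space()/dict_tree_get_page()/dict_tree_get_lock() accessors become dict_index_get_space()/dict_index_get_page()/dict_index_get_lock(). Sketched from a caller's point of view (illustrative helper name; InnoDB internal declarations assumed):

/* before this patch a caller first fetched the tree object:
       tree = index->tree;
       mtr_x_lock(dict_tree_get_lock(tree), mtr);
       root = btr_root_get(tree, mtr);
   with dict_tree_t gone it works on the index itself: */
static page_t*
skeleton_root_get(dict_index_t*  index,  /* in: index */
                  mtr_t*         mtr)    /* in: mtr */
{
        page_t* root;

        mtr_x_lock(dict_index_get_lock(index), mtr);
        root = btr_root_get(index, mtr);

        return(root);
}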
*/ static void btr_page_create( /*============*/ page_t* page, /* in: page to be created */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index */ mtr_t* mtr) /* in: mtr */ { ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); - page_create(page, mtr, - dict_table_is_comp(tree->tree_index->table)); + page_create(page, mtr, dict_table_is_comp(index->table)); buf_block_align(page)->check_index_page_at_flush = TRUE; - btr_page_set_index_id(page, tree->id, mtr); + btr_page_set_index_id(page, index->id, mtr); } /****************************************************************** @@ -281,20 +269,20 @@ page_t* btr_page_alloc_for_ibuf( /*====================*/ /* out: new allocated page, x-latched */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ mtr_t* mtr) /* in: mtr */ { fil_addr_t node_addr; page_t* root; page_t* new_page; - root = btr_root_get(tree, mtr); + root = btr_root_get(index, mtr); node_addr = flst_get_first(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr); ut_a(node_addr.page != FIL_NULL); - new_page = buf_page_get(dict_tree_get_space(tree), node_addr.page, + new_page = buf_page_get(dict_index_get_space(index), node_addr.page, RW_X_LATCH, mtr); #ifdef UNIV_SYNC_DEBUG buf_page_dbg_add_level(new_page, SYNC_TREE_NODE_NEW); @@ -318,7 +306,7 @@ btr_page_alloc( /*===========*/ /* out: new allocated page, x-latched; NULL if out of space */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index */ ulint hint_page_no, /* in: hint of a good page */ byte file_direction, /* in: direction where a possible page split is made */ @@ -331,12 +319,12 @@ btr_page_alloc( page_t* new_page; ulint new_page_no; - if (tree->type & DICT_IBUF) { + if (index->type & DICT_IBUF) { - return(btr_page_alloc_for_ibuf(tree, mtr)); + return(btr_page_alloc_for_ibuf(index, mtr)); } - root = btr_root_get(tree, mtr); + root = btr_root_get(index, mtr); if (level == 0) { seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; @@ -355,7 +343,7 @@ btr_page_alloc( return(NULL); } - new_page = buf_page_get(dict_tree_get_space(tree), new_page_no, + new_page = buf_page_get(dict_index_get_space(index), new_page_no, RW_X_LATCH, mtr); #ifdef UNIV_SYNC_DEBUG buf_page_dbg_add_level(new_page, SYNC_TREE_NODE_NEW); @@ -382,9 +370,9 @@ btr_get_size( mtr_start(&mtr); - mtr_s_lock(dict_tree_get_lock(index->tree), &mtr); + mtr_s_lock(dict_index_get_lock(index), &mtr); - root = btr_root_get(index->tree, &mtr); + root = btr_root_get(index, &mtr); if (flag == BTR_N_LEAF_PAGES) { seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; @@ -415,7 +403,7 @@ static void btr_page_free_for_ibuf( /*===================*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page to be freed, x-latched */ mtr_t* mtr) /* in: mtr */ { @@ -423,7 +411,7 @@ btr_page_free_for_ibuf( ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); - root = btr_root_get(tree, mtr); + root = btr_root_get(index, mtr); flst_add_first(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr); @@ -440,7 +428,7 @@ argument. 
*/ void btr_page_free_low( /*==============*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page to be freed, x-latched */ ulint level, /* in: page level */ mtr_t* mtr) /* in: mtr */ @@ -457,14 +445,14 @@ btr_page_free_low( buf_frame_modify_clock_inc(page); - if (tree->type & DICT_IBUF) { + if (index->type & DICT_IBUF) { - btr_page_free_for_ibuf(tree, page, mtr); + btr_page_free_for_ibuf(index, page, mtr); return; } - root = btr_root_get(tree, mtr); + root = btr_root_get(index, mtr); if (level == 0) { seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; @@ -485,7 +473,7 @@ storage pages because the page must contain info on its level. */ void btr_page_free( /*==========*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page to be freed, x-latched */ mtr_t* mtr) /* in: mtr */ { @@ -495,7 +483,7 @@ btr_page_free( MTR_MEMO_PAGE_X_FIX)); level = btr_page_get_level(page, mtr); - btr_page_free_low(tree, page, level, mtr); + btr_page_free_low(index, page, level, mtr); } /****************************************************************** @@ -558,7 +546,7 @@ btr_page_get_father_for_rec( /*========================*/ /* out: pointer to node pointer record, its page x-latched */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page: must contain at least one user record */ rec_t* user_rec,/* in: user_record on page */ @@ -568,22 +556,20 @@ btr_page_get_father_for_rec( dtuple_t* tuple; btr_cur_t cursor; rec_t* node_ptr; - dict_index_t* index; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; *offsets_ = (sizeof offsets_) / sizeof *offsets_; - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_a(page_rec_is_user_rec(user_rec)); - ut_ad(dict_tree_get_page(tree) != buf_frame_get_page_no(page)); + ut_ad(dict_index_get_page(index) != buf_frame_get_page_no(page)); heap = mem_heap_create(100); - tuple = dict_tree_build_node_ptr(tree, user_rec, 0, heap, - btr_page_get_level(page, mtr)); - index = tree->tree_index; + tuple = dict_index_build_node_ptr(index, user_rec, 0, heap, + btr_page_get_level(page, mtr)); /* In the following, we choose just any index from the tree as the first parameter for btr_cur_search_to_nth_level. */ @@ -647,14 +633,14 @@ rec_t* btr_page_get_father_node_ptr( /*=========================*/ /* out: pointer to node pointer record */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page: must contain at least one user record */ mtr_t* mtr) /* in: mtr */ { - return(btr_page_get_father_for_rec - (tree, page, - page_rec_get_next(page_get_infimum_rec(page)), mtr)); + return(btr_page_get_father_for_rec( + index, page, + page_rec_get_next(page_get_infimum_rec(page)), mtr)); } /**************************************************************** @@ -683,8 +669,8 @@ btr_create( if (type & DICT_IBUF) { /* Allocate first the ibuf header page */ - ibuf_hdr_frame = fseg_create - (space, 0, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); + ibuf_hdr_frame = fseg_create( + space, 0, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); #ifdef UNIV_SYNC_DEBUG buf_page_dbg_add_level(ibuf_hdr_frame, SYNC_TREE_NODE_NEW); @@ -786,8 +772,8 @@ leaf_loop: /* NOTE: page hash indexes are dropped when a page is freed inside fsp0fsp. 
*/ - finished = fseg_free_step - (root + PAGE_HEADER + PAGE_BTR_SEG_LEAF, &mtr); + finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF, + &mtr); mtr_commit(&mtr); if (!finished) { @@ -799,8 +785,8 @@ top_loop: root = btr_page_get(space, root_page_no, RW_X_LATCH, &mtr); - finished = fseg_free_step_not_header - (root + PAGE_HEADER + PAGE_BTR_SEG_TOP, &mtr); + finished = fseg_free_step_not_header( + root + PAGE_HEADER + PAGE_BTR_SEG_TOP, &mtr); mtr_commit(&mtr); if (!finished) { @@ -827,8 +813,7 @@ btr_free_root( btr_search_drop_page_hash_index(root); top_loop: - finished = fseg_free_step - (root + PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); + finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); if (!finished) { goto top_loop; @@ -1000,7 +985,7 @@ btr_root_raise_and_insert( dtuple_t* tuple, /* in: tuple to insert */ mtr_t* mtr) /* in: mtr */ { - dict_tree_t* tree; + dict_index_t* index; page_t* root; page_t* new_page; ulint new_page_no; @@ -1012,10 +997,10 @@ btr_root_raise_and_insert( page_cur_t* page_cursor; root = btr_cur_get_page(cursor); - tree = btr_cur_get_tree(cursor); + index = btr_cur_get_index(cursor); - ut_ad(dict_tree_get_page(tree) == buf_frame_get_page_no(root)); - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(dict_index_get_page(index) == buf_frame_get_page_no(root)); + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(root), MTR_MEMO_PAGE_X_FIX)); @@ -1025,10 +1010,10 @@ btr_root_raise_and_insert( moving the root records to the new page, emptying the root, putting a node pointer to the new page, and then splitting the new page. */ - new_page = btr_page_alloc(tree, 0, FSP_NO_DIR, + new_page = btr_page_alloc(index, 0, FSP_NO_DIR, btr_page_get_level(root, mtr), mtr); - btr_page_create(new_page, tree, mtr); + btr_page_create(new_page, index, mtr); level = btr_page_get_level(root, mtr); @@ -1043,7 +1028,7 @@ btr_root_raise_and_insert( /* Move the records from root to the new page */ page_move_rec_list_end(new_page, root, page_get_infimum_rec(root), - cursor->index, mtr); + index, mtr); /* If this is a pessimistic insert which is actually done to perform a pessimistic update then we have stored the lock information of the record to be inserted on the infimum of the @@ -1060,10 +1045,10 @@ btr_root_raise_and_insert( /* Build the node pointer (= node key and page address) for the child */ - node_ptr = dict_tree_build_node_ptr(tree, rec, new_page_no, heap, - level); + node_ptr = dict_index_build_node_ptr(index, rec, new_page_no, heap, + level); /* Reorganize the root to get free space */ - btr_page_reorganize(root, cursor->index, mtr); + btr_page_reorganize(root, index, mtr); page_cursor = btr_cur_get_page_cur(cursor); @@ -1072,7 +1057,7 @@ btr_root_raise_and_insert( page_cur_set_before_first(root, page_cursor); node_ptr_rec = page_cur_tuple_insert(page_cursor, node_ptr, - cursor->index, mtr); + index, mtr); ut_ad(node_ptr_rec); @@ -1092,9 +1077,9 @@ btr_root_raise_and_insert( buf_frame_get_page_no(new_page)); #endif - ibuf_reset_free_bits(tree->tree_index, new_page); + ibuf_reset_free_bits(index, new_page); /* Reposition the cursor to the child node */ - page_cur_search(new_page, cursor->index, tuple, + page_cur_search(new_page, index, tuple, PAGE_CUR_LE, page_cursor); /* Split the child and insert tuple */ @@ -1419,7 +1404,7 @@ that mtr holds an x-latch on the tree. 
*/ void btr_insert_on_non_leaf_level( /*=========================*/ - dict_tree_t* tree, /* in: tree */ + dict_index_t* index, /* in: index */ ulint level, /* in: level, must be > 0 */ dtuple_t* tuple, /* in: the record to be inserted */ mtr_t* mtr) /* in: mtr */ @@ -1431,11 +1416,7 @@ btr_insert_on_non_leaf_level( ut_ad(level > 0); - /* In the following, choose just any index from the tree as the - first parameter for btr_cur_search_to_nth_level. */ - - btr_cur_search_to_nth_level(tree->tree_index, - level, tuple, PAGE_CUR_LE, + btr_cur_search_to_nth_level(index, level, tuple, PAGE_CUR_LE, BTR_CONT_MODIFY_TREE, &cursor, 0, mtr); @@ -1454,7 +1435,7 @@ static void btr_attach_half_pages( /*==================*/ - dict_tree_t* tree, /* in: the index tree */ + dict_index_t* index, /* in: the index tree */ page_t* page, /* in: page to be split */ rec_t* split_rec, /* in: first record on upper half page */ @@ -1493,15 +1474,17 @@ btr_attach_half_pages( lower_page = new_page; upper_page = page; - /* Look from the tree for the node pointer to page */ - node_ptr = btr_page_get_father_node_ptr(tree, page, mtr); + /* Look up the index for the node pointer to page */ + node_ptr = btr_page_get_father_node_ptr(index, page, mtr); /* Replace the address of the old child node (= page) with the address of the new lower half */ - btr_node_ptr_set_child_page_no(node_ptr, rec_get_offsets - (node_ptr, tree->tree_index, - NULL, ULINT_UNDEFINED, &heap), + btr_node_ptr_set_child_page_no(node_ptr, + rec_get_offsets( + node_ptr, index, + NULL, ULINT_UNDEFINED, + &heap), lower_page_no, mtr); mem_heap_empty(heap); } else { @@ -1517,13 +1500,13 @@ btr_attach_half_pages( /* Build the node pointer (= node key and page address) for the upper half */ - node_ptr_upper = dict_tree_build_node_ptr(tree, split_rec, - upper_page_no, heap, level); + node_ptr_upper = dict_index_build_node_ptr(index, split_rec, + upper_page_no, heap, level); /* Insert it next to the pointer to the lower half. Note that this may generate recursion leading to a split on the higher level. */ - btr_insert_on_non_leaf_level(tree, level + 1, node_ptr_upper, mtr); + btr_insert_on_non_leaf_level(index, level + 1, node_ptr_upper, mtr); /* Free the memory heap */ mem_heap_free(heap); @@ -1585,7 +1568,6 @@ btr_page_split_and_insert( dtuple_t* tuple, /* in: tuple to insert */ mtr_t* mtr) /* in: mtr */ { - dict_tree_t* tree; page_t* page; ulint page_no; byte direction; @@ -1611,12 +1593,11 @@ btr_page_split_and_insert( func_start: mem_heap_empty(heap); offsets = NULL; - tree = btr_cur_get_tree(cursor); - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(cursor->index), MTR_MEMO_X_LOCK)); #ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(dict_tree_get_lock(tree), RW_LOCK_EX)); + ut_ad(rw_lock_own(dict_index_get_lock(cursor->index), RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ page = btr_cur_get_page(cursor); @@ -1649,10 +1630,10 @@ func_start: split_rec = page_get_middle_rec(page); } - /* 2. Allocate a new page to the tree */ - new_page = btr_page_alloc(tree, hint_page_no, direction, + /* 2. Allocate a new page to the index */ + new_page = btr_page_alloc(cursor->index, hint_page_no, direction, btr_page_get_level(page, mtr), mtr); - btr_page_create(new_page, tree, mtr); + btr_page_create(new_page, cursor->index, mtr); /* 3. Calculate the first record on the upper half-page, and the first record (move_limit) on original page which ends up on the @@ -1671,7 +1652,8 @@ func_start: /* 4. 
Do first the modifications in the tree structure */ - btr_attach_half_pages(tree, page, first_rec, new_page, direction, mtr); + btr_attach_half_pages(cursor->index, page, first_rec, + new_page, direction, mtr); if (split_rec == NULL) { mem_free(buf); @@ -1697,7 +1679,7 @@ func_start: if (insert_will_fit && (btr_page_get_level(page, mtr) == 0)) { - mtr_memo_release(mtr, dict_tree_get_lock(tree), + mtr_memo_release(mtr, dict_index_get_lock(cursor->index), MTR_MEMO_X_LOCK); } @@ -1797,8 +1779,8 @@ func_start: buf_frame_get_page_no(right_page)); #endif - ut_ad(page_validate(left_page, tree->tree_index)); - ut_ad(page_validate(right_page, tree->tree_index)); + ut_ad(page_validate(left_page, cursor->index)); + ut_ad(page_validate(right_page, cursor->index)); mem_heap_free(heap); return(rec); @@ -1810,7 +1792,6 @@ static void btr_level_list_remove( /*==================*/ - dict_tree_t* tree __attribute__((unused)), /* in: index tree */ page_t* page, /* in: page to remove */ mtr_t* mtr) /* in: mtr */ { @@ -1820,7 +1801,7 @@ btr_level_list_remove( ulint next_page_no; page_t* next_page; - ut_ad(tree && page && mtr); + ut_ad(page && mtr); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); /* Get the previous and next page numbers of page */ @@ -1867,12 +1848,11 @@ btr_set_min_rec_mark_log( ulint comp, /* nonzero=compact record format */ mtr_t* mtr) /* in: mtr */ { - mlog_write_initial_log_record - (rec, comp ? MLOG_COMP_REC_MIN_MARK : MLOG_REC_MIN_MARK, mtr); + mlog_write_initial_log_record( + rec, comp ? MLOG_COMP_REC_MIN_MARK : MLOG_REC_MIN_MARK, mtr); /* Write rec offset as a 2-byte ulint */ - mlog_catenate_ulint(mtr, ut_align_offset(rec, UNIV_PAGE_SIZE), - MLOG_2BYTES); + mlog_catenate_ulint(mtr, page_offset(rec), MLOG_2BYTES); } /******************************************************************** @@ -1932,7 +1912,7 @@ Deletes on the upper level the node pointer to a page. 
*/ void btr_node_ptr_delete( /*================*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page whose node pointer is deleted */ mtr_t* mtr) /* in: mtr */ { @@ -1945,9 +1925,9 @@ btr_node_ptr_delete( MTR_MEMO_PAGE_X_FIX)); /* Delete node pointer on father page */ - node_ptr = btr_page_get_father_node_ptr(tree, page, mtr); + node_ptr = btr_page_get_father_node_ptr(index, page, mtr); - btr_cur_position(tree->tree_index, node_ptr, &cursor); + btr_cur_position(index, node_ptr, &cursor); compressed = btr_cur_pessimistic_delete(&err, TRUE, &cursor, FALSE, mtr); ut_a(err == DB_SUCCESS); @@ -1964,7 +1944,7 @@ static void btr_lift_page_up( /*=============*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page which is the only on its level; must not be empty: use btr_discard_only_page_on_level if the last @@ -1973,17 +1953,15 @@ btr_lift_page_up( { page_t* father_page; ulint page_level; - dict_index_t* index; ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL); ut_ad(btr_page_get_next(page, mtr) == FIL_NULL); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); - father_page = buf_frame_align - (btr_page_get_father_node_ptr(tree, page, mtr)); + father_page = buf_frame_align( + btr_page_get_father_node_ptr(index, page, mtr)); page_level = btr_page_get_level(page, mtr); - index = tree->tree_index; btr_search_drop_page_hash_index(page); @@ -1998,12 +1976,12 @@ btr_lift_page_up( btr_page_set_level(father_page, page_level, mtr); /* Free the file page */ - btr_page_free(tree, page, mtr); + btr_page_free(index, page, mtr); /* We play safe and reset the free bits for the father */ ibuf_reset_free_bits(index, father_page); ut_ad(page_validate(father_page, index)); - ut_ad(btr_check_node_ptr(tree, father_page, mtr)); + ut_ad(btr_check_node_ptr(index, father_page, mtr)); } /***************************************************************** @@ -2026,7 +2004,7 @@ btr_compress( empty */ mtr_t* mtr) /* in: mtr */ { - dict_tree_t* tree; + dict_index_t* index; ulint space; ulint left_page_no; ulint right_page_no; @@ -2045,16 +2023,16 @@ btr_compress( ulint comp; page = btr_cur_get_page(cursor); - tree = btr_cur_get_tree(cursor); + index = btr_cur_get_index(cursor); comp = page_is_comp(page); - ut_a((ibool)!!comp == dict_table_is_comp(cursor->index->table)); + ut_a((ibool)!!comp == dict_table_is_comp(index->table)); - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); level = btr_page_get_level(page, mtr); - space = dict_tree_get_space(tree); + space = dict_index_get_space(index); left_page_no = btr_page_get_prev(page, mtr); right_page_no = btr_page_get_next(page, mtr); @@ -2064,7 +2042,7 @@ btr_compress( left_page_no, right_page_no); #endif - node_ptr = btr_page_get_father_node_ptr(tree, page, mtr); + node_ptr = btr_page_get_father_node_ptr(index, page, mtr); ut_ad(!comp || rec_get_status(node_ptr) == REC_STATUS_NODE_PTR); father_page = buf_frame_align(node_ptr); ut_a(comp == page_is_comp(father_page)); @@ -2093,7 +2071,7 @@ btr_compress( } else { /* The page is the only one on the level, lift the records to the father */ - btr_lift_page_up(tree, page, mtr); + btr_lift_page_up(index, page, mtr); return; } @@ -2102,8 +2080,8 @@ btr_compress( data_size = page_get_data_size(page); ut_a(page_is_comp(merge_page) == comp); - 
max_ins_size_reorg = page_get_max_insert_size_after_reorganize - (merge_page, n_recs); + max_ins_size_reorg = page_get_max_insert_size_after_reorganize( + merge_page, n_recs); if (data_size > max_ins_size_reorg) { /* No space for merge */ @@ -2111,7 +2089,7 @@ btr_compress( return; } - ut_ad(page_validate(merge_page, cursor->index)); + ut_ad(page_validate(merge_page, index)); max_ins_size = page_get_max_insert_size(merge_page, n_recs); @@ -2119,11 +2097,11 @@ btr_compress( /* We have to reorganize merge_page */ - btr_page_reorganize(merge_page, cursor->index, mtr); + btr_page_reorganize(merge_page, index, mtr); max_ins_size = page_get_max_insert_size(merge_page, n_recs); - ut_ad(page_validate(merge_page, cursor->index)); + ut_ad(page_validate(merge_page, index)); ut_ad(page_get_max_insert_size(merge_page, n_recs) == max_ins_size_reorg); } @@ -2138,10 +2116,10 @@ btr_compress( btr_search_drop_page_hash_index(page); /* Remove the page from the level list */ - btr_level_list_remove(tree, page, mtr); + btr_level_list_remove(page, mtr); if (is_left) { - btr_node_ptr_delete(tree, page, mtr); + btr_node_ptr_delete(index, page, mtr); } else { mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; @@ -2149,46 +2127,48 @@ btr_compress( /* Replace the address of the old child node (= page) with the address of the merge page to the right */ - btr_node_ptr_set_child_page_no(node_ptr, rec_get_offsets - (node_ptr, cursor->index, - offsets_, ULINT_UNDEFINED, - &heap), + btr_node_ptr_set_child_page_no(node_ptr, + rec_get_offsets( + node_ptr, index, + offsets_, + ULINT_UNDEFINED, + &heap), right_page_no, mtr); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } - btr_node_ptr_delete(tree, merge_page, mtr); + btr_node_ptr_delete(index, merge_page, mtr); } /* Move records to the merge page */ if (is_left) { - orig_pred = page_rec_get_prev - (page_get_supremum_rec(merge_page)); + orig_pred = page_rec_get_prev( + page_get_supremum_rec(merge_page)); page_copy_rec_list_start(merge_page, page, page_get_supremum_rec(page), - cursor->index, mtr); + index, mtr); lock_update_merge_left(merge_page, orig_pred, page); } else { - orig_succ = page_rec_get_next - (page_get_infimum_rec(merge_page)); + orig_succ = page_rec_get_next( + page_get_infimum_rec(merge_page)); page_copy_rec_list_end(merge_page, page, page_get_infimum_rec(page), - cursor->index, mtr); + index, mtr); lock_update_merge_right(orig_succ, page); } /* We have added new records to merge_page: update its free bits */ - ibuf_update_free_bits_if_full(cursor->index, merge_page, + ibuf_update_free_bits_if_full(index, merge_page, UNIV_PAGE_SIZE, ULINT_UNDEFINED); - ut_ad(page_validate(merge_page, cursor->index)); + ut_ad(page_validate(merge_page, index)); /* Free the file page */ - btr_page_free(tree, page, mtr); + btr_page_free(index, page, mtr); - ut_ad(btr_check_node_ptr(tree, merge_page, mtr)); + ut_ad(btr_check_node_ptr(index, merge_page, mtr)); } /***************************************************************** @@ -2197,7 +2177,7 @@ static void btr_discard_only_page_on_level( /*===========================*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page which is the only on its level */ mtr_t* mtr) /* in: mtr */ { @@ -2211,7 +2191,7 @@ btr_discard_only_page_on_level( MTR_MEMO_PAGE_X_FIX)); btr_search_drop_page_hash_index(page); - node_ptr = btr_page_get_father_node_ptr(tree, page, mtr); + node_ptr = btr_page_get_father_node_ptr(index, page, mtr); father_page = 
buf_frame_align(node_ptr); page_level = btr_page_get_level(page, mtr); @@ -2221,19 +2201,19 @@ btr_discard_only_page_on_level( btr_page_set_level(father_page, page_level, mtr); /* Free the file page */ - btr_page_free(tree, page, mtr); + btr_page_free(index, page, mtr); - if (buf_frame_get_page_no(father_page) == dict_tree_get_page(tree)) { + if (buf_frame_get_page_no(father_page) == dict_index_get_page(index)) { /* The father is the root page */ btr_page_empty(father_page, mtr); /* We play safe and reset the free bits for the father */ - ibuf_reset_free_bits(tree->tree_index, father_page); + ibuf_reset_free_bits(index, father_page); } else { ut_ad(page_get_n_recs(father_page) == 1); - btr_discard_only_page_on_level(tree, father_page, mtr); + btr_discard_only_page_on_level(index, father_page, mtr); } } @@ -2249,7 +2229,7 @@ btr_discard_page( the root page */ mtr_t* mtr) /* in: mtr */ { - dict_tree_t* tree; + dict_index_t* index; ulint space; ulint left_page_no; ulint right_page_no; @@ -2258,14 +2238,14 @@ btr_discard_page( rec_t* node_ptr; page = btr_cur_get_page(cursor); - tree = btr_cur_get_tree(cursor); + index = btr_cur_get_index(cursor); - ut_ad(dict_tree_get_page(tree) != buf_frame_get_page_no(page)); - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(dict_index_get_page(index) != buf_frame_get_page_no(page)); + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); - space = dict_tree_get_space(tree); + space = dict_index_get_space(index); /* Decide the page which will inherit the locks */ @@ -2287,7 +2267,7 @@ btr_discard_page( == buf_frame_get_page_no(page)); #endif /* UNIV_BTR_DEBUG */ } else { - btr_discard_only_page_on_level(tree, page, mtr); + btr_discard_only_page_on_level(index, page, mtr); return; } @@ -2307,22 +2287,23 @@ btr_discard_page( btr_set_min_rec_mark(node_ptr, page_is_comp(merge_page), mtr); } - btr_node_ptr_delete(tree, page, mtr); + btr_node_ptr_delete(index, page, mtr); /* Remove the page from the level list */ - btr_level_list_remove(tree, page, mtr); + btr_level_list_remove(page, mtr); if (left_page_no != FIL_NULL) { lock_update_discard(page_get_supremum_rec(merge_page), page); } else { - lock_update_discard(page_rec_get_next - (page_get_infimum_rec(merge_page)), page); + lock_update_discard(page_rec_get_next( + page_get_infimum_rec(merge_page)), + page); } /* Free the file page */ - btr_page_free(tree, page, mtr); + btr_page_free(index, page, mtr); - ut_ad(btr_check_node_ptr(tree, merge_page, mtr)); + ut_ad(btr_check_node_ptr(index, merge_page, mtr)); } #ifdef UNIV_BTR_PRINT @@ -2332,13 +2313,13 @@ Prints size info of a B-tree. 
*/ void btr_print_size( /*===========*/ - dict_tree_t* tree) /* in: index tree */ + dict_index_t* index) /* in: index tree */ { page_t* root; fseg_header_t* seg; mtr_t mtr; - if (tree->type & DICT_IBUF) { + if (index->type & DICT_IBUF) { fputs("Sorry, cannot print info of an ibuf tree:" " use ibuf functions\n", stderr); @@ -2347,14 +2328,14 @@ btr_print_size( mtr_start(&mtr); - root = btr_root_get(tree, &mtr); + root = btr_root_get(index, &mtr); seg = root + PAGE_HEADER + PAGE_BTR_SEG_TOP; fputs("INFO OF THE NON-LEAF PAGE SEGMENT\n", stderr); fseg_print(seg, &mtr); - if (!(tree->type & DICT_UNIVERSAL)) { + if (!(index->type & DICT_UNIVERSAL)) { seg = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; @@ -2371,7 +2352,7 @@ static void btr_print_recursive( /*================*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: index page */ ulint width, /* in: print this many entries from start and end */ @@ -2385,7 +2366,6 @@ btr_print_recursive( mtr_t mtr2; rec_t* node_ptr; page_t* child; - dict_index_t* index; ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); @@ -2393,7 +2373,6 @@ btr_print_recursive( (ulong) btr_page_get_level(page, mtr), (ulong) buf_frame_get_page_no(page)); - index = UT_LIST_GET_FIRST(tree->tree_indexes); page_print(page, index, width, width); n_recs = page_get_n_recs(page); @@ -2417,7 +2396,7 @@ btr_print_recursive( ULINT_UNDEFINED, heap); child = btr_node_ptr_get_child(node_ptr, *offsets, &mtr2); - btr_print_recursive(tree, child, width, + btr_print_recursive(index, child, width, heap, offsets, &mtr2); mtr_commit(&mtr2); } @@ -2431,9 +2410,9 @@ btr_print_recursive( Prints directories and other info of all nodes in the tree. */ void -btr_print_tree( -/*===========*/ - dict_tree_t* tree, /* in: tree */ +btr_print_index( +/*============*/ + dict_index_t* index, /* in: index */ ulint width) /* in: print this many entries from start and end */ { @@ -2449,16 +2428,16 @@ btr_print_tree( mtr_start(&mtr); - root = btr_root_get(tree, &mtr); + root = btr_root_get(index, &mtr); - btr_print_recursive(tree, root, width, &heap, &offsets, &mtr); + btr_print_recursive(index, root, width, &heap, &offsets, &mtr); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } mtr_commit(&mtr); - btr_validate_tree(tree, NULL); + btr_validate_index(index, NULL); } #endif /* UNIV_BTR_PRINT */ @@ -2470,7 +2449,7 @@ ibool btr_check_node_ptr( /*===============*/ /* out: TRUE */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: index page */ mtr_t* mtr) /* in: mtr */ { @@ -2480,12 +2459,12 @@ btr_check_node_ptr( ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); - if (dict_tree_get_page(tree) == buf_frame_get_page_no(page)) { + if (dict_index_get_page(index) == buf_frame_get_page_no(page)) { return(TRUE); } - node_ptr = btr_page_get_father_node_ptr(tree, page, mtr); + node_ptr = btr_page_get_father_node_ptr(index, page, mtr); if (btr_page_get_level(page, mtr) == 0) { @@ -2494,12 +2473,12 @@ btr_check_node_ptr( heap = mem_heap_create(256); - node_ptr_tuple = dict_tree_build_node_ptr - (tree, page_rec_get_next(page_get_infimum_rec(page)), - 0, heap, btr_page_get_level(page, mtr)); + node_ptr_tuple = dict_index_build_node_ptr( + index, page_rec_get_next(page_get_infimum_rec(page)), 0, heap, + btr_page_get_level(page, mtr)); ut_a(!cmp_dtuple_rec(node_ptr_tuple, node_ptr, - rec_get_offsets(node_ptr, tree->tree_index, + rec_get_offsets(node_ptr, index, NULL, 
ULINT_UNDEFINED, &heap))); mem_heap_free(heap); @@ -2588,8 +2567,8 @@ btr_index_rec_validate( offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); for (i = 0; i < n; i++) { - dtype_t* type = dict_index_get_nth_type(index, i); - ulint fixed_size = dtype_get_fixed_size(type); + ulint fixed_size = dict_col_get_fixed_size( + dict_index_get_nth_col(index, i)); rec_get_nth_field(rec, offsets, i, &len); @@ -2608,8 +2587,7 @@ btr_index_rec_validate( fprintf(stderr, "InnoDB: field %lu len is %lu," " should be %lu\n", - (ulong) i, (ulong) len, - (ulong) dtype_get_fixed_size(type)); + (ulong) i, (ulong) len, (ulong) fixed_size); if (dump_on_error) { buf_page_print(page); @@ -2670,6 +2648,7 @@ Report an error on one page of an index tree. */ static void btr_validate_report1( +/*=================*/ /* out: TRUE if ok */ dict_index_t* index, /* in: index */ ulint level, /* in: B-tree level */ @@ -2689,6 +2668,7 @@ Report an error on two pages of an index tree. */ static void btr_validate_report2( +/*=================*/ /* out: TRUE if ok */ dict_index_t* index, /* in: index */ ulint level, /* in: B-tree level */ @@ -2712,7 +2692,7 @@ ibool btr_validate_level( /*===============*/ /* out: TRUE if ok */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ trx_t* trx, /* in: transaction or NULL */ ulint level) /* in: level number */ { @@ -2729,7 +2709,6 @@ btr_validate_level( page_cur_t cursor; dtuple_t* node_ptr_tuple; ibool ret = TRUE; - dict_index_t* index; mtr_t mtr; mem_heap_t* heap = mem_heap_create(256); ulint* offsets = NULL; @@ -2737,14 +2716,12 @@ btr_validate_level( mtr_start(&mtr); - mtr_x_lock(dict_tree_get_lock(tree), &mtr); + mtr_x_lock(dict_index_get_lock(index), &mtr); - page = btr_root_get(tree, &mtr); + page = btr_root_get(index, &mtr); space = buf_frame_get_space_id(page); - index = tree->tree_index; - while (level != btr_page_get_level(page, &mtr)) { ut_a(btr_page_get_level(page, &mtr) > 0); @@ -2768,7 +2745,7 @@ loop: } mem_heap_empty(heap); offsets = offsets2 = NULL; - mtr_x_lock(dict_tree_get_lock(tree), &mtr); + mtr_x_lock(dict_index_get_lock(index), &mtr); /* Check ordering etc. 
of records */ @@ -2794,7 +2771,7 @@ loop: ut_a((page_get_n_recs(page) > 0) || ((level == 0) && (buf_frame_get_page_no(page) - == dict_tree_get_page(tree)))); + == dict_index_get_page(index)))); if (right_page_no != FIL_NULL) { rec_t* right_rec; @@ -2824,8 +2801,8 @@ loop: } rec = page_rec_get_prev(page_get_supremum_rec(page)); - right_rec = page_rec_get_next(page_get_infimum_rec - (right_page)); + right_rec = page_rec_get_next(page_get_infimum_rec( + right_page)); offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); offsets2 = rec_get_offsets(right_rec, index, @@ -2847,8 +2824,8 @@ loop: rec_print(stderr, rec, index); putc('\n', stderr); fputs("InnoDB: record ", stderr); - rec = page_rec_get_next - (page_get_infimum_rec(right_page)); + rec = page_rec_get_next( + page_get_infimum_rec(right_page)); rec_print(stderr, rec, index); putc('\n', stderr); @@ -2857,25 +2834,26 @@ loop: } if (level > 0 && left_page_no == FIL_NULL) { - ut_a(REC_INFO_MIN_REC_FLAG & rec_get_info_bits - (page_rec_get_next(page_get_infimum_rec(page)), - page_is_comp(page))); + ut_a(REC_INFO_MIN_REC_FLAG & rec_get_info_bits( + page_rec_get_next(page_get_infimum_rec(page)), + page_is_comp(page))); } - if (buf_frame_get_page_no(page) != dict_tree_get_page(tree)) { + if (buf_frame_get_page_no(page) != dict_index_get_page(index)) { /* Check father node pointers */ - node_ptr = btr_page_get_father_node_ptr(tree, page, &mtr); + node_ptr = btr_page_get_father_node_ptr(index, page, &mtr); father_page = buf_frame_align(node_ptr); offsets = rec_get_offsets(node_ptr, index, offsets, ULINT_UNDEFINED, &heap); if (btr_node_ptr_get_child_page_no(node_ptr, offsets) != buf_frame_get_page_no(page) - || node_ptr != btr_page_get_father_for_rec - (tree, page, - page_rec_get_prev(page_get_supremum_rec(page)), &mtr)) { + || node_ptr != btr_page_get_father_for_rec( + index, page, + page_rec_get_prev(page_get_supremum_rec(page)), + &mtr)) { btr_validate_report1(index, level, page); fputs("InnoDB: node pointer to the page is wrong\n", @@ -2893,9 +2871,10 @@ loop: (node_ptr, offsets)); fputs("InnoDB: record on page ", stderr); - rec = btr_page_get_father_for_rec - (tree, page, page_rec_get_prev - (page_get_supremum_rec(page)), &mtr); + rec = btr_page_get_father_for_rec( + index, page, + page_rec_get_prev(page_get_supremum_rec(page)), + &mtr); rec_print(stderr, rec, index); putc('\n', stderr); ret = FALSE; @@ -2908,15 +2887,15 @@ loop: offsets, ULINT_UNDEFINED, &heap); - node_ptr_tuple = dict_tree_build_node_ptr - (tree, - page_rec_get_next(page_get_infimum_rec(page)), - 0, heap, btr_page_get_level(page, &mtr)); + node_ptr_tuple = dict_index_build_node_ptr( + index, + page_rec_get_next(page_get_infimum_rec(page)), + 0, heap, btr_page_get_level(page, &mtr)); if (cmp_dtuple_rec(node_ptr_tuple, node_ptr, offsets)) { - rec_t* first_rec = page_rec_get_next - (page_get_infimum_rec(page)); + rec_t* first_rec = page_rec_get_next( + page_get_infimum_rec(page)); btr_validate_report1(index, level, page); @@ -2937,18 +2916,18 @@ loop: } if (left_page_no == FIL_NULL) { - ut_a(node_ptr == page_rec_get_next - (page_get_infimum_rec(father_page))); + ut_a(node_ptr == page_rec_get_next( + page_get_infimum_rec(father_page))); ut_a(btr_page_get_prev(father_page, &mtr) == FIL_NULL); } if (right_page_no == FIL_NULL) { - ut_a(node_ptr == page_rec_get_prev - (page_get_supremum_rec(father_page))); + ut_a(node_ptr == page_rec_get_prev( + page_get_supremum_rec(father_page))); ut_a(btr_page_get_next(father_page, &mtr) == FIL_NULL); } else { - right_node_ptr = 
btr_page_get_father_node_ptr - (tree, right_page, &mtr); + right_node_ptr = btr_page_get_father_node_ptr( + index, right_page, &mtr); if (page_rec_get_next(node_ptr) != page_get_supremum_rec(father_page)) { @@ -2967,12 +2946,12 @@ loop: buf_page_print(right_page); } } else { - right_father_page = buf_frame_align - (right_node_ptr); + right_father_page = buf_frame_align( + right_node_ptr); - if (right_node_ptr != page_rec_get_next - (page_get_infimum_rec - (right_father_page))) { + if (right_node_ptr != page_rec_get_next( + page_get_infimum_rec( + right_father_page))) { ret = FALSE; fputs("InnoDB: node pointer 2 to" " the right page is wrong\n", @@ -3029,10 +3008,10 @@ node_ptr_fails: Checks the consistency of an index tree. */ ibool -btr_validate_tree( -/*==============*/ +btr_validate_index( +/*===============*/ /* out: TRUE if ok */ - dict_tree_t* tree, /* in: tree */ + dict_index_t* index, /* in: index */ trx_t* trx) /* in: transaction or NULL */ { mtr_t mtr; @@ -3041,13 +3020,13 @@ btr_validate_tree( ulint n; mtr_start(&mtr); - mtr_x_lock(dict_tree_get_lock(tree), &mtr); + mtr_x_lock(dict_index_get_lock(index), &mtr); - root = btr_root_get(tree, &mtr); + root = btr_root_get(index, &mtr); n = btr_page_get_level(root, &mtr); for (i = 0; i <= n && !trx_is_interrupted(trx); i++) { - if (!btr_validate_level(tree, trx, n - i)) { + if (!btr_validate_level(index, trx, n - i)) { mtr_commit(&mtr); diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c index 8fbc25c9e79..975d5252a2f 100644 --- a/storage/innobase/btr/btr0cur.c +++ b/storage/innobase/btr/btr0cur.c @@ -272,7 +272,6 @@ btr_cur_search_to_nth_level( RW_S_LATCH, or 0 */ mtr_t* mtr) /* in: mtr */ { - dict_tree_t* tree; page_cur_t* page_cursor; page_t* page; page_t* guess; @@ -303,7 +302,7 @@ btr_cur_search_to_nth_level( ending to upper levels */ ut_ad(level == 0 || mode == PAGE_CUR_LE); - ut_ad(dict_tree_check_search_tuple(index->tree, tuple)); + ut_ad(dict_index_check_search_tuple(index, tuple)); ut_ad(!(index->type & DICT_IBUF) || ibuf_inside()); ut_ad(dtuple_check_typed(tuple)); @@ -374,23 +373,21 @@ btr_cur_search_to_nth_level( savepoint = mtr_set_savepoint(mtr); - tree = index->tree; - if (latch_mode == BTR_MODIFY_TREE) { - mtr_x_lock(dict_tree_get_lock(tree), mtr); + mtr_x_lock(dict_index_get_lock(index), mtr); } else if (latch_mode == BTR_CONT_MODIFY_TREE) { /* Do nothing */ - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); } else { - mtr_s_lock(dict_tree_get_lock(tree), mtr); + mtr_s_lock(dict_index_get_lock(index), mtr); } page_cursor = btr_cur_get_page_cur(cursor); - space = dict_tree_get_space(tree); - page_no = dict_tree_get_page(tree); + space = dict_index_get_space(index); + page_no = dict_index_get_page(index); up_match = 0; up_bytes = 0; @@ -478,7 +475,7 @@ retry_page_get: buf_page_dbg_add_level(page, SYNC_TREE_NODE); } #endif - ut_ad(0 == ut_dulint_cmp(tree->id, + ut_ad(0 == ut_dulint_cmp(index->id, btr_page_get_index_id(page))); if (height == ULINT_UNDEFINED) { @@ -507,9 +504,9 @@ retry_page_get: /* Release the tree s-latch */ - mtr_release_s_latch_at_savepoint - (mtr, savepoint, - dict_tree_get_lock(tree)); + mtr_release_s_latch_at_savepoint( + mtr, savepoint, + dict_index_get_lock(index)); } page_mode = mode; @@ -525,8 +522,8 @@ retry_page_get: /* If this is the desired level, leave the loop */ - ut_ad(height == btr_page_get_level - (page_cur_get_page(page_cursor), mtr)); + ut_ad(height == btr_page_get_level( + 
page_cur_get_page(page_cursor), mtr)); if (level == height) { @@ -598,7 +595,6 @@ btr_cur_open_at_index_side( mtr_t* mtr) /* in: mtr */ { page_cur_t* page_cursor; - dict_tree_t* tree; page_t* page; ulint page_no; ulint space; @@ -615,24 +611,22 @@ btr_cur_open_at_index_side( estimate = latch_mode & BTR_ESTIMATE; latch_mode = latch_mode & ~BTR_ESTIMATE; - tree = index->tree; - /* Store the position of the tree latch we push to mtr so that we know how to release it when we have latched the leaf node */ savepoint = mtr_set_savepoint(mtr); if (latch_mode == BTR_MODIFY_TREE) { - mtr_x_lock(dict_tree_get_lock(tree), mtr); + mtr_x_lock(dict_index_get_lock(index), mtr); } else { - mtr_s_lock(dict_tree_get_lock(tree), mtr); + mtr_s_lock(dict_index_get_lock(index), mtr); } page_cursor = btr_cur_get_page_cur(cursor); cursor->index = index; - space = dict_tree_get_space(tree); - page_no = dict_tree_get_page(tree); + space = dict_index_get_space(index); + page_no = dict_index_get_page(index); height = ULINT_UNDEFINED; @@ -641,7 +635,7 @@ btr_cur_open_at_index_side( BUF_GET, __FILE__, __LINE__, mtr); - ut_ad(0 == ut_dulint_cmp(tree->id, + ut_ad(0 == ut_dulint_cmp(index->id, btr_page_get_index_id(page))); buf_block_align(page)->check_index_page_at_flush = TRUE; @@ -668,9 +662,9 @@ btr_cur_open_at_index_side( /* Release the tree s-latch */ - mtr_release_s_latch_at_savepoint - (mtr, savepoint, - dict_tree_get_lock(tree)); + mtr_release_s_latch_at_savepoint( + mtr, savepoint, + dict_index_get_lock(index)); } } @@ -727,7 +721,6 @@ btr_cur_open_at_rnd_pos( mtr_t* mtr) /* in: mtr */ { page_cur_t* page_cursor; - dict_tree_t* tree; page_t* page; ulint page_no; ulint space; @@ -738,19 +731,17 @@ btr_cur_open_at_rnd_pos( ulint* offsets = offsets_; *offsets_ = (sizeof offsets_) / sizeof *offsets_; - tree = index->tree; - if (latch_mode == BTR_MODIFY_TREE) { - mtr_x_lock(dict_tree_get_lock(tree), mtr); + mtr_x_lock(dict_index_get_lock(index), mtr); } else { - mtr_s_lock(dict_tree_get_lock(tree), mtr); + mtr_s_lock(dict_index_get_lock(index), mtr); } page_cursor = btr_cur_get_page_cur(cursor); cursor->index = index; - space = dict_tree_get_space(tree); - page_no = dict_tree_get_page(tree); + space = dict_index_get_space(index); + page_no = dict_index_get_page(index); height = ULINT_UNDEFINED; @@ -759,7 +750,7 @@ btr_cur_open_at_rnd_pos( BUF_GET, __FILE__, __LINE__, mtr); - ut_ad(0 == ut_dulint_cmp(tree->id, + ut_ad(0 == ut_dulint_cmp(index->id, btr_page_get_index_id(page))); if (height == ULINT_UNDEFINED) { @@ -1017,7 +1008,7 @@ calculate_sizes_again: type = index->type; if ((type & DICT_CLUSTERED) - && (dict_tree_get_space_reserve(index->tree) + rec_size > max_size) + && (dict_index_get_space_reserve() + rec_size > max_size) && (page_get_n_recs(page) >= 2) && (0 == level) && (btr_page_get_split_rec_to_right(cursor, &dummy_rec) @@ -1156,7 +1147,7 @@ btr_cur_pessimistic_insert( page = btr_cur_get_page(cursor); ut_ad(mtr_memo_contains(mtr, - dict_tree_get_lock(btr_cur_get_tree(cursor)), + dict_index_get_lock(btr_cur_get_index(cursor)), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); @@ -1218,8 +1209,7 @@ btr_cur_pessimistic_insert( } } - if (dict_tree_get_page(index->tree) - == buf_frame_get_page_no(page)) { + if (dict_index_get_page(index) == buf_frame_get_page_no(page)) { /* The page is the root page */ *rec = btr_root_raise_and_insert(cursor, entry, mtr); @@ -1292,10 +1282,10 @@ btr_cur_upd_lock_and_undo( ulint offsets_[REC_OFFS_NORMAL_SIZE]; *offsets_ = (sizeof 
offsets_) / sizeof *offsets_; - err = lock_clust_rec_modify_check_and_lock - (flags, rec, index, - rec_get_offsets(rec, index, offsets_, - ULINT_UNDEFINED, &heap), thr); + err = lock_clust_rec_modify_check_and_lock( + flags, rec, index, + rec_get_offsets(rec, index, offsets_, + ULINT_UNDEFINED, &heap), thr); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } @@ -1328,7 +1318,7 @@ btr_cur_update_in_place_log( mtr_t* mtr) /* in: mtr */ { byte* log_ptr; - page_t* page = ut_align_down(rec, UNIV_PAGE_SIZE); + page_t* page = page_align(rec); ut_ad(flags < 256); ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table)); @@ -1355,7 +1345,7 @@ btr_cur_update_in_place_log( log_ptr = row_upd_write_sys_vals_to_log(index, trx, roll_ptr, log_ptr, mtr); - mach_write_to_2(log_ptr, ut_align_offset(rec, UNIV_PAGE_SIZE)); + mach_write_to_2(log_ptr, page_offset(rec)); log_ptr += 2; row_upd_index_write_log(update, log_ptr, mtr); @@ -1514,11 +1504,8 @@ btr_cur_update_in_place( row_upd_rec_sys_fields(rec, index, offsets, trx, roll_ptr); } - /* FIXME: in a mixed tree, all records may not have enough ordering - fields for btr search: */ - - was_delete_marked = rec_get_deleted_flag - (rec, page_is_comp(buf_block_get_frame(block))); + was_delete_marked = rec_get_deleted_flag( + rec, page_is_comp(buf_block_get_frame(block))); row_upd_rec_in_place(rec, offsets, update); @@ -1529,8 +1516,8 @@ btr_cur_update_in_place( btr_cur_update_in_place_log(flags, rec, index, update, trx, roll_ptr, mtr); if (was_delete_marked - && !rec_get_deleted_flag(rec, page_is_comp - (buf_block_get_frame(block)))) { + && !rec_get_deleted_flag(rec, page_is_comp( + buf_block_get_frame(block)))) { /* The new updated record owns its possible externally stored fields */ @@ -1801,7 +1788,6 @@ btr_cur_pessimistic_update( big_rec_t* dummy_big_rec; dict_index_t* index; page_t* page; - dict_tree_t* tree; rec_t* rec; page_cur_t* page_cursor; dtuple_t* new_entry; @@ -1825,9 +1811,8 @@ btr_cur_pessimistic_update( page = btr_cur_get_page(cursor); rec = btr_cur_get_rec(cursor); index = cursor->index; - tree = index->tree; - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); @@ -1912,8 +1897,8 @@ btr_cur_pessimistic_update( n_ext_vect = btr_push_update_extern_fields(ext_vect, offsets, update); if (UNIV_UNLIKELY(rec_get_converted_size(index, new_entry) - >= ut_min(page_get_free_space_of_empty - (page_is_comp(page)) / 2, + >= ut_min(page_get_free_space_of_empty( + page_is_comp(page)) / 2, REC_MAX_DATA_SIZE))) { big_rec_vec = dtuple_convert_big_rec(index, new_entry, @@ -2064,7 +2049,7 @@ btr_cur_del_mark_set_clust_rec_log( log_ptr = row_upd_write_sys_vals_to_log(index, trx, roll_ptr, log_ptr, mtr); - mach_write_to_2(log_ptr, ut_align_offset(rec, UNIV_PAGE_SIZE)); + mach_write_to_2(log_ptr, page_offset(rec)); log_ptr += 2; mlog_close(mtr, log_ptr); @@ -2129,10 +2114,10 @@ btr_cur_parse_del_mark_set_clust_rec( ulint offsets_[REC_OFFS_NORMAL_SIZE]; *offsets_ = (sizeof offsets_) / sizeof *offsets_; - row_upd_rec_sys_fields_in_recovery - (rec, rec_get_offsets(rec, index, offsets_, - ULINT_UNDEFINED, &heap), - pos, trx_id, roll_ptr); + row_upd_rec_sys_fields_in_recovery( + rec, rec_get_offsets(rec, index, offsets_, + ULINT_UNDEFINED, &heap), + pos, trx_id, roll_ptr); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } @@ -2261,12 +2246,12 @@ btr_cur_del_mark_set_sec_rec_log( return; } - log_ptr = 
mlog_write_initial_log_record_fast - (rec, MLOG_REC_SEC_DELETE_MARK, log_ptr, mtr); + log_ptr = mlog_write_initial_log_record_fast( + rec, MLOG_REC_SEC_DELETE_MARK, log_ptr, mtr); mach_write_to_1(log_ptr, val); log_ptr++; - mach_write_to_2(log_ptr, ut_align_offset(rec, UNIV_PAGE_SIZE)); + mach_write_to_2(log_ptr, page_offset(rec)); log_ptr += 2; mlog_close(mtr, log_ptr); @@ -2404,7 +2389,7 @@ btr_cur_compress( mtr_t* mtr) /* in: mtr */ { ut_ad(mtr_memo_contains(mtr, - dict_tree_get_lock(btr_cur_get_tree(cursor)), + dict_index_get_lock(btr_cur_get_index(cursor)), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)), MTR_MEMO_PAGE_X_FIX)); @@ -2430,7 +2415,7 @@ btr_cur_compress_if_useful( mtr_t* mtr) /* in: mtr */ { ut_ad(mtr_memo_contains(mtr, - dict_tree_get_lock(btr_cur_get_tree(cursor)), + dict_index_get_lock(btr_cur_get_index(cursor)), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)), MTR_MEMO_PAGE_X_FIX)); @@ -2483,8 +2468,8 @@ btr_cur_optimistic_delete( ULINT_UNDEFINED, &heap); no_compress_needed = !rec_offs_any_extern(offsets) - && btr_cur_can_delete_without_compress - (cursor, rec_offs_size(offsets), mtr); + && btr_cur_can_delete_without_compress( + cursor, rec_offs_size(offsets), mtr); if (no_compress_needed) { @@ -2492,8 +2477,8 @@ btr_cur_optimistic_delete( btr_search_update_hash_on_delete(cursor); - max_ins_size = page_get_max_insert_size_after_reorganize - (page, 1); + max_ins_size = page_get_max_insert_size_after_reorganize( + page, 1); page_cur_delete_rec(btr_cur_get_page_cur(cursor), cursor->index, offsets, mtr); @@ -2537,7 +2522,7 @@ btr_cur_pessimistic_delete( mtr_t* mtr) /* in: mtr */ { page_t* page; - dict_tree_t* tree; + dict_index_t* index; rec_t* rec; dtuple_t* node_ptr; ulint n_extents = 0; @@ -2549,9 +2534,9 @@ btr_cur_pessimistic_delete( ulint* offsets; page = btr_cur_get_page(cursor); - tree = btr_cur_get_tree(cursor); + index = btr_cur_get_index(cursor); - ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree), + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_X_FIX)); @@ -2563,7 +2548,7 @@ btr_cur_pessimistic_delete( n_extents = cursor->tree_height / 32 + 1; success = fsp_reserve_free_extents(&n_reserved, - cursor->index->space, + index->space, n_extents, FSP_CLEANING, mtr); if (!success) { @@ -2576,8 +2561,7 @@ btr_cur_pessimistic_delete( heap = mem_heap_create(1024); rec = btr_cur_get_rec(cursor); - offsets = rec_get_offsets(rec, cursor->index, - NULL, ULINT_UNDEFINED, &heap); + offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); /* Free externally stored fields if the record is neither a node pointer nor in two-byte format. @@ -2585,13 +2569,13 @@ btr_cur_pessimistic_delete( if (page_is_comp(page) ? 
!rec_get_node_ptr_flag(rec) : !rec_get_1byte_offs_flag(rec)) { - btr_rec_free_externally_stored_fields(cursor->index, + btr_rec_free_externally_stored_fields(index, rec, offsets, in_rollback, mtr); } if (UNIV_UNLIKELY(page_get_n_recs(page) < 2) - && UNIV_UNLIKELY(dict_tree_get_page(btr_cur_get_tree(cursor)) + && UNIV_UNLIKELY(dict_index_get_page(btr_cur_get_index(cursor)) != buf_frame_get_page_no(page))) { /* If there is only one record, drop the whole page in @@ -2609,8 +2593,8 @@ btr_cur_pessimistic_delete( level = btr_page_get_level(page, mtr); if (level > 0 - && UNIV_UNLIKELY(rec == page_rec_get_next - (page_get_infimum_rec(page)))) { + && UNIV_UNLIKELY(rec == page_rec_get_next( + page_get_infimum_rec(page)))) { rec_t* next_rec = page_rec_get_next(rec); @@ -2628,23 +2612,22 @@ btr_cur_pessimistic_delete( so that it is equal to the new leftmost node pointer on the page */ - btr_node_ptr_delete(tree, page, mtr); + btr_node_ptr_delete(index, page, mtr); - node_ptr = dict_tree_build_node_ptr - (tree, next_rec, buf_frame_get_page_no(page), - heap, level); + node_ptr = dict_index_build_node_ptr( + index, next_rec, buf_frame_get_page_no(page), + heap, level); - btr_insert_on_non_leaf_level(tree, + btr_insert_on_non_leaf_level(index, level + 1, node_ptr, mtr); } } btr_search_update_hash_on_delete(cursor); - page_cur_delete_rec(btr_cur_get_page_cur(cursor), cursor->index, - offsets, mtr); + page_cur_delete_rec(btr_cur_get_page_cur(cursor), index, offsets, mtr); - ut_ad(btr_check_node_ptr(tree, page, mtr)); + ut_ad(btr_check_node_ptr(index, page, mtr)); *err = DB_SUCCESS; @@ -2656,8 +2639,7 @@ return_after_reservations: } if (n_extents > 0) { - fil_space_release_free_extents(cursor->index->space, - n_reserved); + fil_space_release_free_extents(index->space, n_reserved); } return(ret); @@ -2939,8 +2921,8 @@ btr_estimate_number_of_different_key_vals( } total_external_size - += btr_rec_get_externally_stored_len - (rec, offsets_rec); + += btr_rec_get_externally_stored_len( + rec, offsets_rec); rec = next_rec; /* Initialize offsets_rec for the next round @@ -2974,8 +2956,8 @@ btr_estimate_number_of_different_key_vals( offsets_rec = rec_get_offsets(rec, index, offsets_rec, ULINT_UNDEFINED, &heap); - total_external_size += btr_rec_get_externally_stored_len - (rec, offsets_rec); + total_external_size += btr_rec_get_externally_stored_len( + rec, offsets_rec); mtr_commit(&mtr); } @@ -3137,8 +3119,8 @@ btr_cur_mark_extern_inherited_fields( } if (!is_updated) { - btr_cur_set_ownership_of_extern_field - (rec, offsets, i, FALSE, mtr); + btr_cur_set_ownership_of_extern_field( + rec, offsets, i, FALSE, mtr); } } } @@ -3291,8 +3273,8 @@ btr_push_update_extern_fields( if (upd_get_nth_field(update, i)->extern_storage) { - ext_vect[n_pushed] = upd_get_nth_field - (update, i)->field_no; + ext_vect[n_pushed] = upd_get_nth_field( + update, i)->field_no; n_pushed++; } @@ -3389,7 +3371,7 @@ btr_store_big_rec_extern_fields( mtr_t mtr; ut_ad(rec_offs_validate(rec, index, offsets)); - ut_ad(mtr_memo_contains(local_mtr, dict_tree_get_lock(index->tree), + ut_ad(mtr_memo_contains(local_mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(local_mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)); @@ -3422,7 +3404,7 @@ btr_store_big_rec_extern_fields( hint_page_no = prev_page_no + 1; } - page = btr_page_alloc(index->tree, hint_page_no, + page = btr_page_alloc(index, hint_page_no, FSP_NO_DIR, 0, &mtr); if (page == NULL) { @@ -3509,10 +3491,10 @@ btr_store_big_rec_extern_fields( /* Set the bit denoting that 
this field in rec is stored externally */ - rec_set_nth_field_extern_bit - (rec, index, - big_rec_vec->fields[i].field_no, - TRUE, &mtr); + rec_set_nth_field_extern_bit( + rec, index, + big_rec_vec->fields[i].field_no, + TRUE, &mtr); } prev_page_no = page_no; @@ -3563,7 +3545,7 @@ btr_free_externally_stored_field( mtr_t mtr; ut_a(local_len >= BTR_EXTERN_FIELD_REF_SIZE); - ut_ad(mtr_memo_contains(local_mtr, dict_tree_get_lock(index->tree), + ut_ad(mtr_memo_contains(local_mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK)); ut_ad(mtr_memo_contains(local_mtr, buf_block_align(data), MTR_MEMO_PAGE_X_FIX)); @@ -3635,7 +3617,7 @@ btr_free_externally_stored_field( because we did not store it on the page (we save the space overhead from an index page header. */ - btr_page_free_low(index->tree, page, 0, &mtr); + btr_page_free_low(index, page, 0, &mtr); mlog_write_ulint(data + local_len + BTR_EXTERN_PAGE_NO, next_page_no, diff --git a/storage/innobase/btr/btr0pcur.c b/storage/innobase/btr/btr0pcur.c index a8d8b2f072a..65b3c90c809 100644 --- a/storage/innobase/btr/btr0pcur.c +++ b/storage/innobase/btr/btr0pcur.c @@ -76,20 +76,20 @@ btr_pcur_store_position( { page_cur_t* page_cursor; rec_t* rec; - dict_tree_t* tree; + dict_index_t* index; page_t* page; ulint offs; ut_a(cursor->pos_state == BTR_PCUR_IS_POSITIONED); ut_ad(cursor->latch_mode != BTR_NO_LATCHES); - tree = btr_cur_get_tree(btr_pcur_get_btr_cur(cursor)); + index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor)); page_cursor = btr_pcur_get_page_cur(cursor); rec = page_cur_get_rec(page_cursor); - page = ut_align_down(rec, UNIV_PAGE_SIZE); - offs = ut_align_offset(rec, UNIV_PAGE_SIZE); + page = page_align(rec); + offs = page_offset(rec); ut_ad(mtr_memo_contains(mtr, buf_block_align(page), MTR_MEMO_PAGE_S_FIX) @@ -133,13 +133,13 @@ btr_pcur_store_position( } cursor->old_stored = BTR_PCUR_OLD_STORED; - cursor->old_rec = dict_tree_copy_rec_order_prefix - (tree, rec, &cursor->old_n_fields, - &cursor->old_rec_buf, &cursor->buf_size); + cursor->old_rec = dict_index_copy_rec_order_prefix( + index, rec, &cursor->old_n_fields, + &cursor->old_rec_buf, &cursor->buf_size); cursor->block_when_stored = buf_block_align(page); - cursor->modify_clock = buf_block_get_modify_clock - (cursor->block_when_stored); + cursor->modify_clock = buf_block_get_modify_clock( + cursor->block_when_stored); } /****************************************************************** @@ -197,13 +197,15 @@ btr_pcur_restore_position( btr_pcur_t* cursor, /* in: detached persistent cursor */ mtr_t* mtr) /* in: mtr */ { - dict_tree_t* tree; + dict_index_t* index; page_t* page; dtuple_t* tuple; ulint mode; ulint old_mode; mem_heap_t* heap; + index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor)); + if (UNIV_UNLIKELY(cursor->old_stored != BTR_PCUR_OLD_STORED) || UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED && cursor->pos_state != BTR_PCUR_IS_POSITIONED)) { @@ -215,17 +217,16 @@ btr_pcur_restore_position( ut_error; } - if (UNIV_UNLIKELY - (cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE - || cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) { + if (UNIV_UNLIKELY( + cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE + || cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) { /* In these cases we do not try an optimistic restoration, but always do a search */ - btr_cur_open_at_index_side - (cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE, - btr_pcur_get_btr_cur(cursor)->index, latch_mode, - btr_pcur_get_btr_cur(cursor), mtr); + btr_cur_open_at_index_side( + cursor->rel_pos == 
BTR_PCUR_BEFORE_FIRST_IN_TREE, + index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr); cursor->block_when_stored = buf_block_align(btr_pcur_get_page(cursor)); @@ -242,10 +243,10 @@ btr_pcur_restore_position( || UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) { /* Try optimistic restoration */ - if (UNIV_LIKELY - (buf_page_optimistic_get(latch_mode, - cursor->block_when_stored, page, - cursor->modify_clock, mtr))) { + if (UNIV_LIKELY(buf_page_optimistic_get( + latch_mode, + cursor->block_when_stored, page, + cursor->modify_clock, mtr))) { cursor->pos_state = BTR_PCUR_IS_POSITIONED; #ifdef UNIV_SYNC_DEBUG buf_page_dbg_add_level(page, SYNC_TREE_NODE); @@ -255,20 +256,18 @@ btr_pcur_restore_position( rec_t* rec; ulint* offsets1; ulint* offsets2; - dict_index_t* index; #endif /* UNIV_DEBUG */ cursor->latch_mode = latch_mode; #ifdef UNIV_DEBUG rec = btr_pcur_get_rec(cursor); - index = btr_pcur_get_btr_cur(cursor)->index; heap = mem_heap_create(256); - offsets1 = rec_get_offsets - (cursor->old_rec, index, NULL, - cursor->old_n_fields, &heap); - offsets2 = rec_get_offsets - (rec, index, NULL, - cursor->old_n_fields, &heap); + offsets1 = rec_get_offsets( + cursor->old_rec, index, NULL, + cursor->old_n_fields, &heap); + offsets2 = rec_get_offsets( + rec, index, NULL, + cursor->old_n_fields, &heap); ut_ad(!cmp_rec_rec(cursor->old_rec, rec, offsets1, offsets2, @@ -286,9 +285,8 @@ btr_pcur_restore_position( heap = mem_heap_create(256); - tree = btr_cur_get_tree(btr_pcur_get_btr_cur(cursor)); - tuple = dict_tree_build_data_tuple(tree, cursor->old_rec, - cursor->old_n_fields, heap); + tuple = dict_index_build_data_tuple(index, cursor->old_rec, + cursor->old_n_fields, heap); /* Save the old search mode of the cursor */ old_mode = cursor->search_mode; @@ -302,8 +300,8 @@ btr_pcur_restore_position( mode = PAGE_CUR_L; } - btr_pcur_open_with_no_init(btr_pcur_get_btr_cur(cursor)->index, tuple, - mode, latch_mode, cursor, 0, mtr); + btr_pcur_open_with_no_init(index, tuple, mode, latch_mode, + cursor, 0, mtr); /* Restore the old search mode */ cursor->search_mode = old_mode; @@ -311,19 +309,18 @@ btr_pcur_restore_position( if (cursor->rel_pos == BTR_PCUR_ON && btr_pcur_is_on_user_rec(cursor, mtr) && 0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor), - rec_get_offsets - (btr_pcur_get_rec(cursor), - btr_pcur_get_btr_cur(cursor)->index, - NULL, ULINT_UNDEFINED, &heap))) { + rec_get_offsets( + btr_pcur_get_rec(cursor), index, + NULL, ULINT_UNDEFINED, &heap))) { /* We have to store the NEW value for the modify clock, since the cursor can now be on a different page! But we can retain the value of old_rec */ - cursor->block_when_stored = buf_block_align - (btr_pcur_get_page(cursor)); - cursor->modify_clock = buf_block_get_modify_clock - (cursor->block_when_stored); + cursor->block_when_stored = buf_block_align( + btr_pcur_get_page(cursor)); + cursor->modify_clock = buf_block_get_modify_clock( + cursor->block_when_stored); cursor->old_stored = BTR_PCUR_OLD_STORED; mem_heap_free(heap); diff --git a/storage/innobase/btr/btr0sea.c b/storage/innobase/btr/btr0sea.c index bb089c4c417..2fe3606a390 100644 --- a/storage/innobase/btr/btr0sea.c +++ b/storage/innobase/btr/btr0sea.c @@ -74,7 +74,7 @@ btr_search_build_page_hash_index( ulint n_fields,/* in: hash this many full fields */ ulint n_bytes,/* in: hash this many bytes from the next field */ - ulint side); /* in: hash for searches from this side */ + ibool left_side);/* in: hash for searches from left side? 
*/ /********************************************************************* This function should be called before reserving any btr search mutex, if @@ -157,10 +157,10 @@ btr_search_info_create( info = mem_heap_alloc(heap, sizeof(btr_search_t)); +#ifdef UNIV_DEBUG info->magic_n = BTR_SEARCH_MAGIC_N; +#endif /* UNIV_DEBUG */ - info->last_search = NULL; - info->n_direction = 0; info->root_guess = NULL; info->hash_analysis = 0; @@ -179,7 +179,7 @@ btr_search_info_create( info->n_fields = 1; info->n_bytes = 0; - info->side = BTR_SEARCH_LEFT_SIDE; + info->left_side = TRUE; return(info); } @@ -224,7 +224,7 @@ btr_search_info_update_hash( hash prefix */ if (info->n_fields >= n_unique && cursor->up_match >= n_unique) { - +increment_potential: info->n_hash_potential++; return; @@ -233,8 +233,7 @@ btr_search_info_update_hash( cmp = ut_pair_cmp(info->n_fields, info->n_bytes, cursor->low_match, cursor->low_bytes); - if ((info->side == BTR_SEARCH_LEFT_SIDE && cmp <= 0) - || (info->side == BTR_SEARCH_RIGHT_SIDE && cmp > 0)) { + if (info->left_side ? cmp <= 0 : cmp > 0) { goto set_new_recomm; } @@ -242,16 +241,11 @@ btr_search_info_update_hash( cmp = ut_pair_cmp(info->n_fields, info->n_bytes, cursor->up_match, cursor->up_bytes); - if ((info->side == BTR_SEARCH_LEFT_SIDE && cmp > 0) - || (info->side == BTR_SEARCH_RIGHT_SIDE && cmp <= 0)) { + if (info->left_side ? cmp <= 0 : cmp > 0) { - goto set_new_recomm; + goto increment_potential; } - info->n_hash_potential++; - - return; - set_new_recomm: /* We have to set a new recommendation; skip the hash analysis for a while to avoid unnecessary CPU time usage when there is no @@ -269,7 +263,7 @@ set_new_recomm: info->n_fields = 1; info->n_bytes = 0; - info->side = BTR_SEARCH_LEFT_SIDE; + info->left_side = TRUE; } else if (cmp > 0) { info->n_hash_potential = 1; @@ -288,7 +282,7 @@ set_new_recomm: info->n_bytes = cursor->low_bytes + 1; } - info->side = BTR_SEARCH_LEFT_SIDE; + info->left_side = TRUE; } else { info->n_hash_potential = 1; @@ -306,7 +300,7 @@ set_new_recomm: info->n_bytes = cursor->up_bytes + 1; } - info->side = BTR_SEARCH_RIGHT_SIDE; + info->left_side = FALSE; } } @@ -322,7 +316,8 @@ btr_search_update_block_hash_info( the block is recommended */ btr_search_t* info, /* in: search info */ buf_block_t* block, /* in: buffer block */ - btr_cur_t* cursor) /* in: cursor */ + btr_cur_t* cursor __attribute__((unused))) + /* in: cursor */ { #ifdef UNIV_SYNC_DEBUG ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); @@ -335,18 +330,18 @@ btr_search_update_block_hash_info( info->last_hash_succ = FALSE; ut_a(block->magic_n == BUF_BLOCK_MAGIC_N); - ut_a(info->magic_n == BTR_SEARCH_MAGIC_N); + ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N); if ((block->n_hash_helps > 0) && (info->n_hash_potential > 0) && (block->n_fields == info->n_fields) && (block->n_bytes == info->n_bytes) - && (block->side == info->side)) { + && (block->left_side == info->left_side)) { if ((block->is_hashed) && (block->curr_n_fields == info->n_fields) && (block->curr_n_bytes == info->n_bytes) - && (block->curr_side == info->side)) { + && (block->curr_left_side == info->left_side)) { /* The search would presumably have succeeded using the hash index */ @@ -359,12 +354,14 @@ btr_search_update_block_hash_info( block->n_hash_helps = 1; block->n_fields = info->n_fields; block->n_bytes = info->n_bytes; - block->side = info->side; + block->left_side = info->left_side; } +#ifdef UNIV_DEBUG if (cursor->index->table->does_not_fit_in_memory) { block->n_hash_helps = 0; } +#endif /* UNIV_DEBUG */ if 
((block->n_hash_helps > page_get_n_recs(block->frame) / BTR_SEARCH_PAGE_BUILD_LIMIT) @@ -375,7 +372,7 @@ btr_search_update_block_hash_info( > 2 * page_get_n_recs(block->frame)) || (block->n_fields != block->curr_n_fields) || (block->n_bytes != block->curr_n_bytes) - || (block->side != block->curr_side)) { + || (block->left_side != block->curr_left_side)) { /* Build a new hash index on the page */ @@ -404,7 +401,7 @@ btr_search_update_hash_ref( { ulint fold; rec_t* rec; - dulint tree_id; + dulint index_id; ut_ad(cursor->flag == BTR_CUR_HASH_FAIL); #ifdef UNIV_SYNC_DEBUG @@ -419,7 +416,7 @@ btr_search_update_hash_ref( && (info->n_hash_potential > 0) && (block->curr_n_fields == info->n_fields) && (block->curr_n_bytes == info->n_bytes) - && (block->curr_side == info->side)) { + && (block->curr_left_side == info->left_side)) { mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; *offsets_ = (sizeof offsets_) / sizeof *offsets_; @@ -431,12 +428,12 @@ btr_search_update_hash_ref( return; } - tree_id = ((cursor->index)->tree)->id; + index_id = cursor->index->id; fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_, ULINT_UNDEFINED, &heap), block->curr_n_fields, - block->curr_n_bytes, tree_id); + block->curr_n_bytes, index_id); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } @@ -509,7 +506,7 @@ btr_search_info_update_slow( params = mem_alloc(3 * sizeof(ulint)); params[0] = block->n_fields; params[1] = block->n_bytes; - params[2] = block->side; + params[2] = block->left_side; /* Make sure the compiler cannot deduce the values and do optimizations */ @@ -618,8 +615,8 @@ btr_search_check_guess( prev_rec = page_rec_get_prev(rec); if (page_rec_is_infimum(prev_rec)) { - success = btr_page_get_prev - (buf_frame_align(prev_rec), mtr) == FIL_NULL; + success = btr_page_get_prev( + buf_frame_align(prev_rec), mtr) == FIL_NULL; goto exit_func; } @@ -643,8 +640,9 @@ btr_search_check_guess( next_rec = page_rec_get_next(rec); if (page_rec_is_supremum(next_rec)) { - if (btr_page_get_next - (buf_frame_align(next_rec), mtr) == FIL_NULL) { + if (btr_page_get_next( + buf_frame_align(next_rec), mtr) + == FIL_NULL) { cursor->up_match = 0; success = TRUE; @@ -702,7 +700,7 @@ btr_search_guess_on_hash( page_t* page; ulint fold; ulint tuple_n_fields; - dulint tree_id; + dulint index_id; ibool can_only_compare_to_cursor_rec = TRUE; #ifdef notdefined btr_cur_t cursor2; @@ -736,12 +734,12 @@ btr_search_guess_on_hash( return(FALSE); } - tree_id = (index->tree)->id; + index_id = index->id; #ifdef UNIV_SEARCH_PERF_STAT info->n_hash_succ++; #endif - fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, tree_id); + fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id); cursor->fold = fold; cursor->flag = BTR_CUR_HASH; @@ -763,10 +761,11 @@ btr_search_guess_on_hash( if (UNIV_LIKELY(!has_search_latch)) { - if (UNIV_UNLIKELY - (!buf_page_get_known_nowait(latch_mode, page, - BUF_MAKE_YOUNG, - __FILE__, __LINE__, mtr))) { + if (UNIV_UNLIKELY( + !buf_page_get_known_nowait(latch_mode, page, + BUF_MAKE_YOUNG, + __FILE__, __LINE__, + mtr))) { goto failure_unlock; } @@ -801,7 +800,8 @@ btr_search_guess_on_hash( is positioned on. We cannot look at the next of the previous record to determine if our guess for the cursor position is right. 
*/ - if (UNIV_EXPECT(ut_dulint_cmp(tree_id, btr_page_get_index_id(page)), 0) + if (UNIV_EXPECT( + ut_dulint_cmp(index_id, btr_page_get_index_id(page)), 0) || !btr_search_check_guess(cursor, can_only_compare_to_cursor_rec, tuple, mode, mtr)) { @@ -904,7 +904,7 @@ btr_search_drop_page_hash_index( rec_t* rec; ulint fold; ulint prev_fold; - dulint tree_id; + dulint index_id; ulint n_cached; ulint n_recs; ulint* folds; @@ -961,9 +961,9 @@ retry: rec = page_get_infimum_rec(page); rec = page_rec_get_next(rec); - tree_id = btr_page_get_index_id(page); + index_id = btr_page_get_index_id(page); - ut_a(0 == ut_dulint_cmp(tree_id, index->id)); + ut_a(0 == ut_dulint_cmp(index_id, index->id)); prev_fold = 0; @@ -971,12 +971,10 @@ retry: offsets = NULL; while (!page_rec_is_supremum(rec)) { - /* FIXME: in a mixed tree, not all records may have enough - ordering fields: */ offsets = rec_get_offsets(rec, index, offsets, n_fields + (n_bytes > 0), &heap); ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0)); - fold = rec_fold(rec, offsets, n_fields, n_bytes, tree_id); + fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id); if (fold == prev_fold && prev_fold != 0) { @@ -1101,7 +1099,7 @@ btr_search_build_page_hash_index( ulint n_fields,/* in: hash this many full fields */ ulint n_bytes,/* in: hash this many bytes from the next field */ - ulint side) /* in: hash for searches from this side */ + ibool left_side)/* in: hash for searches from left side? */ { hash_table_t* table; buf_block_t* block; @@ -1109,7 +1107,7 @@ btr_search_build_page_hash_index( rec_t* next_rec; ulint fold; ulint next_fold; - dulint tree_id; + dulint index_id; ulint n_cached; ulint n_recs; ulint* folds; @@ -1135,7 +1133,7 @@ btr_search_build_page_hash_index( if (block->is_hashed && ((block->curr_n_fields != n_fields) || (block->curr_n_bytes != n_bytes) - || (block->curr_side != side))) { + || (block->curr_left_side != left_side))) { rw_lock_s_unlock(&btr_search_latch); @@ -1172,7 +1170,7 @@ btr_search_build_page_hash_index( n_cached = 0; - tree_id = btr_page_get_index_id(page); + index_id = btr_page_get_index_id(page); rec = page_get_infimum_rec(page); rec = page_rec_get_next(rec); @@ -1188,11 +1186,9 @@ btr_search_build_page_hash_index( } } - /* FIXME: in a mixed tree, all records may not have enough ordering - fields: */ - fold = rec_fold(rec, offsets, n_fields, n_bytes, tree_id); + fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id); - if (side == BTR_SEARCH_LEFT_SIDE) { + if (left_side) { folds[n_cached] = fold; recs[n_cached] = rec; @@ -1204,7 +1200,7 @@ btr_search_build_page_hash_index( if (page_rec_is_supremum(next_rec)) { - if (side == BTR_SEARCH_RIGHT_SIDE) { + if (!left_side) { folds[n_cached] = fold; recs[n_cached] = rec; @@ -1217,12 +1213,12 @@ btr_search_build_page_hash_index( offsets = rec_get_offsets(next_rec, index, offsets, n_fields + (n_bytes > 0), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, - n_bytes, tree_id); + n_bytes, index_id); if (fold != next_fold) { /* Insert an entry into the hash index */ - if (side == BTR_SEARCH_LEFT_SIDE) { + if (left_side) { folds[n_cached] = next_fold; recs[n_cached] = next_rec; @@ -1244,7 +1240,7 @@ btr_search_build_page_hash_index( if (block->is_hashed && ((block->curr_n_fields != n_fields) || (block->curr_n_bytes != n_bytes) - || (block->curr_side != side))) { + || (block->curr_left_side != left_side))) { goto exit_func; } @@ -1253,7 +1249,7 @@ btr_search_build_page_hash_index( block->curr_n_fields = n_fields; block->curr_n_bytes = n_bytes; - 
block->curr_side = side; + block->curr_left_side = left_side; block->index = index; for (i = 0; i < n_cached; i++) { @@ -1292,7 +1288,7 @@ btr_search_move_or_delete_hash_entries( buf_block_t* new_block; ulint n_fields; ulint n_bytes; - ulint side; + ibool left_side; block = buf_block_align(page); new_block = buf_block_align(new_page); @@ -1320,22 +1316,23 @@ btr_search_move_or_delete_hash_entries( n_fields = block->curr_n_fields; n_bytes = block->curr_n_bytes; - side = block->curr_side; + left_side = block->curr_left_side; new_block->n_fields = block->curr_n_fields; new_block->n_bytes = block->curr_n_bytes; - new_block->side = block->curr_side; + new_block->left_side = left_side; rw_lock_s_unlock(&btr_search_latch); ut_a(n_fields + n_bytes > 0); btr_search_build_page_hash_index(index, new_page, n_fields, - n_bytes, side); + n_bytes, left_side); +#if 1 /* TODO: safe to remove? */ ut_a(n_fields == block->curr_n_fields); ut_a(n_bytes == block->curr_n_bytes); - ut_a(side == block->curr_side); - + ut_a(left_side == block->curr_left_side); +#endif return; } @@ -1356,7 +1353,7 @@ btr_search_update_hash_on_delete( buf_block_t* block; rec_t* rec; ulint fold; - dulint tree_id; + dulint index_id; ibool found; ulint offsets_[REC_OFFS_NORMAL_SIZE]; mem_heap_t* heap = NULL; @@ -1380,10 +1377,10 @@ btr_search_update_hash_on_delete( table = btr_search_sys->hash_index; - tree_id = cursor->index->tree->id; + index_id = cursor->index->id; fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_, ULINT_UNDEFINED, &heap), - block->curr_n_fields, block->curr_n_bytes, tree_id); + block->curr_n_fields, block->curr_n_bytes, index_id); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } @@ -1429,7 +1426,7 @@ btr_search_update_hash_node_on_insert( if ((cursor->flag == BTR_CUR_HASH) && (cursor->n_fields == block->curr_n_fields) && (cursor->n_bytes == block->curr_n_bytes) - && (block->curr_side == BTR_SEARCH_RIGHT_SIDE)) { + && !block->curr_left_side) { table = btr_search_sys->hash_index; @@ -1460,13 +1457,13 @@ btr_search_update_hash_on_insert( rec_t* rec; rec_t* ins_rec; rec_t* next_rec; - dulint tree_id; + dulint index_id; ulint fold; ulint ins_fold; ulint next_fold = 0; /* remove warning (??? bug ???) 
*/ ulint n_fields; ulint n_bytes; - ulint side; + ibool left_side; ibool locked = FALSE; mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; @@ -1492,32 +1489,32 @@ btr_search_update_hash_on_insert( ut_a(block->index == cursor->index); - tree_id = ((cursor->index)->tree)->id; + index_id = cursor->index->id; n_fields = block->curr_n_fields; n_bytes = block->curr_n_bytes; - side = block->curr_side; + left_side = block->curr_left_side; ins_rec = page_rec_get_next(rec); next_rec = page_rec_get_next(ins_rec); offsets = rec_get_offsets(ins_rec, cursor->index, offsets, ULINT_UNDEFINED, &heap); - ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, tree_id); + ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id); if (!page_rec_is_supremum(next_rec)) { offsets = rec_get_offsets(next_rec, cursor->index, offsets, n_fields + (n_bytes > 0), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, - n_bytes, tree_id); + n_bytes, index_id); } if (!page_rec_is_infimum(rec)) { offsets = rec_get_offsets(rec, cursor->index, offsets, n_fields + (n_bytes > 0), &heap); - fold = rec_fold(rec, offsets, n_fields, n_bytes, tree_id); + fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id); } else { - if (side == BTR_SEARCH_LEFT_SIDE) { + if (left_side) { rw_lock_x_lock(&btr_search_latch); @@ -1538,7 +1535,7 @@ btr_search_update_hash_on_insert( locked = TRUE; } - if (side == BTR_SEARCH_RIGHT_SIDE) { + if (!left_side) { ha_insert_for_fold(table, fold, rec); } else { ha_insert_for_fold(table, ins_fold, ins_rec); @@ -1548,7 +1545,7 @@ btr_search_update_hash_on_insert( check_next_rec: if (page_rec_is_supremum(next_rec)) { - if (side == BTR_SEARCH_RIGHT_SIDE) { + if (!left_side) { if (!locked) { rw_lock_x_lock(&btr_search_latch); @@ -1571,7 +1568,7 @@ check_next_rec: locked = TRUE; } - if (side == BTR_SEARCH_RIGHT_SIDE) { + if (!left_side) { ha_insert_for_fold(table, ins_fold, ins_rec); /* @@ -1659,17 +1656,17 @@ btr_search_validate(void) " node fold %lu, rec fold %lu\n", (ulong) buf_frame_get_page_no(page), node->data, - (ulong) ut_dulint_get_high - (btr_page_get_index_id(page)), - (ulong) ut_dulint_get_low - (btr_page_get_index_id(page)), + (ulong) ut_dulint_get_high( + btr_page_get_index_id(page)), + (ulong) ut_dulint_get_low( + btr_page_get_index_id(page)), (ulong) node->fold, (ulong) rec_fold((rec_t*)(node->data), offsets, block->curr_n_fields, block->curr_n_bytes, - btr_page_get_index_id - (page))); + btr_page_get_index_id( + page))); fputs("InnoDB: Record ", stderr); rec_print_new(stderr, (rec_t*)node->data, @@ -1681,7 +1678,7 @@ btr_search_validate(void) (void*) page, (ulong) block->is_hashed, (ulong) block->curr_n_fields, (ulong) block->curr_n_bytes, - (ulong) block->curr_side); + (ulong) block->curr_left_side); if (n_page_dumps < 20) { buf_page_print(page); diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c index 4e8cd97c9a8..222acc015a6 100644 --- a/storage/innobase/buf/buf0buf.c +++ b/storage/innobase/buf/buf0buf.c @@ -350,9 +350,9 @@ buf_page_is_corrupted( if (srv_use_checksums) { old_checksum = buf_calc_page_old_checksum(read_buf); - old_checksum_field = mach_read_from_4 - (read_buf + UNIV_PAGE_SIZE - - FIL_PAGE_END_LSN_OLD_CHKSUM); + old_checksum_field = mach_read_from_4( + read_buf + UNIV_PAGE_SIZE + - FIL_PAGE_END_LSN_OLD_CHKSUM); /* There are 2 valid formulas for old_checksum_field: @@ -459,8 +459,8 @@ buf_page_print( if (dict_sys != NULL) { - index = dict_index_find_on_id_low - (btr_page_get_index_id(read_buf)); + index = 
dict_index_find_on_id_low( + btr_page_get_index_id(read_buf)); if (index) { fputs("InnoDB: (", stderr); dict_index_name_print(stderr, NULL, index); @@ -598,8 +598,8 @@ buf_pool_init( /* Allocate the virtual address space window, i.e., the buffer pool frames */ - buf_pool->frame_mem = os_awe_allocate_virtual_mem_window - (UNIV_PAGE_SIZE * (n_frames + 1)); + buf_pool->frame_mem = os_awe_allocate_virtual_mem_window( + UNIV_PAGE_SIZE * (n_frames + 1)); /* Allocate the physical memory for AWE and the AWE info array for buf_pool */ @@ -625,8 +625,8 @@ buf_pool_init( } /*----------------------------------------*/ } else { - buf_pool->frame_mem = os_mem_alloc_large - (UNIV_PAGE_SIZE * (n_frames + 1), TRUE, FALSE); + buf_pool->frame_mem = os_mem_alloc_large( + UNIV_PAGE_SIZE * (n_frames + 1), TRUE, FALSE); } if (buf_pool->frame_mem == NULL) { @@ -821,10 +821,10 @@ buf_awe_map_page_to_frame( } else { /* We can map block to the frame of bck */ - os_awe_map_physical_mem_to_window - (bck->frame, - UNIV_PAGE_SIZE / OS_AWE_X86_PAGE_SIZE, - block->awe_info); + os_awe_map_physical_mem_to_window( + bck->frame, + UNIV_PAGE_SIZE / OS_AWE_X86_PAGE_SIZE, + block->awe_info); block->frame = bck->frame; @@ -840,10 +840,10 @@ buf_awe_map_page_to_frame( bck); if (add_to_mapped_list) { - UT_LIST_ADD_FIRST - (awe_LRU_free_mapped, - buf_pool->awe_LRU_free_mapped, - block); + UT_LIST_ADD_FIRST( + awe_LRU_free_mapped, + buf_pool->awe_LRU_free_mapped, + block); } buf_pool->n_pages_awe_remapped++; @@ -1583,7 +1583,7 @@ buf_page_init_for_backup_restore( block->is_hashed = FALSE; block->n_fields = 1; block->n_bytes = 0; - block->side = BTR_SEARCH_LEFT_SIDE; + block->left_side = TRUE; block->file_page_was_freed = FALSE; } @@ -1650,7 +1650,7 @@ buf_page_init( block->is_hashed = FALSE; block->n_fields = 1; block->n_bytes = 0; - block->side = BTR_SEARCH_LEFT_SIDE; + block->left_side = TRUE; block->file_page_was_freed = FALSE; } @@ -1710,8 +1710,8 @@ buf_page_init_for_read( mutex_enter(&(buf_pool->mutex)); - if (fil_tablespace_deleted_or_being_deleted_in_mem - (space, tablespace_version)) { + if (fil_tablespace_deleted_or_being_deleted_in_mem( + space, tablespace_version)) { *err = DB_TABLESPACE_DELETED; } @@ -1891,10 +1891,10 @@ buf_page_io_complete( /* If this page is not uninitialized and not in the doublewrite buffer, then the page number and space id should be the same as in block. 
*/ - ulint read_page_no = mach_read_from_4 - (block->frame + FIL_PAGE_OFFSET); - ulint read_space_id = mach_read_from_4 - (block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); + ulint read_page_no = mach_read_from_4( + block->frame + FIL_PAGE_OFFSET); + ulint read_space_id = mach_read_from_4( + block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); if (!block->space && trx_doublewrite_page_inside(block->offset)) { @@ -1980,9 +1980,9 @@ buf_page_io_complete( } if (!recv_no_ibuf_operations) { - ibuf_merge_or_delete_for_page - (block->frame, block->space, - block->offset, TRUE); + ibuf_merge_or_delete_for_page( + block->frame, block->space, block->offset, + TRUE); } } @@ -2110,8 +2110,9 @@ buf_validate(void) if (block->flush_type == BUF_FLUSH_LRU) { n_lru_flush++; - ut_a(rw_lock_is_locked - (&block->lock, RW_LOCK_SHARED)); + ut_a(rw_lock_is_locked( + &block->lock, + RW_LOCK_SHARED)); } else if (block->flush_type == BUF_FLUSH_LIST) { n_list_flush++; diff --git a/storage/innobase/buf/buf0flu.c b/storage/innobase/buf/buf0flu.c index 9274b67a555..8ef8c3b9358 100644 --- a/storage/innobase/buf/buf0flu.c +++ b/storage/innobase/buf/buf0flu.c @@ -890,8 +890,8 @@ buf_flush_batch( old_page_count = page_count; /* Try to flush also all the neighbors */ - page_count += buf_flush_try_neighbors - (space, offset, flush_type); + page_count += buf_flush_try_neighbors( + space, offset, flush_type); /* fprintf(stderr, "Flush type %lu, page no %lu, neighb %lu\n", flush_type, offset, diff --git a/storage/innobase/buf/buf0lru.c b/storage/innobase/buf/buf0lru.c index f0264f2e323..4ebe94c40ec 100644 --- a/storage/innobase/buf/buf0lru.c +++ b/storage/innobase/buf/buf0lru.c @@ -542,16 +542,16 @@ buf_LRU_old_adjust_len(void) if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) { - buf_pool->LRU_old = UT_LIST_GET_PREV - (LRU, buf_pool->LRU_old); + buf_pool->LRU_old = UT_LIST_GET_PREV( + LRU, buf_pool->LRU_old); (buf_pool->LRU_old)->old = TRUE; buf_pool->LRU_old_len++; } else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) { (buf_pool->LRU_old)->old = FALSE; - buf_pool->LRU_old = UT_LIST_GET_NEXT - (LRU, buf_pool->LRU_old); + buf_pool->LRU_old = UT_LIST_GET_NEXT( + LRU, buf_pool->LRU_old); buf_pool->LRU_old_len--; } else { ut_a(buf_pool->LRU_old); /* Check that we did not diff --git a/storage/innobase/buf/buf0rea.c b/storage/innobase/buf/buf0rea.c index cf7aa97191f..fdec0206990 100644 --- a/storage/innobase/buf/buf0rea.c +++ b/storage/innobase/buf/buf0rea.c @@ -257,10 +257,10 @@ buf_read_ahead_random( mode: hence FALSE as the first parameter */ if (!ibuf_bitmap_page(i)) { - count += buf_read_page_low - (&err, FALSE, - ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER, - space, tablespace_version, i); + count += buf_read_page_low( + &err, FALSE, + ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER, + space, tablespace_version, i); if (err == DB_TABLESPACE_DELETED) { ut_print_timestamp(stderr); fprintf(stderr, @@ -549,10 +549,10 @@ buf_read_ahead_linear( aio mode: hence FALSE as the first parameter */ if (!ibuf_bitmap_page(i)) { - count += buf_read_page_low - (&err, FALSE, - ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER, - space, tablespace_version, i); + count += buf_read_page_low( + &err, FALSE, + ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER, + space, tablespace_version, i); if (err == DB_TABLESPACE_DELETED) { ut_print_timestamp(stderr); fprintf(stderr, diff --git a/storage/innobase/data/data0data.c b/storage/innobase/data/data0data.c index 804d02e8f59..fc4494d991a 100644 --- a/storage/innobase/data/data0data.c +++ b/storage/innobase/data/data0data.c @@ -540,8 +540,8 
@@ dtuple_convert_big_rec( n_fields = 0; while (rec_get_converted_size(index, entry) - >= ut_min(page_get_free_space_of_empty - (dict_table_is_comp(index->table)) / 2, + >= ut_min(page_get_free_space_of_empty( + dict_table_is_comp(index->table)) / 2, REC_MAX_DATA_SIZE)) { longest = 0; @@ -610,8 +610,8 @@ dtuple_convert_big_rec( vector->fields[n_fields].len = dfield->len - DICT_MAX_INDEX_COL_LEN; - vector->fields[n_fields].data = mem_heap_alloc - (heap, vector->fields[n_fields].len); + vector->fields[n_fields].data = mem_heap_alloc( + heap, vector->fields[n_fields].len); /* Copy data (from the end of field) to big rec vector */ diff --git a/storage/innobase/data/data0type.c b/storage/innobase/data/data0type.c index 43fc8c55e35..77779d185cf 100644 --- a/storage/innobase/data/data0type.c +++ b/storage/innobase/data/data0type.c @@ -40,9 +40,6 @@ charset-collation code for them. */ ulint data_mysql_default_charset_coll = 99999999; -dtype_t dtype_binary_val = {DATA_BINARY, 0, 0, 0, 0, 0}; -dtype_t* dtype_binary = &dtype_binary_val; - /************************************************************************* Determine how many bytes the first n characters of the given string occupy. If the string is shorter than n characters, returns the number of bytes @@ -53,7 +50,11 @@ dtype_get_at_most_n_mbchars( /*========================*/ /* out: length of the prefix, in bytes */ - const dtype_t* dtype, /* in: data type */ + ulint prtype, /* in: precise type */ + ulint mbminlen, /* in: minimum length of a + multi-byte character */ + ulint mbmaxlen, /* in: maximum length of a + multi-byte character */ ulint prefix_len, /* in: length of the requested prefix, in characters, multiplied by dtype_get_mbmaxlen(dtype) */ @@ -63,12 +64,12 @@ dtype_get_at_most_n_mbchars( { #ifndef UNIV_HOTBACKUP ut_a(data_len != UNIV_SQL_NULL); - ut_ad(!dtype->mbmaxlen || !(prefix_len % dtype->mbmaxlen)); + ut_ad(!mbmaxlen || !(prefix_len % mbmaxlen)); - if (dtype->mbminlen != dtype->mbmaxlen) { - ut_a(!(prefix_len % dtype->mbmaxlen)); - return(innobase_get_at_most_n_mbchars - (dtype_get_charset_coll(dtype->prtype), + if (mbminlen != mbmaxlen) { + ut_a(!(prefix_len % mbmaxlen)); + return(innobase_get_at_most_n_mbchars( + dtype_get_charset_coll(prtype), prefix_len, data_len, str)); } @@ -270,8 +271,6 @@ dtype_print( } else if (prtype == DATA_TRX_ID) { fputs("DATA_TRX_ID", stderr); len = DATA_TRX_ID_LEN; - } else if (prtype == DATA_MIX_ID) { - fputs("DATA_MIX_ID", stderr); } else if (prtype == DATA_ENGLISH) { fputs("DATA_ENGLISH", stderr); } else { @@ -291,38 +290,5 @@ dtype_print( } } - fprintf(stderr, " len %lu prec %lu", (ulong) len, (ulong) type->prec); -} - -/*************************************************************************** -Returns the maximum size of a data type. Note: types in system tables may be -incomplete and return incorrect information. 
*/ - -ulint -dtype_get_max_size( -/*===============*/ - /* out: maximum size (ULINT_MAX for - unbounded types) */ - const dtype_t* type) /* in: type */ -{ - switch (type->mtype) { - case DATA_SYS: - case DATA_CHAR: - case DATA_FIXBINARY: - case DATA_INT: - case DATA_FLOAT: - case DATA_DOUBLE: - case DATA_MYSQL: - case DATA_VARCHAR: - case DATA_BINARY: - case DATA_DECIMAL: - case DATA_VARMYSQL: - return(type->len); - case DATA_BLOB: - return(ULINT_MAX); - default: - ut_error; - } - - return(ULINT_MAX); + fprintf(stderr, " len %lu", (ulong) len); } diff --git a/storage/innobase/dict/dict0boot.c b/storage/innobase/dict/dict0boot.c index 0515adb9a8f..08515d8fb13 100644 --- a/storage/innobase/dict/dict0boot.c +++ b/storage/innobase/dict/dict0boot.c @@ -58,8 +58,7 @@ dict_hdr_get_new_id( dulint id; mtr_t mtr; - ut_ad((type == DICT_HDR_TABLE_ID) || (type == DICT_HDR_INDEX_ID) - || (type == DICT_HDR_MIX_ID)); + ut_ad((type == DICT_HDR_TABLE_ID) || (type == DICT_HDR_INDEX_ID)); mtr_start(&mtr); @@ -141,6 +140,7 @@ dict_hdr_create( mlog_write_dulint(dict_header + DICT_HDR_INDEX_ID, ut_dulint_create(0, DICT_HDR_FIRST_ID), mtr); + /* Obsolete, but we must initialize it to 0 anyway. */ mlog_write_dulint(dict_header + DICT_HDR_MIX_ID, ut_dulint_create(0, DICT_HDR_FIRST_ID), mtr); @@ -214,7 +214,6 @@ dict_boot(void) dict_index_t* index; dict_hdr_t* dict_hdr; mtr_t mtr; - ibool success; mtr_start(&mtr); @@ -236,25 +235,25 @@ dict_boot(void) ..._MARGIN, it will immediately be updated to the disk-based header. */ - dict_sys->row_id = ut_dulint_add - (ut_dulint_align_up(mtr_read_dulint - (dict_hdr + DICT_HDR_ROW_ID, &mtr), - DICT_HDR_ROW_ID_WRITE_MARGIN), - DICT_HDR_ROW_ID_WRITE_MARGIN); + dict_sys->row_id = ut_dulint_add( + ut_dulint_align_up(mtr_read_dulint(dict_hdr + DICT_HDR_ROW_ID, + &mtr), + DICT_HDR_ROW_ID_WRITE_MARGIN), + DICT_HDR_ROW_ID_WRITE_MARGIN); /* Insert into the dictionary cache the descriptions of the basic system tables */ /*-------------------------*/ table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0); - dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "N_COLS", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "MIX_ID", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "MIX_LEN", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "CLUSTER_NAME", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4, 0); + dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "N_COLS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "MIX_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "MIX_LEN", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "CLUSTER_NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4); table->id = DICT_TABLES_ID; @@ -269,31 +268,30 @@ dict_boot(void) index->id = DICT_TABLES_ID; - success = dict_index_add_to_cache(table, index, mtr_read_ulint - (dict_hdr + DICT_HDR_TABLES, - MLOG_4BYTES, &mtr)); - ut_a(success); + dict_index_add_to_cache(table, index, + mtr_read_ulint(dict_hdr + DICT_HDR_TABLES, + MLOG_4BYTES, &mtr)); + /*-------------------------*/ index = dict_mem_index_create("SYS_TABLES", "ID_IND", DICT_HDR_SPACE, DICT_UNIQUE, 1); dict_mem_index_add_field(index, "ID", 0); index->id = 
DICT_TABLE_IDS_ID; - success = dict_index_add_to_cache(table, index, - mtr_read_ulint - (dict_hdr + DICT_HDR_TABLE_IDS, - MLOG_4BYTES, &mtr)); - ut_a(success); + dict_index_add_to_cache(table, index, + mtr_read_ulint(dict_hdr + DICT_HDR_TABLE_IDS, + MLOG_4BYTES, &mtr)); + /*-------------------------*/ table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0); - dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY,0,0,0); - dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "MTYPE", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "PRTYPE", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "LEN", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "PREC", DATA_INT, 0, 4, 0); + dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "MTYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "PRTYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "LEN", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "PREC", DATA_INT, 0, 4); table->id = DICT_COLUMNS_ID; @@ -308,20 +306,20 @@ dict_boot(void) dict_mem_index_add_field(index, "POS", 0); index->id = DICT_COLUMNS_ID; - success = dict_index_add_to_cache(table, index, mtr_read_ulint - (dict_hdr + DICT_HDR_COLUMNS, - MLOG_4BYTES, &mtr)); - ut_a(success); + dict_index_add_to_cache(table, index, + mtr_read_ulint(dict_hdr + DICT_HDR_COLUMNS, + MLOG_4BYTES, &mtr)); + /*-------------------------*/ table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0); - dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0,0,0); - dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "N_FIELDS", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "PAGE_NO", DATA_INT, 0, 4, 0); + dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "N_FIELDS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "PAGE_NO", DATA_INT, 0, 4); /* The '+ 2' below comes from the 2 system fields */ #if DICT_SYS_INDEXES_PAGE_NO_FIELD != 6 + 2 @@ -346,16 +344,16 @@ dict_boot(void) dict_mem_index_add_field(index, "ID", 0); index->id = DICT_INDEXES_ID; - success = dict_index_add_to_cache(table, index, mtr_read_ulint - (dict_hdr + DICT_HDR_INDEXES, - MLOG_4BYTES, &mtr)); - ut_a(success); + dict_index_add_to_cache(table, index, + mtr_read_ulint(dict_hdr + DICT_HDR_INDEXES, + MLOG_4BYTES, &mtr)); + /*-------------------------*/ table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0); - dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0,0,0); - dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0); - dict_mem_table_add_col(table, "COL_NAME", DATA_BINARY, 0,0,0); + dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, "COL_NAME", DATA_BINARY, 0, 0); table->id = DICT_FIELDS_ID; dict_table_add_to_cache(table); @@ -369,10 +367,9 @@ 
dict_boot(void) dict_mem_index_add_field(index, "POS", 0); index->id = DICT_FIELDS_ID; - success = dict_index_add_to_cache(table, index, mtr_read_ulint - (dict_hdr + DICT_HDR_FIELDS, - MLOG_4BYTES, &mtr)); - ut_a(success); + dict_index_add_to_cache(table, index, + mtr_read_ulint(dict_hdr + DICT_HDR_FIELDS, + MLOG_4BYTES, &mtr)); mtr_commit(&mtr); /*-------------------------*/ diff --git a/storage/innobase/dict/dict0crea.c b/storage/innobase/dict/dict0crea.c index c912ea0fd1a..75422b929b1 100644 --- a/storage/innobase/dict/dict0crea.c +++ b/storage/innobase/dict/dict0crea.c @@ -78,14 +78,14 @@ dict_create_sys_tables_tuple( mach_write_to_4(ptr, DICT_TABLE_ORDINARY); dfield_set_data(dfield, ptr, 4); - /* 6: MIX_ID ---------------------------*/ + /* 6: MIX_ID (obsolete) ---------------------------*/ dfield = dtuple_get_nth_field(entry, 4); ptr = mem_heap_alloc(heap, 8); memset(ptr, 0, 8); dfield_set_data(dfield, ptr, 8); - /* 7: MIX_LEN --------------------------*/ + /* 7: MIX_LEN (obsolete) --------------------------*/ dfield = dtuple_get_nth_field(entry, 5); @@ -124,11 +124,12 @@ dict_create_sys_columns_tuple( mem_heap_t* heap) /* in: memory heap from which the memory for the built tuple is allocated */ { - dict_table_t* sys_columns; - dtuple_t* entry; - dict_col_t* column; - dfield_t* dfield; - byte* ptr; + dict_table_t* sys_columns; + dtuple_t* entry; + const dict_col_t* column; + dfield_t* dfield; + byte* ptr; + const char* col_name; ut_ad(table && heap); @@ -155,33 +156,34 @@ dict_create_sys_columns_tuple( /* 4: NAME ---------------------------*/ dfield = dtuple_get_nth_field(entry, 2); - dfield_set_data(dfield, column->name, ut_strlen(column->name)); + col_name = dict_table_get_col_name(table, i); + dfield_set_data(dfield, col_name, ut_strlen(col_name)); /* 5: MTYPE --------------------------*/ dfield = dtuple_get_nth_field(entry, 3); ptr = mem_heap_alloc(heap, 4); - mach_write_to_4(ptr, (column->type).mtype); + mach_write_to_4(ptr, column->mtype); dfield_set_data(dfield, ptr, 4); /* 6: PRTYPE -------------------------*/ dfield = dtuple_get_nth_field(entry, 4); ptr = mem_heap_alloc(heap, 4); - mach_write_to_4(ptr, (column->type).prtype); + mach_write_to_4(ptr, column->prtype); dfield_set_data(dfield, ptr, 4); /* 7: LEN ----------------------------*/ dfield = dtuple_get_nth_field(entry, 5); ptr = mem_heap_alloc(heap, 4); - mach_write_to_4(ptr, (column->type).len); + mach_write_to_4(ptr, column->len); dfield_set_data(dfield, ptr, 4); /* 8: PREC ---------------------------*/ dfield = dtuple_get_nth_field(entry, 6); ptr = mem_heap_alloc(heap, 4); - mach_write_to_4(ptr, (column->type).prec); + mach_write_to_4(ptr, 0/* unused */); dfield_set_data(dfield, ptr, 4); /*---------------------------------*/ @@ -222,8 +224,7 @@ dict_build_table_def_step( row_len = 0; for (i = 0; i < table->n_def; i++) { - row_len += dtype_get_min_size(dict_col_get_type - (&table->cols[i])); + row_len += dict_col_get_min_size(&table->cols[i]); } if (row_len > BTR_PAGE_MAX_REC_SIZE) { return(DB_TOO_BIG_RECORD); @@ -238,7 +239,7 @@ dict_build_table_def_step( - page 3 will contain the root of the clustered index of the table we create here. 
*/ - table->space = 0; /* reset to zero for the call below */ + ulint space = 0; /* reset to zero for the call below */ if (table->dir_path_of_temp_table) { /* We place tables created with CREATE TEMPORARY @@ -251,9 +252,11 @@ dict_build_table_def_step( is_path = FALSE; } - error = fil_create_new_single_table_tablespace - (&table->space, path_or_name, is_path, - FIL_IBD_FILE_INITIAL_SIZE); + error = fil_create_new_single_table_tablespace( + &space, path_or_name, is_path, + FIL_IBD_FILE_INITIAL_SIZE); + table->space = space; + if (error != DB_SUCCESS) { return(error); @@ -768,8 +771,8 @@ dict_truncate_index_tree( appropriate field in the SYS_INDEXES record: this mini-transaction marks the B-tree totally truncated */ - comp = page_is_comp(btr_page_get - (space, root_page_no, RW_X_LATCH, mtr)); + comp = page_is_comp(btr_page_get(space, root_page_no, RW_X_LATCH, + mtr)); btr_free_root(space, root_page_no, mtr); /* We will temporarily write FIL_NULL to the PAGE_NO field @@ -798,7 +801,7 @@ dict_truncate_index_tree( root_page_no = btr_create(type, space, index_id, comp, mtr); if (index) { - index->tree->page = root_page_no; + index->page = root_page_no; } else { ut_print_timestamp(stderr); fprintf(stderr, @@ -1004,7 +1007,6 @@ dict_create_index_step( que_thr_t* thr) /* in: query thread */ { ind_node_t* node; - ibool success; ulint err = DB_ERROR; trx_t* trx; @@ -1088,10 +1090,8 @@ dict_create_index_step( if (node->state == INDEX_ADD_TO_CACHE) { - success = dict_index_add_to_cache(node->table, node->index, - node->page_no); - - ut_a(success); + dict_index_add_to_cache(node->table, node->index, + node->page_no); err = DB_SUCCESS; } @@ -1328,13 +1328,14 @@ dict_create_add_foreign_field_to_dictionary( pars_info_add_str_literal(info, "ref_col_name", foreign->referenced_col_names[field_nr]); - return(dict_foreign_eval_sql - (info, "PROCEDURE P () IS\n" - "BEGIN\n" - "INSERT INTO SYS_FOREIGN_COLS VALUES" - "(:id, :pos, :for_col_name, :ref_col_name);\n" - "END;\n", - table, foreign, trx)); + return(dict_foreign_eval_sql( + info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "INSERT INTO SYS_FOREIGN_COLS VALUES" + "(:id, :pos, :for_col_name, :ref_col_name);\n" + "END;\n", + table, foreign, trx)); } /************************************************************************ @@ -1393,8 +1394,8 @@ dict_create_add_foreign_to_dictionary( } for (i = 0; i < foreign->n_fields; i++) { - error = dict_create_add_foreign_field_to_dictionary - (i, table, foreign, trx); + error = dict_create_add_foreign_field_to_dictionary( + i, table, foreign, trx); if (error != DB_SUCCESS) { @@ -1450,8 +1451,8 @@ dict_create_add_foreigns_to_dictionary( foreign; foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { - error = dict_create_add_foreign_to_dictionary - (&number, table, foreign, trx); + error = dict_create_add_foreign_to_dictionary(&number, table, + foreign, trx); if (error != DB_SUCCESS) { diff --git a/storage/innobase/dict/dict0dict.c b/storage/innobase/dict/dict0dict.c index f98898a44a0..44a374fe550 100644 --- a/storage/innobase/dict/dict0dict.c +++ b/storage/innobase/dict/dict0dict.c @@ -48,8 +48,6 @@ rw_lock_t dict_operation_lock; /* table create, drop, etc. 
reserve creating a table or index object */ #define DICT_POOL_PER_TABLE_HASH 512 /* buffer pool max size per table hash table fixed size in bytes */ -#define DICT_POOL_PER_COL_HASH 128 /* buffer pool max size per column - hash table fixed size in bytes */ #define DICT_POOL_PER_VARYING 4 /* buffer pool max size per data dictionary varying size in bytes */ @@ -129,32 +127,6 @@ innobase_get_charset( void* mysql_thd); /* in: MySQL thread handle */ #endif /* !UNIV_HOTBACKUP */ -/************************************************************************** -Adds a column to the data dictionary hash table. */ -static -void -dict_col_add_to_cache( -/*==================*/ - dict_table_t* table, /* in: table */ - dict_col_t* col); /* in: column */ -/************************************************************************** -Repositions a column in the data dictionary hash table when the table name -changes. */ -static -void -dict_col_reposition_in_cache( -/*=========================*/ - dict_table_t* table, /* in: table */ - dict_col_t* col, /* in: column */ - const char* new_name); /* in: new table name */ -/************************************************************************** -Removes a column from the data dictionary hash table. */ -static -void -dict_col_remove_from_cache( -/*=======================*/ - dict_table_t* table, /* in: table */ - dict_col_t* col); /* in: column */ /************************************************************************** Removes an index from the dictionary cache. */ static @@ -171,16 +143,16 @@ dict_index_copy( /*============*/ dict_index_t* index1, /* in: index to copy to */ dict_index_t* index2, /* in: index to copy from */ + dict_table_t* table, /* in: table */ ulint start, /* in: first position to copy */ ulint end); /* in: last position to copy */ /*********************************************************************** -Tries to find column names for the index in the column hash table and -sets the col field of the index. */ +Tries to find column names for the index and sets the col field of the +index. */ static -ibool +void dict_index_find_cols( /*=================*/ - /* out: TRUE if success */ dict_table_t* table, /* in: table */ dict_index_t* index); /* in: index */ /*********************************************************************** @@ -220,7 +192,8 @@ static void dict_col_print_low( /*===============*/ - dict_col_t* col); /* in: column */ + const dict_table_t* table, /* in: table */ + const dict_col_t* col); /* in: column */ /************************************************************************** Prints an index data. */ static @@ -352,15 +325,27 @@ dict_table_decrement_handle_count( mutex_exit(&(dict_sys->mutex)); } +/************************************************************************* +Gets the column data type. */ + +void +dict_col_copy_type_noninline( +/*=========================*/ + const dict_col_t* col, /* in: column */ + dtype_t* type) /* out: data type */ +{ + dict_col_copy_type(col, type); +} + /************************************************************************ Gets the nth column of a table. 
*/ -dict_col_t* +const dict_col_t* dict_table_get_nth_col_noninline( /*=============================*/ - /* out: pointer to column object */ - dict_table_t* table, /* in: table */ - ulint pos) /* in: position of column */ + /* out: pointer to column object */ + const dict_table_t* table, /* in: table */ + ulint pos) /* in: position of column */ { return(dict_table_get_nth_col(table, pos)); } @@ -402,6 +387,35 @@ dict_table_get_index_noninline( return(dict_table_get_index(table, name)); } +/************************************************************************** +Returns a column's name. */ + +const char* +dict_table_get_col_name( +/*====================*/ + /* out: column name. NOTE: not + guaranteed to stay valid if table is + modified in any way (columns added, + etc.). */ + const dict_table_t* table, /* in: table */ + ulint col_nr) /* in: column number */ +{ + ulint i; + const char* s; + + ut_ad(table); + ut_ad(col_nr < table->n_def); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + + s = table->col_names; + + for (i = 0; i < col_nr; i++) { + s += strlen(s) + 1; + } + + return(s); +} + /************************************************************************ Initializes the autoinc counter. It is not an error to initialize an already initialized counter. */ @@ -544,10 +558,10 @@ dict_index_get_nth_col_pos( dict_index_t* index, /* in: index */ ulint n) /* in: column number */ { - dict_field_t* field; - dict_col_t* col; - ulint pos; - ulint n_fields; + const dict_field_t* field; + const dict_col_t* col; + ulint pos; + ulint n_fields; ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); @@ -556,7 +570,7 @@ dict_index_get_nth_col_pos( if (index->type & DICT_CLUSTERED) { - return(col->clust_pos); + return(dict_col_get_clust_pos(col, index)); } n_fields = dict_index_get_n_fields(index); @@ -584,10 +598,10 @@ dict_index_contains_col_or_prefix( dict_index_t* index, /* in: index */ ulint n) /* in: column number */ { - dict_field_t* field; - dict_col_t* col; - ulint pos; - ulint n_fields; + const dict_field_t* field; + const dict_col_t* col; + ulint pos; + ulint n_fields; ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); @@ -731,11 +745,11 @@ dict_table_col_in_clustered_key( dict_table_t* table, /* in: table */ ulint n) /* in: column number */ { - dict_index_t* index; - dict_field_t* field; - dict_col_t* col; - ulint pos; - ulint n_fields; + dict_index_t* index; + const dict_field_t* field; + const dict_col_t* col; + ulint pos; + ulint n_fields; ut_ad(table); @@ -774,9 +788,6 @@ dict_init(void) dict_sys->table_id_hash = hash_create(buf_pool_get_max_size() / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); - dict_sys->col_hash = hash_create(buf_pool_get_max_size() - / (DICT_POOL_PER_COL_HASH - * UNIV_WORD_SIZE)); dict_sys->size = 0; UT_LIST_INIT(dict_sys->table_LRU); @@ -884,52 +895,49 @@ dict_table_add_to_cache( dict_mem_table_add_col(table, "DB_ROW_ID", DATA_SYS, DATA_ROW_ID | DATA_NOT_NULL, - DATA_ROW_ID_LEN, 0); + DATA_ROW_ID_LEN); #if DATA_ROW_ID != 0 #error "DATA_ROW_ID != 0" #endif dict_mem_table_add_col(table, "DB_TRX_ID", DATA_SYS, DATA_TRX_ID | DATA_NOT_NULL, - DATA_TRX_ID_LEN, 0); + DATA_TRX_ID_LEN); #if DATA_TRX_ID != 1 #error "DATA_TRX_ID != 1" #endif dict_mem_table_add_col(table, "DB_ROLL_PTR", DATA_SYS, DATA_ROLL_PTR | DATA_NOT_NULL, - DATA_ROLL_PTR_LEN, 0); + DATA_ROLL_PTR_LEN); #if DATA_ROLL_PTR != 2 #error "DATA_ROLL_PTR != 2" -#endif - dict_mem_table_add_col(table, "DB_MIX_ID", DATA_SYS, - DATA_MIX_ID | DATA_NOT_NULL, - DATA_MIX_ID_LEN, 0); -#if DATA_MIX_ID != 3 
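The new dict_table_get_col_name() above depends on the patch's packed name storage: dict_table_t::col_names holds all column names as NUL-terminated strings laid end to end, and the n-th name is reached by skipping strlen(s) + 1 bytes n times. A minimal standalone sketch of that walk follows; the helper name and the sample column names are illustrative only, not part of the patch.

#include <stdio.h>
#include <string.h>

/* Stand-in for the col_names walk: names are packed as "a\0b\0c\0" and the
n-th one is found by skipping strlen(s) + 1 bytes n times, the same loop
dict_table_get_col_name() uses. */
static const char*
nth_packed_name(const char* names, unsigned n)
{
	const char*	s = names;
	unsigned	i;

	for (i = 0; i < n; i++) {
		s += strlen(s) + 1;	/* skip name i and its terminating NUL */
	}

	return(s);
}

int
main(void)
{
	/* three sample names packed as "id\0name\0created\0" */
	static const char	col_names[] = "id\0name\0created";

	printf("%s\n", nth_packed_name(col_names, 2));	/* prints "created" */
	return(0);
}

As the function comment in the patch warns, a pointer obtained this way stays valid only while the table is not modified (columns added, and so on).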
-#error "DATA_MIX_ID != 3" #endif /* This check reminds that if a new system column is added to the program, it should be dealt with here */ -#if DATA_N_SYS_COLS != 4 -#error "DATA_N_SYS_COLS != 4" +#if DATA_N_SYS_COLS != 3 +#error "DATA_N_SYS_COLS != 3" #endif + /* The lower limit for what we consider a "big" row */ +#define BIG_ROW_SIZE 1024 + row_len = 0; for (i = 0; i < table->n_def; i++) { - ulint col_len = dtype_get_max_size - (dict_col_get_type(dict_table_get_nth_col(table, i))); + ulint col_len = dict_col_get_max_size( + dict_table_get_nth_col(table, i)); + + row_len += col_len; /* If we have a single unbounded field, or several gigantic - fields, mark the maximum row size as ULINT_MAX. */ - if (ut_max(col_len, row_len) >= (ULINT_MAX / 2)) { - row_len = ULINT_MAX; + fields, mark the maximum row size as BIG_ROW_SIZE. */ + if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) { + row_len = BIG_ROW_SIZE; break; } - - row_len += col_len; } - table->max_row_size = row_len; + table->big_rows = row_len >= BIG_ROW_SIZE; /* Look for a table with the same name: error if such exists */ { @@ -947,11 +955,6 @@ dict_table_add_to_cache( ut_a(table2 == NULL); } - /* Add the columns to the column hash table */ - for (i = 0; i < table->n_cols; i++) { - dict_col_add_to_cache(table, dict_table_get_nth_col(table, i)); - } - /* Add table to hash table of tables */ HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold, table); @@ -985,7 +988,7 @@ dict_index_find_on_id_low( index = dict_table_get_first_index(table); while (index) { - if (0 == ut_dulint_cmp(id, index->tree->id)) { + if (0 == ut_dulint_cmp(id, index->id)) { /* Found */ return(index); @@ -1019,7 +1022,6 @@ dict_table_rename_in_cache( ulint old_size; char* old_name; ibool success; - ulint i; ut_ad(table); #ifdef UNIV_SYNC_DEBUG @@ -1056,8 +1058,8 @@ dict_table_rename_in_cache( table->name, table->dir_path_of_temp_table); success = FALSE; } else { - success = fil_rename_tablespace - (table->name, table->space, new_name); + success = fil_rename_tablespace( + table->name, table->space, new_name); } if (!success) { @@ -1066,15 +1068,6 @@ dict_table_rename_in_cache( } } - /* Reposition the columns in the column hash table; they are hashed - according to the pair (table name, column name) */ - - for (i = 0; i < table->n_cols; i++) { - dict_col_reposition_in_cache(table, - dict_table_get_nth_col(table, i), - new_name); - } - /* Remove table from the hash tables of tables */ HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash, ut_fold_string(table->name), table); @@ -1167,10 +1160,10 @@ dict_table_rename_in_cache( /* This is a generated >= 4.0.18 format id */ if (strlen(table->name) > strlen(old_name)) { - foreign->id = mem_heap_alloc - (foreign->heap, - strlen(table->name) - + strlen(old_id) + 1); + foreign->id = mem_heap_alloc( + foreign->heap, + strlen(table->name) + + strlen(old_id) + 1); } /* Replace the prefix 'databasename/tablename' @@ -1186,9 +1179,9 @@ dict_table_rename_in_cache( if (dict_get_db_name_len(table->name) > dict_get_db_name_len(foreign->id)) { - foreign->id = mem_heap_alloc - (foreign->heap, - db_len + strlen(old_id) + 1); + foreign->id = mem_heap_alloc( + foreign->heap, + db_len + strlen(old_id) + 1); } /* Replace the database prefix in id with the @@ -1214,8 +1207,8 @@ dict_table_rename_in_cache( /* Allocate a longer name buffer; TODO: store buf len to save memory */ - foreign->referenced_table_name = mem_heap_alloc - (foreign->heap, strlen(table->name) + 1); + foreign->referenced_table_name = mem_heap_alloc( 
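The big_rows flag computed above replaces the old max_row_size field: the maximum sizes of all columns are summed, and the sum is clamped to BIG_ROW_SIZE (1024) as soon as either the running total or a single column reaches that limit. A standalone sketch of the same heuristic, using invented column sizes rather than real dict_col_t data:

#include <stdio.h>

#define BIG_ROW_SIZE	1024	/* same threshold as in the patch */

/* Sum the per-column maximum sizes, clamping at BIG_ROW_SIZE as soon as
either the running total or one column reaches it; the row counts as "big"
when the clamped total reaches the threshold. */
static int
row_is_big(const unsigned long* col_max_sizes, unsigned n_cols)
{
	unsigned long	row_len = 0;
	unsigned	i;

	for (i = 0; i < n_cols; i++) {
		unsigned long	col_len = col_max_sizes[i];

		row_len += col_len;

		if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) {
			row_len = BIG_ROW_SIZE;
			break;
		}
	}

	return(row_len >= BIG_ROW_SIZE);
}

int
main(void)
{
	unsigned long	small_cols[] = {4, 8, 255};	/* 267 bytes: not big */
	unsigned long	blob_cols[]  = {4, 768, 65535};	/* huge field: big */

	/* prints "0 1" */
	printf("%d %d\n", row_is_big(small_cols, 3), row_is_big(blob_cols, 3));
	return(0);
}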
+ foreign->heap, strlen(table->name) + 1); } strcpy(foreign->referenced_table_name, table->name); @@ -1264,7 +1257,6 @@ dict_table_remove_from_cache( dict_foreign_t* foreign; dict_index_t* index; ulint size; - ulint i; ut_ad(table); #ifdef UNIV_SYNC_DEBUG @@ -1305,12 +1297,6 @@ dict_table_remove_from_cache( index = UT_LIST_GET_LAST(table->indexes); } - /* Remove the columns of the table from the cache */ - for (i = 0; i < table->n_cols; i++) { - dict_col_remove_from_cache(table, - dict_table_get_nth_col(table, i)); - } - /* Remove table from the hash tables of tables */ HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash, ut_fold_string(table->name), table); @@ -1329,90 +1315,16 @@ dict_table_remove_from_cache( dict_mem_table_free(table); } -/************************************************************************** -Adds a column to the data dictionary hash table. */ -static -void -dict_col_add_to_cache( -/*==================*/ - dict_table_t* table, /* in: table */ - dict_col_t* col) /* in: column */ +/************************************************************************* +Gets the column position in the clustered index. */ + +ulint +dict_col_get_clust_pos_noninline( +/*=============================*/ + const dict_col_t* col, /* in: table column */ + const dict_index_t* clust_index) /* in: clustered index */ { - ulint fold; - - ut_ad(table && col); -#ifdef UNIV_SYNC_DEBUG - ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - - fold = ut_fold_ulint_pair(ut_fold_string(table->name), - ut_fold_string(col->name)); - - /* Look for a column with same table name and column name: error */ - { - dict_col_t* col2; - HASH_SEARCH(hash, dict_sys->col_hash, fold, col2, - (ut_strcmp(col->name, col2->name) == 0) - && (ut_strcmp((col2->table)->name, table->name) - == 0)); - ut_a(col2 == NULL); - } - - HASH_INSERT(dict_col_t, hash, dict_sys->col_hash, fold, col); -} - -/************************************************************************** -Removes a column from the data dictionary hash table. */ -static -void -dict_col_remove_from_cache( -/*=======================*/ - dict_table_t* table, /* in: table */ - dict_col_t* col) /* in: column */ -{ - ulint fold; - - ut_ad(table && col); -#ifdef UNIV_SYNC_DEBUG - ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - - fold = ut_fold_ulint_pair(ut_fold_string(table->name), - ut_fold_string(col->name)); - - HASH_DELETE(dict_col_t, hash, dict_sys->col_hash, fold, col); -} - -/************************************************************************** -Repositions a column in the data dictionary hash table when the table name -changes. 
*/ -static -void -dict_col_reposition_in_cache( -/*=========================*/ - dict_table_t* table, /* in: table */ - dict_col_t* col, /* in: column */ - const char* new_name) /* in: new table name */ -{ - ulint fold; - - ut_ad(table && col); -#ifdef UNIV_SYNC_DEBUG - ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - - fold = ut_fold_ulint_pair(ut_fold_string(table->name), - ut_fold_string(col->name)); - - HASH_DELETE(dict_col_t, hash, dict_sys->col_hash, fold, col); - - fold = ut_fold_ulint_pair(ut_fold_string(new_name), - ut_fold_string(col->name)); - - HASH_INSERT(dict_col_t, hash, dict_sys->col_hash, fold, col); + return(dict_col_get_clust_pos(col, clust_index)); } /******************************************************************** @@ -1427,12 +1339,12 @@ dict_col_name_is_reserved( { /* This check reminds that if a new system column is added to the program, it should be dealt with here. */ -#if DATA_N_SYS_COLS != 4 -#error "DATA_N_SYS_COLS != 4" +#if DATA_N_SYS_COLS != 3 +#error "DATA_N_SYS_COLS != 3" #endif static const char* reserved_names[] = { - "DB_ROW_ID", "DB_TRX_ID", "DB_ROLL_PTR", "DB_MIX_ID" + "DB_ROW_ID", "DB_TRX_ID", "DB_ROLL_PTR" }; ulint i; @@ -1450,20 +1362,16 @@ dict_col_name_is_reserved( /************************************************************************** Adds an index to the dictionary cache. */ -ibool +void dict_index_add_to_cache( /*====================*/ - /* out: TRUE if success */ dict_table_t* table, /* in: table on which the index is */ dict_index_t* index, /* in, own: index; NOTE! The index memory object is freed in this function! */ ulint page_no)/* in: root page number of the index */ { dict_index_t* new_index; - dict_tree_t* tree; - dict_field_t* field; ulint n_ord; - ibool success; ulint i; ut_ad(index); @@ -1491,13 +1399,7 @@ dict_index_add_to_cache( ut_a(!(index->type & DICT_CLUSTERED) || UT_LIST_GET_LEN(table->indexes) == 0); - success = dict_index_find_cols(table, index); - - if (!success) { - dict_mem_index_free(index); - - return(FALSE); - } + dict_index_find_cols(table, index); /* Build the cache internal representation of the index, containing also the added system fields */ @@ -1531,23 +1433,18 @@ dict_index_add_to_cache( for (i = 0; i < n_ord; i++) { - field = dict_index_get_nth_field(new_index, i); - - dict_field_get_col(field)->ord_part++; + dict_index_get_nth_field(new_index, i)->col->ord_part = 1; } - /* Create an index tree memory object for the index */ - tree = dict_tree_create(new_index, page_no); - ut_ad(tree); - - new_index->tree = tree; + new_index->page = page_no; + rw_lock_create(&new_index->lock, SYNC_INDEX_TREE); if (!UNIV_UNLIKELY(new_index->type & DICT_UNIVERSAL)) { - new_index->stat_n_diff_key_vals = mem_heap_alloc - (new_index->heap, - (1 + dict_index_get_n_unique(new_index)) - * sizeof(ib_longlong)); + new_index->stat_n_diff_key_vals = mem_heap_alloc( + new_index->heap, + (1 + dict_index_get_n_unique(new_index)) + * sizeof(ib_longlong)); /* Give some sensible values to stat_n_... 
in case we do not calculate statistics quickly enough */ @@ -1557,14 +1454,9 @@ dict_index_add_to_cache( } } - /* Add the index to the list of indexes stored in the tree */ - tree->tree_index = new_index; - dict_sys->size += mem_heap_get_size(new_index->heap); dict_mem_index_free(index); - - return(TRUE); } /************************************************************************** @@ -1576,9 +1468,7 @@ dict_index_remove_from_cache( dict_table_t* table, /* in: table */ dict_index_t* index) /* in, own: index */ { - dict_field_t* field; ulint size; - ulint i; ut_ad(table && index); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); @@ -1587,17 +1477,7 @@ dict_index_remove_from_cache( ut_ad(mutex_own(&(dict_sys->mutex))); #endif /* UNIV_SYNC_DEBUG */ - ut_ad(index->tree->tree_index); - dict_tree_free(index->tree); - - /* Decrement the ord_part counts in columns which are ordering */ - for (i = 0; i < dict_index_get_n_unique(index); i++) { - - field = dict_index_get_nth_field(index, i); - - ut_ad(dict_field_get_col(field)->ord_part > 0); - (dict_field_get_col(field)->ord_part)--; - } + rw_lock_free(&index->lock); /* Remove the index from the list of indexes of the table */ UT_LIST_REMOVE(indexes, table->indexes, index); @@ -1612,19 +1492,15 @@ dict_index_remove_from_cache( } /*********************************************************************** -Tries to find column names for the index in the column hash table and -sets the col field of the index. */ +Tries to find column names for the index and sets the col field of the +index. */ static -ibool +void dict_index_find_cols( /*=================*/ - /* out: TRUE if success */ dict_table_t* table, /* in: table */ dict_index_t* index) /* in: index */ { - dict_col_t* col; - dict_field_t* field; - ulint fold; ulint i; ut_ad(table && index); @@ -1634,24 +1510,25 @@ dict_index_find_cols( #endif /* UNIV_SYNC_DEBUG */ for (i = 0; i < index->n_fields; i++) { - field = dict_index_get_nth_field(index, i); + ulint j; + dict_field_t* field = dict_index_get_nth_field(index, i); - fold = ut_fold_ulint_pair(ut_fold_string(table->name), - ut_fold_string(field->name)); + for (j = 0; j < table->n_cols; j++) { + if (!strcmp(dict_table_get_col_name(table, j), + field->name)) { + field->col = (dict_col_t*) + dict_table_get_nth_col(table, j); - HASH_SEARCH(hash, dict_sys->col_hash, fold, col, - (ut_strcmp(col->name, field->name) == 0) - && (ut_strcmp((col->table)->name, table->name) - == 0)); - if (col == NULL) { - - return(FALSE); - } else { - field->col = col; + goto found; + } } - } - return(TRUE); + /* It is an error not to find a matching column. 
*/ + ut_error; + + found: + ; + } } /*********************************************************************** @@ -1661,17 +1538,21 @@ void dict_index_add_col( /*===============*/ dict_index_t* index, /* in: index */ + dict_table_t* table, /* in: table */ dict_col_t* col, /* in: column */ ulint prefix_len) /* in: column prefix length */ { dict_field_t* field; + const char* col_name; - dict_mem_index_add_field(index, col->name, prefix_len); + col_name = dict_table_get_col_name(table, dict_col_get_no(col)); + + dict_mem_index_add_field(index, col_name, prefix_len); field = dict_index_get_nth_field(index, index->n_def - 1); field->col = col; - field->fixed_len = dtype_get_fixed_size(&col->type); + field->fixed_len = dict_col_get_fixed_size(col); if (prefix_len && field->fixed_len > prefix_len) { field->fixed_len = prefix_len; @@ -1685,7 +1566,7 @@ dict_index_add_col( field->fixed_len = 0; } - if (!(dtype_get_prtype(&col->type) & DATA_NOT_NULL)) { + if (!(col->prtype & DATA_NOT_NULL)) { index->n_nullable++; } } @@ -1698,6 +1579,7 @@ dict_index_copy( /*============*/ dict_index_t* index1, /* in: index to copy to */ dict_index_t* index2, /* in: index to copy from */ + dict_table_t* table, /* in: table */ ulint start, /* in: first position to copy */ ulint end) /* in: last position to copy */ { @@ -1709,7 +1591,8 @@ dict_index_copy( for (i = start; i < end; i++) { field = dict_index_get_nth_field(index2, i); - dict_index_add_col(index1, field->col, field->prefix_len); + dict_index_add_col(index1, table, field->col, + field->prefix_len); } } @@ -1723,8 +1606,6 @@ dict_index_copy_types( dict_index_t* index, /* in: index */ ulint n_fields) /* in: number of field types to copy */ { - dtype_t* dfield_type; - dtype_t* type; ulint i; if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) { @@ -1734,10 +1615,15 @@ dict_index_copy_types( } for (i = 0; i < n_fields; i++) { + dict_field_t* ifield; + dtype_t* dfield_type; + + ifield = dict_index_get_nth_field(index, i); dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i)); - type = dict_col_get_type(dict_field_get_col - (dict_index_get_nth_field(index, i))); - *dfield_type = *type; + dict_col_copy_type(dict_field_get_col(ifield), dfield_type); + if (UNIV_UNLIKELY(ifield->prefix_len)) { + dfield_type->len = ifield->prefix_len; + } } } @@ -1751,15 +1637,13 @@ dict_table_copy_types( dict_table_t* table) /* in: index */ { dtype_t* dfield_type; - dtype_t* type; ulint i; for (i = 0; i < dtuple_get_n_fields(tuple); i++) { dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i)); - type = dict_col_get_type(dict_table_get_nth_col(table, i)); - - *dfield_type = *type; + dict_col_copy_type(dict_table_get_nth_col(table, i), + dfield_type); } } @@ -1778,7 +1662,6 @@ dict_index_build_internal_clust( { dict_index_t* new_index; dict_field_t* field; - dict_col_t* col; ulint fixed_size; ulint trx_id_pos; ulint i; @@ -1805,12 +1688,12 @@ dict_index_build_internal_clust( new_index->id = index->id; /* Copy the fields of index */ - dict_index_copy(new_index, index, 0, index->n_fields); + dict_index_copy(new_index, index, table, 0, index->n_fields); if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) { /* No fixed number of fields determines an entry uniquely */ - new_index->n_uniq = ULINT_MAX; + new_index->n_uniq = REC_MAX_N_FIELDS; } else if (index->type & DICT_UNIQUE) { /* Only the fields defined so far are needed to identify @@ -1840,24 +1723,26 @@ dict_index_build_internal_clust( #endif if (!(index->type & DICT_UNIQUE)) { - dict_index_add_col(new_index, - 
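With the column hash table removed, dict_index_find_cols() above resolves each index field by a plain strcmp() over the table's column names and treats a miss as a fatal error (ut_error). The sketch below shows the same linear lookup in isolation; the helper and the sample names are invented for illustration, and it returns -1 instead of aborting.

#include <stdio.h>
#include <string.h>

/* Linear name lookup, as dict_index_find_cols() now does: compare the wanted
name against every column name in turn and return its position. */
static int
find_col_by_name(const char* const* col_names, unsigned n_cols,
		 const char* name)
{
	unsigned	j;

	for (j = 0; j < n_cols; j++) {
		if (!strcmp(col_names[j], name)) {
			return((int) j);	/* found: column number */
		}
	}

	return(-1);	/* not found; the patch treats this as ut_error */
}

int
main(void)
{
	const char*	cols[] = {"id", "name", "created"};

	/* prints "1 -1" */
	printf("%d %d\n",
	       find_col_by_name(cols, 3, "name"),
	       find_col_by_name(cols, 3, "missing"));
	return(0);
}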
dict_table_get_sys_col - (table, DATA_ROW_ID), 0); + dict_index_add_col(new_index, table, (dict_col_t*) + dict_table_get_sys_col( + table, DATA_ROW_ID), + 0); trx_id_pos++; } - dict_index_add_col(new_index, - dict_table_get_sys_col - (table, DATA_TRX_ID), 0); + dict_index_add_col(new_index, table, (dict_col_t*) + dict_table_get_sys_col(table, DATA_TRX_ID), + 0); - dict_index_add_col(new_index, - dict_table_get_sys_col - (table, DATA_ROLL_PTR), 0); + dict_index_add_col(new_index, table, (dict_col_t*) + dict_table_get_sys_col(table, + DATA_ROLL_PTR), + 0); for (i = 0; i < trx_id_pos; i++) { - fixed_size = dtype_get_fixed_size - (dict_index_get_nth_type(new_index, i)); + fixed_size = dict_col_get_fixed_size( + dict_index_get_nth_col(new_index, i)); if (fixed_size == 0) { new_index->trx_id_offset = 0; @@ -1897,13 +1782,14 @@ dict_index_build_internal_clust( /* Add to new_index non-system columns of table not yet included there */ - for (i = 0; i < table->n_cols - DATA_N_SYS_COLS; i++) { + for (i = 0; i + DATA_N_SYS_COLS < (ulint) table->n_cols; i++) { - col = dict_table_get_nth_col(table, i); - ut_ad(col->type.mtype != DATA_SYS); + dict_col_t* col = (dict_col_t*) + dict_table_get_nth_col(table, i); + ut_ad(col->mtype != DATA_SYS); if (!indexed[col->ind]) { - dict_index_add_col(new_index, col, 0); + dict_index_add_col(new_index, table, col, 0); } } @@ -1912,18 +1798,6 @@ dict_index_build_internal_clust( ut_ad((index->type & DICT_IBUF) || (UT_LIST_GET_LEN(table->indexes) == 0)); - /* Store to the column structs the position of the table columns - in the clustered index */ - - for (i = 0; i < new_index->n_def; i++) { - field = dict_index_get_nth_field(new_index, i); - - if (field->prefix_len == 0) { - - field->col->clust_pos = i; - } - } - new_index->cached = TRUE; return(new_index); @@ -1963,9 +1837,9 @@ dict_index_build_internal_non_clust( ut_ad(!(clust_index->type & DICT_UNIVERSAL)); /* Create a new index */ - new_index = dict_mem_index_create - (table->name, index->name, index->space, index->type, - index->n_fields + 1 + clust_index->n_uniq); + new_index = dict_mem_index_create( + table->name, index->name, index->space, index->type, + index->n_fields + 1 + clust_index->n_uniq); /* Copy other relevant data from the old index struct to the new struct: it inherits the values */ @@ -1975,7 +1849,7 @@ dict_index_build_internal_non_clust( new_index->id = index->id; /* Copy fields from index to new_index */ - dict_index_copy(new_index, index, 0, index->n_fields); + dict_index_copy(new_index, index, table, 0, index->n_fields); /* Remember the table columns already contained in new_index */ indexed = mem_alloc(table->n_cols * sizeof *indexed); @@ -2003,7 +1877,7 @@ dict_index_build_internal_non_clust( field = dict_index_get_nth_field(clust_index, i); if (!indexed[field->col->ind]) { - dict_index_add_col(new_index, field->col, + dict_index_add_col(new_index, table, field->col, field->prefix_len); } } @@ -2146,6 +2020,7 @@ dict_foreign_find_index( only has an effect if types_idx != NULL */ { dict_index_t* index; + dict_field_t* field; const char* col_name; ulint i; @@ -2155,10 +2030,12 @@ dict_foreign_find_index( if (dict_index_get_n_fields(index) >= n_cols) { for (i = 0; i < n_cols; i++) { - col_name = dict_index_get_nth_field(index, i) - ->col->name; - if (dict_index_get_nth_field(index, i) - ->prefix_len != 0) { + field = dict_index_get_nth_field(index, i); + + col_name = dict_table_get_col_name( + table, dict_col_get_no(field->col)); + + if (field->prefix_len != 0) { /* We do not accept column 
prefix indexes here */ @@ -2170,10 +2047,11 @@ dict_foreign_find_index( break; } - if (types_idx && !cmp_types_are_equal - (dict_index_get_nth_type(index, i), - dict_index_get_nth_type(types_idx, i), - check_charsets)) { + if (types_idx && !cmp_cols_are_equal( + dict_index_get_nth_col(index, i), + dict_index_get_nth_col(types_idx, + i), + check_charsets)) { break; } @@ -2260,11 +2138,11 @@ dict_foreign_add_to_cache( ut_ad(mutex_own(&(dict_sys->mutex))); #endif /* UNIV_SYNC_DEBUG */ - for_table = dict_table_check_if_in_cache_low - (foreign->foreign_table_name); + for_table = dict_table_check_if_in_cache_low( + foreign->foreign_table_name); - ref_table = dict_table_check_if_in_cache_low - (foreign->referenced_table_name); + ref_table = dict_table_check_if_in_cache_low( + foreign->referenced_table_name); ut_a(for_table || ref_table); if (for_table) { @@ -2283,21 +2161,21 @@ dict_foreign_add_to_cache( } if (for_in_cache->referenced_table == NULL && ref_table) { - index = dict_foreign_find_index - (ref_table, - (const char**) for_in_cache->referenced_col_names, - for_in_cache->n_fields, - for_in_cache->foreign_index, check_charsets); + index = dict_foreign_find_index( + ref_table, + (const char**) for_in_cache->referenced_col_names, + for_in_cache->n_fields, for_in_cache->foreign_index, + check_charsets); if (index == NULL) { - dict_foreign_error_report - (ef, for_in_cache, - "there is no index in referenced table" - " which would contain\n" - "the columns as the first columns," - " or the data types in the\n" - "referenced table do not match" - " the ones in table."); + dict_foreign_error_report( + ef, for_in_cache, + "there is no index in referenced table" + " which would contain\n" + "the columns as the first columns," + " or the data types in the\n" + "referenced table do not match" + " the ones in table."); if (for_in_cache == foreign) { mem_heap_free(foreign->heap); @@ -2315,28 +2193,28 @@ dict_foreign_add_to_cache( } if (for_in_cache->foreign_table == NULL && for_table) { - index = dict_foreign_find_index - (for_table, - (const char**) for_in_cache->foreign_col_names, - for_in_cache->n_fields, - for_in_cache->referenced_index, check_charsets); + index = dict_foreign_find_index( + for_table, + (const char**) for_in_cache->foreign_col_names, + for_in_cache->n_fields, + for_in_cache->referenced_index, check_charsets); if (index == NULL) { - dict_foreign_error_report - (ef, for_in_cache, - "there is no index in the table" - " which would contain\n" - "the columns as the first columns," - " or the data types in the\n" - "table do not match" - " the ones in the referenced table."); + dict_foreign_error_report( + ef, for_in_cache, + "there is no index in the table" + " which would contain\n" + "the columns as the first columns," + " or the data types in the\n" + "table do not match" + " the ones in the referenced table."); if (for_in_cache == foreign) { if (added_to_referenced_list) { - UT_LIST_REMOVE - (referenced_list, - ref_table->referenced_list, - for_in_cache); + UT_LIST_REMOVE( + referenced_list, + ref_table->referenced_list, + for_in_cache); } mem_heap_free(foreign->heap); @@ -2560,17 +2438,16 @@ static const char* dict_scan_col( /*==========*/ - /* out: scanned to */ - struct charset_info_st* cs,/* in: the character set of ptr */ - const char* ptr, /* in: scanned to */ - ibool* success,/* out: TRUE if success */ - dict_table_t* table, /* in: table in which the column is */ - dict_col_t** column, /* out: pointer to column if success */ - mem_heap_t* heap, /* in: heap where to allocate 
the name */ - const char** name) /* out,own: the column name; NULL if no name - was scannable */ + /* out: scanned to */ + struct charset_info_st* cs, /* in: the character set of ptr */ + const char* ptr, /* in: scanned to */ + ibool* success,/* out: TRUE if success */ + dict_table_t* table, /* in: table in which the column is */ + const dict_col_t** column, /* out: pointer to column if success */ + mem_heap_t* heap, /* in: heap where to allocate */ + const char** name) /* out,own: the column name; + NULL if no name was scannable */ { - dict_col_t* col; ulint i; *success = FALSE; @@ -2588,14 +2465,15 @@ dict_scan_col( } else { for (i = 0; i < dict_table_get_n_cols(table); i++) { - col = dict_table_get_nth_col(table, i); + const char* col_name = dict_table_get_col_name( + table, i); - if (0 == innobase_strcasecmp(col->name, *name)) { + if (0 == innobase_strcasecmp(col_name, *name)) { /* Found */ *success = TRUE; - *column = col; - strcpy((char*) *name, col->name); + *column = dict_table_get_nth_col(table, i); + strcpy((char*) *name, col_name); break; } @@ -2933,7 +2811,7 @@ dict_create_foreign_constraints_low( ibool is_on_delete; ulint n_on_deletes; ulint n_on_updates; - dict_col_t* columns[500]; + const dict_col_t*columns[500]; const char* column_names[500]; const char* referenced_table_name; @@ -2997,8 +2875,8 @@ dict_create_foreign_constraints_low( if (table_to_alter == NULL) { highest_id_so_far = 0; } else { - highest_id_so_far = dict_table_get_highest_foreign_id - (table_to_alter); + highest_id_so_far = dict_table_get_highest_foreign_id( + table_to_alter); } /* Scan for foreign key declarations in a loop */ @@ -3054,8 +2932,8 @@ loop: /* The following call adds the foreign key constraints to the data dictionary system tables on disk */ - error = dict_create_add_foreigns_to_dictionary - (highest_id_so_far, table, trx); + error = dict_create_add_foreigns_to_dictionary( + highest_id_so_far, table, trx); return(error); } @@ -3085,8 +2963,8 @@ loop: ptr = dict_skip_word(cs, ptr, &success); if (!success) { - dict_foreign_report_syntax_err - (name, start_of_latest_foreign, ptr); + dict_foreign_report_syntax_err( + name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); } @@ -3129,8 +3007,8 @@ col_loop1: ptr = dict_accept(cs, ptr, ")", &success); if (!success) { - dict_foreign_report_syntax_err - (name, start_of_latest_foreign, ptr); + dict_foreign_report_syntax_err( + name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); } @@ -3157,8 +3035,8 @@ col_loop1: ptr = dict_accept(cs, ptr, "REFERENCES", &success); if (!success || !my_isspace(cs, *ptr)) { - dict_foreign_report_syntax_err - (name, start_of_latest_foreign, ptr); + dict_foreign_report_syntax_err( + name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); } @@ -3176,8 +3054,8 @@ col_loop1: db_len = dict_get_db_name_len(table->name); - foreign->id = mem_heap_alloc - (foreign->heap, db_len + strlen(constraint_name) + 2); + foreign->id = mem_heap_alloc( + foreign->heap, db_len + strlen(constraint_name) + 2); ut_memcpy(foreign->id, table->name, db_len); foreign->id[db_len] = '/'; @@ -3192,8 +3070,10 @@ col_loop1: foreign->foreign_col_names = mem_heap_alloc(foreign->heap, i * sizeof(void*)); for (i = 0; i < foreign->n_fields; i++) { - foreign->foreign_col_names[i] = mem_heap_strdup - (foreign->heap, columns[i]->name); + foreign->foreign_col_names[i] = mem_heap_strdup( + foreign->heap, + dict_table_get_col_name(table, + dict_col_get_no(columns[i]))); } ptr = dict_scan_table_name(cs, ptr, 
&referenced_table, name, @@ -3282,8 +3162,8 @@ scan_on_conditions: if (!success) { dict_foreign_free(foreign); - dict_foreign_report_syntax_err - (name, start_of_latest_foreign, ptr); + dict_foreign_report_syntax_err( + name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); } @@ -3319,8 +3199,8 @@ scan_on_conditions: if (!success) { dict_foreign_free(foreign); - dict_foreign_report_syntax_err - (name, start_of_latest_foreign, ptr); + dict_foreign_report_syntax_err( + name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); } @@ -3353,8 +3233,7 @@ scan_on_conditions: } for (j = 0; j < foreign->n_fields; j++) { - if ((dict_index_get_nth_type - (foreign->foreign_index, j)->prtype) + if ((dict_index_get_nth_col(foreign->foreign_index, j)->prtype) & DATA_NOT_NULL) { /* It is not sensible to define SET NULL @@ -3519,9 +3398,9 @@ dict_create_foreign_constraints( str = dict_strip_comments(sql_string); heap = mem_heap_create(10000); - err = dict_create_foreign_constraints_low - (trx, heap, innobase_get_charset(trx->mysql_thd), - str, name, reject_fks); + err = dict_create_foreign_constraints_low( + trx, heap, innobase_get_charset(trx->mysql_thd), str, name, + reject_fks); mem_heap_free(heap); mem_free(str); @@ -3710,68 +3589,18 @@ found: return(index); } -/************************************************************************** -Creates an index tree struct. */ - -dict_tree_t* -dict_tree_create( -/*=============*/ - /* out, own: created tree */ - dict_index_t* index, /* in: the index for which to create: in the - case of a mixed tree, this should be the - index of the cluster object */ - ulint page_no)/* in: root page number of the index */ -{ - dict_tree_t* tree; - - tree = mem_alloc(sizeof(dict_tree_t)); - - /* Inherit info from the index */ - - tree->type = index->type; - tree->space = index->space; - tree->page = page_no; - - tree->id = index->id; - - tree->tree_index = NULL; - - tree->magic_n = DICT_TREE_MAGIC_N; - - rw_lock_create(&tree->lock, SYNC_INDEX_TREE); - - return(tree); -} - -/************************************************************************** -Frees an index tree struct. */ - -void -dict_tree_free( -/*===========*/ - dict_tree_t* tree) /* in, own: index tree */ -{ - ut_a(tree); - ut_ad(tree->magic_n == DICT_TREE_MAGIC_N); - - rw_lock_free(&(tree->lock)); - mem_free(tree); -} - #ifdef UNIV_DEBUG /************************************************************************** Checks that a tuple has n_fields_cmp value in a sensible range, so that no comparison can occur with the page number field in a node pointer. */ ibool -dict_tree_check_search_tuple( -/*=========================*/ +dict_index_check_search_tuple( +/*==========================*/ /* out: TRUE if ok */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ dtuple_t* tuple) /* in: tuple used in a search */ { - dict_index_t* index = tree->tree_index; - ut_a(index); ut_a(dtuple_get_n_fields_cmp(tuple) <= dict_index_get_n_unique_in_tree(index)); @@ -3783,10 +3612,10 @@ dict_tree_check_search_tuple( Builds a node pointer out of a physical record and a page number. 
*/ dtuple_t* -dict_tree_build_node_ptr( -/*=====================*/ +dict_index_build_node_ptr( +/*======================*/ /* out, own: node pointer */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ rec_t* rec, /* in: record for which to build node pointer */ ulint page_no,/* in: page number to put in node pointer */ @@ -3795,20 +3624,17 @@ dict_tree_build_node_ptr( level */ { dtuple_t* tuple; - dict_index_t* ind; dfield_t* field; byte* buf; ulint n_unique; - ind = tree->tree_index; - - if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) { + if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) { /* In a universal index tree, we take the whole record as - the node pointer if the reord is on the leaf level, + the node pointer if the record is on the leaf level, on non-leaf levels we remove the last field, which contains the page number of the child page */ - ut_a(!dict_table_is_comp(ind->table)); + ut_a(!dict_table_is_comp(index->table)); n_unique = rec_get_n_fields_old(rec); if (level > 0) { @@ -3816,7 +3642,7 @@ dict_tree_build_node_ptr( n_unique--; } } else { - n_unique = dict_index_get_n_unique_in_tree(ind); + n_unique = dict_index_get_n_unique_in_tree(index); } tuple = dtuple_create(heap, n_unique + 1); @@ -3829,7 +3655,7 @@ dict_tree_build_node_ptr( dtuple_set_n_fields_cmp(tuple, n_unique); - dict_index_copy_types(tuple, ind, n_unique); + dict_index_copy_types(tuple, index, n_unique); buf = mem_heap_alloc(heap, 4); @@ -3838,9 +3664,9 @@ dict_tree_build_node_ptr( field = dtuple_get_nth_field(tuple, n_unique); dfield_set_data(field, buf, 4); - dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4, 0); + dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4); - rec_copy_prefix_to_dtuple(tuple, rec, ind, n_unique, heap); + rec_copy_prefix_to_dtuple(tuple, rec, index, n_unique, heap); dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple) | REC_STATUS_NODE_PTR); @@ -3854,23 +3680,21 @@ Copies an initial segment of a physical record, long enough to specify an index entry uniquely. */ rec_t* -dict_tree_copy_rec_order_prefix( -/*============================*/ +dict_index_copy_rec_order_prefix( +/*=============================*/ /* out: pointer to the prefix record */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ rec_t* rec, /* in: record for which to copy prefix */ ulint* n_fields,/* out: number of fields copied */ byte** buf, /* in/out: memory buffer for the copied prefix, or NULL */ ulint* buf_size)/* in/out: buffer size */ { - dict_index_t* index; ulint n; UNIV_PREFETCH_R(rec); - index = tree->tree_index; - if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) { + if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) { ut_a(!dict_table_is_comp(index->table)); n = rec_get_n_fields_old(rec); } else { @@ -3885,27 +3709,24 @@ dict_tree_copy_rec_order_prefix( Builds a typed data tuple out of a physical record. 
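dict_index_build_node_ptr() above appends a 4-byte DATA_SYS_CHILD field holding the child page number to the node-pointer tuple; in InnoDB such values are stored most-significant byte first (the mach_write_to_4()/mach_read_from_4() layout). The sketch below uses local stand-ins for those helpers purely to show the byte order; it is not the patch's code.

#include <stdio.h>

typedef unsigned long	ulint;	/* local stand-in for InnoDB's ulint */

/* Write a page number as four bytes, most significant byte first. */
static void
write_page_no(unsigned char* buf, ulint page_no)
{
	buf[0] = (unsigned char) (page_no >> 24);
	buf[1] = (unsigned char) (page_no >> 16);
	buf[2] = (unsigned char) (page_no >> 8);
	buf[3] = (unsigned char) (page_no);
}

/* Read it back in the same byte order. */
static ulint
read_page_no(const unsigned char* buf)
{
	return(((ulint) buf[0] << 24) | ((ulint) buf[1] << 16)
	       | ((ulint) buf[2] << 8) | (ulint) buf[3]);
}

int
main(void)
{
	unsigned char	buf[4];

	write_page_no(buf, 123456UL);
	printf("%lu\n", read_page_no(buf));	/* prints 123456 */
	return(0);
}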
*/ dtuple_t* -dict_tree_build_data_tuple( -/*=======================*/ +dict_index_build_data_tuple( +/*========================*/ /* out, own: data tuple */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ rec_t* rec, /* in: record for which to build data tuple */ ulint n_fields,/* in: number of data fields */ mem_heap_t* heap) /* in: memory heap where tuple created */ { dtuple_t* tuple; - dict_index_t* ind; - ind = tree->tree_index; - - ut_ad(dict_table_is_comp(ind->table) + ut_ad(dict_table_is_comp(index->table) || n_fields <= rec_get_n_fields_old(rec)); tuple = dtuple_create(heap, n_fields); - dict_index_copy_types(tuple, ind, n_fields); + dict_index_copy_types(tuple, index, n_fields); - rec_copy_prefix_to_dtuple(tuple, rec, ind, n_fields, heap); + rec_copy_prefix_to_dtuple(tuple, rec, index, n_fields, heap); ut_ad(dtuple_check_typed(tuple)); @@ -3927,15 +3748,17 @@ dict_index_calc_min_rec_len( ulint nullable = 0; sum = REC_N_NEW_EXTRA_BYTES; for (i = 0; i < dict_index_get_n_fields(index); i++) { - dtype_t*t = dict_index_get_nth_type(index, i); - ulint size = dtype_get_fixed_size(t); + const dict_col_t* col + = dict_index_get_nth_col(index, i); + ulint size = dict_col_get_fixed_size(col); sum += size; if (!size) { - size = dtype_get_len(t); + size = col->len; sum += size < 128 ? 1 : 2; } - if (!(dtype_get_prtype(t) & DATA_NOT_NULL)) + if (!(col->prtype & DATA_NOT_NULL)) { nullable++; + } } /* round the NULL flags up to full bytes */ @@ -3945,7 +3768,8 @@ dict_index_calc_min_rec_len( } for (i = 0; i < dict_index_get_n_fields(index); i++) { - sum += dtype_get_fixed_size(dict_index_get_nth_type(index, i)); + sum += dict_col_get_fixed_size( + dict_index_get_nth_col(index, i)); } if (sum > 127) { @@ -4030,8 +3854,8 @@ dict_update_statistics_low( index = dict_table_get_first_index(table); - table->stat_n_rows = index->stat_n_diff_key_vals - [dict_index_get_n_unique(index)]; + table->stat_n_rows = index->stat_n_diff_key_vals[ + dict_index_get_n_unique(index)]; table->stat_clustered_index_size = index->stat_index_size; @@ -4162,8 +3986,8 @@ dict_table_print_low( (ulong) UT_LIST_GET_LEN(table->indexes), (ulong) table->stat_n_rows); - for (i = 0; i < table->n_cols - 1; i++) { - dict_col_print_low(dict_table_get_nth_col(table, i)); + for (i = 0; i + 1 < (ulint) table->n_cols; i++) { + dict_col_print_low(table, dict_table_get_nth_col(table, i)); fputs("; ", stderr); } @@ -4197,18 +4021,20 @@ static void dict_col_print_low( /*===============*/ - dict_col_t* col) /* in: column */ + const dict_table_t* table, /* in: table */ + const dict_col_t* col) /* in: column */ { - dtype_t* type; + dtype_t type; #ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); #endif /* UNIV_SYNC_DEBUG */ - type = dict_col_get_type(col); - fprintf(stderr, "%s: ", col->name); + dict_col_copy_type(col, &type); + fprintf(stderr, "%s: ", dict_table_get_col_name(table, + dict_col_get_no(col))); - dtype_print(type); + dtype_print(&type); } /************************************************************************** @@ -4219,7 +4045,6 @@ dict_index_print_low( /*=================*/ dict_index_t* index) /* in: index */ { - dict_tree_t* tree; ib_longlong n_vals; ulint i; @@ -4227,11 +4052,9 @@ dict_index_print_low( ut_ad(mutex_own(&(dict_sys->mutex))); #endif /* UNIV_SYNC_DEBUG */ - tree = index->tree; - if (index->n_user_defined_cols > 0) { - n_vals = index->stat_n_diff_key_vals - [index->n_user_defined_cols]; + n_vals = index->stat_n_diff_key_vals[ + index->n_user_defined_cols]; } else { 
n_vals = index->stat_n_diff_key_vals[1]; } @@ -4243,13 +4066,13 @@ dict_index_print_low( " leaf pages %lu, size pages %lu\n" " FIELDS: ", index->name, - (ulong) ut_dulint_get_high(tree->id), - (ulong) ut_dulint_get_low(tree->id), + (ulong) ut_dulint_get_high(index->id), + (ulong) ut_dulint_get_low(index->id), (ulong) index->n_user_defined_cols, (ulong) index->n_fields, (ulong) index->n_uniq, (ulong) index->type, - (ulong) tree->page, + (ulong) index->page, (ulong) n_vals, (ulong) index->stat_n_leaf_pages, (ulong) index->stat_index_size); @@ -4261,9 +4084,9 @@ dict_index_print_low( putc('\n', stderr); #ifdef UNIV_BTR_PRINT - btr_print_size(tree); + btr_print_size(index); - btr_print_tree(tree, 7); + btr_print_index(index, 7); #endif /* UNIV_BTR_PRINT */ } @@ -4335,8 +4158,9 @@ dict_print_info_on_foreign_key_in_create_format( if (dict_tables_have_same_db(foreign->foreign_table_name, foreign->referenced_table_name)) { /* Do not print the database name of the referenced table */ - ut_print_name(file, trx, TRUE, dict_remove_db_name - (foreign->referenced_table_name)); + ut_print_name(file, trx, TRUE, + dict_remove_db_name( + foreign->referenced_table_name)); } else { /* Look for the '/' in the table name */ @@ -4420,8 +4244,8 @@ dict_print_info_on_foreign_keys( while (foreign != NULL) { if (create_table_format) { - dict_print_info_on_foreign_key_in_create_format - (file, trx, foreign, TRUE); + dict_print_info_on_foreign_key_in_create_format( + file, trx, foreign, TRUE); } else { ulint i; fputs("; (", file); @@ -4444,9 +4268,9 @@ dict_print_info_on_foreign_keys( if (i) { putc(' ', file); } - ut_print_name(file, trx, FALSE, - foreign->referenced_col_names - [i]); + ut_print_name( + file, trx, FALSE, + foreign->referenced_col_names[i]); } putc(')', file); diff --git a/storage/innobase/dict/dict0load.c b/storage/innobase/dict/dict0load.c index bc393140a6f..518e32ec4dc 100644 --- a/storage/innobase/dict/dict0load.c +++ b/storage/innobase/dict/dict0load.c @@ -26,6 +26,25 @@ Created 4/24/1996 Heikki Tuuri #include "srv0start.h" #include "srv0srv.h" +/******************************************************************** +Returns TRUE if index's i'th column's name is 'name' .*/ +static +ibool +name_of_col_is( +/*===========*/ + /* out: */ + dict_table_t* table, /* in: table */ + dict_index_t* index, /* in: index */ + ulint i, /* in: */ + const char* name) /* in: name to compare to */ +{ + ulint tmp = dict_col_get_no(dict_field_get_col( + dict_index_get_nth_field( + index, i))); + + return(strcmp(name, dict_table_get_col_name(table, tmp)) == 0); +} + /************************************************************************ Finds the first table name in the given database. */ @@ -331,7 +350,6 @@ dict_load_columns( ulint mtype; ulint prtype; ulint col_len; - ulint prec; ulint i; mtr_t mtr; @@ -356,7 +374,7 @@ dict_load_columns( btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); - for (i = 0; i < table->n_cols - DATA_N_SYS_COLS; i++) { + for (i = 0; i + DATA_N_SYS_COLS < (ulint) table->n_cols; i++) { rec = btr_pcur_get_rec(&pcur); @@ -372,8 +390,7 @@ dict_load_columns( ut_ad(len == 4); ut_a(i == mach_read_from_4(field)); - ut_a(!strcmp("NAME", dict_field_get_col - (dict_index_get_nth_field(sys_index, 4))->name)); + ut_a(name_of_col_is(sys_columns, sys_index, 4, "NAME")); field = rec_get_nth_field_old(rec, 4, &len); name = mem_heap_strdupl(heap, (char*) field, len); @@ -392,30 +409,25 @@ dict_load_columns( /* Use the binary collation for string columns of binary type. 
*/ - prtype = dtype_form_prtype - (prtype, - DATA_MYSQL_BINARY_CHARSET_COLL); + prtype = dtype_form_prtype( + prtype, + DATA_MYSQL_BINARY_CHARSET_COLL); } else { /* Use the default charset for other than binary columns. */ - prtype = dtype_form_prtype - (prtype, - data_mysql_default_charset_coll); + prtype = dtype_form_prtype( + prtype, + data_mysql_default_charset_coll); } } field = rec_get_nth_field_old(rec, 7, &len); col_len = mach_read_from_4(field); - ut_a(!strcmp("PREC", dict_field_get_col - (dict_index_get_nth_field(sys_index, 8))->name)); + ut_a(name_of_col_is(sys_columns, sys_index, 8, "PREC")); - field = rec_get_nth_field_old(rec, 8, &len); - prec = mach_read_from_4(field); - - dict_mem_table_add_col(table, name, mtype, prtype, col_len, - prec); + dict_mem_table_add_col(table, name, mtype, prtype, col_len); btr_pcur_move_to_next_user_rec(&pcur, &mtr); } @@ -526,13 +538,13 @@ dict_load_fields( prefix_len = 0; } - ut_a(!strcmp("COL_NAME", dict_field_get_col - (dict_index_get_nth_field(sys_index, 4))->name)); + ut_a(name_of_col_is(sys_fields, sys_index, 4, "COL_NAME")); field = rec_get_nth_field_old(rec, 4, &len); - dict_mem_index_add_field(index, mem_heap_strdupl - (heap, (char*) field, len), + dict_mem_index_add_field(index, + mem_heap_strdupl(heap, + (char*) field, len), prefix_len); btr_pcur_move_to_next_user_rec(&pcur, &mtr); @@ -631,8 +643,7 @@ dict_load_indexes( ut_ad(len == 8); id = mach_read_from_8(field); - ut_a(!strcmp("NAME", dict_field_get_col - (dict_index_get_nth_field(sys_index, 4))->name)); + ut_a(name_of_col_is(sys_indexes, sys_index, 4, "NAME")); field = rec_get_nth_field_old(rec, 4, &name_len); name_buf = mem_heap_strdupl(heap, (char*) field, name_len); @@ -646,8 +657,7 @@ dict_load_indexes( field = rec_get_nth_field_old(rec, 7, &len); space = mach_read_from_4(field); - ut_a(!strcmp("PAGE_NO", dict_field_get_col - (dict_index_get_nth_field(sys_index, 8))->name)); + ut_a(name_of_col_is(sys_indexes, sys_index, 8, "PAGE_NO")); field = rec_get_nth_field_old(rec, 8, &len); page_no = mach_read_from_4(field); @@ -785,8 +795,7 @@ err_exit: goto err_exit; } - ut_a(!strcmp("SPACE", dict_field_get_col - (dict_index_get_nth_field(sys_index, 9))->name)); + ut_a(name_of_col_is(sys_tables, sys_index, 9, "SPACE")); field = rec_get_nth_field_old(rec, 9, &len); space = mach_read_from_4(field); @@ -819,8 +828,7 @@ err_exit: } } - ut_a(!strcmp("N_COLS", dict_field_get_col - (dict_index_get_nth_field(sys_index, 4))->name)); + ut_a(name_of_col_is(sys_tables, sys_index, 4, "N_COLS")); field = rec_get_nth_field_old(rec, 4, &len); n_cols = mach_read_from_4(field); @@ -837,8 +845,7 @@ err_exit: table->ibd_file_missing = ibd_file_missing; - ut_a(!strcmp("ID", dict_field_get_col - (dict_index_get_nth_field(sys_index, 3))->name)); + ut_a(name_of_col_is(sys_tables, sys_index, 3, "ID")); field = rec_get_nth_field_old(rec, 3, &len); table->id = mach_read_from_8(field); @@ -925,8 +932,8 @@ dict_load_table_on_id( /*---------------------------------------------------*/ /* Get the secondary index based on ID for table SYS_TABLES */ sys_tables = dict_sys->sys_tables; - sys_table_ids = dict_table_get_next_index - (dict_table_get_first_index(sys_tables)); + sys_table_ids = dict_table_get_next_index( + dict_table_get_first_index(sys_tables)); ut_a(!dict_table_is_comp(sys_tables)); heap = mem_heap_create(256); @@ -1032,11 +1039,11 @@ dict_load_foreign_cols( ut_ad(mutex_own(&(dict_sys->mutex))); #endif /* UNIV_SYNC_DEBUG */ - foreign->foreign_col_names = mem_heap_alloc - (foreign->heap, foreign->n_fields 
* sizeof(void*)); + foreign->foreign_col_names = mem_heap_alloc( + foreign->heap, foreign->n_fields * sizeof(void*)); - foreign->referenced_col_names = mem_heap_alloc - (foreign->heap, foreign->n_fields * sizeof(void*)); + foreign->referenced_col_names = mem_heap_alloc( + foreign->heap, foreign->n_fields * sizeof(void*)); mtr_start(&mtr); sys_foreign_cols = dict_table_get_low("SYS_FOREIGN_COLS"); @@ -1067,12 +1074,12 @@ dict_load_foreign_cols( ut_a(i == mach_read_from_4(field)); field = rec_get_nth_field_old(rec, 4, &len); - foreign->foreign_col_names[i] = mem_heap_strdupl - (foreign->heap, (char*) field, len); + foreign->foreign_col_names[i] = mem_heap_strdupl( + foreign->heap, (char*) field, len); field = rec_get_nth_field_old(rec, 5, &len); - foreign->referenced_col_names[i] = mem_heap_strdupl - (foreign->heap, (char*) field, len); + foreign->referenced_col_names[i] = mem_heap_strdupl( + foreign->heap, (char*) field, len); btr_pcur_move_to_next_user_rec(&pcur, &mtr); } @@ -1165,8 +1172,8 @@ dict_load_foreign( foreign = dict_mem_foreign_create(); - foreign->n_fields = mach_read_from_4 - (rec_get_nth_field_old(rec, 5, &len)); + foreign->n_fields = mach_read_from_4( + rec_get_nth_field_old(rec, 5, &len)); ut_a(len == 4); @@ -1178,12 +1185,12 @@ dict_load_foreign( foreign->id = mem_heap_strdup(foreign->heap, id); field = rec_get_nth_field_old(rec, 3, &len); - foreign->foreign_table_name = mem_heap_strdupl - (foreign->heap, (char*) field, len); + foreign->foreign_table_name = mem_heap_strdupl( + foreign->heap, (char*) field, len); field = rec_get_nth_field_old(rec, 4, &len); - foreign->referenced_table_name = mem_heap_strdupl - (foreign->heap, (char*) field, len); + foreign->referenced_table_name = mem_heap_strdupl( + foreign->heap, (char*) field, len); btr_pcur_close(&pcur); mtr_commit(&mtr); @@ -1257,8 +1264,8 @@ dict_load_foreigns( /* Get the secondary index based on FOR_NAME from table SYS_FOREIGN */ - sec_index = dict_table_get_next_index - (dict_table_get_first_index(sys_foreign)); + sec_index = dict_table_get_next_index( + dict_table_get_first_index(sys_foreign)); start_load: heap = mem_heap_create(256); @@ -1289,7 +1296,8 @@ loop: following call does the comparison in the latin1_swedish_ci charset-collation, in a case-insensitive way. 
*/ - if (0 != cmp_data_data(dfield_get_type(dfield), + if (0 != cmp_data_data(dfield_get_type(dfield)->mtype, + dfield_get_type(dfield)->prtype, dfield_get_data(dfield), dfield_get_len(dfield), field, len)) { diff --git a/storage/innobase/dict/dict0mem.c b/storage/innobase/dict/dict0mem.c index a6a42dd0411..cee0ffec20b 100644 --- a/storage/innobase/dict/dict0mem.c +++ b/storage/innobase/dict/dict0mem.c @@ -66,6 +66,7 @@ dict_mem_table_create( table->cols = mem_heap_alloc(heap, (n_cols + DATA_N_SYS_COLS) * sizeof(dict_col_t)); + table->col_names = NULL; UT_LIST_INIT(table->indexes); table->auto_inc_lock = mem_heap_alloc(heap, lock_get_size()); @@ -76,20 +77,22 @@ dict_mem_table_create( UT_LIST_INIT(table->foreign_list); UT_LIST_INIT(table->referenced_list); +#ifdef UNIV_DEBUG table->does_not_fit_in_memory = FALSE; +#endif /* UNIV_DEBUG */ table->stat_initialized = FALSE; table->stat_modified_counter = 0; - table->max_row_size = 0; + table->big_rows = 0; mutex_create(&table->autoinc_mutex, SYNC_DICT_AUTOINC_MUTEX); table->autoinc_inited = FALSE; - +#ifdef UNIV_DEBUG table->magic_n = DICT_TABLE_MAGIC_N; - +#endif /* UNIV_DEBUG */ return(table); } @@ -105,9 +108,74 @@ dict_mem_table_free( ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); mutex_free(&(table->autoinc_mutex)); + + if (table->col_names && (table->n_def < table->n_cols)) { + ut_free((void*)table->col_names); + } + mem_heap_free(table->heap); } +/******************************************************************** +Add 'name' to end of the col_names array (see dict_table_t::col_names). Call +ut_free on col_names (if not NULL), allocate new array (if heap, from it, +otherwise with ut_malloc), and copy col_names + name to it. */ +static +const char* +dict_add_col_name( +/*==============*/ + /* out: new column names array */ + const char* col_names, /* in: existing column names, or + NULL */ + ulint cols, /* in: number of existing columns */ + const char* name, /* in: new column name */ + mem_heap_t* heap) /* in: heap, or NULL */ +{ + ulint i; + ulint old_len; + ulint new_len; + ulint total_len; + const char* s; + char* res; + + ut_a(((cols == 0) && !col_names) || ((cols > 0) && col_names)); + ut_a(*name); + + /* Find out length of existing array. */ + if (col_names) { + s = col_names; + + for (i = 0; i < cols; i++) { + s += strlen(s) + 1; + } + + old_len = s - col_names; + } else { + old_len = 0; + } + + new_len = strlen(name) + 1; + total_len = old_len + new_len; + + if (heap) { + res = mem_heap_alloc(heap, total_len); + } else { + res = ut_malloc(total_len); + } + + if (old_len > 0) { + memcpy(res, col_names, old_len); + } + + memcpy(res + old_len, name, new_len); + + if (col_names) { + ut_free((char*)col_names); + } + + return(res); +} + /************************************************************************** Adds a column definition to a table. */ @@ -118,29 +186,36 @@ dict_mem_table_add_col( const char* name, /* in: column name */ ulint mtype, /* in: main datatype */ ulint prtype, /* in: precise type */ - ulint len, /* in: length */ - ulint prec) /* in: precision */ + ulint len) /* in: precision */ { dict_col_t* col; - dtype_t* type; + ulint mbminlen; + ulint mbmaxlen; + mem_heap_t* heap; ut_ad(table && name); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); table->n_def++; - col = dict_table_get_nth_col(table, table->n_def - 1); + heap = table->n_def < table->n_cols ? 
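dict_add_col_name() above grows the packed name buffer in a simple way: it measures the existing names by walking them once, allocates old_len + strlen(name) + 1 bytes (from the given heap, or with ut_malloc when no heap is passed), copies both parts in, and frees the old buffer. A standalone sketch of that append step, with malloc()/free() standing in for ut_malloc()/ut_free() and without the heap variant or error handling:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append one name to a packed "a\0b\0" buffer: measure the old contents,
allocate a buffer large enough for both, copy, and release the old one. */
static char*
append_packed_name(char* names, unsigned n_names, const char* name)
{
	const char*	s = names;
	size_t		old_len;
	size_t		new_len = strlen(name) + 1;
	unsigned	i;
	char*		res;

	for (i = 0; i < n_names; i++) {	/* length of the existing buffer */
		s += strlen(s) + 1;
	}

	old_len = names ? (size_t) (s - names) : 0;

	res = malloc(old_len + new_len);

	if (old_len > 0) {
		memcpy(res, names, old_len);
	}

	memcpy(res + old_len, name, new_len);
	free(names);

	return(res);
}

int
main(void)
{
	char*	names = NULL;

	names = append_packed_name(names, 0, "id");
	names = append_packed_name(names, 1, "payload");

	/* prints "id payload" */
	printf("%s %s\n", names, names + strlen(names) + 1);
	free(names);
	return(0);
}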
NULL : table->heap; + table->col_names = dict_add_col_name(table->col_names, + table->n_def - 1, + name, heap); + + col = (dict_col_t*) dict_table_get_nth_col(table, table->n_def - 1); col->ind = table->n_def - 1; - col->name = mem_heap_strdup(table->heap, name); - col->table = table; col->ord_part = 0; - col->clust_pos = ULINT_UNDEFINED; + col->mtype = mtype; + col->prtype = prtype; + col->len = len; - type = dict_col_get_type(col); + dtype_get_mblen(mtype, prtype, &mbminlen, &mbmaxlen); - dtype_set(type, mtype, prtype, len, prec); + col->mbminlen = mbminlen; + col->mbmaxlen = mbmaxlen; } /************************************************************************** @@ -171,6 +246,7 @@ dict_mem_index_create( index->type = type; index->space = space; + index->page = 0; index->name = mem_heap_strdup(heap, index_name); index->table_name = table_name; index->table = NULL; @@ -183,8 +259,10 @@ dict_mem_index_create( index->stat_n_diff_key_vals = NULL; index->cached = FALSE; + memset(&index->lock, 0, sizeof index->lock); +#ifdef UNIV_DEBUG index->magic_n = DICT_INDEX_MAGIC_N; - +#endif /* UNIV_DEBUG */ return(index); } diff --git a/storage/innobase/eval/eval0eval.c b/storage/innobase/eval/eval0eval.c index 18408832e91..cbc47ec508f 100644 --- a/storage/innobase/eval/eval0eval.c +++ b/storage/innobase/eval/eval0eval.c @@ -334,7 +334,7 @@ eval_predefined_2( if (len2 > len1) { int_val = (lint) (len1 - + (eval_rnd % (len2 - len1 + 1))); + + (eval_rnd % (len2 - len1 + 1))); } else { int_val = (lint) len1; } diff --git a/storage/innobase/eval/eval0proc.c b/storage/innobase/eval/eval0proc.c index 3b3a42d56ef..f5a9d9dc2a8 100644 --- a/storage/innobase/eval/eval0proc.c +++ b/storage/innobase/eval/eval0proc.c @@ -51,8 +51,8 @@ if_step( for (;;) { eval_exp(elsif_node->cond); - if (eval_node_get_ibool_val - (elsif_node->cond)) { + if (eval_node_get_ibool_val( + elsif_node->cond)) { /* The condition evaluated to TRUE: start execution from the first diff --git a/storage/innobase/fil/fil0fil.c b/storage/innobase/fil/fil0fil.c index e0a9955f8fb..1aff3739103 100644 --- a/storage/innobase/fil/fil0fil.c +++ b/storage/innobase/fil/fil0fil.c @@ -529,9 +529,8 @@ fil_node_open_file( os_file_read() in Windows to read from a file opened for async I/O! 
*/ - node->handle = os_file_create_simple_no_error_handling - (node->name, - OS_FILE_OPEN, OS_FILE_READ_ONLY, &success); + node->handle = os_file_create_simple_no_error_handling( + node->name, OS_FILE_OPEN, OS_FILE_READ_ONLY, &success); if (!success) { /* The following call prints an error message */ os_file_get_last_error(TRUE); @@ -1564,9 +1563,9 @@ fil_write_flushed_lsn_to_data_files( while (node) { mutex_exit(&(fil_system->mutex)); - err = fil_write_lsn_and_arch_no_to_file - (space->id, sum_of_sizes, - lsn, arch_log_no); + err = fil_write_lsn_and_arch_no_to_file( + space->id, sum_of_sizes, lsn, + arch_log_no); if (err != DB_SUCCESS) { return(err); @@ -1936,9 +1935,9 @@ fil_op_log_parse_or_replay( ut_a(space_id != 0); - if (fil_create_new_single_table_tablespace - (&space_id, name, FALSE, - FIL_IBD_FILE_INITIAL_SIZE) != DB_SUCCESS) { + if (fil_create_new_single_table_tablespace( + &space_id, name, FALSE, + FIL_IBD_FILE_INITIAL_SIZE) != DB_SUCCESS) { ut_error; } } @@ -2580,8 +2579,8 @@ fil_reset_too_high_lsns( filepath = fil_make_ibd_name(name, FALSE); - file = os_file_create_simple_no_error_handling - (filepath, OS_FILE_OPEN, OS_FILE_READ_WRITE, &success); + file = os_file_create_simple_no_error_handling( + filepath, OS_FILE_OPEN, OS_FILE_READ_WRITE, &success); if (!success) { /* The following call prints an error message */ os_file_get_last_error(TRUE); @@ -2654,8 +2653,8 @@ fil_reset_too_high_lsns( if (ut_dulint_cmp(mach_read_from_8(page + FIL_PAGE_LSN), current_lsn) > 0) { /* We have to reset the lsn */ - space_id = mach_read_from_4 - (page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); + space_id = mach_read_from_4( + page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); page_no = mach_read_from_4(page + FIL_PAGE_OFFSET); buf_flush_init_for_writing(page, current_lsn, space_id, @@ -2735,8 +2734,8 @@ fil_open_single_table_tablespace( filepath = fil_make_ibd_name(name, FALSE); - file = os_file_create_simple_no_error_handling - (filepath, OS_FILE_OPEN, OS_FILE_READ_ONLY, &success); + file = os_file_create_simple_no_error_handling( + filepath, OS_FILE_OPEN, OS_FILE_READ_ONLY, &success); if (!success) { /* The following call prints an error message */ os_file_get_last_error(TRUE); @@ -2887,8 +2886,8 @@ fil_load_single_table_tablespace( dict_casedn_str(filepath); # endif /* !UNIV_HOTBACKUP */ #endif - file = os_file_create_simple_no_error_handling - (filepath, OS_FILE_OPEN, OS_FILE_READ_ONLY, &success); + file = os_file_create_simple_no_error_handling( + filepath, OS_FILE_OPEN, OS_FILE_READ_ONLY, &success); if (!success) { /* The following call prints an error message */ os_file_get_last_error(TRUE); @@ -3242,8 +3241,8 @@ fil_load_single_table_tablespaces(void) ".ibd")) { /* The name ends in .ibd; try opening the file */ - fil_load_single_table_tablespace - (dbinfo.name, fileinfo.name); + fil_load_single_table_tablespace( + dbinfo.name, fileinfo.name); } next_file_item: ret = fil_file_readdir_next_file(&err, @@ -3736,8 +3735,8 @@ fil_extend_tablespaces_to_stored_len(void) size_in_header = fsp_get_size_low(buf); - success = fil_extend_space_to_desired_size - (&actual_size, space->id, size_in_header); + success = fil_extend_space_to_desired_size( + &actual_size, space->id, size_in_header); if (!success) { fprintf(stderr, "InnoDB: Error: could not extend the" @@ -4082,9 +4081,9 @@ fil_io( for (;;) { if (node == NULL) { - fil_report_invalid_page_access - (block_offset, space_id, - space->name, byte_offset, len, type); + fil_report_invalid_page_access( + block_offset, space_id, space->name, + byte_offset, len, 
type); ut_error; } @@ -4113,9 +4112,9 @@ fil_io( if (space->purpose == FIL_TABLESPACE && space->id != 0 && node->size <= block_offset) { - fil_report_invalid_page_access - (block_offset, space_id, - space->name, byte_offset, len, type); + fil_report_invalid_page_access( + block_offset, space_id, space->name, byte_offset, + len, type); ut_error; } @@ -4384,10 +4383,10 @@ skip_flush: space->is_in_unflushed_spaces = FALSE; - UT_LIST_REMOVE - (unflushed_spaces, - system->unflushed_spaces, - space); + UT_LIST_REMOVE( + unflushed_spaces, + system->unflushed_spaces, + space); } } diff --git a/storage/innobase/fsp/fsp0fsp.c b/storage/innobase/fsp/fsp0fsp.c index 4da25f4d479..00c5e582b3e 100644 --- a/storage/innobase/fsp/fsp0fsp.c +++ b/storage/innobase/fsp/fsp0fsp.c @@ -768,8 +768,9 @@ xdes_lst_get_next( space = buf_frame_get_space_id(descr); - return(xdes_lst_get_descriptor - (space, flst_get_next_addr(descr + XDES_FLST_NODE, mtr), mtr)); + return(xdes_lst_get_descriptor( + space, + flst_get_next_addr(descr + XDES_FLST_NODE, mtr), mtr)); } /************************************************************************ @@ -1116,8 +1117,8 @@ fsp_try_extend_data_file( fprintf(stderr, "InnoDB: Error: Last data file size is %lu," " max size allowed %lu\n", - (ulong) srv_data_file_sizes - [srv_n_data_files - 1], + (ulong) srv_data_file_sizes[ + srv_n_data_files - 1], (ulong) srv_last_file_size_max); } @@ -1137,13 +1138,13 @@ fsp_try_extend_data_file( if (size < FSP_EXTENT_SIZE) { /* Let us first extend the file to 64 pages */ - success = fsp_try_extend_data_file_with_pages - (space, FSP_EXTENT_SIZE - 1, - header, mtr); + success = fsp_try_extend_data_file_with_pages( + space, FSP_EXTENT_SIZE - 1, + header, mtr); if (!success) { - new_size = mtr_read_ulint - (header + FSP_SIZE, - MLOG_4BYTES, mtr); + new_size = mtr_read_ulint( + header + FSP_SIZE, + MLOG_4BYTES, mtr); *actual_increase = new_size - old_size; @@ -1246,9 +1247,9 @@ fsp_fill_free_list( /* Update the free limit info in the log system and make a checkpoint */ if (space == 0) { - log_fsp_current_free_limit_set_and_checkpoint - ((i + FSP_EXTENT_SIZE) - / ((1024 * 1024) / UNIV_PAGE_SIZE)); + log_fsp_current_free_limit_set_and_checkpoint( + (i + FSP_EXTENT_SIZE) + / ((1024 * 1024) / UNIV_PAGE_SIZE)); } if (0 == i % XDES_DESCRIBED_PER_PAGE) { @@ -1967,8 +1968,8 @@ fseg_find_last_used_frag_page_slot( ut_ad(inode && mtr); for (i = 0; i < FSEG_FRAG_ARR_N_SLOTS; i++) { - page_no = fseg_get_nth_frag_page_no - (inode, FSEG_FRAG_ARR_N_SLOTS - i - 1, mtr); + page_no = fseg_get_nth_frag_page_no( + inode, FSEG_FRAG_ARR_N_SLOTS - i - 1, mtr); if (page_no != FIL_NULL) { @@ -2526,8 +2527,8 @@ fseg_alloc_free_page_low( return(FIL_NULL); } - success = fsp_try_extend_data_file_with_pages - (space, ret_page, space_header, mtr); + success = fsp_try_extend_data_file_with_pages( + space, ret_page, space_header, mtr); if (!success) { /* No disk space left */ return(FIL_NULL); @@ -3161,8 +3162,8 @@ fseg_free_extent( /* Drop search system page hash index if the page is found in the pool and is hashed */ - btr_search_drop_page_hash_when_freed - (space, first_page_in_extent + i); + btr_search_drop_page_hash_when_freed( + space, first_page_in_extent + i); } } @@ -3176,8 +3177,8 @@ fseg_free_extent( flst_remove(seg_inode + FSEG_NOT_FULL, descr + XDES_FLST_NODE, mtr); - not_full_n_used = mtr_read_ulint - (seg_inode + FSEG_NOT_FULL_N_USED, MLOG_4BYTES, mtr); + not_full_n_used = mtr_read_ulint( + seg_inode + FSEG_NOT_FULL_N_USED, MLOG_4BYTES, mtr); descr_n_used = 
xdes_get_n_used(descr, mtr); ut_a(not_full_n_used >= descr_n_used); @@ -3758,15 +3759,15 @@ fsp_validate( mtr_start(&mtr); mtr_x_lock(fil_space_get_latch(space), &mtr); - seg_inode_page = fut_get_ptr - (space, node_addr, RW_X_LATCH, &mtr) + seg_inode_page = fut_get_ptr( + space, node_addr, RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE; - seg_inode = fsp_seg_inode_page_get_nth_inode - (seg_inode_page, n, &mtr); - ut_a(ut_dulint_cmp - (mach_read_from_8(seg_inode + FSEG_ID), - ut_dulint_zero) != 0); + seg_inode = fsp_seg_inode_page_get_nth_inode( + seg_inode_page, n, &mtr); + ut_a(ut_dulint_cmp( + mach_read_from_8(seg_inode + FSEG_ID), + ut_dulint_zero) != 0); fseg_validate_low(seg_inode, &mtr); descr_count += flst_get_len(seg_inode + FSEG_FREE, @@ -3778,8 +3779,8 @@ fsp_validate( n_used2 += fseg_get_n_frag_pages(seg_inode, &mtr); - next_node_addr = flst_get_next_addr - (seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); + next_node_addr = flst_get_next_addr( + seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); mtr_commit(&mtr); } @@ -3804,29 +3805,29 @@ fsp_validate( mtr_start(&mtr); mtr_x_lock(fil_space_get_latch(space), &mtr); - seg_inode_page = fut_get_ptr - (space, node_addr, RW_X_LATCH, &mtr) + seg_inode_page = fut_get_ptr( + space, node_addr, RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE; - seg_inode = fsp_seg_inode_page_get_nth_inode - (seg_inode_page, n, &mtr); - if (ut_dulint_cmp - (mach_read_from_8(seg_inode + FSEG_ID), - ut_dulint_zero) != 0) { + seg_inode = fsp_seg_inode_page_get_nth_inode( + seg_inode_page, n, &mtr); + if (ut_dulint_cmp( + mach_read_from_8(seg_inode + FSEG_ID), + ut_dulint_zero) != 0) { fseg_validate_low(seg_inode, &mtr); - descr_count += flst_get_len - (seg_inode + FSEG_FREE, &mtr); - descr_count += flst_get_len - (seg_inode + FSEG_FULL, &mtr); - descr_count += flst_get_len - (seg_inode + FSEG_NOT_FULL, &mtr); - n_used2 += fseg_get_n_frag_pages - (seg_inode, &mtr); + descr_count += flst_get_len( + seg_inode + FSEG_FREE, &mtr); + descr_count += flst_get_len( + seg_inode + FSEG_FULL, &mtr); + descr_count += flst_get_len( + seg_inode + FSEG_NOT_FULL, &mtr); + n_used2 += fseg_get_n_frag_pages( + seg_inode, &mtr); } - next_node_addr = flst_get_next_addr - (seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); + next_node_addr = flst_get_next_addr( + seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); mtr_commit(&mtr); } @@ -3931,21 +3932,21 @@ fsp_print( mtr_start(&mtr); mtr_x_lock(fil_space_get_latch(space), &mtr); - seg_inode_page = fut_get_ptr - (space, node_addr, RW_X_LATCH, &mtr) + seg_inode_page = fut_get_ptr( + space, node_addr, RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE; - seg_inode = fsp_seg_inode_page_get_nth_inode - (seg_inode_page, n, &mtr); - ut_a(ut_dulint_cmp - (mach_read_from_8(seg_inode + FSEG_ID), - ut_dulint_zero) != 0); + seg_inode = fsp_seg_inode_page_get_nth_inode( + seg_inode_page, n, &mtr); + ut_a(ut_dulint_cmp( + mach_read_from_8(seg_inode + FSEG_ID), + ut_dulint_zero) != 0); fseg_print_low(seg_inode, &mtr); n_segs++; - next_node_addr = flst_get_next_addr - (seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); + next_node_addr = flst_get_next_addr( + seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); mtr_commit(&mtr); } @@ -3968,22 +3969,22 @@ fsp_print( mtr_start(&mtr); mtr_x_lock(fil_space_get_latch(space), &mtr); - seg_inode_page = fut_get_ptr - (space, node_addr, RW_X_LATCH, &mtr) + seg_inode_page = fut_get_ptr( + space, node_addr, RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE; - seg_inode = fsp_seg_inode_page_get_nth_inode - (seg_inode_page, n, &mtr); - if (ut_dulint_cmp - 
(mach_read_from_8(seg_inode + FSEG_ID), - ut_dulint_zero) != 0) { + seg_inode = fsp_seg_inode_page_get_nth_inode( + seg_inode_page, n, &mtr); + if (ut_dulint_cmp( + mach_read_from_8(seg_inode + FSEG_ID), + ut_dulint_zero) != 0) { fseg_print_low(seg_inode, &mtr); n_segs++; } - next_node_addr = flst_get_next_addr - (seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); + next_node_addr = flst_get_next_addr( + seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); mtr_commit(&mtr); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 754afa89378..066c0ce48d4 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -54,6 +54,12 @@ pthread_cond_t commit_cond; pthread_mutex_t commit_cond_m; bool innodb_inited= 0; +/* + This needs to exist until the query cache callback is removed + or learns to pass hton. +*/ +static handlerton *legacy_innodb_hton; + /*-----------------------------------------------------------------*/ /* These variables are used to implement (semi-)synchronous MySQL binlog replication for InnoDB tables. */ @@ -197,22 +203,25 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, my_bool not_used __attribute__((unused))); static INNOBASE_SHARE *get_share(const char *table_name); static void free_share(INNOBASE_SHARE *share); -static int innobase_close_connection(THD* thd); -static int innobase_commit(THD* thd, bool all); -static int innobase_rollback(THD* thd, bool all); -static int innobase_rollback_to_savepoint(THD* thd, void *savepoint); -static int innobase_savepoint(THD* thd, void *savepoint); -static int innobase_release_savepoint(THD* thd, void *savepoint); -static handler *innobase_create_handler(TABLE_SHARE *table, +static int innobase_close_connection(handlerton *hton, THD* thd); +static int innobase_commit(handlerton *hton, THD* thd, bool all); +static int innobase_rollback(handlerton *hton, THD* thd, bool all); +static int innobase_rollback_to_savepoint(handlerton *hton, THD* thd, + void *savepoint); +static int innobase_savepoint(handlerton *hton, THD* thd, void *savepoint); +static int innobase_release_savepoint(handlerton *hton, THD* thd, + void *savepoint); +static handler *innobase_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root); static const char innobase_hton_name[]= "InnoDB"; -handlerton innobase_hton; - -static handler *innobase_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *innobase_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_innobase(table); + return new (mem_root) ha_innobase(hton, table); } @@ -380,7 +389,8 @@ documentation, see handler.cc. 
*/ int innobase_release_temporary_latches( /*===============================*/ - THD *thd) + handlerton *hton, + THD *thd) { trx_t* trx; @@ -389,7 +399,7 @@ innobase_release_temporary_latches( return 0; } - trx = (trx_t*) thd->ha_data[innobase_hton.slot]; + trx = (trx_t*) thd->ha_data[hton->slot]; if (trx) { innobase_release_stat_resources(trx); @@ -841,13 +851,14 @@ trx_t* check_trx_exists( /*=============*/ /* out: InnoDB transaction handle */ + handlerton* hton, /* in: handlerton for innodb */ THD* thd) /* in: user thread handle */ { trx_t* trx; ut_ad(thd == current_thd); - trx = (trx_t*) thd->ha_data[innobase_hton.slot]; + trx = (trx_t*) thd->ha_data[hton->slot]; if (trx == NULL) { DBUG_ASSERT(thd != NULL); @@ -861,7 +872,7 @@ check_trx_exists( CPU time */ trx->support_xa = (ibool)(thd->variables.innodb_support_xa); - thd->ha_data[innobase_hton.slot] = trx; + thd->ha_data[hton->slot] = trx; } else { if (trx->magic_n != TRX_MAGIC_N) { mem_analyze_corruption(trx); @@ -889,8 +900,8 @@ check_trx_exists( /************************************************************************* Construct ha_innobase handler. */ -ha_innobase::ha_innobase(TABLE_SHARE *table_arg) - :handler(&innobase_hton, table_arg), +ha_innobase::ha_innobase(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), int_table_flags(HA_REC_NOT_IN_SEQ | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | @@ -917,7 +928,7 @@ ha_innobase::update_thd( row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; - trx = check_trx_exists(thd); + trx = check_trx_exists(ht, thd); if (prebuilt->trx != trx) { @@ -938,10 +949,11 @@ inline void innobase_register_stmt( /*===================*/ + handlerton* hton, /* in: Innobase hton */ THD* thd) /* in: MySQL thd (connection) object */ { /* Register the statement */ - trans_register_ha(thd, FALSE, &innobase_hton); + trans_register_ha(thd, FALSE, hton); } /************************************************************************* @@ -955,17 +967,18 @@ inline void innobase_register_trx_and_stmt( /*===========================*/ + handlerton *hton, /* in: Innobase handlerton */ THD* thd) /* in: MySQL thd (connection) object */ { /* NOTE that actually innobase_register_stmt() registers also the transaction in the AUTOCOMMIT=1 mode. */ - innobase_register_stmt(thd); + innobase_register_stmt(hton, thd); if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* No autocommit mode, register for a transaction */ - trans_register_ha(thd, TRUE, &innobase_hton); + trans_register_ha(thd, TRUE, hton); } } @@ -1061,7 +1074,7 @@ innobase_query_caching_of_table_permitted( return((my_bool)FALSE); } - trx = check_trx_exists(thd); + trx = check_trx_exists(legacy_innodb_hton, thd); if (trx->has_search_latch) { ut_print_timestamp(stderr); sql_print_error("The calling thread is holding the adaptive " @@ -1120,7 +1133,7 @@ innobase_query_caching_of_table_permitted( if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(legacy_innodb_hton, thd); trx->active_trans = 1; } @@ -1295,7 +1308,7 @@ ha_innobase::init_table_handle_for_HANDLER(void) if (prebuilt->trx->active_trans == 0) { - innobase_register_trx_and_stmt(current_thd); + innobase_register_trx_and_stmt(ht, current_thd); prebuilt->trx->active_trans = 1; } @@ -1329,7 +1342,7 @@ ha_innobase::init_table_handle_for_HANDLER(void) Opens an InnoDB database. 
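
/* A minimal sketch, with hypothetical names (example_commit is not in
the patch), of the calling convention the hunks above introduce: every
handlerton callback now receives the handlerton itself and reaches its
per-connection data through hton->slot instead of a file-scope object. */
static int
example_commit(
/*===========*/
				/* out: 0 on success */
	handlerton*	hton,	/* in: handlerton passed in by the server */
	THD*		thd,	/* in: user thread handle */
	bool		all)	/* in: TRUE - commit the whole transaction */
{
	trx_t*	trx = check_trx_exists(hton, thd);

	/* ... commit, or mark the statement done, on trx ... */
	(void) trx;
	(void) all;

	return(0);
}
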
*/ int -innobase_init(void) +innobase_init(void *p) /*===============*/ { static char current_dir[3]; /* Set if using current lib */ @@ -1338,31 +1351,33 @@ innobase_init(void) char *default_path; DBUG_ENTER("innobase_init"); + handlerton *innobase_hton= (handlerton *)p; + legacy_innodb_hton= innobase_hton; - innobase_hton.state=have_innodb; - innobase_hton.db_type= DB_TYPE_INNODB; - innobase_hton.savepoint_offset=sizeof(trx_named_savept_t); - innobase_hton.close_connection=innobase_close_connection; - innobase_hton.savepoint_set=innobase_savepoint; - innobase_hton.savepoint_rollback=innobase_rollback_to_savepoint; - innobase_hton.savepoint_release=innobase_release_savepoint; - innobase_hton.commit=innobase_commit; - innobase_hton.rollback=innobase_rollback; - innobase_hton.prepare=innobase_xa_prepare; - innobase_hton.recover=innobase_xa_recover; - innobase_hton.commit_by_xid=innobase_commit_by_xid; - innobase_hton.rollback_by_xid=innobase_rollback_by_xid; - innobase_hton.create_cursor_read_view=innobase_create_cursor_view; - innobase_hton.set_cursor_read_view=innobase_set_cursor_view; - innobase_hton.close_cursor_read_view=innobase_close_cursor_view; - innobase_hton.create=innobase_create_handler; - innobase_hton.drop_database=innobase_drop_database; - innobase_hton.panic=innobase_end; - innobase_hton.start_consistent_snapshot=innobase_start_trx_and_assign_read_view; - innobase_hton.flush_logs=innobase_flush_logs; - innobase_hton.show_status=innobase_show_status; - innobase_hton.flags=HTON_NO_FLAGS; - innobase_hton.release_temporary_latches=innobase_release_temporary_latches; + innobase_hton->state=have_innodb; + innobase_hton->db_type= DB_TYPE_INNODB; + innobase_hton->savepoint_offset=sizeof(trx_named_savept_t); + innobase_hton->close_connection=innobase_close_connection; + innobase_hton->savepoint_set=innobase_savepoint; + innobase_hton->savepoint_rollback=innobase_rollback_to_savepoint; + innobase_hton->savepoint_release=innobase_release_savepoint; + innobase_hton->commit=innobase_commit; + innobase_hton->rollback=innobase_rollback; + innobase_hton->prepare=innobase_xa_prepare; + innobase_hton->recover=innobase_xa_recover; + innobase_hton->commit_by_xid=innobase_commit_by_xid; + innobase_hton->rollback_by_xid=innobase_rollback_by_xid; + innobase_hton->create_cursor_read_view=innobase_create_cursor_view; + innobase_hton->set_cursor_read_view=innobase_set_cursor_view; + innobase_hton->close_cursor_read_view=innobase_close_cursor_view; + innobase_hton->create=innobase_create_handler; + innobase_hton->drop_database=innobase_drop_database; + innobase_hton->panic=innobase_end; + innobase_hton->start_consistent_snapshot=innobase_start_trx_and_assign_read_view; + innobase_hton->flush_logs=innobase_flush_logs; + innobase_hton->show_status=innobase_show_status; + innobase_hton->flags=HTON_NO_FLAGS; + innobase_hton->release_temporary_latches=innobase_release_temporary_latches; if (have_innodb != SHOW_OPTION_YES) DBUG_RETURN(0); // nothing else to do @@ -1612,7 +1627,7 @@ error: Closes an InnoDB database. */ int -innobase_end(ha_panic_function type) +innobase_end(handlerton *hton, ha_panic_function type) /*==============*/ /* out: TRUE if error */ { @@ -1650,7 +1665,7 @@ Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes the logs, and the name of this function should be innobase_checkpoint. 
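
/* A sketch, again with hypothetical names, of the initialization shape
used above: the plugin init function now receives the handlerton as a
void pointer and fills in its fields, rather than populating a global
handlerton object of its own. */
static handlerton*	example_hton;	/* kept only for callbacks that
					cannot yet be handed the hton */

static int
example_init(void* p)
/*=================*/
{
	handlerton*	hton = (handlerton*) p;

	example_hton = hton;
	hton->state = SHOW_OPTION_YES;
	hton->savepoint_offset = sizeof(trx_named_savept_t);
	hton->flags = HTON_NO_FLAGS;
	/* hton->commit, hton->rollback, ... are assigned likewise */

	return(0);
}
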
*/ bool -innobase_flush_logs(void) +innobase_flush_logs(handlerton *hton) /*=====================*/ /* out: TRUE if error */ { @@ -1689,6 +1704,7 @@ int innobase_start_trx_and_assign_read_view( /*====================================*/ /* out: 0 */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd) /* in: MySQL thread handle of the user for whom the transaction should be committed */ { @@ -1698,7 +1714,7 @@ innobase_start_trx_and_assign_read_view( /* Create a new trx struct for thd, if it does not yet have one */ - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* This is just to play safe: release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to @@ -1717,9 +1733,7 @@ innobase_start_trx_and_assign_read_view( /* Set the MySQL flag to mark that there is an active transaction */ if (trx->active_trans == 0) { - - innobase_register_trx_and_stmt(current_thd); - + innobase_register_trx_and_stmt(hton, current_thd); trx->active_trans = 1; } @@ -1734,7 +1748,8 @@ int innobase_commit( /*============*/ /* out: 0 */ - THD* thd, /* in: MySQL thread handle of the user for whom + handlerton *hton, /* in: Innodb handlerton */ + THD* thd, /* in: MySQL thread handle of the user for whom the transaction should be committed */ bool all) /* in: TRUE - commit transaction FALSE - the current SQL statement ended */ @@ -1744,7 +1759,7 @@ innobase_commit( DBUG_ENTER("innobase_commit"); DBUG_PRINT("trans", ("ending transaction")); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Update the info whether we should skip XA steps that eat CPU time */ trx->support_xa = (ibool)(thd->variables.innodb_support_xa); @@ -1870,6 +1885,7 @@ int innobase_report_binlog_offset_and_commit( /*=====================================*/ /* out: 0 */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd, /* in: user thread */ void* trx_handle, /* in: InnoDB trx handle */ char* log_file_name, /* in: latest binlog file name */ @@ -1887,7 +1903,7 @@ innobase_report_binlog_offset_and_commit( trx->flush_log_later = TRUE; - innobase_commit(thd, TRUE); + innobase_commit(hton, thd, TRUE); trx->flush_log_later = FALSE; @@ -1935,11 +1951,12 @@ int innobase_commit_complete( /*=====================*/ /* out: 0 */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd) /* in: user thread */ { trx_t* trx; - trx = (trx_t*) thd->ha_data[innobase_hton.slot]; + trx = (trx_t*) thd->ha_data[hton->slot]; if (trx && trx->active_trans) { @@ -1963,6 +1980,7 @@ static int innobase_rollback( /*==============*/ /* out: 0 or error number */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ bool all) /* in: TRUE - commit transaction @@ -1974,7 +1992,7 @@ innobase_rollback( DBUG_ENTER("innobase_rollback"); DBUG_PRINT("trans", ("aborting transaction")); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Update the info whether we should skip XA steps that eat CPU time */ trx->support_xa = (ibool)(thd->variables.innodb_support_xa); @@ -2046,6 +2064,7 @@ innobase_rollback_to_savepoint( /*===========================*/ /* out: 0 if success, HA_ERR_NO_SAVEPOINT if no savepoint with the given name */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ void* savepoint) /* in: savepoint data */ @@ -2057,7 +2076,7 @@ innobase_rollback_to_savepoint( 
DBUG_ENTER("innobase_rollback_to_savepoint"); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to release the search system latch @@ -2082,6 +2101,7 @@ innobase_release_savepoint( /*=======================*/ /* out: 0 if success, HA_ERR_NO_SAVEPOINT if no savepoint with the given name */ + handlerton* hton, /* in: handlerton for Innodb */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ void* savepoint) /* in: savepoint data */ @@ -2092,7 +2112,7 @@ innobase_release_savepoint( DBUG_ENTER("innobase_release_savepoint"); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* TODO: use provided savepoint data area to store savepoint data */ @@ -2110,6 +2130,7 @@ int innobase_savepoint( /*===============*/ /* out: always 0, that is, always succeeds */ + handlerton* hton, /* in: handle to the Innodb handlerton */ THD* thd, /* in: handle to the MySQL thread */ void* savepoint) /* in: savepoint data */ { @@ -2126,7 +2147,7 @@ innobase_savepoint( DBUG_ASSERT(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || thd->in_sub_stmt); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to release the search system latch @@ -2153,12 +2174,13 @@ int innobase_close_connection( /*======================*/ /* out: 0 or error number */ + handlerton* hton, /* in: innobase handlerton */ THD* thd) /* in: handle to the MySQL thread of the user whose resources should be free'd */ { trx_t* trx; - trx = (trx_t*)thd->ha_data[innobase_hton.slot]; + trx = (trx_t*)thd->ha_data[hton->slot]; ut_a(trx); @@ -3024,8 +3046,9 @@ ha_innobase::store_key_val_for_row( /****************************************************************** Builds a 'template' to the prebuilt struct. The template is used in fast retrieval of just those column values MySQL needs in its processing. 
*/ +static void -ha_innobase::build_template( +build_template( /*===========*/ row_prebuilt_t* prebuilt, /* in: prebuilt struct */ THD* thd, /* in: current user thread, used @@ -3166,8 +3189,8 @@ include_field: templ->col_no = i; if (index == clust_index) { - templ->rec_field_no = (index->table->cols + i) - ->clust_pos; + templ->rec_field_no = dict_col_get_clust_pos_noninline( + &index->table->cols[i], index); } else { templ->rec_field_no = dict_index_get_nth_col_pos( index, i); @@ -3196,7 +3219,7 @@ include_field: mysql_prefix_len = templ->mysql_col_offset + templ->mysql_col_len; } - templ->type = index->table->cols[i].type.mtype; + templ->type = index->table->cols[i].mtype; templ->mysql_type = (ulint)field->type(); if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { @@ -3205,10 +3228,10 @@ include_field: } templ->charset = dtype_get_charset_coll_noninline( - index->table->cols[i].type.prtype); - templ->mbminlen = index->table->cols[i].type.mbminlen; - templ->mbmaxlen = index->table->cols[i].type.mbmaxlen; - templ->is_unsigned = index->table->cols[i].type.prtype + index->table->cols[i].prtype); + templ->mbminlen = index->table->cols[i].mbminlen; + templ->mbmaxlen = index->table->cols[i].mbmaxlen; + templ->is_unsigned = index->table->cols[i].prtype & DATA_UNSIGNED; if (templ->type == DATA_BLOB) { prebuilt->templ_contains_blob = TRUE; @@ -3226,8 +3249,9 @@ skip_field: for (i = 0; i < n_requested_fields; i++) { templ = prebuilt->mysql_template + i; - templ->rec_field_no = - (index->table->cols + templ->col_no)->clust_pos; + templ->rec_field_no = dict_col_get_clust_pos_noninline( + &index->table->cols[templ->col_no], + clust_index); } } } @@ -3251,11 +3275,11 @@ ha_innobase::write_row( DBUG_ENTER("ha_innobase::write_row"); if (prebuilt->trx != - (trx_t*) current_thd->ha_data[innobase_hton.slot]) { + (trx_t*) current_thd->ha_data[ht->slot]) { sql_print_error("The transaction object for the table handle is at " "%p, but for the current thread it is at %p", prebuilt->trx, - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr); ut_print_buf(stderr, ((const byte*)prebuilt) - 100, 200); @@ -3263,7 +3287,7 @@ ha_innobase::write_row( "InnoDB: Dump of 200 bytes around transaction.all: ", stderr); ut_print_buf(stderr, - ((byte*)(&(current_thd->ha_data[innobase_hton.slot]))) - 100, + ((byte*)(&(current_thd->ha_data[ht->slot]))) - 100, 200); putc('\n', stderr); ut_error; @@ -3317,7 +3341,7 @@ no_commit: no need to re-acquire locks on it. */ /* Altering to InnoDB format */ - innobase_commit(user_thd, 1); + innobase_commit(ht, user_thd, 1); /* Note that this transaction is still active. */ prebuilt->trx->active_trans = 1; /* We will need an IX lock on the destination table. */ @@ -3333,7 +3357,7 @@ no_commit: /* Commit the transaction. This will release the table locks, so they have to be acquired again. */ - innobase_commit(user_thd, 1); + innobase_commit(ht, user_thd, 1); /* Note that this transaction is still active. */ prebuilt->trx->active_trans = 1; /* Re-acquire the table lock on the source table. */ @@ -3400,7 +3424,8 @@ no_commit: /* We must use the handler code to update the auto-increment value to be sure that we increment it correctly. 
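
/* A sketch of the column-metadata access pattern after this change,
assuming the noninline dictionary accessors declared elsewhere in the
patch: the type fields now live directly in dict_col_t, and the position
in the clustered index is computed from that index instead of being
cached in col->clust_pos. */
static ulint
example_col_clust_pos(
/*==================*/
				/* out: column position in the clustered index */
	dict_table_t*	table,	/* in: table in the dictionary cache */
	ulint		i)	/* in: column number */
{
	dict_index_t*	clust_index
		= dict_table_get_first_index_noninline(table);

	/* table->cols[i].mtype, .prtype, .mbminlen and .mbmaxlen are now
	read directly; the old cols[i].type.* members are gone */

	return(dict_col_get_clust_pos_noninline(&table->cols[i],
						clust_index));
}
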
*/ - update_auto_increment(); + if ((error= update_auto_increment())) + goto func_exit; auto_inc_used = 1; } @@ -3493,9 +3518,11 @@ calc_row_difference( ulint col_type; ulint n_changed = 0; dfield_t dfield; + dict_index_t* clust_index; uint i; n_fields = table->s->fields; + clust_index = dict_table_get_first_index_noninline(prebuilt->table); /* We use upd_buff to convert changed fields */ buf = (byte*) upd_buff; @@ -3526,7 +3553,7 @@ calc_row_difference( field_mysql_type = field->type(); - col_type = prebuilt->table->cols[i].type.mtype; + col_type = prebuilt->table->cols[i].mtype; switch (col_type) { @@ -3581,7 +3608,8 @@ calc_row_difference( /* Let us use a dummy dfield to make the conversion from the MySQL column format to the InnoDB format */ - dfield.type = (prebuilt->table->cols + i)->type; + dict_col_copy_type_noninline(prebuilt->table->cols + i, + &dfield.type); if (n_len != UNIV_SQL_NULL) { buf = row_mysql_store_col_in_innobase_format( @@ -3600,7 +3628,8 @@ calc_row_difference( } ufield->exp = NULL; - ufield->field_no = prebuilt->table->cols[i].clust_pos; + ufield->field_no = dict_col_get_clust_pos_noninline( + &prebuilt->table->cols[i], clust_index); n_changed++; } } @@ -3635,7 +3664,7 @@ ha_innobase::update_row( DBUG_ENTER("ha_innobase::update_row"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -3696,7 +3725,7 @@ ha_innobase::delete_row( DBUG_ENTER("ha_innobase::delete_row"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -3794,7 +3823,7 @@ ha_innobase::try_semi_consistent_read(bool yes) row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); /* Row read type is set to semi consistent read if this was requested by the MySQL and either innodb_locks_unsafe_for_binlog @@ -3961,7 +3990,7 @@ ha_innobase::index_read( DBUG_ENTER("index_read"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); @@ -4076,7 +4105,7 @@ ha_innobase::change_active_index( ut_ad(user_thd == current_thd); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); active_index = keynr; @@ -4166,7 +4195,7 @@ ha_innobase::general_fetch( DBUG_ENTER("general_fetch"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); innodb_srv_conc_enter_innodb(prebuilt->trx); @@ -4402,7 +4431,7 @@ ha_innobase::rnd_pos( &LOCK_status); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4452,7 +4481,7 @@ ha_innobase::position( uint len; ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4580,8 +4609,7 @@ create_table_def( | nulls_allowed | unsigned_type | binary_type | 
long_true_varchar, charset_no), - col_len, - 0); + col_len); } error = row_create_table_for_mysql(table, trx); @@ -4780,7 +4808,7 @@ ha_innobase::create( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(thd); + parent_trx = check_trx_exists(ht, thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -4953,7 +4981,7 @@ ha_innobase::discard_or_import_tablespace( ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); dict_table = prebuilt->table; trx = prebuilt->trx; @@ -5033,7 +5061,7 @@ ha_innobase::delete_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(thd); + parent_trx = check_trx_exists(ht, thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5100,6 +5128,7 @@ void innobase_drop_database( /*===================*/ /* out: error number */ + handlerton *hton, /* in: handlerton of Innodb */ char* path) /* in: database path; inside InnoDB the name of the last directory in the path is used as the database name: for example, in 'mysql/data/test' @@ -5115,7 +5144,7 @@ innobase_drop_database( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(current_thd); + parent_trx = check_trx_exists(hton, current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5194,7 +5223,7 @@ ha_innobase::rename_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(current_thd); + parent_trx = check_trx_exists(ht, current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5281,7 +5310,7 @@ ha_innobase::records_in_range( DBUG_ENTER("records_in_range"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); prebuilt->trx->op_info = (char*)"estimating records in index range"; @@ -5723,7 +5752,7 @@ ha_innobase::check( ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (prebuilt->mysql_template == NULL) { /* Build the template; we will use a dummy template @@ -6007,7 +6036,7 @@ ha_innobase::can_switch_engines(void) DBUG_ENTER("ha_innobase::can_switch_engines"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton.slot]); + (trx_t*) current_thd->ha_data[ht->slot]); prebuilt->trx->op_info = "determining if there are foreign key constraints"; @@ -6174,25 +6203,11 @@ ha_innobase::start_stmt( 1) ::store_lock(), 2) ::external_lock(), 3) ::init_table_handle_for_HANDLER(), and - 4) :.transactional_table_lock(). */ + 4) ::transactional_table_lock(). 
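
/* A sketch of the narrowed dict_mem_table_add_col() call used in
create_table_def() and in the insert buffer code: the trailing precision
argument is gone, so the column length is now the last parameter. The
table and column names here are made up for illustration. */
static dict_table_t*
example_make_table(void)
/*====================*/
{
	dict_table_t*	table = dict_mem_table_create("test/example", 0, 2, 0);

	dict_mem_table_add_col(table, "ID", DATA_INT, DATA_NOT_NULL, 4);
	dict_mem_table_add_col(table, "PAYLOAD", DATA_BINARY, 0, 0);

	return(table);
}
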
*/ prebuilt->select_lock_type = prebuilt->stored_select_lock_type; } - - if (prebuilt->stored_select_lock_type != LOCK_S - && prebuilt->stored_select_lock_type != LOCK_X) { - sql_print_error( - "stored_select_lock_type is %lu inside " - "::start_stmt()!", - prebuilt->stored_select_lock_type); - - /* Set the value to LOCK_X: this is just fault - tolerance, we do not know what the correct value - should be! */ - - prebuilt->select_lock_type = LOCK_X; - } } trx->detailed_error[0] = '\0'; @@ -6200,10 +6215,10 @@ ha_innobase::start_stmt( /* Set the MySQL flag to mark that there is an active transaction */ if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(ht, thd); trx->active_trans = 1; } else { - innobase_register_stmt(thd); + innobase_register_stmt(ht, thd); } return(0); @@ -6276,10 +6291,10 @@ ha_innobase::external_lock( transaction */ if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(ht, thd); trx->active_trans = 1; } else if (trx->n_mysql_tables_in_use == 0) { - innobase_register_stmt(thd); + innobase_register_stmt(ht, thd); } trx->n_mysql_tables_in_use++; @@ -6357,7 +6372,7 @@ ha_innobase::external_lock( if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { if (trx->active_trans != 0) { - innobase_commit(thd, TRUE); + innobase_commit(ht, thd, TRUE); } } else { if (trx->isolation_level <= TRX_ISO_READ_COMMITTED @@ -6438,7 +6453,7 @@ ha_innobase::transactional_table_lock( /* Set the MySQL flag to mark that there is an active transaction */ if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(ht, thd); trx->active_trans = 1; } @@ -6486,6 +6501,7 @@ Monitor to the client. */ bool innodb_show_status( /*===============*/ + handlerton* hton, /* in: the innodb handlerton */ THD* thd, /* in: the MySQL query thread of the caller */ stat_print_fn *stat_print) { @@ -6501,7 +6517,7 @@ innodb_show_status( DBUG_RETURN(FALSE); } - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); innobase_release_stat_resources(trx); @@ -6576,6 +6592,7 @@ Implements the SHOW MUTEX STATUS command. . */ bool innodb_mutex_show_status( /*=====================*/ + handlerton* hton, /* in: the innodb handlerton */ THD* thd, /* in: the MySQL query thread of the caller */ stat_print_fn* stat_print) @@ -6657,14 +6674,15 @@ innodb_mutex_show_status( DBUG_RETURN(FALSE); } -bool innobase_show_status(THD* thd, stat_print_fn* stat_print, - enum ha_stat_type stat_type) +bool innobase_show_status(handlerton *hton, THD* thd, + stat_print_fn* stat_print, + enum ha_stat_type stat_type) { switch (stat_type) { case HA_ENGINE_STATUS: - return innodb_show_status(thd, stat_print); + return innodb_show_status(hton, thd, stat_print); case HA_ENGINE_MUTEX: - return innodb_mutex_show_status(thd, stat_print); + return innodb_mutex_show_status(hton, thd, stat_print); default: return FALSE; } @@ -6764,7 +6782,7 @@ ha_innobase::store_lock( because we call update_thd() later, in ::external_lock()! Failure to understand this caused a serious memory corruption bug in 5.1.11. */ - trx = check_trx_exists(thd); + trx = check_trx_exists(ht, thd); /* NOTE: MySQL can call this function with lock 'type' TL_IGNORE! 
Be careful to ignore TL_IGNORE if we are going to do something with @@ -7150,7 +7168,7 @@ ha_innobase::reset_auto_increment(ulonglong value) bool ha_innobase::get_error_message(int error, String *buf) { - trx_t* trx = check_trx_exists(current_thd); + trx_t* trx = check_trx_exists(ht, current_thd); buf->copy(trx->detailed_error, strlen(trx->detailed_error), system_charset_info); @@ -7369,13 +7387,14 @@ int innobase_xa_prepare( /*================*/ /* out: 0 or error number */ + handlerton *hton, THD* thd, /* in: handle to the MySQL thread of the user whose XA transaction should be prepared */ bool all) /* in: TRUE - commit transaction FALSE - the current SQL statement ended */ { int error = 0; - trx_t* trx = check_trx_exists(thd); + trx_t* trx = check_trx_exists(hton, thd); if (thd->lex->sql_command != SQLCOM_XA_PREPARE && (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) @@ -7465,6 +7484,7 @@ innobase_xa_recover( /*================*/ /* out: number of prepared transactions stored in xid_list */ + handlerton *hton, XID* xid_list, /* in/out: prepared transactions */ uint len) /* in: number of slots in xid_list */ { @@ -7484,6 +7504,7 @@ int innobase_commit_by_xid( /*===================*/ /* out: 0 or error number */ + handlerton *hton, XID* xid) /* in: X/Open XA transaction identification */ { trx_t* trx; @@ -7507,6 +7528,7 @@ int innobase_rollback_by_xid( /*=====================*/ /* out: 0 or error number */ + handlerton *hton, XID *xid) /* in: X/Open XA transaction identification */ { trx_t* trx; @@ -7527,12 +7549,13 @@ This consistent view is then used inside of MySQL when accessing records using a cursor. */ void* -innobase_create_cursor_view(void) -/*=============================*/ - /* out: Pointer to cursor view or NULL */ +innobase_create_cursor_view( + /* out: pointer to cursor view or NULL */ + handlerton *hton, /* in: innobase hton */ + THD* thd) /* in: user thread handle */ { return(read_cursor_view_create_for_mysql( - check_trx_exists(current_thd))); + check_trx_exists(hton, thd))); } /*********************************************************************** @@ -7542,10 +7565,11 @@ corresponding MySQL thread still lacks one. */ void innobase_close_cursor_view( -/*=======================*/ + handlerton *hton, + THD* thd, /* in: user thread handle */ void* curview)/* in: Consistent read view to be closed */ { - read_cursor_view_close_for_mysql(check_trx_exists(current_thd), + read_cursor_view_close_for_mysql(check_trx_exists(hton, current_thd), (cursor_view_t*) curview); } @@ -7558,9 +7582,11 @@ restored to a transaction read view. 
*/ void innobase_set_cursor_view( /*=====================*/ + handlerton *hton, + THD* thd, /* in: user thread handle */ void* curview)/* in: Consistent cursor view to be set */ { - read_cursor_set_for_mysql(check_trx_exists(current_thd), + read_cursor_set_for_mysql(check_trx_exists(hton, current_thd), (cursor_view_t*) curview); } @@ -7605,7 +7631,7 @@ SHOW_VAR innodb_status_variables_export[]= { }; struct st_mysql_storage_engine innobase_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &innobase_hton}; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(innobase) { @@ -7614,6 +7640,7 @@ mysql_declare_plugin(innobase) innobase_hton_name, "Innobase OY", "Supports transactions, row-level locking, and foreign keys", + PLUGIN_LICENSE_GPL, innobase_init, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100 /* 1.0 */, diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 4d5dc6b52d6..d6c9b8a0d9f 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -80,7 +80,7 @@ class ha_innobase: public handler /* Init values for the class: */ public: - ha_innobase(TABLE_SHARE *table_arg); + ha_innobase(handlerton *hton, TABLE_SHARE *table_arg); ~ha_innobase() {} /* Get the row type from the storage engine. If this method returns @@ -201,8 +201,6 @@ class ha_innobase: public handler int cmp_ref(const byte *ref1, const byte *ref2); bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); - void build_template(struct row_prebuilt_struct *prebuilt, THD *thd, - TABLE *table, uint templ_type); }; extern SHOW_VAR innodb_status_variables[]; @@ -240,8 +238,8 @@ extern ulong srv_flush_log_at_trx_commit; } int innobase_init(void); -int innobase_end(ha_panic_function type); -bool innobase_flush_logs(void); +int innobase_end(handlerton *hton, ha_panic_function type); +bool innobase_flush_logs(handlerton *hton); uint innobase_get_free_space(void); /* @@ -258,14 +256,14 @@ int innobase_commit_complete(void* trx_handle); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); #endif -void innobase_drop_database(char *path); -bool innobase_show_status(THD* thd, stat_print_fn*, enum ha_stat_type); +void innobase_drop_database(handlerton *hton, char *path); +bool innobase_show_status(handlerton *hton, THD* thd, stat_print_fn*, enum ha_stat_type); -int innobase_release_temporary_latches(THD *thd); +int innobase_release_temporary_latches(handlerton *hton, THD *thd); -void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); +void innobase_store_binlog_offset_and_flush_log(handlerton *hton, char *binlog_name,longlong offset); -int innobase_start_trx_and_assign_read_view(THD* thd); +int innobase_start_trx_and_assign_read_view(handlerton *hton, THD* thd); /*********************************************************************** This function is used to prepare X/Open XA distributed transaction */ @@ -273,6 +271,7 @@ This function is used to prepare X/Open XA distributed transaction */ int innobase_xa_prepare( /*====================*/ /* out: 0 or error number */ + handlerton *hton, /* in: innobase hton */ THD* thd, /* in: handle to the MySQL thread of the user whose XA transaction should be prepared */ bool all); /* in: TRUE - commit transaction @@ -285,6 +284,7 @@ int innobase_xa_recover( /*====================*/ /* out: number of prepared transactions stored in xid_list */ + handlerton *hton, /* in: innobase hton */ XID* xid_list, /* in/out: prepared transactions */ 
uint len); /* in: number of slots in xid_list */ @@ -295,6 +295,7 @@ which is in the prepared state */ int innobase_commit_by_xid( /*=======================*/ /* out: 0 or error number */ + handlerton *hton, /* in: innobase hton */ XID* xid); /* in : X/Open XA Transaction Identification */ /*********************************************************************** @@ -303,6 +304,7 @@ which is in the prepared state */ int innobase_rollback_by_xid( /* out: 0 or error number */ + handlerton *hton, /* in: innobase hton */ XID *xid); /* in : X/Open XA Transaction Identification */ @@ -313,9 +315,10 @@ This consistent view is then used inside of MySQL when accessing records using a cursor. */ void* -innobase_create_cursor_view(void); -/*=============================*/ - /* out: Pointer to cursor view or NULL */ +innobase_create_cursor_view( + /* out: Pointer to cursor view or NULL */ + handlerton *hton, /* in: innobase hton */ + THD* thd); /* in: user thread handle */ /*********************************************************************** Close the given consistent cursor view of a transaction and restore @@ -325,8 +328,11 @@ corresponding MySQL thread still lacks one. */ void innobase_close_cursor_view( /*=======================*/ + handlerton *hton, /* in: innobase hton */ + THD* thd, /* in: user thread handle */ void* curview); /* in: Consistent read view to be closed */ + /*********************************************************************** Set the given consistent cursor view to a transaction which is created if the corresponding MySQL thread still lacks one. If the given @@ -336,4 +342,6 @@ restored to a transaction read view. */ void innobase_set_cursor_view( /*=====================*/ + handlerton *hton, /* in: innobase hton */ + THD* thd, /* in: user thread handle */ void* curview); /* in: Consistent read view to be set */ diff --git a/storage/innobase/ibuf/ibuf0ibuf.c b/storage/innobase/ibuf/ibuf0ibuf.c index 2f4d0ae1009..96ab928a436 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.c +++ b/storage/innobase/ibuf/ibuf0ibuf.c @@ -305,7 +305,7 @@ ibuf_tree_root_get( ut_a(space == 0); ut_ad(ibuf_inside()); - mtr_x_lock(dict_tree_get_lock((data->index)->tree), mtr); + mtr_x_lock(dict_index_get_lock(data->index), mtr); page = buf_page_get(space, FSP_IBUF_TREE_ROOT_PAGE_NO, RW_X_LATCH, mtr); @@ -524,16 +524,16 @@ ibuf_data_init_for_space( /* use old-style record format for the insert buffer */ table = dict_mem_table_create(buf, space, 2, 0); - dict_mem_table_add_col(table, "PAGE_NO", DATA_BINARY, 0, 0, 0); - dict_mem_table_add_col(table, "TYPES", DATA_BINARY, 0, 0, 0); + dict_mem_table_add_col(table, "PAGE_NO", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, "TYPES", DATA_BINARY, 0, 0); table->id = ut_dulint_add(DICT_IBUF_ID_MIN, space); dict_table_add_to_cache(table); - index = dict_mem_index_create - (buf, "CLUST_IND", space, - DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, 2); + index = dict_mem_index_create( + buf, "CLUST_IND", space, + DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, 2); dict_mem_index_add_field(index, "PAGE_NO", 0); dict_mem_index_add_field(index, "TYPES", 0); @@ -768,9 +768,9 @@ ibuf_set_free_bits_low( return; } - bitmap_page = ibuf_bitmap_get_map_page - (buf_frame_get_space_id(page), - buf_frame_get_page_no(page), mtr); + bitmap_page = ibuf_bitmap_get_map_page( + buf_frame_get_space_id(page), + buf_frame_get_page_no(page), mtr); #ifdef UNIV_IBUF_DEBUG # if 0 fprintf(stderr, @@ -818,17 +818,17 @@ ibuf_set_free_bits( mtr_start(&mtr); - bitmap_page = ibuf_bitmap_get_map_page - 
(buf_frame_get_space_id(page), - buf_frame_get_page_no(page), &mtr); + bitmap_page = ibuf_bitmap_get_map_page( + buf_frame_get_space_id(page), buf_frame_get_page_no(page), + &mtr); if (max_val != ULINT_UNDEFINED) { #ifdef UNIV_IBUF_DEBUG ulint old_val; - old_val = ibuf_bitmap_page_get_bits - (bitmap_page, buf_frame_get_page_no(page), - IBUF_BITMAP_FREE, &mtr); + old_val = ibuf_bitmap_page_get_bits( + bitmap_page, buf_frame_get_page_no(page), + IBUF_BITMAP_FREE, &mtr); # if 0 if (old_val != max_val) { fprintf(stderr, @@ -1145,9 +1145,8 @@ ibuf_dummy_index_add_col( dict_mem_table_add_col(index->table, "DUMMY", dtype_get_mtype(type), dtype_get_prtype(type), - dtype_get_len(type), - dtype_get_prec(type)); - dict_index_add_col(index, + dtype_get_len(type)); + dict_index_add_col(index, index->table, (dict_col_t*) dict_table_get_nth_col(index->table, i), len); } /************************************************************************ @@ -1165,6 +1164,11 @@ ibuf_dummy_index_free( dict_mem_table_free(table); } +void +dict_index_print_low( +/*=================*/ + dict_index_t* index); /* in: index */ + /************************************************************************* Builds the entry to insert into a non-clustered index when we have the corresponding record in an ibuf index. */ @@ -1213,9 +1217,9 @@ ibuf_build_entry_from_ibuf_rec( dfield_set_data(field, data, len); - dtype_read_for_order_and_null_size - (dfield_get_type(field), - types + i * DATA_ORDER_NULL_TYPE_BUF_SIZE); + dtype_read_for_order_and_null_size( + dfield_get_type(field), + types + i * DATA_ORDER_NULL_TYPE_BUF_SIZE); } *pindex = ibuf_dummy_index_create(n_fields, FALSE); @@ -1235,8 +1239,8 @@ ibuf_build_entry_from_ibuf_rec( types = rec_get_nth_field_old(ibuf_rec, 3, &len); ut_a(len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE <= 1); - index = ibuf_dummy_index_create - (n_fields, len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); + index = ibuf_dummy_index_create( + n_fields, len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); if (len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE) { /* compact record format */ @@ -1254,9 +1258,9 @@ ibuf_build_entry_from_ibuf_rec( dfield_set_data(field, data, len); - dtype_new_read_for_order_and_null_size - (dfield_get_type(field), - types + i * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); + dtype_new_read_for_order_and_null_size( + dfield_get_type(field), + types + i * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); ibuf_dummy_index_add_col(index, dfield_get_type(field), len); } @@ -1315,8 +1319,8 @@ ibuf_rec_get_volume( ulint volume; dict_index_t* dummy_index; mem_heap_t* heap = mem_heap_create(500); - dtuple_t* entry = ibuf_build_entry_from_ibuf_rec - (ibuf_rec, heap, &dummy_index); + dtuple_t* entry = ibuf_build_entry_from_ibuf_rec( + ibuf_rec, heap, &dummy_index); volume = rec_get_converted_size(dummy_index, entry); ibuf_dummy_index_free(dummy_index); mem_heap_free(heap); @@ -1332,15 +1336,15 @@ ibuf_rec_get_volume( if (new_format) { data = rec_get_nth_field_old(ibuf_rec, i + 4, &len); - dtype_new_read_for_order_and_null_size - (&dtype, types + i - * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); + dtype_new_read_for_order_and_null_size( + &dtype, types + i + * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); } else { data = rec_get_nth_field_old(ibuf_rec, i + 2, &len); - dtype_read_for_order_and_null_size - (&dtype, types + i - * DATA_ORDER_NULL_TYPE_BUF_SIZE); + dtype_read_for_order_and_null_size( + &dtype, types + i + * DATA_ORDER_NULL_TYPE_BUF_SIZE); } if (len == UNIV_SQL_NULL) { @@ -1365,8 +1369,8 @@ ibuf_entry_build( index tree; NOTE that the original entry must be 
kept because we copy pointers to its fields */ + dict_index_t* index, /* in: non-clustered index */ dtuple_t* entry, /* in: entry for a non-clustered index */ - ibool comp, /* in: flag: TRUE=compact record format */ ulint space, /* in: space id */ ulint page_no,/* in: index page number where entry should be inserted */ @@ -1430,13 +1434,12 @@ ibuf_entry_build( dfield_set_data(field, buf, 4); - ut_ad(comp == 0 || comp == 1); /* Store the type info in buf2, and add the fields from entry to tuple */ buf2 = mem_heap_alloc(heap, n_fields * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE - + comp); - if (comp) { + + dict_table_is_comp(index->table)); + if (dict_table_is_comp(index->table)) { *buf2++ = 0; /* write the compact format indicator */ } for (i = 0; i < n_fields; i++) { @@ -1447,22 +1450,23 @@ ibuf_entry_build( entry_field = dtuple_get_nth_field(entry, i); dfield_copy(field, entry_field); - dtype_new_store_for_order_and_null_size - (buf2 + i * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE, - dfield_get_type(entry_field)); + dtype_new_store_for_order_and_null_size( + buf2 + i * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE, + dfield_get_type(entry_field), + dict_index_get_nth_field(index, i)->prefix_len); } /* Store the type info in buf2 to field 3 of tuple */ field = dtuple_get_nth_field(tuple, 3); - if (comp) { + if (dict_table_is_comp(index->table)) { buf2--; } dfield_set_data(field, buf2, n_fields * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE - + comp); + + dict_table_is_comp(index->table)); /* Set all the types in the new tuple binary */ dtuple_set_types_binary(tuple, n_fields + 4); @@ -2605,8 +2609,7 @@ ibuf_insert_low( the first fields and the type information for other fields, and which will be inserted to the insert buffer. */ - ibuf_entry = ibuf_entry_build(entry, dict_table_is_comp(index->table), - space, page_no, heap); + ibuf_entry = ibuf_entry_build(index, entry, space, page_no, heap); /* Open a cursor to the insert buffer tree to calculate if we can add the new entry to it without exceeding the free space limit for the @@ -2879,8 +2882,9 @@ dump: PAGE_CUR_LE, &page_cur); /* This time the record must fit */ - if (UNIV_UNLIKELY(!page_cur_tuple_insert - (&page_cur, entry, index, mtr))) { + if (UNIV_UNLIKELY(!page_cur_tuple_insert( + &page_cur, entry, index, + mtr))) { ut_print_timestamp(stderr); @@ -2888,10 +2892,10 @@ dump: "InnoDB: Error: Insert buffer insert" " fails; page free %lu," " dtuple size %lu\n", - (ulong) page_get_max_insert_size - (page, 1), - (ulong) rec_get_converted_size - (index, entry)); + (ulong) page_get_max_insert_size( + page, 1), + (ulong) rec_get_converted_size( + index, entry)); fputs("InnoDB: Cannot insert index record ", stderr); dtuple_print(stderr, entry); @@ -2901,14 +2905,14 @@ dump: " Please run CHECK TABLE on\n" "InnoDB: that table.\n", stderr); - bitmap_page = ibuf_bitmap_get_map_page - (buf_frame_get_space_id(page), - buf_frame_get_page_no(page), - mtr); - old_bits = ibuf_bitmap_page_get_bits - (bitmap_page, - buf_frame_get_page_no(page), - IBUF_BITMAP_FREE, mtr); + bitmap_page = ibuf_bitmap_get_map_page( + buf_frame_get_space_id(page), + buf_frame_get_page_no(page), + mtr); + old_bits = ibuf_bitmap_page_get_bits( + bitmap_page, + buf_frame_get_page_no(page), + IBUF_BITMAP_FREE, mtr); fprintf(stderr, "Bitmap bits %lu\n", (ulong) old_bits); @@ -2996,7 +3000,7 @@ ibuf_delete_rec( btr_pcur_commit_specify_mtr(pcur, mtr); fputs("InnoDB: Validating insert buffer tree:\n", stderr); - if (!btr_validate_tree(ibuf_data->index->tree, NULL)) { + if (!btr_validate_index(ibuf_data->index, NULL)) { 
ut_error; } @@ -3235,12 +3239,12 @@ loop: keep the latch to the ibuf_rec page until the insertion is finished! */ dict_index_t* dummy_index; - dulint max_trx_id = page_get_max_trx_id - (buf_frame_align(ibuf_rec)); + dulint max_trx_id = page_get_max_trx_id( + buf_frame_align(ibuf_rec)); page_update_max_trx_id(page, max_trx_id); - entry = ibuf_build_entry_from_ibuf_rec - (ibuf_rec, heap, &dummy_index); + entry = ibuf_build_entry_from_ibuf_rec( + ibuf_rec, heap, &dummy_index); #ifdef UNIV_IBUF_DEBUG volume += rec_get_converted_size(dummy_index, entry) + page_dir_calc_reserved_space(1); @@ -3283,15 +3287,15 @@ reset_bit: ibuf_bitmap_page_set_bits(bitmap_page, page_no, IBUF_BITMAP_BUFFERED, FALSE, &mtr); if (page) { - ulint old_bits = ibuf_bitmap_page_get_bits - (bitmap_page, page_no, IBUF_BITMAP_FREE, &mtr); + ulint old_bits = ibuf_bitmap_page_get_bits( + bitmap_page, page_no, IBUF_BITMAP_FREE, &mtr); ulint new_bits = ibuf_index_page_calc_free(page); #if 0 /* defined UNIV_IBUF_DEBUG */ fprintf(stderr, "Old bits %lu new bits %lu" " max size %lu\n", old_bits, new_bits, - page_get_max_insert_size_after_reorganize - (page, 1)); + page_get_max_insert_size_after_reorganize( + page, 1)); #endif if (old_bits != new_bits) { ibuf_bitmap_page_set_bits(bitmap_page, page_no, diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h index 288b0459b13..664607a26aa 100644 --- a/storage/innobase/include/btr0btr.h +++ b/storage/innobase/include/btr0btr.h @@ -52,7 +52,7 @@ page_t* btr_root_get( /*=========*/ /* out: root page, x-latched */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ mtr_t* mtr); /* in: mtr */ /****************************************************************** Gets a buffer page and declares its latching order level. */ @@ -255,7 +255,7 @@ that mtr holds an x-latch on the tree. */ void btr_insert_on_non_leaf_level( /*=========================*/ - dict_tree_t* tree, /* in: tree */ + dict_index_t* index, /* in: index */ ulint level, /* in: level, must be > 0 */ dtuple_t* tuple, /* in: the record to be inserted */ mtr_t* mtr); /* in: mtr */ @@ -274,7 +274,7 @@ Deletes on the upper level the node pointer to a page. */ void btr_node_ptr_delete( /*================*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page whose node pointer is deleted */ mtr_t* mtr); /* in: mtr */ #ifdef UNIV_DEBUG @@ -285,7 +285,7 @@ ibool btr_check_node_ptr( /*===============*/ /* out: TRUE */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: index page */ mtr_t* mtr); /* in: mtr */ #endif /* UNIV_DEBUG */ @@ -361,7 +361,7 @@ btr_page_alloc( /*===========*/ /* out: new allocated page, x-latched; NULL if out of space */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ ulint hint_page_no, /* in: hint of a good page */ byte file_direction, /* in: direction where a possible page split is made */ @@ -375,7 +375,7 @@ storage pages because the page must contain info on its level. */ void btr_page_free( /*==========*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page to be freed, x-latched */ mtr_t* mtr); /* in: mtr */ /****************************************************************** @@ -386,7 +386,7 @@ argument. 
*/ void btr_page_free_low( /*==============*/ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index tree */ page_t* page, /* in: page to be freed, x-latched */ ulint level, /* in: page level */ mtr_t* mtr); /* in: mtr */ @@ -397,14 +397,14 @@ Prints size info of a B-tree. */ void btr_print_size( /*===========*/ - dict_tree_t* tree); /* in: index tree */ + dict_index_t* index); /* in: index tree */ /****************************************************************** -Prints directories and other info of all nodes in the tree. */ +Prints directories and other info of all nodes in the index. */ void -btr_print_tree( -/*===========*/ - dict_tree_t* tree, /* in: tree */ +btr_print_index( +/*============*/ + dict_index_t* index, /* in: index */ ulint width); /* in: print this many entries from start and end */ #endif /* UNIV_BTR_PRINT */ @@ -425,10 +425,10 @@ btr_index_rec_validate( Checks the consistency of an index tree. */ ibool -btr_validate_tree( -/*==============*/ +btr_validate_index( +/*===============*/ /* out: TRUE if ok */ - dict_tree_t* tree, /* in: tree */ + dict_index_t* index, /* in: index */ trx_t* trx); /* in: transaction or NULL */ #define BTR_N_LEAF_PAGES 1 diff --git a/storage/innobase/include/btr0btr.ic b/storage/innobase/include/btr0btr.ic index 1f39b52f3c8..4a88f58b318 100644 --- a/storage/innobase/include/btr0btr.ic +++ b/storage/innobase/include/btr0btr.ic @@ -204,7 +204,7 @@ btr_node_ptr_get_child_page_no( fprintf(stderr, "InnoDB: a nonsensical page number 0" " in a node ptr record at offset %lu\n", - (ulong) ut_align_offset(rec, UNIV_PAGE_SIZE)); + (ulong) page_offset(rec)); buf_page_print(buf_frame_align(rec)); } diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 1d41a345699..213dcb7f568 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -59,13 +59,13 @@ btr_cur_get_page( /* out: pointer to page */ btr_cur_t* cursor);/* in: tree cursor */ /************************************************************* -Returns the tree of a cursor. */ +Returns the index of a cursor. */ UNIV_INLINE -dict_tree_t* -btr_cur_get_tree( -/*=============*/ - /* out: tree */ - btr_cur_t* cursor);/* in: tree cursor */ +dict_index_t* +btr_cur_get_index( +/*==============*/ + /* out: index */ + btr_cur_t* cursor);/* in: B-tree cursor */ /************************************************************* Positions a tree cursor at a given record. */ UNIV_INLINE diff --git a/storage/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic index 76a4ff938a2..bd2c46eb734 100644 --- a/storage/innobase/include/btr0cur.ic +++ b/storage/innobase/include/btr0cur.ic @@ -56,15 +56,15 @@ btr_cur_get_page( } /************************************************************* -Returns the tree of a cursor. */ +Returns the index of a cursor. */ UNIV_INLINE -dict_tree_t* -btr_cur_get_tree( -/*=============*/ - /* out: tree */ - btr_cur_t* cursor) /* in: tree cursor */ +dict_index_t* +btr_cur_get_index( +/*==============*/ + /* out: index */ + btr_cur_t* cursor) /* in: B-tree cursor */ { - return((cursor->index)->tree); + return(cursor->index); } /************************************************************* @@ -109,15 +109,8 @@ btr_cur_compress_recommendation( one page: we recommend compression if this is not the root page. 
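
/* A sketch of a caller after the dict_tree_t removal visible in these
headers: the B-tree routines now take the dict_index_t directly, so the
old index->tree indirection disappears. example_check_index is a made-up
wrapper, not part of the patch. */
static ibool
example_check_index(
/*================*/
				/* out: TRUE if the index tree is consistent */
	dict_index_t*	index)	/* in: index in the dictionary cache */
{
	/* previously: return(btr_validate_tree(index->tree, NULL)); */

	return(btr_validate_index(index, NULL));
}
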
*/ - if (dict_tree_get_page((cursor->index)->tree) - == buf_frame_get_page_no(page)) { - - /* It is the root page */ - - return(FALSE); - } - - return(TRUE); + return(dict_index_get_page(cursor->index) + != buf_frame_get_page_no(page)); } return(FALSE); @@ -153,15 +146,8 @@ btr_cur_can_delete_without_compress( one page, OR the page will become empty: we recommend compression if this is not the root page. */ - if (dict_tree_get_page((cursor->index)->tree) - == buf_frame_get_page_no(page)) { - - /* It is the root page */ - - return(TRUE); - } - - return(FALSE); + return(dict_index_get_page(cursor->index) + == buf_frame_get_page_no(page)); } return(TRUE); diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h index 0276fe45922..ee40e905544 100644 --- a/storage/innobase/include/btr0pcur.h +++ b/storage/innobase/include/btr0pcur.h @@ -445,7 +445,7 @@ selects, updates, and deletes. */ struct btr_pcur_struct{ btr_cur_t btr_cur; /* a B-tree cursor */ - ulint latch_mode; /* see FIXME note below! + ulint latch_mode; /* see TODO note below! BTR_SEARCH_LEAF, BTR_MODIFY_LEAF, BTR_MODIFY_TREE, or BTR_NO_LATCHES, depending on the latching state of @@ -473,7 +473,7 @@ struct btr_pcur_struct{ dulint modify_clock; /* the modify clock value of the buffer block when the cursor position was stored */ - ulint pos_state; /* see FIXME note below! + ulint pos_state; /* see TODO note below! BTR_PCUR_IS_POSITIONED, BTR_PCUR_WAS_POSITIONED, BTR_PCUR_NOT_POSITIONED */ @@ -495,14 +495,18 @@ struct btr_pcur_struct{ is not NULL */ }; -#define BTR_PCUR_IS_POSITIONED 1997660512 /* FIXME: currently, the state +#define BTR_PCUR_IS_POSITIONED 1997660512 /* TODO: currently, the state can be BTR_PCUR_IS_POSITIONED, though it really should be BTR_PCUR_WAS_POSITIONED, because we have no obligation to commit the cursor with mtr; similarly latch_mode may - be out of date */ + be out of date. This can + lead to problems if btr_pcur + is not used the right way; + all current code should be + ok. */ #define BTR_PCUR_WAS_POSITIONED 1187549791 #define BTR_PCUR_NOT_POSITIONED 1328997689 diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h index 62b1d2db559..8b4408ef852 100644 --- a/storage/innobase/include/btr0sea.h +++ b/storage/innobase/include/btr0sea.h @@ -134,48 +134,34 @@ btr_search_validate(void); /*======================*/ /* out: TRUE if ok */ -/* Search info directions */ -#define BTR_SEA_NO_DIRECTION 1 -#define BTR_SEA_LEFT 2 -#define BTR_SEA_RIGHT 3 -#define BTR_SEA_SAME_REC 4 - /* The search info struct in an index */ struct btr_search_struct{ - ulint magic_n; /* magic number */ - /* The following 4 fields are currently not used: */ - rec_t* last_search; /* pointer to the lower limit record of the - previous search; NULL if not known */ - ulint n_direction; /* number of consecutive searches in the - same direction */ - ulint direction; /* BTR_SEA_NO_DIRECTION, BTR_SEA_LEFT, - BTR_SEA_RIGHT, BTR_SEA_SAME_REC, - or BTR_SEA_SAME_PAGE */ - dulint modify_clock; /* value of modify clock at the time - last_search was stored */ - /*----------------------*/ - /* The following 4 fields are not protected by any latch: */ + /* The following fields are not protected by any latch. + Unfortunately, this means that they must be aligned to + the machine word, i.e., they cannot be turned into bit-fields. 
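Why the unlatched btr_search_struct fields above must stay full machine words, illustrated with a stand-alone hypothetical struct (not part of the patch):

    struct racy_struct {
            unsigned        a:1;    /* updated by thread 1, no latch */
            unsigned        b:1;    /* updated by thread 2, no latch */
    };

    /* Both bit-fields share one machine word, so each update is
    compiled into a read-modify-write of that whole word; an unlatched
    writer of 'a' can silently undo a concurrent update of 'b'.
    Full-word members (ulint, ibool) do not have this problem, which is
    why root_guess, hash_analysis, last_hash_succ and n_hash_potential
    keep their natural width. */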
*/ page_t* root_guess; /* the root page frame when it was last time fetched, or NULL */ - ulint hash_analysis; /* when this exceeds a certain value, the - hash analysis starts; this is reset if no + ulint hash_analysis; /* when this exceeds BTR_SEARCH_HASH_ANALYSIS, + the hash analysis starts; this is reset if no success noticed */ ibool last_hash_succ; /* TRUE if the last search would have succeeded, or did succeed, using the hash index; NOTE that the value here is not exact: it is not calculated for every search, and the calculation itself is not always accurate! */ - ulint n_hash_potential;/* number of consecutive searches which would - have succeeded, or did succeed, using the hash - index */ + ulint n_hash_potential; + /* number of consecutive searches + which would have succeeded, or did succeed, + using the hash index; + the range is 0 .. BTR_SEARCH_BUILD_LIMIT + 5 */ /*----------------------*/ ulint n_fields; /* recommended prefix length for hash search: number of full fields */ ulint n_bytes; /* recommended prefix: number of bytes in - an incomplete field */ - ulint side; /* BTR_SEARCH_LEFT_SIDE or - BTR_SEARCH_RIGHT_SIDE, depending on whether + an incomplete field; + cf. BTR_PAGE_MAX_REC_SIZE */ + ibool left_side; /* TRUE or FALSE, depending on whether the leftmost record of several records with the same prefix should be indexed in the hash index */ @@ -188,10 +174,12 @@ struct btr_search_struct{ far */ ulint n_searches; /* number of searches */ #endif /* UNIV_SEARCH_PERF_STAT */ +#ifdef UNIV_DEBUG + ulint magic_n; /* magic number */ +# define BTR_SEARCH_MAGIC_N 1112765 +#endif /* UNIV_DEBUG */ }; -#define BTR_SEARCH_MAGIC_N 1112765 - /* The hash index system */ typedef struct btr_search_sys_struct btr_search_sys_t; @@ -229,9 +217,6 @@ is no hope in building a hash index. */ #define BTR_SEARCH_HASH_ANALYSIS 17 -#define BTR_SEARCH_LEFT_SIDE 1 -#define BTR_SEARCH_RIGHT_SIDE 2 - /* Limit of consecutive searches for trying a search shortcut on the search pattern */ diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 110ef36a296..2fc6bf4bcb9 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -825,8 +825,7 @@ struct buf_block_struct{ search: number of full fields */ ulint n_bytes; /* recommended prefix: number of bytes in an incomplete field */ - ulint side; /* BTR_SEARCH_LEFT_SIDE or - BTR_SEARCH_RIGHT_SIDE, depending on + ibool left_side; /* TRUE or FALSE, depending on whether the leftmost record of several records with the same prefix should be indexed in the hash index */ @@ -851,9 +850,7 @@ struct buf_block_struct{ ulint curr_n_fields; /* prefix length for hash indexing: number of full fields */ ulint curr_n_bytes; /* number of bytes in hash indexing */ - ulint curr_side; /* BTR_SEARCH_LEFT_SIDE or - BTR_SEARCH_RIGHT_SIDE in hash - indexing */ + ibool curr_left_side; /* TRUE or FALSE in hash indexing */ dict_index_t* index; /* Index for which the adaptive hash index has been created. */ /* 6. 
Debug fields */ diff --git a/storage/innobase/include/data0data.ic b/storage/innobase/include/data0data.ic index 77fce029851..753fa9ba45f 100644 --- a/storage/innobase/include/data0data.ic +++ b/storage/innobase/include/data0data.ic @@ -334,7 +334,7 @@ dtuple_set_types_binary( for (i = 0; i < n; i++) { dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i)); - dtype_set(dfield_type, DATA_BINARY, 0, 0, 0); + dtype_set(dfield_type, DATA_BINARY, 0, 0); } } diff --git a/storage/innobase/include/data0type.h b/storage/innobase/include/data0type.h index 7a9456637d6..e5e9c5076be 100644 --- a/storage/innobase/include/data0type.h +++ b/storage/innobase/include/data0type.h @@ -18,10 +18,6 @@ extern ulint data_mysql_default_charset_coll; /* SQL data type struct */ typedef struct dtype_struct dtype_t; -/* This variable is initialized as the standard binary variable length -data type */ -extern dtype_t* dtype_binary; - /*-------------------------------------------*/ /* The 'MAIN TYPE' of a column */ #define DATA_VARCHAR 1 /* character varying of the @@ -124,11 +120,7 @@ be less than 256 */ #define DATA_ROLL_PTR 2 /* rollback data pointer: 7 bytes */ #define DATA_ROLL_PTR_LEN 7 -#define DATA_MIX_ID 3 /* mixed index label: a dulint, stored in - a row in a compressed form */ -#define DATA_MIX_ID_LEN 9 /* maximum stored length for mix id (in a - compressed dulint form) */ -#define DATA_N_SYS_COLS 4 /* number of system columns defined above */ +#define DATA_N_SYS_COLS 3 /* number of system columns defined above */ /* Flags ORed to the precise data type */ #define DATA_NOT_NULL 256 /* this is ORed to the precise type when @@ -176,7 +168,11 @@ dtype_get_at_most_n_mbchars( /*========================*/ /* out: length of the prefix, in bytes */ - const dtype_t* dtype, /* in: data type */ + ulint prtype, /* in: precise type */ + ulint mbminlen, /* in: minimum length of a + multi-byte character */ + ulint mbmaxlen, /* in: maximum length of a + multi-byte character */ ulint prefix_len, /* in: length of the requested prefix, in characters, multiplied by dtype_get_mbmaxlen(dtype) */ @@ -224,8 +220,7 @@ dtype_set( dtype_t* type, /* in: type struct to init */ ulint mtype, /* in: main data type */ ulint prtype, /* in: precise type */ - ulint len, /* in: length of type */ - ulint prec); /* in: precision of type */ + ulint len); /* in: precision of type */ /************************************************************************* Copies a data type structure. */ UNIV_INLINE @@ -233,7 +228,7 @@ void dtype_copy( /*=======*/ dtype_t* type1, /* in: type struct to copy to */ - dtype_t* type2); /* in: type struct to copy from */ + const dtype_t* type2); /* in: type struct to copy from */ /************************************************************************* Gets the SQL main data type. */ UNIV_INLINE @@ -249,6 +244,18 @@ dtype_get_prtype( /*=============*/ dtype_t* type); /************************************************************************* +Compute the mbminlen and mbmaxlen members of a data type structure. */ +UNIV_INLINE +void +dtype_get_mblen( +/*============*/ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type (and collation) */ + ulint* mbminlen, /* out: minimum length of a + multi-byte character */ + ulint* mbmaxlen); /* out: maximum length of a + multi-byte character */ +/************************************************************************* Gets the MySQL charset-collation code for MySQL string types. 
*/ ulint @@ -280,13 +287,6 @@ dtype_get_len( /*==========*/ dtype_t* type); /************************************************************************* -Gets the type precision. */ -UNIV_INLINE -ulint -dtype_get_prec( -/*===========*/ - dtype_t* type); -/************************************************************************* Gets the minimum length of a character, in bytes. */ UNIV_INLINE ulint @@ -312,50 +312,52 @@ dtype_get_pad_char( /*===============*/ /* out: padding character code, or ULINT_UNDEFINED if no padding specified */ - const dtype_t* type); /* in: type */ + ulint mtype, /* in: main type */ + ulint prtype); /* in: precise type */ /*************************************************************************** Returns the size of a fixed size data type, 0 if not a fixed size type. */ UNIV_INLINE ulint -dtype_get_fixed_size( -/*=================*/ +dtype_get_fixed_size_low( +/*=====================*/ /* out: fixed size, or 0 */ - dtype_t* type); /* in: type */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ + ulint len, /* in: length */ + ulint mbminlen, /* in: minimum length of a multibyte char */ + ulint mbmaxlen); /* in: maximum length of a multibyte char */ /*************************************************************************** Returns the minimum size of a data type. */ UNIV_INLINE ulint -dtype_get_min_size( -/*===============*/ +dtype_get_min_size_low( +/*===================*/ /* out: minimum size */ - const dtype_t* type); /* in: type */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ + ulint len, /* in: length */ + ulint mbminlen, /* in: minimum length of a multibyte char */ + ulint mbmaxlen); /* in: maximum length of a multibyte char */ /*************************************************************************** Returns the maximum size of a data type. Note: types in system tables may be incomplete and return incorrect information. */ - +UNIV_INLINE ulint -dtype_get_max_size( -/*===============*/ - /* out: maximum size (ULINT_MAX for - unbounded types) */ - const dtype_t* type); /* in: type */ +dtype_get_max_size_low( +/*===================*/ + /* out: maximum size */ + ulint mtype, /* in: main type */ + ulint len); /* in: length */ /*************************************************************************** -Returns a stored SQL NULL size for a type. For fixed length types it is -the fixed length of the type, otherwise 0. */ +Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a type. +For fixed length types it is the fixed length of the type, otherwise 0. */ UNIV_INLINE ulint dtype_get_sql_null_size( /*====================*/ - /* out: SQL null storage size */ - dtype_t* type); /* in: type */ -/*************************************************************************** -Returns TRUE if a type is of a fixed size. */ -UNIV_INLINE -ibool -dtype_is_fixed_size( -/*================*/ - /* out: TRUE if fixed size */ - dtype_t* type); /* in: type */ + /* out: SQL null storage size + in ROW_FORMAT=REDUNDANT */ + const dtype_t* type); /* in: type */ /************************************************************************** Reads to a type the stored information which determines its alphabetical ordering and the storage size of an SQL NULL value. 
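The new *_low variants take the type as discrete arguments instead of a dtype_t*, so one routine can serve both dtype_t and the slimmed-down dict_col_t introduced later in this patch. A sketch of the column-side call, mirroring dict_col_get_fixed_size() in dict0dict.ic ('col' is any column, e.g. from dict_table_get_nth_col()):

    ulint   fixed = dtype_get_fixed_size_low(col->mtype, col->prtype,
                                             col->len, col->mbminlen,
                                             col->mbmaxlen);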
*/ @@ -376,7 +378,9 @@ dtype_new_store_for_order_and_null_size( byte* buf, /* in: buffer for DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE bytes where we store the info */ - dtype_t* type); /* in: type struct */ + dtype_t* type, /* in: type struct */ + ulint prefix_len);/* in: prefix length to + replace type->len, or 0 */ /************************************************************************** Reads to a type the stored information which determines its alphabetical ordering and the storage size of an SQL NULL value. This is the 4.1.x storage @@ -413,25 +417,30 @@ dtype_new_read_for_order_and_null_size() sym_tab_add_null_lit() */ struct dtype_struct{ - ulint mtype; /* main data type */ - ulint prtype; /* precise type; MySQL data type, charset code, - flags to indicate nullability, signedness, - whether this is a binary string, whether this - is a true VARCHAR where MySQL uses 2 bytes to - store the length */ + unsigned mtype:8; /* main data type */ + unsigned prtype:24; /* precise type; MySQL data + type, charset code, flags to + indicate nullability, + signedness, whether this is a + binary string, whether this is + a true VARCHAR where MySQL + uses 2 bytes to store the length */ /* the remaining fields do not affect alphabetical ordering: */ - ulint len; /* length; for MySQL data this is - field->pack_length(), except that for a - >= 5.0.3 type true VARCHAR this is the - maximum byte length of the string data - (in addition to the string, MySQL uses 1 or - 2 bytes to store the string length) */ - ulint prec; /* precision */ + unsigned len:16; /* length; for MySQL data this + is field->pack_length(), + except that for a >= 5.0.3 + type true VARCHAR this is the + maximum byte length of the + string data (in addition to + the string, MySQL uses 1 or 2 + bytes to store the string length) */ - ulint mbminlen; /* minimum length of a character, in bytes */ - ulint mbmaxlen; /* maximum length of a character, in bytes */ + unsigned mbminlen:2; /* minimum length of a + character, in bytes */ + unsigned mbmaxlen:3; /* maximum length of a + character, in bytes */ }; #ifndef UNIV_NONINL diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic index d2027592c09..b447c9c39c2 100644 --- a/storage/innobase/include/data0type.ic +++ b/storage/innobase/include/data0type.ic @@ -48,33 +48,50 @@ dtype_get_mysql_type( } /************************************************************************* -Sets the mbminlen and mbmaxlen members of a data type structure. */ +Compute the mbminlen and mbmaxlen members of a data type structure. */ +UNIV_INLINE +void +dtype_get_mblen( +/*============*/ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type (and collation) */ + ulint* mbminlen, /* out: minimum length of a + multi-byte character */ + ulint* mbmaxlen) /* out: maximum length of a + multi-byte character */ +{ + if (dtype_is_string_type(mtype)) { +#ifndef UNIV_HOTBACKUP + innobase_get_cset_width(dtype_get_charset_coll(prtype), + mbminlen, mbmaxlen); + ut_ad(*mbminlen <= *mbmaxlen); + ut_ad(*mbminlen <= 2); /* cf. the bit-field in dtype_t */ + ut_ad(*mbmaxlen < 1 << 3); /* cf. the bit-field in dtype_t */ +#else /* !UNIV_HOTBACKUP */ + ut_a(mtype <= DATA_BINARY); + *mbminlen = *mbmaxlen = 1; +#endif /* !UNIV_HOTBACKUP */ + } else { + *mbminlen = *mbmaxlen = 0; + } +} + +/************************************************************************* +Compute the mbminlen and mbmaxlen members of a data type structure. 
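A rough size comparison for the dtype_t bit-field layout above; exact numbers depend on the compiler and on the width of ulint, so treat this as an estimate rather than a guarantee:

    /* old layout: six ulint members
       (mtype, prtype, len, prec, mbminlen, mbmaxlen)
       = 24 bytes with 32-bit ulint, 48 bytes with 64-bit ulint */

    /* new layout: mtype:8 + prtype:24 fill one 32-bit unit, and
       len:16 + mbminlen:2 + mbmaxlen:3 fit in a second one, so
       sizeof(dtype_t) is typically 8 bytes */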
*/ UNIV_INLINE void dtype_set_mblen( /*============*/ - dtype_t* type) /* in/out: type struct */ + dtype_t* type) /* in/out: type */ { - ut_ad(type); - if (dtype_is_string_type(type->mtype)) { -#ifndef UNIV_HOTBACKUP - innobase_get_cset_width(dtype_get_charset_coll(type->prtype), - &type->mbminlen, &type->mbmaxlen); - ut_ad(type->mbminlen <= type->mbmaxlen); -#else /* !UNIV_HOTBACKUP */ -#ifdef notdefined - printf("ibbackup: DEBUG: type->mtype=%lu, type->prtype=%lu\n", - type->mtype, type->prtype); -#endif - ut_a(type->mtype <= DATA_BINARY); -#ifdef notdefined - ut_a(type->prtype == (DATA_BINARY | DATA_NOT_NULL)); -#endif - type->mbminlen = type->mbmaxlen = 1; -#endif /* !UNIV_HOTBACKUP */ - } else { - type->mbminlen = type->mbmaxlen = 0; - } + ulint mbminlen; + ulint mbmaxlen; + + dtype_get_mblen(type->mtype, type->prtype, &mbminlen, &mbmaxlen); + type->mbminlen = mbminlen; + type->mbmaxlen = mbmaxlen; + + ut_ad(dtype_validate(type)); } /************************************************************************* @@ -86,8 +103,7 @@ dtype_set( dtype_t* type, /* in: type struct to init */ ulint mtype, /* in: main data type */ ulint prtype, /* in: precise type */ - ulint len, /* in: length of type */ - ulint prec) /* in: precision of type */ + ulint len) /* in: precision of type */ { ut_ad(type); ut_ad(mtype <= DATA_MTYPE_MAX); @@ -95,10 +111,8 @@ dtype_set( type->mtype = mtype; type->prtype = prtype; type->len = len; - type->prec = prec; dtype_set_mblen(type); - ut_ad(dtype_validate(type)); } /************************************************************************* @@ -108,7 +122,7 @@ void dtype_copy( /*=======*/ dtype_t* type1, /* in: type struct to copy to */ - dtype_t* type2) /* in: type struct to copy from */ + const dtype_t* type2) /* in: type struct to copy from */ { *type1 = *type2; @@ -154,19 +168,6 @@ dtype_get_len( return(type->len); } -/************************************************************************* -Gets the type precision. */ -UNIV_INLINE -ulint -dtype_get_prec( -/*===========*/ - dtype_t* type) -{ - ut_ad(type); - - return(type->prec); -} - /************************************************************************* Gets the minimum length of a character, in bytes. */ UNIV_INLINE @@ -195,19 +196,20 @@ dtype_get_mbmaxlen( } /************************************************************************* -Gets the padding character code for the type. */ +Gets the padding character code for a type. */ UNIV_INLINE ulint dtype_get_pad_char( /*===============*/ /* out: padding character code, or ULINT_UNDEFINED if no padding specified */ - const dtype_t* type) /* in: type */ + ulint mtype, /* in: main type */ + ulint prtype) /* in: precise type */ { - switch (type->mtype) { + switch (mtype) { case DATA_FIXBINARY: case DATA_BINARY: - if (UNIV_UNLIKELY(dtype_get_charset_coll(type->prtype) + if (UNIV_UNLIKELY(dtype_get_charset_coll(prtype) == DATA_MYSQL_BINARY_CHARSET_COLL)) { /* Starting from 5.0.18, do not pad VARBINARY or BINARY columns. 
*/ @@ -223,7 +225,7 @@ dtype_get_pad_char( return(0x20); case DATA_BLOB: - if ((type->prtype & DATA_BINARY_TYPE) == 0) { + if (!(prtype & DATA_BINARY_TYPE)) { return(0x20); } /* Fall through */ @@ -244,11 +246,14 @@ dtype_new_store_for_order_and_null_size( byte* buf, /* in: buffer for DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE bytes where we store the info */ - dtype_t* type) /* in: type struct */ + dtype_t* type, /* in: type struct */ + ulint prefix_len)/* in: prefix length to + replace type->len, or 0 */ { #if 6 != DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE #error "6 != DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE" #endif + ulint len; buf[0] = (byte)(type->mtype & 0xFFUL); @@ -263,7 +268,9 @@ dtype_new_store_for_order_and_null_size( buf[1] = (byte)(type->prtype & 0xFFUL); - mach_write_to_2(buf + 2, type->len & 0xFFFFUL); + len = prefix_len ? prefix_len : type->len; + + mach_write_to_2(buf + 2, len & 0xFFFFUL); ut_ad(dtype_get_charset_coll(type->prtype) < 256); mach_write_to_2(buf + 4, dtype_get_charset_coll(type->prtype)); @@ -284,7 +291,9 @@ dtype_read_for_order_and_null_size( dtype_t* type, /* in: type struct */ byte* buf) /* in: buffer for stored type order info */ { - ut_ad(4 == DATA_ORDER_NULL_TYPE_BUF_SIZE); +#if 4 != DATA_ORDER_NULL_TYPE_BUF_SIZE +# error "4 != DATA_ORDER_NULL_TYPE_BUF_SIZE" +#endif type->mtype = buf[0] & 63; type->prtype = buf[1]; @@ -356,34 +365,31 @@ dtype_new_read_for_order_and_null_size( Returns the size of a fixed size data type, 0 if not a fixed size type. */ UNIV_INLINE ulint -dtype_get_fixed_size( -/*=================*/ +dtype_get_fixed_size_low( +/*=====================*/ /* out: fixed size, or 0 */ - dtype_t* type) /* in: type */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ + ulint len, /* in: length */ + ulint mbminlen, /* in: minimum length of a multibyte char */ + ulint mbmaxlen) /* in: maximum length of a multibyte char */ { - ulint mtype; - - mtype = dtype_get_mtype(type); - switch (mtype) { case DATA_SYS: #ifdef UNIV_DEBUG - switch (type->prtype & DATA_MYSQL_TYPE_MASK) { + switch (prtype & DATA_MYSQL_TYPE_MASK) { + case DATA_ROW_ID: + ut_ad(len == DATA_ROW_ID_LEN); + break; + case DATA_TRX_ID: + ut_ad(len == DATA_TRX_ID_LEN); + break; + case DATA_ROLL_PTR: + ut_ad(len == DATA_ROLL_PTR_LEN); + break; default: ut_ad(0); return(0); - case DATA_ROW_ID: - ut_ad(type->len == DATA_ROW_ID_LEN); - break; - case DATA_TRX_ID: - ut_ad(type->len == DATA_TRX_ID_LEN); - break; - case DATA_ROLL_PTR: - ut_ad(type->len == DATA_ROLL_PTR_LEN); - break; - case DATA_MIX_ID: - ut_ad(type->len == DATA_MIX_ID_LEN); - break; } #endif /* UNIV_DEBUG */ case DATA_CHAR: @@ -391,32 +397,32 @@ dtype_get_fixed_size( case DATA_INT: case DATA_FLOAT: case DATA_DOUBLE: - return(dtype_get_len(type)); + return(len); case DATA_MYSQL: - if (type->prtype & DATA_BINARY_TYPE) { - return(dtype_get_len(type)); + if (prtype & DATA_BINARY_TYPE) { + return(len); } else { #ifdef UNIV_HOTBACKUP - if (type->mbminlen == type->mbmaxlen) { - return(dtype_get_len(type)); + if (mbminlen == mbmaxlen) { + return(len); } #else /* UNIV_HOTBACKUP */ /* We play it safe here and ask MySQL for mbminlen and mbmaxlen. Although - type->mbminlen and type->mbmaxlen are - initialized if and only if type->prtype + mbminlen and mbmaxlen are + initialized if and only if prtype is (in one of the 3 functions in this file), it could be that none of these functions has been called. 
*/ - ulint mbminlen, mbmaxlen; + ulint i_mbminlen, i_mbmaxlen; - innobase_get_cset_width - (dtype_get_charset_coll(type->prtype), - &mbminlen, &mbmaxlen); + innobase_get_cset_width( + dtype_get_charset_coll(prtype), + &i_mbminlen, &i_mbmaxlen); - if (UNIV_UNLIKELY(type->mbminlen != mbminlen) - || UNIV_UNLIKELY(type->mbmaxlen != mbmaxlen)) { + if (UNIV_UNLIKELY(mbminlen != i_mbminlen) + || UNIV_UNLIKELY(mbmaxlen != i_mbmaxlen)) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: " @@ -424,13 +430,13 @@ dtype_get_fixed_size( "mbmaxlen=%lu, " "type->mbminlen=%lu, " "type->mbmaxlen=%lu\n", + (ulong) i_mbminlen, + (ulong) i_mbmaxlen, (ulong) mbminlen, - (ulong) mbmaxlen, - (ulong) type->mbminlen, - (ulong) type->mbmaxlen); + (ulong) mbmaxlen); } if (mbminlen == mbmaxlen) { - return(dtype_get_len(type)); + return(len); } #endif /* !UNIV_HOTBACKUP */ } @@ -452,30 +458,31 @@ dtype_get_fixed_size( Returns the minimum size of a data type. */ UNIV_INLINE ulint -dtype_get_min_size( -/*===============*/ +dtype_get_min_size_low( +/*===================*/ /* out: minimum size */ - const dtype_t* type) /* in: type */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ + ulint len, /* in: length */ + ulint mbminlen, /* in: minimum length of a multibyte char */ + ulint mbmaxlen) /* in: maximum length of a multibyte char */ { - switch (type->mtype) { + switch (mtype) { case DATA_SYS: #ifdef UNIV_DEBUG - switch (type->prtype & DATA_MYSQL_TYPE_MASK) { + switch (prtype & DATA_MYSQL_TYPE_MASK) { + case DATA_ROW_ID: + ut_ad(len == DATA_ROW_ID_LEN); + break; + case DATA_TRX_ID: + ut_ad(len == DATA_TRX_ID_LEN); + break; + case DATA_ROLL_PTR: + ut_ad(len == DATA_ROLL_PTR_LEN); + break; default: ut_ad(0); return(0); - case DATA_ROW_ID: - ut_ad(type->len == DATA_ROW_ID_LEN); - break; - case DATA_TRX_ID: - ut_ad(type->len == DATA_TRX_ID_LEN); - break; - case DATA_ROLL_PTR: - ut_ad(type->len == DATA_ROLL_PTR_LEN); - break; - case DATA_MIX_ID: - ut_ad(type->len == DATA_MIX_ID_LEN); - break; } #endif /* UNIV_DEBUG */ case DATA_CHAR: @@ -483,58 +490,73 @@ dtype_get_min_size( case DATA_INT: case DATA_FLOAT: case DATA_DOUBLE: - return(type->len); + return(len); case DATA_MYSQL: - if ((type->prtype & DATA_BINARY_TYPE) - || type->mbminlen == type->mbmaxlen) { - return(type->len); + if ((prtype & DATA_BINARY_TYPE) || mbminlen == mbmaxlen) { + return(len); } /* this is a variable-length character set */ - ut_a(type->mbminlen > 0); - ut_a(type->mbmaxlen > type->mbminlen); - ut_a(type->len % type->mbmaxlen == 0); - return(type->len * type->mbminlen / type->mbmaxlen); + ut_a(mbminlen > 0); + ut_a(mbmaxlen > mbminlen); + ut_a(len % mbmaxlen == 0); + return(len * mbminlen / mbmaxlen); case DATA_VARCHAR: case DATA_BINARY: case DATA_DECIMAL: case DATA_VARMYSQL: case DATA_BLOB: return(0); - default: ut_error; + default: + ut_error; } return(0); } /*************************************************************************** -Returns a stored SQL NULL size for a type. For fixed length types it is -the fixed length of the type, otherwise 0. */ +Returns the maximum size of a data type. Note: types in system tables may be +incomplete and return incorrect information. 
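A worked example of the minimum-size formula above (len * mbminlen / mbmaxlen), assuming a utf8-style charset with mbminlen = 1 and mbmaxlen = 3; the variables are illustrative:

    ulint   len      = 30;  /* CHAR(10): MySQL reserves 10 * mbmaxlen bytes */
    ulint   mbminlen = 1;
    ulint   mbmaxlen = 3;
    ulint   min_size = len * mbminlen / mbmaxlen;
                            /* = 30 * 1 / 3 = 10 bytes,
                            i.e. one byte per character */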
*/ +UNIV_INLINE +ulint +dtype_get_max_size_low( +/*===================*/ + /* out: maximum size */ + ulint mtype, /* in: main type */ + ulint len) /* in: length */ +{ + switch (mtype) { + case DATA_SYS: + case DATA_CHAR: + case DATA_FIXBINARY: + case DATA_INT: + case DATA_FLOAT: + case DATA_DOUBLE: + case DATA_MYSQL: + case DATA_VARCHAR: + case DATA_BINARY: + case DATA_DECIMAL: + case DATA_VARMYSQL: + return(len); + case DATA_BLOB: + break; + default: + ut_error; + } + + return(ULINT_MAX); +} + +/*************************************************************************** +Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a type. +For fixed length types it is the fixed length of the type, otherwise 0. */ UNIV_INLINE ulint dtype_get_sql_null_size( /*====================*/ - /* out: SQL null storage size */ - dtype_t* type) /* in: type */ + /* out: SQL null storage size + in ROW_FORMAT=REDUNDANT */ + const dtype_t* type) /* in: type */ { - return(dtype_get_fixed_size(type)); -} - -/*************************************************************************** -Returns TRUE if a type is of a fixed size. */ -UNIV_INLINE -ibool -dtype_is_fixed_size( -/*================*/ - /* out: TRUE if fixed size */ - dtype_t* type) /* in: type */ -{ - ulint size; - - size = dtype_get_fixed_size(type); - - if (size) { - return(TRUE); - } - - return(FALSE); + return(dtype_get_fixed_size_low(type->mtype, type->prtype, type->len, + type->mbminlen, type->mbmaxlen) > 0); } diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h index 41ce0ac059a..cac79410b24 100644 --- a/storage/innobase/include/dict0boot.h +++ b/storage/innobase/include/dict0boot.h @@ -103,7 +103,7 @@ dict_create(void); #define DICT_HDR_ROW_ID 0 /* The latest assigned row id */ #define DICT_HDR_TABLE_ID 8 /* The latest assigned table id */ #define DICT_HDR_INDEX_ID 16 /* The latest assigned index id */ -#define DICT_HDR_MIX_ID 24 /* The latest assigned mix id */ +#define DICT_HDR_MIX_ID 24 /* Obsolete, always 0. */ #define DICT_HDR_TABLES 32 /* Root of the table index tree */ #define DICT_HDR_TABLE_IDS 36 /* Root of the table index tree */ #define DICT_HDR_COLUMNS 40 /* Root of the column index tree */ diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index b30aebe5ff6..a9775ab3d40 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -79,24 +79,77 @@ dict_load_space_id_list(void); /************************************************************************* Gets the column data type. */ UNIV_INLINE -dtype_t* -dict_col_get_type( -/*==============*/ - dict_col_t* col); +void +dict_col_copy_type( +/*===============*/ + const dict_col_t* col, /* in: column */ + dtype_t* type); /* out: data type */ +/************************************************************************* +Gets the column data type. */ + +void +dict_col_copy_type_noninline( +/*=========================*/ + const dict_col_t* col, /* in: column */ + dtype_t* type); /* out: data type */ +/*************************************************************************** +Returns the minimum size of the column. */ +UNIV_INLINE +ulint +dict_col_get_min_size( +/*==================*/ + /* out: minimum size */ + const dict_col_t* col); /* in: column */ +/*************************************************************************** +Returns the maximum size of the column. 
*/ +UNIV_INLINE +ulint +dict_col_get_max_size( +/*==================*/ + /* out: maximum size */ + const dict_col_t* col); /* in: column */ +/*************************************************************************** +Returns the size of a fixed size column, 0 if not a fixed size column. */ +UNIV_INLINE +ulint +dict_col_get_fixed_size( +/*====================*/ + /* out: fixed size, or 0 */ + const dict_col_t* col); /* in: column */ +/*************************************************************************** +Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a column. +For fixed length types it is the fixed length of the type, otherwise 0. */ +UNIV_INLINE +ulint +dict_col_get_sql_null_size( +/*=======================*/ + /* out: SQL null storage size + in ROW_FORMAT=REDUNDANT */ + const dict_col_t* col); /* in: column */ + /************************************************************************* Gets the column number. */ UNIV_INLINE ulint dict_col_get_no( /*============*/ - dict_col_t* col); + const dict_col_t* col); /************************************************************************* Gets the column position in the clustered index. */ UNIV_INLINE ulint dict_col_get_clust_pos( /*===================*/ - dict_col_t* col); + const dict_col_t* col, /* in: table column */ + const dict_index_t* clust_index); /* in: clustered index */ +/************************************************************************* +Gets the column position in the clustered index. */ + +ulint +dict_col_get_clust_pos_noninline( +/*=============================*/ + const dict_col_t* col, /* in: table column */ + const dict_index_t* clust_index); /* in: clustered index */ /******************************************************************** If the given column name is reserved for InnoDB system columns, return TRUE. */ @@ -354,6 +407,19 @@ dict_table_get_index_noninline( dict_table_t* table, /* in: table */ const char* name); /* in: index name */ /************************************************************************** +Returns a column's name. */ + +const char* +dict_table_get_col_name( +/*====================*/ + /* out: column name. NOTE: not + guaranteed to stay valid if table is + modified in any way (columns added, + etc.). */ + const dict_table_t* table, /* in: table */ + ulint col_nr);/* in: column number */ + +/************************************************************************** Prints a table definition. */ void @@ -468,30 +534,30 @@ dict_table_get_n_cols( /************************************************************************ Gets the nth column of a table. */ UNIV_INLINE -dict_col_t* +const dict_col_t* dict_table_get_nth_col( /*===================*/ - /* out: pointer to column object */ - dict_table_t* table, /* in: table */ - ulint pos); /* in: position of column */ + /* out: pointer to column object */ + const dict_table_t* table, /* in: table */ + ulint pos); /* in: position of column */ /************************************************************************ Gets the nth column of a table. */ -dict_col_t* +const dict_col_t* dict_table_get_nth_col_noninline( /*=============================*/ - /* out: pointer to column object */ - dict_table_t* table, /* in: table */ - ulint pos); /* in: position of column */ + /* out: pointer to column object */ + const dict_table_t* table, /* in: table */ + ulint pos); /* in: position of column */ /************************************************************************ Gets the given system column of a table. 
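dict_table_get_col_name() reads the packed name array described later in dict0mem.h ("name1\0name2\0...nameN\0"). A sketch of how such an array is walked; the loop is illustrative, not a quotation of the actual implementation (strlen() is from <string.h>):

    const char*     s = table->col_names;
    ulint           i;

    for (i = 0; i < col_nr; i++) {
            s += strlen(s) + 1;     /* skip one NUL-terminated name */
    }
    /* s now points at the name of column number col_nr */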
*/ UNIV_INLINE -dict_col_t* +const dict_col_t* dict_table_get_sys_col( /*===================*/ - /* out: pointer to column object */ - dict_table_t* table, /* in: table */ - ulint sys); /* in: DATA_ROW_ID, ... */ + /* out: pointer to column object */ + const dict_table_t* table, /* in: table */ + ulint sys); /* in: DATA_ROW_ID, ... */ /************************************************************************ Gets the given system column number of a table. */ UNIV_INLINE @@ -549,12 +615,11 @@ dict_index_find_on_id_low( /* out: index or NULL if not found from cache */ dulint id); /* in: index id */ /************************************************************************** -Adds an index to dictionary cache. */ +Adds an index to the dictionary cache. */ -ibool +void dict_index_add_to_cache( /*====================*/ - /* out: TRUE if success */ dict_table_t* table, /* in: table on which the index is */ dict_index_t* index, /* in, own: index; NOTE! The index memory object is freed in this function! */ @@ -614,23 +679,23 @@ dict_index_get_nth_field( dict_index_t* index, /* in: index */ ulint pos); /* in: position of field */ /************************************************************************ -Gets pointer to the nth field data type in an index. */ +Gets pointer to the nth column in an index. */ UNIV_INLINE -dtype_t* -dict_index_get_nth_type( -/*====================*/ - /* out: data type */ - dict_index_t* index, /* in: index */ - ulint pos); /* in: position of the field */ +const dict_col_t* +dict_index_get_nth_col( +/*===================*/ + /* out: column */ + const dict_index_t* index, /* in: index */ + ulint pos); /* in: position of the field */ /************************************************************************ Gets the column number of the nth field in an index. */ UNIV_INLINE ulint dict_index_get_nth_col_no( /*======================*/ - /* out: column number */ - dict_index_t* index, /* in: index */ - ulint pos); /* in: position of the field */ + /* out: column number */ + const dict_index_t* index, /* in: index */ + ulint pos); /* in: position of the field */ /************************************************************************ Looks for column n in an index. */ @@ -694,6 +759,7 @@ void dict_index_add_col( /*===============*/ dict_index_t* index, /* in: index */ + dict_table_t* table, /* in: table */ dict_col_t* col, /* in: column */ ulint prefix_len); /* in: column prefix length */ /*********************************************************************** @@ -706,38 +772,12 @@ dict_index_copy_types( dict_index_t* index, /* in: index */ ulint n_fields); /* in: number of field types to copy */ /************************************************************************* -Gets the index tree where the index is stored. */ -UNIV_INLINE -dict_tree_t* -dict_index_get_tree( -/*================*/ - /* out: index tree */ - dict_index_t* index); /* in: index */ -/************************************************************************* Gets the field column. */ UNIV_INLINE -dict_col_t* +const dict_col_t* dict_field_get_col( /*===============*/ - dict_field_t* field); -/************************************************************************** -Creates an index tree struct. 
*/ - -dict_tree_t* -dict_tree_create( -/*=============*/ - /* out, own: created tree */ - dict_index_t* index, /* in: the index for which to create: in the - case of a mixed tree, this should be the - index of the cluster object */ - ulint page_no);/* in: root page number of the index */ -/************************************************************************** -Frees an index tree struct. */ - -void -dict_tree_free( -/*===========*/ - dict_tree_t* tree); /* in, own: index tree */ + const dict_field_t* field); /************************************************************************** In an index tree, finds the index corresponding to a record in the tree. */ @@ -755,20 +795,20 @@ Checks that a tuple has n_fields_cmp value in a sensible range, so that no comparison can occur with the page number field in a node pointer. */ ibool -dict_tree_check_search_tuple( -/*=========================*/ +dict_index_check_search_tuple( +/*==========================*/ /* out: TRUE if ok */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index */ dtuple_t* tuple); /* in: tuple used in a search */ #endif /* UNIV_DEBUG */ /************************************************************************** Builds a node pointer out of a physical record and a page number. */ dtuple_t* -dict_tree_build_node_ptr( -/*=====================*/ +dict_index_build_node_ptr( +/*======================*/ /* out, own: node pointer */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index */ rec_t* rec, /* in: record for which to build node pointer */ ulint page_no,/* in: page number to put in node pointer */ @@ -780,10 +820,10 @@ Copies an initial segment of a physical record, long enough to specify an index entry uniquely. */ rec_t* -dict_tree_copy_rec_order_prefix( -/*============================*/ +dict_index_copy_rec_order_prefix( +/*=============================*/ /* out: pointer to the prefix record */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index */ rec_t* rec, /* in: record for which to copy prefix */ ulint* n_fields,/* out: number of fields copied */ byte** buf, /* in/out: memory buffer for the copied prefix, @@ -793,10 +833,10 @@ dict_tree_copy_rec_order_prefix( Builds a typed data tuple out of a physical record. */ dtuple_t* -dict_tree_build_data_tuple( -/*=======================*/ +dict_index_build_data_tuple( +/*========================*/ /* out, own: data tuple */ - dict_tree_t* tree, /* in: index tree */ + dict_index_t* index, /* in: index */ rec_t* rec, /* in: record for which to build data tuple */ ulint n_fields,/* in: number of data fields */ mem_heap_t* heap); /* in: memory heap where tuple created */ @@ -804,61 +844,60 @@ dict_tree_build_data_tuple( Gets the space id of the root of the index tree. */ UNIV_INLINE ulint -dict_tree_get_space( -/*================*/ +dict_index_get_space( +/*=================*/ /* out: space id */ - dict_tree_t* tree); /* in: tree */ + dict_index_t* index); /* in: index */ /************************************************************************* Sets the space id of the root of the index tree. */ UNIV_INLINE void -dict_tree_set_space( -/*================*/ - dict_tree_t* tree, /* in: tree */ +dict_index_set_space( +/*=================*/ + dict_index_t* index, /* in: index */ ulint space); /* in: space id */ /************************************************************************* Gets the page number of the root of the index tree. 
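With the root page number now carried by dict_index_t itself, the "is this the root page?" test seen in the btr0cur.ic hunks earlier reduces to one comparison; a sketch, with 'index' and 'page' assumed to come from the caller:

    ibool   is_root = dict_index_get_page(index)
                    == buf_frame_get_page_no(page);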
*/ UNIV_INLINE ulint -dict_tree_get_page( -/*===============*/ +dict_index_get_page( +/*================*/ /* out: page number */ - dict_tree_t* tree); /* in: tree */ + dict_index_t* tree); /* in: index */ /************************************************************************* Sets the page number of the root of index tree. */ UNIV_INLINE void -dict_tree_set_page( -/*===============*/ - dict_tree_t* tree, /* in: tree */ +dict_index_set_page( +/*================*/ + dict_index_t* index, /* in: index */ ulint page); /* in: page number */ /************************************************************************* Gets the type of the index tree. */ UNIV_INLINE ulint -dict_tree_get_type( -/*===============*/ +dict_index_get_type( +/*================*/ /* out: type */ - dict_tree_t* tree); /* in: tree */ + dict_index_t* index); /* in: index */ /************************************************************************* Gets the read-write lock of the index tree. */ UNIV_INLINE rw_lock_t* -dict_tree_get_lock( -/*===============*/ +dict_index_get_lock( +/*================*/ /* out: read-write lock */ - dict_tree_t* tree); /* in: tree */ + dict_index_t* index); /* in: index */ /************************************************************************ Returns free space reserved for future updates of records. This is relevant only in the case of many consecutive inserts, as updates which make the records bigger might fragment the index. */ UNIV_INLINE ulint -dict_tree_get_space_reserve( -/*========================*/ +dict_index_get_space_reserve(void); +/*==============================*/ /* out: number of free bytes on page, reserved for updates */ - dict_tree_t* tree); /* in: a tree */ /************************************************************************* Calculates the minimum record length in an index. */ @@ -945,7 +984,6 @@ struct dict_sys_struct{ on name */ hash_table_t* table_id_hash; /* hash table of the tables, based on id */ - hash_table_t* col_hash; /* hash table of the columns */ UT_LIST_BASE_NODE_T(dict_table_t) table_LRU; /* LRU list of tables */ ulint size; /* varying space in bytes occupied diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index de00148535f..d59e99277da 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -9,18 +9,74 @@ Created 1/8/1996 Heikki Tuuri #include "dict0load.h" #include "trx0undo.h" #include "trx0sys.h" +#include "rem0types.h" +#include "data0type.h" /************************************************************************* Gets the column data type. */ UNIV_INLINE -dtype_t* -dict_col_get_type( -/*==============*/ - dict_col_t* col) +void +dict_col_copy_type( +/*===============*/ + const dict_col_t* col, /* in: column */ + dtype_t* type) /* out: data type */ { - ut_ad(col); + ut_ad(col && type); - return(&col->type); + type->mtype = col->mtype; + type->prtype = col->prtype; + type->len = col->len; + type->mbminlen = col->mbminlen; + type->mbmaxlen = col->mbmaxlen; +} + +/*************************************************************************** +Returns the minimum size of the column. */ +UNIV_INLINE +ulint +dict_col_get_min_size( +/*==================*/ + /* out: minimum size */ + const dict_col_t* col) /* in: column */ +{ + return(dtype_get_min_size_low(col->mtype, col->prtype, col->len, + col->mbminlen, col->mbmaxlen)); +} +/*************************************************************************** +Returns the maximum size of the column. 
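dict_col_copy_type() is the bridge for code that still needs a full dtype_t now that dict_col_t no longer embeds one; a usage sketch ('col' is any column, e.g. from dict_table_get_nth_col()):

    dtype_t type;

    dict_col_copy_type(col, &type);
    /* type.mtype, type.prtype, type.len, type.mbminlen and
    type.mbmaxlen now mirror the column, so the older dtype_*
    accessors can be applied to it. */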
*/ +UNIV_INLINE +ulint +dict_col_get_max_size( +/*==================*/ + /* out: maximum size */ + const dict_col_t* col) /* in: column */ +{ + return(dtype_get_max_size_low(col->mtype, col->len)); +} +/*************************************************************************** +Returns the size of a fixed size column, 0 if not a fixed size column. */ +UNIV_INLINE +ulint +dict_col_get_fixed_size( +/*====================*/ + /* out: fixed size, or 0 */ + const dict_col_t* col) /* in: column */ +{ + return(dtype_get_fixed_size_low(col->mtype, col->prtype, col->len, + col->mbminlen, col->mbmaxlen)); +} +/*************************************************************************** +Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a column. +For fixed length types it is the fixed length of the type, otherwise 0. */ +UNIV_INLINE +ulint +dict_col_get_sql_null_size( +/*=======================*/ + /* out: SQL null storage size + in ROW_FORMAT=REDUNDANT */ + const dict_col_t* col) /* in: column */ +{ + return(dict_col_get_fixed_size(col)); } /************************************************************************* @@ -29,7 +85,7 @@ UNIV_INLINE ulint dict_col_get_no( /*============*/ - dict_col_t* col) + const dict_col_t* col) { ut_ad(col); @@ -42,11 +98,23 @@ UNIV_INLINE ulint dict_col_get_clust_pos( /*===================*/ - dict_col_t* col) + const dict_col_t* col, /* in: table column */ + const dict_index_t* clust_index) /* in: clustered index */ { - ut_ad(col); + ulint i; - return(col->clust_pos); + ut_ad(col); + ut_ad(clust_index && clust_index->type & DICT_CLUSTERED); + + for (i = 0; i < clust_index->n_def; i++) { + const dict_field_t* field = &clust_index->fields[i]; + + if (!field->prefix_len && field->col == col) { + return(i); + } + } + + return(ULINT_UNDEFINED); } /************************************************************************ @@ -132,12 +200,12 @@ dict_table_get_n_cols( /************************************************************************ Gets the nth column of a table. */ UNIV_INLINE -dict_col_t* +const dict_col_t* dict_table_get_nth_col( /*===================*/ - /* out: pointer to column object */ - dict_table_t* table, /* in: table */ - ulint pos) /* in: position of column */ + /* out: pointer to column object */ + const dict_table_t* table, /* in: table */ + ulint pos) /* in: position of column */ { ut_ad(table); ut_ad(pos < table->n_def); @@ -149,14 +217,14 @@ dict_table_get_nth_col( /************************************************************************ Gets the given system column of a table. */ UNIV_INLINE -dict_col_t* +const dict_col_t* dict_table_get_sys_col( /*===================*/ - /* out: pointer to column object */ - dict_table_t* table, /* in: table */ - ulint sys) /* in: DATA_ROW_ID, ... */ + /* out: pointer to column object */ + const dict_table_t* table, /* in: table */ + ulint sys) /* in: DATA_ROW_ID, ... */ { - dict_col_t* col; + const dict_col_t* col; ut_ad(table); ut_ad(sys < DATA_N_SYS_COLS); @@ -164,8 +232,8 @@ dict_table_get_sys_col( col = dict_table_get_nth_col(table, table->n_cols - DATA_N_SYS_COLS + sys); - ut_ad(col->type.mtype == DATA_SYS); - ut_ad(col->type.prtype == (sys | DATA_NOT_NULL)); + ut_ad(col->mtype == DATA_SYS); + ut_ad(col->prtype == (sys | DATA_NOT_NULL)); return(col); } @@ -311,45 +379,28 @@ dict_index_get_sys_col_pos( dict_index_t* index, /* in: index */ ulint type) /* in: DATA_ROW_ID, ... 
*/ { - dict_col_t* col; - ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); ut_ad(!(index->type & DICT_UNIVERSAL)); - col = dict_table_get_sys_col(index->table, type); - if (index->type & DICT_CLUSTERED) { - return(col->clust_pos); + return(dict_col_get_clust_pos( + dict_table_get_sys_col(index->table, type), + index)); } - return(dict_index_get_nth_col_pos - (index, dict_table_get_sys_col_no(index->table, type))); -} - -/************************************************************************* -Gets the index tree where the index is stored. */ -UNIV_INLINE -dict_tree_t* -dict_index_get_tree( -/*================*/ - /* out: index tree */ - dict_index_t* index) /* in: index */ -{ - ut_ad(index); - ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - - return(index->tree); + return(dict_index_get_nth_col_pos( + index, dict_table_get_sys_col_no(index->table, type))); } /************************************************************************* Gets the field column. */ UNIV_INLINE -dict_col_t* +const dict_col_t* dict_field_get_col( /*===============*/ - dict_field_t* field) + const dict_field_t* field) { ut_ad(field); @@ -357,17 +408,17 @@ dict_field_get_col( } /************************************************************************ -Gets pointer to the nth field data type in an index. */ +Gets pointer to the nth column in an index. */ UNIV_INLINE -dtype_t* -dict_index_get_nth_type( -/*====================*/ - /* out: data type */ - dict_index_t* index, /* in: index */ - ulint pos) /* in: position of the field */ +const dict_col_t* +dict_index_get_nth_col( +/*===================*/ + /* out: column */ + const dict_index_t* index, /* in: index */ + ulint pos) /* in: position of the field */ { - return(dict_col_get_type(dict_field_get_col - (dict_index_get_nth_field(index, pos)))); + return(dict_field_get_col(dict_index_get_nth_field((dict_index_t*) + index, pos))); } /************************************************************************ @@ -376,102 +427,101 @@ UNIV_INLINE ulint dict_index_get_nth_col_no( /*======================*/ - /* out: column number */ - dict_index_t* index, /* in: index */ - ulint pos) /* in: position of the field */ + /* out: column number */ + const dict_index_t* index, /* in: index */ + ulint pos) /* in: position of the field */ { - return(dict_col_get_no(dict_field_get_col - (dict_index_get_nth_field(index, pos)))); + return(dict_col_get_no(dict_index_get_nth_col(index, pos))); } /************************************************************************* Gets the space id of the root of the index tree. */ UNIV_INLINE ulint -dict_tree_get_space( -/*================*/ +dict_index_get_space( +/*=================*/ /* out: space id */ - dict_tree_t* tree) /* in: tree */ + dict_index_t* index) /* in: index */ { - ut_ad(tree); - ut_ad(tree->magic_n == DICT_TREE_MAGIC_N); + ut_ad(index); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - return(tree->space); + return(index->space); } /************************************************************************* Sets the space id of the root of the index tree. 
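dict_col_get_clust_pos() now derives the clustered-index position by scanning the index fields instead of reading a cached clust_pos member. A usage sketch; dict_table_get_first_index() is the pre-existing accessor for the clustered index, assuming the usual InnoDB convention that it is first in the table's index list:

    ulint   pos = dict_col_get_clust_pos(
            col, dict_table_get_first_index(table));

    if (pos == ULINT_UNDEFINED) {
            /* the column does not appear as a full (non-prefix)
            field of the clustered index */
    }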
*/ UNIV_INLINE void -dict_tree_set_space( -/*================*/ - dict_tree_t* tree, /* in: tree */ +dict_index_set_space( +/*=================*/ + dict_index_t* index, /* in: index */ ulint space) /* in: space id */ { - ut_ad(tree); - ut_ad(tree->magic_n == DICT_TREE_MAGIC_N); + ut_ad(index); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - tree->space = space; + index->space = space; } /************************************************************************* Gets the page number of the root of the index tree. */ UNIV_INLINE ulint -dict_tree_get_page( -/*===============*/ +dict_index_get_page( +/*================*/ /* out: page number */ - dict_tree_t* tree) /* in: tree */ + dict_index_t* index) /* in: index */ { - ut_ad(tree); - ut_ad(tree->magic_n == DICT_TREE_MAGIC_N); + ut_ad(index); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - return(tree->page); + return(index->page); } /************************************************************************* Sets the page number of the root of index tree. */ UNIV_INLINE void -dict_tree_set_page( -/*===============*/ - dict_tree_t* tree, /* in: tree */ +dict_index_set_page( +/*================*/ + dict_index_t* index, /* in: index */ ulint page) /* in: page number */ { - ut_ad(tree); - ut_ad(tree->magic_n == DICT_TREE_MAGIC_N); + ut_ad(index); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - tree->page = page; + index->page = page; } /************************************************************************* Gets the type of the index tree. */ UNIV_INLINE ulint -dict_tree_get_type( -/*===============*/ +dict_index_get_type( +/*================*/ /* out: type */ - dict_tree_t* tree) /* in: tree */ + dict_index_t* index) /* in: index */ { - ut_ad(tree); - ut_ad(tree->magic_n == DICT_TREE_MAGIC_N); + ut_ad(index); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - return(tree->type); + return(index->type); } /************************************************************************* Gets the read-write lock of the index tree. */ UNIV_INLINE rw_lock_t* -dict_tree_get_lock( -/*===============*/ +dict_index_get_lock( +/*================*/ /* out: read-write lock */ - dict_tree_t* tree) /* in: tree */ + dict_index_t* index) /* in: index */ { - ut_ad(tree); - ut_ad(tree->magic_n == DICT_TREE_MAGIC_N); + ut_ad(index); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - return(&(tree->lock)); + return(&(index->lock)); } /************************************************************************ @@ -480,16 +530,11 @@ relevant only in the case of many consecutive inserts, as updates which make the records bigger might fragment the index. */ UNIV_INLINE ulint -dict_tree_get_space_reserve( -/*========================*/ +dict_index_get_space_reserve(void) +/*==============================*/ /* out: number of free bytes on page, reserved for updates */ - dict_tree_t* tree) /* in: a tree */ { - ut_ad(tree); - - UT_NOT_USED(tree); - return(UNIV_PAGE_SIZE / 16); } diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index e8682b79387..a23f89954a4 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -75,8 +75,7 @@ dict_mem_table_add_col( const char* name, /* in: column name */ ulint mtype, /* in: main datatype */ ulint prtype, /* in: precise type */ - ulint len, /* in: length */ - ulint prec); /* in: precision */ + ulint len); /* in: precision */ /************************************************************************** Creates an index memory object. 
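The index-tree latch formerly reached through dict_tree_get_lock() is now taken via the index itself; a sketch using the existing rw-lock primitives from sync0rw.h:

    rw_lock_x_lock(dict_index_get_lock(index));

    /* ... modify the upper levels of the B-tree ... */

    rw_lock_x_unlock(dict_index_get_lock(index));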
*/ @@ -122,16 +121,41 @@ dict_mem_foreign_create(void); /* Data structure for a column in a table */ struct dict_col_struct{ - hash_node_t hash; /* hash chain node */ - ulint ind; /* table column position (they are numbered - starting from 0) */ - ulint clust_pos;/* position of the column in the - clustered index */ - ulint ord_part;/* count of how many times this column - appears in ordering fields of an index */ - const char* name; /* name */ - dtype_t type; /* data type */ - dict_table_t* table; /* back pointer to table of this column */ + /*----------------------*/ + /* The following are copied from dtype_t, + so that all bit-fields can be packed tightly. */ + unsigned mtype:8; /* main data type */ + unsigned prtype:24; /* precise type; MySQL data + type, charset code, flags to + indicate nullability, + signedness, whether this is a + binary string, whether this is + a true VARCHAR where MySQL + uses 2 bytes to store the length */ + + /* the remaining fields do not affect alphabetical ordering: */ + + unsigned len:16; /* length; for MySQL data this + is field->pack_length(), + except that for a >= 5.0.3 + type true VARCHAR this is the + maximum byte length of the + string data (in addition to + the string, MySQL uses 1 or 2 + bytes to store the string length) */ + + unsigned mbminlen:2; /* minimum length of a + character, in bytes */ + unsigned mbmaxlen:3; /* maximum length of a + character, in bytes */ + /*----------------------*/ + /* End of definitions copied from dtype_t */ + + unsigned ind:10; /* table column position + (starting from 0) */ + unsigned ord_part:1; /* nonzero if this column + appears in the ordering fields + of an index */ }; /* DICT_MAX_INDEX_COL_LEN is measured in bytes and is the max index column @@ -145,35 +169,18 @@ UTF-8 charset. In that charset, a character may take at most 3 bytes. 
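A quick range check for the :10 bit-fields used in the structs that follow (prefix_len, fixed_len, n_fields and friends):

    /* a :10 bit-field holds 0 .. 2^10 - 1 = 1023; in particular the
    10-bit prefix_len presupposes DICT_MAX_INDEX_COL_LEN <= 1024,
    consistent with the comment above that prefix_len must stay
    smaller than that limit */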
*/ struct dict_field_struct{ dict_col_t* col; /* pointer to the table column */ const char* name; /* name of the column */ - ulint prefix_len; /* 0 or the length of the column + unsigned prefix_len:10; /* 0 or the length of the column prefix in bytes in a MySQL index of type, e.g., INDEX (textcol(25)); must be smaller than DICT_MAX_INDEX_COL_LEN; NOTE that in the UTF-8 charset, MySQL sets this to 3 * the prefix len in UTF-8 chars */ - ulint fixed_len; /* 0 or the fixed length of the + unsigned fixed_len:10; /* 0 or the fixed length of the column if smaller than DICT_MAX_INDEX_COL_LEN */ }; -/* Data structure for an index tree */ -struct dict_tree_struct{ - ulint type; /* tree type */ - dulint id; /* id of the index stored in the tree */ - ulint space; /* space of index tree */ - ulint page; /* index tree root page number */ - byte pad[64];/* Padding to prevent other memory hotspots on - the same memory cache line */ - rw_lock_t lock; /* read-write lock protecting the upper levels - of the index tree */ - dict_index_t* tree_index; /* the index stored in the - index tree */ - ulint magic_n;/* magic number */ -}; - -#define DICT_TREE_MAGIC_N 7545676 - /* Data structure for an index */ struct dict_index_struct{ dulint id; /* id of the index */ @@ -182,30 +189,28 @@ struct dict_index_struct{ const char* name; /* index name */ const char* table_name; /* table name */ dict_table_t* table; /* back pointer to table */ - ulint space; /* space where the index tree is placed */ - ulint trx_id_offset;/* position of the the trx id column + unsigned space:32; + /* space where the index tree is placed */ + unsigned page:32;/* index tree root page number */ + unsigned trx_id_offset:10;/* position of the the trx id column in a clustered index record, if the fields before it are known to be of a fixed size, 0 otherwise */ - ulint n_user_defined_cols; + unsigned n_user_defined_cols:10; /* number of columns the user defined to be in the index: in the internal representation we add more columns */ - ulint n_uniq; /* number of fields from the beginning + unsigned n_uniq:10;/* number of fields from the beginning which are enough to determine an index entry uniquely */ - ulint n_def; /* number of fields defined so far */ - ulint n_fields;/* number of fields in the index */ + unsigned n_def:10;/* number of fields defined so far */ + unsigned n_fields:10;/* number of fields in the index */ + unsigned n_nullable:10;/* number of nullable fields */ + unsigned cached:1;/* TRUE if the index object is in the + dictionary cache */ dict_field_t* fields; /* array of field descriptions */ - ulint n_nullable;/* number of nullable fields */ UT_LIST_NODE_T(dict_index_t) indexes;/* list of indexes of the table */ - dict_tree_t* tree; /* index tree struct */ - UT_LIST_NODE_T(dict_index_t) - tree_indexes; /* list of indexes of the same index - tree */ - ibool cached; /* TRUE if the index object is in the - dictionary cache */ btr_search_t* search_info; /* info used in optimistic searches */ /*----------------------*/ ib_longlong* stat_n_diff_key_vals; @@ -218,7 +223,12 @@ struct dict_index_struct{ ulint stat_n_leaf_pages; /* approximate number of leaf pages in the index tree */ + rw_lock_t lock; /* read-write lock protecting the upper levels + of the index tree */ +#ifdef UNIV_DEBUG ulint magic_n;/* magic number */ +# define DICT_INDEX_MAGIC_N 76789786 +#endif }; /* Data structure for a foreign key constraint; an example: @@ -229,7 +239,13 @@ struct dict_foreign_struct{ this memory heap */ char* id; /* id of the constraint as a 
null-terminated string */ - ulint type; /* 0 or DICT_FOREIGN_ON_DELETE_CASCADE + unsigned n_fields:10; /* number of indexes' first fields + for which the the foreign key + constraint is defined: we allow the + indexes to contain more fields than + mentioned in the constraint, as long + as the first fields are as mentioned */ + unsigned type:6; /* 0 or DICT_FOREIGN_ON_DELETE_CASCADE or DICT_FOREIGN_ON_DELETE_SET_NULL */ char* foreign_table_name;/* foreign table name */ dict_table_t* foreign_table; /* table where the foreign key is */ @@ -240,12 +256,6 @@ struct dict_foreign_struct{ is */ const char** referenced_col_names;/* names of the referenced columns in the referenced table */ - ulint n_fields; /* number of indexes' first fields - for which the the foreign key - constraint is defined: we allow the - indexes to contain more fields than - mentioned in the constraint, as long - as the first fields are as mentioned */ dict_index_t* foreign_index; /* foreign index; we require that both tables contain explicitly defined indexes for the constraint: InnoDB @@ -270,12 +280,9 @@ a foreign key constraint is enforced, therefore RESTRICT just means no flag */ #define DICT_FOREIGN_ON_UPDATE_NO_ACTION 32 -#define DICT_INDEX_MAGIC_N 76789786 - /* Data structure for a database table */ struct dict_table_struct{ dulint id; /* id of the table */ - ulint flags; /* DICT_TF_COMPACT, ... */ mem_heap_t* heap; /* memory heap */ const char* name; /* table name */ const char* dir_path_of_temp_table;/* NULL or the directory path @@ -284,20 +291,33 @@ struct dict_table_struct{ innodb_file_per_table is defined in my.cnf; in Unix this is usually /tmp/..., in Windows \temp\... */ - ulint space; /* space where the clustered index of the + unsigned space:32; + /* space where the clustered index of the table is placed */ - ibool ibd_file_missing;/* TRUE if this is in a single-table + unsigned ibd_file_missing:1; + /* TRUE if this is in a single-table tablespace and the .ibd file is missing; then we must return in ha_innodb.cc an error if the user tries to query such an orphaned table */ - ibool tablespace_discarded;/* this flag is set TRUE when the - user calls DISCARD TABLESPACE on this table, - and reset to FALSE in IMPORT TABLESPACE */ + unsigned tablespace_discarded:1; + /* this flag is set TRUE when the user + calls DISCARD TABLESPACE on this + table, and reset to FALSE in IMPORT + TABLESPACE */ + unsigned cached:1;/* TRUE if the table object has been added + to the dictionary cache */ + unsigned flags:8;/* DICT_TF_COMPACT, ... */ + unsigned n_def:10;/* number of columns defined so far */ + unsigned n_cols:10;/* number of columns */ + dict_col_t* cols; /* array of column descriptions */ + const char* col_names; + /* n_def column names packed in an + "name1\0name2\0...nameN\0" array. until + n_def reaches n_cols, this is allocated with + ut_malloc, and the final size array is + allocated through the table's heap. */ hash_node_t name_hash; /* hash chain node */ hash_node_t id_hash; /* hash chain node */ - ulint n_def; /* number of columns defined so far */ - ulint n_cols; /* number of columns */ - dict_col_t* cols; /* array of column descriptions */ UT_LIST_BASE_NODE_T(dict_index_t) indexes; /* list of indexes of the table */ UT_LIST_BASE_NODE_T(dict_foreign_t) @@ -321,8 +341,6 @@ struct dict_table_struct{ on the table: we cannot drop the table while there are foreign key checks running on it! 
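
A short sketch of the packed col_names array introduced above, where the n_def names are stored back to back as "name1\0name2\0...nameN\0"; the demo helper below is a simplified stand-in for the dict_table_get_col_name() accessor used elsewhere in this patch.

#include <stdio.h>
#include <string.h>

/* simplified stand-in for dict_table_get_col_name() */
static const char* demo_get_col_name(const char* col_names, unsigned i)
{
	const char*	s = col_names;

	while (i--) {
		s += strlen(s) + 1;	/* skip one name and its NUL byte */
	}
	return s;
}

int main(void)
{
	/* three column names packed back to back */
	static const char	names[] = "id\0name\0created";

	printf("%s\n", demo_get_col_name(names, 2));	/* prints "created" */
	return 0;
}
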
*/ - ibool cached; /* TRUE if the table object has been added - to the dictionary cache */ lock_t* auto_inc_lock;/* a buffer for an auto-inc lock for this table: we allocate the memory here so that individual transactions can get it @@ -339,12 +357,7 @@ struct dict_table_struct{ had an IX lock on */ UT_LIST_BASE_NODE_T(lock_t) locks; /* list of locks on the table */ - ulint max_row_size; - /* maximum size of a single row in the - table, not guaranteed to be especially - accurate. it's ULINT_MAX if there are - unbounded variable-width fields. initialized - in dict_table_add_to_cache. */ +#ifdef UNIV_DEBUG /*----------------------*/ ibool does_not_fit_in_memory; /* this field is used to specify in simulations @@ -355,7 +368,15 @@ struct dict_table_struct{ dictionary on disk, and the database will forget about value TRUE if it has to reload the table definition from disk */ +#endif /* UNIV_DEBUG */ /*----------------------*/ + unsigned big_rows:1; + /* flag: TRUE if the maximum length of + a single row exceeds BIG_ROW_SIZE; + initialized in dict_table_add_to_cache() */ + unsigned stat_initialized:1; /* TRUE if statistics have + been calculated the first time + after database startup or table creation */ ib_longlong stat_n_rows; /* approximate number of rows in the table; we periodically calculate new estimates */ @@ -364,9 +385,6 @@ struct dict_table_struct{ database pages */ ulint stat_sum_of_other_index_sizes; /* other indexes in database pages */ - ibool stat_initialized; /* TRUE if statistics have - been calculated the first time - after database startup or table creation */ ulint stat_modified_counter; /* when a row is inserted, updated, or deleted, we add 1 to this number; we calculate new @@ -389,9 +407,11 @@ struct dict_table_struct{ SELECT MAX(auto inc column) */ ib_longlong autoinc;/* autoinc counter value to give to the next inserted row */ +#ifdef UNIV_DEBUG ulint magic_n;/* magic number */ +# define DICT_TABLE_MAGIC_N 76333786 +#endif /* UNIV_DEBUG */ }; -#define DICT_TABLE_MAGIC_N 76333786 #ifndef UNIV_NONINL #include "dict0mem.ic" diff --git a/storage/innobase/include/dict0types.h b/storage/innobase/include/dict0types.h index bd8a1a996d1..b90545f2105 100644 --- a/storage/innobase/include/dict0types.h +++ b/storage/innobase/include/dict0types.h @@ -13,7 +13,6 @@ typedef struct dict_sys_struct dict_sys_t; typedef struct dict_col_struct dict_col_t; typedef struct dict_field_struct dict_field_t; typedef struct dict_index_struct dict_index_t; -typedef struct dict_tree_struct dict_tree_t; typedef struct dict_table_struct dict_table_t; typedef struct dict_foreign_struct dict_foreign_t; diff --git a/storage/innobase/include/ibuf0ibuf.ic b/storage/innobase/include/ibuf0ibuf.ic index 4c5f6dbf5e1..4d65a7f5250 100644 --- a/storage/innobase/include/ibuf0ibuf.ic +++ b/storage/innobase/include/ibuf0ibuf.ic @@ -165,8 +165,8 @@ ibuf_index_page_calc_free( /* out: value for ibuf bitmap bits */ page_t* page) /* in: non-unique secondary index page */ { - return(ibuf_index_page_calc_free_bits - (page_get_max_insert_size_after_reorganize(page, 1))); + return(ibuf_index_page_calc_free_bits( + page_get_max_insert_size_after_reorganize(page, 1))); } /**************************************************************************** diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h index 7f99f670190..833d268c9de 100644 --- a/storage/innobase/include/page0page.h +++ b/storage/innobase/include/page0page.h @@ -132,6 +132,24 @@ directory. 
*/ #define PAGE_DIR_SLOT_MAX_N_OWNED 8 #define PAGE_DIR_SLOT_MIN_N_OWNED 4 +/**************************************************************** +Gets the start of a page. */ +UNIV_INLINE +page_t* +page_align( +/*=======*/ + /* out: start of the page */ + void* ptr) /* in: pointer to page frame */ + __attribute__((const)); +/**************************************************************** +Gets the offset within a page. */ +UNIV_INLINE +ulint +page_offset( +/*========*/ + /* out: offset from the start of the page */ + const void* ptr) /* in: pointer to page frame */ + __attribute__((const)); /***************************************************************** Returns the max trx id field value. */ UNIV_INLINE diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic index 9348acd5ffc..d9e67f3eeeb 100644 --- a/storage/innobase/include/page0page.ic +++ b/storage/innobase/include/page0page.ic @@ -15,6 +15,28 @@ Created 2/2/1994 Heikki Tuuri #define UNIV_INLINE #endif +/**************************************************************** +Gets the start of a page. */ +UNIV_INLINE +page_t* +page_align( +/*=======*/ + /* out: start of the page */ + void* ptr) /* in: pointer to page frame */ +{ + return((page_t*) ut_align_down(ptr, UNIV_PAGE_SIZE)); +} +/**************************************************************** +Gets the offset within a page. */ +UNIV_INLINE +ulint +page_offset( +/*========*/ + /* out: offset from the start of the page */ + const void* ptr) /* in: pointer to page frame */ +{ + return(ut_align_offset(ptr, UNIV_PAGE_SIZE)); +} /***************************************************************** Returns the max trx id field value. */ UNIV_INLINE @@ -175,7 +197,7 @@ page_rec_is_comp( /* out: nonzero if in compact format */ const rec_t* rec) /* in: record */ { - return(page_is_comp(ut_align_down((rec_t*) rec, UNIV_PAGE_SIZE))); + return(page_is_comp(page_align((rec_t*) rec))); } /**************************************************************** @@ -291,7 +313,7 @@ page_rec_is_user_rec( /* out: TRUE if a user record */ const rec_t* rec) /* in: record */ { - return(page_rec_is_user_rec_low(ut_align_offset(rec, UNIV_PAGE_SIZE))); + return(page_rec_is_user_rec_low(page_offset(rec))); } /**************************************************************** @@ -303,7 +325,7 @@ page_rec_is_supremum( /* out: TRUE if the supremum record */ const rec_t* rec) /* in: record */ { - return(page_rec_is_supremum_low(ut_align_offset(rec, UNIV_PAGE_SIZE))); + return(page_rec_is_supremum_low(page_offset(rec))); } /**************************************************************** @@ -315,7 +337,7 @@ page_rec_is_infimum( /* out: TRUE if the infimum record */ const rec_t* rec) /* in: record */ { - return(page_rec_is_infimum_low(ut_align_offset(rec, UNIV_PAGE_SIZE))); + return(page_rec_is_infimum_low(page_offset(rec))); } /***************************************************************** @@ -351,7 +373,7 @@ page_cmp_dtuple_rec_with_match( ut_ad(rec_offs_validate(rec, NULL, offsets)); ut_ad(!rec_offs_comp(offsets) == !page_rec_is_comp(rec)); - rec_offset = ut_align_offset(rec, UNIV_PAGE_SIZE); + rec_offset = page_offset(rec); if (UNIV_UNLIKELY(rec_offset == PAGE_NEW_INFIMUM) || UNIV_UNLIKELY(rec_offset == PAGE_OLD_INFIMUM)) { @@ -492,7 +514,7 @@ page_dir_slot_set_rec( { ut_ad(page_rec_check(rec)); - mach_write_to_2(slot, ut_align_offset(rec, UNIV_PAGE_SIZE)); + mach_write_to_2(slot, page_offset(rec)); } /******************************************************************* @@ -550,7 
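
A standalone sketch of what the new page_align()/page_offset() helpers compute; PAGE_SIZE_DEMO and the demo_* names are assumptions standing in for UNIV_PAGE_SIZE and the ut_align_down()/ut_align_offset() primitives. Because the page size is a power of two, both helpers reduce to a single mask operation on the pointer value.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_DEMO	16384UL		/* stand-in for UNIV_PAGE_SIZE */

static void* demo_page_align(const void* ptr)	/* like page_align() */
{
	return (void*)((uintptr_t) ptr & ~(uintptr_t)(PAGE_SIZE_DEMO - 1));
}

static unsigned long demo_page_offset(const void* ptr)	/* like page_offset() */
{
	return (unsigned long)((uintptr_t) ptr & (PAGE_SIZE_DEMO - 1));
}

int main(void)
{
	unsigned char	buf[3 * PAGE_SIZE_DEMO];
	/* round up to the first page boundary inside buf */
	unsigned char*	page = demo_page_align(buf + PAGE_SIZE_DEMO - 1);
	unsigned char*	rec = page + 100;

	printf("page_offset(rec) = %lu\n", demo_page_offset(rec));
	printf("page_align(rec) == page: %d\n",
	       demo_page_align(rec) == (void*) page);
	return 0;
}
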
+572,7 @@ page_rec_get_next( ut_ad(page_rec_check(rec)); - page = ut_align_down(rec, UNIV_PAGE_SIZE); + page = page_align(rec); offs = rec_get_next_offs(rec, page_is_comp(page)); @@ -592,11 +614,11 @@ page_rec_set_next( ut_ad(page_rec_check(rec)); ut_ad(!page_rec_is_supremum(rec)); - page = ut_align_down(rec, UNIV_PAGE_SIZE); + page = page_align(rec); if (next) { ut_ad(!page_rec_is_infimum(next)); - ut_ad(page == ut_align_down(next, UNIV_PAGE_SIZE)); + ut_ad(page == page_align(next)); offs = (ulint) (next - page); } else { offs = 0; @@ -623,7 +645,7 @@ page_rec_get_prev( ut_ad(page_rec_check(rec)); - page = ut_align_down(rec, UNIV_PAGE_SIZE); + page = page_align(rec); ut_ad(!page_rec_is_infimum(rec)); @@ -735,15 +757,15 @@ page_get_max_insert_size( if (page_is_comp(page)) { occupied = page_header_get_field(page, PAGE_HEAP_TOP) - PAGE_NEW_SUPREMUM_END - + page_dir_calc_reserved_space - (n_recs + page_dir_get_n_heap(page) - 2); + + page_dir_calc_reserved_space( + n_recs + page_dir_get_n_heap(page) - 2); free_space = page_get_free_space_of_empty(TRUE); } else { occupied = page_header_get_field(page, PAGE_HEAP_TOP) - PAGE_OLD_SUPREMUM_END - + page_dir_calc_reserved_space - (n_recs + page_dir_get_n_heap(page) - 2); + + page_dir_calc_reserved_space( + n_recs + page_dir_get_n_heap(page) - 2); free_space = page_get_free_space_of_empty(FALSE); } diff --git a/storage/innobase/include/read0read.ic b/storage/innobase/include/read0read.ic index 9785cf1b3ce..3aded1ca07c 100644 --- a/storage/innobase/include/read0read.ic +++ b/storage/innobase/include/read0read.ic @@ -69,9 +69,9 @@ read_view_sees_trx_id( for (i = 0; i < n_ids; i++) { - cmp = ut_dulint_cmp - (trx_id, - read_view_get_nth_trx_id(view, n_ids - i - 1)); + cmp = ut_dulint_cmp( + trx_id, + read_view_get_nth_trx_id(view, n_ids - i - 1)); if (cmp <= 0) { return(cmp < 0); } diff --git a/storage/innobase/include/rem0cmp.h b/storage/innobase/include/rem0cmp.h index 36d91db0e87..c6a6e5de4db 100644 --- a/storage/innobase/include/rem0cmp.h +++ b/storage/innobase/include/rem0cmp.h @@ -16,16 +16,17 @@ Created 7/1/1994 Heikki Tuuri #include "rem0rec.h" /***************************************************************** -Returns TRUE if two types are equal for comparison purposes. */ +Returns TRUE if two columns are equal for comparison purposes. */ ibool -cmp_types_are_equal( -/*================*/ - /* out: TRUE if the types are considered - equal in comparisons */ - dtype_t* type1, /* in: type 1 */ - dtype_t* type2, /* in: type 2 */ - ibool check_charsets); /* in: whether to check charsets */ +cmp_cols_are_equal( +/*===============*/ + /* out: TRUE if the columns are + considered equal in comparisons */ + const dict_col_t* col1, /* in: column 1 */ + const dict_col_t* col2, /* in: column 2 */ + ibool check_charsets); + /* in: whether to check charsets */ /***************************************************************** This function is used to compare two data fields for which we know the data type. 
*/ @@ -35,7 +36,8 @@ cmp_data_data( /*==========*/ /* out: 1, 0, -1, if data1 is greater, equal, less than data2, respectively */ - dtype_t* cur_type,/* in: data type of the fields */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ byte* data1, /* in: data field (== a pointer to a memory buffer) */ ulint len1, /* in: data field length or UNIV_SQL_NULL */ @@ -51,7 +53,8 @@ cmp_data_data_slow( /*===============*/ /* out: 1, 0, -1, if data1 is greater, equal, less than data2, respectively */ - dtype_t* cur_type,/* in: data type of the fields */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ byte* data1, /* in: data field (== a pointer to a memory buffer) */ ulint len1, /* in: data field length or UNIV_SQL_NULL */ diff --git a/storage/innobase/include/rem0cmp.ic b/storage/innobase/include/rem0cmp.ic index 5653ec1ac44..52dc7ff5dc9 100644 --- a/storage/innobase/include/rem0cmp.ic +++ b/storage/innobase/include/rem0cmp.ic @@ -15,7 +15,8 @@ cmp_data_data( /*==========*/ /* out: 1, 0, -1, if data1 is greater, equal, less than data2, respectively */ - dtype_t* cur_type,/* in: data type of the fields */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ byte* data1, /* in: data field (== a pointer to a memory buffer) */ ulint len1, /* in: data field length or UNIV_SQL_NULL */ @@ -23,7 +24,7 @@ cmp_data_data( buffer) */ ulint len2) /* in: data field length or UNIV_SQL_NULL */ { - return(cmp_data_data_slow(cur_type, data1, len1, data2, len2)); + return(cmp_data_data_slow(mtype, prtype, data1, len1, data2, len2)); } /***************************************************************** @@ -38,12 +39,17 @@ cmp_dfield_dfield( dfield_t* dfield1,/* in: data field; must have type field set */ dfield_t* dfield2)/* in: data field */ { + const dtype_t* type; + ut_ad(dfield_check_typed(dfield1)); - return(cmp_data_data - (dfield_get_type(dfield1), - dfield_get_data(dfield1), dfield_get_len(dfield1), - dfield_get_data(dfield2), dfield_get_len(dfield2))); + type = dfield_get_type(dfield1); + + return(cmp_data_data(type->mtype, type->prtype, + dfield_get_data(dfield1), + dfield_get_len(dfield1), + dfield_get_data(dfield2), + dfield_get_len(dfield2))); } /***************************************************************** diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index af11198d001..43ef6650e90 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -14,11 +14,6 @@ Created 5/30/1994 Heikki Tuuri #include "rem0types.h" #include "mtr0types.h" -/* Maximum values for various fields (for non-blob tuples) */ -#define REC_MAX_N_FIELDS (1024 - 1) -#define REC_MAX_HEAP_NO (2 * 8192 - 1) -#define REC_MAX_N_OWNED (16 - 1) - /* Info bit denoting the predefined minimum record: this bit is set if and only if the record is the first user record on a non-leaf B-tree page that is the leftmost page on its level diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic index 5e4c0ea0664..ace90247b80 100644 --- a/storage/innobase/include/rem0rec.ic +++ b/storage/innobase/include/rem0rec.ic @@ -336,8 +336,8 @@ rec_set_next_offs( as a non-negative number */ field_value = (ulint)((lint)next - - (lint)ut_align_offset - (rec, UNIV_PAGE_SIZE)); + - (lint)ut_align_offset( + rec, UNIV_PAGE_SIZE)); field_value &= REC_NEXT_MASK; } else { field_value = 0; @@ -597,15 +597,15 @@ rec_get_deleted_flag( ulint comp) /* in: nonzero=compact page format */ { if (UNIV_EXPECT(comp, 
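
The cmp_data_data() change above replaces the dtype_t* argument with the two scalar type codes, which the packed dict_col_t can now supply directly. A sketch of a comparator with that calling convention (the demo_* names are illustrative, and the body only does a binary comparison, not InnoDB's full type-aware logic):

#include <stdio.h>
#include <string.h>

/* new-style calling convention: type information as two plain integers */
static int demo_cmp_data_data(unsigned long mtype, unsigned long prtype,
			      const unsigned char* d1, unsigned long l1,
			      const unsigned char* d2, unsigned long l2)
{
	unsigned long	n = l1 < l2 ? l1 : l2;
	int		c;

	(void) mtype;	/* a real comparator branches on the main type */
	(void) prtype;	/* ... and consults the charset/flags in prtype */

	c = memcmp(d1, d2, n);
	return c ? c : (l1 < l2 ? -1 : l1 > l2);
}

int main(void)
{
	/* prints a negative value: "abc" sorts before "abd" */
	printf("%d\n", demo_cmp_data_data(0, 0,
					  (const unsigned char*) "abc", 3,
					  (const unsigned char*) "abd", 3));
	return 0;
}
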
REC_OFFS_COMPACT)) { - return(UNIV_UNLIKELY - (rec_get_bit_field_1(rec, REC_NEW_INFO_BITS, - REC_INFO_DELETED_FLAG, - REC_INFO_BITS_SHIFT))); + return(UNIV_UNLIKELY( + rec_get_bit_field_1(rec, REC_NEW_INFO_BITS, + REC_INFO_DELETED_FLAG, + REC_INFO_BITS_SHIFT))); } else { - return(UNIV_UNLIKELY - (rec_get_bit_field_1(rec, REC_OLD_INFO_BITS, - REC_INFO_DELETED_FLAG, - REC_INFO_BITS_SHIFT))); + return(UNIV_UNLIKELY( + rec_get_bit_field_1(rec, REC_OLD_INFO_BITS, + REC_INFO_DELETED_FLAG, + REC_INFO_BITS_SHIFT))); } } @@ -846,16 +846,16 @@ rec_offs_validate( if (index) { ulint max_n_fields; ut_ad((ulint) index == offsets[3]); - max_n_fields = ut_max - (dict_index_get_n_fields(index), - dict_index_get_n_unique_in_tree(index) + 1); + max_n_fields = ut_max( + dict_index_get_n_fields(index), + dict_index_get_n_unique_in_tree(index) + 1); if (comp && rec) { switch (rec_get_status(rec)) { case REC_STATUS_ORDINARY: break; case REC_STATUS_NODE_PTR: - max_n_fields = dict_index_get_n_unique_in_tree - (index) + 1; + max_n_fields = dict_index_get_n_unique_in_tree( + index) + 1; break; case REC_STATUS_INFIMUM: case REC_STATUS_SUPREMUM: @@ -1453,8 +1453,8 @@ rec_get_converted_size( data_size = dtuple_get_data_size(dtuple); - extra_size = rec_get_converted_extra_size - (data_size, dtuple_get_n_fields(dtuple)); + extra_size = rec_get_converted_extra_size( + data_size, dtuple_get_n_fields(dtuple)); return(data_size + extra_size); } diff --git a/storage/innobase/include/rem0types.h b/storage/innobase/include/rem0types.h index 94c394499c5..79c162392d2 100644 --- a/storage/innobase/include/rem0types.h +++ b/storage/innobase/include/rem0types.h @@ -12,5 +12,9 @@ Created 5/30/1994 Heikki Tuuri /* We define the physical record simply as an array of bytes */ typedef byte rec_t; +/* Maximum values for various fields (for non-blob tuples) */ +#define REC_MAX_N_FIELDS (1024 - 1) +#define REC_MAX_HEAP_NO (2 * 8192 - 1) +#define REC_MAX_N_OWNED (16 - 1) #endif diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic index 5e852559fe4..6173849e68f 100644 --- a/storage/innobase/include/row0upd.ic +++ b/storage/innobase/include/row0upd.ic @@ -93,8 +93,8 @@ upd_field_set_field_no( (ulong) dict_index_get_n_fields(index)); } - dtype_copy(dfield_get_type(&(upd_field->new_val)), - dict_index_get_nth_type(index, field_no)); + dict_col_copy_type(dict_index_get_nth_col(index, field_no), + dfield_get_type(&(upd_field->new_val))); } /************************************************************************* diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h index e5f3e1341c5..d8f3c400918 100644 --- a/storage/innobase/include/sync0rw.h +++ b/storage/innobase/include/sync0rw.h @@ -426,13 +426,6 @@ struct rw_lock_struct { waiters (readers or writers) in the global wait array, waiting for this rw_lock. Otherwise, == 0. */ - ibool writer_is_wait_ex; - /* This is TRUE if the writer field is - RW_LOCK_WAIT_EX; this field is located far - from the memory update hotspot fields which - are at the start of this struct, thus we can - peek this field without causing much memory - bus traffic */ UT_LIST_NODE_T(rw_lock_t) list; /* All allocated rw locks are put into a list */ @@ -440,15 +433,21 @@ struct rw_lock_struct { UT_LIST_BASE_NODE_T(rw_lock_debug_t) debug_list; /* In the debug version: pointer to the debug info list of the lock */ -#endif /* UNIV_SYNC_DEBUG */ - ulint level; /* Level in the global latching order. 
*/ +#endif /* UNIV_SYNC_DEBUG */ const char* cfile_name;/* File name where lock created */ - ulint cline; /* Line where created */ const char* last_s_file_name;/* File name where last s-locked */ const char* last_x_file_name;/* File name where last x-locked */ - ulint last_s_line; /* Line number where last time s-locked */ - ulint last_x_line; /* Line number where last time x-locked */ + ibool writer_is_wait_ex; + /* This is TRUE if the writer field is + RW_LOCK_WAIT_EX; this field is located far + from the memory update hotspot fields which + are at the start of this struct, thus we can + peek this field without causing much memory + bus traffic */ + unsigned cline:14; /* Line where created */ + unsigned last_s_line:14; /* Line number where last time s-locked */ + unsigned last_x_line:14; /* Line number where last time x-locked */ ulint magic_n; }; diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index e60241b0ff3..63fdabbe823 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -91,6 +91,7 @@ memory is read outside the allocated blocks. */ #define UNIV_SYNC_PERF_STAT #define UNIV_SEARCH_PERF_STAT #define UNIV_SRV_PRINT_LATCH_WAITS +#define UNIV_BTR_PRINT #endif #define UNIV_BTR_DEBUG diff --git a/storage/innobase/lock/lock0lock.c b/storage/innobase/lock/lock0lock.c index 3a2f17f41e5..551470f49b6 100644 --- a/storage/innobase/lock/lock0lock.c +++ b/storage/innobase/lock/lock0lock.c @@ -1041,8 +1041,8 @@ lock_has_to_wait( return(lock_rec_has_to_wait(lock1->trx, lock1->type_mode, lock2, - lock_rec_get_nth_bit - (lock1, 1))); + lock_rec_get_nth_bit( + lock1, 1))); } return(TRUE); @@ -1303,13 +1303,13 @@ lock_rec_get_next( if (page_rec_is_comp(rec)) { do { lock = lock_rec_get_next_on_page(lock); - } while (lock && !lock_rec_get_nth_bit - (lock, rec_get_heap_no(rec, TRUE))); + } while (lock && !lock_rec_get_nth_bit( + lock, rec_get_heap_no(rec, TRUE))); } else { do { lock = lock_rec_get_next_on_page(lock); - } while (lock && !lock_rec_get_nth_bit - (lock, rec_get_heap_no(rec, FALSE))); + } while (lock && !lock_rec_get_nth_bit( + lock, rec_get_heap_no(rec, FALSE))); } return(lock); @@ -1880,8 +1880,8 @@ lock_rec_enqueue_waiting( if (lock_deadlock_occurs(lock, trx)) { lock_reset_lock_and_trx_wait(lock); - lock_rec_reset_nth_bit(lock, rec_get_heap_no - (rec, page_rec_is_comp(rec))); + lock_rec_reset_nth_bit(lock, rec_get_heap_no( + rec, page_rec_is_comp(rec))); return(DB_DEADLOCK); } @@ -2661,8 +2661,9 @@ lock_move_reorganize_page( for (;;) { ut_ad(comp || !memcmp(page_cur_get_rec(&cur1), page_cur_get_rec(&cur2), - rec_get_data_size_old - (page_cur_get_rec(&cur2)))); + rec_get_data_size_old( + page_cur_get_rec( + &cur2)))); old_heap_no = rec_get_heap_no(page_cur_get_rec(&cur2), comp); @@ -2759,8 +2760,9 @@ lock_move_rec_list_end( while (page_cur_get_rec(&cur1) != sup) { ut_ad(comp || !memcmp(page_cur_get_rec(&cur1), page_cur_get_rec(&cur2), - rec_get_data_size_old - (page_cur_get_rec(&cur2)))); + rec_get_data_size_old( + page_cur_get_rec( + &cur2)))); heap_no = rec_get_heap_no(page_cur_get_rec(&cur1), comp); @@ -2839,8 +2841,9 @@ lock_move_rec_list_start( while (page_cur_get_rec(&cur1) != rec) { ut_ad(comp || !memcmp(page_cur_get_rec(&cur1), page_cur_get_rec(&cur2), - rec_get_data_size_old - (page_cur_get_rec(&cur2)))); + rec_get_data_size_old( + page_cur_get_rec( + &cur2)))); heap_no = rec_get_heap_no(page_cur_get_rec(&cur1), comp); @@ -2898,8 +2901,8 @@ lock_update_split_right( of the infimum on right page */ 
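
The rw_lock_struct change earlier in this hunk moves writer_is_wait_ex next to the other rarely written bookkeeping fields and packs the source-line numbers into 14-bit fields. A sketch of the general hot/cold field layout idea, with offsetof() to make the placement visible (field names are illustrative, not InnoDB's):

#include <stdio.h>
#include <stddef.h>

struct demo_rw_lock {
	/* hot fields, updated on every lock/unlock, kept at the start */
	volatile unsigned long	lock_word;
	volatile unsigned long	waiters;
	/* cold bookkeeping fields, only read occasionally, kept at the end */
	const char*	cfile_name;		/* file where the lock was created */
	int		writer_is_wait_ex;	/* peeked at away from the hot fields */
	unsigned	cline:14;		/* 14 bits suffice for a line number */
	unsigned	last_s_line:14;
	unsigned	last_x_line:14;
};

int main(void)
{
	printf("hot fields start at offset %lu, cold fields at offset %lu\n",
	       (unsigned long) offsetof(struct demo_rw_lock, lock_word),
	       (unsigned long) offsetof(struct demo_rw_lock, cfile_name));
	return 0;
}
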
lock_rec_inherit_to_gap(page_get_supremum_rec(left_page), - page_rec_get_next - (page_get_infimum_rec(right_page))); + page_rec_get_next( + page_get_infimum_rec(right_page))); lock_mutex_exit_kernel(); } @@ -3000,8 +3003,8 @@ lock_update_split_left( successor of the infimum on the right page */ lock_rec_inherit_to_gap(page_get_supremum_rec(left_page), - page_rec_get_next - (page_get_infimum_rec(right_page))); + page_rec_get_next( + page_get_infimum_rec(right_page))); lock_mutex_exit_kernel(); } @@ -3439,9 +3442,9 @@ lock_deadlock_recursive( incompatible mode, and is itself waiting for a lock */ - ret = lock_deadlock_recursive - (start, lock_trx, - lock_trx->wait_lock, cost, depth + 1); + ret = lock_deadlock_recursive( + start, lock_trx, + lock_trx->wait_lock, cost, depth + 1); if (ret != 0) { return(ret); @@ -4219,9 +4222,9 @@ lock_rec_print( if (page) { rec_t* rec = page_find_rec_with_heap_no(page, i); - offsets = rec_get_offsets - (rec, lock->index, offsets, - ULINT_UNDEFINED, &heap); + offsets = rec_get_offsets( + rec, lock->index, offsets, + ULINT_UNDEFINED, &heap); rec_print_new(file, rec, offsets); } @@ -4378,14 +4381,14 @@ loop: fprintf(file, "Trx read view will not see trx with" " id >= %lu %lu, sees < %lu %lu\n", - (ulong) ut_dulint_get_high - (trx->read_view->low_limit_id), - (ulong) ut_dulint_get_low - (trx->read_view->low_limit_id), - (ulong) ut_dulint_get_high - (trx->read_view->up_limit_id), - (ulong) ut_dulint_get_low - (trx->read_view->up_limit_id)); + (ulong) ut_dulint_get_high( + trx->read_view->low_limit_id), + (ulong) ut_dulint_get_low( + trx->read_view->low_limit_id), + (ulong) ut_dulint_get_high( + trx->read_view->up_limit_id), + (ulong) ut_dulint_get_low( + trx->read_view->up_limit_id)); } fprintf(file, @@ -4443,8 +4446,8 @@ loop: mtr_start(&mtr); - page = buf_page_get_with_no_latch - (space, page_no, &mtr); + page = buf_page_get_with_no_latch( + space, page_no, &mtr); mtr_commit(&mtr); @@ -4510,8 +4513,9 @@ lock_table_queue_validate( ut_a(!is_waiting); - ut_a(!lock_table_other_has_incompatible - (lock->trx, 0, table, lock_get_mode(lock))); + ut_a(!lock_table_other_has_incompatible( + lock->trx, 0, table, + lock_get_mode(lock))); } else { is_waiting = TRUE; @@ -4580,8 +4584,8 @@ lock_rec_queue_validate( impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets); - if (impl_trx && lock_rec_other_has_expl_req - (LOCK_S, 0, LOCK_WAIT, rec, impl_trx)) { + if (impl_trx && lock_rec_other_has_expl_req( + LOCK_S, 0, LOCK_WAIT, rec, impl_trx)) { ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec, impl_trx)); @@ -4594,11 +4598,11 @@ lock_rec_queue_validate( next function call: we have to release lock table mutex to obey the latching order */ - impl_trx = lock_sec_rec_some_has_impl_off_kernel - (rec, index, offsets); + impl_trx = lock_sec_rec_some_has_impl_off_kernel( + rec, index, offsets); - if (impl_trx && lock_rec_other_has_expl_req - (LOCK_S, 0, LOCK_WAIT, rec, impl_trx)) { + if (impl_trx && lock_rec_other_has_expl_req( + LOCK_S, 0, LOCK_WAIT, rec, impl_trx)) { ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec, impl_trx)); @@ -4626,8 +4630,8 @@ lock_rec_queue_validate( } else { mode = LOCK_S; } - ut_a(!lock_rec_other_has_expl_req - (mode, 0, 0, rec, lock->trx)); + ut_a(!lock_rec_other_has_expl_req( + mode, 0, 0, rec, lock->trx)); } else if (lock_get_wait(lock) && !lock_rec_get_gap(lock)) { @@ -4764,8 +4768,8 @@ lock_validate(void) while (lock) { if (lock_get_type(lock) & LOCK_TABLE) { - lock_table_queue_validate - (lock->un_member.tab_lock.table); + 
lock_table_queue_validate( + lock->un_member.tab_lock.table); } lock = UT_LIST_GET_NEXT(trx_locks, lock); @@ -4787,9 +4791,9 @@ lock_validate(void) space = lock->un_member.rec_lock.space; page_no = lock->un_member.rec_lock.page_no; - if (ut_dulint_cmp - (ut_dulint_create(space, page_no), - limit) >= 0) { + if (ut_dulint_cmp( + ut_dulint_create(space, page_no), + limit) >= 0) { break; } @@ -4889,8 +4893,9 @@ lock_rec_insert_check_and_lock( had to wait for their insert. Both had waiting gap type lock requests on the successor, which produced an unnecessary deadlock. */ - if (lock_rec_other_has_conflicting - (LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION, next_rec, trx)) { + if (lock_rec_other_has_conflicting( + LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION, next_rec, + trx)) { /* Note that we may get DB_SUCCESS also here! */ err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP @@ -4952,8 +4957,8 @@ lock_rec_convert_impl_to_expl( if (index->type & DICT_CLUSTERED) { impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets); } else { - impl_trx = lock_sec_rec_some_has_impl_off_kernel - (rec, index, offsets); + impl_trx = lock_sec_rec_some_has_impl_off_kernel( + rec, index, offsets); } if (impl_trx) { @@ -4963,9 +4968,9 @@ lock_rec_convert_impl_to_expl( if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec, impl_trx)) { - lock_rec_add_to_queue - (LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP, - rec, index, impl_trx); + lock_rec_add_to_queue( + LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP, + rec, index, impl_trx); } } } diff --git a/storage/innobase/log/log0log.c b/storage/innobase/log/log0log.c index 9ac9e40466c..5d8875f1bd0 100644 --- a/storage/innobase/log/log0log.c +++ b/storage/innobase/log/log0log.c @@ -355,8 +355,8 @@ log_close(void) full by the current mtr: the next mtr log record group will start within this block at the offset data_len */ - log_block_set_first_rec_group - (log_block, log_block_get_data_len(log_block)); + log_block_set_first_rec_group( + log_block, log_block_get_data_len(log_block)); } if (log->buf_free > log->max_buf_free) { @@ -900,17 +900,17 @@ log_group_init( #endif /* UNIV_LOG_ARCHIVE */ for (i = 0; i < n_files; i++) { - *(group->file_header_bufs + i) = ut_align - (mem_alloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE), - OS_FILE_LOG_BLOCK_SIZE); + *(group->file_header_bufs + i) = ut_align( + mem_alloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE), + OS_FILE_LOG_BLOCK_SIZE); memset(*(group->file_header_bufs + i), '\0', LOG_FILE_HDR_SIZE); #ifdef UNIV_LOG_ARCHIVE - *(group->archive_file_header_bufs + i) = ut_align - (mem_alloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE), - OS_FILE_LOG_BLOCK_SIZE); + *(group->archive_file_header_bufs + i) = ut_align( + mem_alloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE), + OS_FILE_LOG_BLOCK_SIZE); memset(*(group->archive_file_header_bufs + i), '\0', LOG_FILE_HDR_SIZE); #endif /* UNIV_LOG_ARCHIVE */ @@ -923,9 +923,8 @@ log_group_init( group->archived_offset = 0; #endif /* UNIV_LOG_ARCHIVE */ - group->checkpoint_buf = ut_align - (mem_alloc(2 * OS_FILE_LOG_BLOCK_SIZE), - OS_FILE_LOG_BLOCK_SIZE); + group->checkpoint_buf = ut_align( + mem_alloc(2 * OS_FILE_LOG_BLOCK_SIZE), OS_FILE_LOG_BLOCK_SIZE); memset(group->checkpoint_buf, '\0', OS_FILE_LOG_BLOCK_SIZE); @@ -1029,9 +1028,9 @@ log_sys_check_flush_completion(void) /* Move the log buffer content to the start of the buffer */ - move_start = ut_calc_align_down - (log_sys->write_end_offset, - OS_FILE_LOG_BLOCK_SIZE); + move_start = ut_calc_align_down( + log_sys->write_end_offset, + OS_FILE_LOG_BLOCK_SIZE); move_end = 
ut_calc_align(log_sys->buf_free, OS_FILE_LOG_BLOCK_SIZE); @@ -1255,16 +1254,16 @@ loop: (ulong) ut_dulint_get_high(start_lsn), (ulong) ut_dulint_get_low(start_lsn), (ulong) log_block_get_hdr_no(buf), - (ulong) log_block_get_hdr_no - (buf + write_len - OS_FILE_LOG_BLOCK_SIZE)); + (ulong) log_block_get_hdr_no( + buf + write_len - OS_FILE_LOG_BLOCK_SIZE)); ut_a(log_block_get_hdr_no(buf) == log_block_convert_lsn_to_no(start_lsn)); for (i = 0; i < write_len / OS_FILE_LOG_BLOCK_SIZE; i++) { ut_a(log_block_get_hdr_no(buf) + i - == log_block_get_hdr_no - (buf + i * OS_FILE_LOG_BLOCK_SIZE)); + == log_block_get_hdr_no( + buf + i * OS_FILE_LOG_BLOCK_SIZE)); } } #endif /* UNIV_DEBUG */ @@ -1412,10 +1411,10 @@ loop: if (log_debug_writes) { fprintf(stderr, "Writing log from %lu %lu up to lsn %lu %lu\n", - (ulong) ut_dulint_get_high - (log_sys->written_to_all_lsn), - (ulong) ut_dulint_get_low - (log_sys->written_to_all_lsn), + (ulong) ut_dulint_get_high( + log_sys->written_to_all_lsn), + (ulong) ut_dulint_get_low( + log_sys->written_to_all_lsn), (ulong) ut_dulint_get_high(log_sys->lsn), (ulong) ut_dulint_get_low(log_sys->lsn)); } @@ -1446,9 +1445,9 @@ loop: log_sys->one_flushed = FALSE; log_block_set_flush_bit(log_sys->buf + area_start, TRUE); - log_block_set_checkpoint_no - (log_sys->buf + area_end - OS_FILE_LOG_BLOCK_SIZE, - log_sys->next_checkpoint_no); + log_block_set_checkpoint_no( + log_sys->buf + area_end - OS_FILE_LOG_BLOCK_SIZE, + log_sys->next_checkpoint_no); /* Copy the last, incompletely written, log block a log block length up, so that when the flush operation writes from the log buffer, the @@ -1466,12 +1465,12 @@ loop: /* Do the write to the log files */ while (group) { - log_group_write_buf - (group, log_sys->buf + area_start, - area_end - area_start, - ut_dulint_align_down(log_sys->written_to_all_lsn, - OS_FILE_LOG_BLOCK_SIZE), - start_offset - area_start); + log_group_write_buf( + group, log_sys->buf + area_start, + area_end - area_start, + ut_dulint_align_down(log_sys->written_to_all_lsn, + OS_FILE_LOG_BLOCK_SIZE), + start_offset - area_start); log_group_set_fields(group, log_sys->write_lsn); @@ -1730,8 +1729,8 @@ log_group_checkpoint( log_sys->next_checkpoint_lsn); mach_write_to_4(buf + LOG_CHECKPOINT_OFFSET, - log_group_calc_lsn_offset - (log_sys->next_checkpoint_lsn, group)); + log_group_calc_lsn_offset( + log_sys->next_checkpoint_lsn, group)); mach_write_to_4(buf + LOG_CHECKPOINT_LOG_BUF_SIZE, log_sys->buf_size); @@ -2355,9 +2354,9 @@ loop: OS_DATA_FILE, &ret); if (!ret && (open_mode == OS_FILE_CREATE)) { - file_handle = os_file_create - (name, OS_FILE_OPEN, OS_FILE_AIO, - OS_DATA_FILE, &ret); + file_handle = os_file_create( + name, OS_FILE_OPEN, OS_FILE_AIO, + OS_DATA_FILE, &ret); } if (!ret) { @@ -2388,10 +2387,10 @@ loop: group->archive_space_id, FALSE); if (next_offset % group->file_size == 0) { - log_group_archive_file_header_write - (group, n_files, - group->archived_file_no + n_files, - start_lsn); + log_group_archive_file_header_write( + group, n_files, + group->archived_file_no + n_files, + start_lsn); next_offset += LOG_FILE_HDR_SIZE; } @@ -2513,10 +2512,10 @@ log_archive_write_complete_groups(void) #endif /* UNIV_DEBUG */ /* Calculate the archive file space start lsn */ - start_lsn = ut_dulint_subtract - (log_sys->next_archived_lsn, - end_offset - LOG_FILE_HDR_SIZE + trunc_files - * (group->file_size - LOG_FILE_HDR_SIZE)); + start_lsn = ut_dulint_subtract( + log_sys->next_archived_lsn, + end_offset - LOG_FILE_HDR_SIZE + trunc_files + * (group->file_size - 
LOG_FILE_HDR_SIZE)); end_lsn = start_lsn; for (i = 0; i < trunc_files; i++) { @@ -2656,8 +2655,8 @@ loop: if (ut_dulint_cmp(limit_lsn, log_sys->lsn) >= 0) { - limit_lsn = ut_dulint_align_down - (log_sys->lsn, OS_FILE_LOG_BLOCK_SIZE); + limit_lsn = ut_dulint_align_down( + log_sys->lsn, OS_FILE_LOG_BLOCK_SIZE); } } @@ -2804,8 +2803,8 @@ log_archive_close_groups( /* Write a notice to the headers of archived log files that the file write has been completed */ - log_group_archive_completed_header_write - (group, 0, log_sys->archived_lsn); + log_group_archive_completed_header_write( + group, 0, log_sys->archived_lsn); fil_space_truncate_start(group->archive_space_id, trunc_len); diff --git a/storage/innobase/log/log0recv.c b/storage/innobase/log/log0recv.c index 3a53e0eb6e5..280ebbb6bf2 100644 --- a/storage/innobase/log/log0recv.c +++ b/storage/innobase/log/log0recv.c @@ -239,12 +239,14 @@ recv_truncate_group( archived_lsn = checkpoint_lsn; } - finish_lsn1 = ut_dulint_add(ut_dulint_align_down - (archived_lsn, OS_FILE_LOG_BLOCK_SIZE), + finish_lsn1 = ut_dulint_add(ut_dulint_align_down( + archived_lsn, + OS_FILE_LOG_BLOCK_SIZE), log_group_get_capacity(group)); - finish_lsn2 = ut_dulint_add(ut_dulint_align_up - (recovered_lsn, OS_FILE_LOG_BLOCK_SIZE), + finish_lsn2 = ut_dulint_add(ut_dulint_align_up( + recovered_lsn, + OS_FILE_LOG_BLOCK_SIZE), recv_sys->last_log_buf_size); if (ut_dulint_cmp(limit_lsn, ut_dulint_max) != 0) { @@ -274,8 +276,8 @@ recv_truncate_group( ut_memcpy(log_sys->buf, recv_sys->last_block, OS_FILE_LOG_BLOCK_SIZE); - log_block_set_data_len(log_sys->buf, ut_dulint_minus - (recovered_lsn, start_lsn)); + log_block_set_data_len(log_sys->buf, ut_dulint_minus( + recovered_lsn, start_lsn)); } if (ut_dulint_cmp(start_lsn, finish_lsn) >= 0) { @@ -441,16 +443,16 @@ recv_check_cp_is_consistent( fold = ut_fold_binary(buf, LOG_CHECKPOINT_CHECKSUM_1); - if ((fold & 0xFFFFFFFFUL) != mach_read_from_4 - (buf + LOG_CHECKPOINT_CHECKSUM_1)) { + if ((fold & 0xFFFFFFFFUL) != mach_read_from_4( + buf + LOG_CHECKPOINT_CHECKSUM_1)) { return(FALSE); } fold = ut_fold_binary(buf + LOG_CHECKPOINT_LSN, LOG_CHECKPOINT_CHECKSUM_2 - LOG_CHECKPOINT_LSN); - if ((fold & 0xFFFFFFFFUL) != mach_read_from_4 - (buf + LOG_CHECKPOINT_CHECKSUM_2)) { + if ((fold & 0xFFFFFFFFUL) != mach_read_from_4( + buf + LOG_CHECKPOINT_CHECKSUM_2)) { return(FALSE); } @@ -498,9 +500,9 @@ recv_find_max_checkpoint( " %lu at %lu invalid, %lu\n", (ulong) group->id, (ulong) field, - (ulong) mach_read_from_4 - (buf - + LOG_CHECKPOINT_CHECKSUM_1)); + (ulong) mach_read_from_4( + buf + + LOG_CHECKPOINT_CHECKSUM_1)); } #endif /* UNIV_DEBUG */ @@ -509,20 +511,20 @@ recv_find_max_checkpoint( group->state = LOG_GROUP_OK; - group->lsn = mach_read_from_8 - (buf + LOG_CHECKPOINT_LSN); - group->lsn_offset = mach_read_from_4 - (buf + LOG_CHECKPOINT_OFFSET); - checkpoint_no = mach_read_from_8 - (buf + LOG_CHECKPOINT_NO); + group->lsn = mach_read_from_8( + buf + LOG_CHECKPOINT_LSN); + group->lsn_offset = mach_read_from_4( + buf + LOG_CHECKPOINT_OFFSET); + checkpoint_no = mach_read_from_8( + buf + LOG_CHECKPOINT_NO); #ifdef UNIV_DEBUG if (log_debug_writes) { fprintf(stderr, "InnoDB: Checkpoint number %lu" " found in group %lu\n", - (ulong) ut_dulint_get_low - (checkpoint_no), + (ulong) ut_dulint_get_low( + checkpoint_no), (ulong) group->id); } #endif /* UNIV_DEBUG */ @@ -609,8 +611,8 @@ recv_read_cp_info_for_backup( if (mach_read_from_4(cp_buf + LOG_CHECKPOINT_FSP_MAGIC_N) == LOG_CHECKPOINT_FSP_MAGIC_N_VAL) { - *fsp_limit = mach_read_from_4 - (cp_buf + 
LOG_CHECKPOINT_FSP_FREE_LIMIT); + *fsp_limit = mach_read_from_4( + cp_buf + LOG_CHECKPOINT_FSP_FREE_LIMIT); if (*fsp_limit == 0) { *fsp_limit = 1000000000; @@ -779,10 +781,10 @@ recv_parse_or_apply_log_rec_body( ptr = mlog_parse_nbytes(type, ptr, end_ptr, page); break; case MLOG_REC_INSERT: case MLOG_COMP_REC_INSERT: - if (NULL != (ptr = mlog_parse_index - (ptr, end_ptr, - type == MLOG_COMP_REC_INSERT, - &index))) { + if (NULL != (ptr = mlog_parse_index( + ptr, end_ptr, + type == MLOG_COMP_REC_INSERT, + &index))) { ut_a(!page || (ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); @@ -791,15 +793,15 @@ recv_parse_or_apply_log_rec_body( } break; case MLOG_REC_CLUST_DELETE_MARK: case MLOG_COMP_REC_CLUST_DELETE_MARK: - if (NULL != (ptr = mlog_parse_index - (ptr, end_ptr, - type == MLOG_COMP_REC_CLUST_DELETE_MARK, - &index))) { + if (NULL != (ptr = mlog_parse_index( + ptr, end_ptr, + type == MLOG_COMP_REC_CLUST_DELETE_MARK, + &index))) { ut_a(!page || (ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); - ptr = btr_cur_parse_del_mark_set_clust_rec - (ptr, end_ptr, index, page); + ptr = btr_cur_parse_del_mark_set_clust_rec( + ptr, end_ptr, index, page); } break; case MLOG_COMP_REC_SEC_DELETE_MARK: @@ -815,10 +817,10 @@ recv_parse_or_apply_log_rec_body( ptr = btr_cur_parse_del_mark_set_sec_rec(ptr, end_ptr, page); break; case MLOG_REC_UPDATE_IN_PLACE: case MLOG_COMP_REC_UPDATE_IN_PLACE: - if (NULL != (ptr = mlog_parse_index - (ptr, end_ptr, - type == MLOG_COMP_REC_UPDATE_IN_PLACE, - &index))) { + if (NULL != (ptr = mlog_parse_index( + ptr, end_ptr, + type == MLOG_COMP_REC_UPDATE_IN_PLACE, + &index))) { ut_a(!page || (ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); @@ -828,11 +830,11 @@ recv_parse_or_apply_log_rec_body( break; case MLOG_LIST_END_DELETE: case MLOG_COMP_LIST_END_DELETE: case MLOG_LIST_START_DELETE: case MLOG_COMP_LIST_START_DELETE: - if (NULL != (ptr = mlog_parse_index - (ptr, end_ptr, - type == MLOG_COMP_LIST_END_DELETE - || type == MLOG_COMP_LIST_START_DELETE, - &index))) { + if (NULL != (ptr = mlog_parse_index( + ptr, end_ptr, + type == MLOG_COMP_LIST_END_DELETE + || type == MLOG_COMP_LIST_START_DELETE, + &index))) { ut_a(!page || (ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); @@ -841,22 +843,22 @@ recv_parse_or_apply_log_rec_body( } break; case MLOG_LIST_END_COPY_CREATED: case MLOG_COMP_LIST_END_COPY_CREATED: - if (NULL != (ptr = mlog_parse_index - (ptr, end_ptr, - type == MLOG_COMP_LIST_END_COPY_CREATED, - &index))) { + if (NULL != (ptr = mlog_parse_index( + ptr, end_ptr, + type == MLOG_COMP_LIST_END_COPY_CREATED, + &index))) { ut_a(!page || (ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); - ptr = page_parse_copy_rec_list_to_created_page - (ptr, end_ptr, index, page, mtr); + ptr = page_parse_copy_rec_list_to_created_page( + ptr, end_ptr, index, page, mtr); } break; case MLOG_PAGE_REORGANIZE: case MLOG_COMP_PAGE_REORGANIZE: - if (NULL != (ptr = mlog_parse_index - (ptr, end_ptr, - type == MLOG_COMP_PAGE_REORGANIZE, - &index))) { + if (NULL != (ptr = mlog_parse_index( + ptr, end_ptr, + type == MLOG_COMP_PAGE_REORGANIZE, + &index))) { ut_a(!page || (ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); @@ -887,15 +889,15 @@ recv_parse_or_apply_log_rec_body( page, mtr); break; case MLOG_REC_MIN_MARK: case MLOG_COMP_REC_MIN_MARK: - ptr = btr_parse_set_min_rec_mark - (ptr, end_ptr, type == MLOG_COMP_REC_MIN_MARK, - page, mtr); + ptr = btr_parse_set_min_rec_mark( + ptr, end_ptr, type == 
MLOG_COMP_REC_MIN_MARK, + page, mtr); break; case MLOG_REC_DELETE: case MLOG_COMP_REC_DELETE: - if (NULL != (ptr = mlog_parse_index - (ptr, end_ptr, - type == MLOG_COMP_REC_DELETE, - &index))) { + if (NULL != (ptr = mlog_parse_index( + ptr, end_ptr, + type == MLOG_COMP_REC_DELETE, + &index))) { ut_a(!page || (ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); @@ -1430,8 +1432,8 @@ loop: RW_X_LATCH, &mtr); #ifdef UNIV_SYNC_DEBUG - buf_page_dbg_add_level - (page, SYNC_NO_ORDER_CHECK); + buf_page_dbg_add_level( + page, SYNC_NO_ORDER_CHECK); #endif /* UNIV_SYNC_DEBUG */ recv_recover_page(FALSE, FALSE, page, space, page_no); @@ -1570,18 +1572,18 @@ recv_apply_log_recs_for_backup(void) the block corresponding to buf_pool->frame_zero (== page). */ - buf_page_init_for_backup_restore - (recv_addr->space, recv_addr->page_no, - buf_block_align(page)); + buf_page_init_for_backup_restore( + recv_addr->space, recv_addr->page_no, + buf_block_align(page)); /* Extend the tablespace's last file if the page_no does not fall inside its bounds; we assume the last file is auto-extending, and ibbackup copied the file when it still was smaller */ - success = fil_extend_space_to_desired_size - (&actual_size, - recv_addr->space, recv_addr->page_no + 1); + success = fil_extend_space_to_desired_size( + &actual_size, + recv_addr->space, recv_addr->page_no + 1); if (!success) { fprintf(stderr, "InnoDB: Fatal error: cannot extend" @@ -1615,9 +1617,9 @@ recv_apply_log_recs_for_backup(void) /* Write the page back to the tablespace file using the fil0fil.c routines */ - buf_flush_init_for_writing - (page, mach_read_from_8(page + FIL_PAGE_LSN), - recv_addr->space, recv_addr->page_no); + buf_flush_init_for_writing( + page, mach_read_from_8(page + FIL_PAGE_LSN), + recv_addr->space, recv_addr->page_no); error = fil_io(OS_FILE_WRITE, TRUE, recv_addr->space, recv_addr->page_no, 0, UNIV_PAGE_SIZE, @@ -1916,8 +1918,9 @@ loop: fil_path_to_mysql_datadir is set in ibbackup to point to the datadir we should use there */ - if (NULL == fil_op_log_parse_or_replay - (body, end_ptr, type, TRUE, space)) { + if (NULL == fil_op_log_parse_or_replay( + body, end_ptr, type, TRUE, + space)) { fprintf(stderr, "InnoDB: Error: file op" " log record of type %lu" @@ -1956,8 +1959,8 @@ loop: if (recv_sys->found_corrupt_log) { - recv_report_corrupt_log - (ptr, type, space, page_no); + recv_report_corrupt_log( + ptr, type, space, page_no); } return(FALSE); @@ -1998,8 +2001,8 @@ loop: } } - new_recovered_lsn = recv_calc_lsn_on_data_add - (recv_sys->recovered_lsn, total_len); + new_recovered_lsn = recv_calc_lsn_on_data_add( + recv_sys->recovered_lsn, total_len); if (ut_dulint_cmp(new_recovered_lsn, recv_sys->scanned_lsn) > 0) { @@ -2202,22 +2205,21 @@ recv_scan_log_recs( || !log_block_checksum_is_ok_or_old_format(log_block)) { if (no == log_block_convert_lsn_to_no(scanned_lsn) - && !log_block_checksum_is_ok_or_old_format - (log_block)) { + && !log_block_checksum_is_ok_or_old_format( + log_block)) { fprintf(stderr, "InnoDB: Log block no %lu at" " lsn %lu %lu has\n" "InnoDB: ok header, but checksum field" " contains %lu, should be %lu\n", (ulong) no, - (ulong) ut_dulint_get_high - (scanned_lsn), - (ulong) ut_dulint_get_low - (scanned_lsn), - (ulong) log_block_get_checksum - (log_block), - (ulong) log_block_calc_checksum - (log_block)); + (ulong) ut_dulint_get_high( + scanned_lsn), + (ulong) ut_dulint_get_low(scanned_lsn), + (ulong) log_block_get_checksum( + log_block), + (ulong) log_block_calc_checksum( + log_block)); } /* Garbage or an 
incompletely written log block */ @@ -2273,8 +2275,8 @@ recv_scan_log_recs( recv_sys->parse_start_lsn = ut_dulint_add(scanned_lsn, - log_block_get_first_rec_group - (log_block)); + log_block_get_first_rec_group( + log_block)); recv_sys->scanned_lsn = recv_sys->parse_start_lsn; recv_sys->recovered_lsn = recv_sys->parse_start_lsn; } @@ -2297,8 +2299,8 @@ recv_scan_log_recs( recv_sys->found_corrupt_log = TRUE; } else if (!recv_sys->found_corrupt_log) { - more_data = recv_sys_add_to_parsing_buf - (log_block, scanned_lsn); + more_data = recv_sys_add_to_parsing_buf( + log_block, scanned_lsn); } recv_sys->scanned_lsn = scanned_lsn; @@ -2385,11 +2387,10 @@ recv_group_scan_log_recs( log_group_read_log_seg(LOG_RECOVER, log_sys->buf, group, start_lsn, end_lsn); - finished = recv_scan_log_recs - (TRUE, (buf_pool->n_frames - recv_n_pool_free_frames) - * UNIV_PAGE_SIZE, TRUE, log_sys->buf, - RECV_SCAN_SIZE, start_lsn, - contiguous_lsn, group_scanned_lsn); + finished = recv_scan_log_recs( + TRUE, (buf_pool->n_frames - recv_n_pool_free_frames) + * UNIV_PAGE_SIZE, TRUE, log_sys->buf, RECV_SCAN_SIZE, + start_lsn, contiguous_lsn, group_scanned_lsn); start_lsn = end_lsn; } @@ -2561,18 +2562,18 @@ recv_recovery_from_checkpoint_start( "InnoDB: %lu %lu and %lu %lu.\n" "InnoDB: #########################" "#################################\n", - (ulong) ut_dulint_get_high - (checkpoint_lsn), - (ulong) ut_dulint_get_low - (checkpoint_lsn), - (ulong) ut_dulint_get_high - (min_flushed_lsn), - (ulong) ut_dulint_get_low - (min_flushed_lsn), - (ulong) ut_dulint_get_high - (max_flushed_lsn), - (ulong) ut_dulint_get_low - (max_flushed_lsn)); + (ulong) ut_dulint_get_high( + checkpoint_lsn), + (ulong) ut_dulint_get_low( + checkpoint_lsn), + (ulong) ut_dulint_get_high( + min_flushed_lsn), + (ulong) ut_dulint_get_low( + min_flushed_lsn), + (ulong) ut_dulint_get_high( + max_flushed_lsn), + (ulong) ut_dulint_get_low( + max_flushed_lsn)); } recv_needed_recovery = TRUE; @@ -2602,8 +2603,8 @@ recv_recovery_from_checkpoint_start( " half-written data pages from" " the doublewrite\n" "InnoDB: buffer...\n"); - trx_sys_doublewrite_init_or_restore_pages - (TRUE); + trx_sys_doublewrite_init_or_restore_pages( + TRUE); } ut_print_timestamp(stderr); @@ -2629,10 +2630,10 @@ recv_recovery_from_checkpoint_start( group = recv_sys->archive_group; capacity = log_group_get_capacity(group); - if ((ut_dulint_cmp(recv_sys->scanned_lsn, ut_dulint_add - (checkpoint_lsn, capacity)) > 0) - || (ut_dulint_cmp(checkpoint_lsn, ut_dulint_add - (recv_sys->scanned_lsn, capacity)) + if ((ut_dulint_cmp(recv_sys->scanned_lsn, ut_dulint_add( + checkpoint_lsn, capacity)) > 0) + || (ut_dulint_cmp(checkpoint_lsn, ut_dulint_add( + recv_sys->scanned_lsn, capacity)) > 0)) { mutex_exit(&(log_sys->mutex)); @@ -2756,10 +2757,10 @@ recv_recovery_from_checkpoint_start( " lsn %lu %lu up to lsn %lu %lu\n", (ulong) ut_dulint_get_high(checkpoint_lsn), (ulong) ut_dulint_get_low(checkpoint_lsn), - (ulong) ut_dulint_get_high - (recv_sys->recovered_lsn), - (ulong) ut_dulint_get_low - (recv_sys->recovered_lsn)); + (ulong) ut_dulint_get_high( + recv_sys->recovered_lsn), + (ulong) ut_dulint_get_low( + recv_sys->recovered_lsn)); } } else { srv_start_lsn = recv_sys->recovered_lsn; @@ -3189,10 +3190,10 @@ ask_again: group->archive_space_id, read_offset / UNIV_PAGE_SIZE, read_offset % UNIV_PAGE_SIZE, len, buf, NULL); - ret = recv_scan_log_recs - (TRUE, (buf_pool->n_frames - recv_n_pool_free_frames) - * UNIV_PAGE_SIZE, TRUE, buf, len, start_lsn, - &dummy_lsn, &scanned_lsn); + ret = 
recv_scan_log_recs( + TRUE, (buf_pool->n_frames - recv_n_pool_free_frames) + * UNIV_PAGE_SIZE, TRUE, buf, len, start_lsn, + &dummy_lsn, &scanned_lsn); if (ut_dulint_cmp(scanned_lsn, file_end_lsn) == 0) { diff --git a/storage/innobase/mem/mem0dbg.c b/storage/innobase/mem/mem0dbg.c index 3cab454e3ac..419f410cf1b 100644 --- a/storage/innobase/mem/mem0dbg.c +++ b/storage/innobase/mem/mem0dbg.c @@ -497,8 +497,8 @@ mem_heap_validate_or_print( " %lx but trailer %lx\n", (ulint)block, (ulint)field, len, check_field, - mem_field_trailer_get_check - (user_field)); + mem_field_trailer_get_check( + user_field)); return; } diff --git a/storage/innobase/mem/mem0pool.c b/storage/innobase/mem/mem0pool.c index 91e7532aba8..6a419ff33ec 100644 --- a/storage/innobase/mem/mem0pool.c +++ b/storage/innobase/mem/mem0pool.c @@ -501,8 +501,8 @@ mem_area_free( ulint next_size; - next_size = mem_area_get_size - ((mem_area_t*)(((byte*)area) + size)); + next_size = mem_area_get_size( + (mem_area_t*)(((byte*)area) + size)); if (ut_2_power_up(next_size) != next_size) { fprintf(stderr, "InnoDB: Error: Memory area size %lu," diff --git a/storage/innobase/mtr/mtr0log.c b/storage/innobase/mtr/mtr0log.c index ba5284d9169..cb03f207a56 100644 --- a/storage/innobase/mtr/mtr0log.c +++ b/storage/innobase/mtr/mtr0log.c @@ -445,21 +445,21 @@ mlog_open_and_write_index( dict_index_get_n_unique_in_tree(index)); log_ptr += 2; for (i = 0; i < n; i++) { - dict_field_t* field; - dtype_t* type; - ulint len; + dict_field_t* field; + const dict_col_t* col; + ulint len; + field = dict_index_get_nth_field(index, i); - type = dict_col_get_type(dict_field_get_col(field)); + col = dict_field_get_col(field); len = field->fixed_len; ut_ad(len < 0x7fff); if (len == 0 - && (dtype_get_len(type) > 255 - || dtype_get_mtype(type) == DATA_BLOB)) { + && (col->len > 255 || col->mtype == DATA_BLOB)) { /* variable-length field with maximum length > 255 */ len = 0x7fff; } - if (dtype_get_prtype(type) & DATA_NOT_NULL) { + if (col->prtype & DATA_NOT_NULL) { len |= 0x8000; } if (log_ptr + 2 > log_end) { @@ -541,14 +541,16 @@ mlog_parse_index( /* The high-order bit of len is the NOT NULL flag; the rest is 0 or 0x7fff for variable-length fields, and 1..0x7ffe for fixed-length fields. */ - dict_mem_table_add_col - (table, "DUMMY", - ((len + 1) & 0x7fff) <= 1 - ? DATA_BINARY : DATA_FIXBINARY, - len & 0x8000 ? DATA_NOT_NULL : 0, - len & 0x7fff, 0); - dict_index_add_col - (ind, dict_table_get_nth_col(table, i), 0); + dict_mem_table_add_col( + table, "DUMMY", + ((len + 1) & 0x7fff) <= 1 + ? DATA_BINARY : DATA_FIXBINARY, + len & 0x8000 ? 
DATA_NOT_NULL : 0, + len & 0x7fff); + + dict_index_add_col(ind, table, (dict_col_t*) + dict_table_get_nth_col(table, i), + 0); } ptr += 2; } diff --git a/storage/innobase/mtr/mtr0mtr.c b/storage/innobase/mtr/mtr0mtr.c index 13c579c646b..1a65da6e7f4 100644 --- a/storage/innobase/mtr/mtr0mtr.c +++ b/storage/innobase/mtr/mtr0mtr.c @@ -131,9 +131,9 @@ mtr_log_reserve_and_write( } if (mlog->heap == NULL) { - mtr->end_lsn = log_reserve_and_write_fast - (first_data, dyn_block_get_used(mlog), - &(mtr->start_lsn), &success); + mtr->end_lsn = log_reserve_and_write_fast( + first_data, dyn_block_get_used(mlog), + &(mtr->start_lsn), &success); if (success) { return; diff --git a/storage/innobase/os/os0file.c b/storage/innobase/os/os0file.c index 65903717cfb..a4acb0cd485 100644 --- a/storage/innobase/os/os0file.c +++ b/storage/innobase/os/os0file.c @@ -3483,9 +3483,9 @@ try_again: #endif } else { if (!wake_later) { - os_aio_simulated_wake_handler_thread - (os_aio_get_segment_no_from_slot - (array, slot)); + os_aio_simulated_wake_handler_thread( + os_aio_get_segment_no_from_slot( + array, slot)); } } } else if (type == OS_FILE_WRITE) { @@ -3501,9 +3501,9 @@ try_again: #endif } else { if (!wake_later) { - os_aio_simulated_wake_handler_thread - (os_aio_get_segment_no_from_slot - (array, slot)); + os_aio_simulated_wake_handler_thread( + os_aio_get_segment_no_from_slot( + array, slot)); } } } else { diff --git a/storage/innobase/os/os0thread.c b/storage/innobase/os/os0thread.c index 0c9434ccc7f..a0b1e51d359 100644 --- a/storage/innobase/os/os0thread.c +++ b/storage/innobase/os/os0thread.c @@ -142,8 +142,9 @@ os_thread_create( AIX is always big enough. An empirical test on AIX-4.3 suggested the size was 96 kB, though. */ - ret = pthread_attr_setstacksize - (&attr, (size_t)(PTHREAD_STACK_MIN + 32 * 1024)); + ret = pthread_attr_setstacksize(&attr, + (size_t)(PTHREAD_STACK_MIN + + 32 * 1024)); if (ret) { fprintf(stderr, "InnoDB: Error: pthread_attr_setstacksize" diff --git a/storage/innobase/page/page0cur.c b/storage/innobase/page/page0cur.c index 2a440c4f479..a297eb545a0 100644 --- a/storage/innobase/page/page0cur.c +++ b/storage/innobase/page/page0cur.c @@ -259,11 +259,11 @@ page_cur_search_with_match( && (page_header_get_ptr(page, PAGE_LAST_INSERT)) && (page_header_get_field(page, PAGE_DIRECTION) == PAGE_RIGHT)) { - if (page_cur_try_search_shortcut - (page, index, tuple, - iup_matched_fields, iup_matched_bytes, - ilow_matched_fields, ilow_matched_bytes, - cursor)) { + if (page_cur_try_search_shortcut( + page, index, tuple, + iup_matched_fields, iup_matched_bytes, + ilow_matched_fields, ilow_matched_bytes, + cursor)) { return; } } @@ -326,8 +326,9 @@ low_slot_match: } else if (UNIV_EXPECT(cmp, -1)) { #ifdef PAGE_CUR_LE_OR_EXTENDS if (mode == PAGE_CUR_LE_OR_EXTENDS - && page_cur_rec_field_extends - (tuple, mid_rec, offsets, cur_matched_fields)) { + && page_cur_rec_field_extends( + tuple, mid_rec, offsets, + cur_matched_fields)) { goto low_slot_match; } @@ -382,8 +383,9 @@ low_rec_match: } else if (UNIV_EXPECT(cmp, -1)) { #ifdef PAGE_CUR_LE_OR_EXTENDS if (mode == PAGE_CUR_LE_OR_EXTENDS - && page_cur_rec_field_extends - (tuple, mid_rec, offsets, cur_matched_fields)) { + && page_cur_rec_field_extends( + tuple, mid_rec, offsets, + cur_matched_fields)) { goto low_rec_match; } @@ -645,9 +647,9 @@ page_cur_insert_rec_write_log( + extra_info_yes); if (extra_info_yes) { /* Write the info bits */ - mach_write_to_1 - (log_ptr, - rec_get_info_and_status_bits(insert_rec, comp)); + mach_write_to_1(log_ptr, + 
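
The mlog_open_and_write_index()/mlog_parse_index() change earlier in this hunk reads the per-field metadata straight from dict_col_t. The 2-byte encoding it writes is easy to reproduce in isolation; in the sketch below, encode_field() and its parameters are stand-ins for field->fixed_len, col->len, col->mtype == DATA_BLOB and col->prtype & DATA_NOT_NULL.

#include <stdio.h>

/* high-order bit: NOT NULL flag; remaining 15 bits: 0 or 0x7fff for
variable-length fields, 1..0x7ffe for fixed-length fields */
static unsigned encode_field(unsigned fixed_len, unsigned max_len,
			     int is_blob, int not_null)
{
	unsigned	len = fixed_len;

	if (len == 0 && (max_len > 255 || is_blob)) {
		len = 0x7fff;	/* long variable-length field */
	}
	if (not_null) {
		len |= 0x8000;	/* NOT NULL flag in the high-order bit */
	}
	return len;
}

int main(void)
{
	/* a NOT NULL variable-length column longer than 255 bytes: 0xffff */
	printf("0x%04x\n", encode_field(0, 300, 0, 1));
	/* a nullable fixed-length 4-byte column: 0x0004 */
	printf("0x%04x\n", encode_field(4, 4, 0, 0));
	return 0;
}
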
rec_get_info_and_status_bits(insert_rec, + comp)); log_ptr++; /* Write the record origin offset */ @@ -796,8 +798,8 @@ page_cur_parse_insert_rec( ULINT_UNDEFINED, &heap); if (extra_info_yes == 0) { - info_and_status_bits = rec_get_info_and_status_bits - (cursor_rec, page_is_comp(page)); + info_and_status_bits = rec_get_info_and_status_bits( + cursor_rec, page_is_comp(page)); origin_offset = rec_offs_extra_size(offsets); mismatch_index = rec_offs_size(offsets) - end_seg_len; } @@ -975,8 +977,8 @@ page_cur_insert_rec_low( page_header_set_field(page, PAGE_DIRECTION, PAGE_RIGHT); page_header_set_field(page, PAGE_N_DIRECTION, - page_header_get_field - (page, PAGE_N_DIRECTION) + 1); + page_header_get_field( + page, PAGE_N_DIRECTION) + 1); } else if ((page_rec_get_next(insert_rec) == last_insert) && (page_header_get_field(page, PAGE_DIRECTION) @@ -984,8 +986,8 @@ page_cur_insert_rec_low( page_header_set_field(page, PAGE_DIRECTION, PAGE_LEFT); page_header_set_field(page, PAGE_N_DIRECTION, - page_header_get_field - (page, PAGE_N_DIRECTION) + 1); + page_header_get_field( + page, PAGE_N_DIRECTION) + 1); } else { page_header_set_field(page, PAGE_DIRECTION, PAGE_NO_DIRECTION); page_header_set_field(page, PAGE_N_DIRECTION, 0); @@ -1278,7 +1280,7 @@ page_cur_delete_rec_write_log( } /* Write the cursor rec offset as a 2-byte ulint */ - mach_write_to_2(log_ptr, ut_align_offset(rec, UNIV_PAGE_SIZE)); + mach_write_to_2(log_ptr, page_offset(rec)); mlog_close(mtr, log_ptr + 2); } diff --git a/storage/innobase/page/page0page.c b/storage/innobase/page/page0page.c index 0c869445546..4212df7a631 100644 --- a/storage/innobase/page/page0page.c +++ b/storage/innobase/page/page0page.c @@ -381,7 +381,7 @@ page_create( dfield_set_data(field, "infimum", 8); dtype_set(dfield_get_type(field), - DATA_VARCHAR, DATA_ENGLISH | DATA_NOT_NULL, 8, 0); + DATA_VARCHAR, DATA_ENGLISH | DATA_NOT_NULL, 8); /* Set the corresponding physical record to its place in the page record heap */ @@ -407,7 +407,7 @@ page_create( dfield_set_data(field, "supremum", comp ? 8 : 9); dtype_set(dfield_get_type(field), - DATA_VARCHAR, DATA_ENGLISH | DATA_NOT_NULL, comp ? 8 : 9, 0); + DATA_VARCHAR, DATA_ENGLISH | DATA_NOT_NULL, comp ? 
8 : 9); supremum_rec = rec_convert_dtuple_to_rec(heap_top, index, tuple); @@ -650,7 +650,7 @@ page_delete_rec_list_write_log( log_ptr = mlog_open_and_write_index(mtr, rec, index, type, 2); if (log_ptr) { /* Write the parameter as a 2-byte ulint */ - mach_write_to_2(log_ptr, ut_align_offset(rec, UNIV_PAGE_SIZE)); + mach_write_to_2(log_ptr, page_offset(rec)); mlog_close(mtr, log_ptr + 2); } } diff --git a/storage/innobase/pars/pars0opt.c b/storage/innobase/pars/pars0opt.c index 7c564d3ee4c..2abe6720235 100644 --- a/storage/innobase/pars/pars0opt.c +++ b/storage/innobase/pars/pars0opt.c @@ -333,9 +333,9 @@ opt_calc_index_goodness( col_no = dict_index_get_nth_col_no(index, j); - exp = opt_look_for_col_in_cond_before - (OPT_EQUAL, col_no, sel_node->search_cond, - sel_node, nth_table, &op); + exp = opt_look_for_col_in_cond_before( + OPT_EQUAL, col_no, sel_node->search_cond, + sel_node, nth_table, &op); if (exp) { /* The value for this column is exactly known already at this stage of the join */ @@ -346,9 +346,9 @@ opt_calc_index_goodness( } else { /* Look for non-equality comparisons */ - exp = opt_look_for_col_in_cond_before - (OPT_COMPARISON, col_no, sel_node->search_cond, - sel_node, nth_table, &op); + exp = opt_look_for_col_in_cond_before( + OPT_COMPARISON, col_no, sel_node->search_cond, + sel_node, nth_table, &op); if (exp) { index_plan[j] = exp; *last_op = op; @@ -675,10 +675,11 @@ opt_classify_comparison( access the table, it is classified as OPT_END_COND */ if ((dict_index_get_n_fields(plan->index) > plan->n_exact_match) - && opt_look_for_col_in_comparison_before - (OPT_COMPARISON, - dict_index_get_nth_col_no(plan->index, plan->n_exact_match), - cond, sel_node, i, &op)) { + && opt_look_for_col_in_comparison_before( + OPT_COMPARISON, + dict_index_get_nth_col_no(plan->index, + plan->n_exact_match), + cond, sel_node, i, &op)) { if (sel_node->asc && ((op == '<') || (op == PARS_LE_TOKEN))) { @@ -903,8 +904,8 @@ opt_find_all_cols( /* Fill in the field_no fields in sym_node */ - sym_node->field_nos[SYM_CLUST_FIELD_NO] = dict_index_get_nth_col_pos - (dict_table_get_first_index(index->table), sym_node->col_no); + sym_node->field_nos[SYM_CLUST_FIELD_NO] = dict_index_get_nth_col_pos( + dict_table_get_first_index(index->table), sym_node->col_no); if (!(index->type & DICT_CLUSTERED)) { ut_a(plan); diff --git a/storage/innobase/pars/pars0pars.c b/storage/innobase/pars/pars0pars.c index 214fff68c55..6861844870c 100644 --- a/storage/innobase/pars/pars0pars.c +++ b/storage/innobase/pars/pars0pars.c @@ -254,43 +254,43 @@ pars_resolve_func_data_type( == DATA_INT); } else if (func == PARS_COUNT_TOKEN) { ut_a(arg); - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if (func == PARS_TO_CHAR_TOKEN) { ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_INT); dtype_set(que_node_get_data_type(node), DATA_VARCHAR, - DATA_ENGLISH, 0, 0); + DATA_ENGLISH, 0); } else if (func == PARS_TO_BINARY_TOKEN) { if (dtype_get_mtype(que_node_get_data_type(arg)) == DATA_INT) { dtype_set(que_node_get_data_type(node), DATA_VARCHAR, - DATA_ENGLISH, 0, 0); + DATA_ENGLISH, 0); } else { dtype_set(que_node_get_data_type(node), DATA_BINARY, - 0, 0, 0); + 0, 0); } } else if (func == PARS_TO_NUMBER_TOKEN) { ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_VARCHAR); - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if (func == PARS_BINARY_TO_NUMBER_TOKEN) { 
ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_VARCHAR); - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if (func == PARS_LENGTH_TOKEN) { ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_VARCHAR); - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if (func == PARS_INSTR_TOKEN) { ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_VARCHAR); - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if (func == PARS_SYSDATE_TOKEN) { ut_a(arg == NULL); - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if ((func == PARS_SUBSTR_TOKEN) || (func == PARS_CONCAT_TOKEN)) { @@ -298,7 +298,7 @@ pars_resolve_func_data_type( ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_VARCHAR); dtype_set(que_node_get_data_type(node), DATA_VARCHAR, - DATA_ENGLISH, 0, 0); + DATA_ENGLISH, 0); } else if ((func == '>') || (func == '<') || (func == '=') || (func == PARS_GE_TOKEN) @@ -310,18 +310,18 @@ pars_resolve_func_data_type( || (func == PARS_NOTFOUND_TOKEN)) { /* We currently have no iboolean type: use integer type */ - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if (func == PARS_RND_TOKEN) { ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_INT); - dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4, 0); + dtype_set(que_node_get_data_type(node), DATA_INT, 0, 4); } else if (func == PARS_RND_STR_TOKEN) { ut_a(dtype_get_mtype(que_node_get_data_type(arg)) == DATA_INT); dtype_set(que_node_get_data_type(node), DATA_VARCHAR, - DATA_ENGLISH, 0, 0); + DATA_ENGLISH, 0); } else { ut_error; } @@ -450,7 +450,6 @@ pars_resolve_exp_columns( sym_node_t* sym_node; dict_table_t* table; sym_node_t* t_node; - dict_col_t* col; ulint n_cols; ulint i; @@ -490,10 +489,13 @@ pars_resolve_exp_columns( n_cols = dict_table_get_n_cols(table); for (i = 0; i < n_cols; i++) { - col = dict_table_get_nth_col(table, i); + const dict_col_t* col + = dict_table_get_nth_col(table, i); + const char* col_name + = dict_table_get_col_name(table, i); - if ((sym_node->name_len == ut_strlen(col->name)) - && (0 == ut_memcmp(sym_node->name, col->name, + if ((sym_node->name_len == ut_strlen(col_name)) + && (0 == ut_memcmp(sym_node->name, col_name, sym_node->name_len))) { /* Found */ sym_node->resolved = TRUE; @@ -502,8 +504,10 @@ pars_resolve_exp_columns( sym_node->col_no = i; sym_node->prefetch_buf = NULL; - dfield_set_type(&(sym_node->common.val), - dict_col_get_type(col)); + dict_col_copy_type( + col, + dfield_get_type(&sym_node + ->common.val)); return; } @@ -592,7 +596,6 @@ pars_select_all_columns( sym_node_t* col_node; sym_node_t* table_node; dict_table_t* table; - dict_col_t* col; ulint i; select_node->select_list = NULL; @@ -603,15 +606,15 @@ pars_select_all_columns( table = table_node->table; for (i = 0; i < dict_table_get_n_user_cols(table); i++) { - - col = dict_table_get_nth_col(table, i); + const char* col_name = dict_table_get_col_name( + table, i); col_node = sym_tab_add_id(pars_sym_tab_global, - (byte*)col->name, - ut_strlen(col->name)); - select_node->select_list - = que_node_list_add_last - (select_node->select_list, col_node); + (byte*)col_name, + ut_strlen(col_name)); + + select_node->select_list = 
que_node_list_add_last( + select_node->select_list, col_node); } table_node = que_node_get_next(table_node); @@ -888,10 +891,12 @@ pars_process_assign_list( pars_resolve_exp_columns(table_sym, assign_node->val); pars_resolve_exp_variables_and_types(NULL, assign_node->val); #if 0 - ut_a(dtype_get_mtype - (dfield_get_type(que_node_get_val(assign_node->col))) - == dtype_get_mtype - (dfield_get_type(que_node_get_val(assign_node->val)))); + ut_a(dtype_get_mtype( + dfield_get_type(que_node_get_val( + assign_node->col))) + == dtype_get_mtype( + dfield_get_type(que_node_get_val( + assign_node->val)))); #endif /* Add to the update node all the columns found in assignment @@ -915,13 +920,14 @@ pars_process_assign_list( col_sym = assign_node->col; - upd_field_set_field_no(upd_field, dict_index_get_nth_col_pos - (clust_index, col_sym->col_no), + upd_field_set_field_no(upd_field, dict_index_get_nth_col_pos( + clust_index, col_sym->col_no), clust_index, NULL); upd_field->exp = assign_node->val; - if (!dtype_is_fixed_size(dict_index_get_nth_type - (clust_index, upd_field->field_no))) { + if (!dict_col_get_fixed_size( + dict_index_get_nth_col(clust_index, + upd_field->field_no))) { changes_field_size = 0; } @@ -1126,23 +1132,23 @@ pars_set_dfield_type( if (type == &pars_int_token) { ut_a(len == 0); - dtype_set(dfield_get_type(dfield), DATA_INT, flags, 4, 0); + dtype_set(dfield_get_type(dfield), DATA_INT, flags, 4); } else if (type == &pars_char_token) { ut_a(len == 0); dtype_set(dfield_get_type(dfield), DATA_VARCHAR, - DATA_ENGLISH | flags, 0, 0); + DATA_ENGLISH | flags, 0); } else if (type == &pars_binary_token) { ut_a(len != 0); dtype_set(dfield_get_type(dfield), DATA_FIXBINARY, - DATA_BINARY_TYPE | flags, len, 0); + DATA_BINARY_TYPE | flags, len); } else if (type == &pars_blob_token) { ut_a(len == 0); dtype_set(dfield_get_type(dfield), DATA_BLOB, - DATA_BINARY_TYPE | flags, 0, 0); + DATA_BINARY_TYPE | flags, 0); } else { ut_error; } @@ -1599,7 +1605,8 @@ pars_create_table( sym_node_t* table_sym, /* in: table name node in the symbol table */ sym_node_t* column_defs, /* in: list of column names */ - void* not_fit_in_memory)/* in: a non-NULL pointer means that + void* not_fit_in_memory __attribute__((unused))) + /* in: a non-NULL pointer means that this is a table which in simulations should be simulated as not fitting in memory; thread is put to sleep @@ -1623,18 +1630,18 @@ pars_create_table( create tables in the old (not compact) record format. 
*/ table = dict_mem_table_create(table_sym->name, 0, n_cols, 0); +#ifdef UNIV_DEBUG if (not_fit_in_memory != NULL) { table->does_not_fit_in_memory = TRUE; } - +#endif /* UNIV_DEBUG */ column = column_defs; while (column) { dtype = dfield_get_type(que_node_get_val(column)); dict_mem_table_add_col(table, column->name, dtype->mtype, - dtype->prtype, dtype->len, - dtype->prec); + dtype->prtype, dtype->len); column->resolved = TRUE; column->token_type = SYM_COLUMN; @@ -1859,8 +1866,8 @@ pars_sql( pars_sym_tab_global = sym_tab_create(heap); pars_sym_tab_global->string_len = strlen(str); - pars_sym_tab_global->sql_string = mem_heap_dup - (heap, str, pars_sym_tab_global->string_len + 1); + pars_sym_tab_global->sql_string = mem_heap_dup( + heap, str, pars_sym_tab_global->string_len + 1); pars_sym_tab_global->next_char_pos = 0; pars_sym_tab_global->info = info; diff --git a/storage/innobase/pars/pars0sym.c b/storage/innobase/pars/pars0sym.c index 8d691febb14..2d56fff2d42 100644 --- a/storage/innobase/pars/pars0sym.c +++ b/storage/innobase/pars/pars0sym.c @@ -102,7 +102,7 @@ sym_tab_add_int_lit( node->indirection = NULL; - dtype_set(&(node->common.val.type), DATA_INT, 0, 4, 0); + dtype_set(&(node->common.val.type), DATA_INT, 0, 4); data = mem_heap_alloc(sym_tab->heap, 4); mach_write_to_4(data, val); @@ -144,7 +144,7 @@ sym_tab_add_str_lit( node->indirection = NULL; - dtype_set(&(node->common.val.type), DATA_VARCHAR, DATA_ENGLISH, 0, 0); + dtype_set(&(node->common.val.type), DATA_VARCHAR, DATA_ENGLISH, 0); if (len) { data = mem_heap_alloc(sym_tab->heap, len); @@ -226,7 +226,7 @@ sym_tab_add_bound_lit( ut_error; } - dtype_set(&(node->common.val.type), blit->type, blit->prtype, len, 0); + dtype_set(&(node->common.val.type), blit->type, blit->prtype, len); dfield_set_data(&(node->common.val), blit->address, blit->length); diff --git a/storage/innobase/read/read0read.c b/storage/innobase/read/read0read.c index 4b9a116c3c1..2375c35190a 100644 --- a/storage/innobase/read/read0read.c +++ b/storage/innobase/read/read0read.c @@ -200,8 +200,9 @@ read_view_oldest_copy_or_open_new( insert_done = 1; } else { read_view_set_nth_trx_id(view_copy, i, - read_view_get_nth_trx_id - (old_view, i - insert_done)); + read_view_get_nth_trx_id( + old_view, + i - insert_done)); } i++; @@ -216,8 +217,8 @@ read_view_oldest_copy_or_open_new( if (n > 0) { /* The last active transaction has the smallest id: */ - view_copy->up_limit_id = read_view_get_nth_trx_id - (view_copy, n - 1); + view_copy->up_limit_id = read_view_get_nth_trx_id( + view_copy, n - 1); } else { view_copy->up_limit_id = old_view->up_limit_id; } @@ -379,10 +380,10 @@ read_view_print( for (i = 0; i < n_ids; i++) { fprintf(stderr, "Read view trx id %lu %lu\n", - (ulong) ut_dulint_get_high - (read_view_get_nth_trx_id(view, i)), - (ulong) ut_dulint_get_low - (read_view_get_nth_trx_id(view, i))); + (ulong) ut_dulint_get_high( + read_view_get_nth_trx_id(view, i)), + (ulong) ut_dulint_get_low( + read_view_get_nth_trx_id(view, i))); } } @@ -420,8 +421,8 @@ read_cursor_view_create_for_mysql( mutex_enter(&kernel_mutex); - curview->read_view = read_view_create_low - (UT_LIST_GET_LEN(trx_sys->trx_list), curview->heap); + curview->read_view = read_view_create_low( + UT_LIST_GET_LEN(trx_sys->trx_list), curview->heap); view = curview->read_view; view->creator_trx_id = cr_trx->id; diff --git a/storage/innobase/rem/rem0cmp.c b/storage/innobase/rem/rem0cmp.c index 57a028352fc..07e5b64c157 100644 --- a/storage/innobase/rem/rem0cmp.c +++ b/storage/innobase/rem/rem0cmp.c @@ -92,47 
+92,48 @@ cmp_collate( } /***************************************************************** -Returns TRUE if two types are equal for comparison purposes. */ +Returns TRUE if two columns are equal for comparison purposes. */ ibool -cmp_types_are_equal( -/*================*/ - /* out: TRUE if the types are considered - equal in comparisons */ - dtype_t* type1, /* in: type 1 */ - dtype_t* type2, /* in: type 2 */ - ibool check_charsets) /* in: whether to check charsets */ +cmp_cols_are_equal( +/*===============*/ + /* out: TRUE if the columns are + considered equal in comparisons */ + const dict_col_t* col1, /* in: column 1 */ + const dict_col_t* col2, /* in: column 2 */ + ibool check_charsets) + /* in: whether to check charsets */ { - if (dtype_is_non_binary_string_type(type1->mtype, type1->prtype) - && dtype_is_non_binary_string_type(type2->mtype, type2->prtype)) { + if (dtype_is_non_binary_string_type(col1->mtype, col1->prtype) + && dtype_is_non_binary_string_type(col2->mtype, col2->prtype)) { /* Both are non-binary string types: they can be compared if and only if the charset-collation is the same */ if (check_charsets) { - return(dtype_get_charset_coll(type1->prtype) - == dtype_get_charset_coll(type2->prtype)); + return(dtype_get_charset_coll(col1->prtype) + == dtype_get_charset_coll(col2->prtype)); } else { return(TRUE); } } - if (dtype_is_binary_string_type(type1->mtype, type1->prtype) - && dtype_is_binary_string_type(type2->mtype, type2->prtype)) { + if (dtype_is_binary_string_type(col1->mtype, col1->prtype) + && dtype_is_binary_string_type(col2->mtype, col2->prtype)) { /* Both are binary string types: they can be compared */ return(TRUE); } - if (type1->mtype != type2->mtype) { + if (col1->mtype != col2->mtype) { return(FALSE); } - if (type1->mtype == DATA_INT - && (type1->prtype & DATA_UNSIGNED) - != (type2->prtype & DATA_UNSIGNED)) { + if (col1->mtype == DATA_INT + && (col1->prtype & DATA_UNSIGNED) + != (col2->prtype & DATA_UNSIGNED)) { /* The storage format of an unsigned integer is different from a signed integer: in a signed integer we OR @@ -141,12 +142,7 @@ cmp_types_are_equal( return(FALSE); } - if (type1->mtype == DATA_INT && type1->len != type2->len) { - - return(FALSE); - } - - return(TRUE); + return(col1->mtype != DATA_INT || col1->len == col2->len); } #ifndef UNIV_HOTBACKUP @@ -159,7 +155,8 @@ cmp_whole_field( /*============*/ /* out: 1, 0, -1, if a is greater, equal, less than b, respectively */ - dtype_t* type, /* in: data type */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ unsigned char* a, /* in: data field */ unsigned int a_length, /* in: data field length, not UNIV_SQL_NULL */ @@ -172,11 +169,8 @@ cmp_whole_field( double d_1; double d_2; int swap_flag = 1; - ulint data_type; - data_type = type->mtype; - - switch (data_type) { + switch (mtype) { case DATA_DECIMAL: /* Remove preceding spaces */ @@ -253,11 +247,8 @@ cmp_whole_field( } return(0); - case DATA_VARMYSQL: - case DATA_MYSQL: case DATA_BLOB: - if (data_type == DATA_BLOB - && 0 != (type->prtype & DATA_BINARY_TYPE)) { + if (prtype & DATA_BINARY_TYPE) { ut_print_timestamp(stderr); fprintf(stderr, @@ -265,15 +256,17 @@ cmp_whole_field( " with a character set sensitive\n" "InnoDB: comparison!\n"); } - - return(innobase_mysql_cmp - ((int)(type->prtype & DATA_MYSQL_TYPE_MASK), - (uint)dtype_get_charset_coll(type->prtype), - a, a_length, b, b_length)); + /* fall through */ + case DATA_VARMYSQL: + case DATA_MYSQL: + return(innobase_mysql_cmp( + (int)(prtype & DATA_MYSQL_TYPE_MASK), + 
(uint)dtype_get_charset_coll(prtype), + a, a_length, b, b_length)); default: fprintf(stderr, "InnoDB: unknown type number %lu\n", - (ulong) data_type); + (ulong) mtype); ut_error; } @@ -290,7 +283,8 @@ cmp_data_data_slow( /*===============*/ /* out: 1, 0, -1, if data1 is greater, equal, less than data2, respectively */ - dtype_t* cur_type,/* in: data type of the fields */ + ulint mtype, /* in: main type */ + ulint prtype, /* in: precise type */ byte* data1, /* in: data field (== a pointer to a memory buffer) */ ulint len1, /* in: data field length or UNIV_SQL_NULL */ @@ -303,8 +297,6 @@ cmp_data_data_slow( ulint data2_byte; ulint cur_bytes; - ut_ad(dtype_validate(cur_type)); - if (len1 == UNIV_SQL_NULL || len2 == UNIV_SQL_NULL) { if (len1 == len2) { @@ -322,13 +314,13 @@ cmp_data_data_slow( return(1); } - if (cur_type->mtype >= DATA_FLOAT - || (cur_type->mtype == DATA_BLOB - && 0 == (cur_type->prtype & DATA_BINARY_TYPE) - && dtype_get_charset_coll(cur_type->prtype) + if (mtype >= DATA_FLOAT + || (mtype == DATA_BLOB + && 0 == (prtype & DATA_BINARY_TYPE) + && dtype_get_charset_coll(prtype) != DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) { - return(cmp_whole_field(cur_type, + return(cmp_whole_field(mtype, prtype, data1, (unsigned) len1, data2, (unsigned) len2)); } @@ -344,7 +336,7 @@ cmp_data_data_slow( return(0); } - data1_byte = dtype_get_pad_char(cur_type); + data1_byte = dtype_get_pad_char(mtype, prtype); if (data1_byte == ULINT_UNDEFINED) { @@ -355,7 +347,7 @@ cmp_data_data_slow( } if (len2 <= cur_bytes) { - data2_byte = dtype_get_pad_char(cur_type); + data2_byte = dtype_get_pad_char(mtype, prtype); if (data2_byte == ULINT_UNDEFINED) { @@ -372,9 +364,9 @@ cmp_data_data_slow( goto next_byte; } - if (cur_type->mtype <= DATA_CHAR - || (cur_type->mtype == DATA_BLOB - && 0 == (cur_type->prtype & DATA_BINARY_TYPE))) { + if (mtype <= DATA_CHAR + || (mtype == DATA_BLOB + && 0 == (prtype & DATA_BINARY_TYPE))) { data1_byte = cmp_collate(data1_byte); data2_byte = cmp_collate(data2_byte); @@ -435,8 +427,6 @@ cmp_dtuple_rec_with_match( value for current comparison */ { #ifndef UNIV_HOTBACKUP - dtype_t* cur_type; /* pointer to type of the current - field in dtuple */ dfield_t* dtuple_field; /* current field in logical record */ ulint dtuple_f_len; /* the length of the current field in the logical record */ @@ -483,8 +473,17 @@ cmp_dtuple_rec_with_match( while (cur_field < dtuple_get_n_fields_cmp(dtuple)) { + ulint mtype; + ulint prtype; + dtuple_field = dtuple_get_nth_field(dtuple, cur_field); - cur_type = dfield_get_type(dtuple_field); + { + const dtype_t* type + = dfield_get_type(dtuple_field); + + mtype = type->mtype; + prtype = type->prtype; + } dtuple_f_len = dfield_get_len(dtuple_field); @@ -524,13 +523,13 @@ cmp_dtuple_rec_with_match( } } - if (cur_type->mtype >= DATA_FLOAT - || (cur_type->mtype == DATA_BLOB - && 0 == (cur_type->prtype & DATA_BINARY_TYPE) - && dtype_get_charset_coll(cur_type->prtype) + if (mtype >= DATA_FLOAT + || (mtype == DATA_BLOB + && 0 == (prtype & DATA_BINARY_TYPE) + && dtype_get_charset_coll(prtype) != DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) { - ret = cmp_whole_field(cur_type, + ret = cmp_whole_field(mtype, prtype, dfield_get_data(dtuple_field), (unsigned) dtuple_f_len, rec_b_ptr, (unsigned) rec_f_len); @@ -558,7 +557,7 @@ cmp_dtuple_rec_with_match( goto next_field; } - rec_byte = dtype_get_pad_char(cur_type); + rec_byte = dtype_get_pad_char(mtype, prtype); if (rec_byte == ULINT_UNDEFINED) { ret = 1; @@ -570,7 +569,8 @@ cmp_dtuple_rec_with_match( } if 
(UNIV_UNLIKELY(dtuple_f_len <= cur_bytes)) { - dtuple_byte = dtype_get_pad_char(cur_type); + dtuple_byte = dtype_get_pad_char(mtype, + prtype); if (dtuple_byte == ULINT_UNDEFINED) { ret = -1; @@ -589,9 +589,9 @@ cmp_dtuple_rec_with_match( goto next_byte; } - if (cur_type->mtype <= DATA_CHAR - || (cur_type->mtype == DATA_BLOB - && !(cur_type->prtype & DATA_BINARY_TYPE))) { + if (mtype <= DATA_CHAR + || (mtype == DATA_BLOB + && !(prtype & DATA_BINARY_TYPE))) { rec_byte = cmp_collate(rec_byte); dtuple_byte = cmp_collate(dtuple_byte); @@ -696,8 +696,8 @@ cmp_dtuple_is_prefix_of_rec( } if (matched_fields == n_fields - 1 - && matched_bytes == dfield_get_len - (dtuple_get_nth_field(dtuple, n_fields - 1))) { + && matched_bytes == dfield_get_len( + dtuple_get_nth_field(dtuple, n_fields - 1))) { return(TRUE); } @@ -730,8 +730,6 @@ cmp_rec_rec_with_match( the value for the current comparison */ { #ifndef UNIV_HOTBACKUP - dtype_t* cur_type; /* pointer to type struct of the - current field in index */ ulint rec1_n_fields; /* the number of fields in rec */ ulint rec1_f_len; /* length of current field in rec */ byte* rec1_b_ptr; /* pointer to the current byte in rec field */ @@ -764,12 +762,19 @@ cmp_rec_rec_with_match( while ((cur_field < rec1_n_fields) && (cur_field < rec2_n_fields)) { - if (index->type & DICT_UNIVERSAL) { - cur_type = dtype_binary; + ulint mtype; + ulint prtype; + + if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) { + /* This is for the insert buffer B-tree. */ + mtype = DATA_BINARY; + prtype = 0; } else { - cur_type = dict_col_get_type - (dict_field_get_col(dict_index_get_nth_field - (index, cur_field))); + const dict_col_t* col + = dict_index_get_nth_col(index, cur_field); + + mtype = col->mtype; + prtype = col->prtype; } rec1_b_ptr = rec_get_nth_field(rec1, offsets1, @@ -834,13 +839,13 @@ cmp_rec_rec_with_match( } } - if (cur_type->mtype >= DATA_FLOAT - || (cur_type->mtype == DATA_BLOB - && 0 == (cur_type->prtype & DATA_BINARY_TYPE) - && dtype_get_charset_coll(cur_type->prtype) + if (mtype >= DATA_FLOAT + || (mtype == DATA_BLOB + && 0 == (prtype & DATA_BINARY_TYPE) + && dtype_get_charset_coll(prtype) != DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) { - ret = cmp_whole_field(cur_type, + ret = cmp_whole_field(mtype, prtype, rec1_b_ptr, (unsigned) rec1_f_len, rec2_b_ptr, @@ -867,7 +872,7 @@ cmp_rec_rec_with_match( goto next_field; } - rec2_byte = dtype_get_pad_char(cur_type); + rec2_byte = dtype_get_pad_char(mtype, prtype); if (rec2_byte == ULINT_UNDEFINED) { ret = 1; @@ -879,7 +884,7 @@ cmp_rec_rec_with_match( } if (rec1_f_len <= cur_bytes) { - rec1_byte = dtype_get_pad_char(cur_type); + rec1_byte = dtype_get_pad_char(mtype, prtype); if (rec1_byte == ULINT_UNDEFINED) { ret = -1; @@ -898,9 +903,9 @@ cmp_rec_rec_with_match( goto next_byte; } - if (cur_type->mtype <= DATA_CHAR - || (cur_type->mtype == DATA_BLOB - && !(cur_type->prtype & DATA_BINARY_TYPE))) { + if (mtype <= DATA_CHAR + || (mtype == DATA_BLOB + && !(prtype & DATA_BINARY_TYPE))) { rec1_byte = cmp_collate(rec1_byte); rec2_byte = cmp_collate(rec2_byte); @@ -972,8 +977,6 @@ cmp_debug_dtuple_rec_with_match( returns, contains the value for current comparison */ { - dtype_t* cur_type; /* pointer to type of the current - field in dtuple */ dfield_t* dtuple_field; /* current field in logical record */ ulint dtuple_f_len; /* the length of the current field in the logical record */ @@ -1014,9 +1017,17 @@ cmp_debug_dtuple_rec_with_match( while (cur_field < dtuple_get_n_fields_cmp(dtuple)) { - dtuple_field = dtuple_get_nth_field(dtuple, 
cur_field); + ulint mtype; + ulint prtype; - cur_type = dfield_get_type(dtuple_field); + dtuple_field = dtuple_get_nth_field(dtuple, cur_field); + { + const dtype_t* type + = dfield_get_type(dtuple_field); + + mtype = type->mtype; + prtype = type->prtype; + } dtuple_f_data = dfield_get_data(dtuple_field); dtuple_f_len = dfield_get_len(dtuple_field); @@ -1032,7 +1043,7 @@ cmp_debug_dtuple_rec_with_match( goto order_resolved; } - ret = cmp_data_data(cur_type, dtuple_f_data, dtuple_f_len, + ret = cmp_data_data(mtype, prtype, dtuple_f_data, dtuple_f_len, rec_f_data, rec_f_len); if (ret != 0) { goto order_resolved; diff --git a/storage/innobase/rem/rem0rec.c b/storage/innobase/rem/rem0rec.c index c57a19f0230..549b5ee8b28 100644 --- a/storage/innobase/rem/rem0rec.c +++ b/storage/innobase/rem/rem0rec.c @@ -202,8 +202,7 @@ rec_init_offsets( } field = dict_index_get_nth_field(index, i); - if (!(dtype_get_prtype(dict_col_get_type - (dict_field_get_col(field))) + if (!(dict_field_get_col(field)->prtype & DATA_NOT_NULL)) { /* nullable field => read the null flag */ @@ -226,11 +225,11 @@ rec_init_offsets( if (UNIV_UNLIKELY(!field->fixed_len)) { /* Variable-length field: read the length */ - dtype_t* type = dict_col_get_type - (dict_field_get_col(field)); + const dict_col_t* col + = dict_field_get_col(field); len = *lens--; - if (UNIV_UNLIKELY(dtype_get_len(type) > 255) - || UNIV_UNLIKELY(dtype_get_mtype(type) + if (UNIV_UNLIKELY(col->len > 255) + || UNIV_UNLIKELY(col->mtype == DATA_BLOB)) { if (len & 0x80) { /* 1exxxxxxx xxxxxxxx */ @@ -442,8 +441,6 @@ rec_get_converted_size_new( { ulint size = REC_N_NEW_EXTRA_BYTES + (index->n_nullable + 7) / 8; - dict_field_t* field; - dtype_t* type; ulint i; ulint n_fields; ut_ad(index && dtuple); @@ -471,25 +468,27 @@ rec_get_converted_size_new( /* read the lengths of fields 0..n */ for (i = 0; i < n_fields; i++) { - ulint len = dtuple_get_nth_field(dtuple, i)->len; + dict_field_t* field; + ulint len; + const dict_col_t* col; + field = dict_index_get_nth_field(index, i); - type = dict_col_get_type(dict_field_get_col(field)); - ut_ad(len != UNIV_SQL_NULL - || !(dtype_get_prtype(type) & DATA_NOT_NULL)); + len = dtuple_get_nth_field(dtuple, i)->len; + col = dict_field_get_col(field); + + ut_ad(len != UNIV_SQL_NULL || !(col->prtype & DATA_NOT_NULL)); if (len == UNIV_SQL_NULL) { /* No length is stored for NULL fields. 
*/ continue; } - ut_ad(len <= dtype_get_len(type) - || dtype_get_mtype(type) == DATA_BLOB); + ut_ad(len <= col->len || col->mtype == DATA_BLOB); ut_ad(!field->fixed_len || len == field->fixed_len); if (field->fixed_len) { } else if (len < 128 - || (dtype_get_len(type) < 256 - && dtype_get_mtype(type) != DATA_BLOB)) { + || (col->len < 256 && col->mtype != DATA_BLOB)) { size++; } else { size += 2; @@ -588,8 +587,6 @@ rec_set_nth_field_extern_bit_new( { byte* nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); byte* lens = nulls - (index->n_nullable + 7) / 8; - dict_field_t* field; - dtype_t* type; ulint i; ulint n_fields; ulint null_mask = 1; @@ -603,9 +600,13 @@ rec_set_nth_field_extern_bit_new( /* read the lengths of fields 0..n */ for (i = 0; i < n_fields; i++) { + const dict_field_t* field; + const dict_col_t* col; + field = dict_index_get_nth_field(index, i); - type = dict_col_get_type(dict_field_get_col(field)); - if (!(dtype_get_prtype(type) & DATA_NOT_NULL)) { + col = dict_field_get_col(field); + + if (!(col->prtype & DATA_NOT_NULL)) { if (UNIV_UNLIKELY(!(byte) null_mask)) { nulls--; null_mask = 1; @@ -629,8 +630,7 @@ rec_set_nth_field_extern_bit_new( continue; } lens--; - if (dtype_get_len(type) > 255 - || dtype_get_mtype(type) == DATA_BLOB) { + if (col->len > 255 || col->mtype == DATA_BLOB) { ulint len = lens[1]; if (len & 0x80) { /* 1exxxxxx: 2-byte length */ if (i == ith) { @@ -640,9 +640,10 @@ rec_set_nth_field_extern_bit_new( /* toggle the extern bit */ len ^= 0x40; if (mtr) { - mlog_write_ulint - (lens + 1, len, - MLOG_1BYTE, mtr); + mlog_write_ulint(lens + 1, + len, + MLOG_1BYTE, + mtr); } else { lens[1] = (byte) len; } @@ -767,8 +768,8 @@ rec_convert_dtuple_to_rec_old( len = dfield_get_len(field); if (len == UNIV_SQL_NULL) { - len = dtype_get_sql_null_size - (dfield_get_type(field)); + len = dtype_get_sql_null_size( + dfield_get_type(field)); data_write_sql_null(rec + end_offset, len); end_offset += len; @@ -795,8 +796,8 @@ rec_convert_dtuple_to_rec_old( len = dfield_get_len(field); if (len == UNIV_SQL_NULL) { - len = dtype_get_sql_null_size - (dfield_get_type(field)); + len = dtype_get_sql_null_size( + dfield_get_type(field)); data_write_sql_null(rec + end_offset, len); end_offset += len; @@ -1057,8 +1058,8 @@ rec_copy_prefix_to_dtuple( ut_ad(rec_validate(rec, offsets)); ut_ad(dtuple_check_typed(tuple)); - dtuple_set_info_bits(tuple, rec_get_info_bits - (rec, dict_table_is_comp(index->table))); + dtuple_set_info_bits(tuple, rec_get_info_bits( + rec, dict_table_is_comp(index->table))); for (i = 0; i < n_fields; i++) { @@ -1137,8 +1138,6 @@ rec_copy_prefix_to_buf( { byte* nulls; byte* lens; - dict_field_t* field; - dtype_t* type; ulint i; ulint prefix_len; ulint null_mask; @@ -1148,10 +1147,10 @@ rec_copy_prefix_to_buf( if (!dict_table_is_comp(index->table)) { ut_ad(rec_validate_old(rec)); - return(rec_copy_prefix_to_buf_old - (rec, n_fields, - rec_get_field_start_offs(rec, n_fields), - buf, buf_size)); + return(rec_copy_prefix_to_buf_old( + rec, n_fields, + rec_get_field_start_offs(rec, n_fields), + buf, buf_size)); } status = rec_get_status(rec); @@ -1180,9 +1179,13 @@ rec_copy_prefix_to_buf( /* read the lengths of fields 0..n */ for (i = 0; i < n_fields; i++) { + const dict_field_t* field; + const dict_col_t* col; + field = dict_index_get_nth_field(index, i); - type = dict_col_get_type(dict_field_get_col(field)); - if (!(dtype_get_prtype(type) & DATA_NOT_NULL)) { + col = dict_field_get_col(field); + + if (!(col->prtype & DATA_NOT_NULL)) { /* nullable field => read the null flag */ 
if (UNIV_UNLIKELY(!(byte) null_mask)) { nulls--; @@ -1201,8 +1204,7 @@ rec_copy_prefix_to_buf( prefix_len += field->fixed_len; } else { ulint len = *lens--; - if (dtype_get_len(type) > 255 - || dtype_get_mtype(type) == DATA_BLOB) { + if (col->len > 255 || col->mtype == DATA_BLOB) { if (len & 0x80) { /* 1exxxxxx */ len &= 0x3f; diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c index 352519f5d56..aeaa83daa4d 100644 --- a/storage/innobase/row/row0ins.c +++ b/storage/innobase/row/row0ins.c @@ -135,12 +135,12 @@ row_ins_alloc_sys_fields( /*=====================*/ ins_node_t* node) /* in: insert node */ { - dtuple_t* row; - dict_table_t* table; - mem_heap_t* heap; - dict_col_t* col; - dfield_t* dfield; - byte* ptr; + dtuple_t* row; + dict_table_t* table; + mem_heap_t* heap; + const dict_col_t* col; + dfield_t* dfield; + byte* ptr; row = node->row; table = node->table; @@ -251,8 +251,8 @@ row_ins_sec_index_entry_by_modify( heap = mem_heap_create(1024); - update = row_upd_build_sec_rec_difference_binary - (cursor->index, entry, rec, thr_get_trx(thr), heap); + update = row_upd_build_sec_rec_difference_binary( + cursor->index, entry, rec, thr_get_trx(thr), heap); if (mode == BTR_MODIFY_LEAF) { /* Try an optimistic updating of the record, keeping changes within the page */ @@ -445,7 +445,6 @@ row_ins_cascade_calc_update_vec( upd_field_t* parent_ufield; ulint n_fields_updated; ulint parent_field_no; - dtype_t* type; ulint i; ulint j; @@ -470,16 +469,19 @@ row_ins_cascade_calc_update_vec( for (i = 0; i < foreign->n_fields; i++) { - parent_field_no = dict_table_get_nth_col_pos - (parent_table, - dict_index_get_nth_col_no(parent_index, i)); + parent_field_no = dict_table_get_nth_col_pos( + parent_table, + dict_index_get_nth_col_no(parent_index, i)); for (j = 0; j < parent_update->n_fields; j++) { parent_ufield = parent_update->fields + j; if (parent_ufield->field_no == parent_field_no) { - ulint min_size; + ulint min_size; + const dict_col_t* col; + + col = dict_index_get_nth_col(index, i); /* A field in the parent index record is updated. 
Let us make the update vector @@ -488,20 +490,17 @@ row_ins_cascade_calc_update_vec( ufield = update->fields + n_fields_updated; ufield->field_no - = dict_table_get_nth_col_pos - (table, - dict_index_get_nth_col_no(index, i)); + = dict_table_get_nth_col_pos( + table, dict_col_get_no(col)); ufield->exp = NULL; ufield->new_val = parent_ufield->new_val; - type = dict_index_get_nth_type(index, i); - /* Do not allow a NOT NULL column to be updated as NULL */ if (ufield->new_val.len == UNIV_SQL_NULL - && (type->prtype & DATA_NOT_NULL)) { + && (col->prtype & DATA_NOT_NULL)) { return(ULINT_UNDEFINED); } @@ -510,9 +509,12 @@ row_ins_cascade_calc_update_vec( column, do not allow the update */ if (ufield->new_val.len != UNIV_SQL_NULL - && dtype_get_at_most_n_mbchars - (type, dtype_get_len(type), - ufield->new_val.len, ufield->new_val.data) + && dtype_get_at_most_n_mbchars( + col->prtype, + col->mbminlen, col->mbmaxlen, + col->len, + ufield->new_val.len, + ufield->new_val.data) < ufield->new_val.len) { return(ULINT_UNDEFINED); @@ -523,7 +525,7 @@ row_ins_cascade_calc_update_vec( need to pad with spaces the new value of the child column */ - min_size = dtype_get_min_size(type); + min_size = dict_col_get_min_size(col); if (min_size && ufield->new_val.len != UNIV_SQL_NULL @@ -531,8 +533,8 @@ row_ins_cascade_calc_update_vec( char* pad_start; const char* pad_end; - ufield->new_val.data = mem_heap_alloc - (heap, min_size); + ufield->new_val.data = mem_heap_alloc( + heap, min_size); pad_start = ((char*) ufield ->new_val.data) + ufield->new_val.len; @@ -544,14 +546,13 @@ row_ins_cascade_calc_update_vec( parent_ufield->new_val.data, parent_ufield->new_val.len); - switch (UNIV_EXPECT(dtype_get_mbminlen - (type), 1)) { + switch (UNIV_EXPECT(col->mbminlen,1)) { default: ut_error; case 1: if (UNIV_UNLIKELY - (dtype_get_charset_coll - (dtype_get_prtype(type)) + (dtype_get_charset_coll( + col->prtype) == DATA_MYSQL_BINARY_CHARSET_COLL)) { /* Do not pad BINARY columns. */ @@ -603,8 +604,8 @@ row_ins_set_detailed( if (os_file_set_eof(srv_misc_tmpfile)) { ut_print_name(srv_misc_tmpfile, trx, TRUE, foreign->foreign_table_name); - dict_print_info_on_foreign_key_in_create_format - (srv_misc_tmpfile, trx, foreign, FALSE); + dict_print_info_on_foreign_key_in_create_format( + srv_misc_tmpfile, trx, foreign, FALSE); trx_set_detailed_error_from_file(trx, srv_misc_tmpfile); } else { trx_set_detailed_error(trx, "temp file operation failed"); @@ -834,8 +835,8 @@ row_ins_foreign_check_on_constraint( operation. 
*/ node->cascade_heap = mem_heap_create(128); - node->cascade_node = row_create_update_node_for_mysql - (table, node->cascade_heap); + node->cascade_node = row_create_update_node_for_mysql( + table, node->cascade_heap); que_node_set_parent(node->cascade_node, node); } @@ -881,11 +882,11 @@ row_ins_foreign_check_on_constraint( err = DB_ROW_IS_REFERENCED; - row_ins_foreign_report_err - ("Trying an update, possibly causing a cyclic" - " cascaded update\n" - "in the child table,", thr, foreign, - btr_pcur_get_rec(pcur), entry); + row_ins_foreign_report_err( + "Trying an update, possibly causing a cyclic" + " cascaded update\n" + "in the child table,", thr, foreign, + btr_pcur_get_rec(pcur), entry); goto nonstandard_exit_func; } @@ -893,9 +894,9 @@ row_ins_foreign_check_on_constraint( if (row_ins_cascade_n_ancestors(cascade) >= 15) { err = DB_ROW_IS_REFERENCED; - row_ins_foreign_report_err - ("Trying a too deep cascaded delete or update\n", - thr, foreign, btr_pcur_get_rec(pcur), entry); + row_ins_foreign_report_err( + "Trying a too deep cascaded delete or update\n", + thr, foreign, btr_pcur_get_rec(pcur), entry); goto nonstandard_exit_func; } @@ -961,9 +962,9 @@ row_ins_foreign_check_on_constraint( we already have a normal shared lock on the appropriate gap if the search criterion was not unique */ - err = lock_clust_rec_read_check_and_lock_alt - (0, clust_rec, clust_index, - LOCK_X, LOCK_REC_NOT_GAP, thr); + err = lock_clust_rec_read_check_and_lock_alt( + 0, clust_rec, clust_index, LOCK_X, LOCK_REC_NOT_GAP, + thr); } if (err != DB_SUCCESS) { @@ -995,8 +996,9 @@ row_ins_foreign_check_on_constraint( for (i = 0; i < foreign->n_fields; i++) { (update->fields + i)->field_no - = dict_table_get_nth_col_pos - (table, dict_index_get_nth_col_no(index, i)); + = dict_table_get_nth_col_pos( + table, + dict_index_get_nth_col_no(index, i)); (update->fields + i)->exp = NULL; (update->fields + i)->new_val.len = UNIV_SQL_NULL; (update->fields + i)->new_val.data = NULL; @@ -1017,14 +1019,14 @@ row_ins_foreign_check_on_constraint( if (n_to_update == ULINT_UNDEFINED) { err = DB_ROW_IS_REFERENCED; - row_ins_foreign_report_err - ("Trying a cascaded update where the" - " updated value in the child\n" - "table would not fit in the length" - " of the column, or the value would\n" - "be NULL and the column is" - " declared as not NULL in the child table,", - thr, foreign, btr_pcur_get_rec(pcur), entry); + row_ins_foreign_report_err( + "Trying a cascaded update where the" + " updated value in the child\n" + "table would not fit in the length" + " of the column, or the value would\n" + "be NULL and the column is" + " declared as not NULL in the child table,", + thr, foreign, btr_pcur_get_rec(pcur), entry); goto nonstandard_exit_func; } @@ -1133,11 +1135,11 @@ row_ins_set_shared_rec_lock( ut_ad(rec_offs_validate(rec, index, offsets)); if (index->type & DICT_CLUSTERED) { - err = lock_clust_rec_read_check_and_lock - (0, rec, index, offsets, LOCK_S, type, thr); + err = lock_clust_rec_read_check_and_lock( + 0, rec, index, offsets, LOCK_S, type, thr); } else { - err = lock_sec_rec_read_check_and_lock - (0, rec, index, offsets, LOCK_S, type, thr); + err = lock_sec_rec_read_check_and_lock( + 0, rec, index, offsets, LOCK_S, type, thr); } return(err); @@ -1164,11 +1166,11 @@ row_ins_set_exclusive_rec_lock( ut_ad(rec_offs_validate(rec, index, offsets)); if (index->type & DICT_CLUSTERED) { - err = lock_clust_rec_read_check_and_lock - (0, rec, index, offsets, LOCK_X, type, thr); + err = lock_clust_rec_read_check_and_lock( + 0, rec, 
index, offsets, LOCK_X, type, thr); } else { - err = lock_sec_rec_read_check_and_lock - (0, rec, index, offsets, LOCK_X, type, thr); + err = lock_sec_rec_read_check_and_lock( + 0, rec, index, offsets, LOCK_X, type, thr); } return(err); @@ -1232,8 +1234,8 @@ run_again: for example */ for (i = 0; i < foreign->n_fields; i++) { - if (UNIV_SQL_NULL == dfield_get_len - (dtuple_get_nth_field(entry, i))) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(entry, i))) { goto exit_func; } @@ -1286,8 +1288,8 @@ run_again: ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); fputs(":\n", ef); - dict_print_info_on_foreign_key_in_create_format - (ef, trx, foreign, TRUE); + dict_print_info_on_foreign_key_in_create_format( + ef, trx, foreign, TRUE); fputs("\nTrying to add to index ", ef); ut_print_name(ef, trx, FALSE, foreign->foreign_index->name); @@ -1348,9 +1350,8 @@ run_again: if (rec == page_get_supremum_rec(page)) { - err = row_ins_set_shared_rec_lock - (LOCK_ORDINARY, rec, - check_index, offsets, thr); + err = row_ins_set_shared_rec_lock( + LOCK_ORDINARY, rec, check_index, offsets, thr); if (err != DB_SUCCESS) { break; @@ -1364,9 +1365,9 @@ run_again: if (cmp == 0) { if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))) { - err = row_ins_set_shared_rec_lock - (LOCK_ORDINARY, rec, - check_index, offsets, thr); + err = row_ins_set_shared_rec_lock( + LOCK_ORDINARY, rec, check_index, + offsets, thr); if (err != DB_SUCCESS) { break; @@ -1376,9 +1377,9 @@ run_again: a record because we can allow inserts into gaps */ - err = row_ins_set_shared_rec_lock - (LOCK_REC_NOT_GAP, rec, - check_index, offsets, thr); + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, rec, check_index, + offsets, thr); if (err != DB_SUCCESS) { @@ -1394,9 +1395,9 @@ run_again: condition: check them in a separate function */ - err = row_ins_foreign_check_on_constraint - (thr, foreign, &pcur, - entry, &mtr); + err = row_ins_foreign_check_on_constraint( + thr, foreign, &pcur, entry, + &mtr); if (err != DB_SUCCESS) { /* Since reporting a plain "duplicate key" error @@ -1417,9 +1418,9 @@ run_again: break; } } else { - row_ins_foreign_report_err - ("Trying to delete or update", - thr, foreign, rec, entry); + row_ins_foreign_report_err( + "Trying to delete or update", + thr, foreign, rec, entry); err = DB_ROW_IS_REFERENCED; break; @@ -1428,8 +1429,8 @@ run_again: } if (cmp < 0) { - err = row_ins_set_shared_rec_lock - (LOCK_GAP, rec, check_index, offsets, thr); + err = row_ins_set_shared_rec_lock( + LOCK_GAP, rec, check_index, offsets, thr); if (err != DB_SUCCESS) { break; @@ -1437,8 +1438,8 @@ run_again: if (check_ref) { err = DB_NO_REFERENCED_ROW; - row_ins_foreign_report_add_err - (trx, foreign, rec, entry); + row_ins_foreign_report_add_err( + trx, foreign, rec, entry); } else { err = DB_SUCCESS; } @@ -1453,8 +1454,8 @@ next_rec: if (!moved) { if (check_ref) { rec = btr_pcur_get_rec(&pcur); - row_ins_foreign_report_add_err - (trx, foreign, rec, entry); + row_ins_foreign_report_add_err( + trx, foreign, rec, entry); err = DB_NO_REFERENCED_ROW; } else { err = DB_SUCCESS; @@ -1546,8 +1547,8 @@ row_ins_check_foreign_constraints( But the counter on the table protects the referenced table from being dropped while the check is running. 
*/ - err = row_ins_check_foreign_constraint - (TRUE, foreign, table, entry, thr); + err = row_ins_check_foreign_constraint( + TRUE, foreign, table, entry, thr); if (foreign->referenced_table) { mutex_enter(&(dict_sys->mutex)); @@ -1617,8 +1618,8 @@ row_ins_dupl_error_with_rec( if (!(index->type & DICT_CLUSTERED)) { for (i = 0; i < n_unique; i++) { - if (UNIV_SQL_NULL == dfield_get_len - (dtuple_get_nth_field(entry, i))) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(entry, i))) { return(FALSE); } @@ -1665,8 +1666,8 @@ row_ins_scan_sec_index_for_duplicate( since we define NULL != NULL in this case */ for (i = 0; i < n_unique; i++) { - if (UNIV_SQL_NULL == dfield_get_len - (dtuple_get_nth_field(entry, i))) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(entry, i))) { return(DB_SUCCESS); } @@ -1702,12 +1703,12 @@ row_ins_scan_sec_index_for_duplicate( duplicates ( REPLACE, LOAD DATAFILE REPLACE, INSERT ON DUPLICATE KEY UPDATE). */ - err = row_ins_set_exclusive_rec_lock - (LOCK_ORDINARY, rec, index, offsets, thr); + err = row_ins_set_exclusive_rec_lock( + LOCK_ORDINARY, rec, index, offsets, thr); } else { - err = row_ins_set_shared_rec_lock - (LOCK_ORDINARY, rec, index, offsets, thr); + err = row_ins_set_shared_rec_lock( + LOCK_ORDINARY, rec, index, offsets, thr); } if (err != DB_SUCCESS) { @@ -1831,22 +1832,22 @@ row_ins_duplicate_error_in_clust( duplicates ( REPLACE, LOAD DATAFILE REPLACE, INSERT ON DUPLICATE KEY UPDATE). */ - err = row_ins_set_exclusive_rec_lock - (LOCK_REC_NOT_GAP, rec, - cursor->index, offsets, thr); + err = row_ins_set_exclusive_rec_lock( + LOCK_REC_NOT_GAP, rec, + cursor->index, offsets, thr); } else { - err = row_ins_set_shared_rec_lock - (LOCK_REC_NOT_GAP, rec, - cursor->index, offsets, thr); + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, rec, + cursor->index, offsets, thr); } if (err != DB_SUCCESS) { goto func_exit; } - if (row_ins_dupl_error_with_rec - (rec, entry, cursor->index, offsets)) { + if (row_ins_dupl_error_with_rec( + rec, entry, cursor->index, offsets)) { trx->error_info = cursor->index; err = DB_DUPLICATE_KEY; goto func_exit; @@ -1869,22 +1870,22 @@ row_ins_duplicate_error_in_clust( duplicates ( REPLACE, LOAD DATAFILE REPLACE, INSERT ON DUPLICATE KEY UPDATE). 
*/ - err = row_ins_set_exclusive_rec_lock - (LOCK_REC_NOT_GAP, rec, - cursor->index, offsets, thr); + err = row_ins_set_exclusive_rec_lock( + LOCK_REC_NOT_GAP, rec, + cursor->index, offsets, thr); } else { - err = row_ins_set_shared_rec_lock - (LOCK_REC_NOT_GAP, rec, - cursor->index, offsets, thr); + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, rec, + cursor->index, offsets, thr); } if (err != DB_SUCCESS) { goto func_exit; } - if (row_ins_dupl_error_with_rec - (rec, entry, cursor->index, offsets)) { + if (row_ins_dupl_error_with_rec( + rec, entry, cursor->index, offsets)) { trx->error_info = cursor->index; err = DB_DUPLICATE_KEY; goto func_exit; @@ -2021,8 +2022,8 @@ row_ins_index_entry_low( #ifdef UNIV_DEBUG { page_t* page = btr_cur_get_page(&cursor); - rec_t* first_rec = page_rec_get_next - (page_get_infimum_rec(page)); + rec_t* first_rec = page_rec_get_next( + page_get_infimum_rec(page)); if (UNIV_LIKELY(first_rec != page_get_supremum_rec(page))) { ut_a(rec_get_n_fields(first_rec, index) @@ -2040,16 +2041,16 @@ row_ins_index_entry_low( /* Note that the following may return also DB_LOCK_WAIT */ - err = row_ins_duplicate_error_in_clust - (&cursor, entry, thr, &mtr); + err = row_ins_duplicate_error_in_clust( + &cursor, entry, thr, &mtr); if (err != DB_SUCCESS) { goto function_exit; } } else { mtr_commit(&mtr); - err = row_ins_scan_sec_index_for_duplicate - (index, entry, thr); + err = row_ins_scan_sec_index_for_duplicate( + index, entry, thr); mtr_start(&mtr); if (err != DB_SUCCESS) { @@ -2084,19 +2085,19 @@ row_ins_index_entry_low( } if (index->type & DICT_CLUSTERED) { - err = row_ins_clust_index_entry_by_modify - (mode, &cursor, &big_rec, entry, - ext_vec, n_ext_vec, thr, &mtr); + err = row_ins_clust_index_entry_by_modify( + mode, &cursor, &big_rec, entry, + ext_vec, n_ext_vec, thr, &mtr); } else { - err = row_ins_sec_index_entry_by_modify - (mode, &cursor, entry, thr, &mtr); + err = row_ins_sec_index_entry_by_modify( + mode, &cursor, entry, thr, &mtr); } } else { if (mode == BTR_MODIFY_LEAF) { - err = btr_cur_optimistic_insert - (0, &cursor, entry, - &insert_rec, &big_rec, thr, &mtr); + err = btr_cur_optimistic_insert( + 0, &cursor, entry, &insert_rec, &big_rec, + thr, &mtr); } else { ut_a(mode == BTR_MODIFY_TREE); if (buf_LRU_buf_pool_running_out()) { @@ -2105,9 +2106,9 @@ row_ins_index_entry_low( goto function_exit; } - err = btr_cur_pessimistic_insert - (0, &cursor, entry, - &insert_rec, &big_rec, thr, &mtr); + err = btr_cur_pessimistic_insert( + 0, &cursor, entry, &insert_rec, &big_rec, + thr, &mtr); } if (err == DB_SUCCESS) { @@ -2211,7 +2212,6 @@ row_ins_index_entry_set_vals( dfield_t* row_field; ulint n_fields; ulint i; - dtype_t* cur_type; ut_ad(entry && row); @@ -2227,12 +2227,13 @@ row_ins_index_entry_set_vals( if (ind_field->prefix_len > 0 && dfield_get_len(row_field) != UNIV_SQL_NULL) { - cur_type = dict_col_get_type - (dict_field_get_col(ind_field)); + const dict_col_t* col + = dict_field_get_col(ind_field); - field->len = dtype_get_at_most_n_mbchars - (cur_type, ind_field->prefix_len, - dfield_get_len(row_field), row_field->data); + field->len = dtype_get_at_most_n_mbchars( + col->prtype, col->mbminlen, col->mbmaxlen, + ind_field->prefix_len, + row_field->len, row_field->data); } else { field->len = row_field->len; } diff --git a/storage/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c index af3f80fa53b..a2895bae71a 100644 --- a/storage/innobase/row/row0mysql.c +++ b/storage/innobase/row/row0mysql.c @@ -417,13 +417,13 @@ 
row_mysql_convert_row_to_innobase( } } - row_mysql_store_col_in_innobase_format - (dfield, - prebuilt->ins_upd_rec_buff + templ->mysql_col_offset, - TRUE, /* MySQL row format data */ - mysql_rec + templ->mysql_col_offset, - templ->mysql_col_len, - dict_table_is_comp(prebuilt->table)); + row_mysql_store_col_in_innobase_format( + dfield, + prebuilt->ins_upd_rec_buff + templ->mysql_col_offset, + TRUE, /* MySQL row format data */ + mysql_rec + templ->mysql_col_offset, + templ->mysql_col_len, + dict_table_is_comp(prebuilt->table)); next_column: ; } @@ -619,8 +619,8 @@ row_create_prebuilt( prebuilt->sel_graph = NULL; - prebuilt->search_tuple = dtuple_create - (heap, 2 * dict_table_get_n_cols(table)); + prebuilt->search_tuple = dtuple_create( + heap, 2 * dict_table_get_n_cols(table)); clust_index = dict_table_get_first_index(table); @@ -707,16 +707,16 @@ row_prebuilt_free( for (i = 0; i < MYSQL_FETCH_CACHE_SIZE; i++) { if (prebuilt->fetch_cache[i] != NULL) { - if ((ROW_PREBUILT_FETCH_MAGIC_N != mach_read_from_4 - ((prebuilt->fetch_cache[i]) - 4)) - || (ROW_PREBUILT_FETCH_MAGIC_N != mach_read_from_4 - ((prebuilt->fetch_cache[i]) - + prebuilt->mysql_row_len))) { + if ((ROW_PREBUILT_FETCH_MAGIC_N != mach_read_from_4( + (prebuilt->fetch_cache[i]) - 4)) + || (ROW_PREBUILT_FETCH_MAGIC_N != mach_read_from_4( + (prebuilt->fetch_cache[i]) + + prebuilt->mysql_row_len))) { fputs("InnoDB: Error: trying to free" " a corrupt fetch buffer.\n", stderr); - mem_analyze_corruption - (prebuilt->fetch_cache[i]); + mem_analyze_corruption( + prebuilt->fetch_cache[i]); ut_error; } @@ -811,8 +811,8 @@ row_get_prebuilt_insert_row( prebuilt->ins_node = node; if (prebuilt->ins_upd_rec_buff == NULL) { - prebuilt->ins_upd_rec_buff = mem_heap_alloc - (prebuilt->heap, prebuilt->mysql_row_len); + prebuilt->ins_upd_rec_buff = mem_heap_alloc( + prebuilt->heap, prebuilt->mysql_row_len); } row = dtuple_create(prebuilt->heap, @@ -830,10 +830,10 @@ row_get_prebuilt_insert_row( ins_node_set_new_row(node, row); - prebuilt->ins_graph = que_node_get_parent - (pars_complete_graph_for_exec(node, - prebuilt->trx, - prebuilt->heap)); + prebuilt->ins_graph = que_node_get_parent( + pars_complete_graph_for_exec(node, + prebuilt->trx, + prebuilt->heap)); prebuilt->ins_graph->state = QUE_FORK_ACTIVE; } @@ -1188,10 +1188,10 @@ row_prebuild_sel_graph( node = sel_node_create(prebuilt->heap); - prebuilt->sel_graph = que_node_get_parent - (pars_complete_graph_for_exec(node, - prebuilt->trx, - prebuilt->heap)); + prebuilt->sel_graph = que_node_get_parent( + pars_complete_graph_for_exec(node, + prebuilt->trx, + prebuilt->heap)); prebuilt->sel_graph->state = QUE_FORK_ACTIVE; } @@ -1260,10 +1260,10 @@ row_get_prebuilt_update_vector( prebuilt->upd_node = node; - prebuilt->upd_graph = que_node_get_parent - (pars_complete_graph_for_exec(node, - prebuilt->trx, - prebuilt->heap)); + prebuilt->upd_graph = que_node_get_parent( + pars_complete_graph_for_exec(node, + prebuilt->trx, + prebuilt->heap)); prebuilt->upd_graph->state = QUE_FORK_ACTIVE; } @@ -1607,16 +1607,11 @@ row_table_got_default_clust_index( /*==============================*/ dict_table_t* table) { - dict_index_t* clust_index; + const dict_index_t* clust_index; clust_index = dict_table_get_first_index(table); - if (dtype_get_mtype(dict_index_get_nth_type(clust_index, 0)) - == DATA_SYS) { - return(TRUE); - } - - return(FALSE); + return(dict_index_get_nth_col(clust_index, 0)->mtype == DATA_SYS); } /************************************************************************* @@ -1782,9 +1777,8 @@ 
row_create_table_for_mysql( /* Check that no reserved column names are used. */ for (i = 0; i < dict_table_get_n_user_cols(table); i++) { - dict_col_t* col = dict_table_get_nth_col(table, i); - - if (dict_col_name_is_reserved(col->name)) { + if (dict_col_name_is_reserved( + dict_table_get_col_name(table, i))) { dict_mem_table_free(table); trx_commit_for_mysql(trx); @@ -1975,16 +1969,16 @@ row_create_index_for_mysql( for (i = 0; i < dict_index_get_n_fields(index); i++) { for (j = 0; j < i; j++) { - if (0 == ut_strcmp - (dict_index_get_nth_field(index, j)->name, - dict_index_get_nth_field(index, i)->name)) { + if (0 == ut_strcmp( + dict_index_get_nth_field(index, j)->name, + dict_index_get_nth_field(index, i)->name)) { ut_print_timestamp(stderr); fputs(" InnoDB: Error: column ", stderr); ut_print_name(stderr, trx, FALSE, - dict_index_get_nth_field - (index, i)->name); + dict_index_get_nth_field( + index, i)->name); fputs(" appears twice in ", stderr); dict_index_name_print(stderr, trx, index); fputs("\n" @@ -2205,8 +2199,8 @@ loop: goto already_dropped; } - if (DB_SUCCESS != row_drop_table_for_mysql_in_background - (drop->table_name)) { + if (DB_SUCCESS != row_drop_table_for_mysql_in_background( + drop->table_name)) { /* If the DROP fails for some table, we return, and let the main thread retry later */ @@ -2835,9 +2829,9 @@ row_truncate_table_for_mysql( rec = btr_pcur_get_rec(&pcur); if (root_page_no != FIL_NULL) { - page_rec_write_index_page_no - (rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, - root_page_no, &mtr); + page_rec_write_index_page_no( + rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, + root_page_no, &mtr); /* We will need to commit and restart the mini-transaction in order to avoid deadlocks. The dict_truncate_index_tree() call has allocated @@ -3038,8 +3032,8 @@ check_next_foreign: } if (foreign && trx->check_foreigns - && !(drop_db && dict_tables_have_same_db - (name, foreign->foreign_table_name))) { + && !(drop_db && dict_tables_have_same_db( + name, foreign->foreign_table_name))) { FILE* ef = dict_foreign_err_file; /* We only allow dropping a referenced table if @@ -3232,8 +3226,8 @@ check_next_foreign: space_id = table->space; if (table->dir_path_of_temp_table != NULL) { - dir_path_of_temp_table = mem_strdup - (table->dir_path_of_temp_table); + dir_path_of_temp_table = mem_strdup( + table->dir_path_of_temp_table); is_path = TRUE; name_or_path = dir_path_of_temp_table; } else { @@ -3447,8 +3441,8 @@ row_delete_constraint( ulint err; /* New format constraints have ids /. 
*/ - err = row_delete_constraint_low - (mem_heap_strcat(heap, database_name, id), trx); + err = row_delete_constraint_low( + mem_heap_strcat(heap, database_name, id), trx); if ((err == DB_SUCCESS) && !strchr(id, '/')) { /* Old format < 4.0.18 constraints have constraint ids @@ -3570,9 +3564,9 @@ row_rename_table_for_mysql( heap = mem_heap_create(100); - err = dict_foreign_parse_drop_constraints - (heap, trx, table, - &n_constraints_to_drop, &constraints_to_drop); + err = dict_foreign_parse_drop_constraints( + heap, trx, table, &n_constraints_to_drop, + &constraints_to_drop); if (err != DB_SUCCESS) { @@ -3611,71 +3605,71 @@ row_rename_table_for_mysql( pars_info_add_str_literal(info, "new_table_name", new_name); pars_info_add_str_literal(info, "old_table_name", old_name); - err = que_eval_sql - (info, - "PROCEDURE RENAME_CONSTRAINT_IDS () IS\n" - "gen_constr_prefix CHAR;\n" - "new_db_name CHAR;\n" - "foreign_id CHAR;\n" - "new_foreign_id CHAR;\n" - "old_db_name_len INT;\n" - "old_t_name_len INT;\n" - "new_db_name_len INT;\n" - "id_len INT;\n" - "found INT;\n" - "BEGIN\n" - "found := 1;\n" - "old_db_name_len := INSTR(:old_table_name, '/')-1;\n" - "new_db_name_len := INSTR(:new_table_name, '/')-1;\n" - "new_db_name := SUBSTR(:new_table_name, 0,\n" - " new_db_name_len);\n" - "old_t_name_len := LENGTH(:old_table_name);\n" - "gen_constr_prefix := CONCAT(:old_table_name,\n" - " '_ibfk_');\n" - "WHILE found = 1 LOOP\n" - " SELECT ID INTO foreign_id\n" - " FROM SYS_FOREIGN\n" - " WHERE FOR_NAME = :old_table_name\n" - " AND TO_BINARY(FOR_NAME)\n" - " = TO_BINARY(:old_table_name)\n" - " LOCK IN SHARE MODE;\n" - " IF (SQL % NOTFOUND) THEN\n" - " found := 0;\n" - " ELSE\n" - " UPDATE SYS_FOREIGN\n" - " SET FOR_NAME = :new_table_name\n" - " WHERE ID = foreign_id;\n" - " id_len := LENGTH(foreign_id);\n" - " IF (INSTR(foreign_id, '/') > 0) THEN\n" - " IF (INSTR(foreign_id,\n" - " gen_constr_prefix) > 0)\n" - " THEN\n" - " new_foreign_id :=\n" - " CONCAT(:new_table_name,\n" - " SUBSTR(foreign_id, old_t_name_len,\n" - " id_len - old_t_name_len));\n" - " ELSE\n" - " new_foreign_id :=\n" - " CONCAT(new_db_name,\n" - " SUBSTR(foreign_id,\n" - " old_db_name_len,\n" - " id_len - old_db_name_len));\n" - " END IF;\n" - " UPDATE SYS_FOREIGN\n" - " SET ID = new_foreign_id\n" - " WHERE ID = foreign_id;\n" - " UPDATE SYS_FOREIGN_COLS\n" - " SET ID = new_foreign_id\n" - " WHERE ID = foreign_id;\n" - " END IF;\n" - " END IF;\n" - "END LOOP;\n" - "UPDATE SYS_FOREIGN SET REF_NAME = :new_table_name\n" - "WHERE REF_NAME = :old_table_name\n" - " AND TO_BINARY(REF_NAME)\n" - " = TO_BINARY(:old_table_name);\n" - "END;\n" - , FALSE, trx); + err = que_eval_sql( + info, + "PROCEDURE RENAME_CONSTRAINT_IDS () IS\n" + "gen_constr_prefix CHAR;\n" + "new_db_name CHAR;\n" + "foreign_id CHAR;\n" + "new_foreign_id CHAR;\n" + "old_db_name_len INT;\n" + "old_t_name_len INT;\n" + "new_db_name_len INT;\n" + "id_len INT;\n" + "found INT;\n" + "BEGIN\n" + "found := 1;\n" + "old_db_name_len := INSTR(:old_table_name, '/')-1;\n" + "new_db_name_len := INSTR(:new_table_name, '/')-1;\n" + "new_db_name := SUBSTR(:new_table_name, 0,\n" + " new_db_name_len);\n" + "old_t_name_len := LENGTH(:old_table_name);\n" + "gen_constr_prefix := CONCAT(:old_table_name,\n" + " '_ibfk_');\n" + "WHILE found = 1 LOOP\n" + " SELECT ID INTO foreign_id\n" + " FROM SYS_FOREIGN\n" + " WHERE FOR_NAME = :old_table_name\n" + " AND TO_BINARY(FOR_NAME)\n" + " = TO_BINARY(:old_table_name)\n" + " LOCK IN SHARE MODE;\n" + " IF (SQL % NOTFOUND) THEN\n" + " found := 0;\n" + " ELSE\n" 
+ " UPDATE SYS_FOREIGN\n" + " SET FOR_NAME = :new_table_name\n" + " WHERE ID = foreign_id;\n" + " id_len := LENGTH(foreign_id);\n" + " IF (INSTR(foreign_id, '/') > 0) THEN\n" + " IF (INSTR(foreign_id,\n" + " gen_constr_prefix) > 0)\n" + " THEN\n" + " new_foreign_id :=\n" + " CONCAT(:new_table_name,\n" + " SUBSTR(foreign_id, old_t_name_len,\n" + " id_len - old_t_name_len));\n" + " ELSE\n" + " new_foreign_id :=\n" + " CONCAT(new_db_name,\n" + " SUBSTR(foreign_id,\n" + " old_db_name_len,\n" + " id_len - old_db_name_len));\n" + " END IF;\n" + " UPDATE SYS_FOREIGN\n" + " SET ID = new_foreign_id\n" + " WHERE ID = foreign_id;\n" + " UPDATE SYS_FOREIGN_COLS\n" + " SET ID = new_foreign_id\n" + " WHERE ID = foreign_id;\n" + " END IF;\n" + " END IF;\n" + "END LOOP;\n" + "UPDATE SYS_FOREIGN SET REF_NAME = :new_table_name\n" + "WHERE REF_NAME = :old_table_name\n" + " AND TO_BINARY(REF_NAME)\n" + " = TO_BINARY(:old_table_name);\n" + "END;\n" + , FALSE, trx); } else if (n_constraints_to_drop > 0) { /* Drop some constraints of tmp tables. */ @@ -3763,8 +3757,8 @@ end: /* We only want to switch off some of the type checking in an ALTER, not in a RENAME. */ - err = dict_load_foreigns - (new_name, old_is_tmp ? trx->check_foreigns : TRUE); + err = dict_load_foreigns( + new_name, old_is_tmp ? trx->check_foreigns : TRUE); if (err != DB_SUCCESS) { ut_print_timestamp(stderr); @@ -3904,8 +3898,8 @@ func_exit: for (i = 0; i < dict_index_get_n_ordering_defined_by_user(index); i++) { - if (UNIV_SQL_NULL == dfield_get_len - (dtuple_get_nth_field(prev_entry, i))) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(prev_entry, i))) { contains_null = TRUE; } @@ -3928,8 +3922,8 @@ not_ok: } else if ((index->type & DICT_UNIQUE) && !contains_null && matched_fields - >= dict_index_get_n_ordering_defined_by_user - (index)) { + >= dict_index_get_n_ordering_defined_by_user( + index)) { fputs("InnoDB: duplicate key in ", stderr); goto not_ok; @@ -4004,7 +3998,7 @@ row_check_table_for_mysql( ut_print_name(stderr, trx, FALSE, index->name); putc('\n', stderr); */ - if (!btr_validate_tree(index->tree, prebuilt->trx)) { + if (!btr_validate_index(index, prebuilt->trx)) { ret = DB_ERROR; } else { if (!row_scan_and_check_index(prebuilt, diff --git a/storage/innobase/row/row0purge.c b/storage/innobase/row/row0purge.c index 1513b18666d..889a11a7a76 100644 --- a/storage/innobase/row/row0purge.c +++ b/storage/innobase/row/row0purge.c @@ -123,10 +123,10 @@ row_purge_remove_clust_if_poss_low( rec = btr_pcur_get_rec(pcur); - if (0 != ut_dulint_cmp(node->roll_ptr, row_get_rec_roll_ptr - (rec, index, rec_get_offsets - (rec, index, offsets_, - ULINT_UNDEFINED, &heap)))) { + if (0 != ut_dulint_cmp(node->roll_ptr, row_get_rec_roll_ptr( + rec, index, rec_get_offsets( + rec, index, offsets_, + ULINT_UNDEFINED, &heap)))) { if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } @@ -248,9 +248,9 @@ row_purge_remove_sec_if_poss_low( success = row_purge_reposition_pcur(BTR_SEARCH_LEAF, node, mtr_vers); if (success) { - old_has = row_vers_old_has_index_entry - (TRUE, btr_pcur_get_rec(&(node->pcur)), - mtr_vers, index, entry); + old_has = row_vers_old_has_index_entry( + TRUE, btr_pcur_get_rec(&(node->pcur)), + mtr_vers, index, entry); } btr_pcur_commit_specify_mtr(&(node->pcur), mtr_vers); @@ -430,7 +430,7 @@ skip_secondaries: index = dict_table_get_first_index(node->table); - mtr_x_lock(dict_tree_get_lock(index->tree), &mtr); + mtr_x_lock(dict_index_get_lock(index), &mtr); /* NOTE: we must also acquire an X-latch to the root page of the tree. 
We will need it when we @@ -441,7 +441,7 @@ skip_secondaries: latching order if we would only later latch the root page of such a tree! */ - btr_root_get(index->tree, &mtr); + btr_root_get(index, &mtr); /* We assume in purge of externally stored fields that the space id of the undo log record is 0! */ @@ -618,8 +618,8 @@ row_purge( if (purge_needed) { node->found_clust = FALSE; - node->index = dict_table_get_next_index - (dict_table_get_first_index(node->table)); + node->index = dict_table_get_next_index( + dict_table_get_first_index(node->table)); if (node->rec_type == TRX_UNDO_DEL_MARK_REC) { row_purge_del_mark(node); diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index b945954041e..efa129d6211 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -114,10 +114,8 @@ row_build_index_entry( dict_field_t* ind_field; dfield_t* dfield; dfield_t* dfield2; - dict_col_t* col; ulint i; ulint storage_len; - dtype_t* cur_type; ut_ad(row && index && heap); ut_ad(dtuple_check_typed(row)); @@ -128,11 +126,12 @@ row_build_index_entry( if (index->type & DICT_UNIVERSAL) { dtuple_set_n_fields_cmp(entry, entry_len); } else { - dtuple_set_n_fields_cmp - (entry, dict_index_get_n_unique_in_tree(index)); + dtuple_set_n_fields_cmp( + entry, dict_index_get_n_unique_in_tree(index)); } for (i = 0; i < entry_len; i++) { + const dict_col_t* col; ind_field = dict_index_get_nth_field(index, i); col = ind_field->col; @@ -143,17 +142,20 @@ row_build_index_entry( dfield_copy(dfield, dfield2); /* If a column prefix index, take only the prefix */ - if (ind_field->prefix_len > 0 - && dfield_get_len(dfield2) != UNIV_SQL_NULL) { + if (ind_field->prefix_len) { + if (dfield_get_len(dfield2) != UNIV_SQL_NULL) { - cur_type = dict_col_get_type - (dict_field_get_col(ind_field)); + storage_len = dtype_get_at_most_n_mbchars( + col->prtype, + col->mbminlen, col->mbmaxlen, + ind_field->prefix_len, + dfield_get_len(dfield2), + dfield2->data); - storage_len = dtype_get_at_most_n_mbchars - (cur_type, ind_field->prefix_len, - dfield_get_len(dfield2), dfield2->data); + dfield_set_len(dfield, storage_len); + } - dfield_set_len(dfield, storage_len); + dfield_get_type(dfield)->len = ind_field->prefix_len; } } @@ -192,7 +194,6 @@ row_build( dtuple_t* row; dict_table_t* table; dict_field_t* ind_field; - dict_col_t* col; dfield_t* dfield; ulint n_fields; byte* field; @@ -227,8 +228,8 @@ row_build( row = dtuple_create(heap, row_len); - dtuple_set_info_bits(row, rec_get_info_bits - (rec, dict_table_is_comp(table))); + dtuple_set_info_bits(row, rec_get_info_bits( + rec, dict_table_is_comp(table))); n_fields = rec_offs_n_fields(offsets); @@ -239,7 +240,9 @@ row_build( if (ind_field->prefix_len == 0) { - col = dict_field_get_col(ind_field); + const dict_col_t* col + = dict_field_get_col(ind_field); + dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); field = rec_get_nth_field(rec, offsets, i, &len); @@ -416,17 +419,22 @@ row_build_row_ref( column, or the full column, and we must adjust the length accordingly. 
*/ - clust_col_prefix_len = dict_index_get_nth_field - (clust_index, i)->prefix_len; + clust_col_prefix_len = dict_index_get_nth_field( + clust_index, i)->prefix_len; if (clust_col_prefix_len > 0) { if (len != UNIV_SQL_NULL) { + const dtype_t* dtype + = dfield_get_type(dfield); + dfield_set_len(dfield, - dtype_get_at_most_n_mbchars - (dfield_get_type(dfield), - clust_col_prefix_len, len, - (char*) field)); + dtype_get_at_most_n_mbchars( + dtype->prtype, + dtype->mbminlen, + dtype->mbmaxlen, + clust_col_prefix_len, + len, (char*) field)); } } } @@ -513,17 +521,22 @@ notfound: column, or the full column, and we must adjust the length accordingly. */ - clust_col_prefix_len = dict_index_get_nth_field - (clust_index, i)->prefix_len; + clust_col_prefix_len = dict_index_get_nth_field( + clust_index, i)->prefix_len; if (clust_col_prefix_len > 0) { if (len != UNIV_SQL_NULL) { + const dtype_t* dtype + = dfield_get_type(dfield); + dfield_set_len(dfield, - dtype_get_at_most_n_mbchars - (dfield_get_type(dfield), - clust_col_prefix_len, len, - (char*) field)); + dtype_get_at_most_n_mbchars( + dtype->prtype, + dtype->mbminlen, + dtype->mbmaxlen, + clust_col_prefix_len, + len, (char*) field)); } } } @@ -550,13 +563,8 @@ row_build_row_ref_from_row( directly into data of this row */ { dict_index_t* clust_index; - dict_field_t* field; - dfield_t* dfield; - dfield_t* dfield2; - dict_col_t* col; ulint ref_len; ulint i; - dtype_t* cur_type; ut_ad(ref && table && row); @@ -567,6 +575,11 @@ row_build_row_ref_from_row( ut_ad(ref_len == dtuple_get_n_fields(ref)); for (i = 0; i < ref_len; i++) { + const dict_col_t* col; + dict_field_t* field; + dfield_t* dfield; + dfield_t* dfield2; + dfield = dtuple_get_nth_field(ref, i); field = dict_index_get_nth_field(clust_index, i); @@ -580,12 +593,9 @@ row_build_row_ref_from_row( if (field->prefix_len > 0 && dfield->len != UNIV_SQL_NULL) { - cur_type = dict_col_get_type - (dict_field_get_col(field)); - - dfield->len = dtype_get_at_most_n_mbchars - (cur_type, field->prefix_len, - dfield->len, dfield->data); + dfield->len = dtype_get_at_most_n_mbchars( + col->prtype, col->mbminlen, col->mbmaxlen, + field->prefix_len, dfield->len, dfield->data); } } diff --git a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c index 06aeca509d3..c6b117cdafa 100644 --- a/storage/innobase/row/row0sel.c +++ b/storage/innobase/row/row0sel.c @@ -45,9 +45,6 @@ to que_run_threads: this is to allow canceling runaway queries */ #define SEL_COST_LIMIT 100 -/* The lower limit for what we consider a "big" row */ -#define BIG_ROW_SIZE 1024 - /* Flags for search shortcut */ #define SEL_FOUND 0 #define SEL_EXHAUSTED 1 @@ -72,15 +69,12 @@ row_sel_sec_rec_is_for_clust_rec( rec_t* clust_rec, /* in: clustered index record */ dict_index_t* clust_index) /* in: clustered index */ { - dict_field_t* ifield; - dict_col_t* col; byte* sec_field; ulint sec_len; byte* clust_field; ulint clust_len; ulint n; ulint i; - dtype_t* cur_type; mem_heap_t* heap = NULL; ulint clust_offsets_[REC_OFFS_NORMAL_SIZE]; ulint sec_offsets_[REC_OFFS_SMALL_SIZE]; @@ -99,26 +93,26 @@ row_sel_sec_rec_is_for_clust_rec( n = dict_index_get_n_ordering_defined_by_user(sec_index); for (i = 0; i < n; i++) { + const dict_field_t* ifield; + const dict_col_t* col; + ifield = dict_index_get_nth_field(sec_index, i); col = dict_field_get_col(ifield); - clust_field = rec_get_nth_field(clust_rec, clust_offs, - dict_col_get_clust_pos(col), - &clust_len); + clust_field = rec_get_nth_field( + clust_rec, clust_offs, + dict_col_get_clust_pos(col, 
clust_index), &clust_len); sec_field = rec_get_nth_field(sec_rec, sec_offs, i, &sec_len); - if (ifield->prefix_len > 0 - && clust_len != UNIV_SQL_NULL) { + if (ifield->prefix_len > 0 && clust_len != UNIV_SQL_NULL) { - cur_type = dict_col_get_type - (dict_field_get_col(ifield)); - - clust_len = dtype_get_at_most_n_mbchars - (cur_type, ifield->prefix_len, - clust_len, (char*) clust_field); + clust_len = dtype_get_at_most_n_mbchars( + col->prtype, col->mbminlen, col->mbmaxlen, + ifield->prefix_len, + clust_len, (char*) clust_field); } - if (0 != cmp_data_data(dict_col_get_type(col), + if (0 != cmp_data_data(col->mtype, col->prtype, clust_field, clust_len, sec_field, sec_len)) { is_equal = FALSE; @@ -319,8 +313,8 @@ row_sel_fetch_columns( heap = mem_heap_create(1); - data = btr_rec_copy_externally_stored_field - (rec, offsets, field_no, &len, heap); + data = btr_rec_copy_externally_stored_field( + rec, offsets, field_no, &len, heap); ut_a(len != UNIV_SQL_NULL); @@ -557,9 +551,9 @@ row_sel_build_prev_vers( plan->old_vers_heap = mem_heap_create(512); } - err = row_vers_build_for_consistent_read - (rec, mtr, plan->index, offsets, read_view, offset_heap, - plan->old_vers_heap, old_vers); + err = row_vers_build_for_consistent_read( + rec, mtr, plan->index, offsets, read_view, offset_heap, + plan->old_vers_heap, old_vers); return(err); } @@ -592,9 +586,9 @@ row_sel_build_committed_vers_for_mysql( prebuilt->old_vers_heap = mem_heap_create(200); } - err = row_vers_build_for_semi_consistent_read - (rec, mtr, clust_index, offsets, offset_heap, - prebuilt->old_vers_heap, old_vers); + err = row_vers_build_for_semi_consistent_read( + rec, mtr, clust_index, offsets, offset_heap, + prebuilt->old_vers_heap, old_vers); return(err); } @@ -754,9 +748,9 @@ row_sel_get_clust_rec( lock_type = LOCK_ORDINARY; } - err = lock_clust_rec_read_check_and_lock - (0, clust_rec, index, offsets, - node->row_lock_mode, lock_type, thr); + err = lock_clust_rec_read_check_and_lock( + 0, clust_rec, index, offsets, + node->row_lock_mode, lock_type, thr); if (err != DB_SUCCESS) { @@ -800,8 +794,8 @@ row_sel_get_clust_rec( exist in our snapshot. 
*/ if ((old_vers - || rec_get_deleted_flag(rec, dict_table_is_comp - (plan->table))) + || rec_get_deleted_flag(rec, dict_table_is_comp( + plan->table))) && !row_sel_sec_rec_is_for_clust_rec(rec, plan->index, clust_rec, index)) { goto func_exit; @@ -850,11 +844,11 @@ sel_set_rec_lock( } if (index->type & DICT_CLUSTERED) { - err = lock_clust_rec_read_check_and_lock - (0, rec, index, offsets, mode, type, thr); + err = lock_clust_rec_read_check_and_lock( + 0, rec, index, offsets, mode, type, thr); } else { - err = lock_sec_rec_read_check_and_lock - (0, rec, index, offsets, mode, type, thr); + err = lock_sec_rec_read_check_and_lock( + 0, rec, index, offsets, mode, type, thr); } return(err); @@ -1246,7 +1240,7 @@ table_loop: if (consistent_read && plan->unique_search && !plan->pcur_is_open && !plan->must_get_clust - && (plan->table->max_row_size < BIG_ROW_SIZE)) { + && !plan->table->big_rows) { if (!search_latch_locked) { rw_lock_s_lock(&btr_search_latch); @@ -1506,13 +1500,13 @@ skip_lock: } if (old_vers == NULL) { - offsets = rec_get_offsets - (rec, index, offsets, - ULINT_UNDEFINED, &heap); - row_sel_fetch_columns - (index, rec, offsets, - UT_LIST_GET_FIRST - (plan->columns)); + offsets = rec_get_offsets( + rec, index, offsets, + ULINT_UNDEFINED, &heap); + row_sel_fetch_columns( + index, rec, offsets, + UT_LIST_GET_FIRST( + plan->columns)); if (!row_sel_test_end_conds(plan)) { @@ -1651,7 +1645,7 @@ skip_lock: if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT) || plan->unique_search || plan->no_prefetch - || (plan->table->max_row_size >= BIG_ROW_SIZE)) { + || plan->table->big_rows) { /* No prefetch in operation: go to the next table */ @@ -1922,8 +1916,8 @@ row_sel_step( if (node->consistent_read) { /* Assign a read view for the query */ - node->read_view = trx_assign_read_view - (thr_get_trx(thr)); + node->read_view = trx_assign_read_view( + thr_get_trx(thr)); } else { if (node->set_x_locks) { i_lock_mode = LOCK_IX; @@ -2010,8 +2004,8 @@ fetch_step( sel_assign_into_var_values(node->into_list, sel_node); } else { - void* ret = (*node->func->func) - (sel_node, node->func->arg); + void* ret = (*node->func->func)( + sel_node, node->func->arg); if (!ret) { sel_node->state @@ -2249,8 +2243,7 @@ row_sel_convert_mysql_key_to_innobase( while (key_ptr < key_end) { - ut_a(dict_col_get_type(field->col)->mtype - == dfield_get_type(dfield)->mtype); + ut_a(field->col->mtype == dfield_get_type(dfield)->mtype); data_offset = 0; is_null = FALSE; @@ -2334,11 +2327,11 @@ row_sel_convert_mysql_key_to_innobase( /* Storing may use at most data_len bytes of buf */ if (!is_null) { - row_mysql_store_col_in_innobase_format - (dfield, buf, - FALSE, /* MySQL key value format col */ - key_ptr + data_offset, data_len, - dict_table_is_comp(index->table)); + row_mysql_store_col_in_innobase_format( + dfield, buf, + FALSE, /* MySQL key value format col */ + key_ptr + data_offset, data_len, + dict_table_is_comp(index->table)); buf += data_len; } @@ -2405,9 +2398,9 @@ row_sel_store_row_id_to_prebuilt( ut_ad(rec_offs_validate(index_rec, index, offsets)); - data = rec_get_nth_field - (index_rec, offsets, - dict_index_get_sys_col_pos(index, DATA_ROW_ID), &len); + data = rec_get_nth_field( + index_rec, offsets, + dict_index_get_sys_col_pos(index, DATA_ROW_ID), &len); if (len != DATA_ROW_ID_LEN) { fprintf(stderr, @@ -2480,8 +2473,8 @@ row_sel_field_store_in_mysql_format( length of the data to the first byte or the first two bytes of dest. 
*/ - dest = row_mysql_store_true_var_len - (dest, len, templ->mysql_length_bytes); + dest = row_mysql_store_true_var_len( + dest, len, templ->mysql_length_bytes); } /* Copy the actual data */ @@ -2614,8 +2607,8 @@ row_sel_store_mysql_rec( if (UNIV_UNLIKELY(templ->type == DATA_BLOB)) { if (prebuilt->blob_heap == NULL) { - prebuilt->blob_heap = mem_heap_create - (UNIV_PAGE_SIZE); + prebuilt->blob_heap = mem_heap_create( + UNIV_PAGE_SIZE); } heap = prebuilt->blob_heap; @@ -2630,9 +2623,9 @@ row_sel_store_mysql_rec( already run out of memory in the next call, which causes an assert */ - data = btr_rec_copy_externally_stored_field - (rec, offsets, templ->rec_field_no, - &len, heap); + data = btr_rec_copy_externally_stored_field( + rec, offsets, templ->rec_field_no, + &len, heap); ut_a(len != UNIV_SQL_NULL); } else { @@ -2643,9 +2636,9 @@ row_sel_store_mysql_rec( } if (len != UNIV_SQL_NULL) { - row_sel_field_store_in_mysql_format - (mysql_rec + templ->mysql_col_offset, - templ, data, len); + row_sel_field_store_in_mysql_format( + mysql_rec + templ->mysql_col_offset, + templ, data, len); /* Cleanup */ if (extern_field_heap) { @@ -2747,9 +2740,9 @@ row_sel_build_prev_vers_for_mysql( prebuilt->old_vers_heap = mem_heap_create(200); } - err = row_vers_build_for_consistent_read - (rec, mtr, clust_index, offsets, read_view, offset_heap, - prebuilt->old_vers_heap, old_vers); + err = row_vers_build_for_consistent_read( + rec, mtr, clust_index, offsets, read_view, offset_heap, + prebuilt->old_vers_heap, old_vers); return(err); } @@ -2853,9 +2846,9 @@ row_sel_get_clust_rec_for_mysql( the clust rec with a unique condition, hence we set a LOCK_REC_NOT_GAP type lock */ - err = lock_clust_rec_read_check_and_lock - (0, clust_rec, clust_index, *offsets, - prebuilt->select_lock_type, LOCK_REC_NOT_GAP, thr); + err = lock_clust_rec_read_check_and_lock( + 0, clust_rec, clust_index, *offsets, + prebuilt->select_lock_type, LOCK_REC_NOT_GAP, thr); if (err != DB_SUCCESS) { goto err_exit; @@ -2870,15 +2863,16 @@ row_sel_get_clust_rec_for_mysql( then we never look for an earlier version */ if (trx->isolation_level > TRX_ISO_READ_UNCOMMITTED - && !lock_clust_rec_cons_read_sees - (clust_rec, clust_index, *offsets, trx->read_view)) { + && !lock_clust_rec_cons_read_sees( + clust_rec, clust_index, *offsets, + trx->read_view)) { /* The following call returns 'offsets' associated with 'old_vers' */ - err = row_sel_build_prev_vers_for_mysql - (trx->read_view, clust_index, - prebuilt, clust_rec, offsets, offset_heap, - &old_vers, mtr); + err = row_sel_build_prev_vers_for_mysql( + trx->read_view, clust_index, prebuilt, + clust_rec, offsets, offset_heap, &old_vers, + mtr); if (err != DB_SUCCESS) { @@ -2901,16 +2895,18 @@ row_sel_get_clust_rec_for_mysql( visit through secondary index records that would not really exist in our snapshot. 
*/ - if (clust_rec && (old_vers || rec_get_deleted_flag - (rec, dict_table_is_comp(sec_index->table))) - && !row_sel_sec_rec_is_for_clust_rec - (rec, sec_index, clust_rec, clust_index)) { + if (clust_rec && (old_vers || rec_get_deleted_flag( + rec, + dict_table_is_comp( + sec_index->table))) + && !row_sel_sec_rec_is_for_clust_rec( + rec, sec_index, clust_rec, clust_index)) { clust_rec = NULL; } else { #ifdef UNIV_SEARCH_DEBUG ut_a(clust_rec == NULL - || row_sel_sec_rec_is_for_clust_rec - (rec, sec_index, clust_rec, clust_index)); + || row_sel_sec_rec_is_for_clust_rec( + rec, sec_index, clust_rec, clust_index)); #endif } } @@ -3018,8 +3014,8 @@ row_sel_pop_cached_row_for_mysql( if (UNIV_UNLIKELY(prebuilt->keep_other_fields_on_keyread)) { /* Copy cache record field by field, don't touch fields that are not covered by current key */ - cached_rec = prebuilt->fetch_cache - [prebuilt->fetch_cache_first]; + cached_rec = prebuilt->fetch_cache[ + prebuilt->fetch_cache_first]; for (i = 0; i < prebuilt->n_template; i++) { templ = prebuilt->mysql_template + i; @@ -3031,8 +3027,7 @@ row_sel_pop_cached_row_for_mysql( if (templ->mysql_null_bit_mask) { buf[templ->mysql_null_byte_offset] ^= (buf[templ->mysql_null_byte_offset] - ^ cached_rec - [templ->mysql_null_byte_offset]) + ^ cached_rec[templ->mysql_null_byte_offset]) & (byte)templ->mysql_null_bit_mask; } } @@ -3088,9 +3083,10 @@ row_sel_push_cache_row_for_mysql( ut_ad(prebuilt->fetch_cache_first == 0); - if (UNIV_UNLIKELY(!row_sel_store_mysql_rec - (prebuilt->fetch_cache[prebuilt->n_fetch_cached], - prebuilt, rec, offsets))) { + if (UNIV_UNLIKELY(!row_sel_store_mysql_rec( + prebuilt->fetch_cache[ + prebuilt->n_fetch_cached], + prebuilt, rec, offsets))) { ut_error; } @@ -3217,15 +3213,12 @@ row_search_for_mysql( ibool mtr_has_extra_clust_latch = FALSE; ibool moves_up = FALSE; ibool set_also_gap_locks = TRUE; - /* if the query is a plain - locking SELECT, and the isolation - level is <= TRX_ISO_READ_COMMITTED, - then this is set to FALSE */ + /* if the query is a plain locking SELECT, and the isolation level + is <= TRX_ISO_READ_COMMITTED, then this is set to FALSE */ ibool did_semi_consistent_read = FALSE; - /* if the returned record was locked - and we did a semi-consistent read - (fetch the newest committed version), - then this is set to TRUE */ + /* if the returned record was locked and we did a semi-consistent + read (fetch the newest committed version), then this is set to + TRUE */ #ifdef UNIV_SEARCH_DEBUG ulint cnt = 0; #endif /* UNIV_SEARCH_DEBUG */ @@ -3478,8 +3471,9 @@ row_search_for_mysql( trx->has_search_latch = TRUE; } #endif - switch (row_sel_try_search_shortcut_for_mysql - (&rec, prebuilt, &offsets, &heap, &mtr)) { + switch (row_sel_try_search_shortcut_for_mysql( + &rec, prebuilt, &offsets, &heap, + &mtr)) { case SEL_FOUND: #ifdef UNIV_SEARCH_DEBUG ut_a(0 == cmp_dtuple_rec(search_tuple, @@ -3559,8 +3553,8 @@ shortcut_fails_too_big_rec: /* Scan the MySQL query string; check if SELECT is the first word there */ - if (dict_str_starts_with_keyword - (trx->mysql_thd, *trx->mysql_query_str, "SELECT")) { + if (dict_str_starts_with_keyword( + trx->mysql_thd, *trx->mysql_query_str, "SELECT")) { /* It is a plain locking SELECT and the isolation level is low: do not lock gaps */ @@ -3587,9 +3581,9 @@ shortcut_fails_too_big_rec: clust_index = dict_table_get_first_index(index->table); if (UNIV_LIKELY(direction != 0)) { - ibool need_to_process = sel_restore_position_for_mysql - (&same_user_rec, BTR_SEARCH_LEAF, - pcur, moves_up, &mtr); + ibool 
need_to_process = sel_restore_position_for_mysql( + &same_user_rec, BTR_SEARCH_LEAF, + pcur, moves_up, &mtr); if (UNIV_UNLIKELY(need_to_process)) { if (UNIV_UNLIKELY(prebuilt->row_read_type @@ -3621,13 +3615,13 @@ shortcut_fails_too_big_rec: pcur->trx_if_known = trx; } else { if (mode == PAGE_CUR_G) { - btr_pcur_open_at_index_side - (TRUE, index, - BTR_SEARCH_LEAF, pcur, FALSE, &mtr); + btr_pcur_open_at_index_side( + TRUE, index, BTR_SEARCH_LEAF, pcur, FALSE, + &mtr); } else if (mode == PAGE_CUR_L) { - btr_pcur_open_at_index_side - (FALSE, index, - BTR_SEARCH_LEAF, pcur, FALSE, &mtr); + btr_pcur_open_at_index_side( + FALSE, index, BTR_SEARCH_LEAF, pcur, FALSE, + &mtr); } } @@ -3759,7 +3753,7 @@ wrong_offs: "InnoDB: Index corruption: rec offs %lu" " next offs %lu, page no %lu,\n" "InnoDB: ", - (ulong) ut_align_offset(rec, UNIV_PAGE_SIZE), + (ulong) page_offset(rec), (ulong) next_offs, (ulong) buf_frame_get_page_no(rec)); dict_index_name_print(stderr, trx, index); @@ -3779,7 +3773,7 @@ wrong_offs: "InnoDB: Index corruption: rec offs %lu" " next offs %lu, page no %lu,\n" "InnoDB: ", - (ulong) ut_align_offset(rec, UNIV_PAGE_SIZE), + (ulong) page_offset(rec), (ulong) next_offs, (ulong) buf_frame_get_page_no(rec)); dict_index_name_print(stderr, trx, index); @@ -3804,7 +3798,7 @@ wrong_offs: "InnoDB: Index corruption: rec offs %lu" " next offs %lu, page no %lu,\n" "InnoDB: ", - (ulong) ut_align_offset(rec, UNIV_PAGE_SIZE), + (ulong) page_offset(rec), (ulong) next_offs, (ulong) buf_frame_get_page_no(rec)); dict_index_name_print(stderr, trx, index); @@ -3839,10 +3833,10 @@ wrong_offs: option is not set or this session is not using a READ COMMITTED isolation level. */ - err = sel_set_rec_lock - (rec, index, offsets, - prebuilt->select_lock_type, - LOCK_GAP, thr); + err = sel_set_rec_lock( + rec, index, offsets, + prebuilt->select_lock_type, LOCK_GAP, + thr); if (err != DB_SUCCESS) { @@ -3874,10 +3868,10 @@ wrong_offs: option is not set or this session is not using a READ COMMITTED isolation level. 
*/ - err = sel_set_rec_lock - (rec, index, offsets, - prebuilt->select_lock_type, - LOCK_GAP, thr); + err = sel_set_rec_lock( + rec, index, offsets, + prebuilt->select_lock_type, LOCK_GAP, + thr); if (err != DB_SUCCESS) { @@ -3961,9 +3955,9 @@ no_gap_lock: /* The following call returns 'offsets' associated with 'old_vers' */ - err = row_sel_build_committed_vers_for_mysql - (clust_index, prebuilt, rec, - &offsets, &heap, &old_vers, &mtr); + err = row_sel_build_committed_vers_for_mysql( + clust_index, prebuilt, rec, + &offsets, &heap, &old_vers, &mtr); if (err != DB_SUCCESS) { @@ -3977,8 +3971,8 @@ no_gap_lock: goto lock_wait_or_error; } if (UNIV_LIKELY(trx->wait_lock != NULL)) { - lock_cancel_waiting_and_release - (trx->wait_lock); + lock_cancel_waiting_and_release( + trx->wait_lock); trx_reset_new_rec_lock_info(trx); } else { mutex_exit(&kernel_mutex); @@ -4025,16 +4019,16 @@ no_gap_lock: by skipping this lookup */ if (UNIV_LIKELY(srv_force_recovery < 5) - && !lock_clust_rec_cons_read_sees - (rec, index, offsets, trx->read_view)) { + && !lock_clust_rec_cons_read_sees( + rec, index, offsets, trx->read_view)) { rec_t* old_vers; /* The following call returns 'offsets' associated with 'old_vers' */ - err = row_sel_build_prev_vers_for_mysql - (trx->read_view, clust_index, - prebuilt, rec, &offsets, &heap, - &old_vers, &mtr); + err = row_sel_build_prev_vers_for_mysql( + trx->read_view, clust_index, + prebuilt, rec, &offsets, &heap, + &old_vers, &mtr); if (err != DB_SUCCESS) { @@ -4084,6 +4078,23 @@ no_gap_lock: row_unlock_for_mysql(prebuilt, TRUE); } + /* This is an optimization to skip setting the next key lock + on the record that follows this delete-marked record. This + optimization works because of the unique search criteria + which precludes the presence of a range lock between this + delete marked record and the record following it. + + For now this is applicable only to clustered indexes while + doing a unique search. There is scope for further optimization + applicable to unique secondary indexes. 
Current behaviour is + to widen the scope of a lock on an already delete marked record + if the same record is deleted twice by the same transaction */ + if (index == clust_index && unique_search) { + err = DB_RECORD_NOT_FOUND; + + goto normal_return; + } + goto next_rec; } @@ -4209,9 +4220,9 @@ requires_clust_rec: if (prebuilt->clust_index_was_generated) { if (result_rec != rec) { - offsets = rec_get_offsets - (rec, index, offsets, - ULINT_UNDEFINED, &heap); + offsets = rec_get_offsets( + rec, index, offsets, ULINT_UNDEFINED, + &heap); } row_sel_store_row_id_to_prebuilt(prebuilt, rec, index, offsets); @@ -4466,8 +4477,8 @@ row_search_check_if_query_cache_permitted( if (trx->isolation_level >= TRX_ISO_REPEATABLE_READ && !trx->read_view) { - trx->read_view = read_view_open_now - (trx->id, trx->global_read_view_heap); + trx->read_view = read_view_open_now( + trx->id, trx->global_read_view_heap); trx->global_read_view = trx->read_view; } } diff --git a/storage/innobase/row/row0uins.c b/storage/innobase/row/row0uins.c index 822b7cfb9f2..ce9ab792204 100644 --- a/storage/innobase/row/row0uins.c +++ b/storage/innobase/row/row0uins.c @@ -286,8 +286,8 @@ row_undo_ins( return(DB_SUCCESS); } - node->index = dict_table_get_next_index - (dict_table_get_first_index(node->table)); + node->index = dict_table_get_next_index( + dict_table_get_first_index(node->table)); while (node->index != NULL) { entry = row_build_index_entry(node->row, node->index, diff --git a/storage/innobase/row/row0umod.c b/storage/innobase/row/row0umod.c index 778f240d18e..68139da116e 100644 --- a/storage/innobase/row/row0umod.c +++ b/storage/innobase/row/row0umod.c @@ -108,12 +108,12 @@ row_undo_mod_clust_low( } else { ut_ad(mode == BTR_MODIFY_TREE); - err = btr_cur_pessimistic_update - (BTR_NO_LOCKING_FLAG - | BTR_NO_UNDO_LOG_FLAG - | BTR_KEEP_SYS_FLAG, - btr_cur, &dummy_big_rec, node->update, - node->cmpl_info, thr, mtr); + err = btr_cur_pessimistic_update( + BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG + | BTR_KEEP_SYS_FLAG, + btr_cur, &dummy_big_rec, node->update, + node->cmpl_info, thr, mtr); } return(err); @@ -444,8 +444,8 @@ row_undo_mod_del_unmark_sec_and_undo_update( ut_a(err == DB_SUCCESS); heap = mem_heap_create(100); - update = row_upd_build_sec_rec_difference_binary - (index, entry, btr_cur_get_rec(btr_cur), trx, heap); + update = row_upd_build_sec_rec_difference_binary( + index, entry, btr_cur_get_rec(btr_cur), trx, heap); if (upd_get_n_fields(update) == 0) { /* Do nothing */ @@ -454,18 +454,18 @@ row_undo_mod_del_unmark_sec_and_undo_update( /* Try an optimistic updating of the record, keeping changes within the page */ - err = btr_cur_optimistic_update - (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG, - btr_cur, update, 0, thr, &mtr); + err = btr_cur_optimistic_update( + BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG, + btr_cur, update, 0, thr, &mtr); if (err == DB_OVERFLOW || err == DB_UNDERFLOW) { err = DB_FAIL; } } else { ut_a(mode == BTR_MODIFY_TREE); - err = btr_cur_pessimistic_update - (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG, - btr_cur, &dummy_big_rec, - update, 0, thr, &mtr); + err = btr_cur_pessimistic_update( + BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG, + btr_cur, &dummy_big_rec, + update, 0, thr, &mtr); } mem_heap_free(heap); @@ -538,11 +538,11 @@ row_undo_mod_del_mark_sec( entry = row_build_index_entry(node->row, index, heap); - err = row_undo_mod_del_unmark_sec_and_undo_update - (BTR_MODIFY_LEAF, thr, index, entry); + err = row_undo_mod_del_unmark_sec_and_undo_update( + BTR_MODIFY_LEAF, thr, index, entry); if (err == 
DB_FAIL) { - err = row_undo_mod_del_unmark_sec_and_undo_update - (BTR_MODIFY_TREE, thr, index, entry); + err = row_undo_mod_del_unmark_sec_and_undo_update( + BTR_MODIFY_TREE, thr, index, entry); } if (err != DB_SUCCESS) { @@ -620,11 +620,11 @@ row_undo_mod_upd_exist_sec( row_upd_index_replace_new_col_vals(entry, index, node->update, NULL); - err = row_undo_mod_del_unmark_sec_and_undo_update - (BTR_MODIFY_LEAF, thr, index, entry); + err = row_undo_mod_del_unmark_sec_and_undo_update( + BTR_MODIFY_LEAF, thr, index, entry); if (err == DB_FAIL) { - err = row_undo_mod_del_unmark_sec_and_undo_update - (BTR_MODIFY_TREE, thr, index, entry); + err = row_undo_mod_del_unmark_sec_and_undo_update( + BTR_MODIFY_TREE, thr, index, entry); } if (err != DB_SUCCESS) { @@ -736,8 +736,8 @@ row_undo_mod( return(DB_SUCCESS); } - node->index = dict_table_get_next_index - (dict_table_get_first_index(node->table)); + node->index = dict_table_get_next_index( + dict_table_get_first_index(node->table)); if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) { diff --git a/storage/innobase/row/row0upd.c b/storage/innobase/row/row0upd.c index 84f5f2a1acf..6533ac93d7d 100644 --- a/storage/innobase/row/row0upd.c +++ b/storage/innobase/row/row0upd.c @@ -197,8 +197,9 @@ row_upd_check_references_constraints( if (foreign->referenced_index == index && (node->is_delete - || row_upd_changes_first_fields_binary - (entry, index, node->update, foreign->n_fields))) { + || row_upd_changes_first_fields_binary( + entry, index, node->update, + foreign->n_fields))) { if (foreign->foreign_table == NULL) { dict_table_get(foreign->foreign_table_name); @@ -218,8 +219,8 @@ row_upd_check_references_constraints( But the counter on the table protects 'foreign' from being dropped while the check is running. */ - err = row_ins_check_foreign_constraint - (FALSE, foreign, table, entry, thr); + err = row_ins_check_foreign_constraint( + FALSE, foreign, table, entry, thr); if (foreign->foreign_table) { mutex_enter(&(dict_sys->mutex)); @@ -235,8 +236,8 @@ row_upd_check_references_constraints( if (err != DB_SUCCESS) { if (got_s_lock) { - row_mysql_unfreeze_data_dictionary - (trx); + row_mysql_unfreeze_data_dictionary( + trx); } mem_heap_free(heap); @@ -388,9 +389,9 @@ row_upd_changes_field_size_or_external( this fix also to 4.0. The merge to 5.0 will be made manually immediately after we commit this to 4.1. 
*/ - new_len = dtype_get_sql_null_size - (dict_index_get_nth_type(index, - upd_field->field_no)); + new_len = dict_col_get_sql_null_size( + dict_index_get_nth_col(index, + upd_field->field_no)); } old_len = rec_offs_nth_size(offsets, upd_field->field_no); @@ -479,8 +480,8 @@ row_upd_write_sys_vals_to_log( ut_ad(mtr); log_ptr += mach_write_compressed(log_ptr, - dict_index_get_sys_col_pos - (index, DATA_TRX_ID)); + dict_index_get_sys_col_pos( + index, DATA_TRX_ID)); trx_write_roll_ptr(log_ptr, roll_ptr); log_ptr += DATA_ROLL_PTR_LEN; @@ -884,7 +885,6 @@ row_upd_index_replace_new_col_vals_index_pos( ulint j; ulint i; ulint n_fields; - dtype_t* cur_type; ut_ad(index); @@ -913,8 +913,8 @@ row_upd_index_replace_new_col_vals_index_pos( dfield_set_data(dfield, new_val->data, new_val->len); if (heap && new_val->len != UNIV_SQL_NULL) { - dfield->data = mem_heap_alloc - (heap, new_val->len); + dfield->data = mem_heap_alloc( + heap, new_val->len); ut_memcpy(dfield->data, new_val->data, new_val->len); } @@ -922,13 +922,17 @@ row_upd_index_replace_new_col_vals_index_pos( if (field->prefix_len > 0 && new_val->len != UNIV_SQL_NULL) { - cur_type = dict_col_get_type - (dict_field_get_col(field)); + const dict_col_t* col + = dict_field_get_col(field); dfield->len - = dtype_get_at_most_n_mbchars - (cur_type, field->prefix_len, - new_val->len, new_val->data); + = dtype_get_at_most_n_mbchars( + col->prtype, + col->mbminlen, + col->mbmaxlen, + field->prefix_len, + new_val->len, + new_val->data); } } } @@ -952,27 +956,31 @@ row_upd_index_replace_new_col_vals( copy the new values, set this as NULL if you do not want allocation */ { - dict_field_t* field; upd_field_t* upd_field; dfield_t* dfield; dfield_t* new_val; ulint j; ulint i; - dtype_t* cur_type; + dict_index_t* clust_index; ut_ad(index); + clust_index = dict_table_get_first_index(index->table); + dtuple_set_info_bits(entry, update->info_bits); for (j = 0; j < dict_index_get_n_fields(index); j++) { - field = dict_index_get_nth_field(index, j); + ulint clust_pos; + dict_field_t* field = dict_index_get_nth_field(index, j); + + clust_pos = dict_col_get_clust_pos(field->col, clust_index); for (i = 0; i < upd_get_n_fields(update); i++) { upd_field = upd_get_nth_field(update, i); - if (upd_field->field_no == field->col->clust_pos) { + if (upd_field->field_no == clust_pos) { dfield = dtuple_get_nth_field(entry, j); @@ -981,8 +989,8 @@ row_upd_index_replace_new_col_vals( dfield_set_data(dfield, new_val->data, new_val->len); if (heap && new_val->len != UNIV_SQL_NULL) { - dfield->data = mem_heap_alloc - (heap, new_val->len); + dfield->data = mem_heap_alloc( + heap, new_val->len); ut_memcpy(dfield->data, new_val->data, new_val->len); } @@ -990,13 +998,17 @@ row_upd_index_replace_new_col_vals( if (field->prefix_len > 0 && new_val->len != UNIV_SQL_NULL) { - cur_type = dict_col_get_type - (dict_field_get_col(field)); + const dict_col_t* col + = dict_field_get_col(field); dfield->len - = dtype_get_at_most_n_mbchars - (cur_type, field->prefix_len, - new_val->len, new_val->data); + = dtype_get_at_most_n_mbchars( + col->prtype, + col->mbminlen, + col->mbmaxlen, + field->prefix_len, + new_val->len, + new_val->data); } } } @@ -1025,30 +1037,34 @@ row_upd_changes_ord_field_binary( field numbers in this MUST be clustered index positions! 
*/ { - upd_field_t* upd_field; - dict_field_t* ind_field; - dict_col_t* col; ulint n_unique; ulint n_upd_fields; - ulint col_pos; - ulint col_no; ulint i, j; + dict_index_t* clust_index; ut_ad(update && index); n_unique = dict_index_get_n_unique(index); n_upd_fields = upd_get_n_fields(update); + clust_index = dict_table_get_first_index(index->table); + for (i = 0; i < n_unique; i++) { + const dict_field_t* ind_field; + const dict_col_t* col; + ulint col_pos; + ulint col_no; + ind_field = dict_index_get_nth_field(index, i); col = dict_field_get_col(ind_field); - col_pos = dict_col_get_clust_pos(col); + col_pos = dict_col_get_clust_pos(col, clust_index); col_no = dict_col_get_no(col); for (j = 0; j < n_upd_fields; j++) { - upd_field = upd_get_nth_field(update, j); + upd_field_t* upd_field + = upd_get_nth_field(update, j); /* Note that if the index field is a column prefix then it may be that row does not contain an externally @@ -1058,9 +1074,9 @@ row_upd_changes_ord_field_binary( if (col_pos == upd_field->field_no && (row == NULL || ind_field->prefix_len > 0 - || !dfield_datas_are_binary_equal - (dtuple_get_nth_field(row, col_no), - &(upd_field->new_val)))) { + || !dfield_datas_are_binary_equal( + dtuple_get_nth_field(row, col_no), + &(upd_field->new_val)))) { return(TRUE); } @@ -1092,8 +1108,8 @@ row_upd_changes_some_index_ord_field_binary( upd_field = upd_get_nth_field(update, i); - if (dict_field_get_col(dict_index_get_nth_field - (index, upd_field->field_no)) + if (dict_field_get_col(dict_index_get_nth_field( + index, upd_field->field_no)) ->ord_part) { return(TRUE); @@ -1117,34 +1133,37 @@ row_upd_changes_first_fields_binary( upd_t* update, /* in: update vector for the row */ ulint n) /* in: how many first fields to check */ { - upd_field_t* upd_field; - dict_field_t* ind_field; - dict_col_t* col; ulint n_upd_fields; - ulint col_pos; ulint i, j; + dict_index_t* clust_index; - ut_a(update && index); - ut_a(n <= dict_index_get_n_fields(index)); + ut_ad(update && index); + ut_ad(n <= dict_index_get_n_fields(index)); n_upd_fields = upd_get_n_fields(update); + clust_index = dict_table_get_first_index(index->table); for (i = 0; i < n; i++) { + const dict_field_t* ind_field; + const dict_col_t* col; + ulint col_pos; + ind_field = dict_index_get_nth_field(index, i); col = dict_field_get_col(ind_field); - col_pos = dict_col_get_clust_pos(col); + col_pos = dict_col_get_clust_pos(col, clust_index); ut_a(ind_field->prefix_len == 0); for (j = 0; j < n_upd_fields; j++) { - upd_field = upd_get_nth_field(update, j); + upd_field_t* upd_field + = upd_get_nth_field(update, j); if (col_pos == upd_field->field_no - && !dfield_datas_are_binary_equal - (dtuple_get_nth_field(entry, i), - &(upd_field->new_val))) { + && !dfield_datas_are_binary_equal( + dtuple_get_nth_field(entry, i), + &(upd_field->new_val))) { return(TRUE); } @@ -1322,9 +1341,9 @@ row_upd_sec_index_entry( /* NOTE that the following call loses the position of pcur ! */ - err = row_upd_check_references_constraints - (node, &pcur, index->table, - index, thr, &mtr); + err = row_upd_check_references_constraints( + node, &pcur, index->table, + index, thr, &mtr); if (err != DB_SUCCESS) { goto close_cur; @@ -1356,8 +1375,8 @@ close_cur: } /*************************************************************** -Updates secondary index record if it is changed in the row update. This -should be quite rare in database applications. */ +Updates the secondary index record if it is changed in the row update or +deletes it if this is a delete. 
*/ UNIV_INLINE ulint row_upd_sec_step( @@ -1434,17 +1453,17 @@ row_upd_clust_rec_by_insert( free those externally stored fields even if the delete marked record is removed from the index tree, or updated. */ - btr_cur_mark_extern_inherited_fields - (btr_cur_get_rec(btr_cur), - rec_get_offsets(btr_cur_get_rec(btr_cur), - dict_table_get_first_index(table), - offsets_, ULINT_UNDEFINED, &heap), - node->update, mtr); + btr_cur_mark_extern_inherited_fields( + btr_cur_get_rec(btr_cur), + rec_get_offsets(btr_cur_get_rec(btr_cur), + dict_table_get_first_index(table), + offsets_, ULINT_UNDEFINED, &heap), + node->update, mtr); if (check_ref) { /* NOTE that the following call loses the position of pcur ! */ - err = row_upd_check_references_constraints - (node, pcur, table, index, thr, mtr); + err = row_upd_check_references_constraints( + node, pcur, table, index, thr, mtr); if (err != DB_SUCCESS) { mtr_commit(mtr); if (UNIV_LIKELY_NULL(heap)) { @@ -1573,10 +1592,10 @@ row_upd_clust_rec( ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr)); rec = btr_cur_get_rec(btr_cur); - err = btr_store_big_rec_extern_fields - (index, rec, - rec_get_offsets(rec, index, offsets_, - ULINT_UNDEFINED, &heap), + err = btr_store_big_rec_extern_fields( + index, rec, + rec_get_offsets(rec, index, offsets_, + ULINT_UNDEFINED, &heap), big_rec, mtr); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); @@ -1730,8 +1749,8 @@ row_upd_clust_step( ULINT_UNDEFINED, &heap); if (!node->has_clust_rec_x_lock) { - err = lock_clust_rec_modify_check_and_lock - (0, rec, index, offsets, thr); + err = lock_clust_rec_modify_check_and_lock( + 0, rec, index, offsets, thr); if (err != DB_SUCCESS) { mtr_commit(mtr); goto exit_func; @@ -1831,14 +1850,14 @@ row_upd( ut_ad(node && thr); - if (node->in_mysql_interface) { + if (UNIV_LIKELY(node->in_mysql_interface)) { /* We do not get the cmpl_info value from the MySQL interpreter: we must calculate it on the fly: */ if (node->is_delete - || row_upd_changes_some_index_ord_field_binary - (node->table, node->update)) { + || row_upd_changes_some_index_ord_field_binary( + node->table, node->update)) { node->cmpl_info = 0; } else { node->cmpl_info = UPD_NODE_NO_ORD_CHANGE; @@ -2046,9 +2065,9 @@ row_upd_in_place_in_select( } row_upd_eval_new_vals(node->update); - ut_ad(!rec_get_deleted_flag - (btr_pcur_get_rec(pcur), - dict_table_is_comp(btr_cur->index->table))); + ut_ad(!rec_get_deleted_flag( + btr_pcur_get_rec(pcur), + dict_table_is_comp(btr_cur->index->table))); ut_ad(node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE); ut_ad(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c index 9b51314df7b..c8b71965f75 100644 --- a/storage/innobase/row/row0vers.c +++ b/storage/innobase/row/row0vers.c @@ -158,9 +158,9 @@ row_vers_impl_x_locked_off_kernel( mem_heap_free(heap2); /* free version and clust_offsets */ if (prev_version) { - clust_offsets = rec_get_offsets - (prev_version, clust_index, NULL, - ULINT_UNDEFINED, &heap); + clust_offsets = rec_get_offsets( + prev_version, clust_index, NULL, + ULINT_UNDEFINED, &heap); row = row_build(ROW_COPY_POINTERS, clust_index, prev_version, clust_offsets, heap); entry = row_build_index_entry(row, index, heap); diff --git a/storage/innobase/srv/srv0srv.c b/storage/innobase/srv/srv0srv.c index b781a601039..5d92b913934 100644 --- a/storage/innobase/srv/srv0srv.c +++ b/storage/innobase/srv/srv0srv.c @@ -899,21 +899,21 @@ srv_init(void) table = dict_mem_table_create("SYS_DUMMY1", DICT_HDR_SPACE, 1, 0); 
dict_mem_table_add_col(table, "DUMMY", DATA_CHAR, - DATA_ENGLISH | DATA_NOT_NULL, 8, 0); + DATA_ENGLISH | DATA_NOT_NULL, 8); - srv_sys->dummy_ind1 = dict_mem_index_create - ("SYS_DUMMY1", "SYS_DUMMY1", DICT_HDR_SPACE, 0, 1); - dict_index_add_col(srv_sys->dummy_ind1, + srv_sys->dummy_ind1 = dict_mem_index_create( + "SYS_DUMMY1", "SYS_DUMMY1", DICT_HDR_SPACE, 0, 1); + dict_index_add_col(srv_sys->dummy_ind1, table, (dict_col_t*) dict_table_get_nth_col(table, 0), 0); srv_sys->dummy_ind1->table = table; /* create dummy table and index for new-style infimum and supremum */ table = dict_mem_table_create("SYS_DUMMY2", DICT_HDR_SPACE, 1, DICT_TF_COMPACT); dict_mem_table_add_col(table, "DUMMY", DATA_CHAR, - DATA_ENGLISH | DATA_NOT_NULL, 8, 0); - srv_sys->dummy_ind2 = dict_mem_index_create - ("SYS_DUMMY2", "SYS_DUMMY2", DICT_HDR_SPACE, 0, 1); - dict_index_add_col(srv_sys->dummy_ind2, + DATA_ENGLISH | DATA_NOT_NULL, 8); + srv_sys->dummy_ind2 = dict_mem_index_create( + "SYS_DUMMY2", "SYS_DUMMY2", DICT_HDR_SPACE, 0, 1); + dict_index_add_col(srv_sys->dummy_ind2, table, (dict_col_t*) dict_table_get_nth_col(table, 0), 0); srv_sys->dummy_ind2->table = table; @@ -1902,8 +1902,8 @@ loop: if (srv_innodb_status) { mutex_enter(&srv_monitor_file_mutex); rewind(srv_monitor_file); - srv_printf_innodb_monitor - (srv_monitor_file, NULL, NULL); + srv_printf_innodb_monitor(srv_monitor_file, NULL, + NULL); os_file_set_eof(srv_monitor_file); mutex_exit(&srv_monitor_file_mutex); } @@ -1984,9 +1984,9 @@ loop: granted: in that case do nothing */ if (thr_get_trx(slot->thr)->wait_lock) { - lock_cancel_waiting_and_release - (thr_get_trx(slot->thr) - ->wait_lock); + lock_cancel_waiting_and_release( + thr_get_trx(slot->thr) + ->wait_lock); } } } @@ -2493,8 +2493,8 @@ flush_loop: srv_main_thread_op_info = "flushing buffer pool pages"; if (srv_fast_shutdown < 2) { - n_pages_flushed = buf_flush_batch - (BUF_FLUSH_LIST, 100, ut_dulint_max); + n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100, + ut_dulint_max); } else { /* In the fastest shutdown we do not flush the buffer pool to data files: we set n_pages_flushed to 0 artificially. 
*/ diff --git a/storage/innobase/srv/srv0start.c b/storage/innobase/srv/srv0start.c index 2b9bd402bb7..5eaea26b75d 100644 --- a/storage/innobase/srv/srv0start.c +++ b/storage/innobase/srv/srv0start.c @@ -301,8 +301,8 @@ srv_parse_data_file_paths_and_sizes( str += (sizeof ":max:") - 1; - str = srv_parse_megabytes - (str, max_auto_extend_size); + str = srv_parse_megabytes( + str, max_auto_extend_size); } if (*str != '\0') { @@ -557,8 +557,8 @@ open_or_create_log_file( *log_file_created = FALSE; srv_normalize_path_for_win(srv_log_group_home_dirs[k]); - srv_log_group_home_dirs[k] = srv_add_path_separator_if_needed - (srv_log_group_home_dirs[k]); + srv_log_group_home_dirs[k] = srv_add_path_separator_if_needed( + srv_log_group_home_dirs[k]); ut_a(strlen(srv_log_group_home_dirs[k]) < (sizeof name) - 10 - sizeof "ib_logfile"); @@ -796,17 +796,17 @@ open_or_create_data_files( } if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) { - files[i] = os_file_create - (name, OS_FILE_OPEN_RAW, - OS_FILE_NORMAL, OS_DATA_FILE, &ret); + files[i] = os_file_create( + name, OS_FILE_OPEN_RAW, + OS_FILE_NORMAL, OS_DATA_FILE, &ret); } else if (i == 0) { - files[i] = os_file_create - (name, OS_FILE_OPEN_RETRY, - OS_FILE_NORMAL, OS_DATA_FILE, &ret); + files[i] = os_file_create( + name, OS_FILE_OPEN_RETRY, + OS_FILE_NORMAL, OS_DATA_FILE, &ret); } else { - files[i] = os_file_create - (name, OS_FILE_OPEN, - OS_FILE_NORMAL, OS_DATA_FILE, &ret); + files[i] = os_file_create( + name, OS_FILE_OPEN, OS_FILE_NORMAL, + OS_DATA_FILE, &ret); } if (!ret) { @@ -876,12 +876,12 @@ open_or_create_data_files( return(DB_ERROR); } skip_size_check: - fil_read_flushed_lsn_and_arch_log_no - (files[i], one_opened, + fil_read_flushed_lsn_and_arch_log_no( + files[i], one_opened, #ifdef UNIV_LOG_ARCHIVE - min_arch_log_no, max_arch_log_no, + min_arch_log_no, max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - min_flushed_lsn, max_flushed_lsn); + min_flushed_lsn, max_flushed_lsn); one_opened = TRUE; } else { /* We created the data file and now write it full of @@ -915,10 +915,10 @@ skip_size_check: "InnoDB: Database physically writes the" " file full: wait...\n"); - ret = os_file_set_size - (name, files[i], - srv_calc_low32(srv_data_file_sizes[i]), - srv_calc_high32(srv_data_file_sizes[i])); + ret = os_file_set_size( + name, files[i], + srv_calc_low32(srv_data_file_sizes[i]), + srv_calc_high32(srv_data_file_sizes[i])); if (!ret) { fprintf(stderr, @@ -1163,7 +1163,7 @@ innobase_start_or_create_for_mysql(void) maximum number of threads that can wait in the 'srv_conc array' for their time to enter InnoDB. */ -#if defined(__WIN__) || defined(__NETWARE__) +#if defined(__NETWARE__) /* Create less event semaphores because Win 98/ME had difficulty creating 40000 event semaphores. 
Comment from @@ -1198,9 +1198,9 @@ innobase_start_or_create_for_mysql(void) mutex_create(&srv_monitor_file_mutex, SYNC_NO_ORDER_CHECK); if (srv_innodb_status) { - srv_monitor_file_name = mem_alloc - (strlen(fil_path_to_mysql_datadir) - + 20 + sizeof "/innodb_status."); + srv_monitor_file_name = mem_alloc( + strlen(fil_path_to_mysql_datadir) + + 20 + sizeof "/innodb_status."); sprintf(srv_monitor_file_name, "%s/innodb_status.%lu", fil_path_to_mysql_datadir, os_proc_get_number()); srv_monitor_file = fopen(srv_monitor_file_name, "w+"); @@ -1471,10 +1471,9 @@ innobase_start_or_create_for_mysql(void) fprintf(stderr, "InnoDB: Starting archive" " recovery from a backup...\n"); - err = recv_recovery_from_archive_start - (min_flushed_lsn, - srv_archive_recovery_limit_lsn, - min_arch_log_no); + err = recv_recovery_from_archive_start( + min_flushed_lsn, srv_archive_recovery_limit_lsn, + min_arch_log_no); if (err != DB_SUCCESS) { return(DB_ERROR); @@ -1535,8 +1534,8 @@ innobase_start_or_create_for_mysql(void) data dictionary tables. Does that harm the scanning of the data dictionary below? */ - dict_check_tablespaces_and_store_max_id - (recv_needed_recovery); + dict_check_tablespaces_and_store_max_id( + recv_needed_recovery); } srv_startup_is_before_trx_rollback_phase = FALSE; diff --git a/storage/innobase/sync/sync0arr.c b/storage/innobase/sync/sync0arr.c index 278dd65bb1e..e45cd48a6b4 100644 --- a/storage/innobase/sync/sync0arr.c +++ b/storage/innobase/sync/sync0arr.c @@ -737,9 +737,9 @@ sync_array_detect_deadlock( (cannot be cell thread) (wait) x-lock, and he is blocked by start thread */ - ret = sync_array_deadlock_step - (arr, start, thread, - debug->pass, depth); + ret = sync_array_deadlock_step( + arr, start, thread, debug->pass, + depth); if (ret) { print: fprintf(stderr, "rw-lock %p ", @@ -772,9 +772,9 @@ print: holding (wait) x-lock, and he is blocked by start thread */ - ret = sync_array_deadlock_step - (arr, start, thread, - debug->pass, depth); + ret = sync_array_deadlock_step( + arr, start, thread, debug->pass, + depth); if (ret) { goto print; } diff --git a/storage/innobase/sync/sync0rw.c b/storage/innobase/sync/sync0rw.c index f22b0dc8d61..304801d0a3b 100644 --- a/storage/innobase/sync/sync0rw.c +++ b/storage/innobase/sync/sync0rw.c @@ -89,7 +89,8 @@ void rw_lock_create_func( /*================*/ rw_lock_t* lock, /* in: pointer to memory */ - ulint level, /* in: level */ + ulint level __attribute__((unused)), + /* in: level */ const char* cfile_name, /* in: file name where created */ ulint cline, /* in: file line where created */ const char* cmutex_name) /* in: mutex name */ @@ -116,9 +117,9 @@ rw_lock_create_func( #ifdef UNIV_SYNC_DEBUG UT_LIST_INIT(lock->debug_list); -#endif /* UNIV_SYNC_DEBUG */ lock->level = level; +#endif /* UNIV_SYNC_DEBUG */ lock->magic_n = RW_LOCK_MAGIC_N; diff --git a/storage/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c index d74b2b09ccf..91fd248502a 100644 --- a/storage/innobase/sync/sync0sync.c +++ b/storage/innobase/sync/sync0sync.c @@ -838,17 +838,17 @@ sync_thread_levels_g( ulint line; os_thread_id_t thread_id; - mutex_get_debug_info - (mutex, &file_name, - &line, &thread_id); + mutex_get_debug_info( + mutex, &file_name, + &line, &thread_id); fprintf(stderr, "InnoDB: Locked mutex:" " addr %p thread %ld" " file %s line %ld\n", (void*) mutex, - os_thread_pf - (thread_id), + os_thread_pf( + thread_id), file_name, (ulong) line); #else /* UNIV_SYNC_DEBUG */ @@ -1155,8 +1155,8 @@ sync_thread_add_level( case SYNC_IBUF_HEADER: 
ut_a(sync_thread_levels_g(array, SYNC_FSP - 1) && !sync_thread_levels_contain(array, SYNC_IBUF_MUTEX) - && !sync_thread_levels_contain - (array, SYNC_IBUF_PESS_INSERT_MUTEX)); + && !sync_thread_levels_contain( + array, SYNC_IBUF_PESS_INSERT_MUTEX)); break; case SYNC_DICT_AUTOINC_MUTEX: ut_a(sync_thread_levels_g(array, SYNC_DICT_AUTOINC_MUTEX)); diff --git a/storage/innobase/trx/trx0purge.c b/storage/innobase/trx/trx0purge.c index 9689a9fb179..11e089ac90e 100644 --- a/storage/innobase/trx/trx0purge.c +++ b/storage/innobase/trx/trx0purge.c @@ -284,8 +284,8 @@ trx_purge_add_update_undo_to_history( hist_size = mtr_read_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE, MLOG_4BYTES, mtr); - ut_ad(undo->size == flst_get_len - (seg_header + TRX_UNDO_PAGE_LIST, mtr)); + ut_ad(undo->size == flst_get_len( + seg_header + TRX_UNDO_PAGE_LIST, mtr)); mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE, hist_size + undo->size, MLOG_4BYTES, mtr); @@ -454,8 +454,8 @@ trx_purge_truncate_rseg_history( rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr); - hdr_addr = trx_purge_get_log_from_hist - (flst_get_last(rseg_hdr + TRX_RSEG_HISTORY, &mtr)); + hdr_addr = trx_purge_get_log_from_hist( + flst_get_last(rseg_hdr + TRX_RSEG_HISTORY, &mtr)); loop: if (hdr_addr.page == FIL_NULL) { @@ -493,8 +493,8 @@ loop: return; } - prev_hdr_addr = trx_purge_get_log_from_hist - (flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE, &mtr)); + prev_hdr_addr = trx_purge_get_log_from_hist( + flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE, &mtr)); n_removed_logs++; seg_hdr = undo_page + TRX_UNDO_SEG_HDR; @@ -633,8 +633,8 @@ trx_purge_rseg_get_next_history_log( purge_sys->n_pages_handled++; - prev_log_addr = trx_purge_get_log_from_hist - (flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE, &mtr)); + prev_log_addr = trx_purge_get_log_from_hist( + flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE, &mtr)); if (prev_log_addr.page == FIL_NULL) { /* No logs left in the history list */ @@ -855,9 +855,9 @@ trx_purge_get_next_rec( purge_sys->hdr_page_no, purge_sys->hdr_offset); if (next_rec == NULL) { - rec2 = trx_undo_get_next_rec - (rec2, purge_sys->hdr_page_no, - purge_sys->hdr_offset, &mtr); + rec2 = trx_undo_get_next_rec( + rec2, purge_sys->hdr_page_no, + purge_sys->hdr_offset, &mtr); break; } diff --git a/storage/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c index 3a214d6ce38..69e858fe71d 100644 --- a/storage/innobase/trx/trx0rec.c +++ b/storage/innobase/trx/trx0rec.c @@ -50,8 +50,8 @@ trx_undof_page_add_undo_rec_log( } log_end = &log_ptr[11 + 13 + MLOG_BUF_MARGIN]; - log_ptr = mlog_write_initial_log_record_fast - (undo_page, MLOG_UNDO_INSERT, log_ptr, mtr); + log_ptr = mlog_write_initial_log_record_fast( + undo_page, MLOG_UNDO_INSERT, log_ptr, mtr); len = new_free - old_free - 4; mach_write_to_2(log_ptr, len); @@ -413,7 +413,6 @@ trx_undo_page_report_modify( { dict_table_t* table; upd_field_t* upd_field; - dict_col_t* col; ulint first_free; byte* ptr; ulint len; @@ -486,13 +485,13 @@ trx_undo_page_report_modify( /* Store the values of the system columns */ field = rec_get_nth_field(rec, offsets, - dict_index_get_sys_col_pos - (index, DATA_TRX_ID), &len); + dict_index_get_sys_col_pos( + index, DATA_TRX_ID), &len); ut_ad(len == DATA_TRX_ID_LEN); trx_id = trx_read_trx_id(field); field = rec_get_nth_field(rec, offsets, - dict_index_get_sys_col_pos - (index, DATA_ROLL_PTR), &len); + dict_index_get_sys_col_pos( + index, DATA_ROLL_PTR), &len); ut_ad(len == DATA_ROLL_PTR_LEN); roll_ptr = trx_read_roll_ptr(field); @@ -567,9 
+566,9 @@ trx_undo_page_report_modify( /* If a field has external storage, we add to flen the flag */ - len = mach_write_compressed - (ptr, - UNIV_EXTERN_STORAGE_FIELD + flen); + len = mach_write_compressed( + ptr, + UNIV_EXTERN_STORAGE_FIELD + flen); /* Notify purge that it eventually has to free the old externally stored field */ @@ -627,7 +626,8 @@ trx_undo_page_report_modify( for (col_no = 0; col_no < dict_table_get_n_cols(table); col_no++) { - col = dict_table_get_nth_col(table, col_no); + const dict_col_t* col + = dict_table_get_nth_col(table, col_no); if (col->ord_part > 0) { @@ -826,9 +826,9 @@ trx_undo_update_rec_get_update( buf = mem_heap_alloc(heap, DATA_ROLL_PTR_LEN); trx_write_roll_ptr(buf, roll_ptr); - upd_field_set_field_no - (upd_field, dict_index_get_sys_col_pos(index, DATA_ROLL_PTR), - index, trx); + upd_field_set_field_no( + upd_field, dict_index_get_sys_col_pos(index, DATA_ROLL_PTR), + index, trx); dfield_set_data(&(upd_field->new_val), buf, DATA_ROLL_PTR_LEN); /* Store then the updated ordinary columns to the update vector */ @@ -1094,14 +1094,14 @@ trx_undo_report_row_operation( #endif /* UNIV_SYNC_DEBUG */ if (op_type == TRX_UNDO_INSERT_OP) { - offset = trx_undo_page_report_insert - (undo_page, trx, index, clust_entry, &mtr); + offset = trx_undo_page_report_insert( + undo_page, trx, index, clust_entry, &mtr); } else { offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); - offset = trx_undo_page_report_modify - (undo_page, trx, index, rec, offsets, - update, cmpl_info, &mtr); + offset = trx_undo_page_report_modify( + undo_page, trx, index, rec, offsets, update, + cmpl_info, &mtr); } if (offset == 0) { diff --git a/storage/innobase/trx/trx0rseg.c b/storage/innobase/trx/trx0rseg.c index 745a29021ad..7a6989c7b4f 100644 --- a/storage/innobase/trx/trx0rseg.c +++ b/storage/innobase/trx/trx0rseg.c @@ -171,18 +171,18 @@ trx_rseg_mem_create( if (len > 0) { trx_sys->rseg_history_len += len; - node_addr = trx_purge_get_log_from_hist - (flst_get_last(rseg_header + TRX_RSEG_HISTORY, mtr)); + node_addr = trx_purge_get_log_from_hist( + flst_get_last(rseg_header + TRX_RSEG_HISTORY, mtr)); rseg->last_page_no = node_addr.page; rseg->last_offset = node_addr.boffset; undo_log_hdr = trx_undo_page_get(rseg->space, node_addr.page, mtr) + node_addr.boffset; - rseg->last_trx_no = mtr_read_dulint - (undo_log_hdr + TRX_UNDO_TRX_NO, mtr); - rseg->last_del_marks = mtr_read_ulint - (undo_log_hdr + TRX_UNDO_DEL_MARKS, MLOG_2BYTES, mtr); + rseg->last_trx_no = mtr_read_dulint( + undo_log_hdr + TRX_UNDO_TRX_NO, mtr); + rseg->last_del_marks = mtr_read_ulint( + undo_log_hdr + TRX_UNDO_DEL_MARKS, MLOG_2BYTES, mtr); } else { rseg->last_page_no = FIL_NULL; } diff --git a/storage/innobase/trx/trx0sys.c b/storage/innobase/trx/trx0sys.c index ce68f994d44..b87f3d5e090 100644 --- a/storage/innobase/trx/trx0sys.c +++ b/storage/innobase/trx/trx0sys.c @@ -105,17 +105,17 @@ trx_doublewrite_init( trx_doublewrite->first_free = 0; - trx_doublewrite->block1 = mach_read_from_4 - (doublewrite + TRX_SYS_DOUBLEWRITE_BLOCK1); - trx_doublewrite->block2 = mach_read_from_4 - (doublewrite + TRX_SYS_DOUBLEWRITE_BLOCK2); - trx_doublewrite->write_buf_unaligned = ut_malloc - ((1 + 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) * UNIV_PAGE_SIZE); + trx_doublewrite->block1 = mach_read_from_4( + doublewrite + TRX_SYS_DOUBLEWRITE_BLOCK1); + trx_doublewrite->block2 = mach_read_from_4( + doublewrite + TRX_SYS_DOUBLEWRITE_BLOCK2); + trx_doublewrite->write_buf_unaligned = ut_malloc( + (1 + 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) * 
UNIV_PAGE_SIZE); - trx_doublewrite->write_buf = ut_align - (trx_doublewrite->write_buf_unaligned, UNIV_PAGE_SIZE); - trx_doublewrite->buf_block_arr = mem_alloc - (2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * sizeof(void*)); + trx_doublewrite->write_buf = ut_align( + trx_doublewrite->write_buf_unaligned, UNIV_PAGE_SIZE); + trx_doublewrite->buf_block_arr = mem_alloc( + 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * sizeof(void*)); } /******************************************************************** @@ -438,8 +438,8 @@ trx_sys_doublewrite_init_or_restore_pages( /* printf("Resetting space id in page %lu\n", source_page_no); */ } else { - space_id = mach_read_from_4 - (page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); + space_id = mach_read_from_4( + page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); } if (!restore_corrupt_pages) { @@ -671,12 +671,12 @@ trx_sys_print_mysql_binlog_offset_from_page( fprintf(stderr, "ibbackup: Last MySQL binlog file position %lu %lu," " file name %s\n", - (ulong) mach_read_from_4 - (sys_header + TRX_SYS_MYSQL_LOG_INFO - + TRX_SYS_MYSQL_LOG_OFFSET_HIGH), - (ulong) mach_read_from_4 - (sys_header + TRX_SYS_MYSQL_LOG_INFO - + TRX_SYS_MYSQL_LOG_OFFSET_LOW), + (ulong) mach_read_from_4( + sys_header + TRX_SYS_MYSQL_LOG_INFO + + TRX_SYS_MYSQL_LOG_OFFSET_HIGH), + (ulong) mach_read_from_4( + sys_header + TRX_SYS_MYSQL_LOG_INFO + + TRX_SYS_MYSQL_LOG_OFFSET_LOW), sys_header + TRX_SYS_MYSQL_LOG_INFO + TRX_SYS_MYSQL_LOG_NAME); } @@ -708,12 +708,12 @@ trx_sys_print_mysql_binlog_offset(void) return; } - trx_sys_mysql_bin_log_pos_high = mach_read_from_4 - (sys_header + TRX_SYS_MYSQL_LOG_INFO - + TRX_SYS_MYSQL_LOG_OFFSET_HIGH); - trx_sys_mysql_bin_log_pos_low = mach_read_from_4 - (sys_header + TRX_SYS_MYSQL_LOG_INFO - + TRX_SYS_MYSQL_LOG_OFFSET_LOW); + trx_sys_mysql_bin_log_pos_high = mach_read_from_4( + sys_header + TRX_SYS_MYSQL_LOG_INFO + + TRX_SYS_MYSQL_LOG_OFFSET_HIGH); + trx_sys_mysql_bin_log_pos_low = mach_read_from_4( + sys_header + TRX_SYS_MYSQL_LOG_INFO + + TRX_SYS_MYSQL_LOG_OFFSET_LOW); trx_sys_mysql_bin_log_pos = (((ib_longlong)trx_sys_mysql_bin_log_pos_high) << 32) @@ -777,12 +777,12 @@ trx_sys_print_mysql_master_log_pos(void) TRX_SYS_MYSQL_LOG_NAME_LEN); trx_sys_mysql_master_log_pos - = (((ib_longlong) mach_read_from_4 - (sys_header + TRX_SYS_MYSQL_MASTER_LOG_INFO - + TRX_SYS_MYSQL_LOG_OFFSET_HIGH)) << 32) - + ((ib_longlong) mach_read_from_4 - (sys_header + TRX_SYS_MYSQL_MASTER_LOG_INFO - + TRX_SYS_MYSQL_LOG_OFFSET_LOW)); + = (((ib_longlong) mach_read_from_4( + sys_header + TRX_SYS_MYSQL_MASTER_LOG_INFO + + TRX_SYS_MYSQL_LOG_OFFSET_HIGH)) << 32) + + ((ib_longlong) mach_read_from_4( + sys_header + TRX_SYS_MYSQL_MASTER_LOG_INFO + + TRX_SYS_MYSQL_LOG_OFFSET_LOW)); mtr_commit(&mtr); } @@ -847,12 +847,6 @@ trx_sysf_create( mtr); ut_a(buf_frame_get_page_no(page) == TRX_SYS_PAGE_NO); - /* Reset the doublewrite buffer magic number to zero so that we - know that the doublewrite buffer has not yet been created (this - suppresses a Valgrind warning) */ - - mach_write_to_4(page + TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC, - 0); #ifdef UNIV_SYNC_DEBUG buf_page_dbg_add_level(page, SYNC_TRX_SYS_HEADER); #endif /* UNIV_SYNC_DEBUG */ @@ -860,6 +854,13 @@ trx_sysf_create( mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_TYPE_TRX_SYS, MLOG_2BYTES, mtr); + /* Reset the doublewrite buffer magic number to zero so that we + know that the doublewrite buffer has not yet been created (this + suppresses a Valgrind warning) */ + + mlog_write_ulint(page + TRX_SYS_DOUBLEWRITE + + TRX_SYS_DOUBLEWRITE_MAGIC, 0, MLOG_4BYTES, 
mtr); + sys_header = trx_sysf_get(mtr); /* Start counting transaction ids from number 1 up */ @@ -919,12 +920,12 @@ trx_sys_init_at_db_start(void) to the disk-based header! Thus trx id values will not overlap when the database is repeatedly started! */ - trx_sys->max_trx_id = ut_dulint_add - (ut_dulint_align_up(mtr_read_dulint - (sys_header - + TRX_SYS_TRX_ID_STORE, &mtr), - TRX_SYS_TRX_ID_WRITE_MARGIN), - 2 * TRX_SYS_TRX_ID_WRITE_MARGIN); + trx_sys->max_trx_id = ut_dulint_add( + ut_dulint_align_up(mtr_read_dulint( + sys_header + + TRX_SYS_TRX_ID_STORE, &mtr), + TRX_SYS_TRX_ID_WRITE_MARGIN), + 2 * TRX_SYS_TRX_ID_WRITE_MARGIN); UT_LIST_INIT(trx_sys->mysql_trx_list); trx_lists_init_at_db_start(); @@ -935,8 +936,8 @@ trx_sys_init_at_db_start(void) for (;;) { if ( trx->conc_state != TRX_PREPARED) { - rows_to_undo += ut_conv_dulint_to_longlong - (trx->undo_no); + rows_to_undo += ut_conv_dulint_to_longlong( + trx->undo_no); } trx = UT_LIST_GET_NEXT(trx_list, trx); diff --git a/storage/innobase/trx/trx0trx.c b/storage/innobase/trx/trx0trx.c index 78b9e2446cb..4d0a26b5757 100644 --- a/storage/innobase/trx/trx0trx.c +++ b/storage/innobase/trx/trx0trx.c @@ -556,10 +556,10 @@ trx_lists_init_at_db_start(void) "InnoDB: Transaction" " %lu %lu was in the" " XA prepared state.\n", - ut_dulint_get_high - (trx->id), - ut_dulint_get_low - (trx->id)); + ut_dulint_get_high( + trx->id), + ut_dulint_get_low( + trx->id)); if (srv_force_recovery == 0) { @@ -790,8 +790,8 @@ trx_commit_off_kernel( because only a single OS thread is allowed to do the transaction commit for this transaction. */ - update_hdr_page = trx_undo_set_state_at_finish - (trx, undo, &mtr); + update_hdr_page = trx_undo_set_state_at_finish( + trx, undo, &mtr); /* We have to do the cleanup for the update log while holding the rseg mutex because update log headers @@ -809,19 +809,19 @@ trx_commit_off_kernel( if (trx->mysql_log_file_name && trx->mysql_log_file_name[0] != '\0') { - trx_sys_update_mysql_binlog_offset - (trx->mysql_log_file_name, - trx->mysql_log_offset, - TRX_SYS_MYSQL_LOG_INFO, &mtr); + trx_sys_update_mysql_binlog_offset( + trx->mysql_log_file_name, + trx->mysql_log_offset, + TRX_SYS_MYSQL_LOG_INFO, &mtr); trx->mysql_log_file_name = NULL; } if (trx->mysql_master_log_file_name[0] != '\0') { /* This database server is a MySQL replication slave */ - trx_sys_update_mysql_binlog_offset - (trx->mysql_master_log_file_name, - trx->mysql_master_log_pos, - TRX_SYS_MYSQL_MASTER_LOG_INFO, &mtr); + trx_sys_update_mysql_binlog_offset( + trx->mysql_master_log_file_name, + trx->mysql_master_log_pos, + TRX_SYS_MYSQL_MASTER_LOG_INFO, &mtr); } /* The following call commits the mini-transaction, making the @@ -1010,8 +1010,8 @@ trx_assign_read_view( mutex_enter(&kernel_mutex); if (!trx->read_view) { - trx->read_view = read_view_open_now - (trx->id, trx->global_read_view_heap); + trx->read_view = read_view_open_now( + trx->id, trx->global_read_view_heap); trx->global_read_view = trx->read_view; } @@ -1835,8 +1835,8 @@ trx_prepare_off_kernel( } if (trx->update_undo) { - update_hdr_page = trx_undo_set_state_at_prepare - (trx, trx->update_undo, &mtr); + update_hdr_page = trx_undo_set_state_at_prepare( + trx, trx->update_undo, &mtr); } mutex_exit(&(rseg->mutex)); @@ -1983,8 +1983,8 @@ trx_recover_for_mysql( fprintf(stderr, " InnoDB: Transaction contains changes" " to %lu rows\n", - (ulong) ut_conv_dulint_to_longlong - (trx->undo_no)); + (ulong) ut_conv_dulint_to_longlong( + trx->undo_no)); count++; diff --git a/storage/innobase/trx/trx0undo.c 
b/storage/innobase/trx/trx0undo.c index db33c515c34..fbcfab38f01 100644 --- a/storage/innobase/trx/trx0undo.c +++ b/storage/innobase/trx/trx0undo.c @@ -153,8 +153,8 @@ trx_undo_get_prev_rec_from_prev_page( return(NULL); } - prev_page = trx_undo_page_get_s_latched - (buf_frame_get_space_id(undo_page), prev_page_no, mtr); + prev_page = trx_undo_page_get_s_latched( + buf_frame_get_space_id(undo_page), prev_page_no, mtr); return(trx_undo_page_get_last_rec(prev_page, page_no, offset)); } @@ -1041,8 +1041,8 @@ trx_undo_truncate_end( goto function_exit; } - trx_undo_free_page_in_rollback - (trx, undo, last_page_no, &mtr); + trx_undo_free_page_in_rollback( + trx, undo, last_page_no, &mtr); break; } @@ -1261,8 +1261,8 @@ trx_undo_mem_create_at_db_start( page_no, offset); mutex_exit(&(rseg->mutex)); - undo->dict_operation = mtr_read_ulint - (undo_header + TRX_UNDO_DICT_TRANS, MLOG_1BYTE, mtr); + undo->dict_operation = mtr_read_ulint( + undo_header + TRX_UNDO_DICT_TRANS, MLOG_1BYTE, mtr); undo->table_id = mtr_read_dulint(undo_header + TRX_UNDO_TABLE_ID, mtr); undo->state = state; @@ -1601,8 +1601,8 @@ trx_undo_reuse_cached( offset = trx_undo_insert_header_reuse(undo_page, trx_id, mtr); if (trx->support_xa) { - trx_undo_header_add_space_for_xid - (undo_page, undo_page + offset, mtr); + trx_undo_header_add_space_for_xid( + undo_page, undo_page + offset, mtr); } } else { ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR @@ -1612,8 +1612,8 @@ trx_undo_reuse_cached( offset = trx_undo_header_create(undo_page, trx_id, mtr); if (trx->support_xa) { - trx_undo_header_add_space_for_xid - (undo_page, undo_page + offset, mtr); + trx_undo_header_add_space_for_xid( + undo_page, undo_page + offset, mtr); } } diff --git a/storage/myisam/Makefile.am b/storage/myisam/Makefile.am index fdccb1f5b19..ad2302bf0eb 100644 --- a/storage/myisam/Makefile.am +++ b/storage/myisam/Makefile.am @@ -28,7 +28,7 @@ LDADD = DEFS = @DEFS@ -EXTRA_DIST = mi_test_all.sh mi_test_all.res ft_stem.c CMakeLists.txt +EXTRA_DIST = mi_test_all.sh mi_test_all.res ft_stem.c CMakeLists.txt plug.in pkgdata_DATA = mi_test_all mi_test_all.res pkglib_LIBRARIES = libmyisam.a diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 6a32ba95eee..03065b43dd5 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -48,9 +48,11 @@ TYPELIB myisam_stats_method_typelib= { ** MyISAM tables *****************************************************************************/ -static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *myisam_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_myisam(table); + return new (mem_root) ha_myisam(hton, table); } // collect errors printed by mi_check routines @@ -133,8 +135,8 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...) 
} -ha_myisam::ha_myisam(TABLE_SHARE *table_arg) - :handler(&myisam_hton, table_arg), file(0), +ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), file(0), int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS | @@ -143,6 +145,14 @@ ha_myisam::ha_myisam(TABLE_SHARE *table_arg) can_enable_indexes(1) {} +handler *ha_myisam::clone(MEM_ROOT *mem_root) +{ + ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(mem_root)); + if (new_handler) + new_handler->file->state= file->state; + return new_handler; +} + static const char *ha_myisam_exts[] = { ".MYI", @@ -335,7 +345,11 @@ int ha_myisam::write_row(byte * buf) or a new row, then update the auto_increment value in the record. */ if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); + { + int error; + if ((error= update_auto_increment())) + return error; + } return mi_write(file,buf); } @@ -1775,20 +1789,27 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_YES; } -handlerton myisam_hton; - -static int myisam_init() +extern int mi_panic(enum ha_panic_function flag); +int myisam_panic(handlerton *hton, ha_panic_function flag) { - myisam_hton.state=SHOW_OPTION_YES; - myisam_hton.db_type=DB_TYPE_MYISAM; - myisam_hton.create=myisam_create_handler; - myisam_hton.panic=mi_panic; - myisam_hton.flags=HTON_CAN_RECREATE; + return mi_panic(flag); +} + +static int myisam_init(void *p) +{ + handlerton *myisam_hton; + + myisam_hton= (handlerton *)p; + myisam_hton->state= SHOW_OPTION_YES; + myisam_hton->db_type= DB_TYPE_MYISAM; + myisam_hton->create= myisam_create_handler; + myisam_hton->panic= myisam_panic; + myisam_hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES; return 0; } struct st_mysql_storage_engine myisam_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &myisam_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(myisam) { @@ -1797,6 +1818,7 @@ mysql_declare_plugin(myisam) "MyISAM", "MySQL AB", "Default engine as of MySQL 3.23 with great performance", + PLUGIN_LICENSE_GPL, myisam_init, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100, /* 1.0 */ diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 5544e5040b3..85abcdca8b2 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -43,8 +43,9 @@ class ha_myisam: public handler int repair(THD *thd, MI_CHECK &param, bool optimize); public: - ha_myisam(TABLE_SHARE *table_arg); + ha_myisam(handlerton *hton, TABLE_SHARE *table_arg); ~ha_myisam() {} + handler *clone(MEM_ROOT *mem_root); const char *table_type() const { return "MyISAM"; } const char *index_type(uint key_number); const char **bas_ext() const; diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 91c04866b5a..a7685bf653d 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -1370,7 +1370,8 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info, param->temp_filename); goto err; } - if (filecopy(param,new_file,info->dfile,0L,new_header_length, + if (new_header_length && + filecopy(param,new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; info->s->state.dellink= HA_OFFSET_ERROR; @@ -2072,7 +2073,8 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, param->temp_filename); goto err; } - if (filecopy(param, new_file,info->dfile,0L,new_header_length,
+ filecopy(param, new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; if (param->testflag & T_UNPACK) @@ -2466,7 +2468,8 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, param->temp_filename); goto err; } - if (filecopy(param, new_file,info->dfile,0L,new_header_length, + if (new_header_length && + filecopy(param, new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; if (param->testflag & T_UNPACK) diff --git a/storage/myisam/mi_delete.c b/storage/myisam/mi_delete.c index 2bc99d65dd2..85cc60bdd9d 100644 --- a/storage/myisam/mi_delete.c +++ b/storage/myisam/mi_delete.c @@ -446,7 +446,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, else { DBUG_PRINT("test",("Inserting of key when deleting")); - if (_mi_get_last_key(info,keyinfo,leaf_buff,keybuff,endpos, + if (!_mi_get_last_key(info,keyinfo,leaf_buff,keybuff,endpos, &tmp)) goto err; ret_value=_mi_insert(info,keyinfo,key,leaf_buff,endpos,keybuff, diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c index 18b87281f26..a8e2de5ea5a 100644 --- a/storage/myisam/mi_locking.c +++ b/storage/myisam/mi_locking.c @@ -236,16 +236,20 @@ int mi_lock_database(MI_INFO *info, int lock_type) default: break; /* Impossible */ } - } + } #ifdef __WIN__ else { /* - The file has been closed and kfile is -1. - See mi_extra.c about implementation of - HA_EXTRA_PREPARE_FOR_DELETE. - */ - error=HA_ERR_NO_SUCH_TABLE; + Check for bad file descriptors if this table is part + of a merge union. Failing to capture this may cause + a crash on windows if the table is renamed and + later on referenced by the merge table. + */ + if( info->owned_by_merge && (info->s)->kfile < 0 ) + { + error = HA_ERR_NO_SUCH_TABLE; + } } #endif pthread_mutex_unlock(&share->intern_lock); diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c index 5143eb80adc..67c8b25878f 100644 --- a/storage/myisam/mi_packrec.c +++ b/storage/myisam/mi_packrec.c @@ -16,7 +16,7 @@ /* Functions to compressed records */ -#include "myisamdef.h" +#include "fulltext.h" #define IS_CHAR ((uint) 32768) /* Bit if char (not offset) in tree */ @@ -230,11 +230,19 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) { for (i=0 ; i < share->base.keys ; i++) { - share->keyinfo[i].keylength+=(uint16) diff_length; - share->keyinfo[i].minlength+=(uint16) diff_length; - share->keyinfo[i].maxlength+=(uint16) diff_length; - share->keyinfo[i].seg[share->keyinfo[i].keysegs].length= - (uint16) rec_reflength; + MI_KEYDEF *keyinfo= &share->keyinfo[i]; + keyinfo->keylength+= (uint16) diff_length; + keyinfo->minlength+= (uint16) diff_length; + keyinfo->maxlength+= (uint16) diff_length; + keyinfo->seg[keyinfo->flag & HA_FULLTEXT ? + FT_SEGS : keyinfo->keysegs].length= (uint16) rec_reflength; + } + if (share->ft2_keyinfo.seg) + { + MI_KEYDEF *ft2_keyinfo= &share->ft2_keyinfo; + ft2_keyinfo->keylength+= (uint16) diff_length; + ft2_keyinfo->minlength+= (uint16) diff_length; + ft2_keyinfo->maxlength+= (uint16) diff_length; } } diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c index a9a8cbacb4b..43be34f6ebb 100644 --- a/storage/myisam/mi_rkey.c +++ b/storage/myisam/mi_rkey.c @@ -94,32 +94,45 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, myisam_read_vec[search_flag], info->s->state.key_root[inx])) { /* - If we are searching for an exact key (including the data pointer) - and this was added by an concurrent insert, - then the result is "key not found". 
+ If we searching for a partial key (or using >, >=, < or <=) and + the data is outside of the data file, we need to continue searching + for the first key inside the data file */ - if ((search_flag == HA_READ_KEY_EXACT) && - (info->lastpos >= info->state->data_file_length)) + if (info->lastpos >= info->state->data_file_length && + (search_flag != HA_READ_KEY_EXACT || + last_used_keyseg != keyinfo->seg + keyinfo->keysegs)) { - my_errno= HA_ERR_KEY_NOT_FOUND; - info->lastpos= HA_OFFSET_ERROR; - } - else while (info->lastpos >= info->state->data_file_length) - { - /* - Skip rows that are inserted by other threads since we got a lock - Note that this can only happen if we are not searching after an - exact key, because the keys are sorted according to position - */ - if (_mi_search_next(info, keyinfo, info->lastkey, - info->lastkey_length, - myisam_readnext_vec[search_flag], - info->s->state.key_root[inx])) - break; + do + { + uint not_used[2]; + /* + Skip rows that are inserted by other threads since we got a lock + Note that this can only happen if we are not searching after an + full length exact key, because the keys are sorted + according to position + */ + if (_mi_search_next(info, keyinfo, info->lastkey, + info->lastkey_length, + myisam_readnext_vec[search_flag], + info->s->state.key_root[inx])) + break; + /* + Check that the found key does still match the search. + _mi_search_next() delivers the next key regardless of its + value. + */ + if (search_flag == HA_READ_KEY_EXACT && + ha_key_cmp(keyinfo->seg, key_buff, info->lastkey, use_key_length, + SEARCH_FIND, not_used)) + { + my_errno= HA_ERR_KEY_NOT_FOUND; + info->lastpos= HA_OFFSET_ERROR; + break; + } + } while (info->lastpos >= info->state->data_file_length); } } } - if (share->concurrent_insert) rw_unlock(&share->key_root_lock[inx]); diff --git a/storage/myisam/mi_test_all.res b/storage/myisam/mi_test_all.res index 16b517d3f76..4a22809b45f 100644 --- a/storage/myisam/mi_test_all.res +++ b/storage/myisam/mi_test_all.res @@ -8,46 +8,46 @@ myisamchk: MyISAM file test2 myisamchk: warning: Datafile is almost full, 65532 of 65534 used MyISAM-table 'test2' is usable but should be fixed Commands Used count Errors Recover errors -open 1 0 0 -write 50 0 0 -update 5 0 0 -delete 50 0 0 -close 1 0 0 -extra 6 0 0 -Total 113 0 0 +open 7 0 0 +write 350 0 0 +update 35 0 0 +delete 350 0 0 +close 7 0 0 +extra 42 0 0 +Total 791 0 0 Commands Used count Errors Recover errors -open 2 0 0 -write 100 0 0 -update 10 0 0 -delete 100 0 0 -close 2 0 0 -extra 12 0 0 -Total 226 0 0 +open 8 0 0 +write 400 0 0 +update 40 0 0 +delete 400 0 0 +close 8 0 0 +extra 48 0 0 +Total 904 0 0 -real 0m0.791s -user 0m0.137s -sys 0m0.117s +real 0m0.221s +user 0m0.120s +sys 0m0.100s -real 0m0.659s -user 0m0.252s -sys 0m0.102s +real 0m0.222s +user 0m0.140s +sys 0m0.084s -real 0m0.571s -user 0m0.188s -sys 0m0.098s +real 0m0.232s +user 0m0.112s +sys 0m0.120s -real 0m1.111s -user 0m0.236s -sys 0m0.037s +real 0m0.163s +user 0m0.116s +sys 0m0.036s -real 0m0.621s -user 0m0.242s -sys 0m0.022s +real 0m0.159s +user 0m0.136s +sys 0m0.020s -real 0m0.698s -user 0m0.248s -sys 0m0.021s +real 0m0.147s +user 0m0.132s +sys 0m0.016s -real 0m0.683s -user 0m0.265s -sys 0m0.079s +real 0m0.211s +user 0m0.124s +sys 0m0.088s diff --git a/storage/myisam/mi_test_all.sh b/storage/myisam/mi_test_all.sh index 62eaff2dab6..5989d9cfaf0 100755 --- a/storage/myisam/mi_test_all.sh +++ b/storage/myisam/mi_test_all.sh @@ -79,7 +79,8 @@ if test -f mi_test1$MACH ; then suffix=$MACH ; else suffix=""; fi # check of 
myisampack / myisamchk ./myisampack$suffix --force -s test1 -./myisamchk$suffix -es test1 +# Ignore error for index file +./myisamchk$suffix -es test1 2>&1 >& /dev/null ./myisamchk$suffix -rqs test1 ./myisamchk$suffix -es test1 ./myisamchk$suffix -rs test1 diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index f16d9471afe..7080875009b 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -167,13 +167,13 @@ int mi_write(MI_INFO *info, byte *record) /* Update status of the table. We need to do so after each row write for the log tables, as we want the new row to become visible to - other threads as soon as possible. We lock mutex here to follow - pthread memory visibility rules. + other threads as soon as possible. We don't lock mutex here + (as it is required by pthread memory visibility rules) as (1) it's + not critical to use outdated share->is_log_table value (2) locking + mutex here for every write is too expensive. */ - pthread_mutex_lock(&share->intern_lock); if (share->is_log_table) mi_update_status((void*) info); - pthread_mutex_unlock(&share->intern_lock); allow_break(); /* Allow SIGHUP & SIGINT */ DBUG_RETURN(0); diff --git a/storage/myisam/myisam_ftdump.c b/storage/myisam/myisam_ftdump.c index 209ef49c8db..46d0ffd13a5 100644 --- a/storage/myisam/myisam_ftdump.c +++ b/storage/myisam/myisam_ftdump.c @@ -127,7 +127,6 @@ int main(int argc,char *argv[]) if (count || stats) { - doc_cnt++; if (strcmp(buf, buf2)) { if (*buf2) @@ -152,6 +151,7 @@ int main(int argc,char *argv[]) keylen2=keylen; doc_cnt=0; } + doc_cnt+= (subkeys >= 0 ? 1 : -subkeys); } if (dump) { @@ -167,7 +167,6 @@ int main(int argc,char *argv[]) if (count || stats) { - doc_cnt++; if (*buf2) { uniq++; diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index 765e26dc74d..fa83cbf0524 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -291,6 +291,9 @@ struct st_myisam_info { my_bool page_changed; /* If info->buff can't be used for rnext */ my_bool buff_used; /* If info->buff has to be reread for rnext */ my_bool once_flags; /* For MYISAMMRG */ +#ifdef __WIN__ + my_bool owned_by_merge; /* This MyISAM table is part of a merge union */ +#endif #ifdef THREAD THR_LOCK_DATA lock; #endif diff --git a/storage/myisammrg/Makefile.am b/storage/myisammrg/Makefile.am index 08cd52c363f..2008cc62559 100644 --- a/storage/myisammrg/Makefile.am +++ b/storage/myisammrg/Makefile.am @@ -39,7 +39,7 @@ libmyisammrg_a_SOURCES = myrg_open.c myrg_extra.c myrg_info.c myrg_locking.c \ myrg_rnext_same.c -EXTRA_DIST = CMakeLists.txt +EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 2d1628e5e4f..5c8a05f0dde 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -34,19 +34,16 @@ static handler *myisammrg_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); -/* MyISAM MERGE handlerton */ - -handlerton myisammrg_hton; - -static handler *myisammrg_create_handler(TABLE_SHARE *table, +static handler *myisammrg_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return new (mem_root) ha_myisammrg(table); + return new (mem_root) ha_myisammrg(hton, table); } -ha_myisammrg::ha_myisammrg(TABLE_SHARE *table_arg) - :handler(&myisammrg_hton, table_arg), file(0) +ha_myisammrg::ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), file(0) {} static const char 
*ha_myisammrg_exts[] = { @@ -126,7 +123,11 @@ int ha_myisammrg::write_row(byte * buf) if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); + { + int error; + if ((error= update_auto_increment())) + return error; + } return myrg_write(file,buf); } @@ -311,9 +312,22 @@ void ha_myisammrg::info(uint flag) if (flag & HA_STATUS_CONST) { if (table->s->key_parts && info.rec_per_key) + { +#ifdef HAVE_purify + /* + valgrind may be unhappy about it, because optimizer may access values + between file->keys and table->key_parts, that will be uninitialized. + It's safe though, because even if opimizer will decide to use a key + with such a number, it'll be an error later anyway. + */ + bzero((char*) table->key_info[0].rec_per_key, + sizeof(table->key_info[0].rec_per_key) * table->s->key_parts); +#endif memcpy((char*) table->key_info[0].rec_per_key, (char*) info.rec_per_key, - sizeof(table->key_info[0].rec_per_key)*table->s->key_parts); + sizeof(table->key_info[0].rec_per_key) * + min(file->keys, table->s->key_parts)); + } } } @@ -550,18 +564,29 @@ bool ha_myisammrg::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_NO; } -static int myisammrg_init() +extern int myrg_panic(enum ha_panic_function flag); +int myisammrg_panic(handlerton *hton, ha_panic_function flag) { - myisammrg_hton.state=have_merge_db; - myisammrg_hton.db_type=DB_TYPE_MRG_MYISAM; - myisammrg_hton.create=myisammrg_create_handler; - myisammrg_hton.panic=myrg_panic; - myisammrg_hton.flags= HTON_CAN_RECREATE; + return myrg_panic(flag); +} + +static int myisammrg_init(void *p) +{ + handlerton *myisammrg_hton; + + myisammrg_hton= (handlerton *)p; + + myisammrg_hton->state= have_merge_db; + myisammrg_hton->db_type= DB_TYPE_MRG_MYISAM; + myisammrg_hton->create= myisammrg_create_handler; + myisammrg_hton->panic= myisammrg_panic; + myisammrg_hton->flags= HTON_CAN_RECREATE|HTON_NO_PARTITION; + return 0; } struct st_mysql_storage_engine myisammrg_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, &myisammrg_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(myisammrg) { @@ -570,6 +595,7 @@ mysql_declare_plugin(myisammrg) "MRG_MYISAM", "MySQL AB", "Collection of identical MyISAM tables", + PLUGIN_LICENSE_GPL, myisammrg_init, /* Plugin Init */ NULL, /* Plugin Deinit */ 0x0100, /* 1.0 */ diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h index d58a3523c26..faa3ae73c59 100644 --- a/storage/myisammrg/ha_myisammrg.h +++ b/storage/myisammrg/ha_myisammrg.h @@ -28,7 +28,7 @@ class ha_myisammrg: public handler MYRG_INFO *file; public: - ha_myisammrg(TABLE_SHARE *table_arg); + ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg); ~ha_myisammrg() {} const char *table_type() const { return "MRG_MyISAM"; } const char **bas_ext() const; diff --git a/storage/myisammrg/myrg_locking.c b/storage/myisammrg/myrg_locking.c index e5a8d3f3d9d..98e8305b9ce 100644 --- a/storage/myisammrg/myrg_locking.c +++ b/storage/myisammrg/myrg_locking.c @@ -26,8 +26,19 @@ int myrg_lock_database(MYRG_INFO *info, int lock_type) MYRG_TABLE *file; error=0; - for (file=info->open_tables ; file != info->end_table ; file++) + for (file=info->open_tables ; file != info->end_table ; file++) + { +#ifdef __WIN__ + /* + Make sure this table is marked as owned by a merge table. + The semaphore is never released as long as table remains + in memory. 
This should be refactored into a more generic + approach (observer pattern) + */ + (file->table)->owned_by_merge = TRUE; +#endif if ((new_error=mi_lock_database(file->table,lock_type))) error=new_error; + } return(error); } diff --git a/storage/myisammrg/myrg_open.c b/storage/myisammrg/myrg_open.c index cf82f13da57..dafb3246cbf 100644 --- a/storage/myisammrg/myrg_open.c +++ b/storage/myisammrg/myrg_open.c @@ -33,7 +33,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) { int save_errno,errpos=0; - uint files=0,i,dir_length,length,key_parts; + uint files= 0, i, dir_length, length, key_parts, min_keys= 0; ulonglong file_offset=0; char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end; MYRG_INFO *m_info=0; @@ -90,7 +90,10 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) else fn_format(buff, buff, "", "", 0); if (!(isam=mi_open(buff,mode,(handle_locking?HA_OPEN_WAIT_IF_LOCKED:0)))) + { + my_errno= HA_ERR_WRONG_MRG_TABLE_DEF; goto err; + } if (!m_info) /* First file */ { key_parts=isam->s->base.key_parts; @@ -107,6 +110,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) files= 0; } m_info->reclength=isam->s->base.reclength; + min_keys= isam->s->base.keys; errpos=3; } m_info->open_tables[files].table= isam; @@ -122,6 +126,8 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) m_info->records+= isam->state->records; m_info->del+= isam->state->del; m_info->data_file_length+= isam->state->data_file_length; + if (min_keys > isam->s->base.keys) + min_keys= isam->s->base.keys; for (i=0; i < key_parts; i++) m_info->rec_per_key_part[i]+= (isam->s->state.rec_per_key_part[i] / m_info->tables); @@ -139,7 +145,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) my_errno=HA_ERR_RECORD_FILE_FULL; goto err; } - m_info->keys= files ? isam->s->base.keys : 0; + m_info->keys= min_keys; bzero((char*) &m_info->by_key,sizeof(m_info->by_key)); /* this works ok if the table list is empty */ diff --git a/storage/myisammrg/myrg_queue.c b/storage/myisammrg/myrg_queue.c index 2e600a526c0..74fdddc7748 100644 --- a/storage/myisammrg/myrg_queue.c +++ b/storage/myisammrg/myrg_queue.c @@ -65,6 +65,8 @@ int _myrg_init_queue(MYRG_INFO *info,int inx,enum ha_rkey_function search_flag) error=my_errno; } } + else + my_errno= error= HA_ERR_WRONG_INDEX; return error; } diff --git a/storage/ndb/Makefile.am b/storage/ndb/Makefile.am index 0b99ca39114..be9dc223d9d 100644 --- a/storage/ndb/Makefile.am +++ b/storage/ndb/Makefile.am @@ -1,6 +1,6 @@ SUBDIRS = src tools . 
include @ndb_opt_subdirs@ DIST_SUBDIRS = src tools include test docs -EXTRA_DIST = config ndbapi-examples +EXTRA_DIST = config ndbapi-examples plug.in DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in include $(top_srcdir)/storage/ndb/config/common.mk.am diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in index 15406ff4c3c..a427e5c820d 100644 --- a/storage/ndb/include/ndb_global.h.in +++ b/storage/ndb/include/ndb_global.h.in @@ -129,14 +129,10 @@ extern "C" { #include "ndb_init.h" -#ifdef SCO - #ifndef PATH_MAX #define PATH_MAX 1024 #endif -#endif /* SCO */ - #ifndef MIN #define MIN(x,y) (((x)<(y))?(x):(y)) #endif diff --git a/strings/ctype-big5.c b/strings/ctype-big5.c index 3f604abde2e..d2dacf2d0a3 100644 --- a/strings/ctype-big5.c +++ b/strings/ctype-big5.c @@ -6371,6 +6371,7 @@ static MY_CHARSET_HANDLER my_charset_big5_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c index e35aee79fd1..5758960ef6c 100644 --- a/strings/ctype-bin.c +++ b/strings/ctype-bin.c @@ -518,6 +518,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-cp932.c b/strings/ctype-cp932.c index a1c2812c6f6..4b9d09e06b5 100644 --- a/strings/ctype-cp932.c +++ b/strings/ctype-cp932.c @@ -5493,6 +5493,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-euc_kr.c b/strings/ctype-euc_kr.c index 6dc5ccdfe2a..82189d64b6c 100644 --- a/strings/ctype-euc_kr.c +++ b/strings/ctype-euc_kr.c @@ -8712,6 +8712,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-eucjpms.c b/strings/ctype-eucjpms.c index 0f59cc2b305..4d09bc0e01e 100644 --- a/strings/ctype-eucjpms.c +++ b/strings/ctype-eucjpms.c @@ -8678,6 +8678,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-gb2312.c b/strings/ctype-gb2312.c index 8d0bc80e695..8c85c0e79d3 100644 --- a/strings/ctype-gb2312.c +++ b/strings/ctype-gb2312.c @@ -5763,6 +5763,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-gbk.c b/strings/ctype-gbk.c index 7eb332da3bd..20deab9be6c 100644 --- a/strings/ctype-gbk.c +++ b/strings/ctype-gbk.c @@ -10016,6 +10016,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-latin1.c b/strings/ctype-latin1.c index 95ea87114d6..2c326a2826e 100644 --- a/strings/ctype-latin1.c +++ b/strings/ctype-latin1.c @@ -412,6 +412,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c index a9fd5b8852e..9b45d5a03b7 100644 --- a/strings/ctype-simple.c +++ b/strings/ctype-simple.c @@ -17,6 +17,7 @@ #include #include "m_string.h" #include "m_ctype.h" +#include "my_sys.h" /* Needed for MY_ERRNO_ERANGE */ #include #include "stdarg.h" @@ -1367,6 +1368,341 @@ int 
my_mb_ctype_8bit(CHARSET_INFO *cs, int *ctype, } +#undef ULONGLONG_MAX +/* + Needed under MetroWerks Compiler, since MetroWerks compiler does not + properly handle a constant expression containing a mod operator +*/ +#if defined(__NETWARE__) && defined(__MWERKS__) +static ulonglong ulonglong_max= ~(ulonglong) 0; +#define ULONGLONG_MAX ulonglong_max +#else +#define ULONGLONG_MAX (~(ulonglong) 0) +#endif /* __NETWARE__ && __MWERKS__ */ + + +#define CUTOFF (ULONGLONG_MAX / 10) +#define CUTLIM (ULONGLONG_MAX % 10) +#define DIGITS_IN_ULONGLONG 20 + +static ulonglong d10[DIGITS_IN_ULONGLONG]= +{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + 10000000000ULL, + 100000000000ULL, + 1000000000000ULL, + 10000000000000ULL, + 100000000000000ULL, + 1000000000000000ULL, + 10000000000000000ULL, + 100000000000000000ULL, + 1000000000000000000ULL, + 10000000000000000000ULL +}; + + +/* + + Convert a string to unsigned long long integer value + with rounding. + + SYNOPSYS + my_strntoull10_8bit() + cs in pointer to character set + str in pointer to the string to be converted + length in string length + unsigned_flag in whether the number is unsigned + endptr out pointer to the stop character + error out returned error code + + DESCRIPTION + This function takes the decimal representation of integer number + from string str and converts it to an signed or unsigned + long long integer value. + Space characters and tab are ignored. + A sign character might precede the digit characters. + The number may have any number of pre-zero digits. + The number may have decimal point and exponent. + Rounding is always done in "away from zero" style: + 0.5 -> 1 + -0.5 -> -1 + + The function stops reading the string str after "length" bytes + or at the first character that is not a part of correct number syntax: + + ::= + [ ] [ E [ ] ] + + ::= + [ [ ] ] + | + ::= ... + + RETURN VALUES + Value of string as a signed/unsigned longlong integer + + endptr cannot be NULL. The function will store the end pointer + to the stop character here. + + The error parameter contains information how things went: + 0 ok + ERANGE If the the value of the converted number is out of range + In this case the return value is: + - ULONGLONG_MAX if unsigned_flag and the number was too big + - 0 if unsigned_flag and the number was negative + - LONGLONG_MAX if no unsigned_flag and the number is too big + - LONGLONG_MIN if no unsigned_flag and the number it too big negative + + EDOM If the string didn't contain any digits. + In this case the return value is 0. +*/ + +ulonglong +my_strntoull10rnd_8bit(CHARSET_INFO *cs __attribute__((unused)), + const char *str, uint length, int unsigned_flag, + char **endptr, int *error) +{ + const char *dot, *end9, *beg, *end= str + length; + ulonglong ull; + ulong ul; + unsigned char ch; + int shift= 0, digits= 0, negative, addon; + + /* Skip leading spaces and tabs */ + for ( ; str < end && (*str == ' ' || *str == '\t') ; str++); + + if (str >= end) + goto ret_edom; + + if ((negative= (*str == '-')) || *str=='+') /* optional sign */ + { + if (++str == end) + goto ret_edom; + } + + beg= str; + end9= (str + 9) > end ? end : (str + 9); + /* Accumulate small number into ulong, for performance purposes */ + for (ul= 0 ; str < end9 && (ch= (unsigned char) (*str - '0')) < 10; str++) + { + ul= ul * 10 + ch; + } + + if (str >= end) /* Small number without dots and expanents */ + { + *endptr= (char*) str; + if (negative) + { + if (unsigned_flag) + { + *error= ul ? 
MY_ERRNO_ERANGE : 0; + return 0; + } + else + { + *error= 0; + return (ulonglong) (longlong) (long) -ul; + } + } + else + { + *error=0; + return (ulonglong) ul; + } + } + + digits= str - beg; + + /* Continue to accumulate into ulonglong */ + for (dot= NULL, ull= ul; str < end; str++) + { + if ((ch= (unsigned char) (*str - '0')) < 10) + { + if (ull < CUTOFF || (ull == CUTOFF && ch <= CUTLIM)) + { + ull= ull * 10 + ch; + digits++; + continue; + } + /* + Adding the next digit would overflow. + Remember the next digit in "addon", for rounding. + Scan all digits with an optional single dot. + */ + if (ull == CUTOFF) + { + ull= ULONGLONG_MAX; + addon= 1; + str++; + } + else + addon= (*str >= '5'); + for ( ; str < end && (ch= (unsigned char) (*str - '0')) < 10; str++) + { + if (!dot) + shift++; + } + if (str < end && *str == '.' && !dot) + { + str++; + for ( ; str < end && (ch= (unsigned char) (*str - '0')) < 10; str++); + } + goto exp; + } + + if (*str == '.') + { + if (dot) + { + /* The second dot character */ + addon= 0; + goto exp; + } + else + { + dot= str + 1; + } + continue; + } + + /* Unknown character, exit the loop */ + break; + } + shift= dot ? dot - str : 0; /* Right shift */ + addon= 0; + +exp: /* [ E [ ] ] */ + + if (!digits) + { + str= beg; + goto ret_edom; + } + + if (str < end && (*str == 'e' || *str == 'E')) + { + str++; + if (str < end) + { + int negative_exp, exp; + if ((negative_exp= (*str == '-')) || *str=='+') + { + if (++str == end) + goto ret_sign; + } + for (exp= 0 ; + str < end && (ch= (unsigned char) (*str - '0')) < 10; + str++) + { + exp= exp * 10 + ch; + } + shift+= negative_exp ? -exp : exp; + } + } + + if (shift == 0) /* No shift, check addon digit */ + { + if (addon) + { + if (ull == ULONGLONG_MAX) + goto ret_too_big; + ull++; + } + goto ret_sign; + } + + if (shift < 0) /* Right shift */ + { + ulonglong d, r; + + if (-shift >= DIGITS_IN_ULONGLONG) + goto ret_zero; /* Exponent is a big negative number, return 0 */ + + d= d10[-shift]; + r= (ull % d) * 2; + ull /= d; + if (r >= d) + ull++; + goto ret_sign; + } + + if (shift > DIGITS_IN_ULONGLONG) /* Huge left shift */ + { + if (!ull) + goto ret_sign; + goto ret_too_big; + } + + for ( ; shift > 0; shift--, ull*= 10) /* Left shift */ + { + if (ull > CUTOFF) + goto ret_too_big; /* Overflow, number too big */ + } + +ret_sign: + *endptr= (char*) str; + + if (!unsigned_flag) + { + if (negative) + { + if (ull > (ulonglong) LONGLONG_MIN) + { + *error= MY_ERRNO_ERANGE; + return (ulonglong) LONGLONG_MIN; + } + *error= 0; + return (ulonglong) -ull; + } + else + { + if (ull > (ulonglong) LONGLONG_MAX) + { + *error= MY_ERRNO_ERANGE; + return (ulonglong) LONGLONG_MAX; + } + *error= 0; + return ull; + } + } + + /* Unsigned number */ + if (negative && ull) + { + *error= MY_ERRNO_ERANGE; + return 0; + } + *error= 0; + return ull; + +ret_zero: + *endptr= (char*) str; + *error= 0; + return 0; + +ret_edom: + *endptr= (char*) str; + *error= MY_ERRNO_EDOM; + return 0; + +ret_too_big: + *endptr= (char*) str; + *error= MY_ERRNO_ERANGE; + return unsigned_flag ? + ULONGLONG_MAX : + negative ? 
(ulonglong) LONGLONG_MIN : (ulonglong) LONGLONG_MAX; +} + + /* Check if a constant can be propagated @@ -1448,6 +1784,7 @@ MY_CHARSET_HANDLER my_charset_8bit_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-sjis.c b/strings/ctype-sjis.c index 59156e444c1..0ce085a330e 100644 --- a/strings/ctype-sjis.c +++ b/strings/ctype-sjis.c @@ -4664,6 +4664,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-tis620.c b/strings/ctype-tis620.c index 98cc41dd26f..c5144d28b57 100644 --- a/strings/ctype-tis620.c +++ b/strings/ctype-tis620.c @@ -892,6 +892,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c index 2ab2fdc1657..df43eff3d73 100644 --- a/strings/ctype-ucs2.c +++ b/strings/ctype-ucs2.c @@ -974,6 +974,35 @@ double my_strntod_ucs2(CHARSET_INFO *cs __attribute__((unused)), } +ulonglong my_strntoull10rnd_ucs2(CHARSET_INFO *cs __attribute__((unused)), + const char *nptr, uint length, int unsign_fl, + char **endptr, int *err) +{ + char buf[256], *b= buf; + ulonglong res; + const uchar *end, *s= (const uchar*) nptr; + my_wc_t wc; + int cnv; + + /* Cut too long strings */ + if (length >= sizeof(buf)) + length= sizeof(buf)-1; + end= s + length; + + while ((cnv= cs->cset->mb_wc(cs,&wc,s,end)) > 0) + { + s+= cnv; + if (wc > (int) (uchar) 'e' || !wc) + break; /* Can't be a number part */ + *b++= (char) wc; + } + + res= my_strntoull10rnd_8bit(cs, buf, b - buf, unsign_fl, endptr, err); + *endptr= (char*) nptr + 2 * (uint) (*endptr- buf); + return res; +} + + /* This is a fast version optimized for the case of radix 10 / -10 */ @@ -1630,6 +1659,7 @@ MY_CHARSET_HANDLER my_charset_ucs2_handler= my_strntoull_ucs2, my_strntod_ucs2, my_strtoll10_ucs2, + my_strntoull10rnd_ucs2, my_scan_ucs2 }; diff --git a/strings/ctype-ujis.c b/strings/ctype-ujis.c index 06c4540e464..5474377631e 100644 --- a/strings/ctype-ujis.c +++ b/strings/ctype-ujis.c @@ -8546,6 +8546,7 @@ static MY_CHARSET_HANDLER my_charset_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c index 1a952a07042..e221297eb55 100644 --- a/strings/ctype-utf8.c +++ b/strings/ctype-utf8.c @@ -2551,6 +2551,7 @@ MY_CHARSET_HANDLER my_charset_utf8_handler= my_strntoull_8bit, my_strntod_8bit, my_strtoll10_8bit, + my_strntoull10rnd_8bit, my_scan_8bit }; diff --git a/strings/strtod.c b/strings/strtod.c index 7171a6e0801..ddb570718a0 100644 --- a/strings/strtod.c +++ b/strings/strtod.c @@ -30,7 +30,8 @@ #include "m_ctype.h" #define MAX_DBL_EXP 308 -#define MAX_RESULT_FOR_MAX_EXP 1.79769313486232 +#define MAX_RESULT_FOR_MAX_EXP 1.7976931348623157 +#define MIN_RESULT_FOR_MIN_EXP 2.225073858507202 static double scaler10[] = { 1.0, 1e10, 1e20, 1e30, 1e40, 1e50, 1e60, 1e70, 1e80, 1e90 }; @@ -57,10 +58,11 @@ double my_strtod(const char *str, char **end_ptr, int *error) { double result= 0.0; uint negative= 0, ndigits, dec_digits= 0, neg_exp= 0; - int exp= 0, digits_after_dec_point= 0; + int exp= 0, digits_after_dec_point= 0, tmp_exp; const char *old_str, *end= *end_ptr, *start_of_number; char next_char; my_bool overflow=0; + double scaler= 1.0; *error= 0; if (str >= end) @@ -91,6 +93,7 @@ double my_strtod(const char 
*str, char **end_ptr, int *error) while ((next_char= *str) >= '0' && next_char <= '9') { result= result*10.0 + (next_char - '0'); + scaler= scaler*10.0; if (++str == end) { next_char= 0; /* Found end of string */ @@ -114,6 +117,7 @@ double my_strtod(const char *str, char **end_ptr, int *error) { result= result*10.0 + (next_char - '0'); digits_after_dec_point++; + scaler= scaler*10.0; if (++str == end) { next_char= 0; @@ -144,39 +148,54 @@ double my_strtod(const char *str, char **end_ptr, int *error) } while (str < end && my_isdigit(&my_charset_latin1, *str)); } } - if ((exp= (neg_exp ? exp + digits_after_dec_point : - exp - digits_after_dec_point))) + tmp_exp= neg_exp ? exp + digits_after_dec_point : exp - digits_after_dec_point; + if (tmp_exp) { - double scaler; + int order; + /* + Check for underflow/overflow. + order is such an integer number that f = C * 10 ^ order, + where f is the resulting floating point number and 1 <= C < 10. + Here we compute the modulus + */ + order= exp + (neg_exp ? -1 : 1) * (ndigits - 1); + if (order < 0) + order= -order; + if (order >= MAX_DBL_EXP && result) + { + double c; + /* Compute modulus of C (see comment above) */ + c= result / scaler * 10.0; + if (neg_exp) + { + if (order > MAX_DBL_EXP || c < MIN_RESULT_FOR_MIN_EXP) + { + result= 0.0; + goto done; + } + } + else + { + if (order > MAX_DBL_EXP || c > MAX_RESULT_FOR_MAX_EXP) + { + overflow= 1; + goto done; + } + } + } + + exp= tmp_exp; if (exp < 0) { exp= -exp; neg_exp= 1; /* neg_exp was 0 before */ } - if (exp + ndigits >= MAX_DBL_EXP + 1 && result) - { - /* - This is not 100 % as we actually will give an owerflow for - 17E307 but not for 1.7E308 but lets cut some corners to make life - simpler - */ - if (exp + ndigits > MAX_DBL_EXP + 1 || - result >= MAX_RESULT_FOR_MAX_EXP) - { - if (neg_exp) - result= 0.0; - else - overflow= 1; - goto done; - } - } - scaler= 1.0; while (exp >= 100) { - scaler*= 1.0e100; + result= neg_exp ? result/1.0e100 : result*1.0e100; exp-= 100; } - scaler*= scaler10[exp/10]*scaler1[exp%10]; + scaler= scaler10[exp/10]*scaler1[exp%10]; if (neg_exp) result/= scaler; else diff --git a/strings/xml.c b/strings/xml.c index 51649dcb343..7f7c531d051 100644 --- a/strings/xml.c +++ b/strings/xml.c @@ -19,6 +19,7 @@ #include "my_xml.h" +#define MY_XML_UNKNOWN 'U' #define MY_XML_EOF 'E' #define MY_XML_STRING 'S' #define MY_XML_IDENT 'I' @@ -39,6 +40,46 @@ typedef struct xml_attr_st } MY_XML_ATTR; +/* + XML ctype: +*/ +#define MY_XML_ID0 0x01 /* Identifier initial character */ +#define MY_XML_ID1 0x02 /* Identifier medial character */ +#define MY_XML_SPC 0x08 /* Spacing character */ + + +/* + http://www.w3.org/TR/REC-xml/ + [4] NameChar ::= Letter | Digit | '.' | '-' | '_' | ':' | + CombiningChar | Extender + [5] Name ::= (Letter | '_' | ':') (NameChar)* +*/ + +static char my_xml_ctype[256]= +{ +/*00*/ 0,0,0,0,0,0,0,0,0,8,8,0,0,8,0,0, +/*10*/ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +/*20*/ 8,0,0,0,0,0,0,0,0,0,0,0,0,2,2,0, /* !"#$%&'()*+,-./ */ +/*30*/ 2,2,2,2,2,2,2,2,2,2,3,0,0,0,0,0, /* 0123456789:;<=>? 
*/ +/*40*/ 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, /* @ABCDEFGHIJKLMNO */ +/*50*/ 3,3,3,3,3,3,3,3,3,3,3,0,0,0,0,3, /* PQRSTUVWXYZ[\]^_ */ +/*60*/ 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, /* `abcdefghijklmno */ +/*70*/ 3,3,3,3,3,3,3,3,3,3,3,0,0,0,0,0, /* pqrstuvwxyz{|}~ */ +/*80*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +/*90*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +/*A0*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +/*B0*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +/*C0*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +/*D0*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +/*E0*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +/*F0*/ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3 +}; + +#define my_xml_is_space(c) (my_xml_ctype[(uchar) (c)] & MY_XML_SPC) +#define my_xml_is_id0(c) (my_xml_ctype[(uchar) (c)] & MY_XML_ID0) +#define my_xml_is_id1(c) (my_xml_ctype[(uchar) (c)] & MY_XML_ID1) + + static const char *lex2str(int lex) { switch(lex) @@ -56,13 +97,13 @@ static const char *lex2str(int lex) case MY_XML_QUESTION: return "'?'"; case MY_XML_EXCLAM: return "'!'"; } - return "UNKNOWN"; + return "unknown token"; } static void my_xml_norm_text(MY_XML_ATTR *a) { - for ( ; (a->beg < a->end) && strchr(" \t\r\n",a->beg[0]) ; a->beg++ ); - for ( ; (a->beg < a->end) && strchr(" \t\r\n",a->end[-1]) ; a->end-- ); + for ( ; (a->beg < a->end) && my_xml_is_space(a->beg[0]) ; a->beg++ ); + for ( ; (a->beg < a->end) && my_xml_is_space(a->end[-1]) ; a->end-- ); } @@ -70,7 +111,7 @@ static int my_xml_scan(MY_XML_PARSER *p,MY_XML_ATTR *a) { int lex; - for( ; ( p->cur < p->end) && strchr(" \t\r\n",p->cur[0]) ; p->cur++); + for( ; ( p->cur < p->end) && my_xml_is_space(p->cur[0]) ; p->cur++); if (p->cur >= p->end) { @@ -124,16 +165,17 @@ static int my_xml_scan(MY_XML_PARSER *p,MY_XML_ATTR *a) my_xml_norm_text(a); lex=MY_XML_STRING; } - else + else if (my_xml_is_id0(p->cur[0])) { - for(; - (p->cur < p->end) && !strchr("?'\"=/<> \t\r\n", p->cur[0]); - p->cur++) - {} + p->cur++; + while (p->cur < p->end && my_xml_is_id1(p->cur[0])) + p->cur++; a->end=p->cur; my_xml_norm_text(a); lex=MY_XML_IDENT; } + else + lex= MY_XML_UNKNOWN; #if 0 printf("LEX=%s[%d]\n",lex2str(lex),a->end-a->beg); diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 20bfd7188d2..29b19d86e89 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -165,16 +165,16 @@ They should be used with caution. %{see_base} -%package bench +%package test Requires: %{name}-client perl-DBI perl -Summary: MySQL - Benchmarks and test system +Summary: MySQL - Test suite Group: Applications/Databases -Provides: mysql-bench -Obsoletes: mysql-bench +Provides: mysql-test +Obsoletes: mysql-bench mysql-test AutoReqProv: no -%description bench -This package contains MySQL benchmark scripts and data. +%description test +This package contains the MySQL regression test suite. 
%{see_base} @@ -628,6 +628,9 @@ fi %attr(755, root, root) %{_bindir}/ndb_config %attr(755, root, root) %{_bindir}/ndb_error_reporter %attr(755, root, root) %{_bindir}/ndb_size.pl +%attr(755, root, root) %{_bindir}/ndb_print_backup_file +%attr(755, root, root) %{_bindir}/ndb_print_schema_file +%attr(755, root, root) %{_bindir}/ndb_print_sys_file %attr(-, root, root) %{_datadir}/mysql/ndb_size.tmpl %files ndb-extra @@ -635,12 +638,12 @@ fi %attr(755, root, root) %{_bindir}/ndb_drop_index %attr(755, root, root) %{_bindir}/ndb_drop_table %attr(755, root, root) %{_bindir}/ndb_delete_all +%attr(755, root, root) %{_sbindir}/ndb_cpcd %files devel %defattr(-, root, root, 0755) %doc mysql-release-%{mysql_version}/EXCEPTIONS-CLIENT %doc %attr(644, root, man) %{_mandir}/man1/mysql_config.1* -%attr(755, root, root) %{_bindir}/comp_err %attr(755, root, root) %{_bindir}/mysql_config %dir %attr(755, root, root) %{_includedir}/mysql %dir %attr(755, root, root) %{_libdir}/mysql @@ -667,6 +670,13 @@ fi %{_libdir}/libmysql*.so* %{_libdir}/libndb*.so* +%files test +%defattr(-, root, root, 0755) +%attr(-, root, root) %{_datadir}/mysql-test +%attr(755, root, root) %{_bindir}/mysql_client_test +%attr(755, root, root) %{_bindir}/mysql_client_test_embedded +%attr(755, root, root) %{_bindir}/mysqltest_embedded + %files embedded %defattr(-, root, root, 0755) %attr(644, root, root) %{_libdir}/mysql/libmysqld.a diff --git a/tests/Makefile.am b/tests/Makefile.am index fd7bc5a532e..e9bcb4693fd 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -50,14 +50,11 @@ INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \ LIBS = @CLIENT_LIBS@ LDADD = @CLIENT_EXTRA_LDFLAGS@ \ $(LIBMYSQLCLIENT_LA) -if HAVE_NETWARE + mysql_client_test_LDADD= $(LDADD) $(CXXLDFLAGS) mysql_client_test_SOURCES= mysql_client_test.c $(yassl_dummy_link_fix) \ - ../mysys/my_memmem.c -else -mysql_client_test_LDADD= $(LDADD) $(CXXLDFLAGS) -L../mysys -lmysys -mysql_client_test_SOURCES= mysql_client_test.c $(yassl_dummy_link_fix) -endif + $(top_srcdir)/mysys/my_memmem.c + insert_test_SOURCES= insert_test.c $(yassl_dummy_link_fix) select_test_SOURCES= select_test.c $(yassl_dummy_link_fix) insert_test_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index cd9ac675c25..148295398f4 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -15438,6 +15438,43 @@ static void test_bug21206() DBUG_VOID_RETURN; } +/* + Bug#21726: Incorrect result with multiple invocations of + LAST_INSERT_ID + + Test that client gets updated value of insert_id on UPDATE that uses + LAST_INSERT_ID(expr). 
+*/ +static void test_bug21726() +{ + const char *create_table[]= + { + "DROP TABLE IF EXISTS t1", + "CREATE TABLE t1 (i INT)", + "INSERT INTO t1 VALUES (1)", + }; + const char *update_query= "UPDATE t1 SET i= LAST_INSERT_ID(i + 1)"; + int rc; + my_ulonglong insert_id; + + DBUG_ENTER("test_bug21726"); + myheader("test_bug21726"); + + fill_tables(create_table, sizeof(create_table) / sizeof(*create_table)); + + rc= mysql_query(mysql, update_query); + myquery(rc); + insert_id= mysql_insert_id(mysql); + DIE_UNLESS(insert_id == 2); + + rc= mysql_query(mysql, update_query); + myquery(rc); + insert_id= mysql_insert_id(mysql); + DIE_UNLESS(insert_id == 3); + + DBUG_VOID_RETURN; +} + /* Read and parse arguments and MySQL options from my.cnf @@ -15713,8 +15750,9 @@ static struct my_tests_st my_tests[]= { { "test_bug17667", test_bug17667 }, { "test_bug15752", test_bug15752 }, { "test_mysql_insert_id", test_mysql_insert_id }, - { "test_bug19671", test_bug19671}, - { "test_bug21206", test_bug21206}, + { "test_bug19671", test_bug19671 }, + { "test_bug21206", test_bug21206 }, + { "test_bug21726", test_bug21726 }, { 0, 0 } }; diff --git a/unittest/Makefile.am b/unittest/Makefile.am index ca3291efde0..7f1dd525d3b 100644 --- a/unittest/Makefile.am +++ b/unittest/Makefile.am @@ -1,15 +1,12 @@ SUBDIRS = mytap . mysys examples -noinst_SCRIPTS = unit EXTRA_DIST = unit.pl CLEANFILES = unit -unittests = mytap mysys +unittests = mytap mysys @mysql_se_unittest_dirs@ @mysql_pg_unittest_dirs@ -test: unit - ./unit run $(unittests) - -unit: $(srcdir)/unit.pl - cp $(srcdir)/unit.pl $@ - chmod 700 $@ +test: + perl unit.pl run $(unittests) +test-verbose: + HARNESS_VERBOSE=1 perl unit.pl run $(unittests) diff --git a/unittest/README.txt b/unittest/README.txt index 5cbc6b02f05..e862a54e443 100644 --- a/unittest/README.txt +++ b/unittest/README.txt @@ -9,7 +9,9 @@ mytap Source for the MyTAP library mysys Tests for mysys components bitmap-t.c Unit test for MY_BITMAP base64-t.c Unit test for base64 encoding functions -examples Example unit tests +examples Example unit tests. + core-t.c Example of raising a signal in the middle of the test + THIS TEST WILL STOP ALL FURTHER TESTING! simple-t.c Example of a standard TAP unit test skip-t.c Example where some test points are skipped skip_all-t.c Example of a test where the entire test is skipped @@ -24,6 +26,9 @@ To make and execute all unit tests in the directory: make test +Observe that the tests in the examples/ directory are just various +examples of tests and are not expected to pass. + Adding unit tests ----------------- diff --git a/unittest/examples/Makefile.am b/unittest/examples/Makefile.am index f3c70b654a1..8aefb351220 100644 --- a/unittest/examples/Makefile.am +++ b/unittest/examples/Makefile.am @@ -5,5 +5,5 @@ AM_LDFLAGS = -L$(top_builddir)/unittest/mytap LDADD = -lmytap -noinst_PROGRAMS = simple-t skip-t todo-t skip_all-t no_plan-t +noinst_PROGRAMS = simple-t skip-t todo-t skip_all-t no_plan-t core-t diff --git a/unittest/examples/core-t.c b/unittest/examples/core-t.c new file mode 100644 index 00000000000..3572d72868b --- /dev/null +++ b/unittest/examples/core-t.c @@ -0,0 +1,19 @@ + +#include "my_config.h" + +#include +#include + +/* + This is a simple test to demonstrate what happens if a signal that + generates a core is raised. + + Note that this test will stop all further testing! 
+ */ + +int main() { + plan(3); + ok(1, "First test"); + abort(); + return exit_status(); +} diff --git a/unittest/mysys/my_atomic-t.c b/unittest/mysys/my_atomic-t.c index 4e2e496c3b1..fe93b0942ce 100644 --- a/unittest/mysys/my_atomic-t.c +++ b/unittest/mysys/my_atomic-t.c @@ -32,7 +32,7 @@ pthread_handler_t test_atomic_add_handler(void *arg) { int m=*(int *)arg; int32 x; - for (x=((int)(&m)); m ; m--) + for (x=((int)((long)(&m))); m ; m--) { x=x*m+0x87654321; my_atomic_rwlock_wrlock(&rwl); @@ -104,7 +104,7 @@ pthread_handler_t test_atomic_cas_handler(void *arg) { int m=*(int *)arg, ok; int32 x,y; - for (x=((int)(&m)); m ; m--) + for (x=((int)((long)(&m))); m ; m--) { my_atomic_rwlock_wrlock(&rwl); y=my_atomic_load32(&a32); @@ -140,13 +140,21 @@ void test_atomic(const char *test, pthread_handler handler, int n, int m) diag("Testing %s with %d threads, %d iterations... ", test, n, m); for (N=n ; n ; n--) - pthread_create(&t, &thr_attr, handler, &m); + { + if (pthread_create(&t, &thr_attr, handler, &m) != 0) + { + diag("Could not create thread"); + a32= 1; + goto err; + } + } pthread_mutex_lock(&mutex); while (N) pthread_cond_wait(&cond, &mutex); pthread_mutex_unlock(&mutex); now=my_getsystime()-now; +err: ok(a32 == 0, "tested %s in %g secs", test, ((double)now)/1e7); } @@ -170,6 +178,12 @@ int main() test_atomic("my_atomic_swap32", test_atomic_swap_handler, 100,10000); test_atomic("my_atomic_cas32", test_atomic_cas_handler, 100,10000); + /* + workaround until we know why it crashes randomly on some machine + (BUG#22320). + */ + sleep(2); + pthread_mutex_destroy(&mutex); pthread_cond_destroy(&cond); pthread_attr_destroy(&thr_attr); diff --git a/unittest/mytap/tap.c b/unittest/mytap/tap.c index d3f5013b4c9..17ec51863d9 100644 --- a/unittest/mytap/tap.c +++ b/unittest/mytap/tap.c @@ -20,10 +20,13 @@ #include "tap.h" +#include "my_config.h" + #include #include #include #include +#include /** Test data structure. @@ -70,7 +73,7 @@ emit_tap(int pass, char const *fmt, va_list ap) /** Emit a TAP directive. - TAP directives are comments after a have the form + TAP directives are comments after that have the form: @code ok 1 # skip reason for skipping @@ -96,6 +99,25 @@ emit_endl() fprintf(tapout, "\n"); } +static void +handle_core_signal(int signo) +{ + BAIL_OUT("Signal %d thrown", signo); +} + +void +BAIL_OUT(char const *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + fprintf(tapout, "Bail out! "); + vfprintf(tapout, fmt, ap); + emit_endl(); + va_end(ap); + exit(255); +} + + void diag(char const *fmt, ...) { @@ -103,14 +125,38 @@ diag(char const *fmt, ...) 
va_start(ap, fmt); fprintf(tapout, "# "); vfprintf(tapout, fmt, ap); - fprintf(tapout, "\n"); + emit_endl(); va_end(ap); } +typedef struct signal_entry { + int signo; + void (*handler)(int); +} signal_entry; + +static signal_entry install_signal[]= { + { SIGQUIT, handle_core_signal }, + { SIGILL, handle_core_signal }, + { SIGABRT, handle_core_signal }, + { SIGFPE, handle_core_signal }, + { SIGSEGV, handle_core_signal }, + { SIGBUS, handle_core_signal }, + { SIGXCPU, handle_core_signal }, + { SIGXFSZ, handle_core_signal }, + { SIGSYS, handle_core_signal }, + { SIGTRAP, handle_core_signal } +}; void plan(int const count) { + /* + Install signal handler + */ + size_t i; + for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i) + signal(install_signal[i].signo, install_signal[i].handler); + g_test.plan= count; switch (count) { diff --git a/unittest/mytap/tap.h b/unittest/mytap/tap.h index cc1d0926012..51b8c7df04d 100644 --- a/unittest/mytap/tap.h +++ b/unittest/mytap/tap.h @@ -24,8 +24,8 @@ #include "my_global.h" /* - @defgroup MyTAP MySQL support for performing unit tests according to TAP. - + @defgroup MyTAP MySQL support for performing unit tests according to + the Test Anything Protocol (TAP). */ #define NO_PLAN (0) @@ -36,6 +36,7 @@ @internal We are using the "typedef struct X { ... } X" idiom to create class/struct X both in C and C++. */ + typedef struct TEST_DATA { /** Number of tests that is planned to execute. @@ -67,8 +68,13 @@ extern "C" { it was called with NO_PLAN, i.e., the test plan will be printed after all the test lines. + The plan() function will install signal handlers for all signals + that generate a core, so if you want to override these signals, do + it after you have called the plan() function. + @param count The planned number of tests to run. */ + void plan(int count); @@ -87,9 +93,11 @@ void plan(int count); @param fmt Format string in printf() format. NULL is allowed, in which case nothing is printed. */ + void ok(int pass, char const *fmt, ...) __attribute__((format(printf,2,3))); + /** Skip a determined number of tests. @@ -114,6 +122,7 @@ void ok(int pass, char const *fmt, ...) @param how_many Number of tests that are to be skipped. @param reason A reason for skipping the tests */ + void skip(int how_many, char const *reason, ...) __attribute__((format(printf,2,3))); @@ -130,22 +139,47 @@ void skip(int how_many, char const *reason, ...) for (i = 0 ; i < 2 ; ++i) ok(duck[i] == paddling, "is duck %d paddling?", i); } + @endcode @see skip - - @endcode */ + #define SKIP_BLOCK_IF(SKIP_IF_TRUE, COUNT, REASON) \ if (SKIP_IF_TRUE) skip((COUNT),(REASON)); else + /** Print a diagnostics message. @param fmt Diagnostics message in printf() format. */ + void diag(char const *fmt, ...) __attribute__((format(printf,1,2))); + +/** + Print a bail out message. + + A bail out message can be issued when no further testing can be + done, e.g., when there are missing dependencies. + + The test will exit with status 255. This function does not return. + + @code + BAIL_OUT("Lost connection to server %s", server_name); + @endcode + + @note A bail out message is printed if a signal that generates a + core is raised. + + @param fmt Bail out message in printf() format. +*/ + +void BAIL_OUT(char const *fmt, ...) + __attribute__((noreturn, format(printf,1,2))); + + /** Print summary report and return exit status. @@ -161,6 +195,7 @@ void diag(char const *fmt, ...) @returns EXIT_SUCCESS if all tests passed, EXIT_FAILURE if one or more tests failed. 
*/ + int exit_status(void); @@ -171,9 +206,11 @@ int exit_status(void); automatically call exit(), so there is no need to have checks around it. */ + void skip_all(char const *reason, ...) __attribute__((noreturn, format(printf, 1, 2))); + /** Start section of tests that are not yet ready. @@ -194,14 +231,18 @@ void skip_all(char const *reason, ...) @param message Message that will be printed before the todo tests. */ + void todo_start(char const *message, ...) - __attribute__((format (printf, 1, 2))); + __attribute__((format(printf, 1, 2))); + /** End a section of tests that are not yet ready. */ + void todo_end(); + #ifdef __cplusplus } #endif diff --git a/vio/viosocket.c b/vio/viosocket.c index da80c5b536f..34ef6159a86 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -275,7 +275,7 @@ int vio_close(Vio * vio) if (vio->type != VIO_CLOSED) { DBUG_ASSERT(vio->sd >= 0); - if (shutdown(vio->sd,2)) + if (shutdown(vio->sd, SHUT_RDWR)) r= -1; if (closesocket(vio->sd)) r= -1; diff --git a/vio/viossl.c b/vio/viossl.c index e869493c604..180c7a50fa7 100644 --- a/vio/viossl.c +++ b/vio/viossl.c @@ -167,10 +167,9 @@ int sslaccept(struct st_VioSSLFd *ptr, Vio *vio, long timeout) SSL_clear(ssl); SSL_SESSION_set_timeout(SSL_get_session(ssl), timeout); SSL_set_fd(ssl, vio->sd); - SSL_set_accept_state(ssl); - if (SSL_do_handshake(ssl) < 1) + if (SSL_accept(ssl) < 1) { - DBUG_PRINT("error", ("SSL_do_handshake failure")); + DBUG_PRINT("error", ("SSL_accept failure")); report_errors(ssl); SSL_free(ssl); vio->ssl_arg= 0; @@ -242,10 +241,9 @@ int sslconnect(struct st_VioSSLFd *ptr, Vio *vio, long timeout) SSL_clear(ssl); SSL_SESSION_set_timeout(SSL_get_session(ssl), timeout); SSL_set_fd(ssl, vio->sd); - SSL_set_connect_state(ssl); - if (SSL_do_handshake(ssl) < 1) + if (SSL_connect(ssl) < 1) { - DBUG_PRINT("error", ("SSL_do_handshake failure")); + DBUG_PRINT("error", ("SSL_connect failure")); report_errors(ssl); SSL_free(ssl); vio->ssl_arg= 0; diff --git a/win/Makefile.am b/win/Makefile.am index 2f4aa626a93..05c01b61360 100644 --- a/win/Makefile.am +++ b/win/Makefile.am @@ -15,7 +15,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ## Process this file with automake to create Makefile.in -EXTRA_DIST = build-vs71.bat build-vs8.bat configure.js README +EXTRA_DIST = build-vs71.bat build-vs8.bat build-vs8_x64.bat configure.js README # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/zlib/Makefile.am b/zlib/Makefile.am index 40258ec589a..75100dc3f7b 100644 --- a/zlib/Makefile.am +++ b/zlib/Makefile.am @@ -18,6 +18,8 @@ INCLUDES= -I$(top_builddir)/include -I$(top_srcdir)/include +LIBS= $(NON_THREADED_LIBS) + pkglib_LTLIBRARIES=libz.la libz_la_LDFLAGS= -version-info 3:3:2
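The new my_strntoull10rnd_* entries added to the MY_CHARSET_HANDLER tables in the strings/ctype-*.c hunks above convert a decimal string (optional sign, fraction and exponent) to an integer with "away from zero" rounding, as documented in the ctype-simple.c comment block. The standalone C sketch below is illustrative only: it is not part of the patch and does not reuse the patch's implementation; it approximates the documented rounding behaviour for the unsigned case using plain libc strtod, and the helper name str_to_ull_rounded is hypothetical.

/*
  Illustrative sketch only -- NOT the patch's my_strntoull10rnd_8bit.
  Demonstrates the documented "round away from zero" semantics,
  e.g. "0.5" -> 1 and "1.29e1" -> 13.  Build with: cc rnd_demo.c -lm
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/*
  Parse a decimal string (sign, fraction and exponent allowed via strtod)
  and round it to an unsigned integer, away from zero.  Negative input is
  clamped to 0, mirroring the unsigned_flag behaviour described in the
  patch's comment block.
*/
static unsigned long long str_to_ull_rounded(const char *s)
{
  double d= strtod(s, NULL);
  if (d <= 0.0)
    return 0ULL;                              /* unsigned: negatives -> 0 */
  return (unsigned long long) floor(d + 0.5); /* away from zero for d > 0 */
}

int main(void)
{
  const char *samples[]= { "0.5", "1.29e1", "  +7.49", "-3.2", "123.456" };
  size_t i;
  for (i= 0; i < sizeof(samples) / sizeof(*samples); i++)
    printf("%-10s -> %llu\n", samples[i], str_to_ull_rounded(samples[i]));
  return 0;
}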